1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/main.c - Where the driver meets power management.
4  *
5  * Copyright (c) 2003 Patrick Mochel
6  * Copyright (c) 2003 Open Source Development Lab
7  *
8  * The driver model core calls device_pm_add() when a device is registered.
9  * This will initialize the embedded device_pm_info object in the device
10  * and add it to the list of power-controlled devices. sysfs entries for
11  * controlling device power management will also be added.
12  *
13  * A separate list is used for keeping track of power info, because the power
14  * domain dependencies may differ from the ancestral dependencies that the
15  * subsystem list maintains.
16  */
17 
18 #define pr_fmt(fmt) "PM: " fmt
19 #define dev_fmt pr_fmt
20 
21 #include <linux/device.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/sched/debug.h>
31 #include <linux/async.h>
32 #include <linux/suspend.h>
33 #include <trace/events/power.h>
34 #include <linux/cpufreq.h>
35 #include <linux/cpuidle.h>
36 #include <linux/devfreq.h>
37 #include <linux/timer.h>
38 
39 #include "../base.h"
40 #include "power.h"
41 
42 typedef int (*pm_callback_t)(struct device *);
43 
44 #define list_for_each_entry_rcu_locked(pos, head, member) \
45 	list_for_each_entry_rcu(pos, head, member, \
46 			device_links_read_lock_held())
47 
48 /*
49  * The entries in the dpm_list list are in a depth first order, simply
50  * because children are guaranteed to be discovered after parents, and
51  * are inserted at the back of the list on discovery.
52  *
53  * Since device_pm_add() may be called with a device lock held,
54  * we must never try to acquire a device lock while holding
55  * dpm_list_mutex.
56  */
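
/*
 * A rough sketch of the pattern the list walkers below follow in order to
 * honor that rule (modeled on dpm_resume() and dpm_suspend(); not a
 * literal excerpt):
 *
 *        mutex_lock(&dpm_list_mtx);
 *        dev = to_device(dpm_list.next);
 *        get_device(dev);
 *        mutex_unlock(&dpm_list_mtx);
 *
 *        device_lock(dev);        // never acquired while dpm_list_mtx is held
 *        ...
 *        device_unlock(dev);
 *        put_device(dev);
 */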
57 
58 LIST_HEAD(dpm_list);
59 static LIST_HEAD(dpm_prepared_list);
60 static LIST_HEAD(dpm_suspended_list);
61 static LIST_HEAD(dpm_late_early_list);
62 static LIST_HEAD(dpm_noirq_list);
63 
64 struct suspend_stats suspend_stats;
65 static DEFINE_MUTEX(dpm_list_mtx);
66 static pm_message_t pm_transition;
67 
68 static int async_error;
69 
static const char *pm_verb(int event)
71 {
72 	switch (event) {
73 	case PM_EVENT_SUSPEND:
74 		return "suspend";
75 	case PM_EVENT_RESUME:
76 		return "resume";
77 	case PM_EVENT_FREEZE:
78 		return "freeze";
79 	case PM_EVENT_QUIESCE:
80 		return "quiesce";
81 	case PM_EVENT_HIBERNATE:
82 		return "hibernate";
83 	case PM_EVENT_THAW:
84 		return "thaw";
85 	case PM_EVENT_RESTORE:
86 		return "restore";
87 	case PM_EVENT_RECOVER:
88 		return "recover";
89 	default:
90 		return "(unknown PM event)";
91 	}
92 }
93 
94 /**
95  * device_pm_sleep_init - Initialize system suspend-related device fields.
96  * @dev: Device object being initialized.
97  */
void device_pm_sleep_init(struct device *dev)
99 {
100 	dev->power.is_prepared = false;
101 	dev->power.is_suspended = false;
102 	dev->power.is_noirq_suspended = false;
103 	dev->power.is_late_suspended = false;
104 	init_completion(&dev->power.completion);
105 	complete_all(&dev->power.completion);
106 	dev->power.wakeup = NULL;
107 	INIT_LIST_HEAD(&dev->power.entry);
108 }
109 
110 /**
111  * device_pm_lock - Lock the list of active devices used by the PM core.
112  */
void device_pm_lock(void)
114 {
115 	mutex_lock(&dpm_list_mtx);
116 }
117 
118 /**
119  * device_pm_unlock - Unlock the list of active devices used by the PM core.
120  */
void device_pm_unlock(void)
122 {
123 	mutex_unlock(&dpm_list_mtx);
124 }
125 
126 /**
127  * device_pm_add - Add a device to the PM core's list of active devices.
128  * @dev: Device to add to the list.
129  */
void device_pm_add(struct device *dev)
131 {
132 	/* Skip PM setup/initialization. */
133 	if (device_pm_not_required(dev))
134 		return;
135 
136 	pr_debug("Adding info for %s:%s\n",
137 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
138 	device_pm_check_callbacks(dev);
139 	mutex_lock(&dpm_list_mtx);
140 	if (dev->parent && dev->parent->power.is_prepared)
141 		dev_warn(dev, "parent %s should not be sleeping\n",
142 			dev_name(dev->parent));
143 	list_add_tail(&dev->power.entry, &dpm_list);
144 	dev->power.in_dpm_list = true;
145 	mutex_unlock(&dpm_list_mtx);
146 }
147 
148 /**
149  * device_pm_remove - Remove a device from the PM core's list of active devices.
150  * @dev: Device to be removed from the list.
151  */
void device_pm_remove(struct device *dev)
153 {
154 	if (device_pm_not_required(dev))
155 		return;
156 
157 	pr_debug("Removing info for %s:%s\n",
158 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
159 	complete_all(&dev->power.completion);
160 	mutex_lock(&dpm_list_mtx);
161 	list_del_init(&dev->power.entry);
162 	dev->power.in_dpm_list = false;
163 	mutex_unlock(&dpm_list_mtx);
164 	device_wakeup_disable(dev);
165 	pm_runtime_remove(dev);
166 	device_pm_check_callbacks(dev);
167 }
168 
169 /**
170  * device_pm_move_before - Move device in the PM core's list of active devices.
171  * @deva: Device to move in dpm_list.
172  * @devb: Device @deva should come before.
173  */
void device_pm_move_before(struct device *deva, struct device *devb)
175 {
176 	pr_debug("Moving %s:%s before %s:%s\n",
177 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
178 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
179 	/* Delete deva from dpm_list and reinsert before devb. */
180 	list_move_tail(&deva->power.entry, &devb->power.entry);
181 }
182 
183 /**
184  * device_pm_move_after - Move device in the PM core's list of active devices.
185  * @deva: Device to move in dpm_list.
186  * @devb: Device @deva should come after.
187  */
void device_pm_move_after(struct device *deva, struct device *devb)
189 {
190 	pr_debug("Moving %s:%s after %s:%s\n",
191 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
192 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
193 	/* Delete deva from dpm_list and reinsert after devb. */
194 	list_move(&deva->power.entry, &devb->power.entry);
195 }
196 
197 /**
198  * device_pm_move_last - Move device to end of the PM core's list of devices.
199  * @dev: Device to move in dpm_list.
200  */
void device_pm_move_last(struct device *dev)
202 {
203 	pr_debug("Moving %s:%s to end of list\n",
204 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
205 	list_move_tail(&dev->power.entry, &dpm_list);
206 }
207 
static ktime_t initcall_debug_start(struct device *dev, void *cb)
209 {
210 	if (!pm_print_times_enabled)
211 		return 0;
212 
213 	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
214 		 task_pid_nr(current),
215 		 dev->parent ? dev_name(dev->parent) : "none");
216 	return ktime_get();
217 }
218 
static void initcall_debug_report(struct device *dev, ktime_t calltime,
220 				  void *cb, int error)
221 {
222 	ktime_t rettime;
223 	s64 nsecs;
224 
225 	if (!pm_print_times_enabled)
226 		return;
227 
228 	rettime = ktime_get();
229 	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
230 
231 	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
232 		 (unsigned long long)nsecs >> 10);
233 }
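
/*
 * Note: the two messages above are emitted only when PM initcall debugging
 * is enabled, e.g. by booting with "initcall_debug" or (sketch) via:
 *
 *        # echo 1 > /sys/power/pm_print_times
 */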
234 
235 /**
236  * dpm_wait - Wait for a PM operation to complete.
237  * @dev: Device to wait for.
238  * @async: If unset, wait only if the device's power.async_suspend flag is set.
239  */
static void dpm_wait(struct device *dev, bool async)
241 {
242 	if (!dev)
243 		return;
244 
245 	if (async || (pm_async_enabled && dev->power.async_suspend))
246 		wait_for_completion(&dev->power.completion);
247 }
248 
static int dpm_wait_fn(struct device *dev, void *async_ptr)
250 {
251 	dpm_wait(dev, *((bool *)async_ptr));
252 	return 0;
253 }
254 
static void dpm_wait_for_children(struct device *dev, bool async)
256 {
257        device_for_each_child(dev, &async, dpm_wait_fn);
258 }
259 
static void dpm_wait_for_suppliers(struct device *dev, bool async)
261 {
262 	struct device_link *link;
263 	int idx;
264 
265 	idx = device_links_read_lock();
266 
267 	/*
268 	 * If the supplier goes away right after we've checked the link to it,
269 	 * we'll wait for its completion to change the state, but that's fine,
270 	 * because the only things that will block as a result are the SRCU
271 	 * callbacks freeing the link objects for the links in the list we're
272 	 * walking.
273 	 */
274 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
275 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
276 			dpm_wait(link->supplier, async);
277 
278 	device_links_read_unlock(idx);
279 }
280 
static bool dpm_wait_for_superior(struct device *dev, bool async)
282 {
283 	struct device *parent;
284 
285 	/*
286 	 * If the device is resumed asynchronously and the parent's callback
287 	 * deletes both the device and the parent itself, the parent object may
288 	 * be freed while this function is running, so avoid that by reference
289 	 * counting the parent once more unless the device has been deleted
290 	 * already (in which case return right away).
291 	 */
292 	mutex_lock(&dpm_list_mtx);
293 
294 	if (!device_pm_initialized(dev)) {
295 		mutex_unlock(&dpm_list_mtx);
296 		return false;
297 	}
298 
299 	parent = get_device(dev->parent);
300 
301 	mutex_unlock(&dpm_list_mtx);
302 
303 	dpm_wait(parent, async);
304 	put_device(parent);
305 
306 	dpm_wait_for_suppliers(dev, async);
307 
308 	/*
309 	 * If the parent's callback has deleted the device, attempting to resume
310 	 * it would be invalid, so avoid doing that then.
311 	 */
312 	return device_pm_initialized(dev);
313 }
314 
static void dpm_wait_for_consumers(struct device *dev, bool async)
316 {
317 	struct device_link *link;
318 	int idx;
319 
320 	idx = device_links_read_lock();
321 
322 	/*
323 	 * The status of a device link can only be changed from "dormant" by a
324 	 * probe, but that cannot happen during system suspend/resume.  In
325 	 * theory it can change to "dormant" at that time, but then it is
326 	 * reasonable to wait for the target device anyway (eg. if it goes
327 	 * away, it's better to wait for it to go away completely and then
328 	 * continue instead of trying to continue in parallel with its
329 	 * unregistration).
330 	 */
331 	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
332 		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
333 			dpm_wait(link->consumer, async);
334 
335 	device_links_read_unlock(idx);
336 }
337 
static void dpm_wait_for_subordinate(struct device *dev, bool async)
339 {
340 	dpm_wait_for_children(dev, async);
341 	dpm_wait_for_consumers(dev, async);
342 }
343 
344 /**
345  * pm_op - Return the PM operation appropriate for given PM event.
346  * @ops: PM operations to choose from.
347  * @state: PM transition of the system being carried out.
348  */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
350 {
351 	switch (state.event) {
352 #ifdef CONFIG_SUSPEND
353 	case PM_EVENT_SUSPEND:
354 		return ops->suspend;
355 	case PM_EVENT_RESUME:
356 		return ops->resume;
357 #endif /* CONFIG_SUSPEND */
358 #ifdef CONFIG_HIBERNATE_CALLBACKS
359 	case PM_EVENT_FREEZE:
360 	case PM_EVENT_QUIESCE:
361 		return ops->freeze;
362 	case PM_EVENT_HIBERNATE:
363 		return ops->poweroff;
364 	case PM_EVENT_THAW:
365 	case PM_EVENT_RECOVER:
366 		return ops->thaw;
367 	case PM_EVENT_RESTORE:
368 		return ops->restore;
369 #endif /* CONFIG_HIBERNATE_CALLBACKS */
370 	}
371 
372 	return NULL;
373 }
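
/*
 * Illustration only (the foo_* names are hypothetical): given
 *
 *        static const struct dev_pm_ops foo_pm_ops = {
 *                SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *        };
 *
 * pm_op(&foo_pm_ops, PMSG_SUSPEND) returns foo_suspend and
 * pm_op(&foo_pm_ops, PMSG_RESUME) returns foo_resume.  The same two
 * callbacks also serve the freeze/thaw/poweroff/restore events, because
 * SET_SYSTEM_SLEEP_PM_OPS reuses them for the hibernation transitions.
 */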
374 
375 /**
376  * pm_late_early_op - Return the PM operation appropriate for given PM event.
377  * @ops: PM operations to choose from.
378  * @state: PM transition of the system being carried out.
379  *
380  * Runtime PM is disabled for @dev while this function is being executed.
381  */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
383 				      pm_message_t state)
384 {
385 	switch (state.event) {
386 #ifdef CONFIG_SUSPEND
387 	case PM_EVENT_SUSPEND:
388 		return ops->suspend_late;
389 	case PM_EVENT_RESUME:
390 		return ops->resume_early;
391 #endif /* CONFIG_SUSPEND */
392 #ifdef CONFIG_HIBERNATE_CALLBACKS
393 	case PM_EVENT_FREEZE:
394 	case PM_EVENT_QUIESCE:
395 		return ops->freeze_late;
396 	case PM_EVENT_HIBERNATE:
397 		return ops->poweroff_late;
398 	case PM_EVENT_THAW:
399 	case PM_EVENT_RECOVER:
400 		return ops->thaw_early;
401 	case PM_EVENT_RESTORE:
402 		return ops->restore_early;
403 #endif /* CONFIG_HIBERNATE_CALLBACKS */
404 	}
405 
406 	return NULL;
407 }
408 
409 /**
410  * pm_noirq_op - Return the PM operation appropriate for given PM event.
411  * @ops: PM operations to choose from.
412  * @state: PM transition of the system being carried out.
413  *
414  * The driver of @dev will not receive interrupts while this function is being
415  * executed.
416  */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
418 {
419 	switch (state.event) {
420 #ifdef CONFIG_SUSPEND
421 	case PM_EVENT_SUSPEND:
422 		return ops->suspend_noirq;
423 	case PM_EVENT_RESUME:
424 		return ops->resume_noirq;
425 #endif /* CONFIG_SUSPEND */
426 #ifdef CONFIG_HIBERNATE_CALLBACKS
427 	case PM_EVENT_FREEZE:
428 	case PM_EVENT_QUIESCE:
429 		return ops->freeze_noirq;
430 	case PM_EVENT_HIBERNATE:
431 		return ops->poweroff_noirq;
432 	case PM_EVENT_THAW:
433 	case PM_EVENT_RECOVER:
434 		return ops->thaw_noirq;
435 	case PM_EVENT_RESTORE:
436 		return ops->restore_noirq;
437 #endif /* CONFIG_HIBERNATE_CALLBACKS */
438 	}
439 
440 	return NULL;
441 }
442 
static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
444 {
445 	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
446 		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
447 		", may wakeup" : "");
448 }
449 
static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
451 			int error)
452 {
453 	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
454 		error);
455 }
456 
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
458 			  const char *info)
459 {
460 	ktime_t calltime;
461 	u64 usecs64;
462 	int usecs;
463 
464 	calltime = ktime_get();
465 	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
466 	do_div(usecs64, NSEC_PER_USEC);
467 	usecs = usecs64;
468 	if (usecs == 0)
469 		usecs = 1;
470 
471 	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
472 		  info ?: "", info ? " " : "", pm_verb(state.event),
473 		  error ? "aborted" : "complete",
474 		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
475 }
476 
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
478 			    pm_message_t state, const char *info)
479 {
480 	ktime_t calltime;
481 	int error;
482 
483 	if (!cb)
484 		return 0;
485 
486 	calltime = initcall_debug_start(dev, cb);
487 
488 	pm_dev_dbg(dev, state, info);
489 	trace_device_pm_callback_start(dev, info, state.event);
490 	error = cb(dev);
491 	trace_device_pm_callback_end(dev, error);
492 	suspend_report_result(cb, error);
493 
494 	initcall_debug_report(dev, calltime, cb, error);
495 
496 	return error;
497 }
498 
499 #ifdef CONFIG_DPM_WATCHDOG
500 struct dpm_watchdog {
501 	struct device		*dev;
502 	struct task_struct	*tsk;
503 	struct timer_list	timer;
504 };
505 
506 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
507 	struct dpm_watchdog wd
508 
509 /**
510  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
511  * @t: The timer that PM watchdog depends on.
512  *
513  * Called when a driver has timed out suspending or resuming.
514  * There's not much we can do here to recover so panic() to
515  * capture a crash-dump in pstore.
516  */
static void dpm_watchdog_handler(struct timer_list *t)
518 {
519 	struct dpm_watchdog *wd = from_timer(wd, t, timer);
520 
521 	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
522 	show_stack(wd->tsk, NULL, KERN_EMERG);
523 	panic("%s %s: unrecoverable failure\n",
524 		dev_driver_string(wd->dev), dev_name(wd->dev));
525 }
526 
527 /**
528  * dpm_watchdog_set - Enable pm watchdog for given device.
529  * @wd: Watchdog. Must be allocated on the stack.
530  * @dev: Device to handle.
531  */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
533 {
534 	struct timer_list *timer = &wd->timer;
535 
536 	wd->dev = dev;
537 	wd->tsk = current;
538 
539 	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
540 	/* use same timeout value for both suspend and resume */
541 	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
542 	add_timer(timer);
543 }
544 
545 /**
546  * dpm_watchdog_clear - Disable suspend/resume watchdog.
547  * @wd: Watchdog to disable.
548  */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
550 {
551 	struct timer_list *timer = &wd->timer;
552 
553 	del_timer_sync(timer);
554 	destroy_timer_on_stack(timer);
555 }
556 #else
557 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
558 #define dpm_watchdog_set(x, y)
559 #define dpm_watchdog_clear(x)
560 #endif
561 
562 /*------------------------- Resume routines -------------------------*/
563 
564 /**
565  * dev_pm_skip_resume - System-wide device resume optimization check.
566  * @dev: Target device.
567  *
568  * Return:
569  * - %false if the transition under way is RESTORE.
570  * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
571  * - The logical negation of %power.must_resume otherwise (that is, when the
572  *   transition under way is RESUME).
573  */
bool dev_pm_skip_resume(struct device *dev)
575 {
576 	if (pm_transition.event == PM_EVENT_RESTORE)
577 		return false;
578 
579 	if (pm_transition.event == PM_EVENT_THAW)
580 		return dev_pm_skip_suspend(dev);
581 
582 	return !dev->power.must_resume;
583 }
584 
585 /**
586  * __device_resume_noirq - Execute a "noirq resume" callback for given device.
587  * @dev: Device to handle.
588  * @state: PM transition of the system being carried out.
589  * @async: If true, the device is being resumed asynchronously.
590  *
591  * The driver of @dev will not receive interrupts while this function is being
592  * executed.
593  */
static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
595 {
596 	pm_callback_t callback = NULL;
597 	const char *info = NULL;
598 	bool skip_resume;
599 	int error = 0;
600 
601 	TRACE_DEVICE(dev);
602 	TRACE_RESUME(0);
603 
604 	if (dev->power.syscore || dev->power.direct_complete)
605 		goto Out;
606 
607 	if (!dev->power.is_noirq_suspended)
608 		goto Out;
609 
610 	if (!dpm_wait_for_superior(dev, async))
611 		goto Out;
612 
613 	skip_resume = dev_pm_skip_resume(dev);
614 	/*
615 	 * If the driver callback is skipped below or by the middle layer
616 	 * callback and device_resume_early() also skips the driver callback for
617 	 * this device later, it needs to appear as "suspended" to PM-runtime,
618 	 * so change its status accordingly.
619 	 *
620 	 * Otherwise, the device is going to be resumed, so set its PM-runtime
621 	 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
622 	 * to avoid confusing drivers that don't use it.
623 	 */
624 	if (skip_resume)
625 		pm_runtime_set_suspended(dev);
626 	else if (dev_pm_skip_suspend(dev))
627 		pm_runtime_set_active(dev);
628 
629 	if (dev->pm_domain) {
630 		info = "noirq power domain ";
631 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
632 	} else if (dev->type && dev->type->pm) {
633 		info = "noirq type ";
634 		callback = pm_noirq_op(dev->type->pm, state);
635 	} else if (dev->class && dev->class->pm) {
636 		info = "noirq class ";
637 		callback = pm_noirq_op(dev->class->pm, state);
638 	} else if (dev->bus && dev->bus->pm) {
639 		info = "noirq bus ";
640 		callback = pm_noirq_op(dev->bus->pm, state);
641 	}
642 	if (callback)
643 		goto Run;
644 
645 	if (skip_resume)
646 		goto Skip;
647 
648 	if (dev->driver && dev->driver->pm) {
649 		info = "noirq driver ";
650 		callback = pm_noirq_op(dev->driver->pm, state);
651 	}
652 
653 Run:
654 	error = dpm_run_callback(callback, dev, state, info);
655 
656 Skip:
657 	dev->power.is_noirq_suspended = false;
658 
659 Out:
660 	complete_all(&dev->power.completion);
661 	TRACE_RESUME(error);
662 
663 	if (error) {
664 		suspend_stats.failed_resume_noirq++;
665 		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
666 		dpm_save_failed_dev(dev_name(dev));
667 		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
668 	}
669 }
670 
static bool is_async(struct device *dev)
672 {
673 	return dev->power.async_suspend && pm_async_enabled
674 		&& !pm_trace_is_enabled();
675 }
676 
static bool dpm_async_fn(struct device *dev, async_func_t func)
678 {
679 	reinit_completion(&dev->power.completion);
680 
681 	if (!is_async(dev))
682 		return false;
683 
684 	get_device(dev);
685 
686 	if (async_schedule_dev_nocall(func, dev))
687 		return true;
688 
689 	put_device(dev);
690 
691 	return false;
692 }
693 
static void async_resume_noirq(void *data, async_cookie_t cookie)
695 {
696 	struct device *dev = data;
697 
698 	__device_resume_noirq(dev, pm_transition, true);
699 	put_device(dev);
700 }
701 
static void device_resume_noirq(struct device *dev)
703 {
704 	if (dpm_async_fn(dev, async_resume_noirq))
705 		return;
706 
707 	__device_resume_noirq(dev, pm_transition, false);
708 }
709 
static void dpm_noirq_resume_devices(pm_message_t state)
711 {
712 	struct device *dev;
713 	ktime_t starttime = ktime_get();
714 
715 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
716 	mutex_lock(&dpm_list_mtx);
717 	pm_transition = state;
718 
719 	while (!list_empty(&dpm_noirq_list)) {
720 		dev = to_device(dpm_noirq_list.next);
721 		get_device(dev);
722 		list_move_tail(&dev->power.entry, &dpm_late_early_list);
723 
724 		mutex_unlock(&dpm_list_mtx);
725 
726 		device_resume_noirq(dev);
727 
728 		put_device(dev);
729 
730 		mutex_lock(&dpm_list_mtx);
731 	}
732 	mutex_unlock(&dpm_list_mtx);
733 	async_synchronize_full();
734 	dpm_show_time(starttime, state, 0, "noirq");
735 	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
736 }
737 
738 /**
739  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
740  * @state: PM transition of the system being carried out.
741  *
742  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
743  * allow device drivers' interrupt handlers to be called.
744  */
void dpm_resume_noirq(pm_message_t state)
746 {
747 	dpm_noirq_resume_devices(state);
748 
749 	resume_device_irqs();
750 	device_wakeup_disarm_wake_irqs();
751 
752 	cpuidle_resume();
753 }
754 
755 /**
756  * __device_resume_early - Execute an "early resume" callback for given device.
757  * @dev: Device to handle.
758  * @state: PM transition of the system being carried out.
759  * @async: If true, the device is being resumed asynchronously.
760  *
761  * Runtime PM is disabled for @dev while this function is being executed.
762  */
static void __device_resume_early(struct device *dev, pm_message_t state, bool async)
764 {
765 	pm_callback_t callback = NULL;
766 	const char *info = NULL;
767 	int error = 0;
768 
769 	TRACE_DEVICE(dev);
770 	TRACE_RESUME(0);
771 
772 	if (dev->power.syscore || dev->power.direct_complete)
773 		goto Out;
774 
775 	if (!dev->power.is_late_suspended)
776 		goto Out;
777 
778 	if (!dpm_wait_for_superior(dev, async))
779 		goto Out;
780 
781 	if (dev->pm_domain) {
782 		info = "early power domain ";
783 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
784 	} else if (dev->type && dev->type->pm) {
785 		info = "early type ";
786 		callback = pm_late_early_op(dev->type->pm, state);
787 	} else if (dev->class && dev->class->pm) {
788 		info = "early class ";
789 		callback = pm_late_early_op(dev->class->pm, state);
790 	} else if (dev->bus && dev->bus->pm) {
791 		info = "early bus ";
792 		callback = pm_late_early_op(dev->bus->pm, state);
793 	}
794 	if (callback)
795 		goto Run;
796 
797 	if (dev_pm_skip_resume(dev))
798 		goto Skip;
799 
800 	if (dev->driver && dev->driver->pm) {
801 		info = "early driver ";
802 		callback = pm_late_early_op(dev->driver->pm, state);
803 	}
804 
805 Run:
806 	error = dpm_run_callback(callback, dev, state, info);
807 
808 Skip:
809 	dev->power.is_late_suspended = false;
810 
811 Out:
812 	TRACE_RESUME(error);
813 
814 	pm_runtime_enable(dev);
815 	complete_all(&dev->power.completion);
816 
817 	if (error) {
818 		suspend_stats.failed_resume_early++;
819 		dpm_save_failed_step(SUSPEND_RESUME_EARLY);
820 		dpm_save_failed_dev(dev_name(dev));
821 		pm_dev_err(dev, state, async ? " async early" : " early", error);
822 	}
823 }
824 
static void async_resume_early(void *data, async_cookie_t cookie)
826 {
827 	struct device *dev = data;
828 
829 	__device_resume_early(dev, pm_transition, true);
830 	put_device(dev);
831 }
832 
static void device_resume_early(struct device *dev)
834 {
835 	if (dpm_async_fn(dev, async_resume_early))
836 		return;
837 
838 	__device_resume_early(dev, pm_transition, false);
839 }
840 
841 /**
842  * dpm_resume_early - Execute "early resume" callbacks for all devices.
843  * @state: PM transition of the system being carried out.
844  */
void dpm_resume_early(pm_message_t state)
846 {
847 	struct device *dev;
848 	ktime_t starttime = ktime_get();
849 
850 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
851 	mutex_lock(&dpm_list_mtx);
852 	pm_transition = state;
853 
854 	while (!list_empty(&dpm_late_early_list)) {
855 		dev = to_device(dpm_late_early_list.next);
856 		get_device(dev);
857 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
858 
859 		mutex_unlock(&dpm_list_mtx);
860 
861 		device_resume_early(dev);
862 
863 		put_device(dev);
864 
865 		mutex_lock(&dpm_list_mtx);
866 	}
867 	mutex_unlock(&dpm_list_mtx);
868 	async_synchronize_full();
869 	dpm_show_time(starttime, state, 0, "early");
870 	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
871 }
872 
873 /**
874  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
875  * @state: PM transition of the system being carried out.
876  */
void dpm_resume_start(pm_message_t state)
878 {
879 	dpm_resume_noirq(state);
880 	dpm_resume_early(state);
881 }
882 EXPORT_SYMBOL_GPL(dpm_resume_start);
883 
884 /**
885  * __device_resume - Execute "resume" callbacks for given device.
886  * @dev: Device to handle.
887  * @state: PM transition of the system being carried out.
888  * @async: If true, the device is being resumed asynchronously.
889  */
static void __device_resume(struct device *dev, pm_message_t state, bool async)
891 {
892 	pm_callback_t callback = NULL;
893 	const char *info = NULL;
894 	int error = 0;
895 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
896 
897 	TRACE_DEVICE(dev);
898 	TRACE_RESUME(0);
899 
900 	if (dev->power.syscore)
901 		goto Complete;
902 
903 	if (dev->power.direct_complete) {
904 		/* Match the pm_runtime_disable() in __device_suspend(). */
905 		pm_runtime_enable(dev);
906 		goto Complete;
907 	}
908 
909 	if (!dpm_wait_for_superior(dev, async))
910 		goto Complete;
911 
912 	dpm_watchdog_set(&wd, dev);
913 	device_lock(dev);
914 
915 	/*
916 	 * This is a fib.  But we'll allow new children to be added below
917 	 * a resumed device, even if the device hasn't been completed yet.
918 	 */
919 	dev->power.is_prepared = false;
920 
921 	if (!dev->power.is_suspended)
922 		goto Unlock;
923 
924 	if (dev->pm_domain) {
925 		info = "power domain ";
926 		callback = pm_op(&dev->pm_domain->ops, state);
927 		goto Driver;
928 	}
929 
930 	if (dev->type && dev->type->pm) {
931 		info = "type ";
932 		callback = pm_op(dev->type->pm, state);
933 		goto Driver;
934 	}
935 
936 	if (dev->class && dev->class->pm) {
937 		info = "class ";
938 		callback = pm_op(dev->class->pm, state);
939 		goto Driver;
940 	}
941 
942 	if (dev->bus) {
943 		if (dev->bus->pm) {
944 			info = "bus ";
945 			callback = pm_op(dev->bus->pm, state);
946 		} else if (dev->bus->resume) {
947 			info = "legacy bus ";
948 			callback = dev->bus->resume;
949 			goto End;
950 		}
951 	}
952 
953  Driver:
954 	if (!callback && dev->driver && dev->driver->pm) {
955 		info = "driver ";
956 		callback = pm_op(dev->driver->pm, state);
957 	}
958 
959  End:
960 	error = dpm_run_callback(callback, dev, state, info);
961 	dev->power.is_suspended = false;
962 
963  Unlock:
964 	device_unlock(dev);
965 	dpm_watchdog_clear(&wd);
966 
967  Complete:
968 	complete_all(&dev->power.completion);
969 
970 	TRACE_RESUME(error);
971 
972 	if (error) {
973 		suspend_stats.failed_resume++;
974 		dpm_save_failed_step(SUSPEND_RESUME);
975 		dpm_save_failed_dev(dev_name(dev));
976 		pm_dev_err(dev, state, async ? " async" : "", error);
977 	}
978 }
979 
static void async_resume(void *data, async_cookie_t cookie)
981 {
982 	struct device *dev = data;
983 
984 	__device_resume(dev, pm_transition, true);
985 	put_device(dev);
986 }
987 
static void device_resume(struct device *dev)
989 {
990 	if (dpm_async_fn(dev, async_resume))
991 		return;
992 
993 	__device_resume(dev, pm_transition, false);
994 }
995 
996 /**
997  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
998  * @state: PM transition of the system being carried out.
999  *
1000  * Execute the appropriate "resume" callback for all devices whose status
1001  * indicates that they are suspended.
1002  */
void dpm_resume(pm_message_t state)
1004 {
1005 	struct device *dev;
1006 	ktime_t starttime = ktime_get();
1007 
1008 	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1009 	might_sleep();
1010 
1011 	mutex_lock(&dpm_list_mtx);
1012 	pm_transition = state;
1013 	async_error = 0;
1014 
1015 	while (!list_empty(&dpm_suspended_list)) {
1016 		dev = to_device(dpm_suspended_list.next);
1017 
1018 		get_device(dev);
1019 
1020 		mutex_unlock(&dpm_list_mtx);
1021 
1022 		device_resume(dev);
1023 
1024 		mutex_lock(&dpm_list_mtx);
1025 
1026 		if (!list_empty(&dev->power.entry))
1027 			list_move_tail(&dev->power.entry, &dpm_prepared_list);
1028 
1029 		mutex_unlock(&dpm_list_mtx);
1030 
1031 		put_device(dev);
1032 
1033 		mutex_lock(&dpm_list_mtx);
1034 	}
1035 	mutex_unlock(&dpm_list_mtx);
1036 	async_synchronize_full();
1037 	dpm_show_time(starttime, state, 0, NULL);
1038 
1039 	cpufreq_resume();
1040 	devfreq_resume();
1041 	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1042 }
1043 
1044 /**
1045  * device_complete - Complete a PM transition for given device.
1046  * @dev: Device to handle.
1047  * @state: PM transition of the system being carried out.
1048  */
static void device_complete(struct device *dev, pm_message_t state)
1050 {
1051 	void (*callback)(struct device *) = NULL;
1052 	const char *info = NULL;
1053 
1054 	if (dev->power.syscore)
1055 		goto out;
1056 
1057 	device_lock(dev);
1058 
1059 	if (dev->pm_domain) {
1060 		info = "completing power domain ";
1061 		callback = dev->pm_domain->ops.complete;
1062 	} else if (dev->type && dev->type->pm) {
1063 		info = "completing type ";
1064 		callback = dev->type->pm->complete;
1065 	} else if (dev->class && dev->class->pm) {
1066 		info = "completing class ";
1067 		callback = dev->class->pm->complete;
1068 	} else if (dev->bus && dev->bus->pm) {
1069 		info = "completing bus ";
1070 		callback = dev->bus->pm->complete;
1071 	}
1072 
1073 	if (!callback && dev->driver && dev->driver->pm) {
1074 		info = "completing driver ";
1075 		callback = dev->driver->pm->complete;
1076 	}
1077 
1078 	if (callback) {
1079 		pm_dev_dbg(dev, state, info);
1080 		callback(dev);
1081 	}
1082 
1083 	device_unlock(dev);
1084 
1085 out:
1086 	pm_runtime_put(dev);
1087 }
1088 
1089 /**
1090  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1091  * @state: PM transition of the system being carried out.
1092  *
1093  * Execute the ->complete() callbacks for all devices whose PM status is not
1094  * DPM_ON (this allows new devices to be registered).
1095  */
void dpm_complete(pm_message_t state)
1097 {
1098 	struct list_head list;
1099 
1100 	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1101 	might_sleep();
1102 
1103 	INIT_LIST_HEAD(&list);
1104 	mutex_lock(&dpm_list_mtx);
1105 	while (!list_empty(&dpm_prepared_list)) {
1106 		struct device *dev = to_device(dpm_prepared_list.prev);
1107 
1108 		get_device(dev);
1109 		dev->power.is_prepared = false;
1110 		list_move(&dev->power.entry, &list);
1111 
1112 		mutex_unlock(&dpm_list_mtx);
1113 
1114 		trace_device_pm_callback_start(dev, "", state.event);
1115 		device_complete(dev, state);
1116 		trace_device_pm_callback_end(dev, 0);
1117 
1118 		put_device(dev);
1119 
1120 		mutex_lock(&dpm_list_mtx);
1121 	}
1122 	list_splice(&list, &dpm_list);
1123 	mutex_unlock(&dpm_list_mtx);
1124 
1125 	/* Allow device probing and trigger re-probing of deferred devices */
1126 	device_unblock_probing();
1127 	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1128 }
1129 
1130 /**
1131  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1132  * @state: PM transition of the system being carried out.
1133  *
1134  * Execute "resume" callbacks for all devices and complete the PM transition of
1135  * the system.
1136  */
void dpm_resume_end(pm_message_t state)
1138 {
1139 	dpm_resume(state);
1140 	dpm_complete(state);
1141 }
1142 EXPORT_SYMBOL_GPL(dpm_resume_end);
1143 
1144 
1145 /*------------------------- Suspend routines -------------------------*/
1146 
1147 /**
1148  * resume_event - Return a "resume" message for given "suspend" sleep state.
1149  * @sleep_state: PM message representing a sleep state.
1150  *
1151  * Return a PM message representing the resume event corresponding to given
1152  * sleep state.
1153  */
static pm_message_t resume_event(pm_message_t sleep_state)
1155 {
1156 	switch (sleep_state.event) {
1157 	case PM_EVENT_SUSPEND:
1158 		return PMSG_RESUME;
1159 	case PM_EVENT_FREEZE:
1160 	case PM_EVENT_QUIESCE:
1161 		return PMSG_RECOVER;
1162 	case PM_EVENT_HIBERNATE:
1163 		return PMSG_RESTORE;
1164 	}
1165 	return PMSG_ON;
1166 }
1167 
static void dpm_superior_set_must_resume(struct device *dev)
1169 {
1170 	struct device_link *link;
1171 	int idx;
1172 
1173 	if (dev->parent)
1174 		dev->parent->power.must_resume = true;
1175 
1176 	idx = device_links_read_lock();
1177 
1178 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1179 		link->supplier->power.must_resume = true;
1180 
1181 	device_links_read_unlock(idx);
1182 }
1183 
1184 /**
1185  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1186  * @dev: Device to handle.
1187  * @state: PM transition of the system being carried out.
1188  * @async: If true, the device is being suspended asynchronously.
1189  *
1190  * The driver of @dev will not receive interrupts while this function is being
1191  * executed.
1192  */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1194 {
1195 	pm_callback_t callback = NULL;
1196 	const char *info = NULL;
1197 	int error = 0;
1198 
1199 	TRACE_DEVICE(dev);
1200 	TRACE_SUSPEND(0);
1201 
1202 	dpm_wait_for_subordinate(dev, async);
1203 
1204 	if (async_error)
1205 		goto Complete;
1206 
1207 	if (dev->power.syscore || dev->power.direct_complete)
1208 		goto Complete;
1209 
1210 	if (dev->pm_domain) {
1211 		info = "noirq power domain ";
1212 		callback = pm_noirq_op(&dev->pm_domain->ops, state);
1213 	} else if (dev->type && dev->type->pm) {
1214 		info = "noirq type ";
1215 		callback = pm_noirq_op(dev->type->pm, state);
1216 	} else if (dev->class && dev->class->pm) {
1217 		info = "noirq class ";
1218 		callback = pm_noirq_op(dev->class->pm, state);
1219 	} else if (dev->bus && dev->bus->pm) {
1220 		info = "noirq bus ";
1221 		callback = pm_noirq_op(dev->bus->pm, state);
1222 	}
1223 	if (callback)
1224 		goto Run;
1225 
1226 	if (dev_pm_skip_suspend(dev))
1227 		goto Skip;
1228 
1229 	if (dev->driver && dev->driver->pm) {
1230 		info = "noirq driver ";
1231 		callback = pm_noirq_op(dev->driver->pm, state);
1232 	}
1233 
1234 Run:
1235 	error = dpm_run_callback(callback, dev, state, info);
1236 	if (error) {
1237 		async_error = error;
1238 		goto Complete;
1239 	}
1240 
1241 Skip:
1242 	dev->power.is_noirq_suspended = true;
1243 
1244 	/*
1245 	 * Skipping the resume of devices that were in use right before the
1246 	 * system suspend (as indicated by their PM-runtime usage counters)
1247 	 * would be suboptimal.  Also resume them if doing that is not allowed
1248 	 * to be skipped.
1249 	 */
1250 	if (atomic_read(&dev->power.usage_count) > 1 ||
1251 	    !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1252 	      dev->power.may_skip_resume))
1253 		dev->power.must_resume = true;
1254 
1255 	if (dev->power.must_resume)
1256 		dpm_superior_set_must_resume(dev);
1257 
1258 Complete:
1259 	complete_all(&dev->power.completion);
1260 	TRACE_SUSPEND(error);
1261 	return error;
1262 }
1263 
static void async_suspend_noirq(void *data, async_cookie_t cookie)
1265 {
1266 	struct device *dev = data;
1267 	int error;
1268 
1269 	error = __device_suspend_noirq(dev, pm_transition, true);
1270 	if (error) {
1271 		dpm_save_failed_dev(dev_name(dev));
1272 		pm_dev_err(dev, pm_transition, " async", error);
1273 	}
1274 
1275 	put_device(dev);
1276 }
1277 
static int device_suspend_noirq(struct device *dev)
1279 {
1280 	if (dpm_async_fn(dev, async_suspend_noirq))
1281 		return 0;
1282 
1283 	return __device_suspend_noirq(dev, pm_transition, false);
1284 }
1285 
static int dpm_noirq_suspend_devices(pm_message_t state)
1287 {
1288 	ktime_t starttime = ktime_get();
1289 	int error = 0;
1290 
1291 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1292 	mutex_lock(&dpm_list_mtx);
1293 	pm_transition = state;
1294 	async_error = 0;
1295 
1296 	while (!list_empty(&dpm_late_early_list)) {
1297 		struct device *dev = to_device(dpm_late_early_list.prev);
1298 
1299 		get_device(dev);
1300 		mutex_unlock(&dpm_list_mtx);
1301 
1302 		error = device_suspend_noirq(dev);
1303 
1304 		mutex_lock(&dpm_list_mtx);
1305 
1306 		if (error) {
1307 			pm_dev_err(dev, state, " noirq", error);
1308 			dpm_save_failed_dev(dev_name(dev));
1309 		} else if (!list_empty(&dev->power.entry)) {
1310 			list_move(&dev->power.entry, &dpm_noirq_list);
1311 		}
1312 
1313 		mutex_unlock(&dpm_list_mtx);
1314 
1315 		put_device(dev);
1316 
1317 		mutex_lock(&dpm_list_mtx);
1318 
1319 		if (error || async_error)
1320 			break;
1321 	}
1322 	mutex_unlock(&dpm_list_mtx);
1323 	async_synchronize_full();
1324 	if (!error)
1325 		error = async_error;
1326 
1327 	if (error) {
1328 		suspend_stats.failed_suspend_noirq++;
1329 		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1330 	}
1331 	dpm_show_time(starttime, state, error, "noirq");
1332 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1333 	return error;
1334 }
1335 
1336 /**
1337  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1338  * @state: PM transition of the system being carried out.
1339  *
1340  * Prevent device drivers' interrupt handlers from being called and invoke
1341  * "noirq" suspend callbacks for all non-sysdev devices.
1342  */
int dpm_suspend_noirq(pm_message_t state)
1344 {
1345 	int ret;
1346 
1347 	cpuidle_pause();
1348 
1349 	device_wakeup_arm_wake_irqs();
1350 	suspend_device_irqs();
1351 
1352 	ret = dpm_noirq_suspend_devices(state);
1353 	if (ret)
1354 		dpm_resume_noirq(resume_event(state));
1355 
1356 	return ret;
1357 }
1358 
static void dpm_propagate_wakeup_to_parent(struct device *dev)
1360 {
1361 	struct device *parent = dev->parent;
1362 
1363 	if (!parent)
1364 		return;
1365 
1366 	spin_lock_irq(&parent->power.lock);
1367 
1368 	if (dev->power.wakeup_path && !parent->power.ignore_children)
1369 		parent->power.wakeup_path = true;
1370 
1371 	spin_unlock_irq(&parent->power.lock);
1372 }
1373 
1374 /**
1375  * __device_suspend_late - Execute a "late suspend" callback for given device.
1376  * @dev: Device to handle.
1377  * @state: PM transition of the system being carried out.
1378  * @async: If true, the device is being suspended asynchronously.
1379  *
1380  * Runtime PM is disabled for @dev while this function is being executed.
1381  */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1383 {
1384 	pm_callback_t callback = NULL;
1385 	const char *info = NULL;
1386 	int error = 0;
1387 
1388 	TRACE_DEVICE(dev);
1389 	TRACE_SUSPEND(0);
1390 
1391 	__pm_runtime_disable(dev, false);
1392 
1393 	dpm_wait_for_subordinate(dev, async);
1394 
1395 	if (async_error)
1396 		goto Complete;
1397 
1398 	if (pm_wakeup_pending()) {
1399 		async_error = -EBUSY;
1400 		goto Complete;
1401 	}
1402 
1403 	if (dev->power.syscore || dev->power.direct_complete)
1404 		goto Complete;
1405 
1406 	if (dev->pm_domain) {
1407 		info = "late power domain ";
1408 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
1409 	} else if (dev->type && dev->type->pm) {
1410 		info = "late type ";
1411 		callback = pm_late_early_op(dev->type->pm, state);
1412 	} else if (dev->class && dev->class->pm) {
1413 		info = "late class ";
1414 		callback = pm_late_early_op(dev->class->pm, state);
1415 	} else if (dev->bus && dev->bus->pm) {
1416 		info = "late bus ";
1417 		callback = pm_late_early_op(dev->bus->pm, state);
1418 	}
1419 	if (callback)
1420 		goto Run;
1421 
1422 	if (dev_pm_skip_suspend(dev))
1423 		goto Skip;
1424 
1425 	if (dev->driver && dev->driver->pm) {
1426 		info = "late driver ";
1427 		callback = pm_late_early_op(dev->driver->pm, state);
1428 	}
1429 
1430 Run:
1431 	error = dpm_run_callback(callback, dev, state, info);
1432 	if (error) {
1433 		async_error = error;
1434 		goto Complete;
1435 	}
1436 	dpm_propagate_wakeup_to_parent(dev);
1437 
1438 Skip:
1439 	dev->power.is_late_suspended = true;
1440 
1441 Complete:
1442 	TRACE_SUSPEND(error);
1443 	complete_all(&dev->power.completion);
1444 	return error;
1445 }
1446 
static void async_suspend_late(void *data, async_cookie_t cookie)
1448 {
1449 	struct device *dev = data;
1450 	int error;
1451 
1452 	error = __device_suspend_late(dev, pm_transition, true);
1453 	if (error) {
1454 		dpm_save_failed_dev(dev_name(dev));
1455 		pm_dev_err(dev, pm_transition, " async", error);
1456 	}
1457 	put_device(dev);
1458 }
1459 
static int device_suspend_late(struct device *dev)
1461 {
1462 	if (dpm_async_fn(dev, async_suspend_late))
1463 		return 0;
1464 
1465 	return __device_suspend_late(dev, pm_transition, false);
1466 }
1467 
1468 /**
1469  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1470  * @state: PM transition of the system being carried out.
1471  */
int dpm_suspend_late(pm_message_t state)
1473 {
1474 	ktime_t starttime = ktime_get();
1475 	int error = 0;
1476 
1477 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1478 	mutex_lock(&dpm_list_mtx);
1479 	pm_transition = state;
1480 	async_error = 0;
1481 
1482 	while (!list_empty(&dpm_suspended_list)) {
1483 		struct device *dev = to_device(dpm_suspended_list.prev);
1484 
1485 		get_device(dev);
1486 
1487 		mutex_unlock(&dpm_list_mtx);
1488 
1489 		error = device_suspend_late(dev);
1490 
1491 		mutex_lock(&dpm_list_mtx);
1492 
1493 		if (!list_empty(&dev->power.entry))
1494 			list_move(&dev->power.entry, &dpm_late_early_list);
1495 
1496 		if (error) {
1497 			pm_dev_err(dev, state, " late", error);
1498 			dpm_save_failed_dev(dev_name(dev));
1499 		}
1500 
1501 		mutex_unlock(&dpm_list_mtx);
1502 
1503 		put_device(dev);
1504 
1505 		mutex_lock(&dpm_list_mtx);
1506 
1507 		if (error || async_error)
1508 			break;
1509 	}
1510 	mutex_unlock(&dpm_list_mtx);
1511 	async_synchronize_full();
1512 	if (!error)
1513 		error = async_error;
1514 	if (error) {
1515 		suspend_stats.failed_suspend_late++;
1516 		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1517 		dpm_resume_early(resume_event(state));
1518 	}
1519 	dpm_show_time(starttime, state, error, "late");
1520 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1521 	return error;
1522 }
1523 
1524 /**
1525  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1526  * @state: PM transition of the system being carried out.
1527  */
int dpm_suspend_end(pm_message_t state)
1529 {
1530 	ktime_t starttime = ktime_get();
1531 	int error;
1532 
1533 	error = dpm_suspend_late(state);
1534 	if (error)
1535 		goto out;
1536 
1537 	error = dpm_suspend_noirq(state);
1538 	if (error)
1539 		dpm_resume_early(resume_event(state));
1540 
1541 out:
1542 	dpm_show_time(starttime, state, error, "end");
1543 	return error;
1544 }
1545 EXPORT_SYMBOL_GPL(dpm_suspend_end);
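
/*
 * A sketch of how the coarse-grained entry points exported from this file
 * pair up (callers may also invoke the individual phases directly):
 *
 *        dpm_suspend_start(PMSG_SUSPEND);        // prepare + suspend
 *        dpm_suspend_end(PMSG_SUSPEND);          // late + noirq suspend
 *        ... the system enters the sleep state ...
 *        dpm_resume_start(PMSG_RESUME);          // noirq + early resume
 *        dpm_resume_end(PMSG_RESUME);            // resume + complete
 */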
1546 
1547 /**
1548  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1549  * @dev: Device to suspend.
1550  * @state: PM transition of the system being carried out.
1551  * @cb: Suspend callback to execute.
1552  * @info: string description of caller.
1553  */
static int legacy_suspend(struct device *dev, pm_message_t state,
1555 			  int (*cb)(struct device *dev, pm_message_t state),
1556 			  const char *info)
1557 {
1558 	int error;
1559 	ktime_t calltime;
1560 
1561 	calltime = initcall_debug_start(dev, cb);
1562 
1563 	trace_device_pm_callback_start(dev, info, state.event);
1564 	error = cb(dev, state);
1565 	trace_device_pm_callback_end(dev, error);
1566 	suspend_report_result(cb, error);
1567 
1568 	initcall_debug_report(dev, calltime, cb, error);
1569 
1570 	return error;
1571 }
1572 
static void dpm_clear_superiors_direct_complete(struct device *dev)
1574 {
1575 	struct device_link *link;
1576 	int idx;
1577 
1578 	if (dev->parent) {
1579 		spin_lock_irq(&dev->parent->power.lock);
1580 		dev->parent->power.direct_complete = false;
1581 		spin_unlock_irq(&dev->parent->power.lock);
1582 	}
1583 
1584 	idx = device_links_read_lock();
1585 
1586 	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1587 		spin_lock_irq(&link->supplier->power.lock);
1588 		link->supplier->power.direct_complete = false;
1589 		spin_unlock_irq(&link->supplier->power.lock);
1590 	}
1591 
1592 	device_links_read_unlock(idx);
1593 }
1594 
1595 /**
1596  * __device_suspend - Execute "suspend" callbacks for given device.
1597  * @dev: Device to handle.
1598  * @state: PM transition of the system being carried out.
1599  * @async: If true, the device is being suspended asynchronously.
1600  */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1602 {
1603 	pm_callback_t callback = NULL;
1604 	const char *info = NULL;
1605 	int error = 0;
1606 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1607 
1608 	TRACE_DEVICE(dev);
1609 	TRACE_SUSPEND(0);
1610 
1611 	dpm_wait_for_subordinate(dev, async);
1612 
1613 	if (async_error) {
1614 		dev->power.direct_complete = false;
1615 		goto Complete;
1616 	}
1617 
1618 	/*
1619 	 * Wait for possible runtime PM transitions of the device in progress
1620 	 * to complete and if there's a runtime resume request pending for it,
1621 	 * resume it before proceeding with invoking the system-wide suspend
1622 	 * callbacks for it.
1623 	 *
1624 	 * If the system-wide suspend callbacks below change the configuration
1625 	 * of the device, they must disable runtime PM for it or otherwise
1626 	 * ensure that its runtime-resume callbacks will not be confused by that
1627 	 * change in case they are invoked going forward.
1628 	 */
1629 	pm_runtime_barrier(dev);
1630 
1631 	if (pm_wakeup_pending()) {
1632 		dev->power.direct_complete = false;
1633 		async_error = -EBUSY;
1634 		goto Complete;
1635 	}
1636 
1637 	if (dev->power.syscore)
1638 		goto Complete;
1639 
1640 	/* Avoid direct_complete to let wakeup_path propagate. */
1641 	if (device_may_wakeup(dev) || dev->power.wakeup_path)
1642 		dev->power.direct_complete = false;
1643 
1644 	if (dev->power.direct_complete) {
1645 		if (pm_runtime_status_suspended(dev)) {
1646 			pm_runtime_disable(dev);
1647 			if (pm_runtime_status_suspended(dev)) {
1648 				pm_dev_dbg(dev, state, "direct-complete ");
1649 				goto Complete;
1650 			}
1651 
1652 			pm_runtime_enable(dev);
1653 		}
1654 		dev->power.direct_complete = false;
1655 	}
1656 
1657 	dev->power.may_skip_resume = true;
1658 	dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
1659 
1660 	dpm_watchdog_set(&wd, dev);
1661 	device_lock(dev);
1662 
1663 	if (dev->pm_domain) {
1664 		info = "power domain ";
1665 		callback = pm_op(&dev->pm_domain->ops, state);
1666 		goto Run;
1667 	}
1668 
1669 	if (dev->type && dev->type->pm) {
1670 		info = "type ";
1671 		callback = pm_op(dev->type->pm, state);
1672 		goto Run;
1673 	}
1674 
1675 	if (dev->class && dev->class->pm) {
1676 		info = "class ";
1677 		callback = pm_op(dev->class->pm, state);
1678 		goto Run;
1679 	}
1680 
1681 	if (dev->bus) {
1682 		if (dev->bus->pm) {
1683 			info = "bus ";
1684 			callback = pm_op(dev->bus->pm, state);
1685 		} else if (dev->bus->suspend) {
1686 			pm_dev_dbg(dev, state, "legacy bus ");
1687 			error = legacy_suspend(dev, state, dev->bus->suspend,
1688 						"legacy bus ");
1689 			goto End;
1690 		}
1691 	}
1692 
1693  Run:
1694 	if (!callback && dev->driver && dev->driver->pm) {
1695 		info = "driver ";
1696 		callback = pm_op(dev->driver->pm, state);
1697 	}
1698 
1699 	error = dpm_run_callback(callback, dev, state, info);
1700 
1701  End:
1702 	if (!error) {
1703 		dev->power.is_suspended = true;
1704 		if (device_may_wakeup(dev))
1705 			dev->power.wakeup_path = true;
1706 
1707 		dpm_propagate_wakeup_to_parent(dev);
1708 		dpm_clear_superiors_direct_complete(dev);
1709 	}
1710 
1711 	device_unlock(dev);
1712 	dpm_watchdog_clear(&wd);
1713 
1714  Complete:
1715 	if (error)
1716 		async_error = error;
1717 
1718 	complete_all(&dev->power.completion);
1719 	TRACE_SUSPEND(error);
1720 	return error;
1721 }
1722 
static void async_suspend(void *data, async_cookie_t cookie)
1724 {
1725 	struct device *dev = data;
1726 	int error;
1727 
1728 	error = __device_suspend(dev, pm_transition, true);
1729 	if (error) {
1730 		dpm_save_failed_dev(dev_name(dev));
1731 		pm_dev_err(dev, pm_transition, " async", error);
1732 	}
1733 
1734 	put_device(dev);
1735 }
1736 
static int device_suspend(struct device *dev)
1738 {
1739 	if (dpm_async_fn(dev, async_suspend))
1740 		return 0;
1741 
1742 	return __device_suspend(dev, pm_transition, false);
1743 }
1744 
1745 /**
1746  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1747  * @state: PM transition of the system being carried out.
1748  */
int dpm_suspend(pm_message_t state)
1750 {
1751 	ktime_t starttime = ktime_get();
1752 	int error = 0;
1753 
1754 	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1755 	might_sleep();
1756 
1757 	devfreq_suspend();
1758 	cpufreq_suspend();
1759 
1760 	mutex_lock(&dpm_list_mtx);
1761 	pm_transition = state;
1762 	async_error = 0;
1763 	while (!list_empty(&dpm_prepared_list)) {
1764 		struct device *dev = to_device(dpm_prepared_list.prev);
1765 
1766 		get_device(dev);
1767 
1768 		mutex_unlock(&dpm_list_mtx);
1769 
1770 		error = device_suspend(dev);
1771 
1772 		mutex_lock(&dpm_list_mtx);
1773 
1774 		if (error) {
1775 			pm_dev_err(dev, state, "", error);
1776 			dpm_save_failed_dev(dev_name(dev));
1777 		} else if (!list_empty(&dev->power.entry)) {
1778 			list_move(&dev->power.entry, &dpm_suspended_list);
1779 		}
1780 
1781 		mutex_unlock(&dpm_list_mtx);
1782 
1783 		put_device(dev);
1784 
1785 		mutex_lock(&dpm_list_mtx);
1786 
1787 		if (error || async_error)
1788 			break;
1789 	}
1790 	mutex_unlock(&dpm_list_mtx);
1791 	async_synchronize_full();
1792 	if (!error)
1793 		error = async_error;
1794 	if (error) {
1795 		suspend_stats.failed_suspend++;
1796 		dpm_save_failed_step(SUSPEND_SUSPEND);
1797 	}
1798 	dpm_show_time(starttime, state, error, NULL);
1799 	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1800 	return error;
1801 }
1802 
1803 /**
1804  * device_prepare - Prepare a device for system power transition.
1805  * @dev: Device to handle.
1806  * @state: PM transition of the system being carried out.
1807  *
1808  * Execute the ->prepare() callback(s) for given device.  No new children of the
1809  * device may be registered after this function has returned.
1810  */
static int device_prepare(struct device *dev, pm_message_t state)
1812 {
1813 	int (*callback)(struct device *) = NULL;
1814 	int ret = 0;
1815 
1816 	/*
1817 	 * If a device's parent goes into runtime suspend at the wrong time,
1818 	 * it won't be possible to resume the device.  To prevent this we
1819 	 * block runtime suspend here, during the prepare phase, and allow
1820 	 * it again during the complete phase.
1821 	 */
1822 	pm_runtime_get_noresume(dev);
1823 
1824 	if (dev->power.syscore)
1825 		return 0;
1826 
1827 	device_lock(dev);
1828 
1829 	dev->power.wakeup_path = false;
1830 
1831 	if (dev->power.no_pm_callbacks)
1832 		goto unlock;
1833 
1834 	if (dev->pm_domain)
1835 		callback = dev->pm_domain->ops.prepare;
1836 	else if (dev->type && dev->type->pm)
1837 		callback = dev->type->pm->prepare;
1838 	else if (dev->class && dev->class->pm)
1839 		callback = dev->class->pm->prepare;
1840 	else if (dev->bus && dev->bus->pm)
1841 		callback = dev->bus->pm->prepare;
1842 
1843 	if (!callback && dev->driver && dev->driver->pm)
1844 		callback = dev->driver->pm->prepare;
1845 
1846 	if (callback)
1847 		ret = callback(dev);
1848 
1849 unlock:
1850 	device_unlock(dev);
1851 
1852 	if (ret < 0) {
1853 		suspend_report_result(callback, ret);
1854 		pm_runtime_put(dev);
1855 		return ret;
1856 	}
1857 	/*
1858 	 * A positive return value from ->prepare() means "this device appears
1859 	 * to be runtime-suspended and its state is fine, so if it really is
1860 	 * runtime-suspended, you can leave it in that state provided that you
1861 	 * will do the same thing with all of its descendants".  This only
1862 	 * applies to suspend transitions, however.
1863 	 */
1864 	spin_lock_irq(&dev->power.lock);
1865 	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1866 		(ret > 0 || dev->power.no_pm_callbacks) &&
1867 		!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
1868 	spin_unlock_irq(&dev->power.lock);
1869 	return 0;
1870 }
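
/*
 * Illustration only (foo_prepare is hypothetical): a driver whose device may
 * stay runtime-suspended across a system suspend can implement
 *
 *        static int foo_prepare(struct device *dev)
 *        {
 *                return pm_runtime_suspended(dev);
 *        }
 *
 * so that a positive return value lets the core set power.direct_complete
 * and skip the remaining suspend/resume callbacks for the device, unless
 * DPM_FLAG_NO_DIRECT_COMPLETE or the device's wakeup settings prevent that.
 */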
1871 
1872 /**
1873  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1874  * @state: PM transition of the system being carried out.
1875  *
1876  * Execute the ->prepare() callback(s) for all devices.
1877  */
int dpm_prepare(pm_message_t state)
1879 {
1880 	int error = 0;
1881 
1882 	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1883 	might_sleep();
1884 
1885 	/*
1886 	 * Give a chance for the known devices to complete their probes, before
1887 	 * disable probing of devices. This sync point is important at least
1888 	 * at boot time + hibernation restore.
1889 	 */
1890 	wait_for_device_probe();
1891 	/*
1892 	 * It is unsafe if probing of devices will happen during suspend or
1893 	 * hibernation and system behavior will be unpredictable in this case.
1894 	 * So, let's prohibit device's probing here and defer their probes
1895 	 * instead. The normal behavior will be restored in dpm_complete().
1896 	 */
1897 	device_block_probing();
1898 
1899 	mutex_lock(&dpm_list_mtx);
1900 	while (!list_empty(&dpm_list) && !error) {
1901 		struct device *dev = to_device(dpm_list.next);
1902 
1903 		get_device(dev);
1904 
1905 		mutex_unlock(&dpm_list_mtx);
1906 
1907 		trace_device_pm_callback_start(dev, "", state.event);
1908 		error = device_prepare(dev, state);
1909 		trace_device_pm_callback_end(dev, error);
1910 
1911 		mutex_lock(&dpm_list_mtx);
1912 
1913 		if (!error) {
1914 			dev->power.is_prepared = true;
1915 			if (!list_empty(&dev->power.entry))
1916 				list_move_tail(&dev->power.entry, &dpm_prepared_list);
1917 		} else if (error == -EAGAIN) {
1918 			error = 0;
1919 		} else {
1920 			dev_info(dev, "not prepared for power transition: code %d\n",
1921 				 error);
1922 		}
1923 
1924 		mutex_unlock(&dpm_list_mtx);
1925 
1926 		put_device(dev);
1927 
1928 		mutex_lock(&dpm_list_mtx);
1929 	}
1930 	mutex_unlock(&dpm_list_mtx);
1931 	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1932 	return error;
1933 }
1934 
1935 /**
1936  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1937  * @state: PM transition of the system being carried out.
1938  *
1939  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1940  * callbacks for them.
1941  */
int dpm_suspend_start(pm_message_t state)
1943 {
1944 	ktime_t starttime = ktime_get();
1945 	int error;
1946 
1947 	error = dpm_prepare(state);
1948 	if (error) {
1949 		suspend_stats.failed_prepare++;
1950 		dpm_save_failed_step(SUSPEND_PREPARE);
1951 	} else
1952 		error = dpm_suspend(state);
1953 	dpm_show_time(starttime, state, error, "start");
1954 	return error;
1955 }
1956 EXPORT_SYMBOL_GPL(dpm_suspend_start);
1957 
void __suspend_report_result(const char *function, void *fn, int ret)
1959 {
1960 	if (ret)
1961 		pr_err("%s(): %pS returns %d\n", function, fn, ret);
1962 }
1963 EXPORT_SYMBOL_GPL(__suspend_report_result);
1964 
1965 /**
1966  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1967  * @subordinate: Device that needs to wait for @dev.
1968  * @dev: Device to wait for.
1969  */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1971 {
1972 	dpm_wait(dev, subordinate->power.async_suspend);
1973 	return async_error;
1974 }
1975 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
1976 
1977 /**
1978  * dpm_for_each_dev - device iterator.
1979  * @data: data for the callback.
1980  * @fn: function to be called for each device.
1981  *
1982  * Iterate over devices in dpm_list, and call @fn for each device,
1983  * passing it @data.
1984  */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1986 {
1987 	struct device *dev;
1988 
1989 	if (!fn)
1990 		return;
1991 
1992 	device_pm_lock();
1993 	list_for_each_entry(dev, &dpm_list, power.entry)
1994 		fn(dev, data);
1995 	device_pm_unlock();
1996 }
1997 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
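
/*
 * Example usage (a sketch; foo_count_dev is hypothetical):
 *
 *        static void foo_count_dev(struct device *dev, void *data)
 *        {
 *                (*(unsigned int *)data)++;
 *        }
 *
 *        unsigned int count = 0;
 *
 *        dpm_for_each_dev(&count, foo_count_dev);
 */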
1998 
static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2000 {
2001 	if (!ops)
2002 		return true;
2003 
2004 	return !ops->prepare &&
2005 	       !ops->suspend &&
2006 	       !ops->suspend_late &&
2007 	       !ops->suspend_noirq &&
2008 	       !ops->resume_noirq &&
2009 	       !ops->resume_early &&
2010 	       !ops->resume &&
2011 	       !ops->complete;
2012 }
2013 
void device_pm_check_callbacks(struct device *dev)
2015 {
2016 	unsigned long flags;
2017 
2018 	spin_lock_irqsave(&dev->power.lock, flags);
2019 	dev->power.no_pm_callbacks =
2020 		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2021 		 !dev->bus->suspend && !dev->bus->resume)) &&
2022 		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2023 		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2024 		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2025 		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2026 		 !dev->driver->suspend && !dev->driver->resume));
2027 	spin_unlock_irqrestore(&dev->power.lock, flags);
2028 }
2029 
bool dev_pm_skip_suspend(struct device *dev)
2031 {
2032 	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2033 		pm_runtime_status_suspended(dev);
2034 }
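
/*
 * Illustration only: a driver opts into this optimization from its probe
 * path with something like
 *
 *        dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *                                     DPM_FLAG_MAY_SKIP_RESUME);
 *
 * after which a device left runtime-suspended before system suspend may have
 * its late/noirq suspend callbacks skipped, and its resume callbacks as well
 * when nothing sets power.must_resume.
 */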
2035