Lines matching refs: dev (drivers/base/power/main.c)

95  * @dev: Device object being initialized.
97 void device_pm_sleep_init(struct device *dev)
99 dev->power.is_prepared = false;
100 dev->power.is_suspended = false;
101 dev->power.is_noirq_suspended = false;
102 dev->power.is_late_suspended = false;
103 init_completion(&dev->power.completion);
104 complete_all(&dev->power.completion);
105 dev->power.wakeup = NULL;
106 INIT_LIST_HEAD(&dev->power.entry);
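
The init_completion()/complete_all() pairing above is the detail worth noting: device_pm_sleep_init() leaves the completion in the "done" state, so a dpm_wait() issued before the device's first transition returns immediately instead of blocking. A minimal sketch of the same pattern, with hypothetical demo_* names:

    #include <linux/completion.h>

    struct demo_state {
            struct completion done;
    };

    static void demo_state_init(struct demo_state *s)
    {
            init_completion(&s->done);
            /* Start out "completed": a waiter that arrives before the
             * first suspend/resume cycle must not block. */
            complete_all(&s->done);
    }
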
127 * @dev: Device to add to the list.
129 void device_pm_add(struct device *dev)
132 if (device_pm_not_required(dev))
136 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
137 device_pm_check_callbacks(dev);
139 if (dev->parent && dev->parent->power.is_prepared)
140 dev_warn(dev, "parent %s should not be sleeping\n",
141 dev_name(dev->parent));
142 list_add_tail(&dev->power.entry, &dpm_list);
143 dev->power.in_dpm_list = true;
149 * @dev: Device to be removed from the list.
151 void device_pm_remove(struct device *dev)
153 if (device_pm_not_required(dev))
157 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
158 complete_all(&dev->power.completion);
160 list_del_init(&dev->power.entry);
161 dev->power.in_dpm_list = false;
163 device_wakeup_disable(dev);
164 pm_runtime_remove(dev);
165 device_pm_check_callbacks(dev);
198 * @dev: Device to move in dpm_list.
200 void device_pm_move_last(struct device *dev)
203 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
204 list_move_tail(&dev->power.entry, &dpm_list);
207 static ktime_t initcall_debug_start(struct device *dev, void *cb)
212 dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
214 dev->parent ? dev_name(dev->parent) : "none");
218 static void initcall_debug_report(struct device *dev, ktime_t calltime,
227 dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
233 * @dev: Device to wait for.
236 static void dpm_wait(struct device *dev, bool async)
238 if (!dev)
241 if (async || (pm_async_enabled && dev->power.async_suspend))
242 wait_for_completion(&dev->power.completion);
245 static int dpm_wait_fn(struct device *dev, void *async_ptr)
247 dpm_wait(dev, *((bool *)async_ptr));
251 static void dpm_wait_for_children(struct device *dev, bool async)
253 device_for_each_child(dev, &async, dpm_wait_fn);
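
dpm_wait_for_children() above delegates to the generic child iterator. A sketch of the device_for_each_child() callback shape it relies on (demo_* names are hypothetical):

    #include <linux/device.h>

    static int demo_visit(struct device *dev, void *data)
    {
            (*(int *)data)++;
            return 0;       /* a nonzero return stops the walk early */
    }

    static int demo_count_children(struct device *parent)
    {
            int count = 0;

            device_for_each_child(parent, &count, demo_visit);
            return count;
    }
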
256 static void dpm_wait_for_suppliers(struct device *dev, bool async)
270 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
277 static bool dpm_wait_for_superior(struct device *dev, bool async)
290 if (!device_pm_initialized(dev)) {
295 parent = get_device(dev->parent);
302 dpm_wait_for_suppliers(dev, async);
308 return device_pm_initialized(dev);
311 static void dpm_wait_for_consumers(struct device *dev, bool async)
327 list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
334 static void dpm_wait_for_subordinate(struct device *dev, bool async)
336 dpm_wait_for_children(dev, async);
337 dpm_wait_for_consumers(dev, async);
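
The supplier and consumer lists walked by dpm_wait_for_suppliers() and dpm_wait_for_consumers() are populated through device links. A hedged sketch of creating one such edge (demo_bind is hypothetical):

    #include <linux/device.h>
    #include <linux/errno.h>

    static int demo_bind(struct device *consumer, struct device *supplier)
    {
            struct device_link *link;

            /* DL_FLAG_PM_RUNTIME additionally orders runtime PM;
             * system-sleep ordering comes with the link itself. */
            link = device_link_add(consumer, supplier, DL_FLAG_PM_RUNTIME);
            return link ? 0 : -ENODEV;
    }
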
376 * Runtime PM is disabled for @dev while this function is being executed.
410 * The driver of @dev will not receive interrupts while this function is being
439 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
441 dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
442 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
443 ", may wakeup" : "", dev->power.driver_flags);
446 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
449 dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
473 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
482 calltime = initcall_debug_start(dev, cb);
484 pm_dev_dbg(dev, state, info);
485 trace_device_pm_callback_start(dev, info, state.event);
486 error = cb(dev);
487 trace_device_pm_callback_end(dev, error);
488 suspend_report_result(dev, cb, error);
490 initcall_debug_report(dev, calltime, cb, error);
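
dpm_run_callback() invokes a pm_callback_t, i.e. int (*)(struct device *). A sketch of what a driver typically supplies (demo_* names are hypothetical); a nonzero return is reported via suspend_report_result() and aborts the transition:

    #include <linux/pm.h>

    static int demo_suspend(struct device *dev)
    {
            /* Quiesce the hardware here. */
            return 0;
    }

    static int demo_resume(struct device *dev)
    {
            /* Undo whatever demo_suspend() did. */
            return 0;
    }

    static const struct dev_pm_ops demo_pm_ops = {
            SYSTEM_SLEEP_PM_OPS(demo_suspend, demo_resume)
    };
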
497 struct device *dev;
517 dev_emerg(wd->dev, "**** DPM device timeout ****\n");
520 dev_driver_string(wd->dev), dev_name(wd->dev));
526 * @dev: Device to handle.
528 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
532 wd->dev = dev;
562 * @dev: Target device.
570 bool dev_pm_skip_resume(struct device *dev)
576 return dev_pm_skip_suspend(dev);
578 return !dev->power.must_resume;
583 * @dev: Device to handle.
587 * The driver of @dev will not receive interrupts while this function is being
590 static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
597 TRACE_DEVICE(dev);
600 if (dev->power.syscore || dev->power.direct_complete)
603 if (!dev->power.is_noirq_suspended)
606 if (!dpm_wait_for_superior(dev, async))
609 skip_resume = dev_pm_skip_resume(dev);
621 pm_runtime_set_suspended(dev);
622 else if (dev_pm_skip_suspend(dev))
623 pm_runtime_set_active(dev);
625 if (dev->pm_domain) {
627 callback = pm_noirq_op(&dev->pm_domain->ops, state);
628 } else if (dev->type && dev->type->pm) {
630 callback = pm_noirq_op(dev->type->pm, state);
631 } else if (dev->class && dev->class->pm) {
633 callback = pm_noirq_op(dev->class->pm, state);
634 } else if (dev->bus && dev->bus->pm) {
636 callback = pm_noirq_op(dev->bus->pm, state);
644 if (dev->driver && dev->driver->pm) {
646 callback = pm_noirq_op(dev->driver->pm, state);
650 error = dpm_run_callback(callback, dev, state, info);
653 dev->power.is_noirq_suspended = false;
656 complete_all(&dev->power.completion);
662 dpm_save_failed_dev(dev_name(dev));
663 pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
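
All of the resume phases above share one callback lookup order, with the driver consulted only as a fallback when no higher layer provides ops. A compact sketch of that cascade (demo_pick_ops is hypothetical; the real code inlines the chain and records an info string per level):

    #include <linux/device.h>

    static const struct dev_pm_ops *demo_pick_ops(struct device *dev)
    {
            if (dev->pm_domain)
                    return &dev->pm_domain->ops;
            if (dev->type && dev->type->pm)
                    return dev->type->pm;
            if (dev->class && dev->class->pm)
                    return dev->class->pm;
            if (dev->bus && dev->bus->pm)
                    return dev->bus->pm;
            if (dev->driver && dev->driver->pm)
                    return dev->driver->pm;
            return NULL;
    }
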
667 static bool is_async(struct device *dev)
669 return dev->power.async_suspend && pm_async_enabled
673 static bool dpm_async_fn(struct device *dev, async_func_t func)
675 reinit_completion(&dev->power.completion);
677 if (!is_async(dev))
680 get_device(dev);
682 if (async_schedule_dev_nocall(func, dev))
685 put_device(dev);
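
Note the reference ownership in dpm_async_fn() above: get_device() is taken before scheduling, and put_device() runs only when scheduling fails; otherwise the async worker (e.g. async_resume_noirq() below) drops the reference. A sketch of the handshake (demo_schedule is hypothetical):

    #include <linux/async.h>
    #include <linux/device.h>

    static bool demo_schedule(struct device *dev, async_func_t func)
    {
            get_device(dev);                /* reference for the worker */
            if (async_schedule_dev_nocall(func, dev))
                    return true;            /* worker now owns the ref */
            put_device(dev);                /* not scheduled: drop it */
            return false;
    }
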
692 struct device *dev = data;
694 __device_resume_noirq(dev, pm_transition, true);
695 put_device(dev);
698 static void device_resume_noirq(struct device *dev)
700 if (dpm_async_fn(dev, async_resume_noirq))
703 __device_resume_noirq(dev, pm_transition, false);
708 struct device *dev;
716 dev = to_device(dpm_noirq_list.next);
717 get_device(dev);
718 list_move_tail(&dev->power.entry, &dpm_late_early_list);
722 device_resume_noirq(dev);
724 put_device(dev);
751 * @dev: Device to handle.
755 * Runtime PM is disabled for @dev while this function is being executed.
757 static void __device_resume_early(struct device *dev, pm_message_t state, bool async)
763 TRACE_DEVICE(dev);
766 if (dev->power.syscore || dev->power.direct_complete)
769 if (!dev->power.is_late_suspended)
772 if (!dpm_wait_for_superior(dev, async))
775 if (dev->pm_domain) {
777 callback = pm_late_early_op(&dev->pm_domain->ops, state);
778 } else if (dev->type && dev->type->pm) {
780 callback = pm_late_early_op(dev->type->pm, state);
781 } else if (dev->class && dev->class->pm) {
783 callback = pm_late_early_op(dev->class->pm, state);
784 } else if (dev->bus && dev->bus->pm) {
786 callback = pm_late_early_op(dev->bus->pm, state);
791 if (dev_pm_skip_resume(dev))
794 if (dev->driver && dev->driver->pm) {
796 callback = pm_late_early_op(dev->driver->pm, state);
800 error = dpm_run_callback(callback, dev, state, info);
803 dev->power.is_late_suspended = false;
808 pm_runtime_enable(dev);
809 complete_all(&dev->power.completion);
814 dpm_save_failed_dev(dev_name(dev));
815 pm_dev_err(dev, state, async ? " async early" : " early", error);
821 struct device *dev = data;
823 __device_resume_early(dev, pm_transition, true);
824 put_device(dev);
827 static void device_resume_early(struct device *dev)
829 if (dpm_async_fn(dev, async_resume_early))
832 __device_resume_early(dev, pm_transition, false);
841 struct device *dev;
849 dev = to_device(dpm_late_early_list.next);
850 get_device(dev);
851 list_move_tail(&dev->power.entry, &dpm_suspended_list);
855 device_resume_early(dev);
857 put_device(dev);
880 * @dev: Device to handle.
884 static void __device_resume(struct device *dev, pm_message_t state, bool async)
891 TRACE_DEVICE(dev);
894 if (dev->power.syscore)
897 if (dev->power.direct_complete) {
899 pm_runtime_enable(dev);
903 if (!dpm_wait_for_superior(dev, async))
906 dpm_watchdog_set(&wd, dev);
907 device_lock(dev);
913 dev->power.is_prepared = false;
915 if (!dev->power.is_suspended)
918 if (dev->pm_domain) {
920 callback = pm_op(&dev->pm_domain->ops, state);
924 if (dev->type && dev->type->pm) {
926 callback = pm_op(dev->type->pm, state);
930 if (dev->class && dev->class->pm) {
932 callback = pm_op(dev->class->pm, state);
936 if (dev->bus) {
937 if (dev->bus->pm) {
939 callback = pm_op(dev->bus->pm, state);
940 } else if (dev->bus->resume) {
942 callback = dev->bus->resume;
948 if (!callback && dev->driver && dev->driver->pm) {
950 callback = pm_op(dev->driver->pm, state);
954 error = dpm_run_callback(callback, dev, state, info);
955 dev->power.is_suspended = false;
958 device_unlock(dev);
962 complete_all(&dev->power.completion);
969 dpm_save_failed_dev(dev_name(dev));
970 pm_dev_err(dev, state, async ? " async" : "", error);
976 struct device *dev = data;
978 __device_resume(dev, pm_transition, true);
979 put_device(dev);
982 static void device_resume(struct device *dev)
984 if (dpm_async_fn(dev, async_resume))
987 __device_resume(dev, pm_transition, false);
999 struct device *dev;
1010 dev = to_device(dpm_suspended_list.next);
1012 get_device(dev);
1016 device_resume(dev);
1020 if (!list_empty(&dev->power.entry))
1021 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1025 put_device(dev);
1040 * @dev: Device to handle.
1043 static void device_complete(struct device *dev, pm_message_t state)
1048 if (dev->power.syscore)
1051 device_lock(dev);
1053 if (dev->pm_domain) {
1055 callback = dev->pm_domain->ops.complete;
1056 } else if (dev->type && dev->type->pm) {
1058 callback = dev->type->pm->complete;
1059 } else if (dev->class && dev->class->pm) {
1061 callback = dev->class->pm->complete;
1062 } else if (dev->bus && dev->bus->pm) {
1064 callback = dev->bus->pm->complete;
1067 if (!callback && dev->driver && dev->driver->pm) {
1069 callback = dev->driver->pm->complete;
1073 pm_dev_dbg(dev, state, info);
1074 callback(dev);
1077 device_unlock(dev);
1080 pm_runtime_put(dev);
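
The pm_runtime_put() ending device_complete() balances the pm_runtime_get_noresume() taken in device_prepare() (line 1815 below), which pins the runtime-PM usage count for the whole system transition. Schematically (demo_transition is hypothetical):

    #include <linux/pm_runtime.h>

    static void demo_transition(struct device *dev)
    {
            pm_runtime_get_noresume(dev);   /* device_prepare() side */
            /* ... system suspend and resume run in between ... */
            pm_runtime_put(dev);            /* device_complete() side */
    }
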
1100 struct device *dev = to_device(dpm_prepared_list.prev);
1102 get_device(dev);
1103 dev->power.is_prepared = false;
1104 list_move(&dev->power.entry, &list);
1108 trace_device_pm_callback_start(dev, "", state.event);
1109 device_complete(dev, state);
1110 trace_device_pm_callback_end(dev, 0);
1112 put_device(dev);
1162 static void dpm_superior_set_must_resume(struct device *dev)
1167 if (dev->parent)
1168 dev->parent->power.must_resume = true;
1172 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1180 * @dev: Device to handle.
1184 * The driver of @dev will not receive interrupts while this function is being
1187 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1193 TRACE_DEVICE(dev);
1196 dpm_wait_for_subordinate(dev, async);
1201 if (dev->power.syscore || dev->power.direct_complete)
1204 if (dev->pm_domain) {
1206 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1207 } else if (dev->type && dev->type->pm) {
1209 callback = pm_noirq_op(dev->type->pm, state);
1210 } else if (dev->class && dev->class->pm) {
1212 callback = pm_noirq_op(dev->class->pm, state);
1213 } else if (dev->bus && dev->bus->pm) {
1215 callback = pm_noirq_op(dev->bus->pm, state);
1220 if (dev_pm_skip_suspend(dev))
1223 if (dev->driver && dev->driver->pm) {
1225 callback = pm_noirq_op(dev->driver->pm, state);
1229 error = dpm_run_callback(callback, dev, state, info);
1236 dev->power.is_noirq_suspended = true;
1244 if (atomic_read(&dev->power.usage_count) > 1 ||
1245 !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1246 dev->power.may_skip_resume))
1247 dev->power.must_resume = true;
1249 if (dev->power.must_resume)
1250 dpm_superior_set_must_resume(dev);
1253 complete_all(&dev->power.completion);
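
The must_resume computation above only lets a device stay suspended across resume if its driver opted in. A sketch of that opt-in, typically done at probe time (demo_probe is hypothetical):

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    static int demo_probe(struct device *dev)
    {
            /* Ask the core to leave the device suspended across
             * system resume when nothing forces it back on. */
            dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
                                         DPM_FLAG_MAY_SKIP_RESUME);
            return 0;
    }
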
1260 struct device *dev = data;
1263 error = __device_suspend_noirq(dev, pm_transition, true);
1265 dpm_save_failed_dev(dev_name(dev));
1266 pm_dev_err(dev, pm_transition, " async", error);
1269 put_device(dev);
1272 static int device_suspend_noirq(struct device *dev)
1274 if (dpm_async_fn(dev, async_suspend_noirq))
1277 return __device_suspend_noirq(dev, pm_transition, false);
1291 struct device *dev = to_device(dpm_late_early_list.prev);
1293 get_device(dev);
1296 error = device_suspend_noirq(dev);
1301 pm_dev_err(dev, state, " noirq", error);
1302 dpm_save_failed_dev(dev_name(dev));
1303 } else if (!list_empty(&dev->power.entry)) {
1304 list_move(&dev->power.entry, &dpm_noirq_list);
1309 put_device(dev);
1351 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1353 struct device *parent = dev->parent;
1360 if (device_wakeup_path(dev) && !parent->power.ignore_children)
1368 * @dev: Device to handle.
1372 * Runtime PM is disabled for @dev while this function is being executed.
1374 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1380 TRACE_DEVICE(dev);
1383 __pm_runtime_disable(dev, false);
1385 dpm_wait_for_subordinate(dev, async);
1395 if (dev->power.syscore || dev->power.direct_complete)
1398 if (dev->pm_domain) {
1400 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1401 } else if (dev->type && dev->type->pm) {
1403 callback = pm_late_early_op(dev->type->pm, state);
1404 } else if (dev->class && dev->class->pm) {
1406 callback = pm_late_early_op(dev->class->pm, state);
1407 } else if (dev->bus && dev->bus->pm) {
1409 callback = pm_late_early_op(dev->bus->pm, state);
1414 if (dev_pm_skip_suspend(dev))
1417 if (dev->driver && dev->driver->pm) {
1419 callback = pm_late_early_op(dev->driver->pm, state);
1423 error = dpm_run_callback(callback, dev, state, info);
1428 dpm_propagate_wakeup_to_parent(dev);
1431 dev->power.is_late_suspended = true;
1435 complete_all(&dev->power.completion);
1441 struct device *dev = data;
1444 error = __device_suspend_late(dev, pm_transition, true);
1446 dpm_save_failed_dev(dev_name(dev));
1447 pm_dev_err(dev, pm_transition, " async", error);
1449 put_device(dev);
1452 static int device_suspend_late(struct device *dev)
1454 if (dpm_async_fn(dev, async_suspend_late))
1457 return __device_suspend_late(dev, pm_transition, false);
1476 struct device *dev = to_device(dpm_suspended_list.prev);
1478 get_device(dev);
1482 error = device_suspend_late(dev);
1486 if (!list_empty(&dev->power.entry))
1487 list_move(&dev->power.entry, &dpm_late_early_list);
1490 pm_dev_err(dev, state, " late", error);
1491 dpm_save_failed_dev(dev_name(dev));
1496 put_device(dev);
1542 * @dev: Device to suspend.
1547 static int legacy_suspend(struct device *dev, pm_message_t state,
1548 int (*cb)(struct device *dev, pm_message_t state),
1554 calltime = initcall_debug_start(dev, cb);
1556 trace_device_pm_callback_start(dev, info, state.event);
1557 error = cb(dev, state);
1558 trace_device_pm_callback_end(dev, error);
1559 suspend_report_result(dev, cb, error);
1561 initcall_debug_report(dev, calltime, cb, error);
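
legacy_suspend() above services the old-style callbacks that still exist in struct bus_type, which take the pm_message_t explicitly instead of encoding the event in which dev_pm_ops member is called. Their shape, for contrast (demo name hypothetical):

    #include <linux/pm.h>

    static int demo_legacy_suspend(struct device *dev, pm_message_t state)
    {
            /* Legacy signature: one entry point for every event. */
            return 0;
    }
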
1566 static void dpm_clear_superiors_direct_complete(struct device *dev)
1571 if (dev->parent) {
1572 spin_lock_irq(&dev->parent->power.lock);
1573 dev->parent->power.direct_complete = false;
1574 spin_unlock_irq(&dev->parent->power.lock);
1579 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1590 * @dev: Device to handle.
1594 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1601 TRACE_DEVICE(dev);
1604 dpm_wait_for_subordinate(dev, async);
1607 dev->power.direct_complete = false;
1622 pm_runtime_barrier(dev);
1625 dev->power.direct_complete = false;
1630 if (dev->power.syscore)
1634 if (device_may_wakeup(dev) || device_wakeup_path(dev))
1635 dev->power.direct_complete = false;
1637 if (dev->power.direct_complete) {
1638 if (pm_runtime_status_suspended(dev)) {
1639 pm_runtime_disable(dev);
1640 if (pm_runtime_status_suspended(dev)) {
1641 pm_dev_dbg(dev, state, "direct-complete ");
1645 pm_runtime_enable(dev);
1647 dev->power.direct_complete = false;
1650 dev->power.may_skip_resume = true;
1651 dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
1653 dpm_watchdog_set(&wd, dev);
1654 device_lock(dev);
1656 if (dev->pm_domain) {
1658 callback = pm_op(&dev->pm_domain->ops, state);
1662 if (dev->type && dev->type->pm) {
1664 callback = pm_op(dev->type->pm, state);
1668 if (dev->class && dev->class->pm) {
1670 callback = pm_op(dev->class->pm, state);
1674 if (dev->bus) {
1675 if (dev->bus->pm) {
1677 callback = pm_op(dev->bus->pm, state);
1678 } else if (dev->bus->suspend) {
1679 pm_dev_dbg(dev, state, "legacy bus ");
1680 error = legacy_suspend(dev, state, dev->bus->suspend,
1687 if (!callback && dev->driver && dev->driver->pm) {
1689 callback = pm_op(dev->driver->pm, state);
1692 error = dpm_run_callback(callback, dev, state, info);
1696 dev->power.is_suspended = true;
1697 if (device_may_wakeup(dev))
1698 dev->power.wakeup_path = true;
1700 dpm_propagate_wakeup_to_parent(dev);
1701 dpm_clear_superiors_direct_complete(dev);
1704 device_unlock(dev);
1711 complete_all(&dev->power.completion);
1718 struct device *dev = data;
1721 error = __device_suspend(dev, pm_transition, true);
1723 dpm_save_failed_dev(dev_name(dev));
1724 pm_dev_err(dev, pm_transition, " async", error);
1727 put_device(dev);
1730 static int device_suspend(struct device *dev)
1732 if (dpm_async_fn(dev, async_suspend))
1735 return __device_suspend(dev, pm_transition, false);
1757 struct device *dev = to_device(dpm_prepared_list.prev);
1759 get_device(dev);
1763 error = device_suspend(dev);
1768 pm_dev_err(dev, state, "", error);
1769 dpm_save_failed_dev(dev_name(dev));
1770 } else if (!list_empty(&dev->power.entry)) {
1771 list_move(&dev->power.entry, &dpm_suspended_list);
1776 put_device(dev);
1798 * @dev: Device to handle.
1804 static int device_prepare(struct device *dev, pm_message_t state)
1815 pm_runtime_get_noresume(dev);
1817 if (dev->power.syscore)
1820 device_lock(dev);
1822 dev->power.wakeup_path = false;
1824 if (dev->power.no_pm_callbacks)
1827 if (dev->pm_domain)
1828 callback = dev->pm_domain->ops.prepare;
1829 else if (dev->type && dev->type->pm)
1830 callback = dev->type->pm->prepare;
1831 else if (dev->class && dev->class->pm)
1832 callback = dev->class->pm->prepare;
1833 else if (dev->bus && dev->bus->pm)
1834 callback = dev->bus->pm->prepare;
1836 if (!callback && dev->driver && dev->driver->pm)
1837 callback = dev->driver->pm->prepare;
1840 ret = callback(dev);
1843 device_unlock(dev);
1846 suspend_report_result(dev, callback, ret);
1847 pm_runtime_put(dev);
1857 spin_lock_irq(&dev->power.lock);
1858 dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1859 (ret > 0 || dev->power.no_pm_callbacks) &&
1860 !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
1861 spin_unlock_irq(&dev->power.lock);
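
The direct_complete computation above keys off a positive return from the innermost ->prepare() callback. A sketch of a prepare that requests direct-complete when its device is already runtime-suspended (demo_prepare is hypothetical):

    #include <linux/pm_runtime.h>

    static int demo_prepare(struct device *dev)
    {
            /* A positive return lets the core skip every subsequent
             * callback for this device, unless the driver set
             * DPM_FLAG_NO_DIRECT_COMPLETE or a descendant needs the
             * full path. */
            return pm_runtime_suspended(dev);
    }
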
1894 struct device *dev = to_device(dpm_list.next);
1896 get_device(dev);
1900 trace_device_pm_callback_start(dev, "", state.event);
1901 error = device_prepare(dev, state);
1902 trace_device_pm_callback_end(dev, error);
1907 dev->power.is_prepared = true;
1908 if (!list_empty(&dev->power.entry))
1909 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1913 dev_info(dev, "not prepared for power transition: code %d\n",
1919 put_device(dev);
1951 void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
1954 dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
1960 * @subordinate: Device that needs to wait for @dev.
1961 * @dev: Device to wait for.
1963 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1965 dpm_wait(dev, subordinate->power.async_suspend);
1980 struct device *dev;
1986 list_for_each_entry(dev, &dpm_list, power.entry)
1987 fn(dev, data);
2007 void device_pm_check_callbacks(struct device *dev)
2011 spin_lock_irqsave(&dev->power.lock, flags);
2012 dev->power.no_pm_callbacks =
2013 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2014 !dev->bus->suspend && !dev->bus->resume)) &&
2015 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2016 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2017 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2018 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2019 !dev->driver->suspend && !dev->driver->resume));
2020 spin_unlock_irqrestore(&dev->power.lock, flags);
2023 bool dev_pm_skip_suspend(struct device *dev)
2025 return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2026 pm_runtime_status_suspended(dev);
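
dev_pm_skip_suspend() returns true for a device whose driver set DPM_FLAG_SMART_SUSPEND and that is already runtime-suspended. The usual companion pattern reuses the runtime-PM handlers for system sleep; a hedged sketch (sample_* names are hypothetical):

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    static int sample_runtime_suspend(struct device *dev)
    {
            return 0;       /* power the device down */
    }

    static int sample_runtime_resume(struct device *dev)
    {
            return 0;       /* power it back up */
    }

    static const struct dev_pm_ops sample_pm_ops = {
            SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
            RUNTIME_PM_OPS(sample_runtime_suspend,
                           sample_runtime_resume, NULL)
    };
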