Lines matching references to "dev" (cross-reference listing; the fragments below appear to come from the Linux kernel's drivers/base/power/main.c, one source line per match):
96 * @dev: Device object being initialized.
98 void device_pm_sleep_init(struct device *dev)
100 dev->power.is_prepared = false;
101 dev->power.is_suspended = false;
102 dev->power.is_noirq_suspended = false;
103 dev->power.is_late_suspended = false;
104 init_completion(&dev->power.completion);
105 complete_all(&dev->power.completion);
106 dev->power.wakeup = NULL;
107 INIT_LIST_HEAD(&dev->power.entry);
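Stitched together, the matches above cover essentially the whole body of device_pm_sleep_init(); a reconstruction for readability (braces and comments are mine):

void device_pm_sleep_init(struct device *dev)
{
        /* A freshly initialized device is in no sleep transition. */
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        dev->power.is_noirq_suspended = false;
        dev->power.is_late_suspended = false;
        /*
         * Complete the completion immediately so that dpm_wait() never
         * blocks on a device that has not started a transition yet.
         */
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        INIT_LIST_HEAD(&dev->power.entry);
}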
128 * @dev: Device to add to the list.
130 void device_pm_add(struct device *dev)
133 if (device_pm_not_required(dev))
137 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
138 device_pm_check_callbacks(dev);
140 if (dev->parent && dev->parent->power.is_prepared)
141 dev_warn(dev, "parent %s should not be sleeping\n",
142 dev_name(dev->parent));
143 list_add_tail(&dev->power.entry, &dpm_list);
144 dev->power.in_dpm_list = true;
150 * @dev: Device to be removed from the list.
152 void device_pm_remove(struct device *dev)
154 if (device_pm_not_required(dev))
158 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
159 complete_all(&dev->power.completion);
161 list_del_init(&dev->power.entry);
162 dev->power.in_dpm_list = false;
164 device_wakeup_disable(dev);
165 pm_runtime_remove(dev);
166 device_pm_check_callbacks(dev);
199 * @dev: Device to move in dpm_list.
201 void device_pm_move_last(struct device *dev)
204 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
205 list_move_tail(&dev->power.entry, &dpm_list);
208 static ktime_t initcall_debug_start(struct device *dev, void *cb)
213 dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
215 dev->parent ? dev_name(dev->parent) : "none");
219 static void initcall_debug_report(struct device *dev, ktime_t calltime,
231 dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
237 * @dev: Device to wait for.
240 static void dpm_wait(struct device *dev, bool async)
242 if (!dev)
245 if (async || (pm_async_enabled && dev->power.async_suspend))
246 wait_for_completion(&dev->power.completion);
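Together with an early return, the two matches above make up all of dpm_wait(); a sketch:

static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        /*
         * Block only when the waiter or the target device participates
         * in asynchronous suspend/resume; synchronous devices are
         * already ordered by their position on dpm_list.
         */
        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}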
249 static int dpm_wait_fn(struct device *dev, void *async_ptr)
251 dpm_wait(dev, *((bool *)async_ptr));
255 static void dpm_wait_for_children(struct device *dev, bool async)
257 device_for_each_child(dev, &async, dpm_wait_fn);
260 static void dpm_wait_for_suppliers(struct device *dev, bool async)
274 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
281 static bool dpm_wait_for_superior(struct device *dev, bool async)
294 if (!device_pm_initialized(dev)) {
299 parent = get_device(dev->parent);
306 dpm_wait_for_suppliers(dev, async);
312 return device_pm_initialized(dev);
315 static void dpm_wait_for_consumers(struct device *dev, bool async)
331 list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
338 static void dpm_wait_for_subordinate(struct device *dev, bool async)
340 dpm_wait_for_children(dev, async);
341 dpm_wait_for_consumers(dev, async);
380 * Runtime PM is disabled for @dev while this function is being executed.
414 * The driver of @dev will not receive interrupts while this function is being
443 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
445 dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
446 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
450 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
453 dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
477 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
486 calltime = initcall_debug_start(dev, cb);
488 pm_dev_dbg(dev, state, info);
489 trace_device_pm_callback_start(dev, info, state.event);
490 error = cb(dev);
491 trace_device_pm_callback_end(dev, error);
494 initcall_debug_report(dev, calltime, cb, error);
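The matches at lines 477-494 outline dpm_run_callback(), the single funnel through which every PM callback in this file is invoked. A plausible reconstruction (the NULL-callback early return and the suspend_report_result() call are assumptions based on how the helper is used; the exact suspend_report_result() signature has varied across kernel versions):

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, const char *info)
{
        ktime_t calltime;
        int error;

        /* No callback at this level: nothing to do, report success. */
        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev, cb);

        pm_dev_dbg(dev, state, info);
        trace_device_pm_callback_start(dev, info, state.event);
        error = cb(dev);
        trace_device_pm_callback_end(dev, error);
        suspend_report_result(dev, cb, error);

        initcall_debug_report(dev, calltime, cb, error);

        return error;
}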
501 struct device *dev;
521 dev_emerg(wd->dev, "**** DPM device timeout ****\n");
524 dev_driver_string(wd->dev), dev_name(wd->dev));
530 * @dev: Device to handle.
532 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
536 wd->dev = dev;
566 * @dev: Target device.
574 bool dev_pm_skip_resume(struct device *dev)
580 return dev_pm_skip_suspend(dev);
582 return !dev->power.must_resume;
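Filling in the conditions that the returns at lines 574-582 belong to, dev_pm_skip_resume() plausibly reads as follows (my reconstruction; the PM_EVENT_RESTORE/PM_EVENT_THAW split is an assumption based on the hibernation semantics of this file):

bool dev_pm_skip_resume(struct device *dev)
{
        /* After restoring a hibernation image, always resume fully. */
        if (pm_transition.event == PM_EVENT_RESTORE)
                return false;

        /* During thaw, skip only what the suspend side also skipped. */
        if (pm_transition.event == PM_EVENT_THAW)
                return dev_pm_skip_suspend(dev);

        return !dev->power.must_resume;
}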
587 * @dev: Device to handle.
591 * The driver of @dev will not receive interrupts while this function is being
594 static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
601 TRACE_DEVICE(dev);
604 if (dev->power.syscore || dev->power.direct_complete)
607 if (!dev->power.is_noirq_suspended)
610 if (!dpm_wait_for_superior(dev, async))
613 skip_resume = dev_pm_skip_resume(dev);
625 pm_runtime_set_suspended(dev);
626 else if (dev_pm_skip_suspend(dev))
627 pm_runtime_set_active(dev);
629 if (dev->pm_domain) {
631 callback = pm_noirq_op(&dev->pm_domain->ops, state);
632 } else if (dev->type && dev->type->pm) {
634 callback = pm_noirq_op(dev->type->pm, state);
635 } else if (dev->class && dev->class->pm) {
637 callback = pm_noirq_op(dev->class->pm, state);
638 } else if (dev->bus && dev->bus->pm) {
640 callback = pm_noirq_op(dev->bus->pm, state);
648 if (dev->driver && dev->driver->pm) {
650 callback = pm_noirq_op(dev->driver->pm, state);
654 error = dpm_run_callback(callback, dev, state, info);
657 dev->power.is_noirq_suspended = false;
660 complete_all(&dev->power.completion);
666 dpm_save_failed_dev(dev_name(dev));
667 pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
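The pm_noirq_op() cascade above (pm_domain first, then type, class, bus, and finally the driver as a fallback) is where a device's own noirq callbacks are looked up. A minimal, hypothetical driver plugging into it (the foo_* names are mine; NOIRQ_SYSTEM_SLEEP_PM_OPS is the modern helper from include/linux/pm.h):

/* Runs after device interrupts have been suppressed by the PM core. */
static int foo_suspend_noirq(struct device *dev)
{
        return 0;
}

static int foo_resume_noirq(struct device *dev)
{
        return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
        NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
};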
671 static bool is_async(struct device *dev)
673 return dev->power.async_suspend && pm_async_enabled
677 static bool dpm_async_fn(struct device *dev, async_func_t func)
679 reinit_completion(&dev->power.completion);
681 if (!is_async(dev))
684 get_device(dev);
686 if (async_schedule_dev_nocall(func, dev))
689 put_device(dev);
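is_async() and dpm_async_fn() only take effect for devices whose power.async_suspend flag is set. Drivers typically opt in at probe time; a hypothetical example (foo_probe is mine):

static int foo_probe(struct platform_device *pdev)
{
        /*
         * Let the PM core schedule this device's callbacks concurrently
         * with other devices; dpm_wait_for_superior() and
         * dpm_wait_for_subordinate() still enforce parent/child and
         * supplier/consumer ordering.
         */
        device_enable_async_suspend(&pdev->dev);
        return 0;
}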
696 struct device *dev = data;
698 __device_resume_noirq(dev, pm_transition, true);
699 put_device(dev);
702 static void device_resume_noirq(struct device *dev)
704 if (dpm_async_fn(dev, async_resume_noirq))
707 __device_resume_noirq(dev, pm_transition, false);
712 struct device *dev;
720 dev = to_device(dpm_noirq_list.next);
721 get_device(dev);
722 list_move_tail(&dev->power.entry, &dpm_late_early_list);
726 device_resume_noirq(dev);
728 put_device(dev);
757 * @dev: Device to handle.
761 * Runtime PM is disabled for @dev while this function is being executed.
763 static void __device_resume_early(struct device *dev, pm_message_t state, bool async)
769 TRACE_DEVICE(dev);
772 if (dev->power.syscore || dev->power.direct_complete)
775 if (!dev->power.is_late_suspended)
778 if (!dpm_wait_for_superior(dev, async))
781 if (dev->pm_domain) {
783 callback = pm_late_early_op(&dev->pm_domain->ops, state);
784 } else if (dev->type && dev->type->pm) {
786 callback = pm_late_early_op(dev->type->pm, state);
787 } else if (dev->class && dev->class->pm) {
789 callback = pm_late_early_op(dev->class->pm, state);
790 } else if (dev->bus && dev->bus->pm) {
792 callback = pm_late_early_op(dev->bus->pm, state);
797 if (dev_pm_skip_resume(dev))
800 if (dev->driver && dev->driver->pm) {
802 callback = pm_late_early_op(dev->driver->pm, state);
806 error = dpm_run_callback(callback, dev, state, info);
809 dev->power.is_late_suspended = false;
814 pm_runtime_enable(dev);
815 complete_all(&dev->power.completion);
820 dpm_save_failed_dev(dev_name(dev));
821 pm_dev_err(dev, state, async ? " async early" : " early", error);
827 struct device *dev = data;
829 __device_resume_early(dev, pm_transition, true);
830 put_device(dev);
833 static void device_resume_early(struct device *dev)
835 if (dpm_async_fn(dev, async_resume_early))
838 __device_resume_early(dev, pm_transition, false);
847 struct device *dev;
855 dev = to_device(dpm_late_early_list.next);
856 get_device(dev);
857 list_move_tail(&dev->power.entry, &dpm_suspended_list);
861 device_resume_early(dev);
863 put_device(dev);
886 * @dev: Device to handle.
890 static void __device_resume(struct device *dev, pm_message_t state, bool async)
897 TRACE_DEVICE(dev);
900 if (dev->power.syscore)
903 if (dev->power.direct_complete) {
905 pm_runtime_enable(dev);
909 if (!dpm_wait_for_superior(dev, async))
912 dpm_watchdog_set(&wd, dev);
913 device_lock(dev);
919 dev->power.is_prepared = false;
921 if (!dev->power.is_suspended)
924 if (dev->pm_domain) {
926 callback = pm_op(&dev->pm_domain->ops, state);
930 if (dev->type && dev->type->pm) {
932 callback = pm_op(dev->type->pm, state);
936 if (dev->class && dev->class->pm) {
938 callback = pm_op(dev->class->pm, state);
942 if (dev->bus) {
943 if (dev->bus->pm) {
945 callback = pm_op(dev->bus->pm, state);
946 } else if (dev->bus->resume) {
948 callback = dev->bus->resume;
954 if (!callback && dev->driver && dev->driver->pm) {
956 callback = pm_op(dev->driver->pm, state);
960 error = dpm_run_callback(callback, dev, state, info);
961 dev->power.is_suspended = false;
964 device_unlock(dev);
968 complete_all(&dev->power.completion);
975 dpm_save_failed_dev(dev_name(dev));
976 pm_dev_err(dev, state, async ? " async" : "", error);
982 struct device *dev = data;
984 __device_resume(dev, pm_transition, true);
985 put_device(dev);
988 static void device_resume(struct device *dev)
990 if (dpm_async_fn(dev, async_resume))
993 __device_resume(dev, pm_transition, false);
1005 struct device *dev;
1016 dev = to_device(dpm_suspended_list.next);
1018 get_device(dev);
1022 device_resume(dev);
1026 if (!list_empty(&dev->power.entry))
1027 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1031 put_device(dev);
1046 * @dev: Device to handle.
1049 static void device_complete(struct device *dev, pm_message_t state)
1054 if (dev->power.syscore)
1057 device_lock(dev);
1059 if (dev->pm_domain) {
1061 callback = dev->pm_domain->ops.complete;
1062 } else if (dev->type && dev->type->pm) {
1064 callback = dev->type->pm->complete;
1065 } else if (dev->class && dev->class->pm) {
1067 callback = dev->class->pm->complete;
1068 } else if (dev->bus && dev->bus->pm) {
1070 callback = dev->bus->pm->complete;
1073 if (!callback && dev->driver && dev->driver->pm) {
1075 callback = dev->driver->pm->complete;
1079 pm_dev_dbg(dev, state, info);
1080 callback(dev);
1083 device_unlock(dev);
1086 pm_runtime_put(dev);
1106 struct device *dev = to_device(dpm_prepared_list.prev);
1108 get_device(dev);
1109 dev->power.is_prepared = false;
1110 list_move(&dev->power.entry, &list);
1114 trace_device_pm_callback_start(dev, "", state.event);
1115 device_complete(dev, state);
1116 trace_device_pm_callback_end(dev, 0);
1118 put_device(dev);
1168 static void dpm_superior_set_must_resume(struct device *dev)
1173 if (dev->parent)
1174 dev->parent->power.must_resume = true;
1178 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1186 * @dev: Device to handle.
1190 * The driver of @dev will not receive interrupts while this function is being
1193 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1199 TRACE_DEVICE(dev);
1202 dpm_wait_for_subordinate(dev, async);
1207 if (dev->power.syscore || dev->power.direct_complete)
1210 if (dev->pm_domain) {
1212 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1213 } else if (dev->type && dev->type->pm) {
1215 callback = pm_noirq_op(dev->type->pm, state);
1216 } else if (dev->class && dev->class->pm) {
1218 callback = pm_noirq_op(dev->class->pm, state);
1219 } else if (dev->bus && dev->bus->pm) {
1221 callback = pm_noirq_op(dev->bus->pm, state);
1226 if (dev_pm_skip_suspend(dev))
1229 if (dev->driver && dev->driver->pm) {
1231 callback = pm_noirq_op(dev->driver->pm, state);
1235 error = dpm_run_callback(callback, dev, state, info);
1242 dev->power.is_noirq_suspended = true;
1250 if (atomic_read(&dev->power.usage_count) > 1 ||
1251 !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1252 dev->power.may_skip_resume))
1253 dev->power.must_resume = true;
1255 if (dev->power.must_resume)
1256 dpm_superior_set_must_resume(dev);
1259 complete_all(&dev->power.completion);
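The condition at lines 1250-1253 is the gatekeeper for leaving a device suspended across the following resume: the device's usage count must not exceed one, the driver must have advertised DPM_FLAG_MAY_SKIP_RESUME, and the subsystem must have left power.may_skip_resume set. The driver side of that contract is a one-liner at probe time; a hypothetical example:

/* Ask the PM core to leave us suspended on resume when possible. */
dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_SUSPEND |
                                    DPM_FLAG_MAY_SKIP_RESUME);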
1266 struct device *dev = data;
1269 error = __device_suspend_noirq(dev, pm_transition, true);
1271 dpm_save_failed_dev(dev_name(dev));
1272 pm_dev_err(dev, pm_transition, " async", error);
1275 put_device(dev);
1278 static int device_suspend_noirq(struct device *dev)
1280 if (dpm_async_fn(dev, async_suspend_noirq))
1283 return __device_suspend_noirq(dev, pm_transition, false);
1297 struct device *dev = to_device(dpm_late_early_list.prev);
1299 get_device(dev);
1302 error = device_suspend_noirq(dev);
1307 pm_dev_err(dev, state, " noirq", error);
1308 dpm_save_failed_dev(dev_name(dev));
1309 } else if (!list_empty(&dev->power.entry)) {
1310 list_move(&dev->power.entry, &dpm_noirq_list);
1315 put_device(dev);
1359 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1361 struct device *parent = dev->parent;
1368 if (dev->power.wakeup_path && !parent->power.ignore_children)
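Lines 1359-1368 are most of dpm_propagate_wakeup_to_parent(); adding the early return and the locking that the fragments elide (my reconstruction):

static void dpm_propagate_wakeup_to_parent(struct device *dev)
{
        struct device *parent = dev->parent;

        if (!parent)
                return;

        spin_lock_irq(&parent->power.lock);

        /* A wakeup path through a child runs through its parent too. */
        if (dev->power.wakeup_path && !parent->power.ignore_children)
                parent->power.wakeup_path = true;

        spin_unlock_irq(&parent->power.lock);
}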
1376 * @dev: Device to handle.
1380 * Runtime PM is disabled for @dev while this function is being executed.
1382 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1388 TRACE_DEVICE(dev);
1391 __pm_runtime_disable(dev, false);
1393 dpm_wait_for_subordinate(dev, async);
1403 if (dev->power.syscore || dev->power.direct_complete)
1406 if (dev->pm_domain) {
1408 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1409 } else if (dev->type && dev->type->pm) {
1411 callback = pm_late_early_op(dev->type->pm, state);
1412 } else if (dev->class && dev->class->pm) {
1414 callback = pm_late_early_op(dev->class->pm, state);
1415 } else if (dev->bus && dev->bus->pm) {
1417 callback = pm_late_early_op(dev->bus->pm, state);
1422 if (dev_pm_skip_suspend(dev))
1425 if (dev->driver && dev->driver->pm) {
1427 callback = pm_late_early_op(dev->driver->pm, state);
1431 error = dpm_run_callback(callback, dev, state, info);
1436 dpm_propagate_wakeup_to_parent(dev);
1439 dev->power.is_late_suspended = true;
1443 complete_all(&dev->power.completion);
1449 struct device *dev = data;
1452 error = __device_suspend_late(dev, pm_transition, true);
1454 dpm_save_failed_dev(dev_name(dev));
1455 pm_dev_err(dev, pm_transition, " async", error);
1457 put_device(dev);
1460 static int device_suspend_late(struct device *dev)
1462 if (dpm_async_fn(dev, async_suspend_late))
1465 return __device_suspend_late(dev, pm_transition, false);
1483 struct device *dev = to_device(dpm_suspended_list.prev);
1485 get_device(dev);
1489 error = device_suspend_late(dev);
1493 if (!list_empty(&dev->power.entry))
1494 list_move(&dev->power.entry, &dpm_late_early_list);
1497 pm_dev_err(dev, state, " late", error);
1498 dpm_save_failed_dev(dev_name(dev));
1503 put_device(dev);
1549 * @dev: Device to suspend.
1554 static int legacy_suspend(struct device *dev, pm_message_t state,
1555 int (*cb)(struct device *dev, pm_message_t state),
1561 calltime = initcall_debug_start(dev, cb);
1563 trace_device_pm_callback_start(dev, info, state.event);
1564 error = cb(dev, state);
1565 trace_device_pm_callback_end(dev, error);
1568 initcall_debug_report(dev, calltime, cb, error);
1573 static void dpm_clear_superiors_direct_complete(struct device *dev)
1578 if (dev->parent) {
1579 spin_lock_irq(&dev->parent->power.lock);
1580 dev->parent->power.direct_complete = false;
1581 spin_unlock_irq(&dev->parent->power.lock);
1586 list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1597 * @dev: Device to handle.
1601 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1608 TRACE_DEVICE(dev);
1611 dpm_wait_for_subordinate(dev, async);
1614 dev->power.direct_complete = false;
1629 pm_runtime_barrier(dev);
1632 dev->power.direct_complete = false;
1637 if (dev->power.syscore)
1641 if (device_may_wakeup(dev) || dev->power.wakeup_path)
1642 dev->power.direct_complete = false;
1644 if (dev->power.direct_complete) {
1645 if (pm_runtime_status_suspended(dev)) {
1646 pm_runtime_disable(dev);
1647 if (pm_runtime_status_suspended(dev)) {
1648 pm_dev_dbg(dev, state, "direct-complete ");
1652 pm_runtime_enable(dev);
1654 dev->power.direct_complete = false;
1657 dev->power.may_skip_resume = true;
1658 dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
1660 dpm_watchdog_set(&wd, dev);
1661 device_lock(dev);
1663 if (dev->pm_domain) {
1665 callback = pm_op(&dev->pm_domain->ops, state);
1669 if (dev->type && dev->type->pm) {
1671 callback = pm_op(dev->type->pm, state);
1675 if (dev->class && dev->class->pm) {
1677 callback = pm_op(dev->class->pm, state);
1681 if (dev->bus) {
1682 if (dev->bus->pm) {
1684 callback = pm_op(dev->bus->pm, state);
1685 } else if (dev->bus->suspend) {
1686 pm_dev_dbg(dev, state, "legacy bus ");
1687 error = legacy_suspend(dev, state, dev->bus->suspend,
1694 if (!callback && dev->driver && dev->driver->pm) {
1696 callback = pm_op(dev->driver->pm, state);
1699 error = dpm_run_callback(callback, dev, state, info);
1703 dev->power.is_suspended = true;
1704 if (device_may_wakeup(dev))
1705 dev->power.wakeup_path = true;
1707 dpm_propagate_wakeup_to_parent(dev);
1708 dpm_clear_superiors_direct_complete(dev);
1711 device_unlock(dev);
1718 complete_all(&dev->power.completion);
1725 struct device *dev = data;
1728 error = __device_suspend(dev, pm_transition, true);
1730 dpm_save_failed_dev(dev_name(dev));
1731 pm_dev_err(dev, pm_transition, " async", error);
1734 put_device(dev);
1737 static int device_suspend(struct device *dev)
1739 if (dpm_async_fn(dev, async_suspend))
1742 return __device_suspend(dev, pm_transition, false);
1764 struct device *dev = to_device(dpm_prepared_list.prev);
1766 get_device(dev);
1770 error = device_suspend(dev);
1775 pm_dev_err(dev, state, "", error);
1776 dpm_save_failed_dev(dev_name(dev));
1777 } else if (!list_empty(&dev->power.entry)) {
1778 list_move(&dev->power.entry, &dpm_suspended_list);
1783 put_device(dev);
1805 * @dev: Device to handle.
1811 static int device_prepare(struct device *dev, pm_message_t state)
1822 pm_runtime_get_noresume(dev);
1824 if (dev->power.syscore)
1827 device_lock(dev);
1829 dev->power.wakeup_path = false;
1831 if (dev->power.no_pm_callbacks)
1834 if (dev->pm_domain)
1835 callback = dev->pm_domain->ops.prepare;
1836 else if (dev->type && dev->type->pm)
1837 callback = dev->type->pm->prepare;
1838 else if (dev->class && dev->class->pm)
1839 callback = dev->class->pm->prepare;
1840 else if (dev->bus && dev->bus->pm)
1841 callback = dev->bus->pm->prepare;
1843 if (!callback && dev->driver && dev->driver->pm)
1844 callback = dev->driver->pm->prepare;
1847 ret = callback(dev);
1850 device_unlock(dev);
1854 pm_runtime_put(dev);
1864 spin_lock_irq(&dev->power.lock);
1865 dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1866 (ret > 0 || dev->power.no_pm_callbacks) &&
1867 !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
1868 spin_unlock_irq(&dev->power.lock);
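The computation at lines 1864-1868 shows how direct_complete is granted: the transition must be a suspend, the device's .prepare() callback must have returned a positive value (or the device has no PM callbacks at all), and the driver must not have set DPM_FLAG_NO_DIRECT_COMPLETE. On the driver side, consenting can be as simple as the following hypothetical callback (whether unconditional consent is safe depends on the device):

static int foo_prepare(struct device *dev)
{
        /*
         * Positive return: if this device is still runtime-suspended
         * when __device_suspend() reaches it, leave it suspended and
         * skip all of its suspend/resume callbacks (direct_complete).
         */
        return 1;
}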
1901 struct device *dev = to_device(dpm_list.next);
1903 get_device(dev);
1907 trace_device_pm_callback_start(dev, "", state.event);
1908 error = device_prepare(dev, state);
1909 trace_device_pm_callback_end(dev, error);
1914 dev->power.is_prepared = true;
1915 if (!list_empty(&dev->power.entry))
1916 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1920 dev_info(dev, "not prepared for power transition: code %d\n",
1926 put_device(dev);
1967 * @subordinate: Device that needs to wait for @dev.
1968 * @dev: Device to wait for.
1970 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1972 dpm_wait(dev, subordinate->power.async_suspend);
1987 struct device *dev;
1993 list_for_each_entry(dev, &dpm_list, power.entry)
1994 fn(dev, data);
2014 void device_pm_check_callbacks(struct device *dev)
2018 spin_lock_irqsave(&dev->power.lock, flags);
2019 dev->power.no_pm_callbacks =
2020 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2021 !dev->bus->suspend && !dev->bus->resume)) &&
2022 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2023 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2024 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2025 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2026 !dev->driver->suspend && !dev->driver->resume));
2027 spin_unlock_irqrestore(&dev->power.lock, flags);
2030 bool dev_pm_skip_suspend(struct device *dev)
2032 return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2033 pm_runtime_status_suspended(dev);