Lines matching refs:dev (runtime PM core, drivers/base/power/runtime.c)

21 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
26 if (dev->pm_domain)
27 ops = &dev->pm_domain->ops;
28 else if (dev->type && dev->type->pm)
29 ops = dev->type->pm;
30 else if (dev->class && dev->class->pm)
31 ops = dev->class->pm;
32 else if (dev->bus && dev->bus->pm)
33 ops = dev->bus->pm;
42 if (!cb && dev->driver && dev->driver->pm)
43 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
48 #define RPM_GET_CALLBACK(dev, callback) \
49 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
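
The matches above show how the core resolves a runtime PM callback: pm_domain ops win, then device type, class and bus ops, and the driver's own dev_pm_ops is the fallback consulted through the offsetof()-based lookup behind RPM_GET_CALLBACK(). A minimal driver-side sketch (the foo_* names are hypothetical) of the dev_pm_ops that RPM_GET_CALLBACK(dev, runtime_suspend) would land on when no higher layer provides one:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Hypothetical driver callbacks; a real driver would touch hardware here. */
static int foo_runtime_suspend(struct device *dev)
{
	return 0;	/* put the device into a low-power state */
}

static int foo_runtime_resume(struct device *dev)
{
	return 0;	/* bring the device back to full power */
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
/* Assigned to the driver's .driver.pm, i.e. what dev->driver->pm points at. */
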
51 static int rpm_resume(struct device *dev, int rpmflags);
52 static int rpm_suspend(struct device *dev, int rpmflags);
56 * @dev: Device to update the accounting for
65 static void update_pm_runtime_accounting(struct device *dev)
69 if (dev->power.disable_depth > 0)
72 last = dev->power.accounting_timestamp;
75 dev->power.accounting_timestamp = now;
87 if (dev->power.runtime_status == RPM_SUSPENDED)
88 dev->power.suspended_time += delta;
90 dev->power.active_time += delta;
93 static void __update_runtime_status(struct device *dev, enum rpm_status status)
95 update_pm_runtime_accounting(dev);
96 dev->power.runtime_status = status;
99 static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
104 spin_lock_irqsave(&dev->power.lock, flags);
106 update_pm_runtime_accounting(dev);
107 time = suspended ? dev->power.suspended_time : dev->power.active_time;
109 spin_unlock_irqrestore(&dev->power.lock, flags);
114 u64 pm_runtime_active_time(struct device *dev)
116 return rpm_get_accounted_time(dev, false);
119 u64 pm_runtime_suspended_time(struct device *dev)
121 return rpm_get_accounted_time(dev, true);
127 * @dev: Device to handle.
129 static void pm_runtime_deactivate_timer(struct device *dev)
131 if (dev->power.timer_expires > 0) {
132 hrtimer_try_to_cancel(&dev->power.suspend_timer);
133 dev->power.timer_expires = 0;
139 * @dev: Device to handle.
141 static void pm_runtime_cancel_pending(struct device *dev)
143 pm_runtime_deactivate_timer(dev);
148 dev->power.request = RPM_REQ_NONE;
153 * @dev: Device to handle.
160 * This function may be called either with or without dev->power.lock held.
163 u64 pm_runtime_autosuspend_expiration(struct device *dev)
168 if (!dev->power.use_autosuspend)
171 autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
175 expires = READ_ONCE(dev->power.last_busy);
184 static int dev_memalloc_noio(struct device *dev, void *data)
186 return dev->power.memalloc_noio;
191 * @dev: Device to handle.
217 void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
226 spin_lock_irq(&dev->power.lock);
227 enabled = dev->power.memalloc_noio;
228 dev->power.memalloc_noio = enable;
229 spin_unlock_irq(&dev->power.lock);
238 dev = dev->parent;
245 if (!dev || (!enable &&
246 device_for_each_child(dev, NULL,
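
pm_runtime_set_memalloc_noio() above propagates power.memalloc_noio up the parent chain so that runtime resume paths sitting in the I/O path allocate memory with GFP_NOIO semantics. A hedged sketch of a call site (foo_blk_probe is hypothetical; in practice this is mostly done by block and network device code rather than by individual drivers):

static int foo_blk_probe(struct device *dev)
{
	/*
	 * Mark this device and its ancestors so their runtime resume
	 * callbacks cannot recurse into I/O while allocating memory.
	 */
	pm_runtime_set_memalloc_noio(dev, true);
	return 0;
}
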
256 * @dev: Device to test.
258 static int rpm_check_suspend_allowed(struct device *dev)
262 if (dev->power.runtime_error)
264 else if (dev->power.disable_depth > 0)
266 else if (atomic_read(&dev->power.usage_count) > 0)
268 else if (!dev->power.ignore_children &&
269 atomic_read(&dev->power.child_count))
273 else if ((dev->power.deferred_resume
274 && dev->power.runtime_status == RPM_SUSPENDING)
275 || (dev->power.request_pending
276 && dev->power.request == RPM_REQ_RESUME))
278 else if (__dev_pm_qos_resume_latency(dev) == 0)
280 else if (dev->power.runtime_status == RPM_SUSPENDED)
286 static int rpm_get_suppliers(struct device *dev)
290 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
329 static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
333 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
341 static void rpm_put_suppliers(struct device *dev)
343 __rpm_put_suppliers(dev, true);
346 static void rpm_suspend_suppliers(struct device *dev)
351 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
361 * @dev: Device to run the callback for.
363 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
364 __releases(&dev->power.lock) __acquires(&dev->power.lock)
367 bool use_links = dev->power.links_count > 0;
369 if (dev->power.irq_safe) {
370 spin_unlock(&dev->power.lock);
372 spin_unlock_irq(&dev->power.lock);
381 if (use_links && dev->power.runtime_status == RPM_RESUMING) {
384 retval = rpm_get_suppliers(dev);
386 rpm_put_suppliers(dev);
394 retval = cb(dev);
396 if (dev->power.irq_safe) {
397 spin_lock(&dev->power.lock);
407 && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
408 || (dev->power.runtime_status == RPM_RESUMING && retval))) {
411 __rpm_put_suppliers(dev, false);
417 spin_lock_irq(&dev->power.lock);
425 * @dev: Device to notify the bus type about.
434 * This function must be called under dev->power.lock with interrupts disabled.
436 static int rpm_idle(struct device *dev, int rpmflags)
441 trace_rpm_idle_rcuidle(dev, rpmflags);
442 retval = rpm_check_suspend_allowed(dev);
447 else if (dev->power.runtime_status != RPM_ACTIVE)
454 else if (dev->power.request_pending &&
455 dev->power.request > RPM_REQ_IDLE)
459 else if (dev->power.idle_notification)
465 dev->power.request = RPM_REQ_NONE;
467 callback = RPM_GET_CALLBACK(dev, runtime_idle);
470 if (!callback || dev->power.no_callbacks)
475 dev->power.request = RPM_REQ_IDLE;
476 if (!dev->power.request_pending) {
477 dev->power.request_pending = true;
478 queue_work(pm_wq, &dev->power.work);
480 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
484 dev->power.idle_notification = true;
486 if (dev->power.irq_safe)
487 spin_unlock(&dev->power.lock);
489 spin_unlock_irq(&dev->power.lock);
491 retval = callback(dev);
493 if (dev->power.irq_safe)
494 spin_lock(&dev->power.lock);
496 spin_lock_irq(&dev->power.lock);
498 dev->power.idle_notification = false;
499 wake_up_all(&dev->power.wait_queue);
502 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
503 return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
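
rpm_idle() treats a non-zero return from ->runtime_idle() as "do not suspend now" and only falls through to rpm_suspend(dev, rpmflags | RPM_AUTO) when the callback returns 0 or is absent. A hedged sketch of a driver idle callback (foo_runtime_idle and foo_transfer_in_flight are hypothetical):

static int foo_runtime_idle(struct device *dev)
{
	/* Veto the suspend while a transfer is still in flight. */
	if (foo_transfer_in_flight(dev))	/* hypothetical helper */
		return -EBUSY;

	/* Returning 0 lets the core go on to suspend the device. */
	return 0;
}
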
509 * @dev: Device to run the callback for.
511 static int rpm_callback(int (*cb)(struct device *), struct device *dev)
518 if (dev->power.memalloc_noio) {
531 retval = __rpm_callback(cb, dev);
534 retval = __rpm_callback(cb, dev);
537 dev->power.runtime_error = retval;
543 * @dev: Device to suspend.
555 * ignore_children of parent->power and irq_safe of dev->power are not set).
560 * This function must be called under dev->power.lock with interrupts disabled.
562 static int rpm_suspend(struct device *dev, int rpmflags)
563 __releases(&dev->power.lock) __acquires(&dev->power.lock)
569 trace_rpm_suspend_rcuidle(dev, rpmflags);
572 retval = rpm_check_suspend_allowed(dev);
577 if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
584 && dev->power.runtime_status != RPM_SUSPENDING) {
585 u64 expires = pm_runtime_autosuspend_expiration(dev);
589 dev->power.request = RPM_REQ_NONE;
598 if (!(dev->power.timer_expires &&
599 dev->power.timer_expires <= expires)) {
604 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
607 dev->power.timer_expires = expires;
608 hrtimer_start_range_ns(&dev->power.suspend_timer,
613 dev->power.timer_autosuspends = 1;
619 pm_runtime_cancel_pending(dev);
621 if (dev->power.runtime_status == RPM_SUSPENDING) {
629 if (dev->power.irq_safe) {
630 spin_unlock(&dev->power.lock);
634 spin_lock(&dev->power.lock);
640 prepare_to_wait(&dev->power.wait_queue, &wait,
642 if (dev->power.runtime_status != RPM_SUSPENDING)
645 spin_unlock_irq(&dev->power.lock);
649 spin_lock_irq(&dev->power.lock);
651 finish_wait(&dev->power.wait_queue, &wait);
655 if (dev->power.no_callbacks)
660 dev->power.request = (rpmflags & RPM_AUTO) ?
662 if (!dev->power.request_pending) {
663 dev->power.request_pending = true;
664 queue_work(pm_wq, &dev->power.work);
669 __update_runtime_status(dev, RPM_SUSPENDING);
671 callback = RPM_GET_CALLBACK(dev, runtime_suspend);
673 dev_pm_enable_wake_irq_check(dev, true);
674 retval = rpm_callback(callback, dev);
678 dev_pm_enable_wake_irq_complete(dev);
681 __update_runtime_status(dev, RPM_SUSPENDED);
682 pm_runtime_deactivate_timer(dev);
684 if (dev->parent) {
685 parent = dev->parent;
688 wake_up_all(&dev->power.wait_queue);
690 if (dev->power.deferred_resume) {
691 dev->power.deferred_resume = false;
692 rpm_resume(dev, 0);
697 if (dev->power.irq_safe)
702 spin_unlock(&dev->power.lock);
708 spin_lock(&dev->power.lock);
711 if (dev->power.links_count > 0) {
712 spin_unlock_irq(&dev->power.lock);
714 rpm_suspend_suppliers(dev);
716 spin_lock_irq(&dev->power.lock);
720 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
725 dev_pm_disable_wake_irq_check(dev, true);
726 __update_runtime_status(dev, RPM_ACTIVE);
727 dev->power.deferred_resume = false;
728 wake_up_all(&dev->power.wait_queue);
731 dev->power.runtime_error = 0;
740 pm_runtime_autosuspend_expiration(dev) != 0)
743 pm_runtime_cancel_pending(dev);
750 * @dev: Device to resume.
763 * This function must be called under dev->power.lock with interrupts disabled.
765 static int rpm_resume(struct device *dev, int rpmflags)
766 __releases(&dev->power.lock) __acquires(&dev->power.lock)
772 trace_rpm_resume_rcuidle(dev, rpmflags);
775 if (dev->power.runtime_error)
777 else if (dev->power.disable_depth == 1 && dev->power.is_suspended
778 && dev->power.runtime_status == RPM_ACTIVE)
780 else if (dev->power.disable_depth > 0)
791 dev->power.request = RPM_REQ_NONE;
792 if (!dev->power.timer_autosuspends)
793 pm_runtime_deactivate_timer(dev);
795 if (dev->power.runtime_status == RPM_ACTIVE) {
800 if (dev->power.runtime_status == RPM_RESUMING
801 || dev->power.runtime_status == RPM_SUSPENDING) {
805 if (dev->power.runtime_status == RPM_SUSPENDING)
806 dev->power.deferred_resume = true;
812 if (dev->power.irq_safe) {
813 spin_unlock(&dev->power.lock);
817 spin_lock(&dev->power.lock);
823 prepare_to_wait(&dev->power.wait_queue, &wait,
825 if (dev->power.runtime_status != RPM_RESUMING
826 && dev->power.runtime_status != RPM_SUSPENDING)
829 spin_unlock_irq(&dev->power.lock);
833 spin_lock_irq(&dev->power.lock);
835 finish_wait(&dev->power.wait_queue, &wait);
844 if (dev->power.no_callbacks && !parent && dev->parent) {
845 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
846 if (dev->parent->power.disable_depth > 0
847 || dev->parent->power.ignore_children
848 || dev->parent->power.runtime_status == RPM_ACTIVE) {
849 atomic_inc(&dev->parent->power.child_count);
850 spin_unlock(&dev->parent->power.lock);
854 spin_unlock(&dev->parent->power.lock);
859 dev->power.request = RPM_REQ_RESUME;
860 if (!dev->power.request_pending) {
861 dev->power.request_pending = true;
862 queue_work(pm_wq, &dev->power.work);
868 if (!parent && dev->parent) {
871 * necessary. Not needed if dev is irq-safe; then the
874 parent = dev->parent;
875 if (dev->power.irq_safe)
877 spin_unlock(&dev->power.lock);
894 spin_lock(&dev->power.lock);
901 if (dev->power.no_callbacks)
904 __update_runtime_status(dev, RPM_RESUMING);
906 callback = RPM_GET_CALLBACK(dev, runtime_resume);
908 dev_pm_disable_wake_irq_check(dev, false);
909 retval = rpm_callback(callback, dev);
911 __update_runtime_status(dev, RPM_SUSPENDED);
912 pm_runtime_cancel_pending(dev);
913 dev_pm_enable_wake_irq_check(dev, false);
916 __update_runtime_status(dev, RPM_ACTIVE);
917 pm_runtime_mark_last_busy(dev);
921 wake_up_all(&dev->power.wait_queue);
924 rpm_idle(dev, RPM_ASYNC);
927 if (parent && !dev->power.irq_safe) {
928 spin_unlock_irq(&dev->power.lock);
932 spin_lock_irq(&dev->power.lock);
935 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
949 struct device *dev = container_of(work, struct device, power.work);
952 spin_lock_irq(&dev->power.lock);
954 if (!dev->power.request_pending)
957 req = dev->power.request;
958 dev->power.request = RPM_REQ_NONE;
959 dev->power.request_pending = false;
965 rpm_idle(dev, RPM_NOWAIT);
968 rpm_suspend(dev, RPM_NOWAIT);
971 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
974 rpm_resume(dev, RPM_NOWAIT);
979 spin_unlock_irq(&dev->power.lock);
990 struct device *dev = container_of(timer, struct device, power.suspend_timer);
994 spin_lock_irqsave(&dev->power.lock, flags);
996 expires = dev->power.timer_expires;
1002 dev->power.timer_expires = 0;
1003 rpm_suspend(dev, dev->power.timer_autosuspends ?
1007 spin_unlock_irqrestore(&dev->power.lock, flags);
1014 * @dev: Device to suspend.
1017 int pm_schedule_suspend(struct device *dev, unsigned int delay)
1023 spin_lock_irqsave(&dev->power.lock, flags);
1026 retval = rpm_suspend(dev, RPM_ASYNC);
1030 retval = rpm_check_suspend_allowed(dev);
1035 pm_runtime_cancel_pending(dev);
1038 dev->power.timer_expires = expires;
1039 dev->power.timer_autosuspends = 0;
1040 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
1043 spin_unlock_irqrestore(&dev->power.lock, flags);
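
pm_schedule_suspend() arms the suspend_timer shown earlier so that an asynchronous suspend request is submitted after a delay given in milliseconds. A minimal usage sketch (foo_hw_idle is a hypothetical point where a driver knows the hardware has gone quiet):

#include <linux/device.h>
#include <linux/pm_runtime.h>

static void foo_hw_idle(struct device *dev)
{
	/* Ask the core to try suspending the device in about 100 ms. */
	int ret = pm_schedule_suspend(dev, 100);

	if (ret < 0)
		dev_warn(dev, "failed to schedule suspend: %d\n", ret);
}
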
1051 * @dev: Device to send idle notification for.
1061 int __pm_runtime_idle(struct device *dev, int rpmflags)
1067 if (!atomic_dec_and_test(&dev->power.usage_count)) {
1068 trace_rpm_usage_rcuidle(dev, rpmflags);
1073 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1075 spin_lock_irqsave(&dev->power.lock, flags);
1076 retval = rpm_idle(dev, rpmflags);
1077 spin_unlock_irqrestore(&dev->power.lock, flags);
1085 * @dev: Device to suspend.
1095 int __pm_runtime_suspend(struct device *dev, int rpmflags)
1101 if (!atomic_dec_and_test(&dev->power.usage_count)) {
1102 trace_rpm_usage_rcuidle(dev, rpmflags);
1107 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1109 spin_lock_irqsave(&dev->power.lock, flags);
1110 retval = rpm_suspend(dev, rpmflags);
1111 spin_unlock_irqrestore(&dev->power.lock, flags);
1119 * @dev: Device to resume.
1128 int __pm_runtime_resume(struct device *dev, int rpmflags)
1133 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1134 dev->power.runtime_status != RPM_ACTIVE);
1137 atomic_inc(&dev->power.usage_count);
1139 spin_lock_irqsave(&dev->power.lock, flags);
1140 retval = rpm_resume(dev, rpmflags);
1141 spin_unlock_irqrestore(&dev->power.lock, flags);
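
__pm_runtime_idle(), __pm_runtime_suspend() and __pm_runtime_resume() above are the backends of the usual helpers in include/linux/pm_runtime.h, which combine the usage-counter update with the RPM_* flags. The canonical driver pattern around a hardware access, sketched with a hypothetical foo_do_io():

#include <linux/pm_runtime.h>

static int foo_do_io(struct device *dev)
{
	int ret;

	/* Bump the usage count and resume synchronously if needed. */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... access the hardware ... */

	/* Drop the usage count and let the device autosuspend again. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}
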
1149 * @dev: Device to handle.
1152 * Return -EINVAL if runtime PM is disabled for @dev.
1154 * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
1155 * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
1156 * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
1167 * @dev after this function has returned a positive value for it.
1169 int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
1174 spin_lock_irqsave(&dev->power.lock, flags);
1175 if (dev->power.disable_depth > 0) {
1177 } else if (dev->power.runtime_status != RPM_ACTIVE) {
1181 atomic_inc(&dev->power.usage_count);
1183 retval = atomic_inc_not_zero(&dev->power.usage_count);
1185 trace_rpm_usage_rcuidle(dev, 0);
1186 spin_unlock_irqrestore(&dev->power.lock, flags);
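
pm_runtime_get_if_active() takes the usage count only when the device is already RPM_ACTIVE, so hot paths can avoid ever waiting for a resume. A hedged sketch (foo_poll is hypothetical):

static void foo_poll(struct device *dev)
{
	/*
	 * Only touch the hardware if it is already powered up; with
	 * ign_usage_count == false this additionally requires a non-zero
	 * usage count, i.e. the pm_runtime_get_if_in_use() behaviour.
	 */
	if (pm_runtime_get_if_active(dev, false) <= 0)
		return;

	/* ... read registers ... */

	pm_runtime_put(dev);
}
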
1194 * @dev: Device to handle.
1209 * If @dev has any suppliers (as reflected by device links to them), and @status
1211 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
1216 int __pm_runtime_set_status(struct device *dev, unsigned int status)
1218 struct device *parent = dev->parent;
1225 spin_lock_irq(&dev->power.lock);
1231 if (dev->power.runtime_error || dev->power.disable_depth)
1232 dev->power.disable_depth++;
1236 spin_unlock_irq(&dev->power.lock);
1250 error = rpm_get_suppliers(dev);
1257 spin_lock_irq(&dev->power.lock);
1259 if (dev->power.runtime_status == status || !parent)
1276 dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1277 dev_name(dev),
1280 } else if (dev->power.runtime_status == RPM_SUSPENDED) {
1293 __update_runtime_status(dev, status);
1295 dev->power.runtime_error = 0;
1298 spin_unlock_irq(&dev->power.lock);
1306 rpm_put_suppliers(dev);
1311 pm_runtime_enable(dev);
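
__pm_runtime_set_status() is what pm_runtime_set_active() and pm_runtime_set_suspended() expand to; drivers use them at probe time to tell the core about the initial hardware state before enabling runtime PM. A minimal probe-time sketch (foo_probe is hypothetical):

#include <linux/pm_runtime.h>

static int foo_probe(struct device *dev)
{
	/* The hardware comes up powered: record that, then enable runtime PM. */
	pm_runtime_set_active(dev);	/* __pm_runtime_set_status(dev, RPM_ACTIVE) */
	pm_runtime_enable(dev);
	return 0;
}
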
1319 * @dev: Device to handle.
1324 * Should be called under dev->power.lock with interrupts disabled.
1326 static void __pm_runtime_barrier(struct device *dev)
1328 pm_runtime_deactivate_timer(dev);
1330 if (dev->power.request_pending) {
1331 dev->power.request = RPM_REQ_NONE;
1332 spin_unlock_irq(&dev->power.lock);
1334 cancel_work_sync(&dev->power.work);
1336 spin_lock_irq(&dev->power.lock);
1337 dev->power.request_pending = false;
1340 if (dev->power.runtime_status == RPM_SUSPENDING
1341 || dev->power.runtime_status == RPM_RESUMING
1342 || dev->power.idle_notification) {
1347 prepare_to_wait(&dev->power.wait_queue, &wait,
1349 if (dev->power.runtime_status != RPM_SUSPENDING
1350 && dev->power.runtime_status != RPM_RESUMING
1351 && !dev->power.idle_notification)
1353 spin_unlock_irq(&dev->power.lock);
1357 spin_lock_irq(&dev->power.lock);
1359 finish_wait(&dev->power.wait_queue, &wait);
1365 * @dev: Device to handle.
1377 int pm_runtime_barrier(struct device *dev)
1381 pm_runtime_get_noresume(dev);
1382 spin_lock_irq(&dev->power.lock);
1384 if (dev->power.request_pending
1385 && dev->power.request == RPM_REQ_RESUME) {
1386 rpm_resume(dev, 0);
1390 __pm_runtime_barrier(dev);
1392 spin_unlock_irq(&dev->power.lock);
1393 pm_runtime_put_noidle(dev);
1401 * @dev: Device to handle.
1413 void __pm_runtime_disable(struct device *dev, bool check_resume)
1415 spin_lock_irq(&dev->power.lock);
1417 if (dev->power.disable_depth > 0) {
1418 dev->power.disable_depth++;
1427 if (check_resume && dev->power.request_pending
1428 && dev->power.request == RPM_REQ_RESUME) {
1433 pm_runtime_get_noresume(dev);
1435 rpm_resume(dev, 0);
1437 pm_runtime_put_noidle(dev);
1441 update_pm_runtime_accounting(dev);
1443 if (!dev->power.disable_depth++)
1444 __pm_runtime_barrier(dev);
1447 spin_unlock_irq(&dev->power.lock);
1453 * @dev: Device to handle.
1455 void pm_runtime_enable(struct device *dev)
1459 spin_lock_irqsave(&dev->power.lock, flags);
1461 if (dev->power.disable_depth > 0) {
1462 dev->power.disable_depth--;
1465 if (!dev->power.disable_depth)
1466 dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
1468 dev_warn(dev, "Unbalanced %s!\n", __func__);
1471 WARN(!dev->power.disable_depth &&
1472 dev->power.runtime_status == RPM_SUSPENDED &&
1473 !dev->power.ignore_children &&
1474 atomic_read(&dev->power.child_count) > 0,
1476 dev_name(dev));
1478 spin_unlock_irqrestore(&dev->power.lock, flags);
1494 * @dev: Device to handle.
1496 int devm_pm_runtime_enable(struct device *dev)
1498 pm_runtime_enable(dev);
1500 return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
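
devm_pm_runtime_enable() is the managed variant: the matching pm_runtime_disable() is queued as a devres action, so unbinding the device undoes the enable automatically. Sketch (bar_probe is hypothetical):

static int bar_probe(struct device *dev)
{
	int ret;

	/* No explicit pm_runtime_disable() needed in the remove path. */
	ret = devm_pm_runtime_enable(dev);
	if (ret)
		return ret;

	return 0;
}
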
1506 * @dev: Device to handle.
1512 void pm_runtime_forbid(struct device *dev)
1514 spin_lock_irq(&dev->power.lock);
1515 if (!dev->power.runtime_auto)
1518 dev->power.runtime_auto = false;
1519 atomic_inc(&dev->power.usage_count);
1520 rpm_resume(dev, 0);
1523 spin_unlock_irq(&dev->power.lock);
1529 * @dev: Device to handle.
1533 void pm_runtime_allow(struct device *dev)
1535 spin_lock_irq(&dev->power.lock);
1536 if (dev->power.runtime_auto)
1539 dev->power.runtime_auto = true;
1540 if (atomic_dec_and_test(&dev->power.usage_count))
1541 rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
1543 trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
1546 spin_unlock_irq(&dev->power.lock);
1552 * @dev: Device to handle.
1558 void pm_runtime_no_callbacks(struct device *dev)
1560 spin_lock_irq(&dev->power.lock);
1561 dev->power.no_callbacks = 1;
1562 spin_unlock_irq(&dev->power.lock);
1563 if (device_is_registered(dev))
1564 rpm_sysfs_remove(dev);
1570 * @dev: Device to handle
1579 void pm_runtime_irq_safe(struct device *dev)
1581 if (dev->parent)
1582 pm_runtime_get_sync(dev->parent);
1583 spin_lock_irq(&dev->power.lock);
1584 dev->power.irq_safe = 1;
1585 spin_unlock_irq(&dev->power.lock);
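
pm_runtime_irq_safe() marks the device so pm_runtime_get_sync(), pm_runtime_put() and related helpers may be called with interrupts disabled, at the price of keeping the parent permanently resumed (note the pm_runtime_get_sync(dev->parent) above). A hedged sketch of the resulting pattern (foo_irq_handler is hypothetical and assumes the device's callbacks never sleep):

#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

static irqreturn_t foo_irq_handler(int irq, void *data)
{
	struct device *dev = data;

	/* Legal in hard-irq context only because pm_runtime_irq_safe(dev) was called. */
	pm_runtime_get_sync(dev);

	/* ... acknowledge and handle the interrupt ... */

	pm_runtime_put(dev);
	return IRQ_HANDLED;
}
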
1591 * @dev: Device to handle.
1598 * This function must be called under dev->power.lock with interrupts disabled.
1600 static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1602 int delay = dev->power.autosuspend_delay;
1605 if (dev->power.use_autosuspend && delay < 0) {
1609 atomic_inc(&dev->power.usage_count);
1610 rpm_resume(dev, 0);
1612 trace_rpm_usage_rcuidle(dev, 0);
1621 atomic_dec(&dev->power.usage_count);
1624 rpm_idle(dev, RPM_AUTO);
1630 * @dev: Device to handle.
1637 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1641 spin_lock_irq(&dev->power.lock);
1642 old_delay = dev->power.autosuspend_delay;
1643 old_use = dev->power.use_autosuspend;
1644 dev->power.autosuspend_delay = delay;
1645 update_autosuspend(dev, old_delay, old_use);
1646 spin_unlock_irq(&dev->power.lock);
1652 * @dev: Device to handle.
1658 void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1662 spin_lock_irq(&dev->power.lock);
1663 old_delay = dev->power.autosuspend_delay;
1664 old_use = dev->power.use_autosuspend;
1665 dev->power.use_autosuspend = use;
1666 update_autosuspend(dev, old_delay, old_use);
1667 spin_unlock_irq(&dev->power.lock);
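
These two setters back pm_runtime_set_autosuspend_delay() and pm_runtime_use_autosuspend() (and the autosuspend_delay_ms sysfs attribute). A typical probe-time setup, as a hedged sketch (the 50 ms value and foo_setup_autosuspend are hypothetical):

#include <linux/pm_runtime.h>

static void foo_setup_autosuspend(struct device *dev)
{
	/* Keep the device powered for at least 50 ms after the last activity. */
	pm_runtime_set_autosuspend_delay(dev, 50);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
}
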
1673 * @dev: Device object to initialize.
1675 void pm_runtime_init(struct device *dev)
1677 dev->power.runtime_status = RPM_SUSPENDED;
1678 dev->power.idle_notification = false;
1680 dev->power.disable_depth = 1;
1681 atomic_set(&dev->power.usage_count, 0);
1683 dev->power.runtime_error = 0;
1685 atomic_set(&dev->power.child_count, 0);
1686 pm_suspend_ignore_children(dev, false);
1687 dev->power.runtime_auto = true;
1689 dev->power.request_pending = false;
1690 dev->power.request = RPM_REQ_NONE;
1691 dev->power.deferred_resume = false;
1692 dev->power.needs_force_resume = 0;
1693 INIT_WORK(&dev->power.work, pm_runtime_work);
1695 dev->power.timer_expires = 0;
1696 hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1697 dev->power.suspend_timer.function = pm_suspend_timer_fn;
1699 init_waitqueue_head(&dev->power.wait_queue);
1704 * @dev: Device object to re-initialize.
1706 void pm_runtime_reinit(struct device *dev)
1708 if (!pm_runtime_enabled(dev)) {
1709 if (dev->power.runtime_status == RPM_ACTIVE)
1710 pm_runtime_set_suspended(dev);
1711 if (dev->power.irq_safe) {
1712 spin_lock_irq(&dev->power.lock);
1713 dev->power.irq_safe = 0;
1714 spin_unlock_irq(&dev->power.lock);
1715 if (dev->parent)
1716 pm_runtime_put(dev->parent);
1723 * @dev: Device object being removed from device hierarchy.
1725 void pm_runtime_remove(struct device *dev)
1727 __pm_runtime_disable(dev, false);
1728 pm_runtime_reinit(dev);
1733 * @dev: Consumer device.
1735 void pm_runtime_get_suppliers(struct device *dev)
1742 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1755 * @dev: Consumer device.
1757 void pm_runtime_put_suppliers(struct device *dev)
1766 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1770 spin_lock_irqsave(&dev->power.lock, flags);
1771 put = pm_runtime_status_suspended(dev) &&
1773 spin_unlock_irqrestore(&dev->power.lock, flags);
1781 void pm_runtime_new_link(struct device *dev)
1783 spin_lock_irq(&dev->power.lock);
1784 dev->power.links_count++;
1785 spin_unlock_irq(&dev->power.lock);
1788 static void pm_runtime_drop_link_count(struct device *dev)
1790 spin_lock_irq(&dev->power.lock);
1791 WARN_ON(dev->power.links_count == 0);
1792 dev->power.links_count--;
1793 spin_unlock_irq(&dev->power.lock);
1814 static bool pm_runtime_need_not_resume(struct device *dev)
1816 return atomic_read(&dev->power.usage_count) <= 1 &&
1817 (atomic_read(&dev->power.child_count) == 0 ||
1818 dev->power.ignore_children);
1823 * @dev: Device to suspend.
1838 int pm_runtime_force_suspend(struct device *dev)
1843 pm_runtime_disable(dev);
1844 if (pm_runtime_status_suspended(dev))
1847 callback = RPM_GET_CALLBACK(dev, runtime_suspend);
1849 ret = callback ? callback(dev) : 0;
1859 if (pm_runtime_need_not_resume(dev)) {
1860 pm_runtime_set_suspended(dev);
1862 __update_runtime_status(dev, RPM_SUSPENDED);
1863 dev->power.needs_force_resume = 1;
1869 pm_runtime_enable(dev);
1876 * @dev: Device to resume.
1886 int pm_runtime_force_resume(struct device *dev)
1891 if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
1898 __update_runtime_status(dev, RPM_ACTIVE);
1900 callback = RPM_GET_CALLBACK(dev, runtime_resume);
1902 ret = callback ? callback(dev) : 0;
1904 pm_runtime_set_suspended(dev);
1908 pm_runtime_mark_last_busy(dev);
1910 dev->power.needs_force_resume = 0;
1911 pm_runtime_enable(dev);
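
pm_runtime_force_suspend() and pm_runtime_force_resume() let a driver reuse its runtime PM callbacks for system-wide sleep instead of providing separate system suspend/resume handlers. The usual wiring, sketched with the hypothetical callbacks from the first sketch above:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev);	/* hypothetical, as above */
static int foo_runtime_resume(struct device *dev);	/* hypothetical, as above */

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
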