Lines matching refs:dev in drivers/base/power/runtime.c (Linux runtime PM core)
21 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
26 if (dev->pm_domain)
27 ops = &dev->pm_domain->ops;
28 else if (dev->type && dev->type->pm)
29 ops = dev->type->pm;
30 else if (dev->class && dev->class->pm)
31 ops = dev->class->pm;
32 else if (dev->bus && dev->bus->pm)
33 ops = dev->bus->pm;
42 if (!cb && dev->driver && dev->driver->pm)
43 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
48 #define RPM_GET_CALLBACK(dev, callback) \
49 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
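
The lookup above walks dev->pm_domain, then dev->type, dev->class and dev->bus, and only falls back to dev->driver->pm (lines 42-43) when none of those layers supplies the callback. A minimal sketch of the driver side of that fallback, using hypothetical foo_* names:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* Put the hardware into its low-power state. */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* Restore full power before the device is used again. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
/* Installed via the driver's .pm pointer, i.e. what dev->driver->pm refers to above. */
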
51 static int rpm_resume(struct device *dev, int rpmflags);
52 static int rpm_suspend(struct device *dev, int rpmflags);
56 * @dev: Device to update the accounting for
65 static void update_pm_runtime_accounting(struct device *dev)
69 if (dev->power.disable_depth > 0)
72 last = dev->power.accounting_timestamp;
75 dev->power.accounting_timestamp = now;
87 if (dev->power.runtime_status == RPM_SUSPENDED)
88 dev->power.suspended_time += delta;
90 dev->power.active_time += delta;
93 static void __update_runtime_status(struct device *dev, enum rpm_status status)
95 update_pm_runtime_accounting(dev);
96 dev->power.runtime_status = status;
99 static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
104 spin_lock_irqsave(&dev->power.lock, flags);
106 update_pm_runtime_accounting(dev);
107 time = suspended ? dev->power.suspended_time : dev->power.active_time;
109 spin_unlock_irqrestore(&dev->power.lock, flags);
114 u64 pm_runtime_active_time(struct device *dev)
116 return rpm_get_accounted_time(dev, false);
119 u64 pm_runtime_suspended_time(struct device *dev)
121 return rpm_get_accounted_time(dev, true);
127 * @dev: Device to handle.
129 static void pm_runtime_deactivate_timer(struct device *dev)
131 if (dev->power.timer_expires > 0) {
132 hrtimer_try_to_cancel(&dev->power.suspend_timer);
133 dev->power.timer_expires = 0;
139 * @dev: Device to handle.
141 static void pm_runtime_cancel_pending(struct device *dev)
143 pm_runtime_deactivate_timer(dev);
148 dev->power.request = RPM_REQ_NONE;
153 * @dev: Device to handle.
160 * This function may be called either with or without dev->power.lock held.
163 u64 pm_runtime_autosuspend_expiration(struct device *dev)
168 if (!dev->power.use_autosuspend)
171 autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
175 expires = READ_ONCE(dev->power.last_busy);
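
pm_runtime_autosuspend_expiration() computes its deadline from power.last_busy, which drivers refresh with pm_runtime_mark_last_busy(). A common put-path sketch (hypothetical foo_io_done(), assuming a matching get was taken earlier):

static void foo_io_done(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);		/* refresh power.last_busy */
	pm_runtime_put_autosuspend(dev);	/* suspend only once the autosuspend delay elapses */
}
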
184 static int dev_memalloc_noio(struct device *dev, void *data)
186 return dev->power.memalloc_noio;
191 * @dev: Device to handle.
217 void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
226 spin_lock_irq(&dev->power.lock);
227 enabled = dev->power.memalloc_noio;
228 dev->power.memalloc_noio = enable;
229 spin_unlock_irq(&dev->power.lock);
238 dev = dev->parent;
245 if (!dev || (!enable &&
246 device_for_each_child(dev, NULL, dev_memalloc_noio)))
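
pm_runtime_set_memalloc_noio() propagates the memalloc_noio flag up the parent chain (line 238); rpm_callback() then runs the PM callbacks with GFP_NOIO semantics so that a resume sitting in a block or network I/O path cannot recurse into I/O through memory reclaim. A sketch of a driver opting in at probe time (hypothetical foo_blk_probe()):

static int foo_blk_probe(struct device *dev)
{
	/* Runtime PM callbacks for this device and its ancestors will avoid I/O in allocations. */
	pm_runtime_set_memalloc_noio(dev, true);
	pm_runtime_enable(dev);
	return 0;
}
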
255 * @dev: Device to test.
257 static int rpm_check_suspend_allowed(struct device *dev)
261 if (dev->power.runtime_error)
263 else if (dev->power.disable_depth > 0)
265 else if (atomic_read(&dev->power.usage_count))
267 else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count))
271 else if ((dev->power.deferred_resume &&
272 dev->power.runtime_status == RPM_SUSPENDING) ||
273 (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME))
275 else if (__dev_pm_qos_resume_latency(dev) == 0)
277 else if (dev->power.runtime_status == RPM_SUSPENDED)
283 static int rpm_get_suppliers(struct device *dev)
287 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
326 static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
330 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
338 static void rpm_put_suppliers(struct device *dev)
340 __rpm_put_suppliers(dev, true);
343 static void rpm_suspend_suppliers(struct device *dev)
348 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
358 * @dev: Device to run the callback for.
360 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
361 __releases(&dev->power.lock) __acquires(&dev->power.lock)
364 bool use_links = dev->power.links_count > 0;
366 if (dev->power.irq_safe) {
367 spin_unlock(&dev->power.lock);
369 spin_unlock_irq(&dev->power.lock);
378 if (use_links && dev->power.runtime_status == RPM_RESUMING) {
381 retval = rpm_get_suppliers(dev);
383 rpm_put_suppliers(dev);
392 retval = cb(dev);
394 if (dev->power.irq_safe) {
395 spin_lock(&dev->power.lock);
405 ((dev->power.runtime_status == RPM_SUSPENDING && !retval) ||
406 (dev->power.runtime_status == RPM_RESUMING && retval))) {
409 __rpm_put_suppliers(dev, false);
415 spin_lock_irq(&dev->power.lock);
424 * @dev: Device to run the callback for.
426 static int rpm_callback(int (*cb)(struct device *), struct device *dev)
430 if (dev->power.memalloc_noio) {
443 retval = __rpm_callback(cb, dev);
446 retval = __rpm_callback(cb, dev);
449 dev->power.runtime_error = retval;
455 * @dev: Device to notify the bus type about.
464 * This function must be called under dev->power.lock with interrupts disabled.
466 static int rpm_idle(struct device *dev, int rpmflags)
471 trace_rpm_idle(dev, rpmflags);
472 retval = rpm_check_suspend_allowed(dev);
477 else if (dev->power.runtime_status != RPM_ACTIVE)
484 else if (dev->power.request_pending &&
485 dev->power.request > RPM_REQ_IDLE)
489 else if (dev->power.idle_notification)
496 dev->power.request = RPM_REQ_NONE;
498 callback = RPM_GET_CALLBACK(dev, runtime_idle);
501 if (!callback || dev->power.no_callbacks)
506 dev->power.request = RPM_REQ_IDLE;
507 if (!dev->power.request_pending) {
508 dev->power.request_pending = true;
509 queue_work(pm_wq, &dev->power.work);
511 trace_rpm_return_int(dev, _THIS_IP_, 0);
515 dev->power.idle_notification = true;
517 if (dev->power.irq_safe)
518 spin_unlock(&dev->power.lock);
520 spin_unlock_irq(&dev->power.lock);
522 retval = callback(dev);
524 if (dev->power.irq_safe)
525 spin_lock(&dev->power.lock);
527 spin_lock_irq(&dev->power.lock);
529 dev->power.idle_notification = false;
530 wake_up_all(&dev->power.wait_queue);
533 trace_rpm_return_int(dev, _THIS_IP_, retval);
534 return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
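
Line 534 shows that ->runtime_idle() acts as a gate: only a zero return lets rpm_idle() go on to rpm_suspend() with RPM_AUTO. A driver-side sketch, assuming a hypothetical foo_priv structure with a dma_active flag:

#include <linux/device.h>

struct foo_priv {
	bool dma_active;
};

static int foo_runtime_idle(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	if (priv->dma_active)
		return -EBUSY;	/* veto: rpm_idle() returns this and skips the suspend */

	return 0;		/* lets rpm_idle() call rpm_suspend(dev, rpmflags | RPM_AUTO) */
}
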
539 * @dev: Device to suspend.
551 * ignore_children of parent->power and irq_safe of dev->power are not set).
556 * This function must be called under dev->power.lock with interrupts disabled.
558 static int rpm_suspend(struct device *dev, int rpmflags)
559 __releases(&dev->power.lock) __acquires(&dev->power.lock)
565 trace_rpm_suspend(dev, rpmflags);
568 retval = rpm_check_suspend_allowed(dev);
573 if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
580 if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) {
581 u64 expires = pm_runtime_autosuspend_expiration(dev);
585 dev->power.request = RPM_REQ_NONE;
594 if (!(dev->power.timer_expires &&
595 dev->power.timer_expires <= expires)) {
600 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
603 dev->power.timer_expires = expires;
604 hrtimer_start_range_ns(&dev->power.suspend_timer,
609 dev->power.timer_autosuspends = 1;
615 pm_runtime_cancel_pending(dev);
617 if (dev->power.runtime_status == RPM_SUSPENDING) {
625 if (dev->power.irq_safe) {
626 spin_unlock(&dev->power.lock);
630 spin_lock(&dev->power.lock);
636 prepare_to_wait(&dev->power.wait_queue, &wait,
638 if (dev->power.runtime_status != RPM_SUSPENDING)
641 spin_unlock_irq(&dev->power.lock);
645 spin_lock_irq(&dev->power.lock);
647 finish_wait(&dev->power.wait_queue, &wait);
651 if (dev->power.no_callbacks)
656 dev->power.request = (rpmflags & RPM_AUTO) ?
658 if (!dev->power.request_pending) {
659 dev->power.request_pending = true;
660 queue_work(pm_wq, &dev->power.work);
665 __update_runtime_status(dev, RPM_SUSPENDING);
667 callback = RPM_GET_CALLBACK(dev, runtime_suspend);
669 dev_pm_enable_wake_irq_check(dev, true);
670 retval = rpm_callback(callback, dev);
674 dev_pm_enable_wake_irq_complete(dev);
677 __update_runtime_status(dev, RPM_SUSPENDED);
678 pm_runtime_deactivate_timer(dev);
680 if (dev->parent) {
681 parent = dev->parent;
684 wake_up_all(&dev->power.wait_queue);
686 if (dev->power.deferred_resume) {
687 dev->power.deferred_resume = false;
688 rpm_resume(dev, 0);
693 if (dev->power.irq_safe)
698 spin_unlock(&dev->power.lock);
704 spin_lock(&dev->power.lock);
707 if (dev->power.links_count > 0) {
708 spin_unlock_irq(&dev->power.lock);
710 rpm_suspend_suppliers(dev);
712 spin_lock_irq(&dev->power.lock);
716 trace_rpm_return_int(dev, _THIS_IP_, retval);
721 dev_pm_disable_wake_irq_check(dev, true);
722 __update_runtime_status(dev, RPM_ACTIVE);
723 dev->power.deferred_resume = false;
724 wake_up_all(&dev->power.wait_queue);
727 dev->power.runtime_error = 0;
736 pm_runtime_autosuspend_expiration(dev) != 0)
739 pm_runtime_cancel_pending(dev);
746 * @dev: Device to resume.
759 * This function must be called under dev->power.lock with interrupts disabled.
761 static int rpm_resume(struct device *dev, int rpmflags)
762 __releases(&dev->power.lock) __acquires(&dev->power.lock)
768 trace_rpm_resume(dev, rpmflags);
771 if (dev->power.runtime_error) {
773 } else if (dev->power.disable_depth > 0) {
774 if (dev->power.runtime_status == RPM_ACTIVE &&
775 dev->power.last_status == RPM_ACTIVE)
789 dev->power.request = RPM_REQ_NONE;
790 if (!dev->power.timer_autosuspends)
791 pm_runtime_deactivate_timer(dev);
793 if (dev->power.runtime_status == RPM_ACTIVE) {
798 if (dev->power.runtime_status == RPM_RESUMING ||
799 dev->power.runtime_status == RPM_SUSPENDING) {
803 if (dev->power.runtime_status == RPM_SUSPENDING) {
804 dev->power.deferred_resume = true;
813 if (dev->power.irq_safe) {
814 spin_unlock(&dev->power.lock);
818 spin_lock(&dev->power.lock);
824 prepare_to_wait(&dev->power.wait_queue, &wait,
826 if (dev->power.runtime_status != RPM_RESUMING &&
827 dev->power.runtime_status != RPM_SUSPENDING)
830 spin_unlock_irq(&dev->power.lock);
834 spin_lock_irq(&dev->power.lock);
836 finish_wait(&dev->power.wait_queue, &wait);
845 if (dev->power.no_callbacks && !parent && dev->parent) {
846 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
847 if (dev->parent->power.disable_depth > 0 ||
848 dev->parent->power.ignore_children ||
849 dev->parent->power.runtime_status == RPM_ACTIVE) {
850 atomic_inc(&dev->parent->power.child_count);
851 spin_unlock(&dev->parent->power.lock);
855 spin_unlock(&dev->parent->power.lock);
860 dev->power.request = RPM_REQ_RESUME;
861 if (!dev->power.request_pending) {
862 dev->power.request_pending = true;
863 queue_work(pm_wq, &dev->power.work);
869 if (!parent && dev->parent) {
872 * necessary. Not needed if dev is irq-safe; then the
875 parent = dev->parent;
876 if (dev->power.irq_safe)
879 spin_unlock(&dev->power.lock);
896 spin_lock(&dev->power.lock);
904 if (dev->power.no_callbacks)
907 __update_runtime_status(dev, RPM_RESUMING);
909 callback = RPM_GET_CALLBACK(dev, runtime_resume);
911 dev_pm_disable_wake_irq_check(dev, false);
912 retval = rpm_callback(callback, dev);
914 __update_runtime_status(dev, RPM_SUSPENDED);
915 pm_runtime_cancel_pending(dev);
916 dev_pm_enable_wake_irq_check(dev, false);
919 __update_runtime_status(dev, RPM_ACTIVE);
920 pm_runtime_mark_last_busy(dev);
924 wake_up_all(&dev->power.wait_queue);
927 rpm_idle(dev, RPM_ASYNC);
930 if (parent && !dev->power.irq_safe) {
931 spin_unlock_irq(&dev->power.lock);
935 spin_lock_irq(&dev->power.lock);
938 trace_rpm_return_int(dev, _THIS_IP_, retval);
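
rpm_resume() is what eventually runs when a consumer calls pm_runtime_get_sync() or pm_runtime_resume_and_get(); the latter drops the usage count again on failure, which keeps error handling flat. A consumer-side sketch (hypothetical foo_start()):

static int foo_start(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);	/* bump usage count and run rpm_resume() */
	if (ret < 0)
		return ret;			/* usage count already dropped on failure */

	/* ... the device is RPM_ACTIVE here ... */

	pm_runtime_put(dev);			/* queue an asynchronous idle notification */
	return 0;
}
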
952 struct device *dev = container_of(work, struct device, power.work);
955 spin_lock_irq(&dev->power.lock);
957 if (!dev->power.request_pending)
960 req = dev->power.request;
961 dev->power.request = RPM_REQ_NONE;
962 dev->power.request_pending = false;
968 rpm_idle(dev, RPM_NOWAIT);
971 rpm_suspend(dev, RPM_NOWAIT);
974 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
977 rpm_resume(dev, RPM_NOWAIT);
982 spin_unlock_irq(&dev->power.lock);
993 struct device *dev = container_of(timer, struct device, power.suspend_timer);
997 spin_lock_irqsave(&dev->power.lock, flags);
999 expires = dev->power.timer_expires;
1005 dev->power.timer_expires = 0;
1006 rpm_suspend(dev, dev->power.timer_autosuspends ?
1010 spin_unlock_irqrestore(&dev->power.lock, flags);
1017 * @dev: Device to suspend.
1020 int pm_schedule_suspend(struct device *dev, unsigned int delay)
1026 spin_lock_irqsave(&dev->power.lock, flags);
1029 retval = rpm_suspend(dev, RPM_ASYNC);
1033 retval = rpm_check_suspend_allowed(dev);
1038 pm_runtime_cancel_pending(dev);
1041 dev->power.timer_expires = expires;
1042 dev->power.timer_autosuspends = 0;
1043 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
1046 spin_unlock_irqrestore(&dev->power.lock, flags);
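
pm_schedule_suspend() arms the power.suspend_timer shown above; the delay is given in milliseconds, and a later resume or new activity cancels the pending timer. A small sketch (hypothetical helper, 500 ms chosen arbitrarily):

static void foo_request_delayed_suspend(struct device *dev)
{
	int ret;

	ret = pm_schedule_suspend(dev, 500);	/* delay in milliseconds */
	if (ret < 0)
		dev_dbg(dev, "suspend not scheduled: %d\n", ret);
}
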
1052 static int rpm_drop_usage_count(struct device *dev)
1056 ret = atomic_sub_return(1, &dev->power.usage_count);
1066 atomic_inc(&dev->power.usage_count);
1067 dev_warn(dev, "Runtime PM usage count underflow!\n");
1073 * @dev: Device to send idle notification for.
1084 int __pm_runtime_idle(struct device *dev, int rpmflags)
1090 retval = rpm_drop_usage_count(dev);
1094 trace_rpm_usage(dev, rpmflags);
1099 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1101 spin_lock_irqsave(&dev->power.lock, flags);
1102 retval = rpm_idle(dev, rpmflags);
1103 spin_unlock_irqrestore(&dev->power.lock, flags);
1111 * @dev: Device to suspend.
1122 int __pm_runtime_suspend(struct device *dev, int rpmflags)
1128 retval = rpm_drop_usage_count(dev);
1132 trace_rpm_usage(dev, rpmflags);
1137 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1139 spin_lock_irqsave(&dev->power.lock, flags);
1140 retval = rpm_suspend(dev, rpmflags);
1141 spin_unlock_irqrestore(&dev->power.lock, flags);
1149 * @dev: Device to resume.
1158 int __pm_runtime_resume(struct device *dev, int rpmflags)
1163 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1164 dev->power.runtime_status != RPM_ACTIVE);
1167 atomic_inc(&dev->power.usage_count);
1169 spin_lock_irqsave(&dev->power.lock, flags);
1170 retval = rpm_resume(dev, rpmflags);
1171 spin_unlock_irqrestore(&dev->power.lock, flags);
1179 * @dev: Device to handle.
1182 * Return -EINVAL if runtime PM is disabled for @dev.
1184 * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
1185 * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
1186 * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
1197 * @dev after this function has returned a positive value for it.
1199 int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
1204 spin_lock_irqsave(&dev->power.lock, flags);
1205 if (dev->power.disable_depth > 0) {
1207 } else if (dev->power.runtime_status != RPM_ACTIVE) {
1211 atomic_inc(&dev->power.usage_count);
1213 retval = atomic_inc_not_zero(&dev->power.usage_count);
1215 trace_rpm_usage(dev, 0);
1216 spin_unlock_irqrestore(&dev->power.lock, flags);
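
With the signature listed here, pm_runtime_get_if_active() takes an ign_usage_count flag, and pm_runtime_get_if_in_use(dev) is the ign_usage_count == false wrapper. It only takes a reference when the device is already RPM_ACTIVE, which suits opportunistic hardware access; a sketch (hypothetical stats read):

static int foo_read_stats(struct device *dev)
{
	/* > 0 means the device was active and a usage count was taken. */
	if (pm_runtime_get_if_in_use(dev) <= 0)
		return -EAGAIN;		/* suspended or PM disabled: skip the register read */

	/* ... read counters from the still-powered device ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}
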
1224 * @dev: Device to handle.
1239 * If @dev has any suppliers (as reflected by device links to them), and @status
1241 * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
1246 int __pm_runtime_set_status(struct device *dev, unsigned int status)
1248 struct device *parent = dev->parent;
1256 spin_lock_irqsave(&dev->power.lock, flags);
1262 if (dev->power.runtime_error || dev->power.disable_depth)
1263 dev->power.disable_depth++;
1267 spin_unlock_irqrestore(&dev->power.lock, flags);
1281 error = rpm_get_suppliers(dev);
1288 spin_lock_irqsave(&dev->power.lock, flags);
1290 if (dev->power.runtime_status == status || !parent)
1307 dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1308 dev_name(dev),
1311 } else if (dev->power.runtime_status == RPM_SUSPENDED) {
1324 __update_runtime_status(dev, status);
1326 dev->power.runtime_error = 0;
1329 spin_unlock_irqrestore(&dev->power.lock, flags);
1337 rpm_put_suppliers(dev);
1342 pm_runtime_enable(dev);
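
__pm_runtime_set_status() is the backend of pm_runtime_set_active() and pm_runtime_set_suspended(), which drivers use to tell the core what state the hardware is actually in before enabling runtime PM. A common probe-time ordering, assuming firmware left the device powered up:

static int foo_init_runtime_pm(struct device *dev)
{
	int ret;

	ret = pm_runtime_set_active(dev);	/* make the status match the hardware state */
	if (ret)
		return ret;

	pm_runtime_enable(dev);			/* the core may now suspend the device when idle */
	return 0;
}
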
1350 * @dev: Device to handle.
1355 * Should be called under dev->power.lock with interrupts disabled.
1357 static void __pm_runtime_barrier(struct device *dev)
1359 pm_runtime_deactivate_timer(dev);
1361 if (dev->power.request_pending) {
1362 dev->power.request = RPM_REQ_NONE;
1363 spin_unlock_irq(&dev->power.lock);
1365 cancel_work_sync(&dev->power.work);
1367 spin_lock_irq(&dev->power.lock);
1368 dev->power.request_pending = false;
1371 if (dev->power.runtime_status == RPM_SUSPENDING ||
1372 dev->power.runtime_status == RPM_RESUMING ||
1373 dev->power.idle_notification) {
1378 prepare_to_wait(&dev->power.wait_queue, &wait,
1380 if (dev->power.runtime_status != RPM_SUSPENDING
1381 && dev->power.runtime_status != RPM_RESUMING
1382 && !dev->power.idle_notification)
1384 spin_unlock_irq(&dev->power.lock);
1388 spin_lock_irq(&dev->power.lock);
1390 finish_wait(&dev->power.wait_queue, &wait);
1396 * @dev: Device to handle.
1408 int pm_runtime_barrier(struct device *dev)
1412 pm_runtime_get_noresume(dev);
1413 spin_lock_irq(&dev->power.lock);
1415 if (dev->power.request_pending
1416 && dev->power.request == RPM_REQ_RESUME) {
1417 rpm_resume(dev, 0);
1421 __pm_runtime_barrier(dev);
1423 spin_unlock_irq(&dev->power.lock);
1424 pm_runtime_put_noidle(dev);
1432 * @dev: Device to handle.
1444 void __pm_runtime_disable(struct device *dev, bool check_resume)
1446 spin_lock_irq(&dev->power.lock);
1448 if (dev->power.disable_depth > 0) {
1449 dev->power.disable_depth++;
1458 if (check_resume && dev->power.request_pending &&
1459 dev->power.request == RPM_REQ_RESUME) {
1464 pm_runtime_get_noresume(dev);
1466 rpm_resume(dev, 0);
1468 pm_runtime_put_noidle(dev);
1472 update_pm_runtime_accounting(dev);
1474 if (!dev->power.disable_depth++) {
1475 __pm_runtime_barrier(dev);
1476 dev->power.last_status = dev->power.runtime_status;
1480 spin_unlock_irq(&dev->power.lock);
1486 * @dev: Device to handle.
1488 void pm_runtime_enable(struct device *dev)
1492 spin_lock_irqsave(&dev->power.lock, flags);
1494 if (!dev->power.disable_depth) {
1495 dev_warn(dev, "Unbalanced %s!\n", __func__);
1499 if (--dev->power.disable_depth > 0)
1502 dev->power.last_status = RPM_INVALID;
1503 dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
1505 if (dev->power.runtime_status == RPM_SUSPENDED &&
1506 !dev->power.ignore_children &&
1507 atomic_read(&dev->power.child_count) > 0)
1508 dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");
1511 spin_unlock_irqrestore(&dev->power.lock, flags);
1527 * @dev: Device to handle.
1529 int devm_pm_runtime_enable(struct device *dev)
1531 pm_runtime_enable(dev);
1533 return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
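
devm_pm_runtime_enable() ties the matching disable to driver detach via a devres action, so probe error paths and remove() need no explicit pm_runtime_disable(). A probe sketch (hypothetical foo_probe() for a platform device):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	ret = devm_pm_runtime_enable(dev);	/* disabled again automatically on detach */
	if (ret)
		return ret;

	return 0;
}
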
1539 * @dev: Device to handle.
1545 void pm_runtime_forbid(struct device *dev)
1547 spin_lock_irq(&dev->power.lock);
1548 if (!dev->power.runtime_auto)
1551 dev->power.runtime_auto = false;
1552 atomic_inc(&dev->power.usage_count);
1553 rpm_resume(dev, 0);
1556 spin_unlock_irq(&dev->power.lock);
1562 * @dev: Device to handle.
1566 void pm_runtime_allow(struct device *dev)
1570 spin_lock_irq(&dev->power.lock);
1571 if (dev->power.runtime_auto)
1574 dev->power.runtime_auto = true;
1575 ret = rpm_drop_usage_count(dev);
1577 rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
1579 trace_rpm_usage(dev, RPM_AUTO | RPM_ASYNC);
1582 spin_unlock_irq(&dev->power.lock);
1588 * @dev: Device to handle.
1594 void pm_runtime_no_callbacks(struct device *dev)
1596 spin_lock_irq(&dev->power.lock);
1597 dev->power.no_callbacks = 1;
1598 spin_unlock_irq(&dev->power.lock);
1599 if (device_is_registered(dev))
1600 rpm_sysfs_remove(dev);
1606 * @dev: Device to handle
1615 void pm_runtime_irq_safe(struct device *dev)
1617 if (dev->parent)
1618 pm_runtime_get_sync(dev->parent);
1620 spin_lock_irq(&dev->power.lock);
1621 dev->power.irq_safe = 1;
1622 spin_unlock_irq(&dev->power.lock);
1628 * @dev: Device to handle.
1635 * This function must be called under dev->power.lock with interrupts disabled.
1637 static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1639 int delay = dev->power.autosuspend_delay;
1642 if (dev->power.use_autosuspend && delay < 0) {
1646 atomic_inc(&dev->power.usage_count);
1647 rpm_resume(dev, 0);
1649 trace_rpm_usage(dev, 0);
1658 atomic_dec(&dev->power.usage_count);
1661 rpm_idle(dev, RPM_AUTO);
1667 * @dev: Device to handle.
1674 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1678 spin_lock_irq(&dev->power.lock);
1679 old_delay = dev->power.autosuspend_delay;
1680 old_use = dev->power.use_autosuspend;
1681 dev->power.autosuspend_delay = delay;
1682 update_autosuspend(dev, old_delay, old_use);
1683 spin_unlock_irq(&dev->power.lock);
1689 * @dev: Device to handle.
1695 void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1699 spin_lock_irq(&dev->power.lock);
1700 old_delay = dev->power.autosuspend_delay;
1701 old_use = dev->power.use_autosuspend;
1702 dev->power.use_autosuspend = use;
1703 update_autosuspend(dev, old_delay, old_use);
1704 spin_unlock_irq(&dev->power.lock);
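
update_autosuspend() reacts to pm_runtime_set_autosuspend_delay() and pm_runtime_use_autosuspend(); drivers typically configure both around probe so the pm_runtime_put_autosuspend() path has a delay to honour. A sketch (the 2000 ms value is an arbitrary example):

static void foo_setup_autosuspend(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* example: 2 s of inactivity */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
}
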
1710 * @dev: Device object to initialize.
1712 void pm_runtime_init(struct device *dev)
1714 dev->power.runtime_status = RPM_SUSPENDED;
1715 dev->power.last_status = RPM_INVALID;
1716 dev->power.idle_notification = false;
1718 dev->power.disable_depth = 1;
1719 atomic_set(&dev->power.usage_count, 0);
1721 dev->power.runtime_error = 0;
1723 atomic_set(&dev->power.child_count, 0);
1724 pm_suspend_ignore_children(dev, false);
1725 dev->power.runtime_auto = true;
1727 dev->power.request_pending = false;
1728 dev->power.request = RPM_REQ_NONE;
1729 dev->power.deferred_resume = false;
1730 dev->power.needs_force_resume = 0;
1731 INIT_WORK(&dev->power.work, pm_runtime_work);
1733 dev->power.timer_expires = 0;
1734 hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1735 dev->power.suspend_timer.function = pm_suspend_timer_fn;
1737 init_waitqueue_head(&dev->power.wait_queue);
1742 * @dev: Device object to re-initialize.
1744 void pm_runtime_reinit(struct device *dev)
1746 if (!pm_runtime_enabled(dev)) {
1747 if (dev->power.runtime_status == RPM_ACTIVE)
1748 pm_runtime_set_suspended(dev);
1749 if (dev->power.irq_safe) {
1750 spin_lock_irq(&dev->power.lock);
1751 dev->power.irq_safe = 0;
1752 spin_unlock_irq(&dev->power.lock);
1753 if (dev->parent)
1754 pm_runtime_put(dev->parent);
1761 * @dev: Device object being removed from device hierarchy.
1763 void pm_runtime_remove(struct device *dev)
1765 __pm_runtime_disable(dev, false);
1766 pm_runtime_reinit(dev);
1771 * @dev: Consumer device.
1773 void pm_runtime_get_suppliers(struct device *dev)
1780 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1792 * @dev: Consumer device.
1794 void pm_runtime_put_suppliers(struct device *dev)
1801 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1811 void pm_runtime_new_link(struct device *dev)
1813 spin_lock_irq(&dev->power.lock);
1814 dev->power.links_count++;
1815 spin_unlock_irq(&dev->power.lock);
1818 static void pm_runtime_drop_link_count(struct device *dev)
1820 spin_lock_irq(&dev->power.lock);
1821 WARN_ON(dev->power.links_count == 0);
1822 dev->power.links_count--;
1823 spin_unlock_irq(&dev->power.lock);
1844 static bool pm_runtime_need_not_resume(struct device *dev)
1846 return atomic_read(&dev->power.usage_count) <= 1 &&
1847 (atomic_read(&dev->power.child_count) == 0 ||
1848 dev->power.ignore_children);
1853 * @dev: Device to suspend.
1872 int pm_runtime_force_suspend(struct device *dev)
1877 pm_runtime_disable(dev);
1878 if (pm_runtime_status_suspended(dev))
1881 callback = RPM_GET_CALLBACK(dev, runtime_suspend);
1883 dev_pm_enable_wake_irq_check(dev, true);
1884 ret = callback ? callback(dev) : 0;
1888 dev_pm_enable_wake_irq_complete(dev);
1896 if (pm_runtime_need_not_resume(dev)) {
1897 pm_runtime_set_suspended(dev);
1899 __update_runtime_status(dev, RPM_SUSPENDED);
1900 dev->power.needs_force_resume = 1;
1906 dev_pm_disable_wake_irq_check(dev, true);
1907 pm_runtime_enable(dev);
1914 * @dev: Device to resume.
1924 int pm_runtime_force_resume(struct device *dev)
1929 if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
1936 __update_runtime_status(dev, RPM_ACTIVE);
1938 callback = RPM_GET_CALLBACK(dev, runtime_resume);
1940 dev_pm_disable_wake_irq_check(dev, false);
1941 ret = callback ? callback(dev) : 0;
1943 pm_runtime_set_suspended(dev);
1944 dev_pm_enable_wake_irq_check(dev, false);
1948 pm_runtime_mark_last_busy(dev);
1950 dev->power.needs_force_resume = 0;
1951 pm_runtime_enable(dev);
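
pm_runtime_force_suspend() and pm_runtime_force_resume() are intended to be wired in directly as system sleep callbacks when the runtime PM callbacks already cover the whole power transition. A typical dev_pm_ops arrangement, extending the earlier hypothetical foo_runtime_* sketch:

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
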