Lines matching references to 'power' in drivers/base/power/runtime.c (the leading number on each line is its line number in that file)

3  * drivers/base/power/runtime.c - Helper functions for device runtime PM
17 #include "power.h"
55 * update_pm_runtime_accounting - Update the time accounting of power states
58 * In order to be able to have time accounting of the various power states
69 if (dev->power.disable_depth > 0)
72 last = dev->power.accounting_timestamp;
75 dev->power.accounting_timestamp = now;
87 if (dev->power.runtime_status == RPM_SUSPENDED)
88 dev->power.suspended_time += delta;
90 dev->power.active_time += delta;
96 dev->power.runtime_status = status;
104 spin_lock_irqsave(&dev->power.lock, flags);
107 time = suspended ? dev->power.suspended_time : dev->power.active_time;
109 spin_unlock_irqrestore(&dev->power.lock, flags);
131 if (dev->power.timer_expires > 0) {
132 hrtimer_try_to_cancel(&dev->power.suspend_timer);
133 dev->power.timer_expires = 0;
148 dev->power.request = RPM_REQ_NONE;
156 * power.last_busy time. If the delay has already expired or is disabled
157 * (negative) or the power.use_autosuspend flag isn't set, return 0.
160 * This function may be called either with or without dev->power.lock held.
161 * Either way it can be racy, since power.last_busy may be updated at any time.
168 if (!dev->power.use_autosuspend)
171 autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
175 expires = READ_ONCE(dev->power.last_busy);
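A driver feeds power.last_busy through pm_runtime_mark_last_busy() and lets the core compute the expiration from it; a minimal sketch (the hook names and the 2000 ms delay are illustrative, not from this file):

static int my_autosuspend_setup(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* 2 s of inactivity */
	pm_runtime_use_autosuspend(dev);
	return 0;
}

static void my_io_done(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);		/* refresh power.last_busy */
	pm_runtime_put_autosuspend(dev);	/* may arm the suspend timer */
}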
186 return dev->power.memalloc_noio;
225 /* hold power lock since bitfield is not SMP-safe. */
226 spin_lock_irq(&dev->power.lock);
227 enabled = dev->power.memalloc_noio;
228 dev->power.memalloc_noio = enable;
229 spin_unlock_irq(&dev->power.lock);
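Drivers sitting in a block-I/O path enable this flag once, early; a minimal sketch (the probe hook name is hypothetical):

static int my_blk_probe(struct device *dev)
{
	/* make this device's runtime PM callbacks use GFP_NOIO allocations */
	pm_runtime_set_memalloc_noio(dev, true);
	return 0;
}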
261 if (dev->power.runtime_error)
263 else if (dev->power.disable_depth > 0)
265 else if (atomic_read(&dev->power.usage_count))
267 else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count))
271 else if ((dev->power.deferred_resume &&
272 dev->power.runtime_status == RPM_SUSPENDING) ||
273 (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME))
277 else if (dev->power.runtime_status == RPM_SUSPENDED)
316 * The additional power.usage_count check is a safety net in case
322 atomic_read(&supplier->power.usage_count) > 0)
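A supplier ends up on this list via a runtime-PM-managed device link created by the consumer; a minimal sketch (consumer/supplier are placeholder struct device pointers):

static int my_link_up(struct device *consumer, struct device *supplier)
{
	struct device_link *link;

	/* let the consumer's runtime PM drive the supplier's */
	link = device_link_add(consumer, supplier,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
	return link ? 0 : -EINVAL;
}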
361 __releases(&dev->power.lock) __acquires(&dev->power.lock)
364 bool use_links = dev->power.links_count > 0;
366 if (dev->power.irq_safe) {
367 spin_unlock(&dev->power.lock);
369 spin_unlock_irq(&dev->power.lock);
378 if (use_links && dev->power.runtime_status == RPM_RESUMING) {
394 if (dev->power.irq_safe) {
395 spin_lock(&dev->power.lock);
405 ((dev->power.runtime_status == RPM_SUSPENDING && !retval) ||
406 (dev->power.runtime_status == RPM_RESUMING && retval))) {
415 spin_lock_irq(&dev->power.lock);
430 if (dev->power.memalloc_noio) {
449 dev->power.runtime_error = retval;
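The callback dispatched here is supplied by the driver or its subsystem through dev_pm_ops; a minimal sketch of that driver side (the my_* names are hypothetical):

static int my_runtime_suspend(struct device *dev)
{
	/* quiesce the hardware, gate clocks; a nonzero return is
	 * recorded in power.runtime_error (line 449 above) */
	return 0;
}

static int my_runtime_resume(struct device *dev)
{
	/* ungate clocks, restore context */
	return 0;
}

static const struct dev_pm_ops my_pm_ops = {
	SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
};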
464 * This function must be called under dev->power.lock with interrupts disabled.
477 else if (dev->power.runtime_status != RPM_ACTIVE)
484 else if (dev->power.request_pending &&
485 dev->power.request > RPM_REQ_IDLE)
489 else if (dev->power.idle_notification)
496 dev->power.request = RPM_REQ_NONE;
501 if (!callback || dev->power.no_callbacks)
506 dev->power.request = RPM_REQ_IDLE;
507 if (!dev->power.request_pending) {
508 dev->power.request_pending = true;
509 queue_work(pm_wq, &dev->power.work);
515 dev->power.idle_notification = true;
517 if (dev->power.irq_safe)
518 spin_unlock(&dev->power.lock);
520 spin_unlock_irq(&dev->power.lock);
524 if (dev->power.irq_safe)
525 spin_lock(&dev->power.lock);
527 spin_lock_irq(&dev->power.lock);
529 dev->power.idle_notification = false;
530 wake_up_all(&dev->power.wait_queue);
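Drivers reach rpm_idle() through thin wrappers from include/linux/pm_runtime.h rather than directly; for instance:

	pm_request_idle(dev);	/* async: __pm_runtime_idle(dev, RPM_ASYNC) */
	pm_runtime_idle(dev);	/* sync:  __pm_runtime_idle(dev, 0) */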
551 * ignore_children of parent->power and irq_safe of dev->power are not set).
556 * This function must be called under dev->power.lock with interrupts disabled.
559 __releases(&dev->power.lock) __acquires(&dev->power.lock)
573 if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
580 if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) {
585 dev->power.request = RPM_REQ_NONE;
594 if (!(dev->power.timer_expires &&
595 dev->power.timer_expires <= expires)) {
600 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
603 dev->power.timer_expires = expires;
604 hrtimer_start_range_ns(&dev->power.suspend_timer,
609 dev->power.timer_autosuspends = 1;
617 if (dev->power.runtime_status == RPM_SUSPENDING) {
625 if (dev->power.irq_safe) {
626 spin_unlock(&dev->power.lock);
630 spin_lock(&dev->power.lock);
636 prepare_to_wait(&dev->power.wait_queue, &wait,
638 if (dev->power.runtime_status != RPM_SUSPENDING)
641 spin_unlock_irq(&dev->power.lock);
645 spin_lock_irq(&dev->power.lock);
647 finish_wait(&dev->power.wait_queue, &wait);
651 if (dev->power.no_callbacks)
656 dev->power.request = (rpmflags & RPM_AUTO) ?
658 if (!dev->power.request_pending) {
659 dev->power.request_pending = true;
660 queue_work(pm_wq, &dev->power.work);
682 atomic_add_unless(&parent->power.child_count, -1, 0);
684 wake_up_all(&dev->power.wait_queue);
686 if (dev->power.deferred_resume) {
687 dev->power.deferred_resume = false;
693 if (dev->power.irq_safe)
697 if (parent && !parent->power.ignore_children) {
698 spin_unlock(&dev->power.lock);
700 spin_lock(&parent->power.lock);
702 spin_unlock(&parent->power.lock);
704 spin_lock(&dev->power.lock);
707 if (dev->power.links_count > 0) {
708 spin_unlock_irq(&dev->power.lock);
712 spin_lock_irq(&dev->power.lock);
723 dev->power.deferred_resume = false;
724 wake_up_all(&dev->power.wait_queue);
727 dev->power.runtime_error = 0;
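Similarly, the rpmflags seen by rpm_suspend() come from its exported wrappers; the mappings below mirror the static inlines in include/linux/pm_runtime.h:

	pm_runtime_suspend(dev);	/* __pm_runtime_suspend(dev, 0) */
	pm_runtime_autosuspend(dev);	/* __pm_runtime_suspend(dev, RPM_AUTO) */
	pm_request_autosuspend(dev);	/* __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO) */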
759 * This function must be called under dev->power.lock with interrupts disabled.
762 __releases(&dev->power.lock) __acquires(&dev->power.lock)
771 if (dev->power.runtime_error) {
773 } else if (dev->power.disable_depth > 0) {
774 if (dev->power.runtime_status == RPM_ACTIVE &&
775 dev->power.last_status == RPM_ACTIVE)
789 dev->power.request = RPM_REQ_NONE;
790 if (!dev->power.timer_autosuspends)
793 if (dev->power.runtime_status == RPM_ACTIVE) {
798 if (dev->power.runtime_status == RPM_RESUMING ||
799 dev->power.runtime_status == RPM_SUSPENDING) {
803 if (dev->power.runtime_status == RPM_SUSPENDING) {
804 dev->power.deferred_resume = true;
813 if (dev->power.irq_safe) {
814 spin_unlock(&dev->power.lock);
818 spin_lock(&dev->power.lock);
824 prepare_to_wait(&dev->power.wait_queue, &wait,
826 if (dev->power.runtime_status != RPM_RESUMING &&
827 dev->power.runtime_status != RPM_SUSPENDING)
830 spin_unlock_irq(&dev->power.lock);
834 spin_lock_irq(&dev->power.lock);
836 finish_wait(&dev->power.wait_queue, &wait);
842 * power.no_callbacks is set, because otherwise we don't know whether
845 if (dev->power.no_callbacks && !parent && dev->parent) {
846 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
847 if (dev->parent->power.disable_depth > 0 ||
848 dev->parent->power.ignore_children ||
849 dev->parent->power.runtime_status == RPM_ACTIVE) {
850 atomic_inc(&dev->parent->power.child_count);
851 spin_unlock(&dev->parent->power.lock);
855 spin_unlock(&dev->parent->power.lock);
860 dev->power.request = RPM_REQ_RESUME;
861 if (!dev->power.request_pending) {
862 dev->power.request_pending = true;
863 queue_work(pm_wq, &dev->power.work);
876 if (dev->power.irq_safe)
879 spin_unlock(&dev->power.lock);
883 spin_lock(&parent->power.lock);
888 if (!parent->power.disable_depth &&
889 !parent->power.ignore_children) {
891 if (parent->power.runtime_status != RPM_ACTIVE)
894 spin_unlock(&parent->power.lock);
896 spin_lock(&dev->power.lock);
904 if (dev->power.no_callbacks)
922 atomic_inc(&parent->power.child_count);
924 wake_up_all(&dev->power.wait_queue);
930 if (parent && !dev->power.irq_safe) {
931 spin_unlock_irq(&dev->power.lock);
935 spin_lock_irq(&dev->power.lock);
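On the driver side, the usual route into rpm_resume() is pm_runtime_resume_and_get(), which drops the usage count again if the resume fails; a minimal sketch (my_hw_access is a hypothetical I/O hook):

static int my_hw_access(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* usage count already balanced on error */

	/* device is RPM_ACTIVE here; safe to touch the hardware */

	pm_runtime_put(dev);
	return 0;
}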
952 struct device *dev = container_of(work, struct device, power.work);
955 spin_lock_irq(&dev->power.lock);
957 if (!dev->power.request_pending)
960 req = dev->power.request;
961 dev->power.request = RPM_REQ_NONE;
962 dev->power.request_pending = false;
982 spin_unlock_irq(&dev->power.lock);
993 struct device *dev = container_of(timer, struct device, power.suspend_timer);
997 spin_lock_irqsave(&dev->power.lock, flags);
999 expires = dev->power.timer_expires;
1005 dev->power.timer_expires = 0;
1006 rpm_suspend(dev, dev->power.timer_autosuspends ?
1010 spin_unlock_irqrestore(&dev->power.lock, flags);
1026 spin_lock_irqsave(&dev->power.lock, flags);
1041 dev->power.timer_expires = expires;
1042 dev->power.timer_autosuspends = 0;
1043 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
1046 spin_unlock_irqrestore(&dev->power.lock, flags);
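pm_schedule_suspend() is the non-autosuspend way to arm this timer; the delay is in milliseconds. For instance:

	pm_schedule_suspend(dev, 100);	/* try to suspend ~100 ms from now */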
1056 ret = atomic_sub_return(1, &dev->power.usage_count);
1066 atomic_inc(&dev->power.usage_count);
1099 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1101 spin_lock_irqsave(&dev->power.lock, flags);
1103 spin_unlock_irqrestore(&dev->power.lock, flags);
1137 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1139 spin_lock_irqsave(&dev->power.lock, flags);
1141 spin_unlock_irqrestore(&dev->power.lock, flags);
1163 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1164 dev->power.runtime_status != RPM_ACTIVE);
1167 atomic_inc(&dev->power.usage_count);
1169 spin_lock_irqsave(&dev->power.lock, flags);
1171 spin_unlock_irqrestore(&dev->power.lock, flags);
1204 spin_lock_irqsave(&dev->power.lock, flags);
1205 if (dev->power.disable_depth > 0) {
1207 } else if (dev->power.runtime_status != RPM_ACTIVE) {
1211 atomic_inc(&dev->power.usage_count);
1213 retval = atomic_inc_not_zero(&dev->power.usage_count);
1216 spin_unlock_irqrestore(&dev->power.lock, flags);
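This is the primitive behind pm_runtime_get_if_in_use(), which lets opportunistic paths act only when the device is already powered; a minimal sketch (the function name is hypothetical):

static void my_opportunistic_poke(struct device *dev)
{
	/* 1: active and in use (count bumped); 0: idle; -EINVAL: PM disabled */
	if (pm_runtime_get_if_in_use(dev) <= 0)
		return;

	/* ... touch the hardware without forcing a resume ... */

	pm_runtime_put(dev);
}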
1227 * If runtime PM of the device is disabled or its power.runtime_error field is
1231 * parent's power.ignore_children flag is unset, the device's status cannot be
1234 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
1256 spin_lock_irqsave(&dev->power.lock, flags);
1262 if (dev->power.runtime_error || dev->power.disable_depth)
1263 dev->power.disable_depth++;
1267 spin_unlock_irqrestore(&dev->power.lock, flags);
1288 spin_lock_irqsave(&dev->power.lock, flags);
1290 if (dev->power.runtime_status == status || !parent)
1294 atomic_add_unless(&parent->power.child_count, -1, 0);
1295 notify_parent = !parent->power.ignore_children;
1297 spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
1302 * 'power.ignore_children' flag unset.
1304 if (!parent->power.disable_depth &&
1305 !parent->power.ignore_children &&
1306 parent->power.runtime_status != RPM_ACTIVE) {
1311 } else if (dev->power.runtime_status == RPM_SUSPENDED) {
1312 atomic_inc(&parent->power.child_count);
1315 spin_unlock(&parent->power.lock);
1326 dev->power.runtime_error = 0;
1329 spin_unlock_irqrestore(&dev->power.lock, flags);
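Drivers normally call this through pm_runtime_set_active()/pm_runtime_set_suspended() at probe time, before handing the device to the core; a sketch for hardware left powered on by firmware:

	pm_runtime_set_active(dev);	/* __pm_runtime_set_status(dev, RPM_ACTIVE) */
	pm_runtime_enable(dev);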
1355 * Should be called under dev->power.lock with interrupts disabled.
1361 if (dev->power.request_pending) {
1362 dev->power.request = RPM_REQ_NONE;
1363 spin_unlock_irq(&dev->power.lock);
1365 cancel_work_sync(&dev->power.work);
1367 spin_lock_irq(&dev->power.lock);
1368 dev->power.request_pending = false;
1371 if (dev->power.runtime_status == RPM_SUSPENDING ||
1372 dev->power.runtime_status == RPM_RESUMING ||
1373 dev->power.idle_notification) {
1378 prepare_to_wait(&dev->power.wait_queue, &wait,
1380 if (dev->power.runtime_status != RPM_SUSPENDING
1381 && dev->power.runtime_status != RPM_RESUMING
1382 && !dev->power.idle_notification)
1384 spin_unlock_irq(&dev->power.lock);
1388 spin_lock_irq(&dev->power.lock);
1390 finish_wait(&dev->power.wait_queue, &wait);
1413 spin_lock_irq(&dev->power.lock);
1415 if (dev->power.request_pending
1416 && dev->power.request == RPM_REQ_RESUME) {
1423 spin_unlock_irq(&dev->power.lock);
1435 * Increment power.disable_depth for the device and if it was zero previously,
1441 * __pm_runtime_disable() is called and power.disable_depth is zero, the
1446 spin_lock_irq(&dev->power.lock);
1448 if (dev->power.disable_depth > 0) {
1449 dev->power.disable_depth++;
1458 if (check_resume && dev->power.request_pending &&
1459 dev->power.request == RPM_REQ_RESUME) {
1474 if (!dev->power.disable_depth++) {
1476 dev->power.last_status = dev->power.runtime_status;
1480 spin_unlock_irq(&dev->power.lock);
1492 spin_lock_irqsave(&dev->power.lock, flags);
1494 if (!dev->power.disable_depth) {
1499 if (--dev->power.disable_depth > 0)
1502 dev->power.last_status = RPM_INVALID;
1503 dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
1505 if (dev->power.runtime_status == RPM_SUSPENDED &&
1506 !dev->power.ignore_children &&
1507 atomic_read(&dev->power.child_count) > 0)
1511 spin_unlock_irqrestore(&dev->power.lock, flags);
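Every pm_runtime_enable() must be balanced by pm_runtime_disable() on teardown; newer kernels also offer a devres helper that does the pairing automatically. A sketch, assuming a probe context:

	int ret;

	ret = devm_pm_runtime_enable(dev);	/* disabled again on driver detach */
	if (ret)
		return ret;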
1541 * Increase the device's usage count and clear its power.runtime_auto flag,
1547 spin_lock_irq(&dev->power.lock);
1548 if (!dev->power.runtime_auto)
1551 dev->power.runtime_auto = false;
1552 atomic_inc(&dev->power.usage_count);
1556 spin_unlock_irq(&dev->power.lock);
1564 * Decrease the device's usage count and set its power.runtime_auto flag.
1570 spin_lock_irq(&dev->power.lock);
1571 if (dev->power.runtime_auto)
1574 dev->power.runtime_auto = true;
1582 spin_unlock_irq(&dev->power.lock);
1590 * Set the power.no_callbacks flag, which tells the PM core that this
1591 * device is power-managed through its parent and has no runtime PM
1596 spin_lock_irq(&dev->power.lock);
1597 dev->power.no_callbacks = 1;
1598 spin_unlock_irq(&dev->power.lock);
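A typical caller is a driver whose child devices are powered entirely through their parent; a sketch (child is a hypothetical sub-device with an embedded struct device):

	/* no runtime PM callbacks of its own; state follows the parent */
	pm_runtime_no_callbacks(&child->dev);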
1608 * Set the power.irq_safe flag, which tells the PM core that the
1620 spin_lock_irq(&dev->power.lock);
1621 dev->power.irq_safe = 1;
1622 spin_unlock_irq(&dev->power.lock);
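Once this flag is set, the synchronous helpers may be called from atomic context, at the cost of keeping the parent permanently resumed; a sketch:

	pm_runtime_irq_safe(dev);	/* once, e.g. at probe */

	/* later, even from interrupt context: */
	pm_runtime_get_sync(dev);	/* busy-waits instead of sleeping */
	/* ... */
	pm_runtime_put(dev);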
1635 * This function must be called under dev->power.lock with interrupts disabled.
1639 int delay = dev->power.autosuspend_delay;
1642 if (dev->power.use_autosuspend && delay < 0) {
1646 atomic_inc(&dev->power.usage_count);
1658 atomic_dec(&dev->power.usage_count);
1670 * Set the device's power.autosuspend_delay value. If it changes to negative
1671 * and the power.use_autosuspend flag is set, prevent runtime suspends. If it
1678 spin_lock_irq(&dev->power.lock);
1679 old_delay = dev->power.autosuspend_delay;
1680 old_use = dev->power.use_autosuspend;
1681 dev->power.autosuspend_delay = delay;
1683 spin_unlock_irq(&dev->power.lock);
1692 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1699 spin_lock_irq(&dev->power.lock);
1700 old_delay = dev->power.autosuspend_delay;
1701 old_use = dev->power.use_autosuspend;
1702 dev->power.use_autosuspend = use;
1704 spin_unlock_irq(&dev->power.lock);
1714 dev->power.runtime_status = RPM_SUSPENDED;
1715 dev->power.last_status = RPM_INVALID;
1716 dev->power.idle_notification = false;
1718 dev->power.disable_depth = 1;
1719 atomic_set(&dev->power.usage_count, 0);
1721 dev->power.runtime_error = 0;
1723 atomic_set(&dev->power.child_count, 0);
1725 dev->power.runtime_auto = true;
1727 dev->power.request_pending = false;
1728 dev->power.request = RPM_REQ_NONE;
1729 dev->power.deferred_resume = false;
1730 dev->power.needs_force_resume = 0;
1731 INIT_WORK(&dev->power.work, pm_runtime_work);
1733 dev->power.timer_expires = 0;
1734 hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1735 dev->power.suspend_timer.function = pm_suspend_timer_fn;
1737 init_waitqueue_head(&dev->power.wait_queue);
1747 if (dev->power.runtime_status == RPM_ACTIVE)
1749 if (dev->power.irq_safe) {
1750 spin_lock_irq(&dev->power.lock);
1751 dev->power.irq_safe = 0;
1752 spin_unlock_irq(&dev->power.lock);
1813 spin_lock_irq(&dev->power.lock);
1814 dev->power.links_count++;
1815 spin_unlock_irq(&dev->power.lock);
1820 spin_lock_irq(&dev->power.lock);
1821 WARN_ON(dev->power.links_count == 0);
1822 dev->power.links_count--;
1823 spin_unlock_irq(&dev->power.lock);
1846 return atomic_read(&dev->power.usage_count) <= 1 &&
1847 (atomic_read(&dev->power.child_count) == 0 ||
1848 dev->power.ignore_children);
1864 * sure the device is put into low power state and it should only be used during
1900 dev->power.needs_force_resume = 1;
1917 * into low power state by a call to pm_runtime_force_suspend(). Here we reverse
1918 * those actions and bring the device into full power, if it is expected to be
1929 if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
1950 dev->power.needs_force_resume = 0;
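The usual consumers of this pair are drivers that reuse their runtime PM callbacks for system sleep; a sketch, reusing the hypothetical my_runtime_* callbacks from above:

static const struct dev_pm_ops my_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
};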