Lines matching refs:genpd — drivers/base/power/domain.c (Linux kernel generic PM domain core)
30 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
35 __routine = genpd->dev_ops.callback; \
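
Only the two lines of GENPD_DEV_CALLBACK that reference the genpd symbol appear above; the elided body of the macro in mainline reads roughly as follows (reconstructed, so treat it as a sketch rather than a verbatim quote):

    #define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
    ({									\
    	type (*__routine)(struct device *__d);				\
    	type __ret = (type)0;						\
    									\
    	__routine = genpd->dev_ops.callback;				\
    	if (__routine)							\
    		__ret = __routine(dev);					\
    	__ret;								\
    })
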
46 void (*lock)(struct generic_pm_domain *genpd);
47 void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
48 int (*lock_interruptible)(struct generic_pm_domain *genpd);
49 void (*unlock)(struct generic_pm_domain *genpd);
52 static void genpd_lock_mtx(struct generic_pm_domain *genpd)
54 mutex_lock(&genpd->mlock);
57 static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
60 mutex_lock_nested(&genpd->mlock, depth);
63 static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
65 return mutex_lock_interruptible(&genpd->mlock);
68 static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
70 mutex_unlock(&genpd->mlock);
80 static void genpd_lock_spin(struct generic_pm_domain *genpd)
81 __acquires(&genpd->slock)
85 spin_lock_irqsave(&genpd->slock, flags);
86 genpd->lock_flags = flags;
89 static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
91 __acquires(&genpd->slock)
95 spin_lock_irqsave_nested(&genpd->slock, flags, depth);
96 genpd->lock_flags = flags;
99 static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
100 __acquires(&genpd->slock)
104 spin_lock_irqsave(&genpd->slock, flags);
105 genpd->lock_flags = flags;
109 static void genpd_unlock_spin(struct generic_pm_domain *genpd)
110 __releases(&genpd->slock)
112 spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
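
The mutex and spinlock primitives above are gathered into two genpd_lock_ops tables; those initializer lines reference only the helper symbols, not genpd itself, which is why they are missing from this match list. A sketch of the wiring:

    static const struct genpd_lock_ops genpd_mtx_ops = {
    	.lock = genpd_lock_mtx,
    	.lock_nested = genpd_lock_nested_mtx,
    	.lock_interruptible = genpd_lock_interruptible_mtx,
    	.unlock = genpd_unlock_mtx,
    };

    static const struct genpd_lock_ops genpd_spin_ops = {
    	.lock = genpd_lock_spin,
    	.lock_nested = genpd_lock_nested_spin,
    	.lock_interruptible = genpd_lock_interruptible_spin,
    	.unlock = genpd_unlock_spin,
    };

genpd_lock_init() (line 2031 below) selects between the two based on GENPD_FLAG_IRQ_SAFE.
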
127 #define genpd_status_on(genpd) (genpd->status == GENPD_STATE_ON)
128 #define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
129 #define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON)
130 #define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
131 #define genpd_is_cpu_domain(genpd) (genpd->flags & GENPD_FLAG_CPU_DOMAIN)
132 #define genpd_is_rpm_always_on(genpd) (genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
135 const struct generic_pm_domain *genpd)
139 ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
146 if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
151 genpd->name);
169 /* A genpd always has its ->runtime_suspend() callback assigned. */
178 * attached to the device is a genpd domain.
188 static int genpd_stop_dev(const struct generic_pm_domain *genpd,
191 return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
194 static int genpd_start_dev(const struct generic_pm_domain *genpd,
197 return GENPD_DEV_CALLBACK(genpd, int, start, dev);
200 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
204 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
205 ret = !!atomic_dec_and_test(&genpd->sd_count);
210 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
212 atomic_inc(&genpd->sd_count);
219 static void genpd_debug_add(struct generic_pm_domain *genpd);
221 static void genpd_debug_remove(struct generic_pm_domain *genpd)
226 debugfs_lookup_and_remove(genpd->name, genpd_debugfs_dir);
229 static void genpd_update_accounting(struct generic_pm_domain *genpd)
234 if (now <= genpd->accounting_time)
237 delta = now - genpd->accounting_time;
240 * If genpd->status is active, it means we are just
244 if (genpd->status == GENPD_STATE_ON)
245 genpd->states[genpd->state_idx].idle_time += delta;
247 genpd->on_time += delta;
249 genpd->accounting_time = now;
252 static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
253 static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
254 static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
257 static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
265 if (state == genpd->performance_state)
269 if (state > genpd->performance_state)
273 list_for_each_entry(pdd, &genpd->dev_list, list_node) {
283 * field is protected by the parent genpd->lock, which is already taken.
294 list_for_each_entry(link, &genpd->parent_links, parent_node) {
302 static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
309 return dev_pm_opp_xlate_performance_state(genpd->opp_table,
314 static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
321 if (state == genpd->performance_state)
324 /* Propagate to parents of genpd */
325 list_for_each_entry(link, &genpd->child_links, child_node) {
329 ret = genpd_xlate_performance_state(genpd, parent, state);
351 if (genpd->set_performance_state) {
352 ret = genpd->set_performance_state(genpd, state);
357 genpd->performance_state = state;
362 list_for_each_entry_continue_reverse(link, &genpd->child_links,
386 struct generic_pm_domain *genpd = dev_to_genpd(dev);
396 state = _genpd_reeval_performance_state(genpd, state);
398 ret = _genpd_set_performance_state(genpd, state, 0);
430 * performance state of the genpd).
432 * It is assumed that the users guarantee that the genpd wouldn't be detached
439 struct generic_pm_domain *genpd;
442 genpd = dev_to_genpd_safe(dev);
443 if (!genpd)
450 genpd_lock(genpd);
458 genpd_unlock(genpd);
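
The exported entry point here is dev_pm_genpd_set_performance_state(dev, state). A minimal consumer-side sketch, assuming the device's OPP table carries required-opps entries pointing at the domain (foo_set_rate and the index 0 are hypothetical):

    #include <linux/pm_domain.h>
    #include <linux/pm_opp.h>

    static int foo_set_rate(struct device *dev, unsigned long rate)
    {
    	struct dev_pm_opp *opp;
    	unsigned int pstate;

    	opp = dev_pm_opp_find_freq_ceil(dev, &rate);
    	if (IS_ERR(opp))
    		return PTR_ERR(opp);

    	/* Translate the OPP's required-opps entry into a domain state. */
    	pstate = dev_pm_opp_get_required_pstate(opp, 0);
    	dev_pm_opp_put(opp);

    	/* Vote on behalf of dev; a later vote of 0 drops it. */
    	return dev_pm_genpd_set_performance_state(dev, pstate);
    }
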
472 * guarantee that the genpd wouldn't be detached while this routine is getting
481 struct generic_pm_domain *genpd;
484 genpd = dev_to_genpd_safe(dev);
485 if (!genpd)
495 * dev_pm_genpd_get_next_hrtimer - Return the next_hrtimer for the genpd
496 * @dev: A device that is attached to the genpd.
501 * Returns the aggregated value of the genpd's next hrtimer or KTIME_MAX if no
506 struct generic_pm_domain *genpd;
508 genpd = dev_to_genpd_safe(dev);
509 if (!genpd)
512 if (genpd->gd)
513 return genpd->gd->next_hrtimer;
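
Callers (typically a genpd governor handling CPU domains) just read the aggregate; sketch:

    /* KTIME_MAX means no hrtimer is pending anywhere in the domain. */
    ktime_t next = dev_pm_genpd_get_next_hrtimer(dev);
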
522 * @dev: A device that is attached to the genpd.
524 * Allows a consumer of the genpd to notify the provider that the next power off
527 * It is assumed that the users guarantee that the genpd wouldn't be detached
532 struct generic_pm_domain *genpd;
534 genpd = dev_to_genpd_safe(dev);
535 if (!genpd)
538 genpd_lock(genpd);
539 genpd->synced_poweroff = true;
540 genpd_unlock(genpd);
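
Usage is a single call before the final suspend of the device; sketch:

    /* Ask the provider to make the next domain power off synchronous. */
    dev_pm_genpd_synced_poweroff(dev);
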
544 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
546 unsigned int state_idx = genpd->state_idx;
552 ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
559 if (!genpd->power_on)
562 timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
564 ret = genpd->power_on(genpd);
572 ret = genpd->power_on(genpd);
577 if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
580 genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
581 genpd->gd->max_off_time_changed = true;
583 genpd->name, "on", elapsed_ns);
586 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
587 genpd->synced_poweroff = false;
590 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
595 static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
597 unsigned int state_idx = genpd->state_idx;
603 ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
610 if (!genpd->power_off)
613 timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
615 ret = genpd->power_off(genpd);
623 ret = genpd->power_off(genpd);
628 if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
631 genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
632 genpd->gd->max_off_time_changed = true;
634 genpd->name, "off", elapsed_ns);
637 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
641 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
647 * @genpd: PM domain to power off.
652 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
654 queue_work(pm_wq, &genpd->power_off_work);
659 * @genpd: PM domain to power down.
660 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
666 * If all of the @genpd's devices have been suspended and all of its subdomains
667 * have been powered down, remove power from @genpd.
669 static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
682 if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
690 if (genpd_is_always_on(genpd) ||
691 genpd_is_rpm_always_on(genpd) ||
692 atomic_read(&genpd->sd_count) > 0)
701 list_for_each_entry(link, &genpd->parent_links, parent_node) {
707 list_for_each_entry(pdd, &genpd->dev_list, list_node) {
713 irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
720 if (genpd->gov && genpd->gov->power_down_ok) {
721 if (!genpd->gov->power_down_ok(&genpd->domain))
726 if (!genpd->gov)
727 genpd->state_idx = 0;
730 if (atomic_read(&genpd->sd_count) > 0)
733 ret = _genpd_power_off(genpd, true);
735 genpd->states[genpd->state_idx].rejected++;
739 genpd->status = GENPD_STATE_OFF;
740 genpd_update_accounting(genpd);
741 genpd->states[genpd->state_idx].usage++;
743 list_for_each_entry(link, &genpd->child_links, child_node) {
755 * @genpd: PM domain to power up.
758 * Restore power to @genpd and all of its parents so that it is possible to
761 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
766 if (genpd_status_on(genpd))
774 list_for_each_entry(link, &genpd->child_links, child_node) {
789 ret = _genpd_power_on(genpd, true);
793 genpd->status = GENPD_STATE_ON;
794 genpd_update_accounting(genpd);
800 &genpd->child_links,
813 struct generic_pm_domain *genpd = dev_to_genpd(dev);
815 return genpd_start_dev(genpd, dev);
828 struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
840 genpd = dev_to_genpd(dev);
846 if (!IS_ERR(genpd)) {
847 genpd_lock(genpd);
848 genpd->gd->max_off_time_changed = true;
849 genpd_unlock(genpd);
866 struct generic_pm_domain *genpd;
868 genpd = container_of(work, struct generic_pm_domain, power_off_work);
870 genpd_lock(genpd);
871 genpd_power_off(genpd, false, 0);
872 genpd_unlock(genpd);
931 struct generic_pm_domain *genpd;
942 genpd = dev_to_genpd(dev);
943 if (IS_ERR(genpd))
952 suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
964 ret = genpd_stop_dev(genpd, dev);
977 genpd->gd->max_off_time_changed = true;
986 if (irq_safe_dev_in_sleep_domain(dev, genpd))
989 genpd_lock(genpd);
990 genpd_power_off(genpd, true, 0);
992 genpd_unlock(genpd);
1007 struct generic_pm_domain *genpd;
1017 genpd = dev_to_genpd(dev);
1018 if (IS_ERR(genpd))
1025 if (irq_safe_dev_in_sleep_domain(dev, genpd))
1028 genpd_lock(genpd);
1030 ret = genpd_power_on(genpd, 0);
1031 genpd_unlock(genpd);
1041 ret = genpd_start_dev(genpd, dev);
1056 genpd->gd->max_off_time_changed = true;
1064 genpd_stop_dev(genpd, dev);
1066 if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
1067 genpd_lock(genpd);
1068 genpd_power_off(genpd, true, 0);
1070 genpd_unlock(genpd);
1089 struct generic_pm_domain *genpd;
1092 pr_warn("genpd: Not disabling unused power domains\n");
1098 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
1099 genpd_queue_power_off_work(genpd);
1111 * @genpd: PM domain to power off, if possible.
1122 static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
1127 if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
1130 if (genpd->suspended_count != genpd->device_count
1131 || atomic_read(&genpd->sd_count) > 0)
1135 list_for_each_entry(link, &genpd->parent_links, parent_node) {
1142 genpd->state_idx = genpd->state_count - 1;
1143 if (_genpd_power_off(genpd, false))
1146 genpd->status = GENPD_STATE_OFF;
1148 list_for_each_entry(link, &genpd->child_links, child_node) {
1163 * @genpd: PM domain to power on.
1171 static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
1176 if (genpd_status_on(genpd))
1179 list_for_each_entry(link, &genpd->child_links, child_node) {
1191 _genpd_power_on(genpd, false);
1192 genpd->status = GENPD_STATE_ON;
1206 struct generic_pm_domain *genpd;
1211 genpd = dev_to_genpd(dev);
1212 if (IS_ERR(genpd))
1215 genpd_lock(genpd);
1217 if (genpd->prepared_count++ == 0)
1218 genpd->suspended_count = 0;
1220 genpd_unlock(genpd);
1224 genpd_lock(genpd);
1226 genpd->prepared_count--;
1228 genpd_unlock(genpd);
1231 /* Never return 1, as genpd doesn't cope with the direct_complete path. */
1249 struct generic_pm_domain *genpd;
1252 genpd = dev_to_genpd(dev);
1253 if (IS_ERR(genpd))
1260 if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
1263 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1265 ret = genpd_stop_dev(genpd, dev);
1272 genpd_lock(genpd);
1273 genpd->suspended_count++;
1274 genpd_sync_power_off(genpd, true, 0);
1275 genpd_unlock(genpd);
1306 struct generic_pm_domain *genpd;
1311 genpd = dev_to_genpd(dev);
1312 if (IS_ERR(genpd))
1315 if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
1318 genpd_lock(genpd);
1319 genpd_sync_power_on(genpd, true, 0);
1320 genpd->suspended_count--;
1321 genpd_unlock(genpd);
1323 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1325 ret = genpd_start_dev(genpd, dev);
1420 struct generic_pm_domain *genpd;
1424 genpd = dev_to_genpd(dev);
1425 if (IS_ERR(genpd))
1430 genpd_lock(genpd);
1432 genpd->prepared_count--;
1433 if (!genpd->prepared_count)
1434 genpd_queue_power_off_work(genpd);
1436 genpd_unlock(genpd);
1441 struct generic_pm_domain *genpd;
1444 genpd = dev_to_genpd_safe(dev);
1445 if (!genpd)
1448 use_lock = genpd_is_irq_safe(genpd);
1451 genpd_lock(genpd);
1454 genpd->suspended_count++;
1455 genpd_sync_power_off(genpd, use_lock, 0);
1457 genpd_sync_power_on(genpd, use_lock, 0);
1458 genpd->suspended_count--;
1462 genpd_unlock(genpd);
1466 * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
1467 * @dev: The device that is attached to the genpd, that can be suspended.
1472 * genpd.
1481 * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
1482 * @dev: The device that is attached to the genpd, which needs to be resumed.
1486 * to resume a corresponding CPU device that is attached to a genpd.
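
These two are for paths that bypass the regular PM core callbacks, such as suspend-to-idle of a CPU attached to a genpd. One plausible wiring is a CPU PM notifier, sketched below (foo_cpu_pm_notify is hypothetical; mainline's cpuidle-psci instead calls these directly from its s2idle enter path):

    #include <linux/cpu.h>
    #include <linux/cpu_pm.h>
    #include <linux/pm_domain.h>

    static int foo_cpu_pm_notify(struct notifier_block *nb,
    			     unsigned long action, void *unused)
    {
    	struct device *dev = get_cpu_device(smp_processor_id());

    	if (action == CPU_PM_ENTER)
    		dev_pm_genpd_suspend(dev);
    	else if (action == CPU_PM_EXIT)
    		dev_pm_genpd_resume(dev);

    	return NOTIFY_OK;
    }
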
1577 static void genpd_update_cpumask(struct generic_pm_domain *genpd,
1582 if (!genpd_is_cpu_domain(genpd))
1585 list_for_each_entry(link, &genpd->child_links, child_node) {
1594 cpumask_set_cpu(cpu, genpd->cpus);
1596 cpumask_clear_cpu(cpu, genpd->cpus);
1599 static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
1602 genpd_update_cpumask(genpd, cpu, true, 0);
1605 static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
1608 genpd_update_cpumask(genpd, cpu, false, 0);
1611 static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
1615 if (!genpd_is_cpu_domain(genpd))
1626 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1629 struct genpd_governor_data *gd = genpd->gd;
1639 gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
1641 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1645 genpd_lock(genpd);
1647 genpd_set_cpumask(genpd, gpd_data->cpu);
1648 dev_pm_domain_set(dev, &genpd->domain);
1650 genpd->device_count++;
1654 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1656 genpd_unlock(genpd);
1669 * @genpd: PM domain to add the device to.
1672 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1676 if (!genpd || !dev)
1680 ret = genpd_add_device(genpd, dev, dev);
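
For non-OF platforms the attach is explicit; a sketch (my_domain and pdev are hypothetical):

    /* Attach the device, then let runtime PM drive the domain. */
    ret = pm_genpd_add_device(&my_domain, &pdev->dev);
    if (ret)
    	return ret;

    pm_runtime_enable(&pdev->dev);
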
1687 static int genpd_remove_device(struct generic_pm_domain *genpd,
1701 genpd_lock(genpd);
1703 if (genpd->prepared_count > 0) {
1708 genpd->device_count--;
1709 if (genpd->gd)
1710 genpd->gd->max_off_time_changed = true;
1712 genpd_clear_cpumask(genpd, gpd_data->cpu);
1717 genpd_unlock(genpd);
1719 if (genpd->detach_dev)
1720 genpd->detach_dev(genpd, dev);
1727 genpd_unlock(genpd);
1739 struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
1741 if (!genpd)
1744 return genpd_remove_device(genpd, dev);
1749 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
1754 * Users may call this function to add a genpd power on/off notifier for an
1756 * sent when genpd is powering on/off the PM domain.
1758 * It is assumed that the user guarantees that the genpd wouldn't be detached
1765 struct generic_pm_domain *genpd;
1769 genpd = dev_to_genpd_safe(dev);
1770 if (!genpd)
1781 genpd_lock(genpd);
1782 ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
1783 genpd_unlock(genpd);
1787 genpd->name);
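
A sketch of a consumer registering for power on/off events (foo_genpd_notify is hypothetical; the GENPD_NOTIFY_* actions are the ones genpd sends):

    #include <linux/notifier.h>
    #include <linux/pm_domain.h>

    static int foo_genpd_notify(struct notifier_block *nb,
    			    unsigned long action, void *data)
    {
    	switch (action) {
    	case GENPD_NOTIFY_PRE_OFF:
    		/* Save context; the domain is about to lose power. */
    		break;
    	case GENPD_NOTIFY_ON:
    		/* Restore context; power is back. */
    		break;
    	default:
    		break;
    	}
    	return NOTIFY_OK;
    }

    static struct notifier_block foo_nb = {
    	.notifier_call = foo_genpd_notify,
    };

    /* dev must already be attached to the genpd. */
    ret = dev_pm_genpd_add_notifier(dev, &foo_nb);

The matching dev_pm_genpd_remove_notifier(dev) below undoes the registration.
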
1797 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
1801 * Users may call this function to remove a genpd power on/off notifier for an
1804 * It is assumed that the user guarantees that the genpd wouldn't be detached
1811 struct generic_pm_domain *genpd;
1815 genpd = dev_to_genpd_safe(dev);
1816 if (!genpd)
1827 genpd_lock(genpd);
1828 ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
1830 genpd_unlock(genpd);
1834 genpd->name);
1843 static int genpd_add_subdomain(struct generic_pm_domain *genpd,
1849 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1850 || genpd == subdomain)
1858 if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
1860 genpd->name, subdomain->name);
1869 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1871 if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
1876 list_for_each_entry(itr, &genpd->parent_links, parent_node) {
1877 if (itr->child == subdomain && itr->parent == genpd) {
1883 link->parent = genpd;
1884 list_add_tail(&link->parent_node, &genpd->parent_links);
1888 genpd_sd_counter_inc(genpd);
1891 genpd_unlock(genpd);
1900 * @genpd: Leader PM domain to add the subdomain to.
1903 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1909 ret = genpd_add_subdomain(genpd, subdomain);
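
The hierarchy is built by parenting one initialized domain under another; sketch with hypothetical parent_pd/child_pd:

    ret = pm_genpd_add_subdomain(&parent_pd, &child_pd);
    if (ret)
    	pr_err("failed to add %s under %s: %d\n",
    	       child_pd.name, parent_pd.name, ret);
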
1918 * @genpd: Leader PM domain to remove the subdomain from.
1921 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1927 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1931 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1935 genpd->name, subdomain->name);
1940 list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
1948 genpd_sd_counter_dec(genpd);
1955 genpd_unlock(genpd);
1968 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1976 genpd->states = state;
1977 genpd->state_count = 1;
1978 genpd->free_states = genpd_free_default_power_state;
1983 static int genpd_alloc_data(struct generic_pm_domain *genpd)
1988 if (genpd_is_cpu_domain(genpd) &&
1989 !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
1992 if (genpd->gov) {
2006 if (genpd->state_count == 0) {
2007 ret = genpd_set_default_power_state(genpd);
2012 genpd->gd = gd;
2016 if (genpd_is_cpu_domain(genpd))
2017 free_cpumask_var(genpd->cpus);
2022 static void genpd_free_data(struct generic_pm_domain *genpd)
2024 if (genpd_is_cpu_domain(genpd))
2025 free_cpumask_var(genpd->cpus);
2026 if (genpd->free_states)
2027 genpd->free_states(genpd->states, genpd->state_count);
2028 kfree(genpd->gd);
2031 static void genpd_lock_init(struct generic_pm_domain *genpd)
2033 if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
2034 spin_lock_init(&genpd->slock);
2035 genpd->lock_ops = &genpd_spin_ops;
2037 mutex_init(&genpd->mlock);
2038 genpd->lock_ops = &genpd_mtx_ops;
2044 * @genpd: PM domain object to initialize.
2050 int pm_genpd_init(struct generic_pm_domain *genpd,
2055 if (IS_ERR_OR_NULL(genpd))
2058 INIT_LIST_HEAD(&genpd->parent_links);
2059 INIT_LIST_HEAD(&genpd->child_links);
2060 INIT_LIST_HEAD(&genpd->dev_list);
2061 RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
2062 genpd_lock_init(genpd);
2063 genpd->gov = gov;
2064 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
2065 atomic_set(&genpd->sd_count, 0);
2066 genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
2067 genpd->device_count = 0;
2068 genpd->provider = NULL;
2069 genpd->has_provider = false;
2070 genpd->accounting_time = ktime_get_mono_fast_ns();
2071 genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
2072 genpd->domain.ops.runtime_resume = genpd_runtime_resume;
2073 genpd->domain.ops.prepare = genpd_prepare;
2074 genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
2075 genpd->domain.ops.resume_noirq = genpd_resume_noirq;
2076 genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
2077 genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
2078 genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
2079 genpd->domain.ops.restore_noirq = genpd_restore_noirq;
2080 genpd->domain.ops.complete = genpd_complete;
2081 genpd->domain.start = genpd_dev_pm_start;
2083 if (genpd->flags & GENPD_FLAG_PM_CLK) {
2084 genpd->dev_ops.stop = pm_clk_suspend;
2085 genpd->dev_ops.start = pm_clk_resume;
2090 genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
2093 if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
2094 !genpd_status_on(genpd)) {
2095 pr_err("always-on PM domain %s is not on\n", genpd->name);
2100 if (!gov && genpd->state_count > 1)
2101 pr_warn("%s: no governor for states\n", genpd->name);
2103 ret = genpd_alloc_data(genpd);
2107 device_initialize(&genpd->dev);
2108 dev_set_name(&genpd->dev, "%s", genpd->name);
2111 list_add(&genpd->gpd_list_node, &gpd_list);
2113 genpd_debug_add(genpd);
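
A minimal provider, sketched under the assumption of simple register-poke power switch callbacks (all foo_* names are hypothetical):

    static int foo_power_on(struct generic_pm_domain *domain)
    {
    	/* Ungate the power switch for this island. */
    	return 0;
    }

    static int foo_power_off(struct generic_pm_domain *domain)
    {
    	/* Gate the power switch. */
    	return 0;
    }

    static struct generic_pm_domain foo_pd = {
    	.name = "foo",
    	.power_on = foo_power_on,
    	.power_off = foo_power_off,
    	.flags = GENPD_FLAG_PM_CLK,
    };

    /* false: register the domain as initially powered on. */
    ret = pm_genpd_init(&foo_pd, NULL, false);

Passing is_off = true registers the domain powered down; note from lines 2093-2095 that combining that with an always-on flag is rejected.
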
2119 static int genpd_remove(struct generic_pm_domain *genpd)
2123 if (IS_ERR_OR_NULL(genpd))
2126 genpd_lock(genpd);
2128 if (genpd->has_provider) {
2129 genpd_unlock(genpd);
2130 pr_err("Provider present, unable to remove %s\n", genpd->name);
2134 if (!list_empty(&genpd->parent_links) || genpd->device_count) {
2135 genpd_unlock(genpd);
2136 pr_err("%s: unable to remove %s\n", __func__, genpd->name);
2140 list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
2146 list_del(&genpd->gpd_list_node);
2147 genpd_unlock(genpd);
2148 genpd_debug_remove(genpd);
2149 cancel_work_sync(&genpd->power_off_work);
2150 genpd_free_data(genpd);
2152 pr_debug("%s: removed %s\n", __func__, genpd->name);
2159 * @genpd: Pointer to PM domain that is to be removed.
2170 int pm_genpd_remove(struct generic_pm_domain *genpd)
2175 ret = genpd_remove(genpd);
2295 static bool genpd_present(const struct generic_pm_domain *genpd)
2302 if (gpd == genpd) {
2315 * @genpd: Pointer to PM domain associated with the PM domain provider.
2318 struct generic_pm_domain *genpd)
2322 if (!np || !genpd)
2325 if (!genpd_present(genpd))
2328 genpd->dev.of_node = np;
2330 /* Parse genpd OPP table */
2331 if (genpd->set_performance_state) {
2332 ret = dev_pm_opp_of_add_table(&genpd->dev);
2334 return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
2340 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2341 WARN_ON(IS_ERR(genpd->opp_table));
2344 ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
2346 if (genpd->set_performance_state) {
2347 dev_pm_opp_put_opp_table(genpd->opp_table);
2348 dev_pm_opp_of_remove_table(&genpd->dev);
2354 genpd->provider = &np->fwnode;
2355 genpd->has_provider = true;
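
With the domain initialized, registering it as an OF provider lets consumers bind via a power-domains phandle; sketch continuing the pm_genpd_init() example above (np is hypothetical, typically pdev->dev.of_node):

    ret = of_genpd_add_provider_simple(np, &foo_pd);
    if (ret)
    	return ret;
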
2369 struct generic_pm_domain *genpd;
2380 genpd = data->domains[i];
2382 if (!genpd)
2384 if (!genpd_present(genpd))
2387 genpd->dev.of_node = np;
2389 /* Parse genpd OPP table */
2390 if (genpd->set_performance_state) {
2391 ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2393 dev_err_probe(&genpd->dev, ret,
2402 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2403 WARN_ON(IS_ERR(genpd->opp_table));
2406 genpd->provider = &np->fwnode;
2407 genpd->has_provider = true;
2418 genpd = data->domains[i];
2420 if (!genpd)
2423 genpd->provider = NULL;
2424 genpd->has_provider = false;
2426 if (genpd->set_performance_state) {
2427 dev_pm_opp_put_opp_table(genpd->opp_table);
2428 dev_pm_opp_of_remove_table(&genpd->dev);
2492 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2503 genpd = provider->xlate(genpdspec, provider->data);
2504 if (!IS_ERR(genpd))
2510 return genpd;
2523 struct generic_pm_domain *genpd;
2531 genpd = genpd_get_from_provider(genpdspec);
2532 if (IS_ERR(genpd)) {
2533 ret = PTR_ERR(genpd);
2537 ret = genpd_add_device(genpd, dev, dev);
2637 struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2647 genpd = ret ? ERR_PTR(ret) : gpd;
2653 return genpd;
2664 .name = "genpd",
2711 /* Unregister the device if it was created by genpd. */
2861 /* Allocate and register device on the genpd bus. */
2866 dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
2991 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
2993 * @dn: The genpd device node
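
A provider with multiple domain idle states typically fills genpd->states from DT before calling pm_genpd_init(); sketch (np and foo_pd as in the earlier example):

    struct genpd_power_state *states;
    int nr_states;
    int ret;

    ret = of_genpd_parse_idle_states(np, &states, &nr_states);
    if (ret < 0)
    	return ret;

    foo_pd.states = states;
    foo_pd.state_count = nr_states;
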
3036 * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
3042 * Returns performance state encoded in the OPP of the genpd. This calls
3043 * platform specific genpd->opp_to_performance_state() callback to translate
3051 struct generic_pm_domain *genpd = NULL;
3054 genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
3056 if (unlikely(!genpd->opp_to_performance_state))
3059 genpd_lock(genpd);
3060 state = genpd->opp_to_performance_state(genpd, opp);
3061 genpd_unlock(genpd);
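
Callers hand in the genpd's own struct device plus an OPP from its table; sketch (genpd_dev and opp are hypothetical):

    unsigned int pstate = pm_genpd_opp_to_performance_state(genpd_dev, opp);
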
3114 struct generic_pm_domain *genpd)
3126 ret = genpd_lock_interruptible(genpd);
3130 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
3132 if (!genpd_status_on(genpd))
3134 status_lookup[genpd->status], genpd->state_idx);
3137 status_lookup[genpd->status]);
3138 seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state);
3143 * Also genpd->name is immutable.
3145 list_for_each_entry(link, &genpd->parent_links, parent_node) {
3146 if (list_is_first(&link->parent_node, &genpd->parent_links))
3149 if (!list_is_last(&link->parent_node, &genpd->parent_links))
3153 list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3155 genpd_is_irq_safe(genpd) ?
3168 genpd_unlock(genpd);
3175 struct generic_pm_domain *genpd;
3186 list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3187 ret = genpd_summary_one(s, genpd);
3203 struct generic_pm_domain *genpd = s->private;
3206 ret = genpd_lock_interruptible(genpd);
3210 if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
3213 if (genpd->status == GENPD_STATE_OFF)
3214 seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
3215 genpd->state_idx);
3217 seq_printf(s, "%s\n", status_lookup[genpd->status]);
3219 genpd_unlock(genpd);
3225 struct generic_pm_domain *genpd = s->private;
3229 ret = genpd_lock_interruptible(genpd);
3233 list_for_each_entry(link, &genpd->parent_links, parent_node)
3236 genpd_unlock(genpd);
3242 struct generic_pm_domain *genpd = s->private;
3247 ret = genpd_lock_interruptible(genpd);
3253 for (i = 0; i < genpd->state_count; i++) {
3254 idle_time += genpd->states[i].idle_time;
3256 if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3258 if (now > genpd->accounting_time) {
3259 delta = now - genpd->accounting_time;
3266 genpd->states[i].usage, genpd->states[i].rejected);
3269 genpd_unlock(genpd);
3275 struct generic_pm_domain *genpd = s->private;
3279 ret = genpd_lock_interruptible(genpd);
3283 if (genpd->status == GENPD_STATE_ON) {
3285 if (now > genpd->accounting_time)
3286 delta = now - genpd->accounting_time;
3289 on_time = genpd->on_time + delta;
3293 genpd_unlock(genpd);
3299 struct generic_pm_domain *genpd = s->private;
3304 ret = genpd_lock_interruptible(genpd);
3308 for (i = 0; i < genpd->state_count; i++) {
3309 total += genpd->states[i].idle_time;
3311 if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3313 if (now > genpd->accounting_time) {
3314 delta = now - genpd->accounting_time;
3323 genpd_unlock(genpd);
3330 struct generic_pm_domain *genpd = s->private;
3335 ret = genpd_lock_interruptible(genpd);
3339 list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3341 genpd_is_irq_safe(genpd) ?
3350 genpd_unlock(genpd);
3356 struct generic_pm_domain *genpd = s->private;
3358 if (genpd_lock_interruptible(genpd))
3361 seq_printf(s, "%u\n", genpd->performance_state);
3363 genpd_unlock(genpd);
3376 static void genpd_debug_add(struct generic_pm_domain *genpd)
3383 d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
3386 d, genpd, &status_fops);
3388 d, genpd, &sub_domains_fops);
3390 d, genpd, &idle_states_fops);
3392 d, genpd, &active_time_fops);
3394 d, genpd, &total_idle_time_fops);
3396 d, genpd, &devices_fops);
3397 if (genpd->set_performance_state)
3399 d, genpd, &perf_state_fops);
3404 struct generic_pm_domain *genpd;
3411 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
3412 genpd_debug_add(genpd);