Lines Matching refs:genpd
29 #define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
34 __routine = genpd->dev_ops.callback; \
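Only two lines of the GENPD_DEV_CALLBACK() macro contain the string "genpd"; for context, the full definition in mainline reads roughly as follows (reconstructed here, not part of the match):

    #define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
    ({								\
    	type (*__routine)(struct device *__d);			\
    	type __ret = (type)0;					\
    								\
    	__routine = genpd->dev_ops.callback;			\
    	if (__routine) {					\
    		__ret = __routine(dev);				\
    	}							\
    	__ret;							\
    })

It looks up the per-device callback (e.g. ->stop()/->start()) from genpd->dev_ops and invokes it only if the provider actually assigned one, evaluating to (type)0 otherwise.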
45 void (*lock)(struct generic_pm_domain *genpd);
46 void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
47 int (*lock_interruptible)(struct generic_pm_domain *genpd);
48 void (*unlock)(struct generic_pm_domain *genpd);
51 static void genpd_lock_mtx(struct generic_pm_domain *genpd)
53 mutex_lock(&genpd->mlock);
56 static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
59 mutex_lock_nested(&genpd->mlock, depth);
62 static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
64 return mutex_lock_interruptible(&genpd->mlock);
67 static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
69 mutex_unlock(&genpd->mlock);
79 static void genpd_lock_spin(struct generic_pm_domain *genpd)
80 __acquires(&genpd->slock)
84 spin_lock_irqsave(&genpd->slock, flags);
85 genpd->lock_flags = flags;
88 static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
90 __acquires(&genpd->slock)
94 spin_lock_irqsave_nested(&genpd->slock, flags, depth);
95 genpd->lock_flags = flags;
98 static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
99 __acquires(&genpd->slock)
103 spin_lock_irqsave(&genpd->slock, flags);
104 genpd->lock_flags = flags;
108 static void genpd_unlock_spin(struct generic_pm_domain *genpd)
109 __releases(&genpd->slock)
111 spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
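The genpd_lock()/genpd_unlock() calls seen throughout the rest of this listing dispatch through the lock_ops table above, so one code path serves both mutex-based and IRQ-safe (spinlock-based) domains. The dispatch macros use a parameter named p rather than genpd, which is why they don't match this search; in mainline they look approximately like:

    #define genpd_lock(p)			p->lock_ops->lock(p)
    #define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
    #define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
    #define genpd_unlock(p)			p->lock_ops->unlock(p)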
126 #define genpd_status_on(genpd) (genpd->status == GENPD_STATE_ON)
127 #define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
128 #define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON)
129 #define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
130 #define genpd_is_cpu_domain(genpd) (genpd->flags & GENPD_FLAG_CPU_DOMAIN)
131 #define genpd_is_rpm_always_on(genpd) (genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
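These helpers test genpd->flags, which a provider is expected to set before registering the domain. A minimal, hypothetical declaration (my_pd and its callbacks are illustrative names, not kernel API; the callbacks are sketched after the _genpd_power_off() fragments below):

    static struct generic_pm_domain my_pd = {
    	.name		= "my_pd",
    	/* Power on/off may be requested from atomic context, so
    	 * genpd_lock_init() will pick the spinlock lock_ops. */
    	.flags		= GENPD_FLAG_IRQ_SAFE,
    	.power_on	= my_pd_power_on,
    	.power_off	= my_pd_power_off,
    };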
134 const struct generic_pm_domain *genpd)
138 ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
145 if (ret && !genpd_is_always_on(genpd))
147 genpd->name);
165 /* A genpd always has its ->runtime_suspend() callback assigned. */
174 * attached to the device is a genpd.
184 static int genpd_stop_dev(const struct generic_pm_domain *genpd,
187 return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
190 static int genpd_start_dev(const struct generic_pm_domain *genpd,
193 return GENPD_DEV_CALLBACK(genpd, int, start, dev);
196 static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
200 if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
201 ret = !!atomic_dec_and_test(&genpd->sd_count);
206 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
208 atomic_inc(&genpd->sd_count);
213 static void genpd_update_accounting(struct generic_pm_domain *genpd)
218 delta = ktime_sub(now, genpd->accounting_time);
221 * If genpd->status is active, it means we are just
225 if (genpd->status == GENPD_STATE_ON) {
226 int state_idx = genpd->state_idx;
228 genpd->states[state_idx].idle_time =
229 ktime_add(genpd->states[state_idx].idle_time, delta);
231 genpd->on_time = ktime_add(genpd->on_time, delta);
234 genpd->accounting_time = now;
237 static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
240 static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
248 if (state == genpd->performance_state)
252 if (state > genpd->performance_state)
256 list_for_each_entry(pdd, &genpd->dev_list, list_node) {
266 * field is protected by the parent genpd->lock, which is already taken.
277 list_for_each_entry(link, &genpd->parent_links, parent_node) {
285 static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
292 if (state == genpd->performance_state)
295 /* Propagate to parents of genpd */
296 list_for_each_entry(link, &genpd->child_links, child_node) {
303 ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
327 ret = genpd->set_performance_state(genpd, state);
331 genpd->performance_state = state;
336 list_for_each_entry_continue_reverse(link, &genpd->child_links,
369 * performance state of the genpd).
371 * It is assumed that the users guarantee that the genpd won't be detached
378 struct generic_pm_domain *genpd;
383 genpd = dev_to_genpd_safe(dev);
384 if (!genpd)
387 if (unlikely(!genpd->set_performance_state))
394 genpd_lock(genpd);
400 state = _genpd_reeval_performance_state(genpd, state);
401 ret = _genpd_set_performance_state(genpd, state, 0);
405 genpd_unlock(genpd);
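A hedged consumer-side sketch of dev_pm_genpd_set_performance_state(): the device must already be attached to a domain that implements ->set_performance_state(), and the state value 3 below is arbitrary. Aggregation across devices and propagation to parent domains happen in _genpd_reeval_performance_state() and _genpd_set_performance_state() above.

    #include <linux/pm_domain.h>

    static int my_dev_do_burst(struct device *dev)
    {
    	int ret;

    	/* Vote for a higher performance state for the duration of the work. */
    	ret = dev_pm_genpd_set_performance_state(dev, 3);
    	if (ret)
    		return ret;

    	/* ... perform the latency-sensitive work ... */

    	/* Drop the vote; 0 means "no requirement from this device". */
    	return dev_pm_genpd_set_performance_state(dev, 0);
    }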
411 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
413 unsigned int state_idx = genpd->state_idx;
419 ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
426 if (!genpd->power_on)
430 ret = genpd->power_on(genpd);
438 ret = genpd->power_on(genpd);
443 if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
446 genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
447 genpd->max_off_time_changed = true;
449 genpd->name, "on", elapsed_ns);
452 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
455 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
460 static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
462 unsigned int state_idx = genpd->state_idx;
468 ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
475 if (!genpd->power_off)
479 ret = genpd->power_off(genpd);
487 ret = genpd->power_off(genpd);
492 if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
495 genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
496 genpd->max_off_time_changed = true;
498 genpd->name, "off", elapsed_ns);
501 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
505 raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
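_genpd_power_on() and _genpd_power_off() wrap the provider's ->power_on()/->power_off() callbacks with latency accounting and the PRE/POST power notifiers. A hypothetical callback pair for the my_pd example above (my_regmap, MY_PD_CTRL and MY_PD_EN are placeholders for whatever register interface the platform really uses):

    #include <linux/bits.h>
    #include <linux/pm_domain.h>
    #include <linux/regmap.h>

    static struct regmap *my_regmap;	/* obtained in probe() */
    #define MY_PD_CTRL	0x10
    #define MY_PD_EN	BIT(0)

    static int my_pd_power_on(struct generic_pm_domain *domain)
    {
    	/* Ungate the power switch for this island. */
    	return regmap_update_bits(my_regmap, MY_PD_CTRL, MY_PD_EN, MY_PD_EN);
    }

    static int my_pd_power_off(struct generic_pm_domain *domain)
    {
    	return regmap_update_bits(my_regmap, MY_PD_CTRL, MY_PD_EN, 0);
    }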
511 * @genpd: PM domain to power off.
516 static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
518 queue_work(pm_wq, &genpd->power_off_work);
523 * @genpd: PM domain to power down.
524 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
529 * If all of the @genpd's devices have been suspended and all of its subdomains
530 * have been powered down, remove power from @genpd.
532 static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
545 if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
553 if (genpd_is_always_on(genpd) ||
554 genpd_is_rpm_always_on(genpd) ||
555 atomic_read(&genpd->sd_count) > 0)
558 list_for_each_entry(pdd, &genpd->dev_list, list_node) {
570 irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
577 if (genpd->gov && genpd->gov->power_down_ok) {
578 if (!genpd->gov->power_down_ok(&genpd->domain))
583 if (!genpd->gov)
584 genpd->state_idx = 0;
587 if (atomic_read(&genpd->sd_count) > 0)
590 ret = _genpd_power_off(genpd, true);
592 genpd->states[genpd->state_idx].rejected++;
596 genpd->status = GENPD_STATE_OFF;
597 genpd_update_accounting(genpd);
598 genpd->states[genpd->state_idx].usage++;
600 list_for_each_entry(link, &genpd->child_links, child_node) {
612 * @genpd: PM domain to power up.
615 * Restore power to @genpd and all of its parents so that it is possible to
618 static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
623 if (genpd_status_on(genpd))
631 list_for_each_entry(link, &genpd->child_links, child_node) {
646 ret = _genpd_power_on(genpd, true);
650 genpd->status = GENPD_STATE_ON;
651 genpd_update_accounting(genpd);
657 &genpd->child_links,
670 struct generic_pm_domain *genpd = dev_to_genpd(dev);
672 return genpd_start_dev(genpd, dev);
685 struct generic_pm_domain *genpd;
694 genpd = dev_to_genpd(dev);
696 genpd = ERR_PTR(-ENODATA);
701 if (!IS_ERR(genpd)) {
702 genpd_lock(genpd);
703 genpd->max_off_time_changed = true;
704 genpd_unlock(genpd);
721 struct generic_pm_domain *genpd;
723 genpd = container_of(work, struct generic_pm_domain, power_off_work);
725 genpd_lock(genpd);
726 genpd_power_off(genpd, false, 0);
727 genpd_unlock(genpd);
786 struct generic_pm_domain *genpd;
796 genpd = dev_to_genpd(dev);
797 if (IS_ERR(genpd))
806 suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
819 ret = genpd_stop_dev(genpd, dev);
832 genpd->max_off_time_changed = true;
841 if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
844 genpd_lock(genpd);
845 genpd_power_off(genpd, true, 0);
846 genpd_unlock(genpd);
861 struct generic_pm_domain *genpd;
871 genpd = dev_to_genpd(dev);
872 if (IS_ERR(genpd))
879 if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
884 genpd_lock(genpd);
885 ret = genpd_power_on(genpd, 0);
886 genpd_unlock(genpd);
897 ret = genpd_start_dev(genpd, dev);
912 genpd->max_off_time_changed = true;
920 genpd_stop_dev(genpd, dev);
923 (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
924 genpd_lock(genpd);
925 genpd_power_off(genpd, true, 0);
926 genpd_unlock(genpd);
945 struct generic_pm_domain *genpd;
948 pr_warn("genpd: Not disabling unused power domains\n");
954 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
955 genpd_queue_power_off_work(genpd);
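These two fragments are the tail of genpd_power_off_unused(), a late_initcall() that queues a power-off attempt for every registered domain once boot has settled; booting with pd_ignore_unused on the kernel command line takes the early return that prints the warning above. The surrounding function in mainline is approximately:

    static int __init genpd_power_off_unused(void)
    {
    	struct generic_pm_domain *genpd;

    	if (pd_ignore_unused) {
    		pr_warn("genpd: Not disabling unused power domains\n");
    		return 0;
    	}

    	mutex_lock(&gpd_list_lock);

    	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
    		genpd_queue_power_off_work(genpd);

    	mutex_unlock(&gpd_list_lock);

    	return 0;
    }
    late_initcall(genpd_power_off_unused);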
967 * @genpd: PM domain to power off, if possible.
978 static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
983 if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
986 if (genpd->suspended_count != genpd->device_count
987 || atomic_read(&genpd->sd_count) > 0)
991 genpd->state_idx = genpd->state_count - 1;
992 if (_genpd_power_off(genpd, false))
995 genpd->status = GENPD_STATE_OFF;
997 list_for_each_entry(link, &genpd->child_links, child_node) {
1012 * @genpd: PM domain to power on.
1020 static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
1025 if (genpd_status_on(genpd))
1028 list_for_each_entry(link, &genpd->child_links, child_node) {
1040 _genpd_power_on(genpd, false);
1041 genpd->status = GENPD_STATE_ON;
1047 * @genpd: PM domain the device belongs to.
1061 const struct generic_pm_domain *genpd)
1068 active_wakeup = genpd_is_active_wakeup(genpd);
1083 struct generic_pm_domain *genpd;
1088 genpd = dev_to_genpd(dev);
1089 if (IS_ERR(genpd))
1097 if (resume_needed(dev, genpd))
1100 genpd_lock(genpd);
1102 if (genpd->prepared_count++ == 0)
1103 genpd->suspended_count = 0;
1105 genpd_unlock(genpd);
1109 genpd_lock(genpd);
1111 genpd->prepared_count--;
1113 genpd_unlock(genpd);
1116 /* Never return 1, as genpd doesn't cope with the direct_complete path. */
1131 struct generic_pm_domain *genpd;
1134 genpd = dev_to_genpd(dev);
1135 if (IS_ERR(genpd))
1145 if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
1148 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1150 ret = genpd_stop_dev(genpd, dev);
1160 genpd_lock(genpd);
1161 genpd->suspended_count++;
1162 genpd_sync_power_off(genpd, true, 0);
1163 genpd_unlock(genpd);
1190 struct generic_pm_domain *genpd;
1195 genpd = dev_to_genpd(dev);
1196 if (IS_ERR(genpd))
1199 if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
1202 genpd_lock(genpd);
1203 genpd_sync_power_on(genpd, true, 0);
1204 genpd->suspended_count--;
1205 genpd_unlock(genpd);
1207 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1209 ret = genpd_start_dev(genpd, dev);
1228 const struct generic_pm_domain *genpd;
1233 genpd = dev_to_genpd(dev);
1234 if (IS_ERR(genpd))
1241 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1243 ret = genpd_stop_dev(genpd, dev);
1257 const struct generic_pm_domain *genpd;
1262 genpd = dev_to_genpd(dev);
1263 if (IS_ERR(genpd))
1266 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1268 ret = genpd_start_dev(genpd, dev);
1300 struct generic_pm_domain *genpd;
1305 genpd = dev_to_genpd(dev);
1306 if (IS_ERR(genpd))
1313 genpd_lock(genpd);
1314 if (genpd->suspended_count++ == 0) {
1320 genpd->status = GENPD_STATE_OFF;
1323 genpd_sync_power_on(genpd, true, 0);
1324 genpd_unlock(genpd);
1326 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1328 ret = genpd_start_dev(genpd, dev);
1347 struct generic_pm_domain *genpd;
1351 genpd = dev_to_genpd(dev);
1352 if (IS_ERR(genpd))
1357 genpd_lock(genpd);
1359 genpd->prepared_count--;
1360 if (!genpd->prepared_count)
1361 genpd_queue_power_off_work(genpd);
1363 genpd_unlock(genpd);
1375 struct generic_pm_domain *genpd;
1377 genpd = dev_to_genpd_safe(dev);
1378 if (!genpd)
1382 genpd->suspended_count++;
1383 genpd_sync_power_off(genpd, false, 0);
1385 genpd_sync_power_on(genpd, false, 0);
1386 genpd->suspended_count--;
1469 static void genpd_update_cpumask(struct generic_pm_domain *genpd,
1474 if (!genpd_is_cpu_domain(genpd))
1477 list_for_each_entry(link, &genpd->child_links, child_node) {
1486 cpumask_set_cpu(cpu, genpd->cpus);
1488 cpumask_clear_cpu(cpu, genpd->cpus);
1491 static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
1494 genpd_update_cpumask(genpd, cpu, true, 0);
1497 static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
1500 genpd_update_cpumask(genpd, cpu, false, 0);
1503 static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
1507 if (!genpd_is_cpu_domain(genpd))
1518 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1526 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1533 gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
1535 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1539 genpd_lock(genpd);
1541 genpd_set_cpumask(genpd, gpd_data->cpu);
1542 dev_pm_domain_set(dev, &genpd->domain);
1544 genpd->device_count++;
1545 genpd->max_off_time_changed = true;
1547 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1549 genpd_unlock(genpd);
1562 * @genpd: PM domain to add the device to.
1565 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1570 ret = genpd_add_device(genpd, dev, dev);
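Most devices are attached through the DT path (genpd_dev_pm_attach() and friends further down), but a platform may also bind a device by hand. A short, hypothetical sketch reusing my_pd from above:

    /* In a platform driver's probe(): */
    ret = pm_genpd_add_device(&my_pd, &pdev->dev);
    if (ret)
    	dev_err(&pdev->dev, "failed to add device to PM domain: %d\n", ret);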
1577 static int genpd_remove_device(struct generic_pm_domain *genpd,
1591 genpd_lock(genpd);
1593 if (genpd->prepared_count > 0) {
1598 genpd->device_count--;
1599 genpd->max_off_time_changed = true;
1601 genpd_clear_cpumask(genpd, gpd_data->cpu);
1606 genpd_unlock(genpd);
1608 if (genpd->detach_dev)
1609 genpd->detach_dev(genpd, dev);
1616 genpd_unlock(genpd);
1628 struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
1630 if (!genpd)
1633 return genpd_remove_device(genpd, dev);
1638 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
1643 * Users may call this function to add a genpd power on/off notifier for an
1645 * sent when genpd is powering on/off the PM domain.
1647 * It is assumed that the user guarantees that the genpd won't be detached
1654 struct generic_pm_domain *genpd;
1658 genpd = dev_to_genpd_safe(dev);
1659 if (!genpd)
1670 genpd_lock(genpd);
1671 ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
1672 genpd_unlock(genpd);
1676 genpd->name);
1686 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
1690 * Users may call this function to remove a genpd power on/off notifier for an
1693 * It is assumed that the user guarantees that the genpd won't be detached
1700 struct generic_pm_domain *genpd;
1704 genpd = dev_to_genpd_safe(dev);
1705 if (!genpd)
1716 genpd_lock(genpd);
1717 ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
1719 genpd_unlock(genpd);
1723 genpd->name);
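A hedged sketch of the notifier pair above: only one notifier per device is allowed, and the device must already be attached to its genpd. GENPD_NOTIFY_PRE_OFF/GENPD_NOTIFY_ON are the mainline notification values; the callback body and names here are illustrative:

    #include <linux/notifier.h>
    #include <linux/pm_domain.h>

    static int my_pd_notify(struct notifier_block *nb, unsigned long action,
    			 void *data)
    {
    	switch (action) {
    	case GENPD_NOTIFY_PRE_OFF:
    		/* Save context before the domain loses power. */
    		break;
    	case GENPD_NOTIFY_ON:
    		/* Restore context after power returns. */
    		break;
    	}
    	return NOTIFY_OK;
    }

    static struct notifier_block my_pd_nb = {
    	.notifier_call = my_pd_notify,
    };

    /* In probe(), after the device is attached to its domain: */
    ret = dev_pm_genpd_add_notifier(dev, &my_pd_nb);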
1732 static int genpd_add_subdomain(struct generic_pm_domain *genpd,
1738 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1739 || genpd == subdomain)
1747 if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
1749 genpd->name, subdomain->name);
1758 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1760 if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
1765 list_for_each_entry(itr, &genpd->parent_links, parent_node) {
1766 if (itr->child == subdomain && itr->parent == genpd) {
1772 link->parent = genpd;
1773 list_add_tail(&link->parent_node, &genpd->parent_links);
1777 genpd_sd_counter_inc(genpd);
1780 genpd_unlock(genpd);
1789 * @genpd: Leader PM domain to add the subdomain to.
1792 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1798 ret = genpd_add_subdomain(genpd, subdomain);
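A one-line usage sketch: making my_subpd a subdomain of my_parent_pd means the parent's sd_count keeps it powered while the child is on (names are illustrative). Note from the check above that an IRQ-safe subdomain may not be parented to a non-IRQ-safe domain.

    ret = pm_genpd_add_subdomain(&my_parent_pd, &my_subpd);
    if (ret)
    	pr_err("failed to add subdomain: %d\n", ret);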
1807 * @genpd: Leader PM domain to remove the subdomain from.
1810 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1816 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1820 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1824 genpd->name, subdomain->name);
1829 list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
1837 genpd_sd_counter_dec(genpd);
1844 genpd_unlock(genpd);
1857 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1865 genpd->states = state;
1866 genpd->state_count = 1;
1867 genpd->free_states = genpd_free_default_power_state;
1872 static void genpd_lock_init(struct generic_pm_domain *genpd)
1874 if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
1875 spin_lock_init(&genpd->slock);
1876 genpd->lock_ops = &genpd_spin_ops;
1878 mutex_init(&genpd->mlock);
1879 genpd->lock_ops = &genpd_mtx_ops;
1885 * @genpd: PM domain object to initialize.
1891 int pm_genpd_init(struct generic_pm_domain *genpd,
1896 if (IS_ERR_OR_NULL(genpd))
1899 INIT_LIST_HEAD(&genpd->parent_links);
1900 INIT_LIST_HEAD(&genpd->child_links);
1901 INIT_LIST_HEAD(&genpd->dev_list);
1902 RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
1903 genpd_lock_init(genpd);
1904 genpd->gov = gov;
1905 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1906 atomic_set(&genpd->sd_count, 0);
1907 genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
1908 genpd->device_count = 0;
1909 genpd->max_off_time_ns = -1;
1910 genpd->max_off_time_changed = true;
1911 genpd->provider = NULL;
1912 genpd->has_provider = false;
1913 genpd->accounting_time = ktime_get();
1914 genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
1915 genpd->domain.ops.runtime_resume = genpd_runtime_resume;
1916 genpd->domain.ops.prepare = genpd_prepare;
1917 genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
1918 genpd->domain.ops.resume_noirq = genpd_resume_noirq;
1919 genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
1920 genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
1921 genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
1922 genpd->domain.ops.restore_noirq = genpd_restore_noirq;
1923 genpd->domain.ops.complete = genpd_complete;
1924 genpd->domain.start = genpd_dev_pm_start;
1926 if (genpd->flags & GENPD_FLAG_PM_CLK) {
1927 genpd->dev_ops.stop = pm_clk_suspend;
1928 genpd->dev_ops.start = pm_clk_resume;
1932 if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
1933 !genpd_status_on(genpd))
1936 if (genpd_is_cpu_domain(genpd) &&
1937 !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
1941 if (genpd->state_count == 0) {
1942 ret = genpd_set_default_power_state(genpd);
1944 if (genpd_is_cpu_domain(genpd))
1945 free_cpumask_var(genpd->cpus);
1948 } else if (!gov && genpd->state_count > 1) {
1949 pr_warn("%s: no governor for states\n", genpd->name);
1952 device_initialize(&genpd->dev);
1953 dev_set_name(&genpd->dev, "%s", genpd->name);
1956 list_add(&genpd->gpd_list_node, &gpd_list);
1963 static int genpd_remove(struct generic_pm_domain *genpd)
1967 if (IS_ERR_OR_NULL(genpd))
1970 genpd_lock(genpd);
1972 if (genpd->has_provider) {
1973 genpd_unlock(genpd);
1974 pr_err("Provider present, unable to remove %s\n", genpd->name);
1978 if (!list_empty(&genpd->parent_links) || genpd->device_count) {
1979 genpd_unlock(genpd);
1980 pr_err("%s: unable to remove %s\n", __func__, genpd->name);
1984 list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
1990 list_del(&genpd->gpd_list_node);
1991 genpd_unlock(genpd);
1992 cancel_work_sync(&genpd->power_off_work);
1993 if (genpd_is_cpu_domain(genpd))
1994 free_cpumask_var(genpd->cpus);
1995 if (genpd->free_states)
1996 genpd->free_states(genpd->states, genpd->state_count);
1998 pr_debug("%s: removed %s\n", __func__, genpd->name);
2005 * @genpd: Pointer to PM domain that is to be removed.
2016 int pm_genpd_remove(struct generic_pm_domain *genpd)
2021 ret = genpd_remove(genpd);
2140 static bool genpd_present(const struct generic_pm_domain *genpd)
2145 if (gpd == genpd)
2153 * @genpd: Pointer to PM domain associated with the PM domain provider.
2156 struct generic_pm_domain *genpd)
2160 if (!np || !genpd)
2165 if (!genpd_present(genpd))
2168 genpd->dev.of_node = np;
2170 /* Parse genpd OPP table */
2171 if (genpd->set_performance_state) {
2172 ret = dev_pm_opp_of_add_table(&genpd->dev);
2175 dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
2184 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2185 WARN_ON(IS_ERR(genpd->opp_table));
2188 ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
2190 if (genpd->set_performance_state) {
2191 dev_pm_opp_put_opp_table(genpd->opp_table);
2192 dev_pm_opp_of_remove_table(&genpd->dev);
2198 genpd->provider = &np->fwnode;
2199 genpd->has_provider = true;
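Tying the running example together, a hypothetical provider probe() that initializes the domain and then exposes it through DT (my_pd_probe is illustrative; error handling kept minimal):

    #include <linux/platform_device.h>
    #include <linux/pm_domain.h>

    static int my_pd_probe(struct platform_device *pdev)
    {
    	int ret;

    	/* Register the domain, initially powered off. */
    	ret = pm_genpd_init(&my_pd, NULL, true);
    	if (ret)
    		return ret;

    	/* Let "#power-domain-cells = <0>" consumers find it. */
    	ret = of_genpd_add_provider_simple(pdev->dev.of_node, &my_pd);
    	if (ret)
    		pm_genpd_remove(&my_pd);

    	return ret;
    }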
2216 struct generic_pm_domain *genpd;
2229 genpd = data->domains[i];
2231 if (!genpd)
2233 if (!genpd_present(genpd))
2236 genpd->dev.of_node = np;
2238 /* Parse genpd OPP table */
2239 if (genpd->set_performance_state) {
2240 ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2243 dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
2252 genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
2253 WARN_ON(IS_ERR(genpd->opp_table));
2256 genpd->provider = &np->fwnode;
2257 genpd->has_provider = true;
2270 genpd = data->domains[i];
2272 if (!genpd)
2275 genpd->provider = NULL;
2276 genpd->has_provider = false;
2278 if (genpd->set_performance_state) {
2279 dev_pm_opp_put_opp_table(genpd->opp_table);
2280 dev_pm_opp_of_remove_table(&genpd->dev);
2345 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2356 genpd = provider->xlate(genpdspec, provider->data);
2357 if (!IS_ERR(genpd))
2363 return genpd;
2376 struct generic_pm_domain *genpd;
2381 genpd = genpd_get_from_provider(genpdspec);
2382 if (IS_ERR(genpd)) {
2383 ret = PTR_ERR(genpd);
2387 ret = genpd_add_device(genpd, dev, dev);
2487 struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2497 genpd = ret ? ERR_PTR(ret) : gpd;
2503 return genpd;
2514 .name = "genpd",
2555 /* Unregister the device if it was created by genpd. */
2682 /* Allocate and register device on the genpd bus. */
2687 dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
2812 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
2814 * @dn: The genpd device node
2857 * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
2863 * Returns performance state encoded in the OPP of the genpd. This calls
2864 * platform specific genpd->opp_to_performance_state() callback to translate
2872 struct generic_pm_domain *genpd = NULL;
2875 genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
2877 if (unlikely(!genpd->opp_to_performance_state))
2880 genpd_lock(genpd);
2881 state = genpd->opp_to_performance_state(genpd, opp);
2882 genpd_unlock(genpd);
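Providers that encode performance states as OPP "opp-level" values typically implement ->opp_to_performance_state() as a thin wrapper around dev_pm_opp_get_level(); a representative sketch (the function name is illustrative):

    #include <linux/pm_domain.h>
    #include <linux/pm_opp.h>

    static unsigned int my_pd_opp_to_perf_state(struct generic_pm_domain *genpd,
    					    struct dev_pm_opp *opp)
    {
    	return dev_pm_opp_get_level(opp);
    }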
2935 struct generic_pm_domain *genpd)
2947 ret = genpd_lock_interruptible(genpd);
2951 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
2953 if (!genpd_status_on(genpd))
2955 status_lookup[genpd->status], genpd->state_idx);
2958 status_lookup[genpd->status]);
2959 seq_printf(s, "%-30s %-15s ", genpd->name, state);
2964 * Also genpd->name is immutable.
2966 list_for_each_entry(link, &genpd->parent_links, parent_node) {
2968 if (!list_is_last(&link->parent_node, &genpd->parent_links))
2972 list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
2974 genpd_is_irq_safe(genpd) ?
2986 genpd_unlock(genpd);
2993 struct generic_pm_domain *genpd;
3004 list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3005 ret = genpd_summary_one(s, genpd);
3021 struct generic_pm_domain *genpd = s->private;
3024 ret = genpd_lock_interruptible(genpd);
3028 if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
3031 if (genpd->status == GENPD_STATE_OFF)
3032 seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
3033 genpd->state_idx);
3035 seq_printf(s, "%s\n", status_lookup[genpd->status]);
3037 genpd_unlock(genpd);
3043 struct generic_pm_domain *genpd = s->private;
3047 ret = genpd_lock_interruptible(genpd);
3051 list_for_each_entry(link, &genpd->parent_links, parent_node)
3054 genpd_unlock(genpd);
3060 struct generic_pm_domain *genpd = s->private;
3064 ret = genpd_lock_interruptible(genpd);
3070 for (i = 0; i < genpd->state_count; i++) {
3074 if ((genpd->status == GENPD_STATE_OFF) &&
3075 (genpd->state_idx == i))
3076 delta = ktime_sub(ktime_get(), genpd->accounting_time);
3079 ktime_add(genpd->states[i].idle_time, delta));
3081 genpd->states[i].usage, genpd->states[i].rejected);
3084 genpd_unlock(genpd);
3090 struct generic_pm_domain *genpd = s->private;
3094 ret = genpd_lock_interruptible(genpd);
3098 if (genpd->status == GENPD_STATE_ON)
3099 delta = ktime_sub(ktime_get(), genpd->accounting_time);
3102 ktime_add(genpd->on_time, delta)));
3104 genpd_unlock(genpd);
3110 struct generic_pm_domain *genpd = s->private;
3115 ret = genpd_lock_interruptible(genpd);
3119 for (i = 0; i < genpd->state_count; i++) {
3121 if ((genpd->status == GENPD_STATE_OFF) &&
3122 (genpd->state_idx == i))
3123 delta = ktime_sub(ktime_get(), genpd->accounting_time);
3125 total = ktime_add(total, genpd->states[i].idle_time);
3131 genpd_unlock(genpd);
3138 struct generic_pm_domain *genpd = s->private;
3143 ret = genpd_lock_interruptible(genpd);
3147 list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3149 genpd_is_irq_safe(genpd) ?
3158 genpd_unlock(genpd);
3164 struct generic_pm_domain *genpd = s->private;
3166 if (genpd_lock_interruptible(genpd))
3169 seq_printf(s, "%u\n", genpd->performance_state);
3171 genpd_unlock(genpd);
3187 struct generic_pm_domain *genpd;
3194 list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3195 d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
3198 d, genpd, &status_fops);
3200 d, genpd, &sub_domains_fops);
3202 d, genpd, &idle_states_fops);
3204 d, genpd, &active_time_fops);
3206 d, genpd, &total_idle_time_fops);
3208 d, genpd, &devices_fops);
3209 if (genpd->set_performance_state)
3211 d, genpd, &perf_state_fops);