Lines matching references to the identifier "policy" (drivers/cpufreq/cpufreq.c)

79 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
80 static int cpufreq_init_governor(struct cpufreq_policy *policy);
81 static void cpufreq_exit_governor(struct cpufreq_policy *policy);
82 static void cpufreq_governor_limits(struct cpufreq_policy *policy);
83 static int cpufreq_set_policy(struct cpufreq_policy *policy,
88 * Two notifier lists: the "policy" list is involved in the
89 * validation process for a new CPU frequency policy; the
116 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
119 return &policy->kobj;
168 * - policy->cpus with all possible CPUs
170 void cpufreq_generic_init(struct cpufreq_policy *policy,
174 policy->freq_table = table;
175 policy->cpuinfo.transition_latency = transition_latency;
181 cpumask_setall(policy->cpus);
187 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
189 return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
195 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
197 if (!policy || IS_ERR(policy->clk)) {
199 __func__, policy ? "clk" : "policy", cpu);
203 return clk_get_rate(policy->clk) / 1000;
208 * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
209 * @cpu: CPU to find the policy for.
211 * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
212 * the kobject reference counter of that policy. Return a valid policy on
215 * The policy returned by this function has to be released with the help of
220 struct cpufreq_policy *policy = NULL;
231 policy = cpufreq_cpu_get_raw(cpu);
232 if (policy)
233 kobject_get(&policy->kobj);
238 return policy;
243 * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
244 * @policy: cpufreq policy returned by cpufreq_cpu_get().
246 void cpufreq_cpu_put(struct cpufreq_policy *policy)
248 kobject_put(&policy->kobj);
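Taken together, cpufreq_cpu_get() and cpufreq_cpu_put() form the reference-counting pattern for short-lived policy access. A minimal caller sketch (the helper name is hypothetical; this and all sketches below assume <linux/cpufreq.h> is included):

        /* Hedged sketch: read policy->max for a CPU while holding a reference. */
        static unsigned int example_read_max_khz(unsigned int cpu)
        {
                struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
                unsigned int max_khz = 0;

                if (policy) {
                        max_khz = policy->max;
                        cpufreq_cpu_put(policy);        /* drop the kobject reference */
                }
                return max_khz;
        }

This is the same shape as cpufreq_quick_get_max() further down in the listing (lines 1762-1767).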
253 * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
254 * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
256 void cpufreq_cpu_release(struct cpufreq_policy *policy)
258 if (WARN_ON(!policy))
261 lockdep_assert_held(&policy->rwsem);
263 up_write(&policy->rwsem);
265 cpufreq_cpu_put(policy);
269 * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
270 * @cpu: CPU to find the policy for.
272 * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
273 * if the policy returned by it is not NULL, acquire its rwsem for writing.
274 * Return the policy if it is active or release it and return NULL otherwise.
276 * The policy returned by this function has to be released with the help of
282 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
284 if (!policy)
287 down_write(&policy->rwsem);
289 if (policy_is_inactive(policy)) {
290 cpufreq_cpu_release(policy);
294 return policy;
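cpufreq_cpu_acquire() and cpufreq_cpu_release() extend that pattern with write locking; cpufreq_update_policy() (line 2567 below) is the canonical user. A sketch of the same core-internal pattern, assuming the caller lives where these helpers are visible:

        /* Sketch: take the policy reference and its rwsem, do work, release. */
        static void example_refresh(unsigned int cpu)
        {
                struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);

                if (!policy)
                        return;                 /* no policy, or policy inactive */

                /* policy->rwsem is held for writing at this point. */
                refresh_frequency_limits(policy);

                cpufreq_cpu_release(policy);    /* unlock and drop the reference */
        }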
335 * @policy: cpufreq policy associated with the frequency transition.
343 static void cpufreq_notify_transition(struct cpufreq_policy *policy,
354 freqs->policy = policy;
366 if (policy->cur && policy->cur != freqs->old) {
368 freqs->old, policy->cur);
369 freqs->old = policy->cur;
381 cpumask_pr_args(policy->cpus));
383 for_each_cpu(cpu, policy->cpus)
389 cpufreq_stats_record_transition(policy, freqs->new);
390 policy->cur = freqs->new;
395 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
398 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
403 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
404 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
407 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
420 && current == policy->transition_task);
423 wait_event(policy->transition_wait, !policy->transition_ongoing);
425 spin_lock(&policy->transition_lock);
427 if (unlikely(policy->transition_ongoing)) {
428 spin_unlock(&policy->transition_lock);
432 policy->transition_ongoing = true;
433 policy->transition_task = current;
435 spin_unlock(&policy->transition_lock);
437 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
441 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
444 if (WARN_ON(!policy->transition_ongoing))
447 cpufreq_notify_post_transition(policy, freqs, transition_failed);
449 arch_set_freq_scale(policy->related_cpus,
450 policy->cur,
451 policy->cpuinfo.max_freq);
453 spin_lock(&policy->transition_lock);
454 policy->transition_ongoing = false;
455 policy->transition_task = NULL;
456 spin_unlock(&policy->transition_lock);
458 wake_up(&policy->transition_wait);
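For drivers implementing ->target() directly (rather than ->target_index(), for which the core's __target_index() at line 2148 issues these calls itself), the begin/end pair brackets the hardware write. A hedged driver-side sketch, where my_hw_set_freq() is a hypothetical hardware hook:

        static int my_target(struct cpufreq_policy *policy,
                             unsigned int target_freq, unsigned int relation)
        {
                struct cpufreq_freqs freqs = {
                        .old = policy->cur,
                        .new = target_freq,
                };
                int ret;

                cpufreq_freq_transition_begin(policy, &freqs);
                ret = my_hw_set_freq(policy->cpu, target_freq); /* hypothetical */
                cpufreq_freq_transition_end(policy, &freqs, ret != 0);

                return ret;
        }

Note that the core fills in freqs->policy itself (line 354), so the driver only supplies the old and new frequencies.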
484 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
485 * @policy: cpufreq policy to enable fast frequency switching for.
487 * Try to enable fast frequency switching for @policy.
494 void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
496 lockdep_assert_held(&policy->rwsem);
498 if (!policy->fast_switch_possible)
504 policy->fast_switch_enabled = true;
507 policy->cpu);
515 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
516 * @policy: cpufreq policy to disable fast frequency switching for.
518 void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
521 if (policy->fast_switch_enabled) {
522 policy->fast_switch_enabled = false;
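Whether cpufreq_enable_fast_switch() actually enables anything is keyed off policy->fast_switch_possible, which the driver sets from its ->init() callback along with a scheduler-context-safe ->fast_switch() hook. A hedged sketch (my_hw_write_pstate() is hypothetical and must not sleep):

        static unsigned int my_fast_switch(struct cpufreq_policy *policy,
                                           unsigned int target_freq)
        {
                my_hw_write_pstate(policy->cpu, target_freq);   /* hypothetical */
                return target_freq;     /* frequency actually set; 0 on failure */
        }

        static int my_fs_init(struct cpufreq_policy *policy)
        {
                policy->fast_switch_possible = true;
                return 0;
        }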
533 * @policy: associated policy to interrogate
536 * The target to driver frequency mapping is cached in the policy.
539 * given target_freq, subject to policy (min/max) and driver limitations.
541 unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
544 target_freq = clamp_val(target_freq, policy->min, policy->max);
545 policy->cached_target_freq = target_freq;
550 idx = cpufreq_frequency_table_target(policy, target_freq,
552 policy->cached_resolved_idx = idx;
553 return policy->freq_table[idx].frequency;
557 return cpufreq_driver->resolve_freq(policy, target_freq);
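cpufreq_driver_resolve_freq() lets a governor learn which frequency the driver would actually program for a request, without starting a transition; the schedutil governor uses it this way, and a later ->target_index() call can reuse the cached policy->cached_resolved_idx. A minimal sketch:

        /* Sketch: snap a utilization-derived request to a real driver frequency. */
        static unsigned int example_resolve(struct cpufreq_policy *policy,
                                            unsigned int requested_khz)
        {
                /* Clamped to policy->min/max, then mapped via the frequency
                 * table (or the driver's ->resolve_freq() callback). */
                return cpufreq_driver_resolve_freq(policy, requested_khz);
        }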
563 unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
567 if (policy->transition_delay_us)
568 return policy->transition_delay_us;
570 latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
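As a worked example of this fallback path (hedged: the scaling below is the LATENCY_MULTIPLIER logic used by kernels of this vintage, not visible in the matched lines): a driver reporting cpuinfo.transition_latency = 50,000 ns yields latency = 50 us here, which the rest of the function multiplies by LATENCY_MULTIPLIER (1000) and caps at 10 ms, so the effective transition delay becomes 10,000 us rather than 50 ms.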
682 * Write out information from the cpufreq policy of a CPU; object must be
688 (struct cpufreq_policy *policy, char *buf) \
690 return sprintf(buf, "%u\n", policy->object); \
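The show_one()/store_one() macro pair that these fragments come from stamps out one sysfs accessor per policy field; for instance, show_one(scaling_min_freq, min) in the source expands to roughly:

        static ssize_t show_scaling_min_freq(struct cpufreq_policy *policy, char *buf)
        {
                return sprintf(buf, "%u\n", policy->min);
        }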
704 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
709 freq = arch_freq_get_on_cpu(policy->cpu);
713 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
715 ret = sprintf(buf, "%u\n", policy->cur);
724 (struct cpufreq_policy *policy, const char *buf, size_t count) \
733 ret = freq_qos_update_request(policy->object##_freq_req, val);\
743 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
746 unsigned int cur_freq = __cpufreq_get(policy);
755 * show_scaling_governor - show the current governor or policy for the specified CPU
757 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
759 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
761 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
763 else if (policy->governor)
765 policy->governor->name);
770 * store_scaling_governor - set the governor or policy for the specified CPU
772 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
789 ret = cpufreq_set_policy(policy, NULL, new_pol);
797 ret = cpufreq_set_policy(policy, new_gov,
809 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
817 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
862 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
864 return cpufreq_show_cpus(policy->related_cpus, buf);
870 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
872 return cpufreq_show_cpus(policy->cpus, buf);
875 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
881 if (!policy->governor || !policy->governor->store_setspeed)
888 policy->governor->store_setspeed(policy, freq);
893 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
895 if (!policy->governor || !policy->governor->show_setspeed)
898 return policy->governor->show_setspeed(policy, buf);
904 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
908 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
911 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
949 struct cpufreq_policy *policy = to_policy(kobj);
956 down_read(&policy->rwsem);
957 ret = fattr->show(policy, buf);
958 up_read(&policy->rwsem);
966 struct cpufreq_policy *policy = to_policy(kobj);
980 if (cpu_online(policy->cpu)) {
981 down_write(&policy->rwsem);
982 ret = fattr->store(policy, buf, count);
983 up_write(&policy->rwsem);
993 struct cpufreq_policy *policy = to_policy(kobj);
995 complete(&policy->kobj_unregister);
1009 static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
1015 if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
1019 if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
1023 static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
1030 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
1038 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
1044 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
1049 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
1054 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1062 static int cpufreq_init_policy(struct cpufreq_policy *policy)
1069 /* Update policy governor to the one used before hotplug. */
1070 gov = get_governor(policy->last_governor);
1073 gov->name, policy->cpu);
1085 /* Use the default policy if there is no last_policy. */
1086 if (policy->last_policy) {
1087 pol = policy->last_policy;
1092 * nor "powersave", fall back to the initial policy
1096 pol = policy->policy;
1103 ret = cpufreq_set_policy(policy, gov, pol);
1110 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1115 if (cpumask_test_cpu(cpu, policy->cpus))
1118 down_write(&policy->rwsem);
1120 cpufreq_stop_governor(policy);
1122 cpumask_set_cpu(cpu, policy->cpus);
1125 ret = cpufreq_start_governor(policy);
1129 up_write(&policy->rwsem);
1133 void refresh_frequency_limits(struct cpufreq_policy *policy)
1135 if (!policy_is_inactive(policy)) {
1136 pr_debug("updating policy for CPU %u\n", policy->cpu);
1138 cpufreq_set_policy(policy, policy->governor, policy->policy);
1145 struct cpufreq_policy *policy =
1148 pr_debug("handle_update for cpu %u called\n", policy->cpu);
1149 down_write(&policy->rwsem);
1150 refresh_frequency_limits(policy);
1151 up_write(&policy->rwsem);
1157 struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);
1159 schedule_work(&policy->update);
1166 struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);
1168 schedule_work(&policy->update);
1172 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1177 down_write(&policy->rwsem);
1178 cpufreq_stats_free_table(policy);
1179 kobj = &policy->kobj;
1180 cmp = &policy->kobj_unregister;
1181 up_write(&policy->rwsem);
1196 struct cpufreq_policy *policy;
1203 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1204 if (!policy)
1207 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1210 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1213 if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1216 init_completion(&policy->kobj_unregister);
1217 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1218 cpufreq_global_kobject, "policy%u", cpu);
1220 dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
1222 * The entire policy object will be freed below, but the extra
1226 kobject_put(&policy->kobj);
1230 freq_constraints_init(&policy->constraints);
1232 policy->nb_min.notifier_call = cpufreq_notifier_min;
1233 policy->nb_max.notifier_call = cpufreq_notifier_max;
1235 ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
1236 &policy->nb_min);
1239 ret, cpumask_pr_args(policy->cpus));
1243 ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
1244 &policy->nb_max);
1247 ret, cpumask_pr_args(policy->cpus));
1251 INIT_LIST_HEAD(&policy->policy_list);
1252 init_rwsem(&policy->rwsem);
1253 spin_lock_init(&policy->transition_lock);
1254 init_waitqueue_head(&policy->transition_wait);
1255 INIT_WORK(&policy->update, handle_update);
1257 policy->cpu = cpu;
1258 return policy;
1261 freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
1262 &policy->nb_min);
1264 cpufreq_policy_put_kobj(policy);
1266 free_cpumask_var(policy->real_cpus);
1268 free_cpumask_var(policy->related_cpus);
1270 free_cpumask_var(policy->cpus);
1272 kfree(policy);
1277 static void cpufreq_policy_free(struct cpufreq_policy *policy)
1282 /* Remove policy from list */
1284 list_del(&policy->policy_list);
1286 for_each_cpu(cpu, policy->related_cpus)
1290 freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
1291 &policy->nb_max);
1292 freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
1293 &policy->nb_min);
1295 /* Cancel any pending policy->update work before freeing the policy. */
1296 cancel_work_sync(&policy->update);
1298 if (policy->max_freq_req) {
1304 CPUFREQ_REMOVE_POLICY, policy);
1305 freq_qos_remove_request(policy->max_freq_req);
1308 freq_qos_remove_request(policy->min_freq_req);
1309 kfree(policy->min_freq_req);
1311 cpufreq_policy_put_kobj(policy);
1312 free_cpumask_var(policy->real_cpus);
1313 free_cpumask_var(policy->related_cpus);
1314 free_cpumask_var(policy->cpus);
1315 kfree(policy);
1320 struct cpufreq_policy *policy;
1328 /* Check if this CPU already has a policy to manage it */
1329 policy = per_cpu(cpufreq_cpu_data, cpu);
1330 if (policy) {
1331 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1332 if (!policy_is_inactive(policy))
1333 return cpufreq_add_policy_cpu(policy, cpu);
1335 /* This is the only online CPU for the policy. Start over. */
1337 down_write(&policy->rwsem);
1338 policy->cpu = cpu;
1339 policy->governor = NULL;
1340 up_write(&policy->rwsem);
1343 policy = cpufreq_policy_alloc(cpu);
1344 if (!policy)
1349 ret = cpufreq_driver->online(policy);
1356 /* Recover policy->cpus using related_cpus */
1357 cpumask_copy(policy->cpus, policy->related_cpus);
1359 cpumask_copy(policy->cpus, cpumask_of(cpu));
1365 ret = cpufreq_driver->init(policy);
1373 * The initialization has succeeded and the policy is online.
1377 ret = cpufreq_table_validate_and_sort(policy);
1381 /* related_cpus should at least include policy->cpus. */
1382 cpumask_copy(policy->related_cpus, policy->cpus);
1385 down_write(&policy->rwsem);
1390 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1393 for_each_cpu(j, policy->related_cpus) {
1394 per_cpu(cpufreq_cpu_data, j) = policy;
1395 add_cpu_dev_symlink(policy, j, get_cpu_device(j));
1398 policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
1400 if (!policy->min_freq_req)
1403 ret = freq_qos_add_request(&policy->constraints,
1404 policy->min_freq_req, FREQ_QOS_MIN,
1411 kfree(policy->min_freq_req);
1412 policy->min_freq_req = NULL;
1421 policy->max_freq_req = policy->min_freq_req + 1;
1423 ret = freq_qos_add_request(&policy->constraints,
1424 policy->max_freq_req, FREQ_QOS_MAX,
1427 policy->max_freq_req = NULL;
1432 CPUFREQ_CREATE_POLICY, policy);
1436 policy->cur = cpufreq_driver->get(policy->cpu);
1437 if (!policy->cur) {
1453 * for the next freq which is >= policy->cur ('cur' must be set by now,
1457 * We are passing target-freq as "policy->cur - 1" otherwise
1458 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1463 unsigned int old_freq = policy->cur;
1466 ret = cpufreq_frequency_table_get_index(policy, old_freq);
1468 ret = __cpufreq_driver_target(policy, old_freq - 1,
1478 __func__, policy->cpu, old_freq, policy->cur);
1483 ret = cpufreq_add_dev_interface(policy);
1487 cpufreq_stats_create_table(policy);
1490 list_add(&policy->policy_list, &cpufreq_policy_list);
1494 ret = cpufreq_init_policy(policy);
1496 pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1501 up_write(&policy->rwsem);
1503 kobject_uevent(&policy->kobj, KOBJ_ADD);
1505 /* Driver callback for completing setup once the policy is fully initialized */
1507 cpufreq_driver->ready(policy);
1510 policy->cdev = of_cpufreq_cooling_register(policy);
1517 for_each_cpu(j, policy->real_cpus)
1518 remove_cpu_dev_symlink(policy, get_cpu_device(j));
1520 up_write(&policy->rwsem);
1524 cpufreq_driver->offline(policy);
1528 cpufreq_driver->exit(policy);
1531 cpufreq_policy_free(policy);
1542 struct cpufreq_policy *policy;
1555 policy = per_cpu(cpufreq_cpu_data, cpu);
1556 if (policy)
1557 add_cpu_dev_symlink(policy, cpu, dev);
1562 static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
1567 cpufreq_stop_governor(policy);
1569 cpumask_clear_cpu(cpu, policy->cpus);
1571 if (!policy_is_inactive(policy)) {
1573 if (cpu == policy->cpu)
1574 policy->cpu = cpumask_any(policy->cpus);
1576 /* Start the governor again for the active policy. */
1578 ret = cpufreq_start_governor(policy);
1587 strncpy(policy->last_governor, policy->governor->name,
1590 policy->last_policy = policy->policy;
1593 cpufreq_cooling_unregister(policy->cdev);
1594 policy->cdev = NULL;
1598 cpufreq_driver->stop_cpu(policy);
1601 cpufreq_exit_governor(policy);
1608 cpufreq_driver->offline(policy);
1613 cpufreq_driver->exit(policy);
1615 policy->freq_table = NULL;
1620 struct cpufreq_policy *policy;
1624 policy = cpufreq_cpu_get_raw(cpu);
1625 if (!policy) {
1630 down_write(&policy->rwsem);
1632 __cpufreq_offline(cpu, policy);
1634 up_write(&policy->rwsem);
1646 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1648 if (!policy)
1651 down_write(&policy->rwsem);
1654 __cpufreq_offline(cpu, policy);
1656 cpumask_clear_cpu(cpu, policy->real_cpus);
1657 remove_cpu_dev_symlink(policy, dev);
1659 if (!cpumask_empty(policy->real_cpus)) {
1660 up_write(&policy->rwsem);
1666 cpufreq_driver->exit(policy);
1668 up_write(&policy->rwsem);
1670 cpufreq_policy_free(policy);
1676 * @policy: policy managing CPUs
1682 static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1688 policy->cur, new_freq);
1690 freqs.old = policy->cur;
1693 cpufreq_freq_transition_begin(policy, &freqs);
1694 cpufreq_freq_transition_end(policy, &freqs, 0);
1697 static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
1701 new_freq = cpufreq_driver->get(policy->cpu);
1706 * If fast frequency switching is used with the given policy, the check
1707 * against policy->cur is pointless, so skip it in that case.
1709 if (policy->fast_switch_enabled || !has_target())
1712 if (policy->cur != new_freq) {
1713 cpufreq_out_of_sync(policy, new_freq);
1715 schedule_work(&policy->update);
1722 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1730 struct cpufreq_policy *policy;
1744 policy = cpufreq_cpu_get(cpu);
1745 if (policy) {
1746 ret_freq = policy->cur;
1747 cpufreq_cpu_put(policy);
1762 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1765 if (policy) {
1766 ret_freq = policy->max;
1767 cpufreq_cpu_put(policy);
1782 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1785 if (policy) {
1786 ret_freq = policy->cpuinfo.max_freq;
1787 cpufreq_cpu_put(policy);
1794 static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1796 if (unlikely(policy_is_inactive(policy)))
1799 return cpufreq_verify_current_freq(policy, true);
1810 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1813 if (policy) {
1814 down_read(&policy->rwsem);
1816 ret_freq = __cpufreq_get(policy);
1817 up_read(&policy->rwsem);
1819 cpufreq_cpu_put(policy);
1837 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1841 if (!policy->suspend_freq) {
1847 policy->suspend_freq);
1849 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1853 __func__, policy->suspend_freq, ret);
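A driver opts in to cpufreq_generic_suspend() by setting policy->suspend_freq during ->init() and pointing its .suspend callback at the helper. A hedged wiring sketch with an illustrative two-entry table (verify/target_index hooks omitted):

        static struct cpufreq_frequency_table my_freq_table[] = {
                { .frequency = 400000 },                /* kHz, illustrative */
                { .frequency = 800000 },
                { .frequency = CPUFREQ_TABLE_END },
        };

        static int my_susp_init(struct cpufreq_policy *policy)
        {
                cpufreq_generic_init(policy, my_freq_table, 100000);
                policy->suspend_freq = 400000;  /* programmed across suspend */
                return 0;
        }

        static struct cpufreq_driver my_driver = {
                .name           = "my-cpufreq",         /* hypothetical */
                .init           = my_susp_init,
                .suspend        = cpufreq_generic_suspend,
                /* .verify, .target_index, etc. omitted from this sketch */
        };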
1869 struct cpufreq_policy *policy;
1879 for_each_active_policy(policy) {
1881 down_write(&policy->rwsem);
1882 cpufreq_stop_governor(policy);
1883 up_write(&policy->rwsem);
1886 if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
1903 struct cpufreq_policy *policy;
1919 for_each_active_policy(policy) {
1920 if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
1922 policy);
1924 down_write(&policy->rwsem);
1925 ret = cpufreq_start_governor(policy);
1926 up_write(&policy->rwsem);
1929 pr_err("%s: Failed to start governor for policy: %p\n",
1930 __func__, policy);
1989 * changes in cpufreq policy.
2075 * @policy: cpufreq policy to switch the frequency for.
2085 * This function must not be called if policy->fast_switch_enabled is unset.
2088 * twice in parallel for the same policy and that it will never be called in
2089 * parallel with either ->target() or ->target_index() for the same policy.
2096 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
2102 target_freq = clamp_val(target_freq, policy->min, policy->max);
2103 freq = cpufreq_driver->fast_switch(policy, target_freq);
2108 policy->cur = freq;
2109 arch_set_freq_scale(policy->related_cpus, freq,
2110 policy->cpuinfo.max_freq);
2111 cpufreq_stats_record_transition(policy, freq);
2114 for_each_cpu(cpu, policy->cpus)
2123 static int __target_intermediate(struct cpufreq_policy *policy,
2128 freqs->new = cpufreq_driver->get_intermediate(policy, index);
2135 __func__, policy->cpu, freqs->old, freqs->new);
2137 cpufreq_freq_transition_begin(policy, freqs);
2138 ret = cpufreq_driver->target_intermediate(policy, index);
2139 cpufreq_freq_transition_end(policy, freqs, ret);
2148 static int __target_index(struct cpufreq_policy *policy, int index)
2150 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
2152 unsigned int newfreq = policy->freq_table[index].frequency;
2156 if (newfreq == policy->cur)
2163 retval = __target_intermediate(policy, &freqs, index);
2175 __func__, policy->cpu, freqs.old, freqs.new);
2177 cpufreq_freq_transition_begin(policy, &freqs);
2180 retval = cpufreq_driver->target_index(policy, index);
2186 cpufreq_freq_transition_end(policy, &freqs, retval);
2196 freqs.new = policy->restore_freq;
2197 cpufreq_freq_transition_begin(policy, &freqs);
2198 cpufreq_freq_transition_end(policy, &freqs, 0);
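The intermediate-frequency dance in __target_intermediate()/__target_index() exists for drivers that must pass through a safe frequency (say, a PLL bypass clock) while retuning. Such a driver supplies both hooks; a hedged sketch with hypothetical PLL helpers:

        static unsigned int my_get_intermediate(struct cpufreq_policy *policy,
                                                unsigned int index)
        {
                /* Returning 0 tells the core to skip the intermediate step. */
                return MY_PLL_BYPASS_KHZ;                       /* hypothetical */
        }

        static int my_target_intermediate(struct cpufreq_policy *policy,
                                          unsigned int index)
        {
                return my_switch_to_pll_bypass(policy->cpu);    /* hypothetical */
        }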
2205 int __cpufreq_driver_target(struct cpufreq_policy *policy,
2216 target_freq = clamp_val(target_freq, policy->min, policy->max);
2219 policy->cpu, target_freq, relation, old_target_freq);
2227 if (target_freq == policy->cur &&
2232 policy->restore_freq = policy->cur;
2235 return cpufreq_driver->target(policy, target_freq, relation);
2240 index = cpufreq_frequency_table_target(policy, target_freq, relation);
2242 return __target_index(policy, index);
2246 int cpufreq_driver_target(struct cpufreq_policy *policy,
2252 down_write(&policy->rwsem);
2254 ret = __cpufreq_driver_target(policy, target_freq, relation);
2256 up_write(&policy->rwsem);
2267 static int cpufreq_init_governor(struct cpufreq_policy *policy)
2278 if (!policy->governor)
2282 if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
2288 policy->governor->name, gov->name);
2289 policy->governor = gov;
2295 if (!try_module_get(policy->governor->owner))
2298 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2300 if (policy->governor->init) {
2301 ret = policy->governor->init(policy);
2303 module_put(policy->governor->owner);
2308 policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);
2313 static void cpufreq_exit_governor(struct cpufreq_policy *policy)
2315 if (cpufreq_suspended || !policy->governor)
2318 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2320 if (policy->governor->exit)
2321 policy->governor->exit(policy);
2323 module_put(policy->governor->owner);
2326 int cpufreq_start_governor(struct cpufreq_policy *policy)
2333 if (!policy->governor)
2336 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2339 cpufreq_verify_current_freq(policy, false);
2341 if (policy->governor->start) {
2342 ret = policy->governor->start(policy);
2347 if (policy->governor->limits)
2348 policy->governor->limits(policy);
2353 void cpufreq_stop_governor(struct cpufreq_policy *policy)
2355 if (cpufreq_suspended || !policy->governor)
2358 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2360 if (policy->governor->stop)
2361 policy->governor->stop(policy);
2364 static void cpufreq_governor_limits(struct cpufreq_policy *policy)
2366 if (cpufreq_suspended || !policy->governor)
2369 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2371 if (policy->governor->limits)
2372 policy->governor->limits(policy);
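The init/exit/start/stop/limits sequence above is the whole governor lifecycle; a skeletal governor only has to populate the matching struct cpufreq_governor hooks (all optional) and register itself. A hedged sketch, with frequency selection elided to a single re-clamp:

        static void my_gov_limits(struct cpufreq_policy *policy)
        {
                /* Invoked when policy->min/max change; re-clamp the current freq. */
                __cpufreq_driver_target(policy, policy->cur, CPUFREQ_RELATION_C);
        }

        static struct cpufreq_governor my_governor = {
                .name   = "my_gov",             /* hypothetical */
                .limits = my_gov_limits,
                .owner  = THIS_MODULE,
        };

        /* module init would call cpufreq_register_governor(&my_governor) */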
2400 struct cpufreq_policy *policy;
2411 for_each_inactive_policy(policy) {
2412 if (!strcmp(policy->last_governor, governor->name)) {
2413 policy->governor = NULL;
2414 strcpy(policy->last_governor, "\0");
2432 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2434 * @cpu: CPU to find the policy for
2436 * Reads the current cpufreq policy.
2438 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2441 if (!policy)
2448 memcpy(policy, cpu_policy, sizeof(*policy));
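Because cpufreq_get_policy() copies the whole policy out under the policy reference, it is the safe way for outside code (thermal drivers, for example) to snapshot limits. Usage sketch:

        /* Sketch: snapshot a CPU's current limits; returns 0 on success. */
        static void example_snapshot(unsigned int cpu)
        {
                struct cpufreq_policy snap;     /* note: sizeable stack object */

                if (!cpufreq_get_policy(&snap, cpu))
                        pr_info("cpu%u limits: %u..%u kHz\n", cpu, snap.min, snap.max);
        }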
2456 * cpufreq_set_policy - Modify cpufreq policy parameters.
2457 * @policy: Policy object to modify.
2462 * limits to be set for the policy, update @policy with the verified limits
2464 * carry out a governor update for @policy. That is, run the current governor's
2466 * @policy) or replace the governor for @policy with @new_gov.
2468 * The cpuinfo part of @policy is not updated by this function.
2470 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2478 memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2479 new_data.freq_table = policy->freq_table;
2480 new_data.cpu = policy->cpu;
2485 new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
2486 new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
2488 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2499 policy->min = new_data.min;
2500 policy->max = new_data.max;
2501 trace_cpu_frequency_limits(policy);
2503 policy->cached_target_freq = UINT_MAX;
2506 policy->min, policy->max);
2509 policy->policy = new_pol;
2511 return cpufreq_driver->setpolicy(policy);
2514 if (new_gov == policy->governor) {
2516 cpufreq_governor_limits(policy);
2523 old_gov = policy->governor;
2526 cpufreq_stop_governor(policy);
2527 cpufreq_exit_governor(policy);
2531 policy->governor = new_gov;
2532 ret = cpufreq_init_governor(policy);
2534 ret = cpufreq_start_governor(policy);
2537 sched_cpufreq_governor_change(policy, old_gov);
2540 cpufreq_exit_governor(policy);
2544 pr_debug("starting governor %s failed\n", policy->governor->name);
2546 policy->governor = old_gov;
2547 if (cpufreq_init_governor(policy))
2548 policy->governor = NULL;
2550 cpufreq_start_governor(policy);
2557 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
2558 * @cpu: CPU to re-evaluate the policy for.
2560 * Update the current frequency for the cpufreq policy of @cpu and use
2562 * evaluation of policy notifiers and the cpufreq driver's ->verify() callback
2563 * for the policy in question, among other things.
2567 struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
2569 if (!policy)
2577 (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
2580 refresh_frequency_limits(policy);
2583 cpufreq_cpu_release(policy);
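Platform code that learns of a firmware-imposed limit change (an ACPI notification, say) simply calls this re-evaluation entry point for each affected CPU:

        /* Sketch: re-evaluate every affected CPU after a firmware limit change. */
        static void example_limits_changed(const struct cpumask *cpus)
        {
                unsigned int cpu;

                for_each_cpu(cpu, cpus)
                        cpufreq_update_policy(cpu);
        }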
2588 * cpufreq_update_limits - Update policy limits for a given CPU.
2589 * @cpu: CPU to update the policy limits for.
2606 static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
2610 if (!policy->freq_table)
2613 ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
2619 ret = freq_qos_update_request(policy->max_freq_req, policy->max);
2628 struct cpufreq_policy *policy;
2640 for_each_active_policy(policy) {
2641 ret = cpufreq_driver->set_boost(policy, state);