1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
4 * for Non-CPU Devices.
5 *
6 * Copyright (C) 2011 Samsung Electronics
7 * MyungJoo Ham <myungjoo.ham@samsung.com>
8 */
9
10 #include <linux/kernel.h>
11 #include <linux/kmod.h>
12 #include <linux/sched.h>
13 #include <linux/debugfs.h>
14 #include <linux/errno.h>
15 #include <linux/err.h>
16 #include <linux/init.h>
17 #include <linux/export.h>
18 #include <linux/slab.h>
19 #include <linux/stat.h>
20 #include <linux/pm_opp.h>
21 #include <linux/devfreq.h>
22 #include <linux/workqueue.h>
23 #include <linux/platform_device.h>
24 #include <linux/list.h>
25 #include <linux/printk.h>
26 #include <linux/hrtimer.h>
27 #include <linux/of.h>
28 #include <linux/pm_qos.h>
29 #include "governor.h"
30
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/devfreq.h>
33
34 #define HZ_PER_KHZ 1000
35
36 static struct class *devfreq_class;
37 static struct dentry *devfreq_debugfs;
38
39 /*
40 * devfreq core provides delayed work based load monitoring helper
41 * functions. Governors can use these or can implement their own
42 * monitoring mechanism.
43 */
44 static struct workqueue_struct *devfreq_wq;
45
46 /* The list of all device-devfreq governors */
47 static LIST_HEAD(devfreq_governor_list);
48 /* The list of all device-devfreq */
49 static LIST_HEAD(devfreq_list);
50 static DEFINE_MUTEX(devfreq_list_lock);
51
52 static const char timer_name[][DEVFREQ_NAME_LEN] = {
53 [DEVFREQ_TIMER_DEFERRABLE] = {"deferrable"},
54 [DEVFREQ_TIMER_DELAYED] = {"delayed"},
55 };
56
57 /**
58 * find_device_devfreq() - find devfreq struct using device pointer
59 * @dev: device pointer used to lookup device devfreq.
60 *
61 * Search the list of device devfreqs and return the matched device's
62 * devfreq info. devfreq_list_lock should be held by the caller.
63 */
find_device_devfreq(struct device *dev)64 static struct devfreq *find_device_devfreq(struct device *dev)
65 {
66 struct devfreq *tmp_devfreq;
67
68 lockdep_assert_held(&devfreq_list_lock);
69
70 if (IS_ERR_OR_NULL(dev)) {
71 pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
72 return ERR_PTR(-EINVAL);
73 }
74
75 list_for_each_entry(tmp_devfreq, &devfreq_list, node)
76 {
77 if (tmp_devfreq->dev.parent == dev) {
78 return tmp_devfreq;
79 }
80 }
81
82 return ERR_PTR(-ENODEV);
83 }
84
find_available_min_freq(struct devfreq *devfreq)85 static unsigned long find_available_min_freq(struct devfreq *devfreq)
86 {
87 struct dev_pm_opp *opp;
88 unsigned long min_freq = 0;
89
90 opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq);
91 if (IS_ERR(opp)) {
92 min_freq = 0;
93 } else {
94 dev_pm_opp_put(opp);
95 }
96
97 return min_freq;
98 }
99
find_available_max_freq(struct devfreq *devfreq)100 static unsigned long find_available_max_freq(struct devfreq *devfreq)
101 {
102 struct dev_pm_opp *opp;
103 unsigned long max_freq = ULONG_MAX;
104
105 opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq);
106 if (IS_ERR(opp)) {
107 max_freq = 0;
108 } else {
109 dev_pm_opp_put(opp);
110 }
111
112 return max_freq;
113 }
114
115 /**
116 * get_freq_range() - Get the current freq range
117 * @devfreq: the devfreq instance
118 * @min_freq: the min frequency
119 * @max_freq: the max frequency
120 *
121 * This takes into consideration all constraints.
122 */
static void get_freq_range(struct devfreq *devfreq, unsigned long *min_freq, unsigned long *max_freq)
{
	unsigned long *freq_table = devfreq->profile->freq_table;
	s32 qos_min_freq, qos_max_freq;

	lockdep_assert_held(&devfreq->lock);

	/*
	 * Initialize minimum/maximum frequency from freq table.
	 * The devfreq drivers can initialize this in either ascending or
	 * descending order and devfreq core supports both.
	 */
	if (freq_table[0] < freq_table[devfreq->profile->max_state - 1]) {
		*min_freq = freq_table[0];
		*max_freq = freq_table[devfreq->profile->max_state - 1];
	} else {
		*min_freq = freq_table[devfreq->profile->max_state - 1];
		*max_freq = freq_table[0];
	}

	/*
	 * Apply constraints from PM QoS. QoS values are expressed in kHz
	 * while the freq table is in Hz, hence the HZ_PER_KHZ scaling.
	 * The min constraint can be applied unconditionally (a default of 0
	 * scales to 0 and leaves *min_freq untouched), but the max one is
	 * only applied for a non-default request, since scaling the default
	 * "no limit" value to Hz would not represent "unlimited" anymore.
	 */
	qos_min_freq = dev_pm_qos_read_value(devfreq->dev.parent, DEV_PM_QOS_MIN_FREQUENCY);
	qos_max_freq = dev_pm_qos_read_value(devfreq->dev.parent, DEV_PM_QOS_MAX_FREQUENCY);
	*min_freq = max(*min_freq, (unsigned long)HZ_PER_KHZ * qos_min_freq);
	if (qos_max_freq != PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE) {
		*max_freq = min(*max_freq, (unsigned long)HZ_PER_KHZ * qos_max_freq);
	}

	/* Apply constraints from OPP interface (enabled/disabled OPPs). */
	*min_freq = max(*min_freq, devfreq->scaling_min_freq);
	*max_freq = min(*max_freq, devfreq->scaling_max_freq);

	/* If the constraints conflict, the max limit wins. */
	if (*min_freq > *max_freq) {
		*min_freq = *max_freq;
	}
}
159
160 /**
161 * devfreq_get_freq_level() - Lookup freq_table for the frequency
162 * @devfreq: the devfreq instance
163 * @freq: the target frequency
164 */
devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)165 static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
166 {
167 int lev;
168
169 for (lev = 0; lev < devfreq->profile->max_state; lev++) {
170 if (freq == devfreq->profile->freq_table[lev]) {
171 return lev;
172 }
173 }
174
175 return -EINVAL;
176 }
177
/*
 * set_freq_table() - Build profile->freq_table from the device's OPP table.
 * @devfreq: the devfreq instance whose profile lacks a freq table.
 *
 * Allocates a devm-managed table with one entry per OPP and fills it in
 * ascending frequency order. On failure, max_state is reset to 0 so the
 * profile is left in a consistent "no table" state.
 */
static int set_freq_table(struct devfreq *devfreq)
{
	struct devfreq_dev_profile *profile = devfreq->profile;
	struct dev_pm_opp *opp;
	unsigned long freq;
	int i, count;

	/* Initialize the freq_table from OPP table */
	count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
	if (count <= 0) {
		return -EINVAL;
	}

	profile->max_state = count;
	profile->freq_table =
		devm_kcalloc(devfreq->dev.parent, profile->max_state, sizeof(*profile->freq_table), GFP_KERNEL);
	if (!profile->freq_table) {
		profile->max_state = 0;
		return -ENOMEM;
	}

	/*
	 * Walk the OPPs in ascending order: find_freq_ceil(freq) returns the
	 * lowest OPP >= freq and writes it back into @freq, and the freq++
	 * in the loop header then makes the next ceil lookup skip past the
	 * OPP just recorded.
	 */
	for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
		opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
		if (IS_ERR(opp)) {
			/* Fewer OPPs than counted above: undo and bail out. */
			devm_kfree(devfreq->dev.parent, profile->freq_table);
			profile->max_state = 0;
			return PTR_ERR(opp);
		}
		dev_pm_opp_put(opp);
		profile->freq_table[i] = freq;
	}

	return 0;
}
212
213 /**
214 * devfreq_update_status() - Update statistics of devfreq behavior
215 * @devfreq: the devfreq instance
216 * @freq: the update target frequency
217 */
int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
{
	int lev, prev_lev, ret = 0;
	u64 cur_time;

	lockdep_assert_held(&devfreq->lock);
	cur_time = get_jiffies_64();

	/* Immediately exit if previous_freq is not initialized yet. */
	if (!devfreq->previous_freq) {
		goto out;
	}

	/*
	 * Charge the time since the last update to the level we were running
	 * at. If previous_freq is not in the table, skip the accounting but
	 * still refresh last_update below so the error is not compounded.
	 */
	prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
	if (prev_lev < 0) {
		ret = prev_lev;
		goto out;
	}

	devfreq->stats.time_in_state[prev_lev] += cur_time - devfreq->stats.last_update;

	lev = devfreq_get_freq_level(devfreq, freq);
	if (lev < 0) {
		ret = lev;
		goto out;
	}

	/* Record an actual level change in the transition matrix. */
	if (lev != prev_lev) {
		devfreq->stats.trans_table[(prev_lev * devfreq->profile->max_state) + lev]++;
		devfreq->stats.total_trans++;
	}

out:
	/* Always advance the timestamp, even on the early-exit paths. */
	devfreq->stats.last_update = cur_time;
	return ret;
}
EXPORT_SYMBOL(devfreq_update_status);
255
256 /**
257 * find_devfreq_governor() - find devfreq governor from name
258 * @name: name of the governor
259 *
260 * Search the list of devfreq governors and return the matched
261 * governor's pointer. devfreq_list_lock should be held by the caller.
262 */
find_devfreq_governor(const char *name)263 static struct devfreq_governor *find_devfreq_governor(const char *name)
264 {
265 struct devfreq_governor *tmp_governor;
266
267 lockdep_assert_held(&devfreq_list_lock);
268
269 if (IS_ERR_OR_NULL(name)) {
270 pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
271 return ERR_PTR(-EINVAL);
272 }
273
274 list_for_each_entry(tmp_governor, &devfreq_governor_list, node)
275 {
276 if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN)) {
277 return tmp_governor;
278 }
279 }
280
281 return ERR_PTR(-ENODEV);
282 }
283
284 /**
285 * try_then_request_governor() - Try to find the governor and request the
286 * module if is not found.
287 * @name: name of the governor
288 *
289 * Search the list of devfreq governors and request the module and try again
290 * if is not found. This can happen when both drivers (the governor driver
291 * and the driver that call devfreq_add_device) are built as modules.
292 * devfreq_list_lock should be held by the caller. Returns the matched
293 * governor's pointer or an error pointer.
294 */
static struct devfreq_governor *try_then_request_governor(const char *name)
{
	struct devfreq_governor *governor;
	int err = 0;

	lockdep_assert_held(&devfreq_list_lock);

	if (IS_ERR_OR_NULL(name)) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	governor = find_devfreq_governor(name);
	if (IS_ERR(governor)) {
		/*
		 * Drop the list lock before request_module(): module load can
		 * sleep for a long time and the loaded governor presumably
		 * registers itself, which needs this lock — holding it here
		 * would risk a deadlock (NOTE(review): registration path not
		 * visible in this file; confirm it takes devfreq_list_lock).
		 */
		mutex_unlock(&devfreq_list_lock);

		/* The simple_ondemand governor's module name drops the underscore. */
		if (!strncmp(name, DEVFREQ_GOV_SIMPLE_ONDEMAND, DEVFREQ_NAME_LEN)) {
			err = request_module("governor_%s", "simpleondemand");
		} else {
			err = request_module("governor_%s", name);
		}
		/* Restore previous state before return */
		mutex_lock(&devfreq_list_lock);
		if (err) {
			return (err < 0) ? ERR_PTR(err) : ERR_PTR(-EINVAL);
		}

		/* Retry: the freshly loaded module should have registered by now. */
		governor = find_devfreq_governor(name);
	}

	return governor;
}
327
devfreq_notify_transition(struct devfreq *devfreq, struct devfreq_freqs *freqs, unsigned int state)328 static int devfreq_notify_transition(struct devfreq *devfreq, struct devfreq_freqs *freqs, unsigned int state)
329 {
330 if (!devfreq) {
331 return -EINVAL;
332 }
333
334 switch (state) {
335 case DEVFREQ_PRECHANGE:
336 srcu_notifier_call_chain(&devfreq->transition_notifier_list, DEVFREQ_PRECHANGE, freqs);
337 break;
338
339 case DEVFREQ_POSTCHANGE:
340 srcu_notifier_call_chain(&devfreq->transition_notifier_list, DEVFREQ_POSTCHANGE, freqs);
341 break;
342 default:
343 return -EINVAL;
344 }
345
346 return 0;
347 }
348
/*
 * devfreq_set_target() - Ask the driver to switch to @new_freq.
 * @devfreq: the devfreq instance.
 * @new_freq: target frequency; the driver's ->target() may round it.
 * @flags: DEVFREQ_FLAG_* hints forwarded to the driver.
 *
 * Wraps the driver's ->target() callback with PRE/POSTCHANGE notifications
 * and transition-statistics bookkeeping. Returns the driver's error code.
 */
static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq, u32 flags)
{
	struct devfreq_freqs freqs;
	unsigned long cur_freq;
	int err = 0;

	/* Prefer the driver-reported frequency; fall back to our cached one. */
	if (devfreq->profile->get_cur_freq) {
		devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
	} else {
		cur_freq = devfreq->previous_freq;
	}

	freqs.old = cur_freq;
	freqs.new = new_freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);

	err = devfreq->profile->target(devfreq->dev.parent, &new_freq, flags);
	if (err) {
		/*
		 * The switch failed: still send POSTCHANGE, but with new ==
		 * old so listeners see that nothing actually changed.
		 */
		freqs.new = cur_freq;
		devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
		return err;
	}

	/* ->target() may have rewritten new_freq to the frequency it chose. */
	freqs.new = new_freq;
	devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);

	if (devfreq_update_status(devfreq, new_freq)) {
		dev_err(&devfreq->dev, "Couldn't update frequency transition information.\n");
	}

	devfreq->previous_freq = new_freq;

	/* Remember where to return to after a system suspend, if one is set. */
	if (devfreq->suspend_freq) {
		devfreq->resume_freq = new_freq;
	}

	return err;
}
387
388 /* Load monitoring helper functions for governors use */
389
390 /**
391 * update_devfreq() - Reevaluate the device and configure frequency.
392 * @devfreq: the devfreq instance.
393 *
394 * Note: Lock devfreq->lock before calling update_devfreq
395 * This function is exported for governors.
396 */
update_devfreq(struct devfreq *devfreq)397 int update_devfreq(struct devfreq *devfreq)
398 {
399 unsigned long freq, min_freq, max_freq;
400 int err = 0;
401 u32 flags = 0;
402
403 lockdep_assert_held(&devfreq->lock);
404
405 if (!devfreq->governor) {
406 return -EINVAL;
407 }
408
409 /* Reevaluate the proper frequency */
410 err = devfreq->governor->get_target_freq(devfreq, &freq);
411 if (err) {
412 return err;
413 }
414 get_freq_range(devfreq, &min_freq, &max_freq);
415
416 if (freq < min_freq) {
417 freq = min_freq;
418 flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
419 }
420 if (freq > max_freq) {
421 freq = max_freq;
422 flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
423 }
424
425 return devfreq_set_target(devfreq, freq, flags);
426 }
427 EXPORT_SYMBOL(update_devfreq);
428
429 /**
430 * devfreq_monitor() - Periodically poll devfreq objects.
431 * @work: the work struct used to run devfreq_monitor periodically.
432 *
433 */
static void devfreq_monitor(struct work_struct *work)
{
	int err;
	struct devfreq *devfreq = container_of(work, struct devfreq, work.work);

	mutex_lock(&devfreq->lock);
	err = update_devfreq(devfreq);
	if (err) {
		/* Keep polling even if this evaluation failed. */
		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
	}

	/* Re-arm ourselves while still holding the lock. */
	queue_delayed_work(devfreq_wq, &devfreq->work, msecs_to_jiffies(devfreq->profile->polling_ms));
	mutex_unlock(&devfreq->lock);

	trace_devfreq_monitor(devfreq);
}
450
451 /**
452 * devfreq_monitor_start() - Start load monitoring of devfreq instance
453 * @devfreq: the devfreq instance.
454 *
455 * Helper function for starting devfreq device load monitoring. By
456 * default delayed work based monitoring is supported. Function
457 * to be called from governor in response to DEVFREQ_GOV_START
458 * event when device is added to devfreq framework.
459 */
devfreq_monitor_start(struct devfreq *devfreq)460 void devfreq_monitor_start(struct devfreq *devfreq)
461 {
462 if (devfreq->governor->interrupt_driven) {
463 return;
464 }
465
466 switch (devfreq->profile->timer) {
467 case DEVFREQ_TIMER_DEFERRABLE:
468 INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
469 break;
470 case DEVFREQ_TIMER_DELAYED:
471 INIT_DELAYED_WORK(&devfreq->work, devfreq_monitor);
472 break;
473 default:
474 return;
475 }
476
477 if (devfreq->profile->polling_ms) {
478 queue_delayed_work(devfreq_wq, &devfreq->work, msecs_to_jiffies(devfreq->profile->polling_ms));
479 }
480 }
481 EXPORT_SYMBOL(devfreq_monitor_start);
482
483 /**
484 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
485 * @devfreq: the devfreq instance.
486 *
487 * Helper function to stop devfreq device load monitoring. Function
488 * to be called from governor in response to DEVFREQ_GOV_STOP
489 * event when device is removed from devfreq framework.
490 */
devfreq_monitor_stop(struct devfreq *devfreq)491 void devfreq_monitor_stop(struct devfreq *devfreq)
492 {
493 if (devfreq->governor->interrupt_driven) {
494 return;
495 }
496
497 cancel_delayed_work_sync(&devfreq->work);
498 }
499 EXPORT_SYMBOL(devfreq_monitor_stop);
500
501 /**
502 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
503 * @devfreq: the devfreq instance.
504 *
505 * Helper function to suspend devfreq device load monitoring. Function
506 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
507 * event or when polling interval is set to zero.
508 *
509 * Note: Though this function is same as devfreq_monitor_stop(),
510 * intentionally kept separate to provide hooks for collecting
511 * transition statistics.
512 */
void devfreq_monitor_suspend(struct devfreq *devfreq)
{
	mutex_lock(&devfreq->lock);
	/* Already suspended: nothing to do. */
	if (devfreq->stop_polling) {
		mutex_unlock(&devfreq->lock);
		return;
	}

	/* Close out the stats for the current frequency before we stop. */
	devfreq_update_status(devfreq, devfreq->previous_freq);
	devfreq->stop_polling = true;
	mutex_unlock(&devfreq->lock);

	if (devfreq->governor->interrupt_driven) {
		return;
	}

	/*
	 * Cancel outside the lock: the work item takes devfreq->lock itself,
	 * so cancelling synchronously while holding it could deadlock.
	 */
	cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_suspend);
532
533 /**
534 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
535 * @devfreq: the devfreq instance.
536 *
537 * Helper function to resume devfreq device load monitoring. Function
538 * to be called from governor in response to DEVFREQ_GOV_RESUME
539 * event or when polling interval is set to non-zero.
540 */
void devfreq_monitor_resume(struct devfreq *devfreq)
{
	unsigned long freq;

	mutex_lock(&devfreq->lock);
	/* Only resume if devfreq_monitor_suspend() actually stopped us. */
	if (!devfreq->stop_polling) {
		goto out;
	}

	/* Interrupt-driven governors have no work item to re-queue. */
	if (devfreq->governor->interrupt_driven) {
		goto out_update;
	}

	if (!delayed_work_pending(&devfreq->work) && devfreq->profile->polling_ms) {
		queue_delayed_work(devfreq_wq, &devfreq->work, msecs_to_jiffies(devfreq->profile->polling_ms));
	}

out_update:
	/* Restart stats accounting from "now" so suspend time isn't charged. */
	devfreq->stats.last_update = get_jiffies_64();
	devfreq->stop_polling = false;

	/* Resync our cached frequency with what the hardware actually runs at. */
	if (devfreq->profile->get_cur_freq && !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq)) {
		devfreq->previous_freq = freq;
	}

out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_monitor_resume);
570
571 /**
572 * devfreq_update_interval() - Update device devfreq monitoring interval
573 * @devfreq: the devfreq instance.
574 * @delay: new polling interval to be set.
575 *
576 * Helper function to set new load monitoring polling interval. Function
577 * to be called from governor in response to DEVFREQ_GOV_UPDATE_INTERVAL event.
578 */
void devfreq_update_interval(struct devfreq *devfreq, unsigned int *delay)
{
	unsigned int cur_delay = devfreq->profile->polling_ms;
	unsigned int new_delay = *delay;

	mutex_lock(&devfreq->lock);
	devfreq->profile->polling_ms = new_delay;

	/* If monitoring is suspended, the new value takes effect on resume. */
	if (devfreq->stop_polling) {
		goto out;
	}

	if (devfreq->governor->interrupt_driven) {
		goto out;
	}

	/* if new delay is zero, stop polling */
	if (!new_delay) {
		/*
		 * Drop the lock before the synchronous cancel: the work item
		 * takes devfreq->lock itself.
		 */
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		return;
	}

	/* if current delay is zero, start polling with new delay */
	if (!cur_delay) {
		queue_delayed_work(devfreq_wq, &devfreq->work, msecs_to_jiffies(devfreq->profile->polling_ms));
		goto out;
	}

	/* if current delay is greater than new delay, restart polling */
	if (cur_delay > new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		mutex_lock(&devfreq->lock);
		/* Re-check: suspend may have raced in while the lock was dropped. */
		if (!devfreq->stop_polling) {
			queue_delayed_work(devfreq_wq, &devfreq->work, msecs_to_jiffies(devfreq->profile->polling_ms));
		}
	}
	/* cur_delay <= new_delay: let the pending work expire on the old timer. */
out:
	mutex_unlock(&devfreq->lock);
}
EXPORT_SYMBOL(devfreq_update_interval);
621
622 /**
623 * devfreq_notifier_call() - Notify that the device frequency requirements
624 * has been changed out of devfreq framework.
625 * @nb: the notifier_block (supposed to be devfreq->nb)
626 * @type: not used
627 * @devp: not used
628 *
629 * Called by a notifier that uses devfreq->nb.
630 */
static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type, void *devp)
{
	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
	int err = -EINVAL;

	mutex_lock(&devfreq->lock);

	/* Re-derive the OPP-based limits, which may have changed. */
	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
	if (!devfreq->scaling_min_freq) {
		/* No OPP available at all: report the -EINVAL set above. */
		goto out;
	}

	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
	if (!devfreq->scaling_max_freq) {
		/* Leave the max unconstrained rather than stuck at 0. */
		devfreq->scaling_max_freq = ULONG_MAX;
		goto out;
	}

	err = update_devfreq(devfreq);

out:
	mutex_unlock(&devfreq->lock);
	if (err) {
		dev_err(devfreq->dev.parent, "failed to update frequency from OPP notifier (%d)\n", err);
	}

	/* Notifier chains expect NOTIFY_* codes, even on internal failure. */
	return NOTIFY_OK;
}
659
660 /**
661 * qos_notifier_call() - Common handler for QoS constraints.
662 * @devfreq: the devfreq instance.
663 */
qos_notifier_call(struct devfreq *devfreq)664 static int qos_notifier_call(struct devfreq *devfreq)
665 {
666 int err;
667
668 mutex_lock(&devfreq->lock);
669 err = update_devfreq(devfreq);
670 mutex_unlock(&devfreq->lock);
671 if (err) {
672 dev_err(devfreq->dev.parent, "failed to update frequency from PM QoS (%d)\n", err);
673 }
674
675 return NOTIFY_OK;
676 }
677
678 /**
679 * qos_min_notifier_call() - Callback for QoS min_freq changes.
680 * @nb: Should be devfreq->nb_min
681 */
static int qos_min_notifier_call(struct notifier_block *nb, unsigned long val, void *ptr)
{
	/* Trampoline: recover the devfreq owning nb_min and delegate. */
	return qos_notifier_call(container_of(nb, struct devfreq, nb_min));
}
686
687 /**
688 * qos_max_notifier_call() - Callback for QoS max_freq changes.
689 * @nb: Should be devfreq->nb_max
690 */
static int qos_max_notifier_call(struct notifier_block *nb, unsigned long val, void *ptr)
{
	/* Trampoline: recover the devfreq owning nb_max and delegate. */
	return qos_notifier_call(container_of(nb, struct devfreq, nb_max));
}
695
696 /**
697 * devfreq_dev_release() - Callback for struct device to release the device.
698 * @dev: the devfreq device
699 *
700 * Remove devfreq from the list and release its resources.
701 */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);
	int err;

	/* Safe even if the device was never added: the node was list-inited. */
	mutex_lock(&devfreq_list_lock);
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	/* -ENOENT just means the notifier was never registered; ignore it. */
	err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_max, DEV_PM_QOS_MAX_FREQUENCY);
	if (err && err != -ENOENT) {
		dev_warn(dev->parent, "Failed to remove max_freq notifier: %d\n", err);
	}
	err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_min, DEV_PM_QOS_MIN_FREQUENCY);
	if (err && err != -ENOENT) {
		dev_warn(dev->parent, "Failed to remove min_freq notifier: %d\n", err);
	}

	/* Only drop the user QoS requests that were actually added. */
	if (dev_pm_qos_request_active(&devfreq->user_max_freq_req)) {
		err = dev_pm_qos_remove_request(&devfreq->user_max_freq_req);
		if (err < 0) {
			dev_warn(dev->parent, "Failed to remove max_freq request: %d\n", err);
		}
	}
	if (dev_pm_qos_request_active(&devfreq->user_min_freq_req)) {
		err = dev_pm_qos_remove_request(&devfreq->user_min_freq_req);
		if (err < 0) {
			dev_warn(dev->parent, "Failed to remove min_freq request: %d\n", err);
		}
	}

	/* Let the driver undo whatever its profile init did. */
	if (devfreq->profile->exit) {
		devfreq->profile->exit(devfreq->dev.parent);
	}

	mutex_destroy(&devfreq->lock);
	kfree(devfreq);
}
740
741 /**
742 * devfreq_remove_device() - Remove devfreq feature from a device.
743 * @devfreq: the devfreq instance to be removed
744 *
745 * The opposite of devfreq_add_device().
746 */
devfreq_remove_device(struct devfreq *devfreq)747 int devfreq_remove_device(struct devfreq *devfreq)
748 {
749 if (!devfreq) {
750 return -EINVAL;
751 }
752
753 if (devfreq->governor) {
754 devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_STOP, NULL);
755 }
756 device_unregister(&devfreq->dev);
757
758 return 0;
759 }
760 EXPORT_SYMBOL(devfreq_remove_device);
761
762 /**
763 * devfreq_add_device() - Add devfreq feature to the device
764 * @dev: the device to add devfreq feature.
765 * @profile: device-specific profile to run devfreq.
766 * @governor_name: name of the policy to choose frequency.
767 * @data: private data for the governor. The devfreq framework does not
768 * touch this value.
769 */
struct devfreq *devfreq_add_device(struct device *dev, struct devfreq_dev_profile *profile, const char *governor_name,
				   void *data)
{
	struct devfreq *devfreq;
	struct devfreq_governor *governor;
	int err = 0;

	if (!dev || !profile || !governor_name) {
		dev_err(dev, "%s: Invalid parameters.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	/* Reject a second devfreq for the same parent device. */
	mutex_lock(&devfreq_list_lock);
	devfreq = find_device_devfreq(dev);
	mutex_unlock(&devfreq_list_lock);
	if (!IS_ERR(devfreq)) {
		dev_err(dev, "%s: devfreq device already exists!\n", __func__);
		err = -EINVAL;
		goto err_out;
	}

	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
	if (!devfreq) {
		err = -ENOMEM;
		goto err_out;
	}

	mutex_init(&devfreq->lock);
	mutex_lock(&devfreq->lock);
	devfreq->dev.parent = dev;
	devfreq->dev.class = devfreq_class;
	devfreq->dev.release = devfreq_dev_release;
	INIT_LIST_HEAD(&devfreq->node);
	devfreq->profile = profile;
	strscpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
	devfreq->previous_freq = profile->initial_freq;
	devfreq->last_status.current_frequency = profile->initial_freq;
	devfreq->data = data;
	devfreq->nb.notifier_call = devfreq_notifier_call;

	/* Only timer types that devfreq_monitor_start() understands are valid. */
	if (devfreq->profile->timer < 0 || devfreq->profile->timer >= DEVFREQ_TIMER_NUM) {
		mutex_unlock(&devfreq->lock);
		err = -EINVAL;
		goto err_dev;
	}

	/* Driver supplied no freq table: derive one from the OPP table. */
	if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
		mutex_unlock(&devfreq->lock);
		err = set_freq_table(devfreq);
		if (err < 0) {
			goto err_dev;
		}
		mutex_lock(&devfreq->lock);
	}

	devfreq->scaling_min_freq = find_available_min_freq(devfreq);
	if (!devfreq->scaling_min_freq) {
		mutex_unlock(&devfreq->lock);
		err = -EINVAL;
		goto err_dev;
	}

	devfreq->scaling_max_freq = find_available_max_freq(devfreq);
	if (!devfreq->scaling_max_freq) {
		mutex_unlock(&devfreq->lock);
		err = -EINVAL;
		goto err_dev;
	}

	devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
	atomic_set(&devfreq->suspend_count, 0);

	dev_set_name(&devfreq->dev, "%s", dev_name(dev));
	err = device_register(&devfreq->dev);
	if (err) {
		/*
		 * After a failed device_register() the struct must be freed
		 * via put_device() -> devfreq_dev_release(), not kfree() —
		 * hence err_out rather than err_dev here.
		 */
		mutex_unlock(&devfreq->lock);
		put_device(&devfreq->dev);
		goto err_out;
	}

	/* Per-state statistics: max_state x max_state transition matrix. */
	devfreq->stats.trans_table = devm_kzalloc(
		&devfreq->dev, array3_size(sizeof(unsigned int), devfreq->profile->max_state, devfreq->profile->max_state),
		GFP_KERNEL);
	if (!devfreq->stats.trans_table) {
		mutex_unlock(&devfreq->lock);
		err = -ENOMEM;
		goto err_devfreq;
	}

	devfreq->stats.time_in_state =
		devm_kcalloc(&devfreq->dev, devfreq->profile->max_state, sizeof(*devfreq->stats.time_in_state), GFP_KERNEL);
	if (!devfreq->stats.time_in_state) {
		mutex_unlock(&devfreq->lock);
		err = -ENOMEM;
		goto err_devfreq;
	}

	devfreq->stats.total_trans = 0;
	devfreq->stats.last_update = get_jiffies_64();

	srcu_init_notifier_head(&devfreq->transition_notifier_list);

	mutex_unlock(&devfreq->lock);

	/* User-tunable min/max requests (backing the sysfs knobs). */
	err = dev_pm_qos_add_request(dev, &devfreq->user_min_freq_req, DEV_PM_QOS_MIN_FREQUENCY, 0);
	if (err < 0) {
		goto err_devfreq;
	}
	err = dev_pm_qos_add_request(dev, &devfreq->user_max_freq_req, DEV_PM_QOS_MAX_FREQUENCY,
				     PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
	if (err < 0) {
		goto err_devfreq;
	}

	/* React to QoS constraint changes from any requester. */
	devfreq->nb_min.notifier_call = qos_min_notifier_call;
	err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_min, DEV_PM_QOS_MIN_FREQUENCY);
	if (err) {
		goto err_devfreq;
	}

	devfreq->nb_max.notifier_call = qos_max_notifier_call;
	err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_max, DEV_PM_QOS_MAX_FREQUENCY);
	if (err) {
		goto err_devfreq;
	}

	mutex_lock(&devfreq_list_lock);

	governor = try_then_request_governor(devfreq->governor_name);
	if (IS_ERR(governor)) {
		dev_err(dev, "%s: Unable to find governor for the device\n", __func__);
		err = PTR_ERR(governor);
		goto err_init;
	}

	devfreq->governor = governor;
	err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START, NULL);
	if (err) {
		dev_err(dev, "%s: Unable to start governor for the device\n", __func__);
		goto err_init;
	}

	list_add(&devfreq->node, &devfreq_list);

	mutex_unlock(&devfreq_list_lock);

	return devfreq;

	/*
	 * Error-label ladder, in order of how far setup got:
	 *  - err_init:    list lock held; drop it, then tear down as below.
	 *  - err_devfreq: device is registered; devfreq_remove_device() drops
	 *    the final reference and devfreq_dev_release() frees everything.
	 *    devfreq is then NULLed so the fall-through kfree() is a no-op.
	 *  - err_dev:     device not yet registered; plain kfree() suffices.
	 */
err_init:
	mutex_unlock(&devfreq_list_lock);
err_devfreq:
	devfreq_remove_device(devfreq);
	devfreq = NULL;
err_dev:
	kfree(devfreq);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(devfreq_add_device);
929
/* devres match callback: true when @res holds the devfreq given in @data. */
static int devm_devfreq_dev_match(struct device *dev, void *res, void *data)
{
	struct devfreq **this = res;

	if (WARN_ON(!this || !*this))
		return 0;

	return *this == data;
}
940
/* devres release callback: remove the devfreq stored in the resource. */
static void devm_devfreq_dev_release(struct device *dev, void *res)
{
	devfreq_remove_device(*(struct devfreq **)res);
}
945
946 /**
947 * devm_devfreq_add_device() - Resource-managed devfreq_add_device()
948 * @dev: the device to add devfreq feature.
949 * @profile: device-specific profile to run devfreq.
950 * @governor_name: name of the policy to choose frequency.
951 * @data: private data for the governor. The devfreq framework does not
952 * touch this value.
953 *
954 * This function manages automatically the memory of devfreq device using device
955 * resource management and simplify the free operation for memory of devfreq
956 * device.
957 */
devm_devfreq_add_device(struct device *dev, struct devfreq_dev_profile *profile, const char *governor_name, void *data)958 struct devfreq *devm_devfreq_add_device(struct device *dev, struct devfreq_dev_profile *profile,
959 const char *governor_name, void *data)
960 {
961 struct devfreq **ptr, *devfreq;
962
963 ptr = devres_alloc(devm_devfreq_dev_release, sizeof(*ptr), GFP_KERNEL);
964 if (!ptr) {
965 return ERR_PTR(-ENOMEM);
966 }
967
968 devfreq = devfreq_add_device(dev, profile, governor_name, data);
969 if (IS_ERR(devfreq)) {
970 devres_free(ptr);
971 return devfreq;
972 }
973
974 *ptr = devfreq;
975 devres_add(dev, ptr);
976
977 return devfreq;
978 }
979 EXPORT_SYMBOL(devm_devfreq_add_device);
980
981 #ifdef CONFIG_OF
982 /*
983 * devfreq_get_devfreq_by_node - Get the devfreq device from devicetree
984 * @node - pointer to device_node
985 *
986 * return the instance of devfreq device
987 */
devfreq_get_devfreq_by_node(struct device_node *node)988 struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node)
989 {
990 struct devfreq *devfreq;
991
992 if (!node) {
993 return ERR_PTR(-EINVAL);
994 }
995
996 mutex_lock(&devfreq_list_lock);
997 list_for_each_entry(devfreq, &devfreq_list, node)
998 {
999 if (devfreq->dev.parent && devfreq->dev.parent->of_node == node) {
1000 mutex_unlock(&devfreq_list_lock);
1001 return devfreq;
1002 }
1003 }
1004 mutex_unlock(&devfreq_list_lock);
1005
1006 return ERR_PTR(-ENODEV);
1007 }
1008
1009 /*
1010 * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
1011 * @dev - instance to the given device
1012 * @phandle_name - name of property holding a phandle value
1013 * @index - index into list of devfreq
1014 *
1015 * return the instance of devfreq device
1016 */
devfreq_get_devfreq_by_phandle(struct device *dev, const char *phandle_name, int index)1017 struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, const char *phandle_name, int index)
1018 {
1019 struct device_node *node;
1020 struct devfreq *devfreq;
1021
1022 if (!dev || !phandle_name) {
1023 return ERR_PTR(-EINVAL);
1024 }
1025
1026 if (!dev->of_node) {
1027 return ERR_PTR(-EINVAL);
1028 }
1029
1030 node = of_parse_phandle(dev->of_node, phandle_name, index);
1031 if (!node) {
1032 return ERR_PTR(-ENODEV);
1033 }
1034
1035 devfreq = devfreq_get_devfreq_by_node(node);
1036 of_node_put(node);
1037
1038 return devfreq;
1039 }
1040
1041 #else
struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node)
{
	/* !CONFIG_OF stub: devicetree lookup is unavailable on this build. */
	return ERR_PTR(-ENODEV);
}
1046
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, const char *phandle_name, int index)
{
	/* !CONFIG_OF stub: devicetree lookup is unavailable on this build. */
	return ERR_PTR(-ENODEV);
}
1051 #endif /* CONFIG_OF */
1052 EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_node);
1053 EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);
1054
1055 /**
1056 * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
1057 * @dev: the device from which to remove devfreq feature.
1058 * @devfreq: the devfreq instance to be removed
1059 */
void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
{
	/*
	 * Find and fire the matching devres entry; its release callback
	 * removes the devfreq device. WARN if no such entry was registered.
	 */
	WARN_ON(devres_release(dev, devm_devfreq_dev_release, devm_devfreq_dev_match, devfreq));
}
1064 EXPORT_SYMBOL(devm_devfreq_remove_device);
1065
1066 /**
1067 * devfreq_suspend_device() - Suspend devfreq of a device.
1068 * @devfreq: the devfreq instance to be suspended
1069 *
1070 * This function is intended to be called by the pm callbacks
1071 * (e.g., runtime_suspend, suspend) of the device driver that
1072 * holds the devfreq.
1073 */
int devfreq_suspend_device(struct devfreq *devfreq)
{
	int ret;

	if (!devfreq) {
		return -EINVAL;
	}

	/*
	 * Suspends nest: only the first caller actually stops the governor
	 * and pins the suspend frequency; later callers just bump the count.
	 */
	if (atomic_inc_return(&devfreq->suspend_count) > 1) {
		return 0;
	}

	if (devfreq->governor) {
		/* Tell the governor to stop monitoring and scaling. */
		ret = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_SUSPEND, NULL);
		if (ret) {
			/*
			 * NOTE(review): suspend_count stays incremented on this
			 * error path — confirm callers tolerate the imbalance.
			 */
			return ret;
		}
	}

	if (devfreq->suspend_freq) {
		/* Pin the device at its dedicated suspend frequency. */
		mutex_lock(&devfreq->lock);
		ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0);
		mutex_unlock(&devfreq->lock);
		if (ret) {
			return ret;
		}
	}

	return 0;
}
1104 EXPORT_SYMBOL(devfreq_suspend_device);
1105
1106 /**
1107 * devfreq_resume_device() - Resume devfreq of a device.
1108 * @devfreq: the devfreq instance to be resumed
1109 *
1110 * This function is intended to be called by the pm callbacks
1111 * (e.g., runtime_resume, resume) of the device driver that
1112 * holds the devfreq.
1113 */
int devfreq_resume_device(struct devfreq *devfreq)
{
	int ret;

	if (!devfreq) {
		return -EINVAL;
	}

	/*
	 * Balanced against devfreq_suspend_device(): only the resume that
	 * brings the nesting count back to zero performs the real work.
	 */
	if (atomic_dec_return(&devfreq->suspend_count) >= 1) {
		return 0;
	}

	if (devfreq->resume_freq) {
		/* Restore the pre-suspend operating frequency first. */
		mutex_lock(&devfreq->lock);
		ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0);
		mutex_unlock(&devfreq->lock);
		if (ret) {
			return ret;
		}
	}

	if (devfreq->governor) {
		/* Then restart governor monitoring and scaling. */
		ret = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_RESUME, NULL);
		if (ret) {
			return ret;
		}
	}

	return 0;
}
1144 EXPORT_SYMBOL(devfreq_resume_device);
1145
1146 /**
1147 * devfreq_suspend() - Suspend devfreq governors and devices
1148 *
1149 * Called during system wide Suspend/Hibernate cycles for suspending governors
1150 * and devices preserving the state for resume. On some platforms the devfreq
1151 * device must have precise state (frequency) after resume in order to provide
1152 * fully operating setup.
1153 */
devfreq_suspend(void)1154 void devfreq_suspend(void)
1155 {
1156 struct devfreq *devfreq;
1157 int ret;
1158
1159 mutex_lock(&devfreq_list_lock);
1160 list_for_each_entry(devfreq, &devfreq_list, node)
1161 {
1162 ret = devfreq_suspend_device(devfreq);
1163 if (ret) {
1164 dev_err(&devfreq->dev, "failed to suspend devfreq device\n");
1165 }
1166 }
1167 mutex_unlock(&devfreq_list_lock);
1168 }
1169
1170 /**
1171 * devfreq_resume() - Resume devfreq governors and devices
1172 *
1173 * Called during system wide Suspend/Hibernate cycle for resuming governors and
1174 * devices that are suspended with devfreq_suspend().
1175 */
devfreq_resume(void)1176 void devfreq_resume(void)
1177 {
1178 struct devfreq *devfreq;
1179 int ret;
1180
1181 mutex_lock(&devfreq_list_lock);
1182 list_for_each_entry(devfreq, &devfreq_list, node)
1183 {
1184 ret = devfreq_resume_device(devfreq);
1185 if (ret) {
1186 dev_warn(&devfreq->dev, "failed to resume devfreq device\n");
1187 }
1188 }
1189 mutex_unlock(&devfreq_list_lock);
1190 }
1191
1192 /**
1193 * devfreq_add_governor() - Add devfreq governor
1194 * @governor: the devfreq governor to be added
1195 */
int devfreq_add_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	/* Reject duplicate registration by governor name. */
	g = find_devfreq_governor(governor->name);
	if (!IS_ERR(g)) {
		pr_err("%s: governor %s already registered\n", __func__, g->name);
		err = -EINVAL;
		goto err_out;
	}

	list_add(&governor->node, &devfreq_governor_list);

	/*
	 * Attach the new governor to every devfreq device that was created
	 * with this governor's name and start it right away.
	 */
	list_for_each_entry(devfreq, &devfreq_list, node)
	{
		int ret = 0;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name, DEVFREQ_NAME_LEN)) {
			/* The following should never occur */
			if (devfreq->governor) {
				dev_warn(dev, "%s: Governor %s already present\n", __func__, devfreq->governor->name);
				ret = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_STOP, NULL);
				if (ret) {
					/* Stop failure is logged but not fatal; switch over anyway. */
					dev_warn(dev, "%s: Governor %s stop = %d\n", __func__, devfreq->governor->name, ret);
				}
				/* Fall through */
			}
			devfreq->governor = governor;
			ret = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START, NULL);
			if (ret) {
				dev_warn(dev, "%s: Governor %s start=%d\n", __func__, devfreq->governor->name, ret);
			}
		}
	}

err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
1245 EXPORT_SYMBOL(devfreq_add_governor);
1246
1247 /**
1248 * devfreq_remove_governor() - Remove devfreq feature from a device.
1249 * @governor: the devfreq governor to be removed
1250 */
int devfreq_remove_governor(struct devfreq_governor *governor)
{
	struct devfreq_governor *g;
	struct devfreq *devfreq;
	int err = 0;

	if (!governor) {
		pr_err("%s: Invalid parameters.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	g = find_devfreq_governor(governor->name);
	if (IS_ERR(g)) {
		pr_err("%s: governor %s not registered\n", __func__, governor->name);
		err = PTR_ERR(g);
		goto err_out;
	}
	/*
	 * Stop and detach this governor from every devfreq device that is
	 * currently using it, before taking it off the governor list.
	 */
	list_for_each_entry(devfreq, &devfreq_list, node)
	{
		int ret;
		struct device *dev = devfreq->dev.parent;

		if (!strncmp(devfreq->governor_name, governor->name, DEVFREQ_NAME_LEN)) {
			/* we should have a devfreq governor! */
			if (!devfreq->governor) {
				dev_warn(dev, "%s: Governor %s NOT present\n", __func__, governor->name);
				continue;
			}
			ret = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_STOP, NULL);
			if (ret) {
				/* Stop failure is logged but the detach proceeds regardless. */
				dev_warn(dev, "%s: Governor %s stop=%d\n", __func__, devfreq->governor->name, ret);
			}
			devfreq->governor = NULL;
		}
	}

	list_del(&governor->node);
err_out:
	mutex_unlock(&devfreq_list_lock);

	return err;
}
1295 EXPORT_SYMBOL(devfreq_remove_governor);
1296
name_show(struct device *dev, struct device_attribute *attr, char *buf)1297 static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf)
1298 {
1299 struct devfreq *df = to_devfreq(dev);
1300 return sprintf(buf, "%s\n", dev_name(df->dev.parent));
1301 }
1302 static DEVICE_ATTR_RO(name);
1303
governor_show(struct device *dev, struct device_attribute *attr, char *buf)1304 static ssize_t governor_show(struct device *dev, struct device_attribute *attr, char *buf)
1305 {
1306 struct devfreq *df = to_devfreq(dev);
1307
1308 if (!df->governor) {
1309 return -EINVAL;
1310 }
1311
1312 return sprintf(buf, "%s\n", df->governor->name);
1313 }
1314
static ssize_t governor_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	int ret;
	char str_governor[DEVFREQ_NAME_LEN + 1];
	const struct devfreq_governor *governor, *prev_governor;

	if (!df->governor) {
		return -EINVAL;
	}

	/* Bounded scan: at most DEVFREQ_NAME_LEN characters of the name. */
	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
	if (ret != 1) {
		return -EINVAL;
	}

	mutex_lock(&devfreq_list_lock);
	/* Look the governor up; may request_module() it if not registered. */
	governor = try_then_request_governor(str_governor);
	if (IS_ERR(governor)) {
		ret = PTR_ERR(governor);
		goto out;
	}
	if (df->governor == governor) {
		/* Already active: nothing to do. */
		ret = 0;
		goto out;
	} else if (df->governor->immutable || governor->immutable) {
		/* Immutable governors (e.g. passive) cannot be switched. */
		ret = -EINVAL;
		goto out;
	}

	ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
	if (ret) {
		dev_warn(dev, "%s: Governor %s not stopped(%d)\n", __func__, df->governor->name, ret);
		goto out;
	}

	/* Switch over; on start failure try to revert to the old governor. */
	prev_governor = df->governor;
	df->governor = governor;
	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
	if (ret) {
		dev_warn(dev, "%s: Governor %s not started(%d)\n", __func__, df->governor->name, ret);
		df->governor = prev_governor;
		strncpy(df->governor_name, prev_governor->name, DEVFREQ_NAME_LEN);
		ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
		if (ret) {
			/* Revert failed too: leave the device with no governor. */
			dev_err(dev, "%s: reverting to Governor %s failed (%d)\n", __func__, df->governor_name, ret);
			df->governor = NULL;
		}
	}
out:
	mutex_unlock(&devfreq_list_lock);

	if (!ret) {
		ret = count;
	}
	return ret;
}
1373 static DEVICE_ATTR_RW(governor);
1374
available_governors_show(struct device *d, struct device_attribute *attr, char *buf)1375 static ssize_t available_governors_show(struct device *d, struct device_attribute *attr, char *buf)
1376 {
1377 struct devfreq *df = to_devfreq(d);
1378 ssize_t count = 0;
1379
1380 if (!df->governor) {
1381 return -EINVAL;
1382 }
1383
1384 mutex_lock(&devfreq_list_lock);
1385
1386 /*
1387 * The devfreq with immutable governor (e.g., passive) shows
1388 * only own governor.
1389 */
1390 if (df->governor->immutable) {
1391 count = scnprintf(&buf[count], DEVFREQ_NAME_LEN, "%s ", df->governor_name);
1392 /*
1393 * The devfreq device shows the registered governor except for
1394 * immutable governors such as passive governor .
1395 */
1396 } else {
1397 struct devfreq_governor *governor;
1398
1399 list_for_each_entry(governor, &devfreq_governor_list, node)
1400 {
1401 if (governor->immutable) {
1402 continue;
1403 }
1404 count += scnprintf(&buf[count], (PAGE_SIZE - count - 0x2), "%s ", governor->name);
1405 }
1406 }
1407
1408 mutex_unlock(&devfreq_list_lock);
1409
1410 /* Truncate the trailing space */
1411 if (count) {
1412 count--;
1413 }
1414
1415 count += sprintf(&buf[count], "\n");
1416
1417 return count;
1418 }
1419 static DEVICE_ATTR_RO(available_governors);
1420
cur_freq_show(struct device *dev, struct device_attribute *attr, char *buf)1421 static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr, char *buf)
1422 {
1423 unsigned long freq;
1424 struct devfreq *df = to_devfreq(dev);
1425
1426 if (!df->profile) {
1427 return -EINVAL;
1428 }
1429
1430 if (df->profile->get_cur_freq && !df->profile->get_cur_freq(df->dev.parent, &freq)) {
1431 return sprintf(buf, "%lu\n", freq);
1432 }
1433
1434 return sprintf(buf, "%lu\n", df->previous_freq);
1435 }
1436 static DEVICE_ATTR_RO(cur_freq);
1437
target_freq_show(struct device *dev, struct device_attribute *attr, char *buf)1438 static ssize_t target_freq_show(struct device *dev, struct device_attribute *attr, char *buf)
1439 {
1440 struct devfreq *df = to_devfreq(dev);
1441
1442 return sprintf(buf, "%lu\n", df->previous_freq);
1443 }
1444 static DEVICE_ATTR_RO(target_freq);
1445
polling_interval_show(struct device *dev, struct device_attribute *attr, char *buf)1446 static ssize_t polling_interval_show(struct device *dev, struct device_attribute *attr, char *buf)
1447 {
1448 struct devfreq *df = to_devfreq(dev);
1449
1450 if (!df->profile) {
1451 return -EINVAL;
1452 }
1453
1454 return sprintf(buf, "%d\n", df->profile->polling_ms);
1455 }
1456
polling_interval_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)1457 static ssize_t polling_interval_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1458 {
1459 struct devfreq *df = to_devfreq(dev);
1460 unsigned int value;
1461 int ret;
1462
1463 if (!df->governor) {
1464 return -EINVAL;
1465 }
1466
1467 ret = sscanf(buf, "%u", &value);
1468 if (ret != 1) {
1469 return -EINVAL;
1470 }
1471
1472 df->governor->event_handler(df, DEVFREQ_GOV_UPDATE_INTERVAL, &value);
1473 ret = count;
1474
1475 return ret;
1476 }
1477 static DEVICE_ATTR_RW(polling_interval);
1478
min_freq_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)1479 static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1480 {
1481 struct devfreq *df = to_devfreq(dev);
1482 unsigned long value;
1483 int ret;
1484
1485 /*
1486 * Protect against theoretical sysfs writes between
1487 * device_add and dev_pm_qos_add_request
1488 */
1489 if (!dev_pm_qos_request_active(&df->user_min_freq_req)) {
1490 return -EAGAIN;
1491 }
1492
1493 ret = sscanf(buf, "%lu", &value);
1494 if (ret != 1) {
1495 return -EINVAL;
1496 }
1497
1498 /* Round down to kHz for PM QoS */
1499 ret = dev_pm_qos_update_request(&df->user_min_freq_req, value / HZ_PER_KHZ);
1500 if (ret < 0) {
1501 return ret;
1502 }
1503
1504 return count;
1505 }
1506
min_freq_show(struct device *dev, struct device_attribute *attr, char *buf)1507 static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr, char *buf)
1508 {
1509 struct devfreq *df = to_devfreq(dev);
1510 unsigned long min_freq, max_freq;
1511
1512 mutex_lock(&df->lock);
1513 get_freq_range(df, &min_freq, &max_freq);
1514 mutex_unlock(&df->lock);
1515
1516 return sprintf(buf, "%lu\n", min_freq);
1517 }
1518 static DEVICE_ATTR_RW(min_freq);
1519
max_freq_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)1520 static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1521 {
1522 struct devfreq *df = to_devfreq(dev);
1523 unsigned long value;
1524 int ret;
1525
1526 /*
1527 * Protect against theoretical sysfs writes between
1528 * device_add and dev_pm_qos_add_request
1529 */
1530 if (!dev_pm_qos_request_active(&df->user_max_freq_req)) {
1531 return -EINVAL;
1532 }
1533
1534 ret = sscanf(buf, "%lu", &value);
1535 if (ret != 1) {
1536 return -EINVAL;
1537 }
1538
1539 /*
1540 * PM QoS frequencies are in kHz so we need to convert. Convert by
1541 * rounding upwards so that the acceptable interval never shrinks.
1542 *
1543 * For example if the user writes "666666666" to sysfs this value will
1544 * be converted to 666667 kHz and back to 666667000 Hz before an OPP
1545 * lookup, this ensures that an OPP of 666666666Hz is still accepted.
1546 *
1547 * A value of zero means "no limit".
1548 */
1549 if (value) {
1550 value = DIV_ROUND_UP(value, HZ_PER_KHZ);
1551 } else {
1552 value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
1553 }
1554
1555 ret = dev_pm_qos_update_request(&df->user_max_freq_req, value);
1556 if (ret < 0) {
1557 return ret;
1558 }
1559
1560 return count;
1561 }
1562
max_freq_show(struct device *dev, struct device_attribute *attr, char *buf)1563 static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr, char *buf)
1564 {
1565 struct devfreq *df = to_devfreq(dev);
1566 unsigned long min_freq, max_freq;
1567
1568 mutex_lock(&df->lock);
1569 get_freq_range(df, &min_freq, &max_freq);
1570 mutex_unlock(&df->lock);
1571
1572 return sprintf(buf, "%lu\n", max_freq);
1573 }
1574 static DEVICE_ATTR_RW(max_freq);
1575
available_frequencies_show(struct device *d, struct device_attribute *attr, char *buf)1576 static ssize_t available_frequencies_show(struct device *d, struct device_attribute *attr, char *buf)
1577 {
1578 struct devfreq *df = to_devfreq(d);
1579 ssize_t count = 0;
1580 int i;
1581
1582 if (!df->profile) {
1583 return -EINVAL;
1584 }
1585
1586 mutex_lock(&df->lock);
1587
1588 for (i = 0; i < df->profile->max_state; i++) {
1589 count += scnprintf(&buf[count], (PAGE_SIZE - count - 0x2), "%lu ", df->profile->freq_table[i]);
1590 }
1591
1592 mutex_unlock(&df->lock);
1593 /* Truncate the trailing space */
1594 if (count) {
1595 count--;
1596 }
1597
1598 count += sprintf(&buf[count], "\n");
1599
1600 return count;
1601 }
1602 static DEVICE_ATTR_RO(available_frequencies);
1603
static ssize_t trans_stat_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct devfreq *df = to_devfreq(dev);
	ssize_t len;
	int i, j;
	unsigned int max_state;

	if (!df->profile) {
		return -EINVAL;
	}
	max_state = df->profile->max_state;

	if (max_state == 0) {
		return sprintf(buf, "Not Supported.\n");
	}

	/* Fold the time since the last update into the statistics first. */
	mutex_lock(&df->lock);
	if (!df->stop_polling && devfreq_update_status(df, df->previous_freq)) {
		mutex_unlock(&df->lock);
		return 0;
	}
	mutex_unlock(&df->lock);

	/* Header row: one column per target frequency. */
	len = sprintf(buf, " From : To\n");
	len += sprintf(buf + len, " :");
	for (i = 0; i < max_state; i++) {
		len += sprintf(buf + len, "%10lu", df->profile->freq_table[i]);
	}

	len += sprintf(buf + len, " time(ms)\n");

	/*
	 * One row per source frequency; the current frequency is marked
	 * with '*'. NOTE(review): output length is unbounded by sprintf —
	 * assumes max_state is small enough to stay within PAGE_SIZE.
	 */
	for (i = 0; i < max_state; i++) {
		if (df->profile->freq_table[i] == df->previous_freq) {
			len += sprintf(buf + len, "*");
		} else {
			len += sprintf(buf + len, " ");
		}
		len += sprintf(buf + len, "%10lu:", df->profile->freq_table[i]);
		for (j = 0; j < max_state; j++) {
			len += sprintf(buf + len, "%10u", df->stats.trans_table[(i * max_state) + j]);
		}

		len += sprintf(buf + len, "%10llu\n", (u64)jiffies64_to_msecs(df->stats.time_in_state[i]));
	}

	len += sprintf(buf + len, "Total transition : %u\n", df->stats.total_trans);
	return len;
}
1652
trans_stat_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)1653 static ssize_t trans_stat_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1654 {
1655 struct devfreq *df = to_devfreq(dev);
1656 int err, value;
1657
1658 if (!df->profile) {
1659 return -EINVAL;
1660 }
1661
1662 if (df->profile->max_state == 0) {
1663 return count;
1664 }
1665
1666 err = kstrtoint(buf, 0xa, &value);
1667 if (err || value != 0) {
1668 return -EINVAL;
1669 }
1670
1671 mutex_lock(&df->lock);
1672 memset(df->stats.time_in_state, 0, (df->profile->max_state * sizeof(*df->stats.time_in_state)));
1673 memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int), df->profile->max_state, df->profile->max_state));
1674 df->stats.total_trans = 0;
1675 df->stats.last_update = get_jiffies_64();
1676 mutex_unlock(&df->lock);
1677
1678 return count;
1679 }
1680 static DEVICE_ATTR_RW(trans_stat);
1681
timer_show(struct device *dev, struct device_attribute *attr, char *buf)1682 static ssize_t timer_show(struct device *dev, struct device_attribute *attr, char *buf)
1683 {
1684 struct devfreq *df = to_devfreq(dev);
1685
1686 if (!df->profile) {
1687 return -EINVAL;
1688 }
1689
1690 return sprintf(buf, "%s\n", timer_name[df->profile->timer]);
1691 }
1692
static ssize_t timer_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	char str_timer[DEVFREQ_NAME_LEN + 1];
	int timer = -1;
	int ret = 0, i;

	if (!df->governor || !df->profile) {
		return -EINVAL;
	}

	/* %16s bounds the scan to DEVFREQ_NAME_LEN (16) characters. */
	ret = sscanf(buf, "%16s", str_timer);
	if (ret != 1) {
		return -EINVAL;
	}

	/* Map the written name to a DEVFREQ_TIMER_* index. */
	for (i = 0; i < DEVFREQ_TIMER_NUM; i++) {
		if (!strncmp(timer_name[i], str_timer, DEVFREQ_NAME_LEN)) {
			timer = i;
			break;
		}
	}

	if (timer < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (df->profile->timer == timer) {
		/* Already using this timer type: nothing to do. */
		ret = 0;
		goto out;
	}

	mutex_lock(&df->lock);
	df->profile->timer = timer;
	mutex_unlock(&df->lock);

	/* Restart the governor so the new timer type takes effect. */
	ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
	if (ret) {
		dev_warn(dev, "%s: Governor %s not stopped(%d)\n", __func__, df->governor->name, ret);
		goto out;
	}

	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
	if (ret) {
		dev_warn(dev, "%s: Governor %s not started(%d)\n", __func__, df->governor->name, ret);
	}
out:
	return ret ? ret : count;
}
1743 static DEVICE_ATTR_RW(timer);
1744
load_show(struct device *dev, struct device_attribute *attr, char *buf)1745 static ssize_t load_show(struct device *dev, struct device_attribute *attr, char *buf)
1746 {
1747 int err;
1748 struct devfreq *devfreq = to_devfreq(dev);
1749 struct devfreq_dev_status stat = devfreq->last_status;
1750 unsigned long freq;
1751 ssize_t len;
1752
1753 err = devfreq_update_stats(devfreq);
1754 if (err) {
1755 return err;
1756 }
1757
1758 if (stat.total_time < stat.busy_time) {
1759 err = devfreq_update_stats(devfreq);
1760 if (err) {
1761 return err;
1762 }
1763 };
1764
1765 if (!stat.total_time) {
1766 return 0;
1767 }
1768
1769 len = sprintf(buf, "%lu", stat.busy_time * 0x64 / stat.total_time);
1770
1771 if (devfreq->profile->get_cur_freq && !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq)) {
1772 len += sprintf(buf + len, "@%luHz\n", freq);
1773 } else {
1774 len += sprintf(buf + len, "@%luHz\n", devfreq->previous_freq);
1775 }
1776
1777 return len;
1778 }
1779 static DEVICE_ATTR_RO(load);
1780
/* Default sysfs attributes exposed by every devfreq class device. */
static struct attribute *devfreq_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_governor.attr,
	&dev_attr_available_governors.attr,
	&dev_attr_cur_freq.attr,
	&dev_attr_available_frequencies.attr,
	&dev_attr_target_freq.attr,
	&dev_attr_polling_interval.attr,
	&dev_attr_min_freq.attr,
	&dev_attr_max_freq.attr,
	&dev_attr_trans_stat.attr,
	&dev_attr_timer.attr,
	&dev_attr_load.attr,
	NULL,
};
1796 ATTRIBUTE_GROUPS(devfreq);
1797
1798 /**
1799 * devfreq_summary_show() - Show the summary of the devfreq devices
1800 * @s: seq_file instance to show the summary of devfreq devices
1801 * @data: not used
1802 *
1803 * Show the summary of the devfreq devices via 'devfreq_summary' debugfs file.
1804 * It helps that user can know the detailed information of the devfreq devices.
1805 *
1806 * Return 0 always because it shows the information without any data change.
1807 */
static int devfreq_summary_show(struct seq_file *s, void *data)
{
	struct devfreq *devfreq;
	struct devfreq *p_devfreq = NULL;
	unsigned long cur_freq, min_freq, max_freq;
	unsigned int polling_ms;
	unsigned int timer;

	/* Table header with fixed-width columns. */
	seq_printf(s, "%-30s %-30s %-15s %-10s %10s %12s %12s %12s\n", "dev", "parent_dev", "governor", "timer",
		   "polling_ms", "cur_freq_Hz", "min_freq_Hz", "max_freq_Hz");
	seq_printf(s, "%30s %30s %15s %10s %10s %12s %12s %12s\n", "------------------------------",
		   "------------------------------", "---------------", "----------", "----------", "------------",
		   "------------", "------------");

	mutex_lock(&devfreq_list_lock);

	list_for_each_entry_reverse(devfreq, &devfreq_list, node)
	{
#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
		/* A passive device reports its parent devfreq in column two. */
		if (!strncmp(devfreq->governor_name, DEVFREQ_GOV_PASSIVE, DEVFREQ_NAME_LEN)) {
			struct devfreq_passive_data *data = devfreq->data;

			if (data) {
				p_devfreq = data->parent;
			}
		} else {
			p_devfreq = NULL;
		}
#endif

		/* Snapshot per-device state under its own lock. */
		mutex_lock(&devfreq->lock);
		cur_freq = devfreq->previous_freq;
		get_freq_range(devfreq, &min_freq, &max_freq);
		polling_ms = devfreq->profile->polling_ms;
		timer = devfreq->profile->timer;
		mutex_unlock(&devfreq->lock);

		seq_printf(s, "%-30s %-30s %-15s %-10s %10d %12ld %12ld %12ld\n", dev_name(&devfreq->dev),
			   p_devfreq ? dev_name(&p_devfreq->dev) : "null", devfreq->governor_name,
			   polling_ms ? timer_name[timer] : "null", polling_ms, cur_freq, min_freq, max_freq);
	}

	mutex_unlock(&devfreq_list_lock);

	return 0;
}
1854 DEFINE_SHOW_ATTRIBUTE(devfreq_summary);
1855
devfreq_init(void)1856 static int __init devfreq_init(void)
1857 {
1858 devfreq_class = class_create(THIS_MODULE, "devfreq");
1859 if (IS_ERR(devfreq_class)) {
1860 pr_err("%s: couldn't create class\n", __FILE__);
1861 return PTR_ERR(devfreq_class);
1862 }
1863
1864 devfreq_wq = create_freezable_workqueue("devfreq_wq");
1865 if (!devfreq_wq) {
1866 class_destroy(devfreq_class);
1867 pr_err("%s: couldn't create workqueue\n", __FILE__);
1868 return -ENOMEM;
1869 }
1870 devfreq_class->dev_groups = devfreq_groups;
1871
1872 devfreq_debugfs = debugfs_create_dir("devfreq", NULL);
1873 debugfs_create_file("devfreq_summary", 0444, devfreq_debugfs, NULL, &devfreq_summary_fops);
1874
1875 return 0;
1876 }
1877 subsys_initcall(devfreq_init);
1878
1879 /*
1880 * The following are helper functions for devfreq user device drivers with
1881 * OPP framework.
1882 */
1883
1884 /**
1885 * devfreq_recommended_opp() - Helper function to get proper OPP for the
1886 * freq value given to target callback.
1887 * @dev: The devfreq user device. (parent of devfreq)
1888 * @freq: The frequency given to target function
1889 * @flags: Flags handed from devfreq framework.
1890 *
1891 * The callers are required to call dev_pm_opp_put() for the returned OPP after
1892 * use.
1893 */
struct dev_pm_opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, u32 flags)
{
	struct dev_pm_opp *opp;

	if (!(flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND)) {
		/* freq is a lower bound: prefer the lowest OPP at or above it. */
		opp = dev_pm_opp_find_freq_ceil(dev, freq);
		if (opp == ERR_PTR(-ERANGE)) {
			/* Out of range: fall back to the closest OPP below. */
			opp = dev_pm_opp_find_freq_floor(dev, freq);
		}
	} else {
		/* freq is an upper bound: prefer the highest OPP at or below it. */
		opp = dev_pm_opp_find_freq_floor(dev, freq);
		if (opp == ERR_PTR(-ERANGE)) {
			/* Out of range: fall back to the closest OPP above. */
			opp = dev_pm_opp_find_freq_ceil(dev, freq);
		}
	}

	return opp;
}
1916 EXPORT_SYMBOL(devfreq_recommended_opp);
1917
1918 /**
1919 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
1920 * for any changes in the OPP availability
1921 * changes
1922 * @dev: The devfreq user device. (parent of devfreq)
1923 * @devfreq: The devfreq object.
1924 */
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	/* Subscribe the devfreq's notifier block to OPP availability changes. */
	return dev_pm_opp_register_notifier(dev, &devfreq->nb);
}
1929 EXPORT_SYMBOL(devfreq_register_opp_notifier);
1930
1931 /**
1932 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
1933 * notified for any changes in the OPP
1934 * availability changes anymore.
1935 * @dev: The devfreq user device. (parent of devfreq)
1936 * @devfreq: The devfreq object.
1937 *
1938 * At exit() callback of devfreq_dev_profile, this must be included if
1939 * devfreq_recommended_opp is used.
1940 */
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	/* Unsubscribe the devfreq's notifier block from OPP changes. */
	return dev_pm_opp_unregister_notifier(dev, &devfreq->nb);
}
1945 EXPORT_SYMBOL(devfreq_unregister_opp_notifier);
1946
/* devres release: undo devm_devfreq_register_opp_notifier() on teardown. */
static void devm_devfreq_opp_release(struct device *dev, void *res)
{
	devfreq_unregister_opp_notifier(dev, *(struct devfreq **)res);
}
1951
1952 /**
1953 * devm_devfreq_register_opp_notifier() - Resource-managed
1954 * devfreq_register_opp_notifier()
1955 * @dev: The devfreq user device. (parent of devfreq)
1956 * @devfreq: The devfreq object.
1957 */
devm_devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)1958 int devm_devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
1959 {
1960 struct devfreq **ptr;
1961 int ret;
1962
1963 ptr = devres_alloc(devm_devfreq_opp_release, sizeof(*ptr), GFP_KERNEL);
1964 if (!ptr) {
1965 return -ENOMEM;
1966 }
1967
1968 ret = devfreq_register_opp_notifier(dev, devfreq);
1969 if (ret) {
1970 devres_free(ptr);
1971 return ret;
1972 }
1973
1974 *ptr = devfreq;
1975 devres_add(dev, ptr);
1976
1977 return 0;
1978 }
1979 EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);
1980
1981 /**
1982 * devm_devfreq_unregister_opp_notifier() - Resource-managed
1983 * devfreq_unregister_opp_notifier()
1984 * @dev: The devfreq user device. (parent of devfreq)
1985 * @devfreq: The devfreq object.
1986 */
void devm_devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	/* Fire the matching devres entry now; WARN if none was registered. */
	WARN_ON(devres_release(dev, devm_devfreq_opp_release, devm_devfreq_dev_match, devfreq));
}
1991 EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);
1992
1993 /**
1994 * devfreq_register_notifier() - Register a driver with devfreq
1995 * @devfreq: The devfreq object.
1996 * @nb: The notifier block to register.
1997 * @list: DEVFREQ_TRANSITION_NOTIFIER.
1998 */
devfreq_register_notifier(struct devfreq *devfreq, struct notifier_block *nb, unsigned int list)1999 int devfreq_register_notifier(struct devfreq *devfreq, struct notifier_block *nb, unsigned int list)
2000 {
2001 int ret = 0;
2002
2003 if (!devfreq) {
2004 return -EINVAL;
2005 }
2006
2007 switch (list) {
2008 case DEVFREQ_TRANSITION_NOTIFIER:
2009 ret = srcu_notifier_chain_register(&devfreq->transition_notifier_list, nb);
2010 break;
2011 default:
2012 ret = -EINVAL;
2013 }
2014
2015 return ret;
2016 }
2017 EXPORT_SYMBOL(devfreq_register_notifier);
2018
/**
 * devfreq_unregister_notifier() - Unregister a driver with devfreq
 * @devfreq: The devfreq object.
 * @nb: The notifier block to be unregistered.
 * @list: DEVFREQ_TRANSITION_NOTIFIER.
 */
devfreq_unregister_notifier(struct devfreq *devfreq, struct notifier_block *nb, unsigned int list)2025 int devfreq_unregister_notifier(struct devfreq *devfreq, struct notifier_block *nb, unsigned int list)
2026 {
2027 int ret = 0;
2028
2029 if (!devfreq) {
2030 return -EINVAL;
2031 }
2032
2033 switch (list) {
2034 case DEVFREQ_TRANSITION_NOTIFIER:
2035 ret = srcu_notifier_chain_unregister(&devfreq->transition_notifier_list, nb);
2036 break;
2037 default:
2038 ret = -EINVAL;
2039 }
2040
2041 return ret;
2042 }
2043 EXPORT_SYMBOL(devfreq_unregister_notifier);
2044
/* devres payload recording what devm_devfreq_register_notifier() registered. */
struct devfreq_notifier_devres {
	struct devfreq *devfreq;	/* devfreq the notifier was registered on */
	struct notifier_block *nb;	/* notifier block to unregister at release */
	unsigned int list;		/* notifier list id (DEVFREQ_TRANSITION_NOTIFIER) */
};
2050
devm_devfreq_notifier_release(struct device *dev, void *res)2051 static void devm_devfreq_notifier_release(struct device *dev, void *res)
2052 {
2053 struct devfreq_notifier_devres *this = res;
2054
2055 devfreq_unregister_notifier(this->devfreq, this->nb, this->list);
2056 }
2057
2058 /**
2059 * devm_devfreq_register_notifier()
2060 * - Resource-managed devfreq_register_notifier()
2061 * @dev: The devfreq user device. (parent of devfreq)
2062 * @devfreq: The devfreq object.
2063 * @nb: The notifier block to be unregistered.
2064 * @list: DEVFREQ_TRANSITION_NOTIFIER.
2065 */
devm_devfreq_register_notifier(struct device *dev, struct devfreq *devfreq, struct notifier_block *nb, unsigned int list)2066 int devm_devfreq_register_notifier(struct device *dev, struct devfreq *devfreq, struct notifier_block *nb,
2067 unsigned int list)
2068 {
2069 struct devfreq_notifier_devres *ptr;
2070 int ret;
2071
2072 ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr), GFP_KERNEL);
2073 if (!ptr) {
2074 return -ENOMEM;
2075 }
2076
2077 ret = devfreq_register_notifier(devfreq, nb, list);
2078 if (ret) {
2079 devres_free(ptr);
2080 return ret;
2081 }
2082
2083 ptr->devfreq = devfreq;
2084 ptr->nb = nb;
2085 ptr->list = list;
2086 devres_add(dev, ptr);
2087
2088 return 0;
2089 }
2090 EXPORT_SYMBOL(devm_devfreq_register_notifier);
2091
2092 /**
2093 * devm_devfreq_unregister_notifier()
2094 * - Resource-managed devfreq_unregister_notifier()
2095 * @dev: The devfreq user device. (parent of devfreq)
2096 * @devfreq: The devfreq object.
2097 * @nb: The notifier block to be unregistered.
2098 * @list: DEVFREQ_TRANSITION_NOTIFIER.
2099 */
devm_devfreq_unregister_notifier(struct device *dev, struct devfreq *devfreq, struct notifier_block *nb, unsigned int list)2100 void devm_devfreq_unregister_notifier(struct device *dev, struct devfreq *devfreq, struct notifier_block *nb,
2101 unsigned int list)
2102 {
2103 WARN_ON(devres_release(dev, devm_devfreq_notifier_release, devm_devfreq_dev_match, devfreq));
2104 }
2105 EXPORT_SYMBOL(devm_devfreq_unregister_notifier);
2106