1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Generic OPP OF helpers
4 *
5 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
6 * Nishanth Menon
7 * Romit Dasgupta
8 * Kevin Hilman
9 */
10
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13 #include <linux/cpu.h>
14 #include <linux/errno.h>
15 #include <linux/device.h>
16 #include <linux/of_device.h>
17 #include <linux/pm_domain.h>
18 #include <linux/slab.h>
19 #include <linux/export.h>
20 #include <linux/energy_model.h>
21
22 #include "opp.h"
23
24 #define PHANDLE_MOD_VALUE 2
25 #define PHANDLE_DIV_VALUE 2
26 #define REGULATOR_COUNT_MUL 3
27 #define NUM_RECORD_MOD_VALUE 2
28 #define FREQ_MUL 1000
29
30 /*
31 * Returns opp descriptor node for a device node, caller must
32 * do of_node_put().
33 */
static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np,
						     int index)
{
	/*
	 * "operating-points-v2" may hold several phandles when the node is
	 * a power domain provider, hence the index argument.
	 */
	return of_parse_phandle(np, "operating-points-v2", index);
}
39
40 /* Returns opp descriptor node for a device, caller must do of_node_put() */
dev_pm_opp_of_get_opp_desc_node(struct device *dev)41 struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
42 {
43 return _opp_of_get_opp_desc_node(dev->of_node, 0);
44 }
45 EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);
46
_managed_opp(struct device *dev, int index)47 struct opp_table *_managed_opp(struct device *dev, int index)
48 {
49 struct opp_table *opp_table, *managed_table = NULL;
50 struct device_node *np;
51
52 np = _opp_of_get_opp_desc_node(dev->of_node, index);
53 if (!np) {
54 return NULL;
55 }
56
57 list_for_each_entry(opp_table, &opp_tables, node)
58 {
59 if (opp_table->np == np) {
60 /*
61 * Multiple devices can point to the same OPP table and
62 * so will have same node-pointer, np.
63 *
64 * But the OPPs will be considered as shared only if the
65 * OPP table contains a "opp-shared" property.
66 */
67 if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
68 _get_opp_table_kref(opp_table);
69 managed_table = opp_table;
70 }
71
72 break;
73 }
74 }
75
76 of_node_put(np);
77
78 return managed_table;
79 }
80
81 /* The caller must call dev_pm_opp_put() after the OPP is used */
_find_opp_of_np(struct opp_table *opp_table, struct device_node *opp_np)82 static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table, struct device_node *opp_np)
83 {
84 struct dev_pm_opp *opp;
85
86 mutex_lock(&opp_table->lock);
87
88 list_for_each_entry(opp, &opp_table->opp_list, node)
89 {
90 if (opp->np == opp_np) {
91 dev_pm_opp_get(opp);
92 mutex_unlock(&opp_table->lock);
93 return opp;
94 }
95 }
96
97 mutex_unlock(&opp_table->lock);
98
99 return NULL;
100 }
101
static struct device_node *of_parse_required_opp(struct device_node *np,
						 int index)
{
	/* Each "required-opps" entry is a phandle to another OPP node */
	return of_parse_phandle(np, "required-opps", index);
}
106
107 /* The caller must call dev_pm_opp_put_opp_table() after the table is used */
_find_table_of_opp_np(struct device_node *opp_np)108 static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np)
109 {
110 struct opp_table *opp_table;
111 struct device_node *opp_table_np;
112
113 lockdep_assert_held(&opp_table_lock);
114
115 opp_table_np = of_get_parent(opp_np);
116 if (!opp_table_np) {
117 goto err;
118 }
119
120 /* It is safe to put the node now as all we need now is its address */
121 of_node_put(opp_table_np);
122
123 list_for_each_entry(opp_table, &opp_tables, node)
124 {
125 if (opp_table_np == opp_table->np) {
126 _get_opp_table_kref(opp_table);
127 return opp_table;
128 }
129 }
130
131 err:
132 return ERR_PTR(-ENODEV);
133 }
134
135 /* Free resources previously acquired by _opp_table_alloc_required_tables() */
_opp_table_free_required_tables(struct opp_table *opp_table)136 static void _opp_table_free_required_tables(struct opp_table *opp_table)
137 {
138 struct opp_table **required_opp_tables = opp_table->required_opp_tables;
139 int i;
140
141 if (!required_opp_tables) {
142 return;
143 }
144
145 for (i = 0; i < opp_table->required_opp_count; i++) {
146 if (IS_ERR_OR_NULL(required_opp_tables[i])) {
147 break;
148 }
149
150 dev_pm_opp_put_opp_table(required_opp_tables[i]);
151 }
152
153 kfree(required_opp_tables);
154
155 opp_table->required_opp_count = 0;
156 opp_table->required_opp_tables = NULL;
157 }
158
159 /*
160 * Populate all devices and opp tables which are part of "required-opps" list.
161 * Checking only the first OPP node should be enough.
162 */
_opp_table_alloc_required_tables(struct opp_table *opp_table, struct device *dev, struct device_node *opp_np)163 static void _opp_table_alloc_required_tables(struct opp_table *opp_table, struct device *dev,
164 struct device_node *opp_np)
165 {
166 struct opp_table **required_opp_tables;
167 struct device_node *required_np, *np;
168 int count, i;
169
170 /* Traversing the first OPP node is all we need */
171 np = of_get_next_available_child(opp_np, NULL);
172 if (!np) {
173 dev_err(dev, "Empty OPP table\n");
174 return;
175 }
176
177 count = of_count_phandle_with_args(np, "required-opps", NULL);
178 if (!count) {
179 goto put_np;
180 }
181
182 required_opp_tables = kcalloc(count, sizeof(*required_opp_tables), GFP_KERNEL);
183 if (!required_opp_tables) {
184 goto put_np;
185 }
186
187 opp_table->required_opp_tables = required_opp_tables;
188 opp_table->required_opp_count = count;
189
190 for (i = 0; i < count; i++) {
191 required_np = of_parse_required_opp(np, i);
192 if (!required_np) {
193 goto free_required_tables;
194 }
195
196 required_opp_tables[i] = _find_table_of_opp_np(required_np);
197 of_node_put(required_np);
198
199 if (IS_ERR(required_opp_tables[i])) {
200 goto free_required_tables;
201 }
202
203 /*
204 * We only support genpd's OPPs in the "required-opps" for now,
205 * as we don't know how much about other cases. Error out if the
206 * required OPP doesn't belong to a genpd.
207 */
208 if (!required_opp_tables[i]->is_genpd) {
209 dev_err(dev, "required-opp doesn't belong to genpd: %pOF\n", required_np);
210 goto free_required_tables;
211 }
212 }
213
214 goto put_np;
215
216 free_required_tables:
217 _opp_table_free_required_tables(opp_table);
218 put_np:
219 of_node_put(np);
220 }
221
_of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index)222 void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index)
223 {
224 struct device_node *np, *opp_np;
225 u32 val;
226
227 /*
228 * Only required for backward compatibility with v1 bindings, but isn't
229 * harmful for other cases. And so we do it unconditionally.
230 */
231 np = of_node_get(dev->of_node);
232 if (!np) {
233 return;
234 }
235
236 if (!of_property_read_u32(np, "clock-latency", &val)) {
237 opp_table->clock_latency_ns_max = val;
238 }
239 of_property_read_u32(np, "voltage-tolerance", &opp_table->voltage_tolerance_v1);
240
241 if (of_find_property(np, "#power-domain-cells", NULL)) {
242 opp_table->is_genpd = true;
243 }
244
245 /* Get OPP table node */
246 opp_np = _opp_of_get_opp_desc_node(np, index);
247 of_node_put(np);
248
249 if (!opp_np) {
250 return;
251 }
252
253 if (of_property_read_bool(opp_np, "opp-shared")) {
254 opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
255 } else {
256 opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;
257 }
258
259 opp_table->np = opp_np;
260
261 _opp_table_alloc_required_tables(opp_table, dev, opp_np);
262 of_node_put(opp_np);
263 }
264
/* Undo _of_init_opp_table(): drop the "required-opps" table references */
void _of_clear_opp_table(struct opp_table *opp_table)
{
	_opp_table_free_required_tables(opp_table);
}
269
270 /*
271 * Release all resources previously acquired with a call to
272 * _of_opp_alloc_required_opps().
273 */
_of_opp_free_required_opps(struct opp_table *opp_table, struct dev_pm_opp *opp)274 void _of_opp_free_required_opps(struct opp_table *opp_table, struct dev_pm_opp *opp)
275 {
276 struct dev_pm_opp **required_opps = opp->required_opps;
277 int i;
278
279 if (!required_opps) {
280 return;
281 }
282
283 for (i = 0; i < opp_table->required_opp_count; i++) {
284 if (!required_opps[i]) {
285 break;
286 }
287
288 /* Put the reference back */
289 dev_pm_opp_put(required_opps[i]);
290 }
291
292 kfree(required_opps);
293 opp->required_opps = NULL;
294 }
295
296 /* Populate all required OPPs which are part of "required-opps" list */
_of_opp_alloc_required_opps(struct opp_table *opp_table, struct dev_pm_opp *opp)297 static int _of_opp_alloc_required_opps(struct opp_table *opp_table, struct dev_pm_opp *opp)
298 {
299 struct dev_pm_opp **required_opps;
300 struct opp_table *required_table;
301 struct device_node *np;
302 int i, ret, count = opp_table->required_opp_count;
303
304 if (!count) {
305 return 0;
306 }
307
308 required_opps = kcalloc(count, sizeof(*required_opps), GFP_KERNEL);
309 if (!required_opps) {
310 return -ENOMEM;
311 }
312
313 opp->required_opps = required_opps;
314
315 for (i = 0; i < count; i++) {
316 required_table = opp_table->required_opp_tables[i];
317
318 np = of_parse_required_opp(opp->np, i);
319 if (unlikely(!np)) {
320 ret = -ENODEV;
321 goto free_required_opps;
322 }
323
324 required_opps[i] = _find_opp_of_np(required_table, np);
325 of_node_put(np);
326
327 if (!required_opps[i]) {
328 pr_err("%s: Unable to find required OPP node: %pOF (%d)\n", __func__, opp->np, i);
329 ret = -ENODEV;
330 goto free_required_opps;
331 }
332 }
333
334 return 0;
335
336 free_required_opps:
337 _of_opp_free_required_opps(opp_table, opp);
338
339 return ret;
340 }
341
_bandwidth_supported(struct device *dev, struct opp_table *opp_table)342 static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table)
343 {
344 struct device_node *np, *opp_np;
345 struct property *prop;
346
347 if (!opp_table) {
348 np = of_node_get(dev->of_node);
349 if (!np) {
350 return -ENODEV;
351 }
352 opp_np = _opp_of_get_opp_desc_node(np, 0);
353 of_node_put(np);
354 } else {
355 opp_np = of_node_get(opp_table->np);
356 }
357 /* Lets not fail in case we are parsing opp-v1 bindings */
358 if (!opp_np) {
359 return 0;
360 }
361 /* Checking only first OPP is sufficient */
362 np = of_get_next_available_child(opp_np, NULL);
363 of_node_put(opp_np);
364 if (!np) {
365 dev_err(dev, "OPP table empty\n");
366 return -EINVAL;
367 }
368 prop = of_find_property(np, "opp-peak-kBps", NULL);
369 of_node_put(np);
370 if (!prop || !prop->length) {
371 return 0;
372 }
373 return 1;
374 }
375
int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table)
{
	struct device_node *np;
	int ret, i, count, num_paths;
	struct icc_path **paths;

	/* 0: table has no bandwidth properties (nothing to do); <0: error */
	ret = _bandwidth_supported(dev, opp_table);
	if (ret <= 0) {
		return ret;
	}

	ret = 0;

	np = of_node_get(dev->of_node);
	if (!np) {
		return 0;
	}

	count = of_count_phandle_with_args(np, "interconnects", "#interconnect-cells");
	of_node_put(np);
	/* Missing "interconnects" is not a failure */
	if (count < 0) {
		return 0;
	}

	/* two phandles when #interconnect-cells = <1> */
	if (count % PHANDLE_MOD_VALUE) {
		dev_err(dev, "%s: Invalid interconnects values\n", __func__);
		return -EINVAL;
	}

	num_paths = count / PHANDLE_DIV_VALUE;
	paths = kcalloc(num_paths, sizeof(*paths), GFP_KERNEL);
	if (!paths) {
		return -ENOMEM;
	}

	for (i = 0; i < num_paths; i++) {
		paths[i] = of_icc_get_by_index(dev, i);
		if (IS_ERR(paths[i])) {
			ret = PTR_ERR(paths[i]);
			/* Probe deferral is expected; don't spam the log */
			if (ret != -EPROBE_DEFER) {
				dev_err(dev, "%s: Unable to get path%d: %d\n", __func__, i, ret);
			}
			/* paths[0..i-1] are valid and released at err below */
			goto err;
		}
	}

	if (opp_table) {
		/* Ownership of the paths array transfers to the OPP table */
		opp_table->paths = paths;
		opp_table->path_count = num_paths;
		return 0;
	}

	/*
	 * No opp_table: the call only verified path availability (ret == 0
	 * here), so deliberately fall through and release everything.
	 */
err:
	while (i--) {
		icc_put(paths[i]);
	}

	kfree(paths);

	return ret;
}
438 EXPORT_SYMBOL_GPL(dev_pm_opp_of_find_icc_paths);
439
_opp_is_supported(struct device *dev, struct opp_table *opp_table, struct device_node *np)440 static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table, struct device_node *np)
441 {
442 unsigned int levels = opp_table->supported_hw_count;
443 int count, versions, ret, i, j;
444 u32 val;
445
446 if (!opp_table->supported_hw) {
447 /*
448 * In the case that no supported_hw has been set by the
449 * platform but there is an opp-supported-hw value set for
450 * an OPP then the OPP should not be enabled as there is
451 * no way to see if the hardware supports it.
452 */
453 if (of_find_property(np, "opp-supported-hw", NULL)) {
454 return false;
455 } else {
456 return true;
457 }
458 }
459
460 count = of_property_count_u32_elems(np, "opp-supported-hw");
461 if (count <= 0 || count % levels) {
462 dev_err(dev, "%s: Invalid opp-supported-hw property (%d)\n", __func__, count);
463 return false;
464 }
465
466 versions = count / levels;
467
468 /* All levels in at least one of the versions should match */
469 for (i = 0; i < versions; i++) {
470 bool supported = true;
471
472 for (j = 0; j < levels; j++) {
473 ret = of_property_read_u32_index(np, "opp-supported-hw", i * levels + j, &val);
474 if (ret) {
475 dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n", __func__,
476 i * levels + j, ret);
477 return false;
478 }
479
480 /* Check if the level is supported */
481 if (!(val & opp_table->supported_hw[j])) {
482 supported = false;
483 break;
484 }
485 }
486
487 if (supported) {
488 return true;
489 }
490 }
491
492 return false;
493 }
494
opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev, struct opp_table *opp_table)495 static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev, struct opp_table *opp_table)
496 {
497 u32 *microvolt, *microamp = NULL;
498 int supplies = opp_table->regulator_count, vcount, icount, ret, i, j;
499 struct property *prop = NULL;
500 char name[NAME_MAX];
501
502 /* Search for "opp-microvolt-<name>" */
503 if (opp_table->prop_name) {
504 snprintf(name, sizeof(name), "opp-microvolt-%s", opp_table->prop_name);
505 prop = of_find_property(opp->np, name, NULL);
506 }
507
508 if (!prop) {
509 /* Search for "opp-microvolt" */
510 sprintf(name, "opp-microvolt");
511 prop = of_find_property(opp->np, name, NULL);
512 /* Missing property isn't a problem, but an invalid entry is */
513 if (!prop) {
514 if (unlikely(supplies == -1)) {
515 /* Initialize regulator_count */
516 opp_table->regulator_count = 0;
517 return 0;
518 }
519
520 if (!supplies) {
521 return 0;
522 }
523
524 dev_err(dev, "%s: opp-microvolt missing although OPP managing regulators\n", __func__);
525 return -EINVAL;
526 }
527 }
528
529 if (unlikely(supplies == -1)) {
530 /* Initialize regulator_count */
531 supplies = opp_table->regulator_count = 1;
532 } else if (unlikely(!supplies)) {
533 dev_err(dev, "%s: opp-microvolt wasn't expected\n", __func__);
534 return -EINVAL;
535 }
536
537 vcount = of_property_count_u32_elems(opp->np, name);
538 if (vcount < 0) {
539 dev_err(dev, "%s: Invalid %s property (%d)\n", __func__, name, vcount);
540 return vcount;
541 }
542
543 /* There can be one or three elements per supply */
544 if (vcount != supplies && vcount != supplies * REGULATOR_COUNT_MUL) {
545 dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n", __func__, name, vcount,
546 supplies);
547 return -EINVAL;
548 }
549
550 microvolt = kmalloc_array(vcount, sizeof(*microvolt), GFP_KERNEL);
551 if (!microvolt) {
552 return -ENOMEM;
553 }
554
555 ret = of_property_read_u32_array(opp->np, name, microvolt, vcount);
556 if (ret) {
557 dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
558 ret = -EINVAL;
559 goto free_microvolt;
560 }
561
562 /* Search for "opp-microamp-<name>" */
563 prop = NULL;
564 if (opp_table->prop_name) {
565 snprintf(name, sizeof(name), "opp-microamp-%s", opp_table->prop_name);
566 prop = of_find_property(opp->np, name, NULL);
567 }
568
569 if (!prop) {
570 /* Search for "opp-microamp" */
571 sprintf(name, "opp-microamp");
572 prop = of_find_property(opp->np, name, NULL);
573 }
574
575 if (prop) {
576 icount = of_property_count_u32_elems(opp->np, name);
577 if (icount < 0) {
578 dev_err(dev, "%s: Invalid %s property (%d)\n", __func__, name, icount);
579 ret = icount;
580 goto free_microvolt;
581 }
582
583 if (icount != supplies) {
584 dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n", __func__, name,
585 icount, supplies);
586 ret = -EINVAL;
587 goto free_microvolt;
588 }
589
590 microamp = kmalloc_array(icount, sizeof(*microamp), GFP_KERNEL);
591 if (!microamp) {
592 ret = -EINVAL;
593 goto free_microvolt;
594 }
595
596 ret = of_property_read_u32_array(opp->np, name, microamp, icount);
597 if (ret) {
598 dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
599 ret = -EINVAL;
600 goto free_microamp;
601 }
602 }
603
604 for (i = 0, j = 0; i < supplies; i++) {
605 opp->supplies[i].u_volt = microvolt[j++];
606
607 if (vcount == supplies) {
608 opp->supplies[i].u_volt_min = opp->supplies[i].u_volt;
609 opp->supplies[i].u_volt_max = opp->supplies[i].u_volt;
610 } else {
611 opp->supplies[i].u_volt_min = microvolt[j++];
612 opp->supplies[i].u_volt_max = microvolt[j++];
613 }
614
615 if (microamp) {
616 opp->supplies[i].u_amp = microamp[i];
617 }
618 }
619
620 free_microamp:
621 kfree(microamp);
622 free_microvolt:
623 kfree(microvolt);
624
625 return ret;
626 }
627
628 /**
629 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
630 * entries
631 * @dev: device pointer used to lookup OPP table.
632 *
633 * Free OPPs created using static entries present in DT.
634 */
void dev_pm_opp_of_remove_table(struct device *dev)
{
	/* Thin wrapper: the generic helper performs the actual removal */
	dev_pm_opp_remove_table(dev);
}
639 EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
640
static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *table,
		    struct device_node *np, bool peak)
{
	const char *name = peak ? "opp-peak-kBps" : "opp-avg-kBps";
	struct property *prop;
	int i, count, ret;
	u32 *bw;

	prop = of_find_property(np, name, NULL);
	if (!prop)
		return -ENODEV;

	/* One u32 per interconnect path is expected */
	count = prop->length / sizeof(u32);
	if (table->path_count != count) {
		pr_err("%s: Mismatch between %s and paths (%d %d)\n",
		       __func__, name, count, table->path_count);
		return -EINVAL;
	}

	bw = kmalloc_array(count, sizeof(*bw), GFP_KERNEL);
	if (!bw)
		return -ENOMEM;

	ret = of_property_read_u32_array(np, name, bw, count);
	if (ret) {
		pr_err("%s: Error parsing %s: %d\n", __func__, name, ret);
	} else {
		for (i = 0; i < count; i++) {
			if (peak)
				new_opp->bandwidth[i].peak = kBps_to_icc(bw[i]);
			else
				new_opp->bandwidth[i].avg = kBps_to_icc(bw[i]);
		}
	}

	kfree(bw);
	return ret;
}
682
static int _read_opp_key(struct dev_pm_opp *new_opp, struct opp_table *table,
			 struct device_node *np, bool *rate_not_available)
{
	bool found = false;
	u64 rate;
	int ret;

	ret = of_property_read_u64(np, "opp-hz", &rate);
	*rate_not_available = !!ret;
	if (!ret) {
		/*
		 * clk API rates are unsigned long, hence the explicit cast.
		 * Revisit once 64-bit rates are guaranteed there.
		 */
		new_opp->rate = (unsigned long)rate;
		found = true;
	}

	/*
	 * Bandwidth consists of peak and average (optional) values:
	 * opp-peak-kBps = <path1_value path2_value>;
	 * opp-avg-kBps = <path1_value path2_value>;
	 */
	ret = _read_bw(new_opp, table, np, true);
	if (!ret) {
		found = true;
		ret = _read_bw(new_opp, table, np, false);
	}

	/* The properties were found but we failed to parse them */
	if (ret && ret != -ENODEV)
		return ret;

	if (!of_property_read_u32(np, "opp-level", &new_opp->level))
		found = true;

	/* At least one key (rate, bandwidth or level) must be present */
	return found ? 0 : ret;
}
728
729 /**
730 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
731 * @opp_table: OPP table
732 * @dev: device for which we do this operation
733 * @np: device node
734 *
735 * This function adds an opp definition to the opp table and returns status. The
736 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
737 * removed by dev_pm_opp_remove.
738 *
739 * Return
740 * Valid OPP pointer:
741 * On success
742 * NULL:
743 * Duplicate OPPs (both freq and volt are same) and opp->available
744 * OR if the OPP is not supported by hardware.
745 * ERR_PTR(-EEXIST):
746 * Freq are same and volt are different OR
747 * Duplicate OPPs (both freq and volt are same) and !opp->available
748 * ERR_PTR(-ENOMEM):
749 * Memory allocation failure
750 * ERR_PTR(-EINVAL):
751 * Failed parsing the OPP node
752 */
static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table, struct device *dev, struct device_node *np)
{
	struct dev_pm_opp *new_opp;
	u32 val;
	int ret;
	bool rate_not_available = false;

	new_opp = _opp_allocate(opp_table);
	if (!new_opp) {
		return ERR_PTR(-ENOMEM);
	}

	/* Parse the OPP key: opp-hz, bandwidth and/or opp-level */
	ret = _read_opp_key(new_opp, opp_table, np, &rate_not_available);
	if (ret < 0 && !opp_table->is_genpd) {
		/* genpd tables may omit a key; everyone else must have one */
		dev_err(dev, "%s: opp key field not found\n", __func__);
		goto free_opp;
	}

	/* Check if the OPP supports hardware's hierarchy of versions or not */
	if (!_opp_is_supported(dev, opp_table, np)) {
		/* Not an error: OPPs for other HW revisions are skipped */
		dev_dbg(dev, "OPP not supported by hardware: %lu\n", new_opp->rate);
		goto free_opp;
	}

	new_opp->turbo = of_property_read_bool(np, "turbo-mode");

	new_opp->np = np;
	new_opp->dynamic = false;
	new_opp->available = true;

	/* Resolve the "required-opps" phandles into OPP references */
	ret = _of_opp_alloc_required_opps(opp_table, new_opp);
	if (ret) {
		goto free_opp;
	}

	if (!of_property_read_u32(np, "clock-latency-ns", &val)) {
		new_opp->clock_latency_ns = val;
	}

	ret = opp_parse_supplies(new_opp, dev, opp_table);
	if (ret) {
		goto free_required_opps;
	}

	if (opp_table->is_genpd) {
		new_opp->pstate = pm_genpd_opp_to_performance_state(dev, new_opp);
	}

	ret = _opp_add(dev, new_opp, opp_table, rate_not_available);
	if (ret) {
		/* Don't return error for duplicate OPPs */
		if (ret == -EBUSY) {
			ret = 0;
		}
		goto free_required_opps;
	}

	/* OPP to select on device suspend */
	if (of_property_read_bool(np, "opp-suspend")) {
		if (opp_table->suspend_opp) {
			/* Pick the OPP with higher rate as suspend OPP */
			if (new_opp->rate > opp_table->suspend_opp->rate) {
				opp_table->suspend_opp->suspend = false;
				new_opp->suspend = true;
				opp_table->suspend_opp = new_opp;
			}
		} else {
			new_opp->suspend = true;
			opp_table->suspend_opp = new_opp;
		}
	}

	/* Track the worst-case clock latency across the whole table */
	if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max) {
		opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
	}

	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n", __func__, new_opp->turbo, new_opp->rate,
		 new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min, new_opp->supplies[0].u_volt_max,
		 new_opp->clock_latency_ns);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
	return new_opp;

free_required_opps:
	_of_opp_free_required_opps(opp_table, new_opp);
free_opp:
	_opp_free(new_opp);

	/* NULL (not an error) when the OPP was skipped or a duplicate */
	return ret ? ERR_PTR(ret) : NULL;
}
847
848 /* Initializes OPP tables based on new bindings */
_of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)849 static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
850 {
851 struct device_node *np;
852 int ret, count = 0;
853 struct dev_pm_opp *opp;
854
855 /* OPP table is already initialized for the device */
856 mutex_lock(&opp_table->lock);
857 if (opp_table->parsed_static_opps) {
858 opp_table->parsed_static_opps++;
859 mutex_unlock(&opp_table->lock);
860 return 0;
861 }
862
863 opp_table->parsed_static_opps = 1;
864 mutex_unlock(&opp_table->lock);
865
866 /* We have opp-table node now, iterate over it and add OPPs */
867 for_each_available_child_of_node(opp_table->np, np)
868 {
869 opp = _opp_add_static_v2(opp_table, dev, np);
870 if (IS_ERR(opp)) {
871 ret = PTR_ERR(opp);
872 dev_err(dev, "%s: Failed to add OPP, %d\n", __func__, ret);
873 of_node_put(np);
874 goto remove_static_opp;
875 } else if (opp) {
876 count++;
877 }
878 }
879
880 /* There should be one or more OPPs defined */
881 if (!count) {
882 dev_err(dev, "%s: no supported OPPs", __func__);
883 ret = -ENOENT;
884 goto remove_static_opp;
885 }
886
887 list_for_each_entry(opp, &opp_table->opp_list, node)
888 {
889 /* Any non-zero performance state would enable the feature */
890 if (opp->pstate) {
891 opp_table->genpd_performance_state = true;
892 break;
893 }
894 }
895
896 return 0;
897
898 remove_static_opp:
899 _opp_remove_all_static(opp_table);
900
901 return ret;
902 }
903
904 /* Initializes OPP tables based on old-deprecated bindings */
_of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)905 static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
906 {
907 const struct property *prop;
908 const __be32 *val;
909 int nr, ret = 0;
910
911 mutex_lock(&opp_table->lock);
912 if (opp_table->parsed_static_opps) {
913 opp_table->parsed_static_opps++;
914 mutex_unlock(&opp_table->lock);
915 return 0;
916 }
917
918 opp_table->parsed_static_opps = 1;
919 mutex_unlock(&opp_table->lock);
920
921 prop = of_find_property(dev->of_node, "operating-points", NULL);
922 if (!prop) {
923 ret = -ENODEV;
924 goto remove_static_opp;
925 }
926 if (!prop->value) {
927 ret = -ENODATA;
928 goto remove_static_opp;
929 }
930
931 /*
932 * Each OPP is a set of tuples consisting of frequency and
933 * voltage like <freq-kHz vol-uV>.
934 */
935 nr = prop->length / sizeof(u32);
936 if (nr % NUM_RECORD_MOD_VALUE) {
937 dev_err(dev, "%s: Invalid OPP table\n", __func__);
938 ret = -EINVAL;
939 goto remove_static_opp;
940 }
941
942 val = prop->value;
943 while (nr) {
944 unsigned long freq = be32_to_cpup(val++) * FREQ_MUL;
945 unsigned long volt = be32_to_cpup(val++);
946
947 ret = _opp_add_v1(opp_table, dev, freq, volt, false);
948 if (ret) {
949 dev_err(dev, "%s: Failed to add OPP %ld (%d)\n", __func__, freq, ret);
950 goto remove_static_opp;
951 }
952 nr -= NUM_RECORD_MOD_VALUE;
953 }
954
955 return 0;
956
957 remove_static_opp:
958 _opp_remove_all_static(opp_table);
959
960 return ret;
961 }
962
963 /**
964 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
965 * @dev: device pointer used to lookup OPP table.
966 *
967 * Register the initial OPP table with the OPP library for given device.
968 *
969 * Return:
970 * 0 On success OR
971 * Duplicate OPPs (both freq and volt are same) and opp->available
972 * -EEXIST Freq are same and volt are different OR
973 * Duplicate OPPs (both freq and volt are same) and !opp->available
974 * -ENOMEM Memory allocation failure
975 * -ENODEV when 'operating-points' property is not found or is invalid data
976 * in device node.
977 * -ENODATA when empty 'operating-points' property is found
978 * -EINVAL when invalid entries are found in opp-v2 table
979 */
dev_pm_opp_of_add_table(struct device *dev)980 int dev_pm_opp_of_add_table(struct device *dev)
981 {
982 struct opp_table *opp_table;
983 int ret;
984
985 opp_table = dev_pm_opp_get_opp_table_indexed(dev, 0);
986 if (IS_ERR(opp_table)) {
987 return PTR_ERR(opp_table);
988 }
989
990 /*
991 * OPPs have two version of bindings now. Also try the old (v1)
992 * bindings for backward compatibility with older dtbs.
993 */
994 if (opp_table->np) {
995 ret = _of_add_opp_table_v2(dev, opp_table);
996 } else {
997 ret = _of_add_opp_table_v1(dev, opp_table);
998 }
999
1000 if (ret) {
1001 dev_pm_opp_put_opp_table(opp_table);
1002 }
1003
1004 return ret;
1005 }
1006 EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
1007
1008 /**
1009 * dev_pm_opp_of_add_table_indexed() - Initialize indexed opp table from device tree
1010 * @dev: device pointer used to lookup OPP table.
1011 * @index: Index number.
1012 *
1013 * Register the initial OPP table with the OPP library for given device only
1014 * using the "operating-points-v2" property.
1015 *
1016 * Return:
1017 * 0 On success OR
1018 * Duplicate OPPs (both freq and volt are same) and opp->available
1019 * -EEXIST Freq are same and volt are different OR
1020 * Duplicate OPPs (both freq and volt are same) and !opp->available
1021 * -ENOMEM Memory allocation failure
1022 * -ENODEV when 'operating-points' property is not found or is invalid data
1023 * in device node.
1024 * -ENODATA when empty 'operating-points' property is found
1025 * -EINVAL when invalid entries are found in opp-v2 table
1026 */
dev_pm_opp_of_add_table_indexed(struct device *dev, int index)1027 int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
1028 {
1029 struct opp_table *opp_table;
1030 int ret, count;
1031
1032 if (index) {
1033 /*
1034 * If only one phandle is present, then the same OPP table
1035 * applies for all index requests.
1036 */
1037 count = of_count_phandle_with_args(dev->of_node, "operating-points-v2", NULL);
1038 if (count == 1) {
1039 index = 0;
1040 }
1041 }
1042
1043 opp_table = dev_pm_opp_get_opp_table_indexed(dev, index);
1044 if (IS_ERR(opp_table)) {
1045 return PTR_ERR(opp_table);
1046 }
1047
1048 ret = _of_add_opp_table_v2(dev, opp_table);
1049 if (ret) {
1050 dev_pm_opp_put_opp_table(opp_table);
1051 }
1052
1053 return ret;
1054 }
1055 EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed);
1056
1057 /* CPU device specific helpers */
1058
1059 /**
1060 * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask
1061 * @cpumask: cpumask for which OPP table needs to be removed
1062 *
1063 * This removes the OPP tables for CPUs present in the @cpumask.
1064 * This should be used only to remove static entries created from DT.
1065 */
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
{
	/*
	 * -1: presumably "no last-cpu cursor", i.e. remove for every CPU in
	 * the mask — confirm against _dev_pm_opp_cpumask_remove_table().
	 */
	_dev_pm_opp_cpumask_remove_table(cpumask, -1);
}
1070 EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
1071
1072 /**
1073 * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask
1074 * @cpumask: cpumask for which OPP table needs to be added.
1075 *
1076 * This adds the OPP tables for CPUs present in the @cpumask.
1077 */
dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)1078 int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
1079 {
1080 struct device *cpu_dev;
1081 int cpu, ret;
1082
1083 if (WARN_ON(cpumask_empty(cpumask))) {
1084 return -ENODEV;
1085 }
1086
1087 for_each_cpu(cpu, cpumask)
1088 {
1089 cpu_dev = get_cpu_device(cpu);
1090 if (!cpu_dev) {
1091 pr_err("%s: failed to get cpu%d device\n", __func__, cpu);
1092 ret = -ENODEV;
1093 goto remove_table;
1094 }
1095
1096 ret = dev_pm_opp_of_add_table(cpu_dev);
1097 if (ret) {
1098 /*
1099 * OPP may get registered dynamically, don't print error
1100 * message here.
1101 */
1102 pr_debug("%s: couldn't find opp table for cpu:%d, %d\n", __func__, cpu, ret);
1103
1104 goto remove_table;
1105 }
1106 }
1107
1108 return 0;
1109
1110 remove_table:
1111 /* Free all other OPPs */
1112 _dev_pm_opp_cpumask_remove_table(cpumask, cpu);
1113
1114 return ret;
1115 }
1116 EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
1117
/**
 * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with
 *				      @cpu_dev using operating-points-v2
 *				      bindings.
 *
 * @cpu_dev: CPU device for which we do this operation
 * @cpumask: cpumask to update with information of sharing CPUs
 *
 * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
 *
 * This works only for operating-points-v2 bindings, and returns -ENOENT if
 * operating-points-v2 isn't present for @cpu_dev.
 */
dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)1135 int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
1136 {
1137 struct device_node *np, *tmp_np, *cpu_np;
1138 int cpu, ret = 0;
1139
1140 /* Get OPP descriptor node */
1141 np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
1142 if (!np) {
1143 dev_dbg(cpu_dev, "%s: Couldn't find opp node.\n", __func__);
1144 return -ENOENT;
1145 }
1146
1147 cpumask_set_cpu(cpu_dev->id, cpumask);
1148
1149 /* OPPs are shared ? */
1150 if (!of_property_read_bool(np, "opp-shared")) {
1151 goto put_cpu_node;
1152 }
1153
1154 for_each_possible_cpu(cpu)
1155 {
1156 if (cpu == cpu_dev->id) {
1157 continue;
1158 }
1159
1160 cpu_np = of_cpu_device_node_get(cpu);
1161 if (!cpu_np) {
1162 dev_err(cpu_dev, "%s: failed to get cpu%d node\n", __func__, cpu);
1163 ret = -ENOENT;
1164 goto put_cpu_node;
1165 }
1166
1167 /* Get OPP descriptor node */
1168 tmp_np = _opp_of_get_opp_desc_node(cpu_np, 0);
1169 of_node_put(cpu_np);
1170 if (!tmp_np) {
1171 pr_err("%pOF: Couldn't find opp node\n", cpu_np);
1172 ret = -ENOENT;
1173 goto put_cpu_node;
1174 }
1175
1176 /* CPUs are sharing opp node */
1177 if (np == tmp_np) {
1178 cpumask_set_cpu(cpu, cpumask);
1179 }
1180
1181 of_node_put(tmp_np);
1182 }
1183
1184 put_cpu_node:
1185 of_node_put(np);
1186 return ret;
1187 }
1188 EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
1189
1190 /**
1191 * of_get_required_opp_performance_state() - Search for required OPP and return its performance state.
1192 * @np: Node that contains the "required-opps" property.
1193 * @index: Index of the phandle to parse.
1194 *
1195 * Returns the performance state of the OPP pointed out by the "required-opps"
1196 * property at @index in @np.
1197 *
1198 * Return: Zero or positive performance state on success, otherwise negative
1199 * value on errors.
1200 */
int of_get_required_opp_performance_state(struct device_node *np, int index)
{
	struct dev_pm_opp *opp;
	struct device_node *required_np;
	struct opp_table *opp_table;
	int pstate = -EINVAL;

	/* Resolve the @index-th "required-opps" phandle to its OPP node */
	required_np = of_parse_required_opp(np, index);
	if (!required_np) {
		return -ENODEV;
	}

	/* Find the OPP table that owns the required OPP node */
	opp_table = _find_table_of_opp_np(required_np);
	if (IS_ERR(opp_table)) {
		pr_err("%s: Failed to find required OPP table %pOF: %ld\n", __func__, np, PTR_ERR(opp_table));
		goto put_required_np;
	}

	/* Look the OPP up in that table; missing OPP leaves pstate = -EINVAL */
	opp = _find_opp_of_np(opp_table, required_np);
	if (opp) {
		pstate = opp->pstate;
		dev_pm_opp_put(opp);
	}

	/* Drop references in reverse order of acquisition */
	dev_pm_opp_put_opp_table(opp_table);

put_required_np:
	of_node_put(required_np);

	return pstate;
}
EXPORT_SYMBOL_GPL(of_get_required_opp_performance_state);
1233
/**
 * dev_pm_opp_get_of_node() - Gets the DT node corresponding to an opp
 * @opp: opp for which the DT node has to be returned
 *
 * Return: DT node corresponding to the opp on success, NULL on error.
 *
 * The caller needs to put the node with of_node_put() after using it.
 */
dev_pm_opp_get_of_node(struct dev_pm_opp *opp)1242 struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
1243 {
1244 if (IS_ERR_OR_NULL(opp)) {
1245 pr_err("%s: Invalid parameters\n", __func__);
1246 return NULL;
1247 }
1248
1249 return of_node_get(opp->np);
1250 }
1251 EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node);
1252
1253 /*
1254 * Callback function provided to the Energy Model framework upon registration.
1255 * This computes the power estimated by @dev at @kHz if it is the frequency
1256 * of an existing OPP, or at the frequency of the first OPP above @kHz otherwise
1257 * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
1258 * frequency and @mW to the associated power. The power is estimated as
1259 * P = C * V^2 * f with C being the device's capacitance and V and f
1260 * respectively the voltage and frequency of the OPP.
1261 *
1262 * Returns -EINVAL if the power calculation failed because of missing
1263 * parameters, 0 otherwise.
1264 */
_get_power(unsigned long *mW, unsigned long *kHz, struct device *dev)1265 static int __maybe_unused _get_power(unsigned long *mW, unsigned long *kHz, struct device *dev)
1266 {
1267 struct dev_pm_opp *opp;
1268 struct device_node *np;
1269 unsigned long mV, Hz;
1270 u32 cap;
1271 u64 tmp;
1272 int ret;
1273
1274 np = of_node_get(dev->of_node);
1275 if (!np) {
1276 return -EINVAL;
1277 }
1278
1279 ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
1280 of_node_put(np);
1281 if (ret) {
1282 return -EINVAL;
1283 }
1284
1285 Hz = *kHz * 1000;
1286 opp = dev_pm_opp_find_freq_ceil(dev, &Hz);
1287 if (IS_ERR(opp)) {
1288 return -EINVAL;
1289 }
1290
1291 mV = dev_pm_opp_get_voltage(opp) / 1000;
1292 dev_pm_opp_put(opp);
1293 if (!mV) {
1294 return -EINVAL;
1295 }
1296
1297 tmp = (u64)cap * mV * mV * (Hz / 1000000);
1298 do_div(tmp, 1000000000);
1299
1300 *mW = (unsigned long)tmp;
1301 *kHz = Hz / 1000;
1302
1303 return 0;
1304 }
1305
1306 /**
1307 * dev_pm_opp_of_register_em() - Attempt to register an Energy Model
1308 * @dev : Device for which an Energy Model has to be registered
1309 * @cpus : CPUs for which an Energy Model has to be registered. For
1310 * other type of devices it should be set to NULL.
1311 *
1312 * This checks whether the "dynamic-power-coefficient" devicetree property has
1313 * been specified, and tries to register an Energy Model with it if it has.
1314 * Having this property means the voltages are known for OPPs and the EM
1315 * might be calculated.
1316 */
dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus)1317 int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus)
1318 {
1319 struct em_data_callback em_cb = EM_DATA_CB(_get_power);
1320 struct device_node *np;
1321 int ret, nr_opp;
1322 u32 cap;
1323
1324 if (IS_ERR_OR_NULL(dev)) {
1325 ret = -EINVAL;
1326 goto failed;
1327 }
1328
1329 nr_opp = dev_pm_opp_get_opp_count(dev);
1330 if (nr_opp <= 0) {
1331 ret = -EINVAL;
1332 goto failed;
1333 }
1334
1335 np = of_node_get(dev->of_node);
1336 if (!np) {
1337 ret = -EINVAL;
1338 goto failed;
1339 }
1340
1341 /*
1342 * Register an EM only if the 'dynamic-power-coefficient' property is
1343 * set in devicetree. It is assumed the voltage values are known if that
1344 * property is set since it is useless otherwise. If voltages are not
1345 * known, just let the EM registration fail with an error to alert the
1346 * user about the inconsistent configuration.
1347 */
1348 ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
1349 of_node_put(np);
1350 if (ret || !cap) {
1351 dev_dbg(dev, "Couldn't find proper 'dynamic-power-coefficient' in DT\n");
1352 ret = -EINVAL;
1353 goto failed;
1354 }
1355
1356 ret = em_dev_register_perf_domain(dev, nr_opp, &em_cb, cpus, true);
1357 if (ret) {
1358 goto failed;
1359 }
1360
1361 return 0;
1362
1363 failed:
1364 dev_dbg(dev, "Couldn't register Energy Model %d\n", ret);
1365 return ret;
1366 }
1367 EXPORT_SYMBOL_GPL(dev_pm_opp_of_register_em);
1368