// SPDX-License-Identifier: GPL-2.0-only
/*
 * Rockchip Generic dmc support.
 *
 * Copyright (c) 2022 Rockchip Electronics Co. Ltd.
 * Author: Finley Xiao <finley.xiao@rock-chips.com>
 */

#include <dt-bindings/clock/rockchip-ddr.h>
#include <dt-bindings/soc/rockchip-system-status.h>
#include <drm/drm_modeset_lock.h>
#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
#include <linux/devfreq-event.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/rockchip/rockchip_sip.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/suspend.h>
#include <linux/thermal.h>

#include <soc/rockchip/pm_domains.h>
#include <soc/rockchip/rkfb_dmc.h>
#include <soc/rockchip/rockchip_dmc.h>
#include <soc/rockchip/rockchip_sip.h>
#include <soc/rockchip/rockchip_system_monitor.h>
#include <soc/rockchip/rockchip-system-status.h>
#include <soc/rockchip/rockchip_opp_select.h>
#include <soc/rockchip/scpi.h>
#include <uapi/drm/drm_mode.h>

#include "governor.h"
#include "rockchip_dmc_timing.h"
#include "../clk/rockchip/clk.h"
#include "../gpu/drm/rockchip/rockchip_drm_drv.h"

#define system_status_to_dmcfreq(nb) container_of(nb, struct rockchip_dmcfreq, status_nb)
#define reboot_to_dmcfreq(nb) container_of(nb, struct rockchip_dmcfreq, reboot_nb)
#define boost_to_dmcfreq(work) container_of(work, struct rockchip_dmcfreq, boost_work)
#define input_hd_to_dmcfreq(hd) container_of(hd, struct rockchip_dmcfreq, input_handler)

#define VIDEO_1080P_SIZE (1920 * 1080)
#define FIQ_INIT_HANDLER (0x1)
#define FIQ_CPU_TGT_BOOT (0x0) /* to booting cpu */
#define FIQ_NUM_FOR_DCF (143) /* NA irq map to fiq for dcf */
#define DTS_PAR_OFFSET (4096)

#define FALLBACK_STATIC_TEMPERATURE 55000

#define ROCKCHIP_DE_SKEW_TWO 2
#define ROCKCHIP_DE_SKEW_FOUR 4
#define ROCKCHIP_DE_SKEW_ELEVEN 11
#define ROCKCHIP_DE_SKEW_TWENTY 20
#define ROCKCHIP_DE_SKEW_TWENTYONE 21

#define DMCFREQ_WAIT_CTRL_T_DCF_EN_TWO 2

struct dmc_freq_table {
	unsigned long freq;
	unsigned long volt;
};

struct share_params {
	u32 hz;
	u32 lcdc_type;
	u32 vop;
	u32 vop_dclk_mode;
	u32 sr_idle_en;
	u32 addr_mcu_el3;
	/*
	 * 1: need to wait flag1
	 * 0: never wait flag1
	 */
	u32 wait_flag1;
	/*
	 * 1: need to wait flag0
	 * 0: never wait flag0
	 */
	u32 wait_flag0;
	u32 complt_hwirq;
	u32 update_drv_odt_cfg;
	u32 update_deskew_cfg;

	u32 freq_count;
	u32 freq_info_mhz[6];
	/* if needed, add parameters after this */
};
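
/*
 * Note on layout: ddr_psci_param below points into the SIP shared page
 * returned by sip_smc_request_share_mem(). struct share_params occupies
 * the start of that page, and the per-SoC DTS timing blocks parsed by the
 * of_get_*_timings() helpers are written DTS_PAR_OFFSET bytes into the
 * same region, which is why they address it as timing + DTS_PAR_OFFSET / 4.
 */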

static struct share_params *ddr_psci_param;

struct rockchip_dmcfreq_ondemand_data {
	unsigned int upthreshold;
	unsigned int downdifferential;
};

struct rockchip_dmcfreq {
	struct device *dev;
	struct dmcfreq_common_info info;
	struct rockchip_dmcfreq_ondemand_data ondemand_data;
	struct clk *dmc_clk;
	struct devfreq_event_dev **edev;
	struct mutex lock; /* serializes access to video_info_list */
	struct dram_timing *timing;
	struct regulator *vdd_center;
	struct notifier_block status_nb;
	struct list_head video_info_list;
	struct freq_map_table *cpu_bw_tbl;
	struct work_struct boost_work;
	struct input_handler input_handler;
	struct monitor_dev_info *mdev_info;
	struct share_params *set_rate_params;

	unsigned long *nocp_bw;
	unsigned long rate, target_rate;
	unsigned long volt, target_volt;
	unsigned long auto_min_rate;
	unsigned long status_rate;
	unsigned long normal_rate;
	unsigned long video_1080p_rate;
	unsigned long video_4k_rate;
	unsigned long video_4k_10b_rate;
	unsigned long performance_rate;
	unsigned long hdmi_rate;
	unsigned long idle_rate;
	unsigned long suspend_rate;
	unsigned long reboot_rate;
	unsigned long boost_rate;
	unsigned long fixed_rate;
	unsigned long low_power_rate;

	unsigned long freq_count;
	unsigned long freq_info_rate[6];
	unsigned long rate_low;
	unsigned long rate_mid_low;
	unsigned long rate_mid_high;
	unsigned long rate_high;

	unsigned int min_cpu_freq;
	unsigned int system_status_en;
	unsigned int refresh;
	int edev_count;
	int dfi_id;
	int nocp_cpu_id;

	bool is_fixed;
	bool is_set_rate_direct;

	struct thermal_cooling_device *devfreq_cooling;
	u32 static_coefficient;
	s32 ts[4];
	struct thermal_zone_device *ddr_tz;

	unsigned int touchboostpulse_duration_val;
	u64 touchboostpulse_endtime;

	int (*set_auto_self_refresh)(u32 en);
};

static struct pm_qos_request pm_qos;

static inline unsigned long is_dualview(unsigned long status)
{
	return (status & SYS_STATUS_LCDC0) && (status & SYS_STATUS_LCDC1);
}

static inline unsigned long is_isp(unsigned long status)
{
	return (status & SYS_STATUS_ISP) || (status & SYS_STATUS_CIF0) || (status & SYS_STATUS_CIF1);
}

/*
 * Package the de-skew settings into px30_ddr_dts_config_timing. The result
 * is passed to the trusted firmware, which uses it directly to program the
 * de-skew registers.
 * input: de_skew
 * output: tim
 */
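/*
 * Packing scheme (derived from the loops below): each 4-bit de-skew value
 * is packed two per byte, high nibble first, so ca_de_skew[0] lands in
 * ca_skew[0] bits [7:4] and ca_de_skew[1] in ca_skew[0] bits [3:0]. The
 * CS0/CS1 arrays are packed in groups of 21 values per 11 bytes, with the
 * odd 21st value placed in bits [3:0] of the group's 11th byte.
 */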
static void px30_de_skew_set_2_reg(struct rk3328_ddr_de_skew_setting *de_skew, struct px30_ddr_dts_config_timing *tim)
{
	u32 n;
	u32 offset;
	u32 shift;

	memset_io(tim->ca_skew, 0, sizeof(tim->ca_skew));
	memset_io(tim->cs0_skew, 0, sizeof(tim->cs0_skew));
	memset_io(tim->cs1_skew, 0, sizeof(tim->cs1_skew));

	/* CA de-skew */
	for (n = 0; n < ARRAY_SIZE(de_skew->ca_de_skew); n++) {
		offset = n / ROCKCHIP_DE_SKEW_TWO;
		shift = n % ROCKCHIP_DE_SKEW_TWO;
		/* 0 => 4; 1 => 0 */
		shift = (shift == 0) ? ROCKCHIP_DE_SKEW_FOUR : 0;
		tim->ca_skew[offset] &= ~(0xf << shift);
		tim->ca_skew[offset] |= (de_skew->ca_de_skew[n] << shift);
	}

	/* CS0 data de-skew */
	for (n = 0; n < ARRAY_SIZE(de_skew->cs0_de_skew); n++) {
		offset = ((n / ROCKCHIP_DE_SKEW_TWENTYONE) * ROCKCHIP_DE_SKEW_ELEVEN) +
			 ((n % ROCKCHIP_DE_SKEW_TWENTYONE) / ROCKCHIP_DE_SKEW_TWO);
		shift = ((n % ROCKCHIP_DE_SKEW_TWENTYONE) % ROCKCHIP_DE_SKEW_TWO);
		if ((n % ROCKCHIP_DE_SKEW_TWENTYONE) == ROCKCHIP_DE_SKEW_TWENTY) {
			shift = 0;
		} else {
			/* 0 => 4; 1 => 0 */
			shift = (shift == 0) ? ROCKCHIP_DE_SKEW_FOUR : 0;
		}
		tim->cs0_skew[offset] &= ~(0xf << shift);
		tim->cs0_skew[offset] |= (de_skew->cs0_de_skew[n] << shift);
	}

	/* CS1 data de-skew */
	for (n = 0; n < ARRAY_SIZE(de_skew->cs1_de_skew); n++) {
		offset = ((n / ROCKCHIP_DE_SKEW_TWENTYONE) * ROCKCHIP_DE_SKEW_ELEVEN) +
			 ((n % ROCKCHIP_DE_SKEW_TWENTYONE) / ROCKCHIP_DE_SKEW_TWO);
		shift = ((n % ROCKCHIP_DE_SKEW_TWENTYONE) % ROCKCHIP_DE_SKEW_TWO);
		if ((n % ROCKCHIP_DE_SKEW_TWENTYONE) == ROCKCHIP_DE_SKEW_TWENTY) {
			shift = 0;
		} else {
			/* 0 => 4; 1 => 0 */
			shift = (shift == 0) ? ROCKCHIP_DE_SKEW_FOUR : 0;
		}
		tim->cs1_skew[offset] &= ~(0xf << shift);
		tim->cs1_skew[offset] |= (de_skew->cs1_de_skew[n] << shift);
	}
}

/*
 * Package the de-skew settings into rk3328_ddr_dts_config_timing. The result
 * is passed to the trusted firmware, which uses it directly to program the
 * de-skew registers.
 * input: de_skew
 * output: tim
 */
static void rk3328_de_skew_setting_2_register(struct rk3328_ddr_de_skew_setting *de_skew,
					      struct rk3328_ddr_dts_config_timing *tim)
{
	u32 n;
	u32 offset;
	u32 shift;

	memset_io(tim->ca_skew, 0, sizeof(tim->ca_skew));
	memset_io(tim->cs0_skew, 0, sizeof(tim->cs0_skew));
	memset_io(tim->cs1_skew, 0, sizeof(tim->cs1_skew));

	/* CA de-skew */
	for (n = 0; n < ARRAY_SIZE(de_skew->ca_de_skew); n++) {
		offset = n / ROCKCHIP_DE_SKEW_TWO;
		shift = n % ROCKCHIP_DE_SKEW_TWO;
		/* 0 => 4; 1 => 0 */
		shift = (shift == 0) ? ROCKCHIP_DE_SKEW_FOUR : 0;
		tim->ca_skew[offset] &= ~(0xf << shift);
		tim->ca_skew[offset] |= (de_skew->ca_de_skew[n] << shift);
	}

	/* CS0 data de-skew */
	for (n = 0; n < ARRAY_SIZE(de_skew->cs0_de_skew); n++) {
		offset = ((n / ROCKCHIP_DE_SKEW_TWENTYONE) * ROCKCHIP_DE_SKEW_ELEVEN) +
			 ((n % ROCKCHIP_DE_SKEW_TWENTYONE) / ROCKCHIP_DE_SKEW_TWO);
		shift = ((n % ROCKCHIP_DE_SKEW_TWENTYONE) % ROCKCHIP_DE_SKEW_TWO);
		if ((n % ROCKCHIP_DE_SKEW_TWENTYONE) == ROCKCHIP_DE_SKEW_TWENTY) {
			shift = 0;
		} else {
			/* 0 => 4; 1 => 0 */
			shift = (shift == 0) ? ROCKCHIP_DE_SKEW_FOUR : 0;
		}
		tim->cs0_skew[offset] &= ~(0xf << shift);
		tim->cs0_skew[offset] |= (de_skew->cs0_de_skew[n] << shift);
	}

	/* CS1 data de-skew */
	for (n = 0; n < ARRAY_SIZE(de_skew->cs1_de_skew); n++) {
		offset = ((n / ROCKCHIP_DE_SKEW_TWENTYONE) * ROCKCHIP_DE_SKEW_ELEVEN) +
			 ((n % ROCKCHIP_DE_SKEW_TWENTYONE) / ROCKCHIP_DE_SKEW_TWO);
		shift = ((n % ROCKCHIP_DE_SKEW_TWENTYONE) % ROCKCHIP_DE_SKEW_TWO);
		if ((n % ROCKCHIP_DE_SKEW_TWENTYONE) == ROCKCHIP_DE_SKEW_TWENTY) {
			shift = 0;
		} else {
			/* 0 => 4; 1 => 0 */
			shift = (shift == 0) ? ROCKCHIP_DE_SKEW_FOUR : 0;
		}
		tim->cs1_skew[offset] &= ~(0xf << shift);
		tim->cs1_skew[offset] |= (de_skew->cs1_de_skew[n] << shift);
	}
}

static int rk_drm_get_lcdc_type(void)
{
	u32 lcdc_type = rockchip_drm_get_sub_dev_type();

	switch (lcdc_type) {
	case DRM_MODE_CONNECTOR_DPI:
	case DRM_MODE_CONNECTOR_LVDS:
		lcdc_type = SCREEN_LVDS;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		lcdc_type = SCREEN_DP;
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
	case DRM_MODE_CONNECTOR_HDMIB:
		lcdc_type = SCREEN_HDMI;
		break;
	case DRM_MODE_CONNECTOR_TV:
		lcdc_type = SCREEN_TVOUT;
		break;
	case DRM_MODE_CONNECTOR_eDP:
		lcdc_type = SCREEN_EDP;
		break;
	case DRM_MODE_CONNECTOR_DSI:
		lcdc_type = SCREEN_MIPI;
		break;
	default:
		lcdc_type = SCREEN_NULL;
		break;
	}

	return lcdc_type;
}
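
/*
 * The DRM connector type reported by rockchip_drm_get_sub_dev_type() is
 * folded into the legacy SCREEN_* encoding above because that is the form
 * stored in share_params.lcdc_type and consumed by the trusted firmware.
 */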
static int rockchip_ddr_set_rate(unsigned long target_rate)
{
	struct arm_smccc_res res;

	ddr_psci_param->hz = target_rate;
	ddr_psci_param->lcdc_type = rk_drm_get_lcdc_type();
	ddr_psci_param->wait_flag1 = 1;
	ddr_psci_param->wait_flag0 = 1;

	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0, ROCKCHIP_SIP_CONFIG_DRAM_SET_RATE);
	if ((int)res.a1 == SIP_RET_SET_RATE_TIMEOUT) {
		rockchip_dmcfreq_wait_complete();
	}

	return res.a0;
}
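
/*
 * When the SET_RATE SMC returns SIP_RET_SET_RATE_TIMEOUT in a1, the rate
 * change is still in flight on the firmware side, so the caller parks in
 * rockchip_dmcfreq_wait_complete() until the completion interrupt fires
 * (or the wait times out).
 */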

static int rockchip_dmcfreq_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev);
	struct dev_pm_opp *opp;
	struct cpufreq_policy *policy;
	unsigned long old_clk_rate = dmcfreq->rate;
	unsigned long target_volt, target_rate;
	unsigned int cpu_cur, cpufreq_cur;
	bool is_cpufreq_changed = false;
	int err = 0;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp)) {
		dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
		return PTR_ERR(opp);
	}
	target_volt = dev_pm_opp_get_voltage(opp);
	dev_pm_opp_put(opp);

	if (dmcfreq->is_set_rate_direct) {
		target_rate = *freq;
	} else {
		target_rate = clk_round_rate(dmcfreq->dmc_clk, *freq);
		if ((long)target_rate <= 0) {
			target_rate = *freq;
		}
	}

	if (dmcfreq->rate == target_rate) {
		if (dmcfreq->volt == target_volt) {
			return 0;
		}
		err = regulator_set_voltage(dmcfreq->vdd_center, target_volt, INT_MAX);
		if (err) {
			dev_err(dev, "Cannot set voltage %lu uV\n", target_volt);
			return err;
		}
		dmcfreq->volt = target_volt;
		return 0;
	} else if (!dmcfreq->volt) {
		dmcfreq->volt = regulator_get_voltage(dmcfreq->vdd_center);
	}

	/*
	 * Prevent CPU hotplug while a DMC frequency change is in progress.
	 *
	 * Do this before taking the policy rwsem to avoid deadlocks with the
	 * mutex that is locked/unlocked in cpu_hotplug_disable/enable(), and
	 * with the lock taken in get/put_online_cpus() (such as in
	 * store_scaling_max_freq()).
	 */
	get_online_cpus();

	/*
	 * Go to the specified cpufreq and block other cpufreq changes, since
	 * set_rate needs to complete during vblank.
	 */
	cpu_cur = raw_smp_processor_id();
	policy = cpufreq_cpu_get(cpu_cur);
	if (!policy) {
		dev_err(dev, "cpu%d policy NULL\n", cpu_cur);
		goto cpufreq;
	}
	down_write(&policy->rwsem);
	cpufreq_cur = cpufreq_quick_get(cpu_cur);
	if (dmcfreq->min_cpu_freq && cpufreq_cur < dmcfreq->min_cpu_freq) {
		if (policy->max >= dmcfreq->min_cpu_freq) {
			__cpufreq_driver_target(policy, dmcfreq->min_cpu_freq, CPUFREQ_RELATION_L);
			is_cpufreq_changed = true;
		} else {
			dev_dbg(dev, "CPU may be too slow for DMC (%u kHz)\n", policy->max);
		}
	}

	/*
	 * When scaling up, adjust the voltage first; when scaling down,
	 * adjust the frequency first.
	 */
	if (old_clk_rate < target_rate) {
		err = regulator_set_voltage(dmcfreq->vdd_center, target_volt, INT_MAX);
		if (err) {
			dev_err(dev, "Cannot set voltage %lu uV\n", target_volt);
			goto out;
		}
	}

	/*
	 * A writer on the rwsem may block readers even while it is waiting in
	 * the queue, which can deadlock when a code path takes the read lock
	 * twice (e.g. once in vop_lock() and again in rockchip_pmu_lock()).
	 * As a (suboptimal) workaround, let the writer spin until it gets the
	 * lock.
	 */
	while (!rockchip_dmcfreq_write_trylock()) {
		cond_resched();
	}
	dev_dbg(dev, "%lu-->%lu\n", old_clk_rate, target_rate);

	if (dmcfreq->set_rate_params) {
		dmcfreq->set_rate_params->lcdc_type = rk_drm_get_lcdc_type();
		dmcfreq->set_rate_params->wait_flag1 = 1;
		dmcfreq->set_rate_params->wait_flag0 = 1;
	}

	if (dmcfreq->is_set_rate_direct) {
		err = rockchip_ddr_set_rate(target_rate);
	} else {
		err = clk_set_rate(dmcfreq->dmc_clk, target_rate);
	}

	rockchip_dmcfreq_write_unlock();
	if (err) {
		dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate, err);
		regulator_set_voltage(dmcfreq->vdd_center, dmcfreq->volt, INT_MAX);
		goto out;
	}

	/*
	 * Check the DPLL rate. There are only two possible outcomes:
	 * 1. DDR frequency scaling failed: we still read the old rate.
	 * 2. DDR frequency scaling succeeded: we read the rate we set.
	 */
	dmcfreq->rate = clk_get_rate(dmcfreq->dmc_clk);

	/* If we read back the wrong rate, restore the old voltage. */
	if (dmcfreq->rate != target_rate) {
		dev_err(dev, "Got wrong frequency: requested %lu, current %lu\n", target_rate, dmcfreq->rate);
		regulator_set_voltage(dmcfreq->vdd_center, dmcfreq->volt, INT_MAX);
		goto out;
	} else if (old_clk_rate > target_rate) {
		err = regulator_set_voltage(dmcfreq->vdd_center, target_volt, INT_MAX);
		if (err) {
			dev_err(dev, "Cannot set voltage %lu uV\n", target_volt);
			goto out;
		}
	}

493
494 if (dmcfreq->info.devfreq) {
495 struct devfreq *devfreq = dmcfreq->info.devfreq;
496
497 devfreq->last_status.current_frequency = *freq;
498 }
499
500 dmcfreq->volt = target_volt;
501 out:
502 if (is_cpufreq_changed) {
503 __cpufreq_driver_target(policy, cpufreq_cur, CPUFREQ_RELATION_L);
504 }
505 up_write(&policy->rwsem);
506 cpufreq_cpu_put(policy);
507 cpufreq:
508 put_online_cpus();
509 return err;
510 }

static int rockchip_dmcfreq_get_dev_status(struct device *dev, struct devfreq_dev_status *stat)
{
	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev);
	struct devfreq_event_data edata;
	int i, ret = 0;

	if (!dmcfreq->info.auto_freq_en) {
		return -EINVAL;
	}

	for (i = 0; i < dmcfreq->edev_count; i++) {
		ret = devfreq_event_get_event(dmcfreq->edev[i], &edata);
		if (ret < 0) {
			dev_err(dev, "failed to get event %s\n", dmcfreq->edev[i]->desc->name);
			return ret;
		}
		if (i == dmcfreq->dfi_id) {
			stat->busy_time = edata.load_count;
			stat->total_time = edata.total_count;
		} else {
			dmcfreq->nocp_bw[i] = edata.load_count;
		}
	}

	return 0;
}
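
/*
 * Of the devfreq-event devices polled above, only the DFI (dfi_id) feeds
 * the generic busy/total load figures the governor sees; the remaining
 * counters are treated as NoC-probe bandwidth samples and stashed in
 * nocp_bw[] for use elsewhere in the driver.
 */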

static int rockchip_dmcfreq_get_cur_freq(struct device *dev, unsigned long *freq)
{
	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev);

	*freq = dmcfreq->rate;

	return 0;
}

static struct devfreq_dev_profile rockchip_devfreq_dmc_profile = {
	.polling_ms = 50,
	.target = rockchip_dmcfreq_target,
	.get_dev_status = rockchip_dmcfreq_get_dev_status,
	.get_cur_freq = rockchip_dmcfreq_get_cur_freq,
};

static inline void reset_last_status(struct devfreq *devfreq)
{
	devfreq->last_status.total_time = 1;
	devfreq->last_status.busy_time = 1;
}
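
/*
 * Resetting both counters to 1 (rather than 0) gives the governor a sane
 * busy/total ratio and avoids a division by zero on the next load
 * calculation; this is an assumption based on how devfreq governors use
 * last_status, not something spelled out in this file.
 */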

static void of_get_px30_timings(struct device *dev, struct device_node *np, uint32_t *timing)
{
	struct device_node *np_tim;
	u32 *p;
	struct px30_ddr_dts_config_timing *dts_timing;
	struct rk3328_ddr_de_skew_setting *de_skew;
	int ret = 0;
	u32 i;

	dts_timing = (struct px30_ddr_dts_config_timing *)(timing + DTS_PAR_OFFSET / 0x4);

	np_tim = of_parse_phandle(np, "ddr_timing", 0);
	if (!np_tim) {
		ret = -EINVAL;
		goto end;
	}
	de_skew = kmalloc(sizeof(*de_skew), GFP_KERNEL);
	if (!de_skew) {
		ret = -ENOMEM;
		goto end;
	}
	p = (u32 *)dts_timing;
	for (i = 0; i < ARRAY_SIZE(px30_dts_timing); i++) {
		ret |= of_property_read_u32(np_tim, px30_dts_timing[i], p + i);
	}
	p = (u32 *)de_skew->ca_de_skew;
	for (i = 0; i < ARRAY_SIZE(rk3328_dts_ca_timing); i++) {
		ret |= of_property_read_u32(np_tim, rk3328_dts_ca_timing[i], p + i);
	}
	p = (u32 *)de_skew->cs0_de_skew;
	for (i = 0; i < ARRAY_SIZE(rk3328_dts_cs0_timing); i++) {
		ret |= of_property_read_u32(np_tim, rk3328_dts_cs0_timing[i], p + i);
	}
	p = (u32 *)de_skew->cs1_de_skew;
	for (i = 0; i < ARRAY_SIZE(rk3328_dts_cs1_timing); i++) {
		ret |= of_property_read_u32(np_tim, rk3328_dts_cs1_timing[i], p + i);
	}
	if (!ret) {
		px30_de_skew_set_2_reg(de_skew, dts_timing);
	}
	kfree(de_skew);
end:
	if (!ret) {
		dts_timing->available = 1;
	} else {
		dts_timing->available = 0;
		dev_err(dev, "of_get_ddr_timings: fail\n");
	}

	of_node_put(np_tim);
}
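
/*
 * Note on the error handling in these of_get_*_timings() helpers: the
 * of_property_read_u32() results are OR-ed into ret, so a single missing
 * property marks the whole timing block as unavailable; the "available"
 * flag is all the firmware gets to see, and an unavailable block is
 * expected to make it fall back to its own defaults.
 */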

static void of_get_rk1808_timings(struct device *dev, struct device_node *np, uint32_t *timing)
{
	struct device_node *np_tim;
	u32 *p;
	struct rk1808_ddr_dts_config_timing *dts_timing;
	int ret = 0;
	u32 i;

	dts_timing = (struct rk1808_ddr_dts_config_timing *)(timing + DTS_PAR_OFFSET / 0x4);

	np_tim = of_parse_phandle(np, "ddr_timing", 0);
	if (!np_tim) {
		ret = -EINVAL;
		goto end;
	}

	p = (u32 *)dts_timing;
	for (i = 0; i < ARRAY_SIZE(px30_dts_timing); i++) {
		ret |= of_property_read_u32(np_tim, px30_dts_timing[i], p + i);
	}
	p = (u32 *)dts_timing->ca_de_skew;
	for (i = 0; i < ARRAY_SIZE(rk1808_dts_ca_timing); i++) {
		ret |= of_property_read_u32(np_tim, rk1808_dts_ca_timing[i], p + i);
	}
	p = (u32 *)dts_timing->cs0_a_de_skew;
	for (i = 0; i < ARRAY_SIZE(rk1808_dts_cs0_a_timing); i++) {
		ret |= of_property_read_u32(np_tim, rk1808_dts_cs0_a_timing[i], p + i);
	}
	p = (u32 *)dts_timing->cs0_b_de_skew;
	for (i = 0; i < ARRAY_SIZE(rk1808_dts_cs0_b_timing); i++) {
		ret |= of_property_read_u32(np_tim, rk1808_dts_cs0_b_timing[i], p + i);
	}
	p = (u32 *)dts_timing->cs1_a_de_skew;
	for (i = 0; i < ARRAY_SIZE(rk1808_dts_cs1_a_timing); i++) {
		ret |= of_property_read_u32(np_tim, rk1808_dts_cs1_a_timing[i], p + i);
	}
	p = (u32 *)dts_timing->cs1_b_de_skew;
	for (i = 0; i < ARRAY_SIZE(rk1808_dts_cs1_b_timing); i++) {
		ret |= of_property_read_u32(np_tim, rk1808_dts_cs1_b_timing[i], p + i);
	}

end:
	if (!ret) {
		dts_timing->available = 1;
	} else {
		dts_timing->available = 0;
		dev_err(dev, "of_get_ddr_timings: fail\n");
	}

	of_node_put(np_tim);
}

static void of_get_rk3128_timings(struct device *dev, struct device_node *np, uint32_t *timing)
{
	struct device_node *np_tim;
	u32 *p;
	struct rk3128_ddr_dts_config_timing *dts_timing;
	struct share_params *init_timing;
	int ret = 0;
	u32 i;

	init_timing = (struct share_params *)timing;

	if (of_property_read_u32(np, "vop-dclk-mode", &init_timing->vop_dclk_mode)) {
		init_timing->vop_dclk_mode = 0;
	}

	p = timing + DTS_PAR_OFFSET / 0x4;
	np_tim = of_parse_phandle(np, "rockchip,ddr_timing", 0);
	if (!np_tim) {
		ret = -EINVAL;
		goto end;
	}
	for (i = 0; i < ARRAY_SIZE(rk3128_dts_timing); i++) {
		ret |= of_property_read_u32(np_tim, rk3128_dts_timing[i], p + i);
	}
end:
	dts_timing = (struct rk3128_ddr_dts_config_timing *)(timing + DTS_PAR_OFFSET / 0x4);
	if (!ret) {
		dts_timing->available = 1;
	} else {
		dts_timing->available = 0;
		dev_err(dev, "of_get_ddr_timings: fail\n");
	}

	of_node_put(np_tim);
}

static uint32_t of_get_rk3228_timings(struct device *dev, struct device_node *np, uint32_t *timing)
{
	struct device_node *np_tim;
	u32 *p;
	int ret = 0;
	u32 i;

	p = timing + DTS_PAR_OFFSET / 0x4;
	np_tim = of_parse_phandle(np, "rockchip,dram_timing", 0);
	if (!np_tim) {
		ret = -EINVAL;
		goto end;
	}
	for (i = 0; i < ARRAY_SIZE(rk3228_dts_timing); i++) {
		ret |= of_property_read_u32(np_tim, rk3228_dts_timing[i], p + i);
	}
end:
	if (ret) {
		dev_err(dev, "of_get_ddr_timings: fail\n");
	}

	of_node_put(np_tim);
	return ret;
}

static void of_get_rk3288_timings(struct device *dev, struct device_node *np, uint32_t *timing)
{
	struct device_node *np_tim;
	u32 *p;
	struct rk3288_ddr_dts_config_timing *dts_timing;
	struct share_params *init_timing;
	int ret = 0;
	u32 i;

	init_timing = (struct share_params *)timing;

	if (of_property_read_u32(np, "vop-dclk-mode", &init_timing->vop_dclk_mode)) {
		init_timing->vop_dclk_mode = 0;
	}

	p = timing + DTS_PAR_OFFSET / 0x4;
	np_tim = of_parse_phandle(np, "rockchip,ddr_timing", 0);
	if (!np_tim) {
		ret = -EINVAL;
		goto end;
	}
	for (i = 0; i < ARRAY_SIZE(rk3288_dts_timing); i++) {
		ret |= of_property_read_u32(np_tim, rk3288_dts_timing[i], p + i);
	}
end:
	dts_timing = (struct rk3288_ddr_dts_config_timing *)(timing + DTS_PAR_OFFSET / 0x4);
	if (!ret) {
		dts_timing->available = 1;
	} else {
		dts_timing->available = 0;
		dev_err(dev, "of_get_ddr_timings: fail\n");
	}

	of_node_put(np_tim);
}

static void of_get_rk3328_timings(struct device *dev, struct device_node *np, uint32_t *timing)
{
	struct device_node *np_tim;
	u32 *p;
	struct rk3328_ddr_dts_config_timing *dts_timing;
	struct rk3328_ddr_de_skew_setting *de_skew;
	int ret = 0;
	u32 i;

	dts_timing = (struct rk3328_ddr_dts_config_timing *)(timing + DTS_PAR_OFFSET / 0x4);

	np_tim = of_parse_phandle(np, "ddr_timing", 0);
	if (!np_tim) {
		ret = -EINVAL;
		goto end;
	}
	de_skew = kmalloc(sizeof(*de_skew), GFP_KERNEL);
	if (!de_skew) {
		ret = -ENOMEM;
		goto end;
	}
	p = (u32 *)dts_timing;
	for (i = 0; i < ARRAY_SIZE(rk3328_dts_timing); i++) {
		ret |= of_property_read_u32(np_tim, rk3328_dts_timing[i], p + i);
	}
	p = (u32 *)de_skew->ca_de_skew;
	for (i = 0; i < ARRAY_SIZE(rk3328_dts_ca_timing); i++) {
		ret |= of_property_read_u32(np_tim, rk3328_dts_ca_timing[i], p + i);
	}
	p = (u32 *)de_skew->cs0_de_skew;
	for (i = 0; i < ARRAY_SIZE(rk3328_dts_cs0_timing); i++) {
		ret |= of_property_read_u32(np_tim, rk3328_dts_cs0_timing[i], p + i);
	}
	p = (u32 *)de_skew->cs1_de_skew;
	for (i = 0; i < ARRAY_SIZE(rk3328_dts_cs1_timing); i++) {
		ret |= of_property_read_u32(np_tim, rk3328_dts_cs1_timing[i], p + i);
	}
	if (!ret) {
		rk3328_de_skew_setting_2_register(de_skew, dts_timing);
	}
	kfree(de_skew);
end:
	if (!ret) {
		dts_timing->available = 1;
	} else {
		dts_timing->available = 0;
		dev_err(dev, "of_get_ddr_timings: fail\n");
	}

	of_node_put(np_tim);
}

static void of_get_rv1126_timings(struct device *dev, struct device_node *np, uint32_t *timing)
{
	struct device_node *np_tim;
	u32 *p;
	struct rk1808_ddr_dts_config_timing *dts_timing;
	int ret = 0;
	u32 i;

	dts_timing = (struct rk1808_ddr_dts_config_timing *)(timing + DTS_PAR_OFFSET / 0x4);

	np_tim = of_parse_phandle(np, "ddr_timing", 0);
	if (!np_tim) {
		ret = -EINVAL;
		goto end;
	}

	p = (u32 *)dts_timing;
	for (i = 0; i < ARRAY_SIZE(px30_dts_timing); i++) {
		ret |= of_property_read_u32(np_tim, px30_dts_timing[i], p + i);
	}
	p = (u32 *)dts_timing->ca_de_skew;
	for (i = 0; i < ARRAY_SIZE(rv1126_dts_ca_timing); i++) {
		ret |= of_property_read_u32(np_tim, rv1126_dts_ca_timing[i], p + i);
	}
	p = (u32 *)dts_timing->cs0_a_de_skew;
	for (i = 0; i < ARRAY_SIZE(rv1126_dts_cs0_a_timing); i++) {
		ret |= of_property_read_u32(np_tim, rv1126_dts_cs0_a_timing[i], p + i);
	}
	p = (u32 *)dts_timing->cs0_b_de_skew;
	for (i = 0; i < ARRAY_SIZE(rv1126_dts_cs0_b_timing); i++) {
		ret |= of_property_read_u32(np_tim, rv1126_dts_cs0_b_timing[i], p + i);
	}
	p = (u32 *)dts_timing->cs1_a_de_skew;
	for (i = 0; i < ARRAY_SIZE(rv1126_dts_cs1_a_timing); i++) {
		ret |= of_property_read_u32(np_tim, rv1126_dts_cs1_a_timing[i], p + i);
	}
	p = (u32 *)dts_timing->cs1_b_de_skew;
	for (i = 0; i < ARRAY_SIZE(rv1126_dts_cs1_b_timing); i++) {
		ret |= of_property_read_u32(np_tim, rv1126_dts_cs1_b_timing[i], p + i);
	}

end:
	if (!ret) {
		dts_timing->available = 1;
	} else {
		dts_timing->available = 0;
		dev_err(dev, "of_get_ddr_timings: fail\n");
	}

	of_node_put(np_tim);
}

static struct rk3368_dram_timing *of_get_rk3368_timings(struct device *dev, struct device_node *np)
{
	struct rk3368_dram_timing *timing = NULL;
	struct device_node *np_tim;
	int ret = 0;

	np_tim = of_parse_phandle(np, "ddr_timing", 0);
	if (np_tim) {
		timing = devm_kzalloc(dev, sizeof(*timing), GFP_KERNEL);
		if (!timing) {
			goto err;
		}

		ret |= of_property_read_u32(np_tim, "dram_spd_bin", &timing->dram_spd_bin);
		ret |= of_property_read_u32(np_tim, "sr_idle", &timing->sr_idle);
		ret |= of_property_read_u32(np_tim, "pd_idle", &timing->pd_idle);
		ret |= of_property_read_u32(np_tim, "dram_dll_disb_freq", &timing->dram_dll_dis_freq);
		ret |= of_property_read_u32(np_tim, "phy_dll_disb_freq", &timing->phy_dll_dis_freq);
		ret |= of_property_read_u32(np_tim, "dram_odt_disb_freq", &timing->dram_odt_dis_freq);
		ret |= of_property_read_u32(np_tim, "phy_odt_disb_freq", &timing->phy_odt_dis_freq);
		ret |= of_property_read_u32(np_tim, "ddr3_drv", &timing->ddr3_drv);
		ret |= of_property_read_u32(np_tim, "ddr3_odt", &timing->ddr3_odt);
		ret |= of_property_read_u32(np_tim, "lpddr3_drv", &timing->lpddr3_drv);
		ret |= of_property_read_u32(np_tim, "lpddr3_odt", &timing->lpddr3_odt);
		ret |= of_property_read_u32(np_tim, "lpddr2_drv", &timing->lpddr2_drv);
		ret |= of_property_read_u32(np_tim, "phy_clk_drv", &timing->phy_clk_drv);
		ret |= of_property_read_u32(np_tim, "phy_cmd_drv", &timing->phy_cmd_drv);
		ret |= of_property_read_u32(np_tim, "phy_dqs_drv", &timing->phy_dqs_drv);
		ret |= of_property_read_u32(np_tim, "phy_odt", &timing->phy_odt);
		ret |= of_property_read_u32(np_tim, "ddr_2t", &timing->ddr_2t);
		if (ret) {
			devm_kfree(dev, timing);
			goto err;
		}
		of_node_put(np_tim);
		return timing;
	}

err:
	if (timing) {
		devm_kfree(dev, timing);
		timing = NULL;
	}
	of_node_put(np_tim);
	return timing;
}

static struct rk3399_dram_timing *of_get_rk3399_timings(struct device *dev, struct device_node *np)
{
	struct rk3399_dram_timing *timing = NULL;
	struct device_node *np_tim;
	int ret;

	np_tim = of_parse_phandle(np, "ddr_timing", 0);
	if (np_tim) {
		timing = devm_kzalloc(dev, sizeof(*timing), GFP_KERNEL);
		if (!timing) {
			goto err;
		}

		ret = of_property_read_u32(np_tim, "ddr3_speed_bin", &timing->ddr3_speed_bin);
		ret |= of_property_read_u32(np_tim, "pd_idle", &timing->pd_idle);
		ret |= of_property_read_u32(np_tim, "sr_idle", &timing->sr_idle);
		ret |= of_property_read_u32(np_tim, "sr_mc_gate_idle", &timing->sr_mc_gate_idle);
		ret |= of_property_read_u32(np_tim, "srpd_lite_idle", &timing->srpd_lite_idle);
		ret |= of_property_read_u32(np_tim, "standby_idle", &timing->standby_idle);
		ret |= of_property_read_u32(np_tim, "auto_lp_dis_freq", &timing->auto_lp_dis_freq);
		ret |= of_property_read_u32(np_tim, "ddr3_dll_dis_freq", &timing->ddr3_dll_dis_freq);
		ret |= of_property_read_u32(np_tim, "phy_dll_dis_freq", &timing->phy_dll_dis_freq);
		ret |= of_property_read_u32(np_tim, "ddr3_odt_dis_freq", &timing->ddr3_odt_dis_freq);
		ret |= of_property_read_u32(np_tim, "ddr3_drv", &timing->ddr3_drv);
		ret |= of_property_read_u32(np_tim, "ddr3_odt", &timing->ddr3_odt);
		ret |= of_property_read_u32(np_tim, "phy_ddr3_ca_drv", &timing->phy_ddr3_ca_drv);
		ret |= of_property_read_u32(np_tim, "phy_ddr3_dq_drv", &timing->phy_ddr3_dq_drv);
		ret |= of_property_read_u32(np_tim, "phy_ddr3_odt", &timing->phy_ddr3_odt);
		ret |= of_property_read_u32(np_tim, "lpddr3_odt_dis_freq", &timing->lpddr3_odt_dis_freq);
		ret |= of_property_read_u32(np_tim, "lpddr3_drv", &timing->lpddr3_drv);
		ret |= of_property_read_u32(np_tim, "lpddr3_odt", &timing->lpddr3_odt);
		ret |= of_property_read_u32(np_tim, "phy_lpddr3_ca_drv", &timing->phy_lpddr3_ca_drv);
		ret |= of_property_read_u32(np_tim, "phy_lpddr3_dq_drv", &timing->phy_lpddr3_dq_drv);
		ret |= of_property_read_u32(np_tim, "phy_lpddr3_odt", &timing->phy_lpddr3_odt);
		ret |= of_property_read_u32(np_tim, "lpddr4_odt_dis_freq", &timing->lpddr4_odt_dis_freq);
		ret |= of_property_read_u32(np_tim, "lpddr4_drv", &timing->lpddr4_drv);
		ret |= of_property_read_u32(np_tim, "lpddr4_dq_odt", &timing->lpddr4_dq_odt);
		ret |= of_property_read_u32(np_tim, "lpddr4_ca_odt", &timing->lpddr4_ca_odt);
		ret |= of_property_read_u32(np_tim, "phy_lpddr4_ca_drv", &timing->phy_lpddr4_ca_drv);
		ret |= of_property_read_u32(np_tim, "phy_lpddr4_ck_cs_drv", &timing->phy_lpddr4_ck_cs_drv);
		ret |= of_property_read_u32(np_tim, "phy_lpddr4_dq_drv", &timing->phy_lpddr4_dq_drv);
		ret |= of_property_read_u32(np_tim, "phy_lpddr4_odt", &timing->phy_lpddr4_odt);
		if (ret) {
			devm_kfree(dev, timing);
			goto err;
		}
		of_node_put(np_tim);
		return timing;
	}

err:
	if (timing) {
		devm_kfree(dev, timing);
		timing = NULL;
	}
	of_node_put(np_tim);
	return timing;
}

static int rockchip_ddr_set_auto_self_refresh(uint32_t en)
{
	struct arm_smccc_res res;

	ddr_psci_param->sr_idle_en = en;
	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0, ROCKCHIP_SIP_CONFIG_DRAM_SET_AT_SR);

	return res.a0;
}

struct dmcfreq_wait_ctrl_t {
	wait_queue_head_t wait_wq;
	int complt_irq;
	int wait_flag;
	int wait_en;
	int wait_time_out_ms;
	int dcf_en;
	struct regmap *regmap_dcf;
};

static struct dmcfreq_wait_ctrl_t wait_ctrl;
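
/*
 * dcf_en selects how a pending rate change is kicked off in
 * rockchip_dmcfreq_wait_complete():
 *   1 - start the DCF via the syscon regmap (regmap_dcf),
 *   2 - (DMCFREQ_WAIT_CTRL_T_DCF_EN_TWO) start the firmware MCU through
 *       the ROCKCHIP_SIP_CONFIG_MCU_START SIP call,
 *   0 - nothing to start; only wait on the completion interrupt.
 */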

static irqreturn_t wait_complete_irq(int irqno, void *dev_id)
{
	struct dmcfreq_wait_ctrl_t *ctrl = dev_id;

	ctrl->wait_flag = 0;
	wake_up(&ctrl->wait_wq);
	return IRQ_HANDLED;
}

static irqreturn_t wait_dcf_complete_irq(int irqno, void *dev_id)
{
	struct arm_smccc_res res;
	struct dmcfreq_wait_ctrl_t *ctrl = dev_id;

	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0, ROCKCHIP_SIP_CONFIG_DRAM_POST_SET_RATE);
	if (res.a0) {
		pr_err("%s: dram post set rate error:%lx\n", __func__, res.a0);
	}

	ctrl->wait_flag = 0;
	wake_up(&ctrl->wait_wq);
	return IRQ_HANDLED;
}

int rockchip_dmcfreq_wait_complete(void)
{
	struct arm_smccc_res res;

	if (!wait_ctrl.wait_en) {
		pr_err("%s: completion waiting is not supported!\n", __func__);
		return 0;
	}
	wait_ctrl.wait_flag = -1;

	enable_irq(wait_ctrl.complt_irq);
	/*
	 * Request zero CPU latency so that idle CPUs only enter WFI and the
	 * completion FIQ/IRQ is serviced quickly.
	 */
	cpu_latency_qos_update_request(&pm_qos, 0);

	if (wait_ctrl.dcf_en == 1) {
		/* start dcf */
		regmap_update_bits(wait_ctrl.regmap_dcf, 0x0, 0x1, 0x1);
	} else if (wait_ctrl.dcf_en == DMCFREQ_WAIT_CTRL_T_DCF_EN_TWO) {
		res = sip_smc_dram(0, 0, ROCKCHIP_SIP_CONFIG_MCU_START);
		if (res.a0) {
			pr_err("rockchip_sip_config_mcu_start error:%lx\n", res.a0);
			return -ENOMEM;
		}
	}

	wait_event_timeout(wait_ctrl.wait_wq, (wait_ctrl.wait_flag == 0), msecs_to_jiffies(wait_ctrl.wait_time_out_ms));

	cpu_latency_qos_update_request(&pm_qos, PM_QOS_DEFAULT_VALUE);
	disable_irq(wait_ctrl.complt_irq);

	return 0;
}

static __maybe_unused int rockchip_get_freq_info(struct rockchip_dmcfreq *dmcfreq)
{
	struct arm_smccc_res res;
	struct dev_pm_opp *opp;
	struct dmc_freq_table *freq_table;
	unsigned long rate;
	int i, j, count, ret = 0;

	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0, ROCKCHIP_SIP_CONFIG_DRAM_GET_FREQ_INFO);
	if (res.a0) {
		dev_err(dmcfreq->dev, "rockchip_sip_config_dram_get_freq_info error:%lx\n", res.a0);
		return -ENOMEM;
	}

	if (ddr_psci_param->freq_count == 0 || ddr_psci_param->freq_count > 6) {
		dev_err(dmcfreq->dev, "there are no available frequencies!\n");
		return -EPERM;
	}

	for (i = 0; i < ddr_psci_param->freq_count; i++) {
		dmcfreq->freq_info_rate[i] = ddr_psci_param->freq_info_mhz[i] * 1000000;
	}
	dmcfreq->freq_count = ddr_psci_param->freq_count;

	/* update dmc_opp_table */
	count = dev_pm_opp_get_opp_count(dmcfreq->dev);
	if (count <= 0) {
		ret = count ? count : -ENODATA;
		return ret;
	}

	freq_table = kmalloc(sizeof(struct dmc_freq_table) * count, GFP_KERNEL);
	if (!freq_table) {
		return -ENOMEM;
	}
	for (i = 0, rate = 0; i < count; i++, rate++) {
		/* find next rate */
		opp = dev_pm_opp_find_freq_ceil(dmcfreq->dev, &rate);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			dev_err(dmcfreq->dev, "failed to find OPP for freq %lu.\n", rate);
			goto out;
		}
		freq_table[i].freq = rate;
		freq_table[i].volt = dev_pm_opp_get_voltage(opp);
		dev_pm_opp_put(opp);

		for (j = 0; j < dmcfreq->freq_count; j++) {
			if (rate == dmcfreq->freq_info_rate[j]) {
				break;
			}
		}
		if (j == dmcfreq->freq_count) {
			dev_pm_opp_remove(dmcfreq->dev, rate);
		}
	}

	for (i = 0; i < dmcfreq->freq_count; i++) {
		for (j = 0; j < count; j++) {
			if (dmcfreq->freq_info_rate[i] == freq_table[j].freq) {
				break;
			} else if (dmcfreq->freq_info_rate[i] < freq_table[j].freq) {
				dev_pm_opp_add(dmcfreq->dev, dmcfreq->freq_info_rate[i], freq_table[j].volt);
				break;
			}
		}
		if (j == count) {
			dev_err(dmcfreq->dev, "failed to match dmc_opp_table for %ld\n", dmcfreq->freq_info_rate[i]);
			if (i == 0) {
				ret = -EPERM;
			} else {
				dmcfreq->freq_count = i;
			}
			goto out;
		}
	}

out:
	kfree(freq_table);
	return ret;
}
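
/*
 * After this point the device's OPP table and the firmware's supported
 * rate list agree: OPPs the firmware does not report are removed, and
 * firmware rates missing from the table are inserted with the voltage of
 * the next-higher existing OPP.
 */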

static __maybe_unused int px30_dmc_init(struct platform_device *pdev, struct rockchip_dmcfreq *dmcfreq)
{
	struct arm_smccc_res res;
	u32 size;
	int ret;
	int complt_irq;
	u32 complt_hwirq;
	struct irq_data *complt_irq_data;

	res = sip_smc_dram(0, 0, ROCKCHIP_SIP_CONFIG_DRAM_GET_VERSION);
	dev_notice(&pdev->dev, "current ATF version 0x%lx!\n", res.a1);
	if (res.a0 || res.a1 < 0x103) {
		dev_err(&pdev->dev, "trusted firmware needs to be updated or is invalid!\n");
		return -ENXIO;
	}

	dev_notice(&pdev->dev, "read tf version 0x%lx!\n", res.a1);

	/*
	 * The first 4 KiB is used for interface parameters;
	 * the following 4 KiB * N hold the DTS parameters.
	 */
	size = sizeof(struct px30_ddr_dts_config_timing);
	res = sip_smc_request_share_mem(DIV_ROUND_UP(size, 0x1000) + 1, SHARE_PAGE_TYPE_DDR);
	if (res.a0 != 0) {
		dev_err(&pdev->dev, "no ATF memory for init\n");
		return -ENOMEM;
	}
	ddr_psci_param = (struct share_params *)res.a1;
	of_get_px30_timings(&pdev->dev, pdev->dev.of_node, (uint32_t *)ddr_psci_param);

	init_waitqueue_head(&wait_ctrl.wait_wq);
	wait_ctrl.wait_en = 1;
	wait_ctrl.wait_time_out_ms = 0x55;

	complt_irq = platform_get_irq_byname(pdev, "complete_irq");
	if (complt_irq < 0) {
		dev_err(&pdev->dev, "no IRQ for complete_irq: %d\n", complt_irq);
		return complt_irq;
	}
	wait_ctrl.complt_irq = complt_irq;

	ret = devm_request_irq(&pdev->dev, complt_irq, wait_complete_irq, 0, dev_name(&pdev->dev), &wait_ctrl);
	if (ret < 0) {
		dev_err(&pdev->dev, "cannot request complete_irq\n");
		return ret;
	}
	disable_irq(complt_irq);

	complt_irq_data = irq_get_irq_data(complt_irq);
	complt_hwirq = irqd_to_hwirq(complt_irq_data);
	ddr_psci_param->complt_hwirq = complt_hwirq;

	dmcfreq->set_rate_params = ddr_psci_param;
	rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
	rockchip_set_ddrclk_dmcfreq_wait_complete(rockchip_dmcfreq_wait_complete);

	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0, ROCKCHIP_SIP_CONFIG_DRAM_INIT);
	if (res.a0) {
		dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n", res.a0);
		return -ENOMEM;
	}

	dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;

	return 0;
}

static __maybe_unused int rk1808_dmc_init(struct platform_device *pdev, struct rockchip_dmcfreq *dmcfreq)
{
	struct arm_smccc_res res;
	u32 size;
	int ret;
	int complt_irq;
	struct device_node *node;

	res = sip_smc_dram(0, 0, ROCKCHIP_SIP_CONFIG_DRAM_GET_VERSION);
	dev_notice(&pdev->dev, "current ATF version 0x%lx!\n", res.a1);
	if (res.a0 || res.a1 < 0x101) {
		dev_err(&pdev->dev, "trusted firmware needs to be updated or is invalid!\n");
		return -ENXIO;
	}

	/*
	 * The first 4 KiB is used for interface parameters;
	 * the following 4 KiB * N hold the DTS parameters.
	 */
	size = sizeof(struct rk1808_ddr_dts_config_timing);
	res = sip_smc_request_share_mem(DIV_ROUND_UP(size, 0x1000) + 1, SHARE_PAGE_TYPE_DDR);
	if (res.a0 != 0) {
		dev_err(&pdev->dev, "no ATF memory for init\n");
		return -ENOMEM;
	}
	ddr_psci_param = (struct share_params *)res.a1;
	of_get_rk1808_timings(&pdev->dev, pdev->dev.of_node, (uint32_t *)ddr_psci_param);

	/* let the kernel start the DCF once the DCF regmap is ready */
	node = of_parse_phandle(pdev->dev.of_node, "dcf_reg", 0);
	wait_ctrl.regmap_dcf = syscon_node_to_regmap(node);
	if (IS_ERR(wait_ctrl.regmap_dcf)) {
		return PTR_ERR(wait_ctrl.regmap_dcf);
	}
	wait_ctrl.dcf_en = 1;

	init_waitqueue_head(&wait_ctrl.wait_wq);
	wait_ctrl.wait_en = 1;
	wait_ctrl.wait_time_out_ms = 0x55;

	complt_irq = platform_get_irq_byname(pdev, "complete_irq");
	if (complt_irq < 0) {
		dev_err(&pdev->dev, "no IRQ for complete_irq: %d\n", complt_irq);
		return complt_irq;
	}
	wait_ctrl.complt_irq = complt_irq;

	ret = devm_request_irq(&pdev->dev, complt_irq, wait_dcf_complete_irq, 0, dev_name(&pdev->dev), &wait_ctrl);
	if (ret < 0) {
		dev_err(&pdev->dev, "cannot request complete_irq\n");
		return ret;
	}
	disable_irq(complt_irq);

	dmcfreq->set_rate_params = ddr_psci_param;
	rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
	rockchip_set_ddrclk_dmcfreq_wait_complete(rockchip_dmcfreq_wait_complete);

	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0, ROCKCHIP_SIP_CONFIG_DRAM_INIT);
	if (res.a0) {
		dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n", res.a0);
		return -ENOMEM;
	}

	dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;

	return 0;
}

static __maybe_unused int rk3128_dmc_init(struct platform_device *pdev, struct rockchip_dmcfreq *dmcfreq)
{
	struct arm_smccc_res res;

	res = sip_smc_request_share_mem(DIV_ROUND_UP(sizeof(struct rk3128_ddr_dts_config_timing), 0x1000) + 1,
					SHARE_PAGE_TYPE_DDR);
	if (res.a0) {
		dev_err(&pdev->dev, "no ATF memory for init\n");
		return -ENOMEM;
	}
	ddr_psci_param = (struct share_params *)res.a1;
	of_get_rk3128_timings(&pdev->dev, pdev->dev.of_node, (uint32_t *)ddr_psci_param);

	ddr_psci_param->hz = 0;
	ddr_psci_param->lcdc_type = rk_drm_get_lcdc_type();

	dmcfreq->set_rate_params = ddr_psci_param;
	rockchip_set_ddrclk_params(dmcfreq->set_rate_params);

	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0, ROCKCHIP_SIP_CONFIG_DRAM_INIT);
	if (res.a0) {
		dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n", res.a0);
		return -ENOMEM;
	}

	dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;

	return 0;
}

static __maybe_unused int rk3228_dmc_init(struct platform_device *pdev, struct rockchip_dmcfreq *dmcfreq)
{
	struct arm_smccc_res res;

	res = sip_smc_request_share_mem(DIV_ROUND_UP(sizeof(struct rk3228_ddr_dts_config_timing), 0x1000) + 1,
					SHARE_PAGE_TYPE_DDR);
	if (res.a0) {
		dev_err(&pdev->dev, "no ATF memory for init\n");
		return -ENOMEM;
	}

	ddr_psci_param = (struct share_params *)res.a1;
	if (of_get_rk3228_timings(&pdev->dev, pdev->dev.of_node, (uint32_t *)ddr_psci_param)) {
		return -ENOMEM;
	}

	ddr_psci_param->hz = 0;

	dmcfreq->set_rate_params = ddr_psci_param;
	rockchip_set_ddrclk_params(dmcfreq->set_rate_params);

	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0, ROCKCHIP_SIP_CONFIG_DRAM_INIT);
	if (res.a0) {
		dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n", res.a0);
		return -ENOMEM;
	}

	dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;

	return 0;
}

static __maybe_unused int rk3288_dmc_init(struct platform_device *pdev, struct rockchip_dmcfreq *dmcfreq)
{
	struct device *dev = &pdev->dev;
	struct clk *pclk_phy, *pclk_upctl, *dmc_clk;
	struct arm_smccc_res res;
	int ret;

	dmc_clk = devm_clk_get(dev, "dmc_clk");
	if (IS_ERR(dmc_clk)) {
		dev_err(dev, "Cannot get the clk dmc_clk\n");
		return PTR_ERR(dmc_clk);
	}
	ret = clk_prepare_enable(dmc_clk);
	if (ret < 0) {
		dev_err(dev, "failed to prepare/enable dmc_clk\n");
		return ret;
	}

	pclk_phy = devm_clk_get(dev, "pclk_phy0");
	if (IS_ERR(pclk_phy)) {
		dev_err(dev, "Cannot get the clk pclk_phy0\n");
		return PTR_ERR(pclk_phy);
	}
	ret = clk_prepare_enable(pclk_phy);
	if (ret < 0) {
		dev_err(dev, "failed to prepare/enable pclk_phy0\n");
		return ret;
	}
	pclk_upctl = devm_clk_get(dev, "pclk_upctl0");
	if (IS_ERR(pclk_upctl)) {
		dev_err(dev, "Cannot get the clk pclk_upctl0\n");
		return PTR_ERR(pclk_upctl);
	}
	ret = clk_prepare_enable(pclk_upctl);
	if (ret < 0) {
		dev_err(dev, "failed to prepare/enable pclk_upctl0\n");
		return ret;
	}

	pclk_phy = devm_clk_get(dev, "pclk_phy1");
	if (IS_ERR(pclk_phy)) {
		dev_err(dev, "Cannot get the clk pclk_phy1\n");
		return PTR_ERR(pclk_phy);
	}
	ret = clk_prepare_enable(pclk_phy);
	if (ret < 0) {
		dev_err(dev, "failed to prepare/enable pclk_phy1\n");
		return ret;
	}
	pclk_upctl = devm_clk_get(dev, "pclk_upctl1");
	if (IS_ERR(pclk_upctl)) {
		dev_err(dev, "Cannot get the clk pclk_upctl1\n");
		return PTR_ERR(pclk_upctl);
	}
	ret = clk_prepare_enable(pclk_upctl);
	if (ret < 0) {
		dev_err(dev, "failed to prepare/enable pclk_upctl1\n");
		return ret;
	}

	res = sip_smc_request_share_mem(DIV_ROUND_UP(sizeof(struct rk3288_ddr_dts_config_timing), 0x1000) + 1,
					SHARE_PAGE_TYPE_DDR);
	if (res.a0) {
		dev_err(&pdev->dev, "no ATF memory for init\n");
		return -ENOMEM;
	}

	ddr_psci_param = (struct share_params *)res.a1;
	of_get_rk3288_timings(&pdev->dev, pdev->dev.of_node, (uint32_t *)ddr_psci_param);

	ddr_psci_param->hz = 0;
	ddr_psci_param->lcdc_type = rk_drm_get_lcdc_type();

	dmcfreq->set_rate_params = ddr_psci_param;
	rockchip_set_ddrclk_params(dmcfreq->set_rate_params);

	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0, ROCKCHIP_SIP_CONFIG_DRAM_INIT);
	if (res.a0) {
		dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n", res.a0);
		return -ENOMEM;
	}

	dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;

	return 0;
}

static __maybe_unused int rk3328_dmc_init(struct platform_device *pdev, struct rockchip_dmcfreq *dmcfreq)
{
	struct arm_smccc_res res;
	u32 size;

	res = sip_smc_dram(0, 0, ROCKCHIP_SIP_CONFIG_DRAM_GET_VERSION);
	dev_notice(&pdev->dev, "current ATF version 0x%lx!\n", res.a1);
	if (res.a0 || (res.a1 < 0x101)) {
		dev_err(&pdev->dev, "trusted firmware needs to be updated or is invalid!\n");
		return -ENXIO;
	}

	dev_notice(&pdev->dev, "read tf version 0x%lx!\n", res.a1);

	/*
	 * The first 4 KiB is used for interface parameters;
	 * the following 4 KiB * N hold the DTS parameters.
	 */
	size = sizeof(struct rk3328_ddr_dts_config_timing);
	res = sip_smc_request_share_mem(DIV_ROUND_UP(size, 0x1000) + 1, SHARE_PAGE_TYPE_DDR);
	if (res.a0 != 0) {
		dev_err(&pdev->dev, "no ATF memory for init\n");
		return -ENOMEM;
	}
	ddr_psci_param = (struct share_params *)res.a1;
	of_get_rk3328_timings(&pdev->dev, pdev->dev.of_node, (uint32_t *)ddr_psci_param);

	dmcfreq->set_rate_params = ddr_psci_param;
	rockchip_set_ddrclk_params(dmcfreq->set_rate_params);

	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0, ROCKCHIP_SIP_CONFIG_DRAM_INIT);
	if (res.a0) {
		dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n", res.a0);
		return -ENOMEM;
	}

	dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;

	return 0;
}

static __maybe_unused int rk3368_dmc_init(struct platform_device *pdev, struct rockchip_dmcfreq *dmcfreq)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = pdev->dev.of_node;
	struct arm_smccc_res res;
	struct rk3368_dram_timing *dram_timing;
	struct clk *pclk_phy, *pclk_upctl;
	int ret;
	u32 dram_spd_bin;
	u32 addr_mcu_el3;
	u32 dclk_mode;
	u32 lcdc_type;

	pclk_phy = devm_clk_get(dev, "pclk_phy");
	if (IS_ERR(pclk_phy)) {
		dev_err(dev, "Cannot get the clk pclk_phy\n");
		return PTR_ERR(pclk_phy);
	}
	ret = clk_prepare_enable(pclk_phy);
	if (ret < 0) {
		dev_err(dev, "failed to prepare/enable pclk_phy\n");
		return ret;
	}
	pclk_upctl = devm_clk_get(dev, "pclk_upctl");
	if (IS_ERR(pclk_upctl)) {
		dev_err(dev, "Cannot get the clk pclk_upctl\n");
		return PTR_ERR(pclk_upctl);
	}
	ret = clk_prepare_enable(pclk_upctl);
	if (ret < 0) {
		dev_err(dev, "failed to prepare/enable pclk_upctl\n");
		return ret;
	}

	/*
	 * Get the DRAM timings and pass them to the trusted firmware; the
	 * DRAM driver in the trusted firmware uses them for DRAM
	 * initialization.
	 */
	dram_timing = of_get_rk3368_timings(dev, np);
	if (dram_timing) {
		dram_spd_bin = dram_timing->dram_spd_bin;
		if (scpi_ddr_send_timing((u32 *)dram_timing, sizeof(struct rk3368_dram_timing))) {
			dev_err(dev, "send ddr timing timeout\n");
		}
	} else {
		dev_err(dev, "get ddr timing from dts error\n");
		dram_spd_bin = DDR3_DEFAULT;
	}

	res = sip_smc_mcu_el3fiq(FIQ_INIT_HANDLER, FIQ_NUM_FOR_DCF, FIQ_CPU_TGT_BOOT);
	if ((res.a0) || (res.a1 == 0) || (res.a1 > 0x80000)) {
		dev_err(dev, "Trust version error, please check the trust version\n");
	}
	addr_mcu_el3 = res.a1;

	if (of_property_read_u32(np, "vop-dclk-mode", &dclk_mode) == 0) {
		scpi_ddr_dclk_mode(dclk_mode);
	}

	dmcfreq->set_rate_params = devm_kzalloc(dev, sizeof(struct share_params), GFP_KERNEL);
	if (!dmcfreq->set_rate_params) {
		return -ENOMEM;
	}
	rockchip_set_ddrclk_params(dmcfreq->set_rate_params);

	lcdc_type = rk_drm_get_lcdc_type();

	if (scpi_ddr_init(dram_spd_bin, 0, lcdc_type, addr_mcu_el3)) {
		dev_err(dev, "ddr init error\n");
	} else {
		dev_dbg(dev, "%s out\n", __func__);
	}

	dmcfreq->set_auto_self_refresh = scpi_ddr_set_auto_self_refresh;

	return 0;
}
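
/*
 * Unlike the SoCs above, rk3368 hands its timings to the firmware over
 * SCPI (scpi_ddr_send_timing()/scpi_ddr_init()) rather than the SIP
 * shared page, and auto self-refresh likewise goes through
 * scpi_ddr_set_auto_self_refresh().
 */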

static int rk3399_set_msch_readlatency(unsigned int readlatency)
{
	struct arm_smccc_res res;

	arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, readlatency, 0, ROCKCHIP_SIP_CONFIG_DRAM_SET_MSCH_RL, 0, 0, 0, 0, &res);

	return res.a0;
}

static __maybe_unused int rk3399_dmc_init(struct platform_device *pdev, struct rockchip_dmcfreq *dmcfreq)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = pdev->dev.of_node;
	struct arm_smccc_res res;
	struct rk3399_dram_timing *dram_timing;
	int index, size;
	u32 *timing;

	/*
	 * Get the DRAM timings and pass them to the trusted firmware; the
	 * DRAM driver in the trusted firmware uses them for DRAM
	 * initialization.
	 */
	dram_timing = of_get_rk3399_timings(dev, np);
	if (dram_timing) {
		timing = (u32 *)dram_timing;
		size = sizeof(struct rk3399_dram_timing) / 0x4;
		for (index = 0; index < size; index++) {
			arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, *timing++, index, ROCKCHIP_SIP_CONFIG_DRAM_SET_PARAM,
				      0, 0, 0, 0, &res);
			if (res.a0) {
				dev_err(dev, "Failed to set dram param: %ld\n", res.a0);
				return -EINVAL;
			}
		}
	}

	dmcfreq->set_rate_params = devm_kzalloc(dev, sizeof(struct share_params), GFP_KERNEL);
	if (!dmcfreq->set_rate_params) {
		return -ENOMEM;
	}
	rockchip_set_ddrclk_params(dmcfreq->set_rate_params);

	arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0, ROCKCHIP_SIP_CONFIG_DRAM_INIT, 0, 0, 0, 0, &res);

	dmcfreq->info.set_msch_readlatency = rk3399_set_msch_readlatency;

	return 0;
}
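
/*
 * rk3399 takes yet another path: the timing struct is streamed to the
 * firmware one u32 at a time via ROCKCHIP_SIP_CONFIG_DRAM_SET_PARAM SMCs
 * before DRAM_INIT, rather than being placed in a shared page.
 */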

static __maybe_unused int rk3568_dmc_init(struct platform_device *pdev, struct rockchip_dmcfreq *dmcfreq)
{
	struct arm_smccc_res res;
	int ret;
	int complt_irq;

	res = sip_smc_dram(0, 0, ROCKCHIP_SIP_CONFIG_DRAM_GET_VERSION);
	dev_notice(&pdev->dev, "current ATF version 0x%lx\n", res.a1);
	if (res.a0 || res.a1 < 0x101) {
		dev_err(&pdev->dev, "trusted firmware needs to be updated to v1.01 or later.\n");
		return -ENXIO;
	}

	/*
	 * The first 4 KiB is used for interface parameters and the second
	 * 4 KiB for the DTS parameters, so request a 4 KiB * 2 share memory.
	 */
	res = sip_smc_request_share_mem(2, SHARE_PAGE_TYPE_DDR);
	if (res.a0 != 0) {
		dev_err(&pdev->dev, "no ATF memory for init\n");
		return -ENOMEM;
	}
	ddr_psci_param = (struct share_params *)res.a1;
	/* Clear ddr_psci_param, size is 4KB * 2 */
	memset_io(ddr_psci_param, 0x0, 0x1000 * 0x2);

	/* start the MCU with sip_smc_dram */
	wait_ctrl.dcf_en = 0x2;

	init_waitqueue_head(&wait_ctrl.wait_wq);
	wait_ctrl.wait_en = 1;
	wait_ctrl.wait_time_out_ms = 0x55;

	complt_irq = platform_get_irq_byname(pdev, "complete");
	if (complt_irq < 0) {
		dev_err(&pdev->dev, "no IRQ for complt_irq: %d\n", complt_irq);
		return complt_irq;
	}
	wait_ctrl.complt_irq = complt_irq;

	ret = devm_request_irq(&pdev->dev, complt_irq, wait_dcf_complete_irq, 0, dev_name(&pdev->dev), &wait_ctrl);
	if (ret < 0) {
		dev_err(&pdev->dev, "cannot request complt_irq\n");
		return ret;
	}
	disable_irq(complt_irq);

	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0, ROCKCHIP_SIP_CONFIG_DRAM_INIT);
	if (res.a0) {
		dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n", res.a0);
		return -ENOMEM;
	}

	ret = rockchip_get_freq_info(dmcfreq);
	if (ret < 0) {
		dev_err(&pdev->dev, "cannot get frequency info\n");
		return ret;
	}
	dmcfreq->is_set_rate_direct = true;

	dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;

	return 0;
}
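
/*
 * On rk3568 (and rk3588 below) the kernel does not round rates through
 * the clock framework at all: is_set_rate_direct makes
 * rockchip_dmcfreq_target() call rockchip_ddr_set_rate() straight into
 * the firmware, using the rate list obtained from rockchip_get_freq_info().
 */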

static __maybe_unused int rk3588_dmc_init(struct platform_device *pdev, struct rockchip_dmcfreq *dmcfreq)
{
	struct arm_smccc_res res;
	int ret;
	int complt_irq;

	res = sip_smc_dram(0, 0, ROCKCHIP_SIP_CONFIG_DRAM_GET_VERSION);
	dev_notice(&pdev->dev, "current ATF version 0x%lx\n", res.a1);
	if (res.a0) {
		dev_err(&pdev->dev, "trusted firmware is unsupported, please update it.\n");
		return -ENXIO;
	}

	/*
	 * The first 4 KiB is used for interface parameters and the second
	 * 4 KiB for the DTS parameters, so request a 4 KiB * 2 share memory.
	 */
	res = sip_smc_request_share_mem(0x2, SHARE_PAGE_TYPE_DDR);
	if (res.a0 != 0) {
		dev_err(&pdev->dev, "no ATF memory for init\n");
		return -ENOMEM;
	}
	ddr_psci_param = (struct share_params *)res.a1;
	/* Clear ddr_psci_param, size is 4KB * 2 */
	memset_io(ddr_psci_param, 0x0, 0x1000 * 0x2);

	/* start the MCU with sip_smc_dram */
	wait_ctrl.dcf_en = 0x2;

	init_waitqueue_head(&wait_ctrl.wait_wq);
	wait_ctrl.wait_en = 1;
	wait_ctrl.wait_time_out_ms = 0x55;

	complt_irq = platform_get_irq_byname(pdev, "complete");
	if (complt_irq < 0) {
		dev_err(&pdev->dev, "no IRQ for complt_irq: %d\n", complt_irq);
		return complt_irq;
	}
	wait_ctrl.complt_irq = complt_irq;

	ret = devm_request_irq(&pdev->dev, complt_irq, wait_dcf_complete_irq, 0, dev_name(&pdev->dev), &wait_ctrl);
	if (ret < 0) {
		dev_err(&pdev->dev, "cannot request complt_irq\n");
		return ret;
	}
	disable_irq(complt_irq);

	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0, ROCKCHIP_SIP_CONFIG_DRAM_INIT);
	if (res.a0) {
		dev_err(&pdev->dev, "rockchip_sip_config_dram_init error:%lx\n", res.a0);
		return -ENOMEM;
	}

	ret = rockchip_get_freq_info(dmcfreq);
	if (ret < 0) {
		dev_err(&pdev->dev, "cannot get frequency info\n");
		return ret;
	}
	dmcfreq->is_set_rate_direct = true;

	dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;

	return 0;
}
1723
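/*
 * rv1126 init: here the frequency switch is driven by a DCF controller
 * instead of an MCU, so the devicetree timings are copied into the
 * share memory and the DCF is reached through the syscon regmap behind
 * the "dcf" phandle.
 */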
static __maybe_unused int rv1126_dmc_init(struct platform_device *pdev, struct rockchip_dmcfreq *dmcfreq)
{
	struct arm_smccc_res res;
	u32 size;
	int ret;
	int complt_irq;
	struct device_node *node;

	res = sip_smc_dram(0, 0, ROCKCHIP_SIP_CONFIG_DRAM_GET_VERSION);
	dev_notice(&pdev->dev, "current ATF version 0x%lx\n", res.a1);
	if (res.a0 || res.a1 < 0x100) {
		dev_err(&pdev->dev, "trusted firmware is invalid or needs to be updated\n");
		return -ENXIO;
	}

	/*
	 * The first 4KB is used for interface parameters,
	 * the following 4KB * N holds the dts parameters.
	 */
	size = sizeof(struct rk1808_ddr_dts_config_timing);
	res = sip_smc_request_share_mem(DIV_ROUND_UP(size, 0x1000) + 1, SHARE_PAGE_TYPE_DDR);
	if (res.a0 != 0) {
		dev_err(&pdev->dev, "no ATF memory for init\n");
		return -ENOMEM;
	}
	ddr_psci_param = (struct share_params *)res.a1;
	of_get_rv1126_timings(&pdev->dev, pdev->dev.of_node, (uint32_t *)ddr_psci_param);

	/* enable starting dcf in the kernel once dcf is ready */
	node = of_parse_phandle(pdev->dev.of_node, "dcf", 0);
	wait_ctrl.regmap_dcf = syscon_node_to_regmap(node);
	of_node_put(node);
	if (IS_ERR(wait_ctrl.regmap_dcf)) {
		return PTR_ERR(wait_ctrl.regmap_dcf);
	}
	wait_ctrl.dcf_en = 1;

	init_waitqueue_head(&wait_ctrl.wait_wq);
	wait_ctrl.wait_en = 1;
	wait_ctrl.wait_time_out_ms = 85;

	complt_irq = platform_get_irq_byname(pdev, "complete");
	if (complt_irq < 0) {
		dev_err(&pdev->dev, "no IRQ for complt_irq: %d\n", complt_irq);
		return complt_irq;
	}
	wait_ctrl.complt_irq = complt_irq;

	ret = devm_request_irq(&pdev->dev, complt_irq, wait_dcf_complete_irq, 0, dev_name(&pdev->dev), &wait_ctrl);
	if (ret < 0) {
		dev_err(&pdev->dev, "cannot request complt_irq\n");
		return ret;
	}
	disable_irq(complt_irq);

	if (of_property_read_u32(pdev->dev.of_node, "update_drv_odt_cfg", &ddr_psci_param->update_drv_odt_cfg)) {
		ddr_psci_param->update_drv_odt_cfg = 0;
	}

	if (of_property_read_u32(pdev->dev.of_node, "update_deskew_cfg", &ddr_psci_param->update_deskew_cfg)) {
		ddr_psci_param->update_deskew_cfg = 0;
	}

	dmcfreq->set_rate_params = ddr_psci_param;
	rockchip_set_ddrclk_params(dmcfreq->set_rate_params);
	rockchip_set_ddrclk_dmcfreq_wait_complete(rockchip_dmcfreq_wait_complete);

	res = sip_smc_dram(SHARE_PAGE_TYPE_DDR, 0, ROCKCHIP_SIP_CONFIG_DRAM_INIT);
	if (res.a0) {
		dev_err(&pdev->dev, "rockchip_sip_config_dram_init error: %lx\n", res.a0);
		return -ENOMEM;
	}

	dmcfreq->set_auto_self_refresh = rockchip_ddr_set_auto_self_refresh;

	return 0;
}

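/*
 * Per-SoC init hooks: .data holds the callback that
 * rockchip_dmcfreq_dmc_init() invokes after matching the compatible
 * string. Entries are compiled in only when support for the
 * corresponding CPU is enabled.
 */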
static const struct of_device_id rockchip_dmcfreq_of_match[] = {
#if IS_ENABLED(CONFIG_CPU_PX30)
	{.compatible = "rockchip,px30-dmc", .data = px30_dmc_init},
#endif
#if IS_ENABLED(CONFIG_CPU_RK1808)
	{.compatible = "rockchip,rk1808-dmc", .data = rk1808_dmc_init},
#endif
#if IS_ENABLED(CONFIG_CPU_RK312X)
	{.compatible = "rockchip,rk3128-dmc", .data = rk3128_dmc_init},
#endif
#if IS_ENABLED(CONFIG_CPU_RK322X)
	{.compatible = "rockchip,rk3228-dmc", .data = rk3228_dmc_init},
#endif
#if IS_ENABLED(CONFIG_CPU_RK3288)
	{.compatible = "rockchip,rk3288-dmc", .data = rk3288_dmc_init},
#endif
#if IS_ENABLED(CONFIG_CPU_RK3308)
	{.compatible = "rockchip,rk3308-dmc", .data = NULL},
#endif
#if IS_ENABLED(CONFIG_CPU_RK3328)
	{.compatible = "rockchip,rk3328-dmc", .data = rk3328_dmc_init},
#endif
#if IS_ENABLED(CONFIG_CPU_RK3368)
	{.compatible = "rockchip,rk3368-dmc", .data = rk3368_dmc_init},
#endif
#if IS_ENABLED(CONFIG_CPU_RK3399)
	{.compatible = "rockchip,rk3399-dmc", .data = rk3399_dmc_init},
#endif
#if IS_ENABLED(CONFIG_CPU_RK3568)
	{.compatible = "rockchip,rk3568-dmc", .data = rk3568_dmc_init},
#endif
#if IS_ENABLED(CONFIG_CPU_RK3588)
	{.compatible = "rockchip,rk3588-dmc", .data = rk3588_dmc_init},
#endif
#if IS_ENABLED(CONFIG_CPU_RV1126)
	{.compatible = "rockchip,rv1126-dmc", .data = rv1126_dmc_init},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, rockchip_dmcfreq_of_match);

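/*
 * Parse a devicetree property holding <min max freq> triplets into a
 * freq_map_table terminated by DMCFREQ_TABLE_END. Judging by the x1000
 * scaling below, the third cell is a rate in kHz. A hypothetical entry
 * (values are purely illustrative):
 *
 *	cpu-bw-dmc-freq = <
 *		0    1000  324000
 *		1001 2000  528000
 *	>;
 */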
static int rockchip_get_freq_map_table(struct device_node *np, char *prop_name, struct freq_map_table **table)
{
	struct freq_map_table *tbl;
	const struct property *prop;
	unsigned int temp_freq = 0;
	int count, i;

	prop = of_find_property(np, prop_name, NULL);
	if (!prop) {
		return -EINVAL;
	}

	if (!prop->value) {
		return -ENODATA;
	}

	count = of_property_count_u32_elems(np, prop_name);
	if (count < 0) {
		return -EINVAL;
	}

	if (count % 3) {
		return -EINVAL;
	}

	tbl = kcalloc(count / 3 + 1, sizeof(*tbl), GFP_KERNEL);
	if (!tbl) {
		return -ENOMEM;
	}

	for (i = 0; i < count / 3; i++) {
		of_property_read_u32_index(np, prop_name, 3 * i, &tbl[i].min);
		of_property_read_u32_index(np, prop_name, 3 * i + 1, &tbl[i].max);
		of_property_read_u32_index(np, prop_name, 3 * i + 2, &temp_freq);
		tbl[i].freq = temp_freq * 1000;
	}

	tbl[i].min = 0;
	tbl[i].max = 0;
	tbl[i].freq = DMCFREQ_TABLE_END;

	*table = tbl;

	return 0;
}

static int rockchip_get_rl_map_table(struct device_node *np, char *prop_name, struct rl_map_table **table)
{
	struct rl_map_table *tbl;
	const struct property *prop;
	int count, i;

	prop = of_find_property(np, prop_name, NULL);
	if (!prop) {
		return -EINVAL;
	}

	if (!prop->value) {
		return -ENODATA;
	}

	count = of_property_count_u32_elems(np, prop_name);
	if (count < 0) {
		return -EINVAL;
	}

	if (count % 2) {
		return -EINVAL;
	}

	tbl = kcalloc(count / 2 + 1, sizeof(*tbl), GFP_KERNEL);
	if (!tbl) {
		return -ENOMEM;
	}

	for (i = 0; i < count / 2; i++) {
		of_property_read_u32_index(np, prop_name, 2 * i, &tbl[i].pn);
		of_property_read_u32_index(np, prop_name, 2 * i + 1, &tbl[i].rl);
	}

	tbl[i].pn = 0;
	tbl[i].rl = DMCFREQ_TABLE_END;

	*table = tbl;

	return 0;
}

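/*
 * Parse "system-status-freq": <status rate> pairs, with the rate
 * apparently given in kHz (it is scaled by 1000 to Hz below). Each
 * recognised status gets its own rate; the camera states
 * (ISP/CIF/dualview) share a single fixed_rate that keeps the highest
 * value seen.
 */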
static int rockchip_get_system_status_rate(struct device_node *np, char *prop_name, struct rockchip_dmcfreq *dmcfreq)
{
	const struct property *prop;
	unsigned int status = 0, freq = 0;
	unsigned long temp_rate = 0;
	int count, i;

	prop = of_find_property(np, prop_name, NULL);
	if (!prop) {
		return -ENODEV;
	}

	if (!prop->value) {
		return -ENODATA;
	}

	count = of_property_count_u32_elems(np, prop_name);
	if (count < 0) {
		return -EINVAL;
	}

	if (count % 2) {
		return -EINVAL;
	}

	for (i = 0; i < count / 2; i++) {
		of_property_read_u32_index(np, prop_name, 2 * i, &status);
		of_property_read_u32_index(np, prop_name, 2 * i + 1, &freq);
		switch (status) {
		case SYS_STATUS_NORMAL:
			dmcfreq->normal_rate = (unsigned long)freq * 1000;
			break;
		case SYS_STATUS_SUSPEND:
			dmcfreq->suspend_rate = (unsigned long)freq * 1000;
			break;
		case SYS_STATUS_VIDEO_1080P:
			dmcfreq->video_1080p_rate = (unsigned long)freq * 1000;
			break;
		case SYS_STATUS_VIDEO_4K:
			dmcfreq->video_4k_rate = (unsigned long)freq * 1000;
			break;
		case SYS_STATUS_VIDEO_4K_10B:
			dmcfreq->video_4k_10b_rate = (unsigned long)freq * 1000;
			break;
		case SYS_STATUS_PERFORMANCE:
			dmcfreq->performance_rate = (unsigned long)freq * 1000;
			break;
		case SYS_STATUS_HDMI:
			dmcfreq->hdmi_rate = (unsigned long)freq * 1000;
			break;
		case SYS_STATUS_IDLE:
			dmcfreq->idle_rate = (unsigned long)freq * 1000;
			break;
		case SYS_STATUS_REBOOT:
			dmcfreq->reboot_rate = (unsigned long)freq * 1000;
			break;
		case SYS_STATUS_BOOST:
			dmcfreq->boost_rate = (unsigned long)freq * 1000;
			break;
		case SYS_STATUS_ISP:
		case SYS_STATUS_CIF0:
		case SYS_STATUS_CIF1:
		case SYS_STATUS_DUALVIEW:
			temp_rate = (unsigned long)freq * 1000;
			if (dmcfreq->fixed_rate < temp_rate) {
				dmcfreq->fixed_rate = temp_rate;
			}
			break;
		case SYS_STATUS_LOW_POWER:
			dmcfreq->low_power_rate = (unsigned long)freq * 1000;
			break;
		default:
			break;
		}
	}

	return 0;
}

static unsigned long rockchip_freq_level_2_rate(struct rockchip_dmcfreq *dmcfreq, unsigned int level)
{
	unsigned long rate = 0;

	switch (level) {
	case DMC_FREQ_LEVEL_LOW:
		rate = dmcfreq->rate_low;
		break;
	case DMC_FREQ_LEVEL_MID_LOW:
		rate = dmcfreq->rate_mid_low;
		break;
	case DMC_FREQ_LEVEL_MID_HIGH:
		rate = dmcfreq->rate_mid_high;
		break;
	case DMC_FREQ_LEVEL_HIGH:
		rate = dmcfreq->rate_high;
		break;
	default:
		break;
	}

	return rate;
}

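/*
 * Parse "system-status-level": <status level> pairs, where a level
 * (low / mid-low / mid-high / high) is mapped onto the rate list
 * reported by the firmware. With fewer than four rates the levels
 * collapse onto the nearest available entries; anything beyond six
 * rates is rejected.
 */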
static int rockchip_get_system_status_level(struct device_node *np, char *prop_name, struct rockchip_dmcfreq *dmcfreq)
{
	const struct property *prop;
	unsigned int status = 0, level = 0;
	unsigned long temp_rate = 0;
	int count, i;

	prop = of_find_property(np, prop_name, NULL);
	if (!prop) {
		return -ENODEV;
	}

	if (!prop->value) {
		return -ENODATA;
	}

	count = of_property_count_u32_elems(np, prop_name);
	if (count < 0) {
		return -EINVAL;
	}

	if (count % 2) {
		return -EINVAL;
	}

	if (dmcfreq->freq_count == 1) {
		dmcfreq->rate_low = dmcfreq->freq_info_rate[0];
		dmcfreq->rate_mid_low = dmcfreq->freq_info_rate[0];
		dmcfreq->rate_mid_high = dmcfreq->freq_info_rate[0];
		dmcfreq->rate_high = dmcfreq->freq_info_rate[0];
	} else if (dmcfreq->freq_count == 2) {
		dmcfreq->rate_low = dmcfreq->freq_info_rate[0];
		dmcfreq->rate_mid_low = dmcfreq->freq_info_rate[0];
		dmcfreq->rate_mid_high = dmcfreq->freq_info_rate[1];
		dmcfreq->rate_high = dmcfreq->freq_info_rate[1];
	} else if (dmcfreq->freq_count == 3) {
		dmcfreq->rate_low = dmcfreq->freq_info_rate[0];
		dmcfreq->rate_mid_low = dmcfreq->freq_info_rate[1];
		dmcfreq->rate_mid_high = dmcfreq->freq_info_rate[1];
		dmcfreq->rate_high = dmcfreq->freq_info_rate[2];
	} else if (dmcfreq->freq_count == 4) {
		dmcfreq->rate_low = dmcfreq->freq_info_rate[0];
		dmcfreq->rate_mid_low = dmcfreq->freq_info_rate[1];
		dmcfreq->rate_mid_high = dmcfreq->freq_info_rate[2];
		dmcfreq->rate_high = dmcfreq->freq_info_rate[3];
	} else if (dmcfreq->freq_count == 5 || dmcfreq->freq_count == 6) {
		dmcfreq->rate_low = dmcfreq->freq_info_rate[0];
		dmcfreq->rate_mid_low = dmcfreq->freq_info_rate[1];
		dmcfreq->rate_mid_high = dmcfreq->freq_info_rate[dmcfreq->freq_count - 2];
		dmcfreq->rate_high = dmcfreq->freq_info_rate[dmcfreq->freq_count - 1];
	} else {
		return -EINVAL;
	}

	dmcfreq->auto_min_rate = dmcfreq->rate_low;

	for (i = 0; i < count / 2; i++) {
		of_property_read_u32_index(np, prop_name, 2 * i, &status);
		of_property_read_u32_index(np, prop_name, 2 * i + 1, &level);
		switch (status) {
		case SYS_STATUS_NORMAL:
			dmcfreq->normal_rate = rockchip_freq_level_2_rate(dmcfreq, level);
			dev_info(dmcfreq->dev, "normal_rate = %lu\n", dmcfreq->normal_rate);
			break;
		case SYS_STATUS_SUSPEND:
			dmcfreq->suspend_rate = rockchip_freq_level_2_rate(dmcfreq, level);
			dev_info(dmcfreq->dev, "suspend_rate = %lu\n", dmcfreq->suspend_rate);
			break;
		case SYS_STATUS_VIDEO_1080P:
			dmcfreq->video_1080p_rate = rockchip_freq_level_2_rate(dmcfreq, level);
			dev_info(dmcfreq->dev, "video_1080p_rate = %lu\n", dmcfreq->video_1080p_rate);
			break;
		case SYS_STATUS_VIDEO_4K:
			dmcfreq->video_4k_rate = rockchip_freq_level_2_rate(dmcfreq, level);
			dev_info(dmcfreq->dev, "video_4k_rate = %lu\n", dmcfreq->video_4k_rate);
			break;
		case SYS_STATUS_VIDEO_4K_10B:
			dmcfreq->video_4k_10b_rate = rockchip_freq_level_2_rate(dmcfreq, level);
			dev_info(dmcfreq->dev, "video_4k_10b_rate = %lu\n", dmcfreq->video_4k_10b_rate);
			break;
		case SYS_STATUS_PERFORMANCE:
			dmcfreq->performance_rate = rockchip_freq_level_2_rate(dmcfreq, level);
			dev_info(dmcfreq->dev, "performance_rate = %lu\n", dmcfreq->performance_rate);
			break;
		case SYS_STATUS_HDMI:
			dmcfreq->hdmi_rate = rockchip_freq_level_2_rate(dmcfreq, level);
			dev_info(dmcfreq->dev, "hdmi_rate = %lu\n", dmcfreq->hdmi_rate);
			break;
		case SYS_STATUS_IDLE:
			dmcfreq->idle_rate = rockchip_freq_level_2_rate(dmcfreq, level);
			dev_info(dmcfreq->dev, "idle_rate = %lu\n", dmcfreq->idle_rate);
			break;
		case SYS_STATUS_REBOOT:
			dmcfreq->reboot_rate = rockchip_freq_level_2_rate(dmcfreq, level);
			dev_info(dmcfreq->dev, "reboot_rate = %lu\n", dmcfreq->reboot_rate);
			break;
		case SYS_STATUS_BOOST:
			dmcfreq->boost_rate = rockchip_freq_level_2_rate(dmcfreq, level);
			dev_info(dmcfreq->dev, "boost_rate = %lu\n", dmcfreq->boost_rate);
			break;
		case SYS_STATUS_ISP:
		case SYS_STATUS_CIF0:
		case SYS_STATUS_CIF1:
		case SYS_STATUS_DUALVIEW:
			temp_rate = rockchip_freq_level_2_rate(dmcfreq, level);
			if (dmcfreq->fixed_rate < temp_rate) {
				dmcfreq->fixed_rate = temp_rate;
				dev_info(dmcfreq->dev, "fixed_rate(isp|cif0|cif1|dualview) = %lu\n", dmcfreq->fixed_rate);
			}
			break;
		case SYS_STATUS_LOW_POWER:
			dmcfreq->low_power_rate = rockchip_freq_level_2_rate(dmcfreq, level);
			dev_info(dmcfreq->dev, "low_power_rate = %lu\n", dmcfreq->low_power_rate);
			break;
		default:
			break;
		}
	}

	return 0;
}

static void rockchip_dmcfreq_update_target(struct rockchip_dmcfreq *dmcfreq)
{
	struct devfreq *devfreq = dmcfreq->info.devfreq;

	mutex_lock(&devfreq->lock);
	update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);
}

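/*
 * System-status notifier: derive a target rate from the new status bits.
 * Fixed-rate states (ISP/dualview) and exclusive states (reboot,
 * suspend, low power) take their rate directly and jump to "next"; the
 * remaining states (performance, HDMI, video) compete and the highest
 * rate wins. Auto self-refresh is only enabled for suspend.
 */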
static int rockchip_dmcfreq_system_status_notifier(struct notifier_block *nb, unsigned long status, void *ptr)
{
	struct rockchip_dmcfreq *dmcfreq = system_status_to_dmcfreq(nb);
	unsigned long target_rate = 0;
	bool refresh = false;
	bool is_fixed = false;

	if (dmcfreq->fixed_rate && (is_dualview(status) || is_isp(status))) {
		if (dmcfreq->is_fixed) {
			return NOTIFY_OK;
		}
		is_fixed = true;
		target_rate = dmcfreq->fixed_rate;
		goto next;
	}

	if (dmcfreq->reboot_rate && (status & SYS_STATUS_REBOOT)) {
		if (dmcfreq->info.auto_freq_en) {
			devfreq_monitor_stop(dmcfreq->info.devfreq);
		}
		target_rate = dmcfreq->reboot_rate;
		goto next;
	}

	if (dmcfreq->suspend_rate && (status & SYS_STATUS_SUSPEND)) {
		target_rate = dmcfreq->suspend_rate;
		refresh = true;
		goto next;
	}

	if (dmcfreq->low_power_rate && (status & SYS_STATUS_LOW_POWER)) {
		target_rate = dmcfreq->low_power_rate;
		goto next;
	}

	if (dmcfreq->performance_rate && (status & SYS_STATUS_PERFORMANCE)) {
		if (dmcfreq->performance_rate > target_rate) {
			target_rate = dmcfreq->performance_rate;
		}
	}

	if (dmcfreq->hdmi_rate && (status & SYS_STATUS_HDMI)) {
		if (dmcfreq->hdmi_rate > target_rate) {
			target_rate = dmcfreq->hdmi_rate;
		}
	}

	if (dmcfreq->video_4k_rate && (status & SYS_STATUS_VIDEO_4K)) {
		if (dmcfreq->video_4k_rate > target_rate) {
			target_rate = dmcfreq->video_4k_rate;
		}
	}

	if (dmcfreq->video_4k_10b_rate && (status & SYS_STATUS_VIDEO_4K_10B)) {
		if (dmcfreq->video_4k_10b_rate > target_rate) {
			target_rate = dmcfreq->video_4k_10b_rate;
		}
	}

	if (dmcfreq->video_1080p_rate && (status & SYS_STATUS_VIDEO_1080P)) {
		if (dmcfreq->video_1080p_rate > target_rate) {
			target_rate = dmcfreq->video_1080p_rate;
		}
	}

next:
	dev_dbg(dmcfreq->dev, "status=0x%x\n", (unsigned int)status);
	dmcfreq->is_fixed = is_fixed;
	dmcfreq->status_rate = target_rate;
	if (dmcfreq->refresh != refresh) {
		if (dmcfreq->set_auto_self_refresh) {
			dmcfreq->set_auto_self_refresh(refresh);
		}
		dmcfreq->refresh = refresh;
	}
	rockchip_dmcfreq_update_target(dmcfreq);

	return NOTIFY_OK;
}

static ssize_t rockchip_dmcfreq_status_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned int status = rockchip_get_system_status();

	return sprintf(buf, "0x%x\n", status);
}

static ssize_t rockchip_dmcfreq_status_store(struct device *dev, struct device_attribute *attr, const char *buf,
					     size_t count)
{
	if (!count) {
		return -EINVAL;
	}

	rockchip_update_system_status(buf);

	return count;
}

static DEVICE_ATTR(system_status, 0644, rockchip_dmcfreq_status_show, rockchip_dmcfreq_status_store);

static ssize_t upthreshold_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev->parent);
	struct rockchip_dmcfreq_ondemand_data *data = &dmcfreq->ondemand_data;

	return sprintf(buf, "%d\n", data->upthreshold);
}

static ssize_t upthreshold_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev->parent);
	struct rockchip_dmcfreq_ondemand_data *data = &dmcfreq->ondemand_data;
	unsigned int value;

	if (kstrtouint(buf, 10, &value)) {
		return -EINVAL;
	}

	data->upthreshold = value;

	return count;
}

static DEVICE_ATTR_RW(upthreshold);

static ssize_t downdifferential_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev->parent);
	struct rockchip_dmcfreq_ondemand_data *data = &dmcfreq->ondemand_data;

	return sprintf(buf, "%d\n", data->downdifferential);
}

static ssize_t downdifferential_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev->parent);
	struct rockchip_dmcfreq_ondemand_data *data = &dmcfreq->ondemand_data;
	unsigned int value;

	if (kstrtouint(buf, 10, &value)) {
		return -EINVAL;
	}

	data->downdifferential = value;

	return count;
}

static DEVICE_ATTR_RW(downdifferential);

static unsigned long get_nocp_req_rate(struct rockchip_dmcfreq *dmcfreq)
{
	unsigned long target = 0, cpu_bw = 0;
	int i;

	if (!dmcfreq->cpu_bw_tbl || dmcfreq->nocp_cpu_id < 0) {
		goto out;
	}

	cpu_bw = dmcfreq->nocp_bw[dmcfreq->nocp_cpu_id];

	for (i = 0; dmcfreq->cpu_bw_tbl[i].freq != CPUFREQ_TABLE_END; i++) {
		if (cpu_bw >= dmcfreq->cpu_bw_tbl[i].min) {
			target = dmcfreq->cpu_bw_tbl[i].freq;
		}
	}

out:
	return target;
}

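/*
 * dmc_ondemand governor. The floor frequency is the largest of the
 * status rate, the NoC-probe CPU bandwidth request and the VOP request,
 * plus the boost rate while a touch-boost pulse is active. On top of
 * that the usual ondemand rule is applied to the load counters:
 *
 *	freq = busy/total * cur_freq * 100 / (upthreshold - downdifferential/2)
 *
 * with DEVFREQ_MAX_FREQ short-circuits whenever the counters are
 * unusable or the bus is busier than upthreshold percent.
 */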
static int devfreq_dmc_ondemand_func(struct devfreq *df, unsigned long *freq)
{
	int err;
	struct devfreq_dev_status *stat;
	unsigned long long a, b;
	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(df->dev.parent);
	struct rockchip_dmcfreq_ondemand_data *data = &dmcfreq->ondemand_data;
	unsigned int upthreshold = data->upthreshold;
	unsigned int downdifferential = data->downdifferential;
	unsigned long target_freq = 0, nocp_req_rate = 0;
	u64 now;

	if (dmcfreq->info.auto_freq_en && !dmcfreq->is_fixed) {
		if (dmcfreq->status_rate) {
			target_freq = dmcfreq->status_rate;
		} else if (dmcfreq->auto_min_rate) {
			target_freq = dmcfreq->auto_min_rate;
		}
		nocp_req_rate = get_nocp_req_rate(dmcfreq);
		target_freq = max3(target_freq, nocp_req_rate, dmcfreq->info.vop_req_rate);
		now = ktime_to_us(ktime_get());
		if (now < dmcfreq->touchboostpulse_endtime) {
			target_freq = max(target_freq, dmcfreq->boost_rate);
		}
	} else {
		if (dmcfreq->status_rate) {
			target_freq = dmcfreq->status_rate;
		} else if (dmcfreq->normal_rate) {
			target_freq = dmcfreq->normal_rate;
		}
		if (target_freq) {
			*freq = target_freq;
		}
		if (dmcfreq->info.auto_freq_en && !devfreq_update_stats(df)) {
			return 0;
		}
		goto reset_last_status;
	}

	if (!upthreshold || !downdifferential) {
		goto reset_last_status;
	}

	if (upthreshold > 100 || upthreshold < downdifferential) {
		goto reset_last_status;
	}

	err = devfreq_update_stats(df);
	if (err) {
		goto reset_last_status;
	}

	stat = &df->last_status;

	/* Assume MAX if it is going to be divided by zero */
	if (stat->total_time == 0) {
		*freq = DEVFREQ_MAX_FREQ;
		return 0;
	}

	/* Prevent overflow */
	if (stat->busy_time >= (1 << 24) || stat->total_time >= (1 << 24)) {
		stat->busy_time >>= 7;
		stat->total_time >>= 7;
	}

	/* Set MAX if it's busy enough */
	if (stat->busy_time * 100 > stat->total_time * upthreshold) {
		*freq = DEVFREQ_MAX_FREQ;
		return 0;
	}

	/* Set MAX if we do not know the initial frequency */
	if (stat->current_frequency == 0) {
		*freq = DEVFREQ_MAX_FREQ;
		return 0;
	}

	/* Keep the current frequency */
	if (stat->busy_time * 100 > stat->total_time * (upthreshold - downdifferential)) {
		*freq = max(target_freq, stat->current_frequency);
		return 0;
	}

	/* Set the desired frequency based on the load */
	a = stat->busy_time;
	a *= stat->current_frequency;
	b = div_u64(a, stat->total_time);
	b *= 100;
	b = div_u64(b, upthreshold - downdifferential / 2);
	*freq = max_t(unsigned long, target_freq, b);

	return 0;

reset_last_status:
	reset_last_status(df);

	return 0;
}

static int devfreq_dmc_ondemand_handler(struct devfreq *devfreq, unsigned int event, void *data)
{
	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(devfreq->dev.parent);

	if (!dmcfreq->info.auto_freq_en) {
		return 0;
	}

	switch (event) {
	case DEVFREQ_GOV_START:
		devfreq_monitor_start(devfreq);
		break;
	case DEVFREQ_GOV_STOP:
		devfreq_monitor_stop(devfreq);
		break;
	case DEVFREQ_GOV_UPDATE_INTERVAL:
		devfreq_update_interval(devfreq, (unsigned int *)data);
		break;
	case DEVFREQ_GOV_SUSPEND:
		devfreq_monitor_suspend(devfreq);
		break;
	case DEVFREQ_GOV_RESUME:
		devfreq_monitor_resume(devfreq);
		break;
	default:
		break;
	}

	return 0;
}

static struct devfreq_governor devfreq_dmc_ondemand = {
	.name = "dmc_ondemand",
	.get_target_freq = devfreq_dmc_ondemand_func,
	.event_handler = devfreq_dmc_ondemand_handler,
};

static int rockchip_dmcfreq_enable_event(struct rockchip_dmcfreq *dmcfreq)
{
	int i, ret;

	if (!dmcfreq->info.auto_freq_en) {
		return 0;
	}

	for (i = 0; i < dmcfreq->edev_count; i++) {
		ret = devfreq_event_enable_edev(dmcfreq->edev[i]);
		if (ret < 0) {
			dev_err(dmcfreq->dev, "failed to enable devfreq-event\n");
			return ret;
		}
	}

	return 0;
}

static int rockchip_dmcfreq_disable_event(struct rockchip_dmcfreq *dmcfreq)
{
	int i, ret;

	if (!dmcfreq->info.auto_freq_en) {
		return 0;
	}

	for (i = 0; i < dmcfreq->edev_count; i++) {
		ret = devfreq_event_disable_edev(dmcfreq->edev[i]);
		if (ret < 0) {
			dev_err(dmcfreq->dev, "failed to disable devfreq-event\n");
			return ret;
		}
	}

	return 0;
}

static int rockchip_get_edev_id(struct rockchip_dmcfreq *dmcfreq, const char *name)
{
	struct devfreq_event_dev *edev;
	int i;

	for (i = 0; i < dmcfreq->edev_count; i++) {
		edev = dmcfreq->edev[i];
		if (!strcmp(edev->desc->name, name)) {
			return i;
		}
	}

	return -EINVAL;
}

static int rockchip_dmcfreq_get_event(struct rockchip_dmcfreq *dmcfreq)
{
	struct device *dev = dmcfreq->dev;
	struct device_node *events_np, *np = dev->of_node;
	int i, j, count, available_count = 0;

	count = devfreq_event_get_edev_count(dev, "devfreq-events");
	if (count < 0) {
		dev_dbg(dev, "failed to get count of devfreq-event devices\n");
		return 0;
	}
	for (i = 0; i < count; i++) {
		events_np = of_parse_phandle(np, "devfreq-events", i);
		if (!events_np) {
			continue;
		}
		if (of_device_is_available(events_np)) {
			available_count++;
		}
		of_node_put(events_np);
	}
	if (!available_count) {
		dev_dbg(dev, "failed to get an available devfreq-event\n");
		return 0;
	}
	dmcfreq->edev_count = available_count;
	dmcfreq->edev = devm_kcalloc(dev, available_count, sizeof(*dmcfreq->edev), GFP_KERNEL);
	if (!dmcfreq->edev) {
		return -ENOMEM;
	}

	for (i = 0, j = 0; i < count; i++) {
		events_np = of_parse_phandle(np, "devfreq-events", i);
		if (!events_np) {
			continue;
		}
		if (of_device_is_available(events_np)) {
			of_node_put(events_np);
			if (j >= available_count) {
				dev_err(dev, "invalid event count\n");
				return -EINVAL;
			}
			dmcfreq->edev[j] = devfreq_event_get_edev_by_phandle(dev, "devfreq-events", i);
			if (IS_ERR(dmcfreq->edev[j])) {
				return -EPROBE_DEFER;
			}
			j++;
		} else {
			of_node_put(events_np);
		}
	}
	dmcfreq->info.auto_freq_en = true;
	dmcfreq->dfi_id = rockchip_get_edev_id(dmcfreq, "dfi");
	dmcfreq->nocp_cpu_id = rockchip_get_edev_id(dmcfreq, "nocp-cpu");
	dmcfreq->nocp_bw = devm_kcalloc(dev, available_count, sizeof(*dmcfreq->nocp_bw), GFP_KERNEL);
	if (!dmcfreq->nocp_bw) {
		return -ENOMEM;
	}

	return 0;
}

static int rockchip_dmcfreq_power_control(struct rockchip_dmcfreq *dmcfreq)
{
	struct device *dev = dmcfreq->dev;

	dmcfreq->vdd_center = devm_regulator_get_optional(dev, "center");
	if (IS_ERR(dmcfreq->vdd_center)) {
		dev_err(dev, "Cannot get the regulator \"center\"\n");
		return PTR_ERR(dmcfreq->vdd_center);
	}

	dmcfreq->dmc_clk = devm_clk_get(dev, "dmc_clk");
	if (IS_ERR(dmcfreq->dmc_clk)) {
		dev_err(dev, "Cannot get the clk dmc_clk. If using SCMI, the trusted firmware needs to be updated to v1.01 or later\n");
		return PTR_ERR(dmcfreq->dmc_clk);
	}
	dmcfreq->rate = clk_get_rate(dmcfreq->dmc_clk);

	return 0;
}

static int rockchip_dmcfreq_dmc_init(struct platform_device *pdev, struct rockchip_dmcfreq *dmcfreq)
{
	const struct of_device_id *match;
	int (*init)(struct platform_device *pdev, struct rockchip_dmcfreq *data);
	int ret;

	match = of_match_node(rockchip_dmcfreq_of_match, pdev->dev.of_node);
	if (match) {
		init = match->data;
		if (init) {
			ret = init(pdev, dmcfreq);
			if (ret) {
				return ret;
			}
		}
	}

	return 0;
}

static void rockchip_dmcfreq_parse_dt(struct rockchip_dmcfreq *dmcfreq)
{
	struct device *dev = dmcfreq->dev;
	struct device_node *np = dev->of_node;

	if (!rockchip_get_system_status_rate(np, "system-status-freq", dmcfreq)) {
		dmcfreq->system_status_en = true;
	} else if (!rockchip_get_system_status_level(np, "system-status-level", dmcfreq)) {
		dmcfreq->system_status_en = true;
	}

	of_property_read_u32(np, "min-cpu-freq", &dmcfreq->min_cpu_freq);

	of_property_read_u32(np, "upthreshold", &dmcfreq->ondemand_data.upthreshold);
	of_property_read_u32(np, "downdifferential", &dmcfreq->ondemand_data.downdifferential);
	if (dmcfreq->info.auto_freq_en) {
		of_property_read_u32(np, "auto-freq-en", &dmcfreq->info.auto_freq_en);
	}
	if (!dmcfreq->auto_min_rate) {
		of_property_read_u32(np, "auto-min-freq", (u32 *)&dmcfreq->auto_min_rate);
		dmcfreq->auto_min_rate *= 1000;
	}

	if (rockchip_get_freq_map_table(np, "cpu-bw-dmc-freq", &dmcfreq->cpu_bw_tbl)) {
		dev_dbg(dev, "failed to get cpu bandwidth to dmc rate\n");
	}
	if (rockchip_get_freq_map_table(np, "vop-frame-bw-dmc-freq", &dmcfreq->info.vop_frame_bw_tbl)) {
		dev_dbg(dev, "failed to get vop frame bandwidth to dmc rate\n");
	}
	if (rockchip_get_freq_map_table(np, "vop-bw-dmc-freq", &dmcfreq->info.vop_bw_tbl)) {
		dev_err(dev, "failed to get vop bandwidth to dmc rate\n");
	}
	if (rockchip_get_rl_map_table(np, "vop-pn-msch-readlatency", &dmcfreq->info.vop_pn_rl_tbl)) {
		dev_err(dev, "failed to get vop pn to msch rl\n");
	}

	of_property_read_u32(np, "touchboost_duration", (u32 *)&dmcfreq->touchboostpulse_duration_val);
	if (dmcfreq->touchboostpulse_duration_val) {
		dmcfreq->touchboostpulse_duration_val *= USEC_PER_MSEC;
	} else {
		dmcfreq->touchboostpulse_duration_val = 500 * USEC_PER_MSEC;
	}
}

static int rockchip_dmcfreq_set_volt_only(struct rockchip_dmcfreq *dmcfreq)
{
	struct device *dev = dmcfreq->dev;
	struct dev_pm_opp *opp;
	unsigned long opp_volt, opp_rate = dmcfreq->rate;
	int ret;

	opp = devfreq_recommended_opp(dev, &opp_rate, 0);
	if (IS_ERR(opp)) {
		dev_err(dev, "Failed to find opp for %lu Hz\n", opp_rate);
		return PTR_ERR(opp);
	}
	opp_volt = dev_pm_opp_get_voltage(opp);
	dev_pm_opp_put(opp);

	ret = regulator_set_voltage(dmcfreq->vdd_center, opp_volt, INT_MAX);
	if (ret) {
		dev_err(dev, "Cannot set voltage %lu uV\n", opp_volt);
		return ret;
	}

	return 0;
}

static int rockchip_dmcfreq_add_devfreq(struct rockchip_dmcfreq *dmcfreq)
{
	struct devfreq_dev_profile *devp = &rockchip_devfreq_dmc_profile;
	struct device *dev = dmcfreq->dev;
	struct dev_pm_opp *opp;
	struct devfreq *devfreq;
	unsigned long opp_rate = dmcfreq->rate;

	opp = devfreq_recommended_opp(dev, &opp_rate, 0);
	if (IS_ERR(opp)) {
		dev_err(dev, "Failed to find opp for %lu Hz\n", opp_rate);
		return PTR_ERR(opp);
	}
	dev_pm_opp_put(opp);

	devp->initial_freq = dmcfreq->rate;
	devfreq = devm_devfreq_add_device(dev, devp, "dmc_ondemand", &dmcfreq->ondemand_data);
	if (IS_ERR(devfreq)) {
		dev_err(dev, "failed to add devfreq\n");
		return PTR_ERR(devfreq);
	}

	devm_devfreq_register_opp_notifier(dev, devfreq);

	devfreq->last_status.current_frequency = opp_rate;

	reset_last_status(devfreq);

	dmcfreq->info.devfreq = devfreq;

	return 0;
}

static struct monitor_dev_profile dmc_mdevp = {
	.type = MONITOR_TPYE_DEV, /* sic, spelling from rockchip_system_monitor.h */
	.low_temp_adjust = rockchip_monitor_dev_low_temp_adjust,
	.high_temp_adjust = rockchip_monitor_dev_high_temp_adjust,
};

static void rockchip_dmcfreq_register_notifier(struct rockchip_dmcfreq *dmcfreq)
{
	int ret;

	if (vop_register_dmc()) {
		dev_err(dmcfreq->dev, "failed to register notifier with vop\n");
	}

	dmcfreq->status_nb.notifier_call = rockchip_dmcfreq_system_status_notifier;
	ret = rockchip_register_system_status_notifier(&dmcfreq->status_nb);
	if (ret) {
		dev_err(dmcfreq->dev, "failed to register system_status nb\n");
	}

	dmc_mdevp.data = dmcfreq->info.devfreq;
	dmcfreq->mdev_info = rockchip_system_monitor_register(dmcfreq->dev, &dmc_mdevp);
	if (IS_ERR(dmcfreq->mdev_info)) {
		dev_dbg(dmcfreq->dev, "running without system monitor\n");
		dmcfreq->mdev_info = NULL;
	}
}

static void rockchip_dmcfreq_add_interface(struct rockchip_dmcfreq *dmcfreq)
{
	struct devfreq *devfreq = dmcfreq->info.devfreq;

	if (sysfs_create_file(&devfreq->dev.kobj, &dev_attr_upthreshold.attr)) {
		dev_err(dmcfreq->dev, "failed to register upthreshold sysfs file\n");
	}
	if (sysfs_create_file(&devfreq->dev.kobj, &dev_attr_downdifferential.attr)) {
		dev_err(dmcfreq->dev, "failed to register downdifferential sysfs file\n");
	}

	if (!rockchip_add_system_status_interface(&devfreq->dev)) {
		return;
	}
	if (sysfs_create_file(&devfreq->dev.kobj, &dev_attr_system_status.attr)) {
		dev_err(dmcfreq->dev, "failed to register system_status sysfs file\n");
	}
}

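/*
 * Touch boost: touch and key input events push the DMC to boost_rate
 * for touchboostpulse_duration_val microseconds. The work item merely
 * re-evaluates the devfreq target; devfreq_dmc_ondemand_func() applies
 * the boost for as long as the pulse end time lies in the future.
 */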
static void rockchip_dmcfreq_boost_work(struct work_struct *work)
{
	struct rockchip_dmcfreq *dmcfreq = boost_to_dmcfreq(work);

	rockchip_dmcfreq_update_target(dmcfreq);
}

static void rockchip_dmcfreq_input_event(struct input_handle *handle, unsigned int type, unsigned int code, int value)
{
	struct rockchip_dmcfreq *dmcfreq = handle->private;
	u64 now, endtime;

	if (type != EV_ABS && type != EV_KEY) {
		return;
	}

	now = ktime_to_us(ktime_get());
	endtime = now + dmcfreq->touchboostpulse_duration_val;
	if (endtime < (dmcfreq->touchboostpulse_endtime + 10 * USEC_PER_MSEC)) {
		return;
	}
	dmcfreq->touchboostpulse_endtime = endtime;

	schedule_work(&dmcfreq->boost_work);
}

static int rockchip_dmcfreq_input_connect(struct input_handler *handler, struct input_dev *dev,
					  const struct input_device_id *id)
{
	int error;
	struct input_handle *handle;
	struct rockchip_dmcfreq *dmcfreq = input_hd_to_dmcfreq(handler);

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle) {
		return -ENOMEM;
	}

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "dmcfreq";
	handle->private = dmcfreq;

	error = input_register_handle(handle);
	if (error) {
		goto err_free;
	}

	error = input_open_device(handle);
	if (error) {
		goto err_unregister;
	}

	return 0;
err_unregister:
	input_unregister_handle(handle);
err_free:
	kfree(handle);
	return error;
}

static void rockchip_dmcfreq_input_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}

static const struct input_device_id rockchip_dmcfreq_input_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = {BIT_MASK(EV_ABS)},
		.absbit = {[BIT_WORD(ABS_MT_POSITION_X)] = BIT_MASK(ABS_MT_POSITION_X) | BIT_MASK(ABS_MT_POSITION_Y)},
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_ABSBIT,
		.keybit = {[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH)},
		.absbit = {[BIT_WORD(ABS_X)] = BIT_MASK(ABS_X) | BIT_MASK(ABS_Y)},
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = {BIT_MASK(EV_KEY)},
	},
	{},
};

static void rockchip_dmcfreq_boost_init(struct rockchip_dmcfreq *dmcfreq)
{
	if (!dmcfreq->boost_rate) {
		return;
	}
	INIT_WORK(&dmcfreq->boost_work, rockchip_dmcfreq_boost_work);
	dmcfreq->input_handler.event = rockchip_dmcfreq_input_event;
	dmcfreq->input_handler.connect = rockchip_dmcfreq_input_connect;
	dmcfreq->input_handler.disconnect = rockchip_dmcfreq_input_disconnect;
	dmcfreq->input_handler.name = "dmcfreq";
	dmcfreq->input_handler.id_table = rockchip_dmcfreq_input_ids;
	if (input_register_handler(&dmcfreq->input_handler)) {
		dev_err(dmcfreq->dev, "failed to register input handler\n");
	}
}

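/*
 * Static power model for the devfreq cooling device:
 *
 *	P_static = (C * V^3 >> 20) * f(T) / 1000000
 *
 * where C is the devicetree static-power-coefficient, V^3 is pre-scaled
 * by >> 10 below, and f(T) = ts[3]*T^3 + ts[2]*T^2 + ts[1]*T + ts[0] is
 * a cubic fit in degrees Celsius (the thermal zone reports millidegrees,
 * hence the division by 1000). Falls back to 55 degrees when no thermal
 * zone is available.
 */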
static unsigned long model_static_power(struct devfreq *devfreq, unsigned long voltage)
{
	struct device *dev = devfreq->dev.parent;
	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev);

	int temperature;
	unsigned long temp;
	unsigned long temp_squared, temp_cubed, temp_scaling_factor;
	const unsigned long voltage_cubed = (voltage * voltage * voltage) >> 10;

	if (!IS_ERR_OR_NULL(dmcfreq->ddr_tz) && dmcfreq->ddr_tz->ops->get_temp) {
		int ret;

		ret = dmcfreq->ddr_tz->ops->get_temp(dmcfreq->ddr_tz, &temperature);
		if (ret) {
			dev_warn_ratelimited(dev, "failed to read temp for ddr thermal zone: %d\n", ret);
			temperature = FALLBACK_STATIC_TEMPERATURE;
		}
	} else {
		temperature = FALLBACK_STATIC_TEMPERATURE;
	}

	/*
	 * Calculate the temperature scaling factor. To be applied to the
	 * voltage scaled power.
	 */
	temp = temperature / 1000;
	temp_squared = temp * temp;
	temp_cubed = temp_squared * temp;
	temp_scaling_factor = (dmcfreq->ts[3] * temp_cubed) + (dmcfreq->ts[2] * temp_squared) +
			      (dmcfreq->ts[1] * temp) + dmcfreq->ts[0];

	return (((dmcfreq->static_coefficient * voltage_cubed) >> 20) * temp_scaling_factor) / 1000000;
}

static struct devfreq_cooling_power ddr_cooling_power_data = {
	.get_static_power = model_static_power,
	.dyn_power_coeff = 120,
};

static int ddr_power_model_simple_init(struct rockchip_dmcfreq *dmcfreq)
{
	struct device_node *power_model_node;
	const char *tz_name;
	u32 temp;
	int ret = 0;

	power_model_node = of_get_child_by_name(dmcfreq->dev->of_node, "ddr_power_model");
	if (!power_model_node) {
		dev_err(dmcfreq->dev, "could not find power_model node\n");
		return -ENODEV;
	}

	if (of_property_read_string(power_model_node, "thermal-zone", &tz_name)) {
		dev_err(dmcfreq->dev, "thermal-zone in power_model not available\n");
		ret = -EINVAL;
		goto out;
	}

	dmcfreq->ddr_tz = thermal_zone_get_zone_by_name(tz_name);
	if (IS_ERR(dmcfreq->ddr_tz)) {
		pr_warn_ratelimited("Error getting ddr thermal zone (%ld), not yet ready?\n",
				    PTR_ERR(dmcfreq->ddr_tz));
		dmcfreq->ddr_tz = NULL;
		ret = -EPROBE_DEFER;
		goto out;
	}

	if (of_property_read_u32(power_model_node, "static-power-coefficient", &dmcfreq->static_coefficient)) {
		dev_err(dmcfreq->dev, "static-power-coefficient not available\n");
		ret = -EINVAL;
		goto out;
	}
	if (of_property_read_u32(power_model_node, "dynamic-power-coefficient", &temp)) {
		dev_err(dmcfreq->dev, "dynamic-power-coefficient not available\n");
		ret = -EINVAL;
		goto out;
	}
	ddr_cooling_power_data.dyn_power_coeff = (unsigned long)temp;

	if (of_property_read_u32_array(power_model_node, "ts", (u32 *)dmcfreq->ts, 4)) {
		dev_err(dmcfreq->dev, "ts in power_model not available\n");
		ret = -EINVAL;
	}

out:
	of_node_put(power_model_node);

	return ret;
}

static void rockchip_dmcfreq_register_cooling_device(struct rockchip_dmcfreq *dmcfreq)
{
	int ret;

	ret = ddr_power_model_simple_init(dmcfreq);
	if (ret) {
		return;
	}
	dmcfreq->devfreq_cooling =
		of_devfreq_cooling_register_power(dmcfreq->dev->of_node, dmcfreq->info.devfreq, &ddr_cooling_power_data);
	if (IS_ERR(dmcfreq->devfreq_cooling)) {
		ret = PTR_ERR(dmcfreq->devfreq_cooling);
		dev_err(dmcfreq->dev, "Failed to register cooling device (%d)\n", ret);
	}
}

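/*
 * Probe: gather the devfreq-event counters, the "center" regulator and
 * the dmc clock, build the OPP table, run the SoC-specific init hook and
 * parse the tuning properties. Without system-status rates and without
 * event counters the driver only pins the rail to the voltage of the
 * current rate. A minimal devicetree sketch (labels and values are
 * hypothetical; property names are the ones parsed above):
 *
 *	dmc {
 *		compatible = "rockchip,rk3568-dmc";
 *		interrupts = <...>;
 *		interrupt-names = "complete";
 *		clocks = <...>;
 *		clock-names = "dmc_clk";
 *		center-supply = <&vdd_center>;
 *		devfreq-events = <&dfi>;
 *		system-status-freq = <SYS_STATUS_NORMAL 528000>;
 *	};
 */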
static int rockchip_dmcfreq_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rockchip_dmcfreq *data;
	int ret;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data) {
		return -ENOMEM;
	}

	data->dev = dev;
	data->info.dev = dev;
	mutex_init(&data->lock);
	INIT_LIST_HEAD(&data->video_info_list);

	ret = rockchip_dmcfreq_get_event(data);
	if (ret) {
		return ret;
	}

	ret = rockchip_dmcfreq_power_control(data);
	if (ret) {
		return ret;
	}

	ret = rockchip_init_opp_table(dev, NULL, "ddr_leakage", "center");
	if (ret) {
		return ret;
	}

	ret = rockchip_dmcfreq_dmc_init(pdev, data);
	if (ret) {
		return ret;
	}

	rockchip_dmcfreq_parse_dt(data);
	if (!data->system_status_en && !data->info.auto_freq_en) {
		dev_info(dev, "devfreq feature not enabled, setting voltage only\n");
		return rockchip_dmcfreq_set_volt_only(data);
	}

	cpu_latency_qos_add_request(&pm_qos, PM_QOS_DEFAULT_VALUE);
	platform_set_drvdata(pdev, data);

	ret = devfreq_add_governor(&devfreq_dmc_ondemand);
	if (ret) {
		return ret;
	}
	ret = rockchip_dmcfreq_enable_event(data);
	if (ret) {
		return ret;
	}
	ret = rockchip_dmcfreq_add_devfreq(data);
	if (ret) {
		rockchip_dmcfreq_disable_event(data);
		return ret;
	}

	rockchip_dmcfreq_register_notifier(data);
	rockchip_dmcfreq_add_interface(data);
	rockchip_dmcfreq_boost_init(data);
	rockchip_dmcfreq_vop_bandwidth_init(&data->info);
	rockchip_dmcfreq_register_cooling_device(data);

	rockchip_set_system_status(SYS_STATUS_NORMAL);

	return 0;
}

static __maybe_unused int rockchip_dmcfreq_suspend(struct device *dev)
{
	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev);
	int ret = 0;

	if (!dmcfreq) {
		return 0;
	}

	ret = rockchip_dmcfreq_disable_event(dmcfreq);
	if (ret) {
		return ret;
	}

	ret = devfreq_suspend_device(dmcfreq->info.devfreq);
	if (ret < 0) {
		dev_err(dev, "failed to suspend the devfreq devices\n");
		return ret;
	}

	return 0;
}

static __maybe_unused int rockchip_dmcfreq_resume(struct device *dev)
{
	struct rockchip_dmcfreq *dmcfreq = dev_get_drvdata(dev);
	int ret = 0;

	if (!dmcfreq) {
		return 0;
	}

	ret = rockchip_dmcfreq_enable_event(dmcfreq);
	if (ret) {
		return ret;
	}

	ret = devfreq_resume_device(dmcfreq->info.devfreq);
	if (ret < 0) {
		dev_err(dev, "failed to resume the devfreq devices\n");
		return ret;
	}
	return ret;
}

static SIMPLE_DEV_PM_OPS(rockchip_dmcfreq_pm, rockchip_dmcfreq_suspend, rockchip_dmcfreq_resume);

static struct platform_driver rockchip_dmcfreq_driver = {
	.probe = rockchip_dmcfreq_probe,
	.driver = {
		.name = "rockchip-dmc",
		.pm = &rockchip_dmcfreq_pm,
		.of_match_table = rockchip_dmcfreq_of_match,
	},
};
module_platform_driver(rockchip_dmcfreq_driver);

MODULE_AUTHOR("Finley Xiao <finley.xiao@rock-chips.com>");
MODULE_DESCRIPTION("Rockchip dmcfreq driver with devfreq framework");
MODULE_LICENSE("GPL v2");