1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
4 * Copyright (c) 2019, Linaro Limited
5 */
6
7#include <linux/module.h>
8#include <linux/err.h>
9#include <linux/debugfs.h>
10#include <linux/string.h>
11#include <linux/kernel.h>
12#include <linux/list.h>
13#include <linux/init.h>
14#include <linux/io.h>
15#include <linux/bitops.h>
16#include <linux/slab.h>
17#include <linux/of.h>
18#include <linux/of_device.h>
19#include <linux/platform_device.h>
20#include <linux/pm_domain.h>
21#include <linux/pm_opp.h>
22#include <linux/interrupt.h>
23#include <linux/regmap.h>
24#include <linux/mfd/syscon.h>
25#include <linux/regulator/consumer.h>
26#include <linux/clk.h>
27#include <linux/nvmem-consumer.h>
28
29/* Register Offsets for RB-CPR and Bit Definitions */
30
31/* RBCPR Version Register */
32#define REG_RBCPR_VERSION		0
33#define RBCPR_VER_2			0x02
34#define FLAGS_IGNORE_1ST_IRQ_STATUS	BIT(0)
35
36/* RBCPR Gate Count and Target Registers */
37#define REG_RBCPR_GCNT_TARGET(n)	(0x60 + 4 * (n))
38
39#define RBCPR_GCNT_TARGET_TARGET_SHIFT	0
40#define RBCPR_GCNT_TARGET_TARGET_MASK	GENMASK(11, 0)
41#define RBCPR_GCNT_TARGET_GCNT_SHIFT	12
42#define RBCPR_GCNT_TARGET_GCNT_MASK	GENMASK(9, 0)
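/*
 * Layout of each GCNT_TARGET register: the ring oscillator gate count
 * occupies bits [21:12] and the 12-bit target quotient occupies bits
 * [11:0], per the shift/mask definitions above.
 */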
43
44/* RBCPR Timer Control */
45#define REG_RBCPR_TIMER_INTERVAL	0x44
46#define REG_RBIF_TIMER_ADJUST		0x4c
47
48#define RBIF_TIMER_ADJ_CONS_UP_MASK	GENMASK(3, 0)
49#define RBIF_TIMER_ADJ_CONS_UP_SHIFT	0
50#define RBIF_TIMER_ADJ_CONS_DOWN_MASK	GENMASK(3, 0)
51#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT	4
52#define RBIF_TIMER_ADJ_CLAMP_INT_MASK	GENMASK(7, 0)
53#define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT	8
54
55/* RBCPR Config Register */
56#define REG_RBIF_LIMIT			0x48
57#define RBIF_LIMIT_CEILING_MASK		GENMASK(5, 0)
58#define RBIF_LIMIT_CEILING_SHIFT	6
59#define RBIF_LIMIT_FLOOR_BITS		6
60#define RBIF_LIMIT_FLOOR_MASK		GENMASK(5, 0)
61
62#define RBIF_LIMIT_CEILING_DEFAULT	RBIF_LIMIT_CEILING_MASK
63#define RBIF_LIMIT_FLOOR_DEFAULT	0
64
65#define REG_RBIF_SW_VLEVEL		0x94
66#define RBIF_SW_VLEVEL_DEFAULT		0x20
67
68#define REG_RBCPR_STEP_QUOT		0x80
69#define RBCPR_STEP_QUOT_STEPQUOT_MASK	GENMASK(7, 0)
70#define RBCPR_STEP_QUOT_IDLE_CLK_MASK	GENMASK(3, 0)
71#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT	8
72
73/* RBCPR Control Register */
74#define REG_RBCPR_CTL			0x90
75
76#define RBCPR_CTL_LOOP_EN			BIT(0)
77#define RBCPR_CTL_TIMER_EN			BIT(3)
78#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN		BIT(5)
79#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN	BIT(6)
80#define RBCPR_CTL_COUNT_MODE			BIT(10)
81#define RBCPR_CTL_UP_THRESHOLD_MASK	GENMASK(3, 0)
82#define RBCPR_CTL_UP_THRESHOLD_SHIFT	24
83#define RBCPR_CTL_DN_THRESHOLD_MASK	GENMASK(3, 0)
84#define RBCPR_CTL_DN_THRESHOLD_SHIFT	28
85
86/* RBCPR Ack/Nack Response */
87#define REG_RBIF_CONT_ACK_CMD		0x98
88#define REG_RBIF_CONT_NACK_CMD		0x9c
89
90/* RBCPR Result status Register */
91#define REG_RBCPR_RESULT_0		0xa0
92
93#define RBCPR_RESULT0_BUSY_SHIFT	19
94#define RBCPR_RESULT0_BUSY_MASK		BIT(RBCPR_RESULT0_BUSY_SHIFT)
95#define RBCPR_RESULT0_ERROR_LT0_SHIFT	18
96#define RBCPR_RESULT0_ERROR_SHIFT	6
97#define RBCPR_RESULT0_ERROR_MASK	GENMASK(11, 0)
98#define RBCPR_RESULT0_ERROR_STEPS_SHIFT	2
99#define RBCPR_RESULT0_ERROR_STEPS_MASK	GENMASK(3, 0)
100#define RBCPR_RESULT0_STEP_UP_SHIFT	1
101
102/* RBCPR Interrupt Control Register */
103#define REG_RBIF_IRQ_EN(n)		(0x100 + 4 * (n))
104#define REG_RBIF_IRQ_CLEAR		0x110
105#define REG_RBIF_IRQ_STATUS		0x114
106
107#define CPR_INT_DONE		BIT(0)
108#define CPR_INT_MIN		BIT(1)
109#define CPR_INT_DOWN		BIT(2)
110#define CPR_INT_MID		BIT(3)
111#define CPR_INT_UP		BIT(4)
112#define CPR_INT_MAX		BIT(5)
113#define CPR_INT_CLAMP		BIT(6)
114#define CPR_INT_ALL	(CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \
115			CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP)
116#define CPR_INT_DEFAULT	(CPR_INT_UP | CPR_INT_DOWN)
117
118#define CPR_NUM_RING_OSC	8
119
120/* CPR eFuse parameters */
121#define CPR_FUSE_TARGET_QUOT_BITS_MASK	GENMASK(11, 0)
122
123#define CPR_FUSE_MIN_QUOT_DIFF		50
124
125#define FUSE_REVISION_UNKNOWN		(-1)
126
127enum voltage_change_dir {
128	NO_CHANGE,
129	DOWN,
130	UP,
131};
132
133struct cpr_fuse {
134	char *ring_osc;
135	char *init_voltage;
136	char *quotient;
137	char *quotient_offset;
138};
139
140struct fuse_corner_data {
141	int ref_uV;
142	int max_uV;
143	int min_uV;
144	int max_volt_scale;
145	int max_quot_scale;
146	/* fuse quot */
147	int quot_offset;
148	int quot_scale;
149	int quot_adjust;
150	/* fuse quot_offset */
151	int quot_offset_scale;
152	int quot_offset_adjust;
153};
154
155struct cpr_fuses {
156	int init_voltage_step;
157	int init_voltage_width;
158	struct fuse_corner_data *fuse_corner_data;
159};
160
161struct corner_data {
162	unsigned int fuse_corner;
163	unsigned long freq;
164};
165
166struct cpr_desc {
167	unsigned int num_fuse_corners;
168	int min_diff_quot;
169	int *step_quot;
170
171	unsigned int		timer_delay_us;
172	unsigned int		timer_cons_up;
173	unsigned int		timer_cons_down;
174	unsigned int		up_threshold;
175	unsigned int		down_threshold;
176	unsigned int		idle_clocks;
177	unsigned int		gcnt_us;
178	unsigned int		vdd_apc_step_up_limit;
179	unsigned int		vdd_apc_step_down_limit;
180	unsigned int		clamp_timer_interval;
181
182	struct cpr_fuses cpr_fuses;
183	bool reduce_to_fuse_uV;
184	bool reduce_to_corner_uV;
185};
186
187struct acc_desc {
188	unsigned int	enable_reg;
189	u32		enable_mask;
190
191	struct reg_sequence	*config;
192	struct reg_sequence	*settings;
193	int			num_regs_per_fuse;
194};
195
196struct cpr_acc_desc {
197	const struct cpr_desc *cpr_desc;
198	const struct acc_desc *acc_desc;
199};
200
201struct fuse_corner {
202	int min_uV;
203	int max_uV;
204	int uV;
205	int quot;
206	int step_quot;
207	const struct reg_sequence *accs;
208	int num_accs;
209	unsigned long max_freq;
210	u8 ring_osc_idx;
211};
212
213struct corner {
214	int min_uV;
215	int max_uV;
216	int uV;
217	int last_uV;
218	int quot_adjust;
219	u32 save_ctl;
220	u32 save_irq;
221	unsigned long freq;
222	struct fuse_corner *fuse_corner;
223};
224
225struct cpr_drv {
226	unsigned int		num_corners;
227	unsigned int		ref_clk_khz;
228
229	struct generic_pm_domain pd;
230	struct device		*dev;
231	struct device		*attached_cpu_dev;
232	struct mutex		lock;
233	void __iomem		*base;
234	struct corner		*corner;
235	struct regulator	*vdd_apc;
236	struct clk		*cpu_clk;
237	struct regmap		*tcsr;
238	bool			loop_disabled;
239	u32			gcnt;
240	unsigned long		flags;
241
242	struct fuse_corner	*fuse_corners;
243	struct corner		*corners;
244
245	const struct cpr_desc *desc;
246	const struct acc_desc *acc_desc;
247	const struct cpr_fuse *cpr_fuses;
248
249	struct dentry *debugfs;
250};
251
252static bool cpr_is_allowed(struct cpr_drv *drv)
253{
254	return !drv->loop_disabled;
255}
256
257static void cpr_write(struct cpr_drv *drv, u32 offset, u32 value)
258{
259	writel_relaxed(value, drv->base + offset);
260}
261
262static u32 cpr_read(struct cpr_drv *drv, u32 offset)
263{
264	return readl_relaxed(drv->base + offset);
265}
266
267static void
268cpr_masked_write(struct cpr_drv *drv, u32 offset, u32 mask, u32 value)
269{
270	u32 val;
271
272	val = readl_relaxed(drv->base + offset);
273	val &= ~mask;
274	val |= value & mask;
275	writel_relaxed(val, drv->base + offset);
276}
277
278static void cpr_irq_clr(struct cpr_drv *drv)
279{
280	cpr_write(drv, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL);
281}
282
283static void cpr_irq_clr_nack(struct cpr_drv *drv)
284{
285	cpr_irq_clr(drv);
286	cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
287}
288
289static void cpr_irq_clr_ack(struct cpr_drv *drv)
290{
291	cpr_irq_clr(drv);
292	cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
293}
294
295static void cpr_irq_set(struct cpr_drv *drv, u32 int_bits)
296{
297	cpr_write(drv, REG_RBIF_IRQ_EN(0), int_bits);
298}
299
300static void cpr_ctl_modify(struct cpr_drv *drv, u32 mask, u32 value)
301{
302	cpr_masked_write(drv, REG_RBCPR_CTL, mask, value);
303}
304
305static void cpr_ctl_enable(struct cpr_drv *drv, struct corner *corner)
306{
307	u32 val, mask;
308	const struct cpr_desc *desc = drv->desc;
309
310	/* Program Consecutive Up & Down */
311	val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
312	val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
313	mask = RBIF_TIMER_ADJ_CONS_UP_MASK | RBIF_TIMER_ADJ_CONS_DOWN_MASK;
314	cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, mask, val);
315	cpr_masked_write(drv, REG_RBCPR_CTL,
316			 RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
317			 RBCPR_CTL_SW_AUTO_CONT_ACK_EN,
318			 corner->save_ctl);
319	cpr_irq_set(drv, corner->save_irq);
320
321	if (cpr_is_allowed(drv) && corner->max_uV > corner->min_uV)
322		val = RBCPR_CTL_LOOP_EN;
323	else
324		val = 0;
325	cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, val);
326}
327
328static void cpr_ctl_disable(struct cpr_drv *drv)
329{
330	cpr_irq_set(drv, 0);
331	cpr_ctl_modify(drv, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
332		       RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0);
333	cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST,
334			 RBIF_TIMER_ADJ_CONS_UP_MASK |
335			 RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0);
336	cpr_irq_clr(drv);
337	cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
338	cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
339	cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, 0);
340}
341
342static bool cpr_ctl_is_enabled(struct cpr_drv *drv)
343{
344	u32 reg_val;
345
346	reg_val = cpr_read(drv, REG_RBCPR_CTL);
347	return reg_val & RBCPR_CTL_LOOP_EN;
348}
349
350static bool cpr_ctl_is_busy(struct cpr_drv *drv)
351{
352	u32 reg_val;
353
354	reg_val = cpr_read(drv, REG_RBCPR_RESULT_0);
355	return reg_val & RBCPR_RESULT0_BUSY_MASK;
356}
357
358static void cpr_corner_save(struct cpr_drv *drv, struct corner *corner)
359{
360	corner->save_ctl = cpr_read(drv, REG_RBCPR_CTL);
361	corner->save_irq = cpr_read(drv, REG_RBIF_IRQ_EN(0));
362}
363
364static void cpr_corner_restore(struct cpr_drv *drv, struct corner *corner)
365{
366	u32 gcnt, ctl, irq, ro_sel, step_quot;
367	struct fuse_corner *fuse = corner->fuse_corner;
368	const struct cpr_desc *desc = drv->desc;
369	int i;
370
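	/*
	 * drv->gcnt already holds the gate count pre-shifted into bits
	 * [21:12] (computed in cpr_config()); combine it with the
	 * corner-adjusted target quotient, which fills bits [11:0].
	 */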
371	ro_sel = fuse->ring_osc_idx;
372	gcnt = drv->gcnt;
373	gcnt |= fuse->quot - corner->quot_adjust;
374
375	/* Program the step quotient and idle clocks */
376	step_quot = desc->idle_clocks << RBCPR_STEP_QUOT_IDLE_CLK_SHIFT;
377	step_quot |= fuse->step_quot & RBCPR_STEP_QUOT_STEPQUOT_MASK;
378	cpr_write(drv, REG_RBCPR_STEP_QUOT, step_quot);
379
380	/* Clear the target quotient value and gate count of all ROs */
381	for (i = 0; i < CPR_NUM_RING_OSC; i++)
382		cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
383
384	cpr_write(drv, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt);
385	ctl = corner->save_ctl;
386	cpr_write(drv, REG_RBCPR_CTL, ctl);
387	irq = corner->save_irq;
388	cpr_irq_set(drv, irq);
389	dev_dbg(drv->dev, "gcnt = %#08x, ctl = %#08x, irq = %#08x\n", gcnt,
390		ctl, irq);
391}
392
393static void cpr_set_acc(struct regmap *tcsr, struct fuse_corner *f,
394			struct fuse_corner *end)
395{
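	/*
	 * Walk from the current fuse corner to the target one, skipping the
	 * starting corner itself, and apply each visited corner's ACC
	 * settings (including the target's) to the TCSR regmap in order.
	 */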
396	if (f == end)
397		return;
398
399	if (f < end) {
400		for (f += 1; f <= end; f++)
401			regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
402	} else {
403		for (f -= 1; f >= end; f--)
404			regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
405	}
406}
407
408static int cpr_pre_voltage(struct cpr_drv *drv,
409			   struct fuse_corner *fuse_corner,
410			   enum voltage_change_dir dir)
411{
412	struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
413
414	if (drv->tcsr && dir == DOWN)
415		cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
416
417	return 0;
418}
419
420static int cpr_post_voltage(struct cpr_drv *drv,
421			    struct fuse_corner *fuse_corner,
422			    enum voltage_change_dir dir)
423{
424	struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
425
426	if (drv->tcsr && dir == UP)
427		cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
428
429	return 0;
430}
431
432static int cpr_scale_voltage(struct cpr_drv *drv, struct corner *corner,
433			     int new_uV, enum voltage_change_dir dir)
434{
435	int ret;
436	struct fuse_corner *fuse_corner = corner->fuse_corner;
437
438	ret = cpr_pre_voltage(drv, fuse_corner, dir);
439	if (ret)
440		return ret;
441
442	ret = regulator_set_voltage(drv->vdd_apc, new_uV, new_uV);
443	if (ret) {
444		dev_err_ratelimited(drv->dev, "failed to set apc voltage %d\n",
445				    new_uV);
446		return ret;
447	}
448
449	ret = cpr_post_voltage(drv, fuse_corner, dir);
450	if (ret)
451		return ret;
452
453	return 0;
454}
455
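/* Performance states are 1-based; 0 means no corner has been selected yet. */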
456static unsigned int cpr_get_cur_perf_state(struct cpr_drv *drv)
457{
458	return drv->corner ? drv->corner - drv->corners + 1 : 0;
459}
460
461static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir)
462{
463	u32 val, error_steps, reg_mask;
464	int last_uV, new_uV, step_uV, ret;
465	struct corner *corner;
466	const struct cpr_desc *desc = drv->desc;
467
468	if (dir != UP && dir != DOWN)
469		return 0;
470
471	step_uV = regulator_get_linear_step(drv->vdd_apc);
472	if (!step_uV)
473		return -EINVAL;
474
475	corner = drv->corner;
476
477	val = cpr_read(drv, REG_RBCPR_RESULT_0);
478
479	error_steps = val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT;
480	error_steps &= RBCPR_RESULT0_ERROR_STEPS_MASK;
481	last_uV = corner->last_uV;
482
483	if (dir == UP) {
484		if (desc->clamp_timer_interval &&
485		    error_steps < desc->up_threshold) {
486			/*
487			 * Handle the case where another measurement started
488			 * after the interrupt was triggered due to a core
489			 * exiting from power collapse.
490			 */
491			error_steps = max(desc->up_threshold,
492					  desc->vdd_apc_step_up_limit);
493		}
494
495		if (last_uV >= corner->max_uV) {
496			cpr_irq_clr_nack(drv);
497
498			/* Maximize the UP threshold */
499			reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
500			reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
501			val = reg_mask;
502			cpr_ctl_modify(drv, reg_mask, val);
503
504			/* Disable UP interrupt */
505			cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_UP);
506
507			return 0;
508		}
509
510		if (error_steps > desc->vdd_apc_step_up_limit)
511			error_steps = desc->vdd_apc_step_up_limit;
512
513		/* Calculate new voltage */
514		new_uV = last_uV + error_steps * step_uV;
515		new_uV = min(new_uV, corner->max_uV);
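		/*
		 * Illustration (regulator step hypothetical): with a
		 * 12500 uV linear step and error_steps = 1, the target
		 * rises by 12.5 mV, still capped at corner->max_uV above.
		 */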
516
517		dev_dbg(drv->dev,
518			"UP: -> new_uV: %d last_uV: %d perf state: %u\n",
519			new_uV, last_uV, cpr_get_cur_perf_state(drv));
520	} else {
521		if (desc->clamp_timer_interval &&
522		    error_steps < desc->down_threshold) {
523			/*
524			 * Handle the case where another measurement started
525			 * after the interrupt was triggered due to a core
526			 * exiting from power collapse.
527			 */
528			error_steps = max(desc->down_threshold,
529					  desc->vdd_apc_step_down_limit);
530		}
531
532		if (last_uV <= corner->min_uV) {
533			cpr_irq_clr_nack(drv);
534
535			/* Enable auto nack down */
536			reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
537			val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
538
539			cpr_ctl_modify(drv, reg_mask, val);
540
541			/* Disable DOWN interrupt */
542			cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_DOWN);
543
544			return 0;
545		}
546
547		if (error_steps > desc->vdd_apc_step_down_limit)
548			error_steps = desc->vdd_apc_step_down_limit;
549
550		/* Calculate new voltage */
551		new_uV = last_uV - error_steps * step_uV;
552		new_uV = max(new_uV, corner->min_uV);
553
554		dev_dbg(drv->dev,
555			"DOWN: -> new_uV: %d last_uV: %d perf state: %u\n",
556			new_uV, last_uV, cpr_get_cur_perf_state(drv));
557	}
558
559	ret = cpr_scale_voltage(drv, corner, new_uV, dir);
560	if (ret) {
561		cpr_irq_clr_nack(drv);
562		return ret;
563	}
564	drv->corner->last_uV = new_uV;
565
566	if (dir == UP) {
567		/* Disable auto nack down */
568		reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
569		val = 0;
570	} else {
571		/* Restore default threshold for UP */
572		reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
573		reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
574		val = desc->up_threshold;
575		val <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
576	}
577
578	cpr_ctl_modify(drv, reg_mask, val);
579
580	/* Re-enable default interrupts */
581	cpr_irq_set(drv, CPR_INT_DEFAULT);
582
583	/* Ack */
584	cpr_irq_clr_ack(drv);
585
586	return 0;
587}
588
589static irqreturn_t cpr_irq_handler(int irq, void *dev)
590{
591	struct cpr_drv *drv = dev;
592	const struct cpr_desc *desc = drv->desc;
593	irqreturn_t ret = IRQ_HANDLED;
594	u32 val;
595
596	mutex_lock(&drv->lock);
597
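	/*
	 * On controllers at or below RBCPR_VER_2, cpr_config() sets
	 * FLAGS_IGNORE_1ST_IRQ_STATUS, so the status register is read a
	 * second time and only that value is used.
	 */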
598	val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
599	if (drv->flags & FLAGS_IGNORE_1ST_IRQ_STATUS)
600		val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
601
602	dev_dbg(drv->dev, "IRQ_STATUS = %#02x\n", val);
603
604	if (!cpr_ctl_is_enabled(drv)) {
605		dev_dbg(drv->dev, "CPR is disabled\n");
606		ret = IRQ_NONE;
607	} else if (cpr_ctl_is_busy(drv) && !desc->clamp_timer_interval) {
608		dev_dbg(drv->dev, "CPR measurement is not ready\n");
609	} else if (!cpr_is_allowed(drv)) {
610		val = cpr_read(drv, REG_RBCPR_CTL);
611		dev_err_ratelimited(drv->dev,
612				    "Interrupt broken? RBCPR_CTL = %#02x\n",
613				    val);
614		ret = IRQ_NONE;
615	} else {
616		/*
617		 * The interrupt sources are handled in order of each IRQ's
618		 * priority
619		 */
620		if (val & CPR_INT_UP) {
621			cpr_scale(drv, UP);
622		} else if (val & CPR_INT_DOWN) {
623			cpr_scale(drv, DOWN);
624		} else if (val & CPR_INT_MIN) {
625			cpr_irq_clr_nack(drv);
626		} else if (val & CPR_INT_MAX) {
627			cpr_irq_clr_nack(drv);
628		} else if (val & CPR_INT_MID) {
629			/* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */
630			dev_dbg(drv->dev, "IRQ occurred for Mid Flag\n");
631		} else {
632			dev_dbg(drv->dev,
633				"IRQ occurred for unknown flag (%#08x)\n", val);
634		}
635
636		/* Save register values for the corner */
637		cpr_corner_save(drv, drv->corner);
638	}
639
640	mutex_unlock(&drv->lock);
641
642	return ret;
643}
644
645static int cpr_enable(struct cpr_drv *drv)
646{
647	int ret;
648
649	ret = regulator_enable(drv->vdd_apc);
650	if (ret)
651		return ret;
652
653	mutex_lock(&drv->lock);
654
655	if (cpr_is_allowed(drv) && drv->corner) {
656		cpr_irq_clr(drv);
657		cpr_corner_restore(drv, drv->corner);
658		cpr_ctl_enable(drv, drv->corner);
659	}
660
661	mutex_unlock(&drv->lock);
662
663	return 0;
664}
665
666static int cpr_disable(struct cpr_drv *drv)
667{
668	mutex_lock(&drv->lock);
669
670	if (cpr_is_allowed(drv)) {
671		cpr_ctl_disable(drv);
672		cpr_irq_clr(drv);
673	}
674
675	mutex_unlock(&drv->lock);
676
677	return regulator_disable(drv->vdd_apc);
678}
679
680static int cpr_config(struct cpr_drv *drv)
681{
682	int i;
683	u32 val, gcnt;
684	struct corner *corner;
685	const struct cpr_desc *desc = drv->desc;
686
687	/* Disable interrupt and CPR */
688	cpr_write(drv, REG_RBIF_IRQ_EN(0), 0);
689	cpr_write(drv, REG_RBCPR_CTL, 0);
690
691	/* Program the default HW ceiling, floor and vlevel */
692	val = (RBIF_LIMIT_CEILING_DEFAULT & RBIF_LIMIT_CEILING_MASK)
693		<< RBIF_LIMIT_CEILING_SHIFT;
694	val |= RBIF_LIMIT_FLOOR_DEFAULT & RBIF_LIMIT_FLOOR_MASK;
695	cpr_write(drv, REG_RBIF_LIMIT, val);
696	cpr_write(drv, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT);
697
698	/*
699	 * Clear the target quotient value and gate count of all
700	 * ring oscillators
701	 */
702	for (i = 0; i < CPR_NUM_RING_OSC; i++)
703		cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
704
705	/* Init and save gcnt */
706	gcnt = (drv->ref_clk_khz * desc->gcnt_us) / 1000;
707	gcnt = gcnt & RBCPR_GCNT_TARGET_GCNT_MASK;
708	gcnt <<= RBCPR_GCNT_TARGET_GCNT_SHIFT;
709	drv->gcnt = gcnt;
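	/*
	 * For illustration, assuming a 19.2 MHz "ref" clock (ref_clk_khz =
	 * 19200) and gcnt_us = 1, the gate count is 19 reference-clock
	 * cycles, stored pre-shifted into bits [21:12] for
	 * cpr_corner_restore() to use.
	 */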
710
711	/* Program the delay count for the timer */
712	val = (drv->ref_clk_khz * desc->timer_delay_us) / 1000;
713	cpr_write(drv, REG_RBCPR_TIMER_INTERVAL, val);
714	dev_dbg(drv->dev, "Timer count: %#0x (for %d us)\n", val,
715		desc->timer_delay_us);
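	/*
	 * With the same illustrative 19.2 MHz reference and timer_delay_us =
	 * 5000, this programs 96000 clock cycles, i.e. one CPR measurement
	 * roughly every 5 ms.
	 */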
716
717	/* Program Consecutive Up & Down */
718	val = desc->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
719	val |= desc->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
720	val |= desc->clamp_timer_interval << RBIF_TIMER_ADJ_CLAMP_INT_SHIFT;
721	cpr_write(drv, REG_RBIF_TIMER_ADJUST, val);
722
723	/* Program the control register */
724	val = desc->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT;
725	val |= desc->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT;
726	val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE;
727	val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN;
728	cpr_write(drv, REG_RBCPR_CTL, val);
729
730	for (i = 0; i < drv->num_corners; i++) {
731		corner = &drv->corners[i];
732		corner->save_ctl = val;
733		corner->save_irq = CPR_INT_DEFAULT;
734	}
735
736	cpr_irq_set(drv, CPR_INT_DEFAULT);
737
738	val = cpr_read(drv, REG_RBCPR_VERSION);
739	if (val <= RBCPR_VER_2)
740		drv->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS;
741
742	return 0;
743}
744
745static int cpr_set_performance_state(struct generic_pm_domain *domain,
746				     unsigned int state)
747{
748	struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
749	struct corner *corner, *end;
750	enum voltage_change_dir dir;
751	int ret = 0, new_uV;
752
753	mutex_lock(&drv->lock);
754
755	dev_dbg(drv->dev, "%s: setting perf state: %u (prev state: %u)\n",
756		__func__, state, cpr_get_cur_perf_state(drv));
757
758	/*
759	 * Determine new corner we're going to.
760	 * Remove one since lowest performance state is 1.
761	 */
762	corner = drv->corners + state - 1;
763	end = &drv->corners[drv->num_corners - 1];
764	if (corner > end || corner < drv->corners) {
765		ret = -EINVAL;
766		goto unlock;
767	}
768
769	/* Determine direction */
770	if (drv->corner > corner)
771		dir = DOWN;
772	else if (drv->corner < corner)
773		dir = UP;
774	else
775		dir = NO_CHANGE;
776
777	if (cpr_is_allowed(drv))
778		new_uV = corner->last_uV;
779	else
780		new_uV = corner->uV;
781
782	if (cpr_is_allowed(drv))
783		cpr_ctl_disable(drv);
784
785	ret = cpr_scale_voltage(drv, corner, new_uV, dir);
786	if (ret)
787		goto unlock;
788
789	if (cpr_is_allowed(drv)) {
790		cpr_irq_clr(drv);
791		if (drv->corner != corner)
792			cpr_corner_restore(drv, corner);
793		cpr_ctl_enable(drv, corner);
794	}
795
796	drv->corner = corner;
797
798unlock:
799	mutex_unlock(&drv->lock);
800
801	return ret;
802}
803
804static int cpr_read_efuse(struct device *dev, const char *cname, u32 *data)
805{
806	struct nvmem_cell *cell;
807	ssize_t len;
808	char *ret;
809	int i;
810
811	*data = 0;
812
813	cell = nvmem_cell_get(dev, cname);
814	if (IS_ERR(cell)) {
815		if (PTR_ERR(cell) != -EPROBE_DEFER)
816			dev_err(dev, "undefined cell %s\n", cname);
817		return PTR_ERR(cell);
818	}
819
820	ret = nvmem_cell_read(cell, &len);
821	nvmem_cell_put(cell);
822	if (IS_ERR(ret)) {
823		dev_err(dev, "can't read cell %s\n", cname);
824		return PTR_ERR(ret);
825	}
826
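	/* Assemble the cell's bytes LSB-first into a single u32 */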
827	for (i = 0; i < len; i++)
828		*data |= ret[i] << (8 * i);
829
830	kfree(ret);
831	dev_dbg(dev, "efuse read(%s) = %x, bytes %zd\n", cname, *data, len);
832
833	return 0;
834}
835
836static int
837cpr_populate_ring_osc_idx(struct cpr_drv *drv)
838{
839	struct fuse_corner *fuse = drv->fuse_corners;
840	struct fuse_corner *end = fuse + drv->desc->num_fuse_corners;
841	const struct cpr_fuse *fuses = drv->cpr_fuses;
842	u32 data;
843	int ret;
844
845	for (; fuse < end; fuse++, fuses++) {
846		ret = cpr_read_efuse(drv->dev, fuses->ring_osc,
847				     &data);
848		if (ret)
849			return ret;
850		fuse->ring_osc_idx = data;
851	}
852
853	return 0;
854}
855
856static int cpr_read_fuse_uV(const struct cpr_desc *desc,
857			    const struct fuse_corner_data *fdata,
858			    const char *init_v_efuse,
859			    int step_volt,
860			    struct cpr_drv *drv)
861{
862	int step_size_uV, steps, uV;
863	u32 bits = 0;
864	int ret;
865
866	ret = cpr_read_efuse(drv->dev, init_v_efuse, &bits);
867	if (ret)
868		return ret;
869
870	steps = bits & ~BIT(desc->cpr_fuses.init_voltage_width - 1);
871	/* Not two's complement; instead, the highest bit is a sign bit */
872	if (bits & BIT(desc->cpr_fuses.init_voltage_width - 1))
873		steps = -steps;
874
875	step_size_uV = desc->cpr_fuses.init_voltage_step;
876
877	uV = fdata->ref_uV + steps * step_size_uV;
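	/*
	 * Worked example (raw fuse value hypothetical): with a 6-bit field,
	 * 8000 uV per step and ref_uV = 1224000 (qcs404 fuse corner 0), a
	 * raw value of 0x21 decodes to steps = -1, giving 1216000 uV before
	 * rounding up to the regulator step below.
	 */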
878	return DIV_ROUND_UP(uV, step_volt) * step_volt;
879}
880
881static int cpr_fuse_corner_init(struct cpr_drv *drv)
882{
883	const struct cpr_desc *desc = drv->desc;
884	const struct cpr_fuse *fuses = drv->cpr_fuses;
885	const struct acc_desc *acc_desc = drv->acc_desc;
886	int i;
887	unsigned int step_volt;
888	struct fuse_corner_data *fdata;
889	struct fuse_corner *fuse, *end;
890	int uV;
891	const struct reg_sequence *accs;
892	int ret;
893
894	accs = acc_desc->settings;
895
896	step_volt = regulator_get_linear_step(drv->vdd_apc);
897	if (!step_volt)
898		return -EINVAL;
899
900	/* Populate fuse_corner members */
901	fuse = drv->fuse_corners;
902	end = &fuse[desc->num_fuse_corners - 1];
903	fdata = desc->cpr_fuses.fuse_corner_data;
904
905	for (i = 0; fuse <= end; fuse++, fuses++, i++, fdata++) {
906		/*
907		 * Update SoC voltages: platforms might choose a different
908		 * regulator than the one used to characterize the algorithms
909		 * (i.e., init_voltage_step).
910		 */
911		fdata->min_uV = roundup(fdata->min_uV, step_volt);
912		fdata->max_uV = roundup(fdata->max_uV, step_volt);
913
914		/* Populate uV */
915		uV = cpr_read_fuse_uV(desc, fdata, fuses->init_voltage,
916				      step_volt, drv);
917		if (uV < 0)
918			return uV;
919
920		fuse->min_uV = fdata->min_uV;
921		fuse->max_uV = fdata->max_uV;
922		fuse->uV = clamp(uV, fuse->min_uV, fuse->max_uV);
923
924		if (fuse == end) {
925			/*
926			 * Allow the highest fuse corner's PVS voltage to
927			 * define the ceiling voltage for that corner in order
928			 * to support SoC's in which variable ceiling values
929			 * are required.
930			 */
931			end->max_uV = max(end->max_uV, end->uV);
932		}
933
934		/* Populate target quotient by scaling */
935		ret = cpr_read_efuse(drv->dev, fuses->quotient, &fuse->quot);
936		if (ret)
937			return ret;
938
939		fuse->quot *= fdata->quot_scale;
940		fuse->quot += fdata->quot_offset;
941		fuse->quot += fdata->quot_adjust;
942		fuse->step_quot = desc->step_quot[fuse->ring_osc_idx];
943
944		/* Populate acc settings */
945		fuse->accs = accs;
946		fuse->num_accs = acc_desc->num_regs_per_fuse;
947		accs += acc_desc->num_regs_per_fuse;
948	}
949
950	/*
951	 * Restrict all fuse corner PVS voltages based upon per corner
952	 * ceiling and floor voltages.
953	 */
954	for (fuse = drv->fuse_corners, i = 0; fuse <= end; fuse++, i++) {
955		if (fuse->uV > fuse->max_uV)
956			fuse->uV = fuse->max_uV;
957		else if (fuse->uV < fuse->min_uV)
958			fuse->uV = fuse->min_uV;
959
960		ret = regulator_is_supported_voltage(drv->vdd_apc,
961						     fuse->min_uV,
962						     fuse->min_uV);
963		if (!ret) {
964			dev_err(drv->dev,
965				"min uV: %d (fuse corner: %d) not supported by regulator\n",
966				fuse->min_uV, i);
967			return -EINVAL;
968		}
969
970		ret = regulator_is_supported_voltage(drv->vdd_apc,
971						     fuse->max_uV,
972						     fuse->max_uV);
973		if (!ret) {
974			dev_err(drv->dev,
975				"max uV: %d (fuse corner: %d) not supported by regulator\n",
976				fuse->max_uV, i);
977			return -EINVAL;
978		}
979
980		dev_dbg(drv->dev,
981			"fuse corner %d: [%d %d %d] RO%hhu quot %d squot %d\n",
982			i, fuse->min_uV, fuse->uV, fuse->max_uV,
983			fuse->ring_osc_idx, fuse->quot, fuse->step_quot);
984	}
985
986	return 0;
987}
988
989static int cpr_calculate_scaling(const char *quot_offset,
990				 struct cpr_drv *drv,
991				 const struct fuse_corner_data *fdata,
992				 const struct corner *corner)
993{
994	u32 quot_diff = 0;
995	unsigned long freq_diff;
996	int scaling;
997	const struct fuse_corner *fuse, *prev_fuse;
998	int ret;
999
1000	fuse = corner->fuse_corner;
1001	prev_fuse = fuse - 1;
1002
1003	if (quot_offset) {
1004		ret = cpr_read_efuse(drv->dev, quot_offset, &quot_diff);
1005		if (ret)
1006			return ret;
1007
1008		quot_diff *= fdata->quot_offset_scale;
1009		quot_diff += fdata->quot_offset_adjust;
1010	} else {
1011		quot_diff = fuse->quot - prev_fuse->quot;
1012	}
1013
1014	freq_diff = fuse->max_freq - prev_fuse->max_freq;
1015	freq_diff /= 1000000; /* Convert to MHz */
1016	scaling = 1000 * quot_diff / freq_diff;
1017	return min(scaling, fdata->max_quot_scale);
1018}
1019
1020static int cpr_interpolate(const struct corner *corner, int step_volt,
1021			   const struct fuse_corner_data *fdata)
1022{
1023	unsigned long f_high, f_low, f_diff;
1024	int uV_high, uV_low, uV;
1025	u64 temp, temp_limit;
1026	const struct fuse_corner *fuse, *prev_fuse;
1027
1028	fuse = corner->fuse_corner;
1029	prev_fuse = fuse - 1;
1030
1031	f_high = fuse->max_freq;
1032	f_low = prev_fuse->max_freq;
1033	uV_high = fuse->uV;
1034	uV_low = prev_fuse->uV;
1035	f_diff = fuse->max_freq - corner->freq;
1036
1037	/*
1038	 * Don't interpolate in the wrong direction. This could happen
1039	 * if the adjusted fuse voltage overlaps with the previous fuse's
1040	 * adjusted voltage.
1041	 */
1042	if (f_high <= f_low || uV_high <= uV_low || f_high <= corner->freq)
1043		return corner->uV;
1044
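	/*
	 * Linear interpolation between the two fuse corners:
	 *   uV = uV_high - (f_high - freq) * (uV_high - uV_low) / (f_high - f_low)
	 * with the subtracted term additionally capped by max_volt_scale
	 * (uV per MHz of frequency difference).
	 */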
1045	temp = f_diff * (uV_high - uV_low);
1046	temp = div64_ul(temp, f_high - f_low);
1047
1048	/*
1049	 * max_volt_scale has units of uV/MHz while freq values have units
1050	 * of Hz. Divide by 1000000 to convert the frequency delta to MHz.
1051	 */
1052	temp_limit = f_diff * fdata->max_volt_scale;
1053	do_div(temp_limit, 1000000);
1054
1055	uV = uV_high - min(temp, temp_limit);
1056	return roundup(uV, step_volt);
1057}
1058
1059static unsigned int cpr_get_fuse_corner(struct dev_pm_opp *opp)
1060{
1061	struct device_node *np;
1062	unsigned int fuse_corner = 0;
1063
1064	np = dev_pm_opp_get_of_node(opp);
1065	if (of_property_read_u32(np, "qcom,opp-fuse-level", &fuse_corner))
1066		pr_err("%s: missing 'qcom,opp-fuse-level' property\n",
1067		       __func__);
1068
1069	of_node_put(np);
1070
1071	return fuse_corner;
1072}
1073
1074static unsigned long cpr_get_opp_hz_for_req(struct dev_pm_opp *ref,
1075					    struct device *cpu_dev)
1076{
1077	u64 rate = 0;
1078	struct device_node *ref_np;
1079	struct device_node *desc_np;
1080	struct device_node *child_np = NULL;
1081	struct device_node *child_req_np = NULL;
1082
1083	desc_np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
1084	if (!desc_np)
1085		return 0;
1086
1087	ref_np = dev_pm_opp_get_of_node(ref);
1088	if (!ref_np)
1089		goto out_ref;
1090
1091	do {
1092		of_node_put(child_req_np);
1093		child_np = of_get_next_available_child(desc_np, child_np);
1094		child_req_np = of_parse_phandle(child_np, "required-opps", 0);
1095	} while (child_np && child_req_np != ref_np);
1096
1097	if (child_np && child_req_np == ref_np)
1098		of_property_read_u64(child_np, "opp-hz", &rate);
1099
1100	of_node_put(child_req_np);
1101	of_node_put(child_np);
1102	of_node_put(ref_np);
1103out_ref:
1104	of_node_put(desc_np);
1105
1106	return (unsigned long) rate;
1107}
1108
1109static int cpr_corner_init(struct cpr_drv *drv)
1110{
1111	const struct cpr_desc *desc = drv->desc;
1112	const struct cpr_fuse *fuses = drv->cpr_fuses;
1113	int i, level, scaling = 0;
1114	unsigned int fnum, fc;
1115	const char *quot_offset;
1116	struct fuse_corner *fuse, *prev_fuse;
1117	struct corner *corner, *end;
1118	struct corner_data *cdata;
1119	const struct fuse_corner_data *fdata;
1120	bool apply_scaling;
1121	unsigned long freq_diff, freq_diff_mhz;
1122	unsigned long freq;
1123	int step_volt = regulator_get_linear_step(drv->vdd_apc);
1124	struct dev_pm_opp *opp;
1125
1126	if (!step_volt)
1127		return -EINVAL;
1128
1129	corner = drv->corners;
1130	end = &corner[drv->num_corners - 1];
1131
1132	cdata = devm_kcalloc(drv->dev, drv->num_corners,
1133			     sizeof(struct corner_data),
1134			     GFP_KERNEL);
1135	if (!cdata)
1136		return -ENOMEM;
1137
1138	/*
1139	 * Store maximum frequency for each fuse corner based on the frequency
1140	 * plan
1141	 */
1142	for (level = 1; level <= drv->num_corners; level++) {
1143		opp = dev_pm_opp_find_level_exact(&drv->pd.dev, level);
1144		if (IS_ERR(opp))
1145			return -EINVAL;
1146		fc = cpr_get_fuse_corner(opp);
1147		if (!fc) {
1148			dev_pm_opp_put(opp);
1149			return -EINVAL;
1150		}
1151		fnum = fc - 1;
1152		freq = cpr_get_opp_hz_for_req(opp, drv->attached_cpu_dev);
1153		if (!freq) {
1154			dev_pm_opp_put(opp);
1155			return -EINVAL;
1156		}
1157		cdata[level - 1].fuse_corner = fnum;
1158		cdata[level - 1].freq = freq;
1159
1160		fuse = &drv->fuse_corners[fnum];
1161		dev_dbg(drv->dev, "freq: %lu level: %u fuse level: %u\n",
1162			freq, dev_pm_opp_get_level(opp) - 1, fnum);
1163		if (freq > fuse->max_freq)
1164			fuse->max_freq = freq;
1165		dev_pm_opp_put(opp);
1166	}
1167
1168	/*
1169	 * Get the quotient adjustment scaling factor, according to:
1170	 *
1171	 * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1))
1172	 *		/ (freq(corner_N) - freq(corner_N-1)), max_factor)
1173	 *
1174	 * QUOT(corner_N):	quotient read from fuse for fuse corner N
1175	 * QUOT(corner_N-1):	quotient read from fuse for fuse corner (N - 1)
1176	 * freq(corner_N):	max frequency in MHz supported by fuse corner N
1177	 * freq(corner_N-1):	max frequency in MHz supported by fuse corner
1178	 *			 (N - 1)
1179	 *
1180	 * Then walk through the corners mapped to each fuse corner
1181	 * and calculate the quotient adjustment for each one using the
1182	 * following formula:
1183	 *
1184	 * quot_adjust = (freq_max - freq_corner) * scaling / 1000
1185	 *
1186	 * freq_max: max frequency in MHz supported by the fuse corner
1187	 * freq_corner: frequency in MHz corresponding to the corner
1188	 * scaling: calculated from above equation
1189	 *
1190	 *
1191	 *     +                           +
1192	 *     |                         v |
1193	 *   q |           f c           o |           f c
1194	 *   u |         c               l |         c
1195	 *   o |       f                 t |       f
1196	 *   t |     c                   a |     c
1197	 *     | c f                     g | c f
1198	 *     |                         e |
1199	 *     +---------------            +----------------
1200	 *       0 1 2 3 4 5 6               0 1 2 3 4 5 6
1201	 *          corner                      corner
1202	 *
1203	 *    c = corner
1204	 *    f = fuse corner
1205	 *
1206	 */
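	/*
	 * Numerical illustration (values hypothetical): if the fused
	 * quotients of two adjacent fuse corners differ by 100 and their
	 * maximum frequencies differ by 400 MHz, then scaling =
	 * 1000 * 100 / 400 = 250. A corner running 100 MHz below its fuse
	 * corner's maximum then gets quot_adjust = 100 * 250 / 1000 = 25.
	 */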
1207	for (apply_scaling = false, i = 0; corner <= end; corner++, i++) {
1208		fnum = cdata[i].fuse_corner;
1209		fdata = &desc->cpr_fuses.fuse_corner_data[fnum];
1210		quot_offset = fuses[fnum].quotient_offset;
1211		fuse = &drv->fuse_corners[fnum];
1212		if (fnum)
1213			prev_fuse = &drv->fuse_corners[fnum - 1];
1214		else
1215			prev_fuse = NULL;
1216
1217		corner->fuse_corner = fuse;
1218		corner->freq = cdata[i].freq;
1219		corner->uV = fuse->uV;
1220
1221		if (prev_fuse && cdata[i - 1].freq == prev_fuse->max_freq) {
1222			scaling = cpr_calculate_scaling(quot_offset, drv,
1223							fdata, corner);
1224			if (scaling < 0)
1225				return scaling;
1226
1227			apply_scaling = true;
1228		} else if (corner->freq == fuse->max_freq) {
1229			/* This is a fuse corner; don't scale anything */
1230			apply_scaling = false;
1231		}
1232
1233		if (apply_scaling) {
1234			freq_diff = fuse->max_freq - corner->freq;
1235			freq_diff_mhz = freq_diff / 1000000;
1236			corner->quot_adjust = scaling * freq_diff_mhz / 1000;
1237
1238			corner->uV = cpr_interpolate(corner, step_volt, fdata);
1239		}
1240
1241		corner->max_uV = fuse->max_uV;
1242		corner->min_uV = fuse->min_uV;
1243		corner->uV = clamp(corner->uV, corner->min_uV, corner->max_uV);
1244		corner->last_uV = corner->uV;
1245
1246		/* Reduce the ceiling voltage if needed */
1247		if (desc->reduce_to_corner_uV && corner->uV < corner->max_uV)
1248			corner->max_uV = corner->uV;
1249		else if (desc->reduce_to_fuse_uV && fuse->uV < corner->max_uV)
1250			corner->max_uV = max(corner->min_uV, fuse->uV);
1251
1252		dev_dbg(drv->dev, "corner %d: [%d %d %d] quot %d\n", i,
1253			corner->min_uV, corner->uV, corner->max_uV,
1254			fuse->quot - corner->quot_adjust);
1255	}
1256
1257	return 0;
1258}
1259
1260static const struct cpr_fuse *cpr_get_fuses(struct cpr_drv *drv)
1261{
1262	const struct cpr_desc *desc = drv->desc;
1263	struct cpr_fuse *fuses;
1264	int i;
1265
1266	fuses = devm_kcalloc(drv->dev, desc->num_fuse_corners,
1267			     sizeof(struct cpr_fuse),
1268			     GFP_KERNEL);
1269	if (!fuses)
1270		return ERR_PTR(-ENOMEM);
1271
1272	for (i = 0; i < desc->num_fuse_corners; i++) {
1273		char tbuf[32];
1274
1275		snprintf(tbuf, 32, "cpr_ring_osc%d", i + 1);
1276		fuses[i].ring_osc = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
1277		if (!fuses[i].ring_osc)
1278			return ERR_PTR(-ENOMEM);
1279
1280		snprintf(tbuf, 32, "cpr_init_voltage%d", i + 1);
1281		fuses[i].init_voltage = devm_kstrdup(drv->dev, tbuf,
1282						     GFP_KERNEL);
1283		if (!fuses[i].init_voltage)
1284			return ERR_PTR(-ENOMEM);
1285
1286		snprintf(tbuf, 32, "cpr_quotient%d", i + 1);
1287		fuses[i].quotient = devm_kstrdup(drv->dev, tbuf, GFP_KERNEL);
1288		if (!fuses[i].quotient)
1289			return ERR_PTR(-ENOMEM);
1290
1291		snprintf(tbuf, 32, "cpr_quotient_offset%d", i + 1);
1292		fuses[i].quotient_offset = devm_kstrdup(drv->dev, tbuf,
1293							GFP_KERNEL);
1294		if (!fuses[i].quotient_offset)
1295			return ERR_PTR(-ENOMEM);
1296	}
1297
1298	return fuses;
1299}
1300
1301static void cpr_set_loop_allowed(struct cpr_drv *drv)
1302{
1303	drv->loop_disabled = false;
1304}
1305
1306static int cpr_init_parameters(struct cpr_drv *drv)
1307{
1308	const struct cpr_desc *desc = drv->desc;
1309	struct clk *clk;
1310
1311	clk = clk_get(drv->dev, "ref");
1312	if (IS_ERR(clk))
1313		return PTR_ERR(clk);
1314
1315	drv->ref_clk_khz = clk_get_rate(clk) / 1000;
1316	clk_put(clk);
1317
1318	if (desc->timer_cons_up > RBIF_TIMER_ADJ_CONS_UP_MASK ||
1319	    desc->timer_cons_down > RBIF_TIMER_ADJ_CONS_DOWN_MASK ||
1320	    desc->up_threshold > RBCPR_CTL_UP_THRESHOLD_MASK ||
1321	    desc->down_threshold > RBCPR_CTL_DN_THRESHOLD_MASK ||
1322	    desc->idle_clocks > RBCPR_STEP_QUOT_IDLE_CLK_MASK ||
1323	    desc->clamp_timer_interval > RBIF_TIMER_ADJ_CLAMP_INT_MASK)
1324		return -EINVAL;
1325
1326	dev_dbg(drv->dev, "up threshold = %u, down threshold = %u\n",
1327		desc->up_threshold, desc->down_threshold);
1328
1329	return 0;
1330}
1331
1332static int cpr_find_initial_corner(struct cpr_drv *drv)
1333{
1334	unsigned long rate;
1335	const struct corner *end;
1336	struct corner *iter;
1337	unsigned int i = 0;
1338
1339	if (!drv->cpu_clk) {
1340		dev_err(drv->dev, "cannot get rate from NULL clk\n");
1341		return -EINVAL;
1342	}
1343
1344	end = &drv->corners[drv->num_corners - 1];
1345	rate = clk_get_rate(drv->cpu_clk);
1346
1347	/*
1348	 * Some bootloaders set a CPU clock frequency that is not defined
1349	 * in the OPP table. When running at an unlisted frequency,
1350	 * cpufreq_online() will change to the OPP which has the lowest
1351	 * frequency, at or above the unlisted frequency.
1352	 * Since cpufreq_online() always "rounds up" in the case of an
1353	 * unlisted frequency, this function always "rounds down" in case
1354	 * of an unlisted frequency. That way, when cpufreq_online()
1355	 * triggers the first ever call to cpr_set_performance_state(),
1356	 * it will correctly determine the direction as UP.
1357	 */
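	/*
	 * Example (rates hypothetical): with corners at 1094.4 MHz and
	 * 1248 MHz and the bootloader leaving the CPU at 1200 MHz, the loop
	 * below settles on the 1094.4 MHz corner.
	 */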
1358	for (iter = drv->corners; iter <= end; iter++) {
1359		if (iter->freq > rate)
1360			break;
1361		i++;
1362		if (iter->freq == rate) {
1363			drv->corner = iter;
1364			break;
1365		}
1366		if (iter->freq < rate)
1367			drv->corner = iter;
1368	}
1369
1370	if (!drv->corner) {
1371		dev_err(drv->dev, "boot up corner not found\n");
1372		return -EINVAL;
1373	}
1374
1375	dev_dbg(drv->dev, "boot up perf state: %u\n", i);
1376
1377	return 0;
1378}
1379
1380static const struct cpr_desc qcs404_cpr_desc = {
1381	.num_fuse_corners = 3,
1382	.min_diff_quot = CPR_FUSE_MIN_QUOT_DIFF,
1383	.step_quot = (int []){ 25, 25, 25, },
1384	.timer_delay_us = 5000,
1385	.timer_cons_up = 0,
1386	.timer_cons_down = 2,
1387	.up_threshold = 1,
1388	.down_threshold = 3,
1389	.idle_clocks = 15,
1390	.gcnt_us = 1,
1391	.vdd_apc_step_up_limit = 1,
1392	.vdd_apc_step_down_limit = 1,
1393	.cpr_fuses = {
1394		.init_voltage_step = 8000,
1395		.init_voltage_width = 6,
1396		.fuse_corner_data = (struct fuse_corner_data[]){
1397			/* fuse corner 0 */
1398			{
1399				.ref_uV = 1224000,
1400				.max_uV = 1224000,
1401				.min_uV = 1048000,
1402				.max_volt_scale = 0,
1403				.max_quot_scale = 0,
1404				.quot_offset = 0,
1405				.quot_scale = 1,
1406				.quot_adjust = 0,
1407				.quot_offset_scale = 5,
1408				.quot_offset_adjust = 0,
1409			},
1410			/* fuse corner 1 */
1411			{
1412				.ref_uV = 1288000,
1413				.max_uV = 1288000,
1414				.min_uV = 1048000,
1415				.max_volt_scale = 2000,
1416				.max_quot_scale = 1400,
1417				.quot_offset = 0,
1418				.quot_scale = 1,
1419				.quot_adjust = -20,
1420				.quot_offset_scale = 5,
1421				.quot_offset_adjust = 0,
1422			},
1423			/* fuse corner 2 */
1424			{
1425				.ref_uV = 1352000,
1426				.max_uV = 1384000,
1427				.min_uV = 1088000,
1428				.max_volt_scale = 2000,
1429				.max_quot_scale = 1400,
1430				.quot_offset = 0,
1431				.quot_scale = 1,
1432				.quot_adjust = 0,
1433				.quot_offset_scale = 5,
1434				.quot_offset_adjust = 0,
1435			},
1436		},
1437	},
1438};
1439
1440static const struct acc_desc qcs404_acc_desc = {
1441	.settings = (struct reg_sequence[]){
1442		{ 0xb120, 0x1041040 },
1443		{ 0xb124, 0x41 },
1444		{ 0xb120, 0x0 },
1445		{ 0xb124, 0x0 },
1446		{ 0xb120, 0x0 },
1447		{ 0xb124, 0x0 },
1448	},
1449	.config = (struct reg_sequence[]){
1450		{ 0xb138, 0xff },
1451		{ 0xb130, 0x5555 },
1452	},
1453	.num_regs_per_fuse = 2,
1454};
1455
1456static const struct cpr_acc_desc qcs404_cpr_acc_desc = {
1457	.cpr_desc = &qcs404_cpr_desc,
1458	.acc_desc = &qcs404_acc_desc,
1459};
1460
1461static unsigned int cpr_get_performance_state(struct generic_pm_domain *genpd,
1462					      struct dev_pm_opp *opp)
1463{
1464	return dev_pm_opp_get_level(opp);
1465}
1466
1467static int cpr_power_off(struct generic_pm_domain *domain)
1468{
1469	struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
1470
1471	return cpr_disable(drv);
1472}
1473
1474static int cpr_power_on(struct generic_pm_domain *domain)
1475{
1476	struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
1477
1478	return cpr_enable(drv);
1479}
1480
1481static int cpr_pd_attach_dev(struct generic_pm_domain *domain,
1482			     struct device *dev)
1483{
1484	struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);
1485	const struct acc_desc *acc_desc = drv->acc_desc;
1486	int ret = 0;
1487
1488	mutex_lock(&drv->lock);
1489
1490	dev_dbg(drv->dev, "attach callback for: %s\n", dev_name(dev));
1491
1492	/*
1493	 * This driver only supports scaling voltage for a CPU cluster
1494	 * where all CPUs in the cluster share a single regulator.
1495	 * Therefore, save the struct device pointer only for the first
1496	 * CPU device that gets attached. There is no need to do any
1497	 * additional initialization when further CPUs get attached.
1498	 */
1499	if (drv->attached_cpu_dev)
1500		goto unlock;
1501
1502	/*
1503	 * cpr_scale_voltage() requires the direction (if we are changing
1504	 * to a higher or lower OPP). The first time
1505	 * cpr_set_performance_state() is called, there is no previous
1506	 * performance state defined. Therefore, we call
1507	 * cpr_find_initial_corner() that gets the CPU clock frequency
1508	 * set by the bootloader, so that we can determine the direction
1509	 * the first time cpr_set_performance_state() is called.
1510	 */
1511	drv->cpu_clk = devm_clk_get(dev, NULL);
1512	if (IS_ERR(drv->cpu_clk)) {
1513		ret = PTR_ERR(drv->cpu_clk);
1514		if (ret != -EPROBE_DEFER)
1515			dev_err(drv->dev, "could not get cpu clk: %d\n", ret);
1516		goto unlock;
1517	}
1518	drv->attached_cpu_dev = dev;
1519
1520	dev_dbg(drv->dev, "using cpu clk from: %s\n",
1521		dev_name(drv->attached_cpu_dev));
1522
1523	/*
1524	 * Everything related to (virtual) corners has to be initialized
1525	 * here, when attaching to the power domain, because the highest
1526	 * frequency associated with each fuse corner is only known once
1527	 * the cpufreq driver has attached to us.
1530	 */
1531	ret = dev_pm_opp_get_opp_count(&drv->pd.dev);
1532	if (ret < 0) {
1533		dev_err(drv->dev, "could not get OPP count\n");
1534		goto unlock;
1535	}
1536	drv->num_corners = ret;
1537
1538	if (drv->num_corners < 2) {
1539		dev_err(drv->dev, "need at least 2 OPPs to use CPR\n");
1540		ret = -EINVAL;
1541		goto unlock;
1542	}
1543
1544	drv->corners = devm_kcalloc(drv->dev, drv->num_corners,
1545				    sizeof(*drv->corners),
1546				    GFP_KERNEL);
1547	if (!drv->corners) {
1548		ret = -ENOMEM;
1549		goto unlock;
1550	}
1551
1552	ret = cpr_corner_init(drv);
1553	if (ret)
1554		goto unlock;
1555
1556	cpr_set_loop_allowed(drv);
1557
1558	ret = cpr_init_parameters(drv);
1559	if (ret)
1560		goto unlock;
1561
1562	/* Configure CPR HW but keep it disabled */
1563	ret = cpr_config(drv);
1564	if (ret)
1565		goto unlock;
1566
1567	ret = cpr_find_initial_corner(drv);
1568	if (ret)
1569		goto unlock;
1570
1571	if (acc_desc->config)
1572		regmap_multi_reg_write(drv->tcsr, acc_desc->config,
1573				       acc_desc->num_regs_per_fuse);
1574
1575	/* Enable ACC if required */
1576	if (acc_desc->enable_mask)
1577		regmap_update_bits(drv->tcsr, acc_desc->enable_reg,
1578				   acc_desc->enable_mask,
1579				   acc_desc->enable_mask);
1580
1581	dev_info(drv->dev, "driver initialized with %u OPPs\n",
1582		 drv->num_corners);
1583
1584unlock:
1585	mutex_unlock(&drv->lock);
1586
1587	return ret;
1588}
1589
1590static int cpr_debug_info_show(struct seq_file *s, void *unused)
1591{
1592	u32 gcnt, ro_sel, ctl, irq_status, reg, error_steps;
1593	u32 step_dn, step_up, error, error_lt0, busy;
1594	struct cpr_drv *drv = s->private;
1595	struct fuse_corner *fuse_corner;
1596	struct corner *corner;
1597
1598	corner = drv->corner;
1599	fuse_corner = corner->fuse_corner;
1600
1601	seq_printf(s, "corner, current_volt = %d uV\n",
1602		       corner->last_uV);
1603
1604	ro_sel = fuse_corner->ring_osc_idx;
1605	gcnt = cpr_read(drv, REG_RBCPR_GCNT_TARGET(ro_sel));
1606	seq_printf(s, "rbcpr_gcnt_target (%u) = %#02X\n", ro_sel, gcnt);
1607
1608	ctl = cpr_read(drv, REG_RBCPR_CTL);
1609	seq_printf(s, "rbcpr_ctl = %#02X\n", ctl);
1610
1611	irq_status = cpr_read(drv, REG_RBIF_IRQ_STATUS);
1612	seq_printf(s, "rbcpr_irq_status = %#02X\n", irq_status);
1613
1614	reg = cpr_read(drv, REG_RBCPR_RESULT_0);
1615	seq_printf(s, "rbcpr_result_0 = %#02X\n", reg);
1616
1617	step_dn = reg & 0x01;
1618	step_up = (reg >> RBCPR_RESULT0_STEP_UP_SHIFT) & 0x01;
1619	seq_printf(s, "  [step_dn = %u", step_dn);
1620
1621	seq_printf(s, ", step_up = %u", step_up);
1622
1623	error_steps = (reg >> RBCPR_RESULT0_ERROR_STEPS_SHIFT)
1624				& RBCPR_RESULT0_ERROR_STEPS_MASK;
1625	seq_printf(s, ", error_steps = %u", error_steps);
1626
1627	error = (reg >> RBCPR_RESULT0_ERROR_SHIFT) & RBCPR_RESULT0_ERROR_MASK;
1628	seq_printf(s, ", error = %u", error);
1629
1630	error_lt0 = (reg >> RBCPR_RESULT0_ERROR_LT0_SHIFT) & 0x01;
1631	seq_printf(s, ", error_lt_0 = %u", error_lt0);
1632
1633	busy = (reg >> RBCPR_RESULT0_BUSY_SHIFT) & 0x01;
1634	seq_printf(s, ", busy = %u]\n", busy);
1635
1636	return 0;
1637}
1638DEFINE_SHOW_ATTRIBUTE(cpr_debug_info);
1639
1640static void cpr_debugfs_init(struct cpr_drv *drv)
1641{
1642	drv->debugfs = debugfs_create_dir("qcom_cpr", NULL);
1643
1644	debugfs_create_file("debug_info", 0444, drv->debugfs,
1645			    drv, &cpr_debug_info_fops);
1646}
1647
1648static int cpr_probe(struct platform_device *pdev)
1649{
1650	struct resource *res;
1651	struct device *dev = &pdev->dev;
1652	struct cpr_drv *drv;
1653	int irq, ret;
1654	const struct cpr_acc_desc *data;
1655	struct device_node *np;
1656	u32 cpr_rev = FUSE_REVISION_UNKNOWN;
1657
1658	data = of_device_get_match_data(dev);
1659	if (!data || !data->cpr_desc || !data->acc_desc)
1660		return -EINVAL;
1661
1662	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
1663	if (!drv)
1664		return -ENOMEM;
1665	drv->dev = dev;
1666	drv->desc = data->cpr_desc;
1667	drv->acc_desc = data->acc_desc;
1668
1669	drv->fuse_corners = devm_kcalloc(dev, drv->desc->num_fuse_corners,
1670					 sizeof(*drv->fuse_corners),
1671					 GFP_KERNEL);
1672	if (!drv->fuse_corners)
1673		return -ENOMEM;
1674
1675	np = of_parse_phandle(dev->of_node, "acc-syscon", 0);
1676	if (!np)
1677		return -ENODEV;
1678
1679	drv->tcsr = syscon_node_to_regmap(np);
1680	of_node_put(np);
1681	if (IS_ERR(drv->tcsr))
1682		return PTR_ERR(drv->tcsr);
1683
1684	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1685	drv->base = devm_ioremap_resource(dev, res);
1686	if (IS_ERR(drv->base))
1687		return PTR_ERR(drv->base);
1688
1689	irq = platform_get_irq(pdev, 0);
1690	if (irq < 0)
1691		return -EINVAL;
1692
1693	drv->vdd_apc = devm_regulator_get(dev, "vdd-apc");
1694	if (IS_ERR(drv->vdd_apc))
1695		return PTR_ERR(drv->vdd_apc);
1696
1697	/*
1698	 * Initialize the fuse corners now, since they depend only
1699	 * on data in the efuses.
1700	 * Everything related to (virtual) corners has to be
1701	 * initialized after attaching to the power domain,
1702	 * since it depends on the CPU's OPP table.
1703	 */
1704	ret = cpr_read_efuse(dev, "cpr_fuse_revision", &cpr_rev);
1705	if (ret)
1706		return ret;
1707
1708	drv->cpr_fuses = cpr_get_fuses(drv);
1709	if (IS_ERR(drv->cpr_fuses))
1710		return PTR_ERR(drv->cpr_fuses);
1711
1712	ret = cpr_populate_ring_osc_idx(drv);
1713	if (ret)
1714		return ret;
1715
1716	ret = cpr_fuse_corner_init(drv);
1717	if (ret)
1718		return ret;
1719
1720	mutex_init(&drv->lock);
1721
1722	ret = devm_request_threaded_irq(dev, irq, NULL,
1723					cpr_irq_handler,
1724					IRQF_ONESHOT | IRQF_TRIGGER_RISING,
1725					"cpr", drv);
1726	if (ret)
1727		return ret;
1728
1729	drv->pd.name = devm_kstrdup_const(dev, dev->of_node->full_name,
1730					  GFP_KERNEL);
1731	if (!drv->pd.name)
1732		return -EINVAL;
1733
1734	drv->pd.power_off = cpr_power_off;
1735	drv->pd.power_on = cpr_power_on;
1736	drv->pd.set_performance_state = cpr_set_performance_state;
1737	drv->pd.opp_to_performance_state = cpr_get_performance_state;
1738	drv->pd.attach_dev = cpr_pd_attach_dev;
1739
1740	ret = pm_genpd_init(&drv->pd, NULL, true);
1741	if (ret)
1742		return ret;
1743
1744	ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd);
1745	if (ret)
1746		goto err_remove_genpd;
1747
1748	platform_set_drvdata(pdev, drv);
1749	cpr_debugfs_init(drv);
1750
1751	return 0;
1752
1753err_remove_genpd:
1754	pm_genpd_remove(&drv->pd);
1755	return ret;
1756}
1757
1758static int cpr_remove(struct platform_device *pdev)
1759{
1760	struct cpr_drv *drv = platform_get_drvdata(pdev);
1761
1762	if (cpr_is_allowed(drv)) {
1763		cpr_ctl_disable(drv);
1764		cpr_irq_set(drv, 0);
1765	}
1766
1767	of_genpd_del_provider(pdev->dev.of_node);
1768	pm_genpd_remove(&drv->pd);
1769
1770	debugfs_remove_recursive(drv->debugfs);
1771
1772	return 0;
1773}
1774
1775static const struct of_device_id cpr_match_table[] = {
1776	{ .compatible = "qcom,qcs404-cpr", .data = &qcs404_cpr_acc_desc },
1777	{ }
1778};
1779MODULE_DEVICE_TABLE(of, cpr_match_table);
1780
1781static struct platform_driver cpr_driver = {
1782	.probe		= cpr_probe,
1783	.remove		= cpr_remove,
1784	.driver		= {
1785		.name	= "qcom-cpr",
1786		.of_match_table = cpr_match_table,
1787	},
1788};
1789module_platform_driver(cpr_driver);
1790
1791MODULE_DESCRIPTION("Core Power Reduction (CPR) driver");
1792MODULE_LICENSE("GPL v2");
1793