// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2017-2018 NXP.
 */

#include <linux/bits.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/jiffies.h>

#include "clk.h"

#define GNRL_CTL	0x0
#define DIV_CTL		0x4
#define DIV_CTL1	0x8
#define LOCK_STATUS	BIT(31)
#define LOCK_SEL_MASK	BIT(29)
#define CLKE_MASK	BIT(11)
#define RST_MASK	BIT(9)
#define BYPASS_MASK	BIT(4)
#define MDIV_SHIFT	12
#define MDIV_MASK	GENMASK(21, 12)
#define PDIV_SHIFT	4
#define PDIV_MASK	GENMASK(9, 4)
#define SDIV_SHIFT	0
#define SDIV_MASK	GENMASK(2, 0)
#define KDIV_SHIFT	0
#define KDIV_MASK	GENMASK(15, 0)

#define LOCK_TIMEOUT_US		10000

struct clk_pll14xx {
	struct clk_hw			hw;
	void __iomem			*base;
	enum imx_pll14xx_type		type;
	const struct imx_pll14xx_rate_table *rate_table;
	int rate_count;
};

#define to_clk_pll14xx(_hw) container_of(_hw, struct clk_pll14xx, hw)

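/*
 * Pre-computed PLL settings, one entry per supported output rate:
 * the target rate plus its mdiv/pdiv/sdiv dividers (and the 16-bit
 * fractional kdiv for the 1443x), filled in via the PLL_1416X_RATE()
 * and PLL_1443X_RATE() helpers.
 */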
static const struct imx_pll14xx_rate_table imx_pll1416x_tbl[] = {
	PLL_1416X_RATE(1800000000U, 225, 3, 0),
	PLL_1416X_RATE(1600000000U, 200, 3, 0),
	PLL_1416X_RATE(1500000000U, 375, 3, 1),
	PLL_1416X_RATE(1400000000U, 350, 3, 1),
	PLL_1416X_RATE(1200000000U, 300, 3, 1),
	PLL_1416X_RATE(1000000000U, 250, 3, 1),
	PLL_1416X_RATE(800000000U,  200, 3, 1),
	PLL_1416X_RATE(750000000U,  250, 2, 2),
	PLL_1416X_RATE(700000000U,  350, 3, 2),
	PLL_1416X_RATE(600000000U,  300, 3, 2),
};

static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = {
	PLL_1443X_RATE(1039500000U, 173, 2, 1, 16384),
	PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
	PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
	PLL_1443X_RATE(519750000U, 173, 2, 2, 16384),
	PLL_1443X_RATE(393216000U, 262, 2, 3, 9437),
	PLL_1443X_RATE(361267200U, 361, 3, 3, 17511),
};

struct imx_pll14xx_clk imx_1443x_pll = {
	.type = PLL_1443X,
	.rate_table = imx_pll1443x_tbl,
	.rate_count = ARRAY_SIZE(imx_pll1443x_tbl),
};
EXPORT_SYMBOL_GPL(imx_1443x_pll);

struct imx_pll14xx_clk imx_1443x_dram_pll = {
	.type = PLL_1443X,
	.rate_table = imx_pll1443x_tbl,
	.rate_count = ARRAY_SIZE(imx_pll1443x_tbl),
	.flags = CLK_GET_RATE_NOCACHE,
};
EXPORT_SYMBOL_GPL(imx_1443x_dram_pll);

struct imx_pll14xx_clk imx_1416x_pll = {
	.type = PLL_1416X,
	.rate_table = imx_pll1416x_tbl,
	.rate_count = ARRAY_SIZE(imx_pll1416x_tbl),
};
EXPORT_SYMBOL_GPL(imx_1416x_pll);

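/* Look up the pre-computed settings for an exact rate match, or return NULL. */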
static const struct imx_pll14xx_rate_table *imx_get_pll_settings(
		struct clk_pll14xx *pll, unsigned long rate)
{
	const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
	int i;

	for (i = 0; i < pll->rate_count; i++)
		if (rate == rate_table[i].rate)
			return &rate_table[i];

	return NULL;
}

static long clk_pll14xx_round_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long *prate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	const struct imx_pll14xx_rate_table *rate_table = pll->rate_table;
	int i;

	/* Assuming rate_table is in descending order */
	for (i = 0; i < pll->rate_count; i++)
		if (rate >= rate_table[i].rate)
			return rate_table[i].rate;

	/* return minimum supported value */
	return rate_table[i - 1].rate;
}

static unsigned long clk_pll1416x_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 mdiv, pdiv, sdiv, pll_div;
	u64 fvco = parent_rate;

	pll_div = readl_relaxed(pll->base + DIV_CTL);
	mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT;
	pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT;
	sdiv = (pll_div & SDIV_MASK) >> SDIV_SHIFT;

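	/* fout = (m * Fin) / (p << s) */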
	fvco *= mdiv;
	do_div(fvco, pdiv << sdiv);

	return fvco;
}

static unsigned long clk_pll1443x_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 mdiv, pdiv, sdiv, pll_div_ctl0, pll_div_ctl1;
	short int kdiv;
	u64 fvco = parent_rate;

	pll_div_ctl0 = readl_relaxed(pll->base + DIV_CTL);
	pll_div_ctl1 = readl_relaxed(pll->base + DIV_CTL1);
	mdiv = (pll_div_ctl0 & MDIV_MASK) >> MDIV_SHIFT;
	pdiv = (pll_div_ctl0 & PDIV_MASK) >> PDIV_SHIFT;
	sdiv = (pll_div_ctl0 & SDIV_MASK) >> SDIV_SHIFT;
	kdiv = pll_div_ctl1 & KDIV_MASK;

	/* fvco = (m * 65536 + k) * Fin / (p * 65536) */
	fvco *= (mdiv * 65536 + kdiv);
	pdiv *= 65536;

	do_div(fvco, pdiv << sdiv);

	return fvco;
}

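/*
 * A change to mdiv or pdiv requires the full reset-and-relock sequence
 * in the set_rate paths below; a change to sdiv (or kdiv) alone can be
 * applied while the PLL keeps running.
 */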
static inline bool clk_pll14xx_mp_change(const struct imx_pll14xx_rate_table *rate,
					  u32 pll_div)
{
	u32 old_mdiv, old_pdiv;

	old_mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT;
	old_pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT;

	return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv;
}

static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll)
{
	u32 val;

	return readl_poll_timeout(pll->base + GNRL_CTL, val, val & LOCK_STATUS, 0,
			LOCK_TIMEOUT_US);
}

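/*
 * Full rate change: select PLL output lock detection, assert reset,
 * switch to bypass, program the new dividers, release reset, wait for
 * lock and finally leave bypass. An sdiv-only change skips all of this.
 */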
static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
				 unsigned long prate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	const struct imx_pll14xx_rate_table *rate;
	u32 tmp, div_val;
	int ret;

	rate = imx_get_pll_settings(pll, drate);
	if (!rate) {
		pr_err("%s: Invalid rate %lu for pll clk %s\n", __func__,
		       drate, clk_hw_get_name(hw));
		return -EINVAL;
	}

	tmp = readl_relaxed(pll->base + DIV_CTL);

	if (!clk_pll14xx_mp_change(rate, tmp)) {
		tmp &= ~SDIV_MASK;
		tmp |= rate->sdiv << SDIV_SHIFT;
		writel_relaxed(tmp, pll->base + DIV_CTL);

		return 0;
	}

	/* Bypass clock and set lock to pll output lock */
	tmp = readl_relaxed(pll->base);
	tmp |= LOCK_SEL_MASK;
	writel_relaxed(tmp, pll->base);

	/* Enable RST */
	tmp &= ~RST_MASK;
	writel_relaxed(tmp, pll->base);

	/* Enable BYPASS */
	tmp |= BYPASS_MASK;
	writel(tmp, pll->base);

	div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) |
		(rate->sdiv << SDIV_SHIFT);
	writel_relaxed(div_val, pll->base + DIV_CTL);

	/*
	 * According to the spec, t3 - t2 needs to be greater than
	 * both 1us and 1/FREF.
	 * FREF is FIN / Prediv; since prediv is in [1, 63], 3us is
	 * a safe choice.
	 */
	udelay(3);

	/* Disable RST */
	tmp |= RST_MASK;
	writel_relaxed(tmp, pll->base);

	/* Wait Lock */
	ret = clk_pll14xx_wait_lock(pll);
	if (ret)
		return ret;

	/* Bypass */
	tmp &= ~BYPASS_MASK;
	writel_relaxed(tmp, pll->base);

	return 0;
}

static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate,
				 unsigned long prate)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	const struct imx_pll14xx_rate_table *rate;
	u32 tmp, div_val;
	int ret;

	rate = imx_get_pll_settings(pll, drate);
	if (!rate) {
		pr_err("%s: Invalid rate %lu for pll clk %s\n", __func__,
			drate, clk_hw_get_name(hw));
		return -EINVAL;
	}

	tmp = readl_relaxed(pll->base + DIV_CTL);

	if (!clk_pll14xx_mp_change(rate, tmp)) {
		tmp &= ~SDIV_MASK;
		tmp |= rate->sdiv << SDIV_SHIFT;
		writel_relaxed(tmp, pll->base + DIV_CTL);

		tmp = rate->kdiv << KDIV_SHIFT;
		writel_relaxed(tmp, pll->base + DIV_CTL1);

		return 0;
	}

	/* Enable RST */
	tmp = readl_relaxed(pll->base);
	tmp &= ~RST_MASK;
	writel_relaxed(tmp, pll->base);

	/* Enable BYPASS */
	tmp |= BYPASS_MASK;
	writel_relaxed(tmp, pll->base);

	div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) |
		(rate->sdiv << SDIV_SHIFT);
	writel_relaxed(div_val, pll->base + DIV_CTL);
	writel_relaxed(rate->kdiv << KDIV_SHIFT, pll->base + DIV_CTL1);

	/*
	 * According to the spec, t3 - t2 needs to be greater than
	 * both 1us and 1/FREF.
	 * FREF is FIN / Prediv; since prediv is in [1, 63], 3us is
	 * a safe choice.
	 */
	udelay(3);

	/* Disable RST */
	tmp |= RST_MASK;
	writel_relaxed(tmp, pll->base);

	/* Wait Lock */
	ret = clk_pll14xx_wait_lock(pll);
	if (ret)
		return ret;

	/* Bypass */
	tmp &= ~BYPASS_MASK;
	writel_relaxed(tmp, pll->base);

	return 0;
}

static int clk_pll14xx_prepare(struct clk_hw *hw)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 val;
	int ret;

	/*
	 * RESETB = 1 from 0, PLL starts its normal
	 * operation after lock time
	 */
	val = readl_relaxed(pll->base + GNRL_CTL);
	if (val & RST_MASK)
		return 0;
	val |= BYPASS_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);
	val |= RST_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);

	ret = clk_pll14xx_wait_lock(pll);
	if (ret)
		return ret;

	val &= ~BYPASS_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);

	return 0;
}

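/* The PLL counts as prepared when it is out of reset (RST bit set). */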
static int clk_pll14xx_is_prepared(struct clk_hw *hw)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 val;

	val = readl_relaxed(pll->base + GNRL_CTL);

	return (val & RST_MASK) ? 1 : 0;
}

static void clk_pll14xx_unprepare(struct clk_hw *hw)
{
	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
	u32 val;

	/*
	 * Set RST to 0, power down mode is enabled and
	 * every digital block is reset
	 */
	val = readl_relaxed(pll->base + GNRL_CTL);
	val &= ~RST_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);
}

static const struct clk_ops clk_pll1416x_ops = {
	.prepare	= clk_pll14xx_prepare,
	.unprepare	= clk_pll14xx_unprepare,
	.is_prepared	= clk_pll14xx_is_prepared,
	.recalc_rate	= clk_pll1416x_recalc_rate,
	.round_rate	= clk_pll14xx_round_rate,
	.set_rate	= clk_pll1416x_set_rate,
};

static const struct clk_ops clk_pll1416x_min_ops = {
	.recalc_rate	= clk_pll1416x_recalc_rate,
};

static const struct clk_ops clk_pll1443x_ops = {
	.prepare	= clk_pll14xx_prepare,
	.unprepare	= clk_pll14xx_unprepare,
	.is_prepared	= clk_pll14xx_is_prepared,
	.recalc_rate	= clk_pll1443x_recalc_rate,
	.round_rate	= clk_pll14xx_round_rate,
	.set_rate	= clk_pll1443x_set_rate,
};

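/*
 * imx_dev_clk_hw_pll14xx() - register a PLL1416x/PLL1443x clock
 * @dev: registering device, may be NULL
 * @name: name of the new clock
 * @parent_name: name of the reference clock
 * @base: mapped base address of the PLL register block
 * @pll_clk: PLL type, flags and rate table description
 *
 * Returns the registered clk_hw, or an ERR_PTR() on failure.
 *
 * Illustrative call from an SoC clock driver (the clock names and the
 * register offset are hypothetical examples, not taken from this file):
 *
 *	hw = imx_dev_clk_hw_pll14xx(dev, "audio_pll1", "osc_24m",
 *				    anatop_base + 0x0, &imx_1443x_pll);
 */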
struct clk_hw *imx_dev_clk_hw_pll14xx(struct device *dev, const char *name,
				const char *parent_name, void __iomem *base,
				const struct imx_pll14xx_clk *pll_clk)
{
	struct clk_pll14xx *pll;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;
	u32 val;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.flags = pll_clk->flags;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	switch (pll_clk->type) {
	case PLL_1416X:
		if (!pll_clk->rate_table)
			init.ops = &clk_pll1416x_min_ops;
		else
			init.ops = &clk_pll1416x_ops;
		break;
	case PLL_1443X:
		init.ops = &clk_pll1443x_ops;
		break;
	default:
		pr_err("%s: Unknown pll type for pll clk %s\n",
		       __func__, name);
		kfree(pll);
		return ERR_PTR(-EINVAL);
	}

	pll->base = base;
	pll->hw.init = &init;
	pll->type = pll_clk->type;
	pll->rate_table = pll_clk->rate_table;
	pll->rate_count = pll_clk->rate_count;

	val = readl_relaxed(pll->base + GNRL_CTL);
	val &= ~BYPASS_MASK;
	writel_relaxed(val, pll->base + GNRL_CTL);

	hw = &pll->hw;

	ret = clk_hw_register(dev, hw);
	if (ret) {
		pr_err("%s: failed to register pll %s %d\n",
			__func__, name, ret);
		kfree(pll);
		return ERR_PTR(ret);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(imx_dev_clk_hw_pll14xx);