// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MMP PLL clock rate calculation
 *
 * Copyright (C) 2020 Lubomir Rintel <lkundrak@v3.sk>
 */

#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>

#include "clk.h"

#define to_clk_mmp_pll(hw)	container_of(hw, struct mmp_clk_pll, hw)

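/**
 * struct mmp_clk_pll - PLL on MMP2 and MMP3, read-only to the kernel
 * @hw: common clock framework handle
 * @default_rate: rate reported while the PLL is not software-enabled;
 *		  non-zero for PLLs that free-run at a fixed rate when
 *		  not software controlled
 * @enable_reg: register carrying the PLL enable/lock status
 * @enable: mask in @enable_reg that reads back fully set when enabled
 * @reg: divider register, or NULL if the dividers are fixed
 * @shift: position of the 9-bit feedback divider in @reg; a 5-bit
 *	   reference divider follows at @shift + 9
 * @input_rate: reference input rate (MMP3 only)
 * @postdiv_reg: register holding the 3-bit post-divider selector, or
 *		 NULL if there is no post-divider (MMP3 only)
 * @postdiv_shift: position of the post-divider selector in @postdiv_reg
 */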
struct mmp_clk_pll {
	struct clk_hw hw;
	unsigned long default_rate;
	void __iomem *enable_reg;
	u32 enable;
	void __iomem *reg;
	u8 shift;

	unsigned long input_rate;
	void __iomem *postdiv_reg;
	u8 postdiv_shift;
};

static int mmp_clk_pll_is_enabled(struct clk_hw *hw)
{
	struct mmp_clk_pll *pll = to_clk_mmp_pll(hw);
	u32 val;

	val = readl_relaxed(pll->enable_reg);
	if ((val & pll->enable) == pll->enable)
		return 1;

	/* Some PLLs, if not software controlled, output a default clock. */
	if (pll->default_rate > 0)
		return 1;

	return 0;
}

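/*
 * Rate calculation, as implemented in mmp_clk_pll_recalc_rate() below:
 *
 * MMP3 (a post-divider register is present):
 *
 *	rate = input_rate * 2 * fbdiv / refdiv / postdiv
 *
 * MMP2 (no post-divider); the reference rate is implied by the refdiv
 * field itself, 19.2 MHz for refdiv == 3 and 26 MHz for refdiv == 4:
 *
 *	rate = ref * (fbdiv + 2) / (refdiv + 2)
 *
 * Worked example with illustrative register values (not taken from a
 * datasheet): on MMP2, fbdiv = 182 and refdiv = 4 give
 * 26000000 * 184 / 6 = 797333333 Hz.
 */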
static unsigned long mmp_clk_pll_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct mmp_clk_pll *pll = to_clk_mmp_pll(hw);
	u32 fbdiv, refdiv, postdiv;
	u64 rate;
	u32 val;

	/* A PLL that is not enabled runs at its default rate, if any. */
	val = readl_relaxed(pll->enable_reg);
	if ((val & pll->enable) != pll->enable)
		return pll->default_rate;

	if (pll->reg) {
		/* 9-bit feedback divider, 5-bit reference divider above it */
		val = readl_relaxed(pll->reg);
		fbdiv = (val >> pll->shift) & 0x1ff;
		refdiv = (val >> (pll->shift + 9)) & 0x1f;
	} else {
		/* No divider register: the dividers are fixed. */
		fbdiv = 2;
		refdiv = 1;
	}

	if (pll->postdiv_reg) {
		/* MMP3 clock rate calculation */
		static const u8 postdivs[] = {2, 3, 4, 5, 6, 8, 10, 12, 16};

		val = readl_relaxed(pll->postdiv_reg);
		postdiv = (val >> pll->postdiv_shift) & 0x7;

		rate = pll->input_rate;
		rate *= 2 * fbdiv;
		do_div(rate, refdiv);
		do_div(rate, postdivs[postdiv]);
	} else {
		/* MMP2 clock rate calculation */
		if (refdiv == 3) {
			rate = 19200000;
		} else if (refdiv == 4) {
			rate = 26000000;
		} else {
			pr_err("bad refdiv: %u (0x%08x)\n", refdiv, val);
			return 0;
		}

		rate *= fbdiv + 2;
		do_div(rate, refdiv + 2);
	}

	return (unsigned long)rate;
}

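/*
 * The PLLs are read-only as far as the clock framework is concerned:
 * only enable status and rate readback are implemented, with no
 * .enable or .set_rate operations. The PLLs are presumably set up by
 * the boot firmware.
 */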
static const struct clk_ops mmp_clk_pll_ops = {
	.is_enabled = mmp_clk_pll_is_enabled,
	.recalc_rate = mmp_clk_pll_recalc_rate,
};

static struct clk *mmp_clk_register_pll(char *name,
			unsigned long default_rate,
			void __iomem *enable_reg, u32 enable,
			void __iomem *reg, u8 shift,
			unsigned long input_rate,
			void __iomem *postdiv_reg, u8 postdiv_shift)
{
	struct mmp_clk_pll *pll;
	struct clk *clk;
	struct clk_init_data init;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	/* The rate is read back from registers; there is no parent clock. */
	init.name = name;
	init.ops = &mmp_clk_pll_ops;
	init.flags = 0;
	init.parent_names = NULL;
	init.num_parents = 0;

	pll->default_rate = default_rate;
	pll->enable_reg = enable_reg;
	pll->enable = enable;
	pll->reg = reg;
	pll->shift = shift;

	pll->input_rate = input_rate;
	pll->postdiv_reg = postdiv_reg;
	pll->postdiv_shift = postdiv_shift;

	pll->hw.init = &init;

	clk = clk_register(NULL, &pll->hw);
	if (IS_ERR(clk))
		kfree(pll);

	return clk;
}

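/**
 * mmp_register_pll_clks - register a table of PLL clocks
 * @unit: clock unit whose clk_table receives the registered clocks
 * @clks: array of PLL descriptions; register offsets are relative to
 *	  @base, and an offset of zero means "no such register"
 * @base: base address the register offsets in @clks apply to
 * @size: number of entries in @clks
 *
 * Entries with a non-zero id are entered into @unit's clock table;
 * failed registrations are logged and skipped.
 */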
void mmp_register_pll_clks(struct mmp_clk_unit *unit,
			struct mmp_param_pll_clk *clks,
			void __iomem *base, int size)
{
	struct clk *clk;
	int i;

	for (i = 0; i < size; i++) {
		void __iomem *reg = NULL;
		void __iomem *postdiv_reg = NULL;

		/*
		 * An offset of zero means the register is absent; keep
		 * the pointer NULL so that mmp_clk_pll_recalc_rate()
		 * can tell. Passing base + 0 unconditionally would make
		 * pll->postdiv_reg always non-NULL and leave the MMP2
		 * rate calculation unreachable.
		 */
		if (clks[i].offset)
			reg = base + clks[i].offset;
		if (clks[i].postdiv_offset)
			postdiv_reg = base + clks[i].postdiv_offset;

		clk = mmp_clk_register_pll(clks[i].name,
					clks[i].default_rate,
					base + clks[i].enable_offset,
					clks[i].enable,
					reg, clks[i].shift,
					clks[i].input_rate,
					postdiv_reg,
					clks[i].postdiv_shift);
		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s\n",
			       __func__, clks[i].name);
			continue;
		}
		if (clks[i].id)
			unit->clk_table[clks[i].id] = clk;
	}
}
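/*
 * Example use from a platform's clock unit setup. This is only an
 * illustration: the ids, offsets, masks and rates below are made up
 * and do not come from any datasheet.
 *
 *	static struct mmp_param_pll_clk pll_clks[] = {
 *		{ .id = 1, .name = "pll2",
 *		  .enable_offset = 0x10, .enable = BIT(29),
 *		  .offset = 0x34, .shift = 0 },
 *		{ .id = 2, .name = "pll1_p", .default_rate = 797330000,
 *		  .enable_offset = 0x10, .enable = BIT(28),
 *		  .input_rate = 26000000,
 *		  .postdiv_offset = 0x54, .postdiv_shift = 9 },
 *	};
 *
 *	mmp_register_pll_clks(unit, pll_clks, mpmu_base,
 *			      ARRAY_SIZE(pll_clks));
 *
 * The first entry decodes an MMP2-style rate from the fbdiv/refdiv
 * fields at offset 0x34; the second is an MMP3-style PLL with a fixed
 * 26 MHz input and a post-divider selector at offset 0x54.
 */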