// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 NXP
 */

#include <linux/clk-provider.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/slab.h>

#include "clk.h"

#define PCG_PREDIV_SHIFT	16
#define PCG_PREDIV_WIDTH	3
#define PCG_PREDIV_MAX		8

#define PCG_DIV_SHIFT		0
#define PCG_CORE_DIV_WIDTH	3
#define PCG_DIV_WIDTH		6
#define PCG_DIV_MAX		64

#define PCG_PCS_SHIFT		24
#define PCG_PCS_MASK		0x7

#define PCG_CGC_SHIFT		28

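/*
 * Layout of an i.MX8M clock root ("slice") register, as implied by the
 * field definitions above:
 *
 *   bit  28     CGC    - clock gate
 *   bits 26:24  PCS    - parent (mux) select
 *   bits 18:16  PREDIV - pre divider, divide by 1..8
 *   bits  5:0   DIV    - post divider, divide by 1..64
 *                        (core slices use only bits 2:0, divide by 1..8)
 */
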
static unsigned long imx8m_clk_composite_divider_recalc_rate(struct clk_hw *hw,
						unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned long prediv_rate;
	unsigned int prediv_value;
	unsigned int div_value;

	prediv_value = readl(divider->reg) >> divider->shift;
	prediv_value &= clk_div_mask(divider->width);

	prediv_rate = divider_recalc_rate(hw, parent_rate, prediv_value,
					  NULL, divider->flags,
					  divider->width);

	div_value = readl(divider->reg) >> PCG_DIV_SHIFT;
	div_value &= clk_div_mask(PCG_DIV_WIDTH);

	return divider_recalc_rate(hw, prediv_rate, div_value, NULL,
				   divider->flags, PCG_DIV_WIDTH);
}

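/*
 * Find the pre/post divider pair whose combined division of parent_rate
 * comes closest to the requested rate, by exhaustively walking all
 * 8 x 64 combinations.  For example (rates are hypothetical), with an
 * 800 MHz parent and a 133 MHz request the closest match is prediv = 1,
 * postdiv = 6, i.e. ~133.3 MHz.
 */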
static int imx8m_clk_composite_compute_dividers(unsigned long rate,
						unsigned long parent_rate,
						int *prediv, int *postdiv)
{
	int div1, div2;
	int error = INT_MAX;
	int ret = -EINVAL;

	*prediv = 1;
	*postdiv = 1;

	for (div1 = 1; div1 <= PCG_PREDIV_MAX; div1++) {
		for (div2 = 1; div2 <= PCG_DIV_MAX; div2++) {
			int new_error = ((parent_rate / div1) / div2) - rate;

			if (abs(new_error) < abs(error)) {
				*prediv = div1;
				*postdiv = div2;
				error = new_error;
				ret = 0;
			}
		}
	}
	return ret;
}

static long imx8m_clk_composite_divider_round_rate(struct clk_hw *hw,
						unsigned long rate,
						unsigned long *prate)
{
	int prediv_value;
	int div_value;

	imx8m_clk_composite_compute_dividers(rate, *prate,
						&prediv_value, &div_value);
	rate = DIV_ROUND_UP(*prate, prediv_value);

	return DIV_ROUND_UP(rate, div_value);
}

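/*
 * Program both the pre and post divider fields with a single
 * read-modify-write of the slice register, under the shared CCM lock;
 * the register is only written back when the value actually changes.
 */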
static int imx8m_clk_composite_divider_set_rate(struct clk_hw *hw,
					unsigned long rate,
					unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned long flags;
	int prediv_value;
	int div_value;
	int ret;
	u32 orig, val;

	ret = imx8m_clk_composite_compute_dividers(rate, parent_rate,
						&prediv_value, &div_value);
	if (ret)
		return -EINVAL;

	spin_lock_irqsave(divider->lock, flags);

	orig = readl(divider->reg);
	val = orig & ~((clk_div_mask(divider->width) << divider->shift) |
		       (clk_div_mask(PCG_DIV_WIDTH) << PCG_DIV_SHIFT));

	val |= (u32)(prediv_value - 1) << divider->shift;
	val |= (u32)(div_value - 1) << PCG_DIV_SHIFT;

	if (val != orig)
		writel(val, divider->reg);

	spin_unlock_irqrestore(divider->lock, flags);

	return ret;
}

static const struct clk_ops imx8m_clk_composite_divider_ops = {
	.recalc_rate = imx8m_clk_composite_divider_recalc_rate,
	.round_rate = imx8m_clk_composite_divider_round_rate,
	.set_rate = imx8m_clk_composite_divider_set_rate,
};

static u8 imx8m_clk_composite_mux_get_parent(struct clk_hw *hw)
{
	return clk_mux_ops.get_parent(hw);
}

static int imx8m_clk_composite_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
	unsigned long flags = 0;
	u32 reg;

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);

	reg = readl(mux->reg);
	reg &= ~(mux->mask << mux->shift);
	val = val << mux->shift;
	reg |= val;
	/*
	 * Write twice to make sure the non-target interface
	 * SEL_A/B fields point at the same clock input.
	 */
	writel(reg, mux->reg);
	writel(reg, mux->reg);

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);

	return 0;
}

static int
imx8m_clk_composite_mux_determine_rate(struct clk_hw *hw,
				       struct clk_rate_request *req)
{
	return clk_mux_ops.determine_rate(hw, req);
}

static const struct clk_ops imx8m_clk_composite_mux_ops = {
	.get_parent = imx8m_clk_composite_mux_get_parent,
	.set_parent = imx8m_clk_composite_mux_set_parent,
	.determine_rate = imx8m_clk_composite_mux_determine_rate,
};

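/*
 * imx8m_clk_hw_composite_flags() registers a mux + divider + gate composite
 * clock backed by a single i.MX8M clock root slice register.  Illustrative
 * usage from a platform clock driver (all names and the register offset
 * below are hypothetical):
 *
 *	static const char * const foo_sels[] = { "osc_24m", "sys_pll1_800m", };
 *
 *	hws[IMX8MX_CLK_FOO] =
 *		imx8m_clk_hw_composite_flags("foo", foo_sels,
 *					     ARRAY_SIZE(foo_sels),
 *					     base + 0xa000, 0,
 *					     CLK_SET_RATE_PARENT);
 */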
struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
					const char * const *parent_names,
					int num_parents, void __iomem *reg,
					u32 composite_flags,
					unsigned long flags)
{
	struct clk_hw *hw = ERR_PTR(-ENOMEM), *mux_hw;
	struct clk_hw *div_hw, *gate_hw;
	struct clk_divider *div = NULL;
	struct clk_gate *gate = NULL;
	struct clk_mux *mux = NULL;
	const struct clk_ops *divider_ops;
	const struct clk_ops *mux_ops;

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		goto fail;

	mux_hw = &mux->hw;
	mux->reg = reg;
	mux->shift = PCG_PCS_SHIFT;
	mux->mask = PCG_PCS_MASK;
	mux->lock = &imx_ccm_lock;

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		goto fail;

	div_hw = &div->hw;
	div->reg = reg;
	if (composite_flags & IMX_COMPOSITE_CORE) {
		div->shift = PCG_DIV_SHIFT;
		div->width = PCG_CORE_DIV_WIDTH;
		divider_ops = &clk_divider_ops;
		mux_ops = &imx8m_clk_composite_mux_ops;
	} else if (composite_flags & IMX_COMPOSITE_BUS) {
		div->shift = PCG_PREDIV_SHIFT;
		div->width = PCG_PREDIV_WIDTH;
		divider_ops = &imx8m_clk_composite_divider_ops;
		mux_ops = &imx8m_clk_composite_mux_ops;
	} else {
		div->shift = PCG_PREDIV_SHIFT;
		div->width = PCG_PREDIV_WIDTH;
		divider_ops = &imx8m_clk_composite_divider_ops;
		mux_ops = &clk_mux_ops;
		if (!(composite_flags & IMX_COMPOSITE_FW_MANAGED))
			flags |= CLK_SET_PARENT_GATE;
	}

	div->lock = &imx_ccm_lock;
	div->flags = CLK_DIVIDER_ROUND_CLOSEST;

	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		goto fail;

	gate_hw = &gate->hw;
	gate->reg = reg;
	gate->bit_idx = PCG_CGC_SHIFT;
	gate->lock = &imx_ccm_lock;

	hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
			mux_hw, mux_ops, div_hw,
			divider_ops, gate_hw, &clk_gate_ops, flags);
	if (IS_ERR(hw))
		goto fail;

	return hw;

fail:
	kfree(gate);
	kfree(div);
	kfree(mux);
	return ERR_CAST(hw);
}
EXPORT_SYMBOL_GPL(imx8m_clk_hw_composite_flags);