// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 MediaTek Inc.
 * Author: Owen Chen <owen.chen@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/compiler_types.h>
#include <linux/container_of.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#include "clk-mux.h"

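/*
 * struct mtk_clk_mux - runtime state of one MediaTek mux clock
 * @hw:		clk_hw handle registered with the common clock framework
 * @regmap:	regmap used for all register accesses of this mux
 * @data:	static description of the mux (register offsets, shifts, width)
 * @lock:	optional spinlock serializing register updates; may be NULL
 * @reparent:	set when the parent was changed while the clock was gated, so
 *		that the update bit is written again on the next enable
 */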
struct mtk_clk_mux {
	struct clk_hw hw;
	struct regmap *regmap;
	const struct mtk_mux *data;
	spinlock_t *lock;
	bool reparent;
};

static inline struct mtk_clk_mux *to_mtk_clk_mux(struct clk_hw *hw)
{
	return container_of(hw, struct mtk_clk_mux, hw);
}

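/*
 * The _setclr callbacks below rely on the dedicated SET/CLR registers
 * (set_ofs/clr_ofs): writing a bit mask to the CLR register clears those
 * bits in the mux register and writing to the SET register sets them, so
 * the gate bit can be toggled without a read-modify-write of the mux
 * register.
 */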
static int mtk_clk_mux_enable_setclr(struct clk_hw *hw)
{
	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
	unsigned long flags = 0;

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);
	else
		__acquire(mux->lock);

	regmap_write(mux->regmap, mux->data->clr_ofs,
		     BIT(mux->data->gate_shift));

	/*
	 * If the parent has been changed when the clock was disabled, it will
	 * not be effective yet. Set the update bit to ensure the mux gets
	 * updated.
	 */
	if (mux->reparent && mux->data->upd_shift >= 0) {
		regmap_write(mux->regmap, mux->data->upd_ofs,
			     BIT(mux->data->upd_shift));
		mux->reparent = false;
	}

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);
	else
		__release(mux->lock);

	return 0;
}

static void mtk_clk_mux_disable_setclr(struct clk_hw *hw)
{
	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);

	regmap_write(mux->regmap, mux->data->set_ofs,
			BIT(mux->data->gate_shift));
}

static int mtk_clk_mux_is_enabled(struct clk_hw *hw)
{
	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
	u32 val;

	regmap_read(mux->regmap, mux->data->mux_ofs, &val);

	return (val & BIT(mux->data->gate_shift)) == 0;
}

static u8 mtk_clk_mux_get_parent(struct clk_hw *hw)
{
	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
	u32 mask = GENMASK(mux->data->mux_width - 1, 0);
	u32 val;

	regmap_read(mux->regmap, mux->data->mux_ofs, &val);
	val = (val >> mux->data->mux_shift) & mask;

	return val;
}

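/*
 * Reparent by clearing the old selector bits through the CLR register and
 * programming the new index through the SET register, then poke the update
 * bit (when the mux has one) so the hardware latches the new selection.
 * The reparent flag is also set so that mtk_clk_mux_enable_setclr() can
 * repeat the update in case the clock was gated during the switch.
 */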
static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
{
	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
	u32 mask = GENMASK(mux->data->mux_width - 1, 0);
	u32 val, orig;
	unsigned long flags = 0;

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);
	else
		__acquire(mux->lock);

	regmap_read(mux->regmap, mux->data->mux_ofs, &orig);
	val = (orig & ~(mask << mux->data->mux_shift))
			| (index << mux->data->mux_shift);

	if (val != orig) {
		regmap_write(mux->regmap, mux->data->clr_ofs,
				mask << mux->data->mux_shift);
		regmap_write(mux->regmap, mux->data->set_ofs,
				index << mux->data->mux_shift);

		if (mux->data->upd_shift >= 0) {
			regmap_write(mux->regmap, mux->data->upd_ofs,
					BIT(mux->data->upd_shift));
			mux->reparent = true;
		}
	}

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);
	else
		__release(mux->lock);

	return 0;
}

static int mtk_clk_mux_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);

	return clk_mux_determine_rate_flags(hw, req, mux->data->flags);
}

const struct clk_ops mtk_mux_clr_set_upd_ops = {
	.get_parent = mtk_clk_mux_get_parent,
	.set_parent = mtk_clk_mux_set_parent_setclr_lock,
	.determine_rate = mtk_clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(mtk_mux_clr_set_upd_ops);

const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
	.enable = mtk_clk_mux_enable_setclr,
	.disable = mtk_clk_mux_disable_setclr,
	.is_enabled = mtk_clk_mux_is_enabled,
	.get_parent = mtk_clk_mux_get_parent,
	.set_parent = mtk_clk_mux_set_parent_setclr_lock,
	.determine_rate = mtk_clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(mtk_mux_gate_clr_set_upd_ops);

static struct clk_hw *mtk_clk_register_mux(struct device *dev,
					   const struct mtk_mux *mux,
					   struct regmap *regmap,
					   spinlock_t *lock)
{
	struct mtk_clk_mux *clk_mux;
	struct clk_init_data init = {};
	int ret;

	clk_mux = kzalloc(sizeof(*clk_mux), GFP_KERNEL);
	if (!clk_mux)
		return ERR_PTR(-ENOMEM);

	init.name = mux->name;
	init.flags = mux->flags;
	init.parent_names = mux->parent_names;
	init.num_parents = mux->num_parents;
	init.ops = mux->ops;

	clk_mux->regmap = regmap;
	clk_mux->data = mux;
	clk_mux->lock = lock;
	clk_mux->hw.init = &init;

	ret = clk_hw_register(dev, &clk_mux->hw);
	if (ret) {
		kfree(clk_mux);
		return ERR_PTR(ret);
	}

	return &clk_mux->hw;
}

static void mtk_clk_unregister_mux(struct clk_hw *hw)
{
	struct mtk_clk_mux *mux;

	if (!hw)
		return;

	mux = to_mtk_clk_mux(hw);

	clk_hw_unregister(hw);
	kfree(mux);
}

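/*
 * mtk_clk_register_muxes - register an array of MediaTek mux clocks
 * @dev: device registering the clocks, passed to clk_hw_register()
 * @muxes: array of mux descriptions
 * @num: number of entries in @muxes
 * @node: device node used to look up the shared regmap
 * @lock: spinlock protecting the mux registers; may be NULL
 * @clk_data: onecell data the resulting clk_hw pointers are stored in
 *
 * Returns 0 on success; on failure the muxes registered by this call are
 * unregistered again and a negative error code is returned.
 */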
int mtk_clk_register_muxes(struct device *dev,
			   const struct mtk_mux *muxes,
			   int num, struct device_node *node,
			   spinlock_t *lock,
			   struct clk_hw_onecell_data *clk_data)
{
	struct regmap *regmap;
	struct clk_hw *hw;
	int i;

	regmap = device_node_to_regmap(node);
	if (IS_ERR(regmap)) {
		pr_err("Cannot find regmap for %pOF: %pe\n", node, regmap);
		return PTR_ERR(regmap);
	}

	for (i = 0; i < num; i++) {
		const struct mtk_mux *mux = &muxes[i];

		if (!IS_ERR_OR_NULL(clk_data->hws[mux->id])) {
			pr_warn("%pOF: Trying to register duplicate clock ID: %d\n",
				node, mux->id);
			continue;
		}

		hw = mtk_clk_register_mux(dev, mux, regmap, lock);

		if (IS_ERR(hw)) {
			pr_err("Failed to register clk %s: %pe\n", mux->name,
			       hw);
			goto err;
		}

		clk_data->hws[mux->id] = hw;
	}

	return 0;

err:
	while (--i >= 0) {
		const struct mtk_mux *mux = &muxes[i];

		if (IS_ERR_OR_NULL(clk_data->hws[mux->id]))
			continue;

		mtk_clk_unregister_mux(clk_data->hws[mux->id]);
		clk_data->hws[mux->id] = ERR_PTR(-ENOENT);
	}

	return PTR_ERR(hw);
}
EXPORT_SYMBOL_GPL(mtk_clk_register_muxes);

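/*
 * mtk_clk_unregister_muxes - unregister an array of MediaTek mux clocks
 * @muxes: array of mux descriptions previously registered
 * @num: number of entries in @muxes
 * @clk_data: onecell data holding the clk_hw pointers; entries that are
 *	      released are reset to ERR_PTR(-ENOENT)
 */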
void mtk_clk_unregister_muxes(const struct mtk_mux *muxes, int num,
			      struct clk_hw_onecell_data *clk_data)
{
	int i;

	if (!clk_data)
		return;

	for (i = num; i > 0; i--) {
		const struct mtk_mux *mux = &muxes[i - 1];

		if (IS_ERR_OR_NULL(clk_data->hws[mux->id]))
			continue;

		mtk_clk_unregister_mux(clk_data->hws[mux->id]);
		clk_data->hws[mux->id] = ERR_PTR(-ENOENT);
	}
}
EXPORT_SYMBOL_GPL(mtk_clk_unregister_muxes);

/*
 * This clock notifier is called when the frequency of the parent
 * PLL clock is to be changed. The idea is to switch the parent to a
 * stable clock, such as the main oscillator, while the PLL frequency
 * stabilizes.
 */
static int mtk_clk_mux_notifier_cb(struct notifier_block *nb,
				   unsigned long event, void *_data)
{
	struct clk_notifier_data *data = _data;
	struct clk_hw *hw = __clk_get_hw(data->clk);
	struct mtk_mux_nb *mux_nb = to_mtk_mux_nb(nb);
	int ret = 0;

	switch (event) {
	case PRE_RATE_CHANGE:
		mux_nb->original_index = mux_nb->ops->get_parent(hw);
		ret = mux_nb->ops->set_parent(hw, mux_nb->bypass_index);
		break;
	case POST_RATE_CHANGE:
	case ABORT_RATE_CHANGE:
		ret = mux_nb->ops->set_parent(hw, mux_nb->original_index);
		break;
	}

	return notifier_from_errno(ret);
}

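/*
 * devm_mtk_clk_mux_notifier_register - register the bypass rate notifier
 * @dev: device the notifier lifetime is bound to
 * @clk: clock whose rate changes should trigger the parent switch
 * @mux_nb: notifier data; the caller must fill in @ops and @bypass_index
 *	    before calling this function
 *
 * A minimal usage sketch (hypothetical caller, assuming parent index 0 is
 * the stable bypass clock for this mux):
 *
 *	static struct mtk_mux_nb my_mux_nb = {
 *		.ops = &mtk_mux_gate_clr_set_upd_ops,
 *		.bypass_index = 0,
 *	};
 *
 *	devm_mtk_clk_mux_notifier_register(dev, clk, &my_mux_nb);
 */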
int devm_mtk_clk_mux_notifier_register(struct device *dev, struct clk *clk,
				       struct mtk_mux_nb *mux_nb)
{
	mux_nb->nb.notifier_call = mtk_clk_mux_notifier_cb;

	return devm_clk_notifier_register(dev, clk, &mux_nb->nb);
}
EXPORT_SYMBOL_GPL(devm_mtk_clk_mux_notifier_register);

MODULE_LICENSE("GPL");