// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/clk-provider.h>

#include "clk.h"

#define SUPER_STATE_IDLE 0
#define SUPER_STATE_RUN 1
#define SUPER_STATE_IRQ 2
#define SUPER_STATE_FIQ 3

#define SUPER_STATE_SHIFT 28
#define SUPER_STATE_MASK ((BIT(SUPER_STATE_IDLE) | BIT(SUPER_STATE_RUN) | \
			   BIT(SUPER_STATE_IRQ) | BIT(SUPER_STATE_FIQ))	\
			  << SUPER_STATE_SHIFT)

#define SUPER_LP_DIV2_BYPASS (1 << 16)

#define super_state(s) (BIT(s) << SUPER_STATE_SHIFT)
#define super_state_to_src_shift(m, s) ((m)->width * (s))
#define super_state_to_src_mask(m) ((1 << (m)->width) - 1)

#define CCLK_SRC_PLLP_OUT0 4
#define CCLK_SRC_PLLP_OUT4 5

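/*
 * Decode the currently selected parent from the source field of the
 * active processor state. Only the RUN and IDLE states are expected
 * here; any other state is a bug.
 */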
static u8 clk_super_get_parent(struct clk_hw *hw)
{
	struct tegra_clk_super_mux *mux = to_clk_super_mux(hw);
	u32 val, state;
	u8 source, shift;

	val = readl_relaxed(mux->reg);

	state = val & SUPER_STATE_MASK;

	BUG_ON((state != super_state(SUPER_STATE_RUN)) &&
	       (state != super_state(SUPER_STATE_IDLE)));
	shift = (state == super_state(SUPER_STATE_IDLE)) ?
		super_state_to_src_shift(mux, SUPER_STATE_IDLE) :
		super_state_to_src_shift(mux, SUPER_STATE_RUN);

	source = (val >> shift) & super_state_to_src_mask(mux);

	/*
	 * If LP_DIV2_BYPASS is not set and PLLX is the current parent, then
	 * PLLX/2 is the input source to CCLKLP.
	 */
	if ((mux->flags & TEGRA_DIVIDER_2) && !(val & SUPER_LP_DIV2_BYPASS) &&
	    (source == mux->pllx_index))
		source = mux->div2_index;

	return source;
}

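/*
 * Program the new parent into the source field of the active (RUN or
 * IDLE) state. This also handles the PLLX vs. PLLX/2 bypass sequencing
 * for LP-mode super clocks and, on Tegra210 CPU clocks, keeps the PLLP
 * branches to the CPU enabled only while a PLLP source is selected.
 */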
static int clk_super_set_parent(struct clk_hw *hw, u8 index)
{
	struct tegra_clk_super_mux *mux = to_clk_super_mux(hw);
	u32 val, state;
	int err = 0;
	u8 parent_index, shift;
	unsigned long flags = 0;

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);

	val = readl_relaxed(mux->reg);
	state = val & SUPER_STATE_MASK;
	BUG_ON((state != super_state(SUPER_STATE_RUN)) &&
	       (state != super_state(SUPER_STATE_IDLE)));
	shift = (state == super_state(SUPER_STATE_IDLE)) ?
		super_state_to_src_shift(mux, SUPER_STATE_IDLE) :
		super_state_to_src_shift(mux, SUPER_STATE_RUN);

	/*
	 * For the LP-mode super clock, switching between the PLLX direct
	 * and divided-by-2 outputs is allowed only while a source other
	 * than PLLX is the current parent.
	 */
	if ((mux->flags & TEGRA_DIVIDER_2) && ((index == mux->div2_index) ||
					       (index == mux->pllx_index))) {
		parent_index = clk_super_get_parent(hw);
		if ((parent_index == mux->div2_index) ||
		    (parent_index == mux->pllx_index)) {
			err = -EINVAL;
			goto out;
		}

		val ^= SUPER_LP_DIV2_BYPASS;
		writel_relaxed(val, mux->reg);
		udelay(2);

		if (index == mux->div2_index)
			index = mux->pllx_index;
	}

	/* enable PLLP branches to CPU before selecting PLLP source */
	if ((mux->flags & TEGRA210_CPU_CLK) &&
	    (index == CCLK_SRC_PLLP_OUT0 || index == CCLK_SRC_PLLP_OUT4))
		tegra_clk_set_pllp_out_cpu(true);

	val &= ~(super_state_to_src_mask(mux) << shift);
	val |= (index & super_state_to_src_mask(mux)) << shift;

	writel_relaxed(val, mux->reg);
	udelay(2);

	/* disable PLLP branches to CPU if not used */
	if ((mux->flags & TEGRA210_CPU_CLK) &&
	    index != CCLK_SRC_PLLP_OUT0 && index != CCLK_SRC_PLLP_OUT4)
		tegra_clk_set_pllp_out_cpu(false);

out:
	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);

	return err;
}

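/*
 * Restore the parent selection cached by the clock framework after the
 * register contents have been lost, e.g. across a low-power state.
 */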
static void clk_super_mux_restore_context(struct clk_hw *hw)
{
	int parent_id;

	parent_id = clk_hw_get_parent_index(hw);
	if (WARN_ON(parent_id < 0))
		return;

	clk_super_set_parent(hw, parent_id);
}

static const struct clk_ops tegra_clk_super_mux_ops = {
	.get_parent = clk_super_get_parent,
	.set_parent = clk_super_set_parent,
	.restore_context = clk_super_mux_restore_context,
};

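/*
 * The rate operations below act on the fractional divider embedded in
 * the super clock (super->frac_div): they bind the divider's clk_hw to
 * this clock and forward the call to super->div_ops.
 */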
static long clk_super_round_rate(struct clk_hw *hw, unsigned long rate,
				 unsigned long *parent_rate)
{
	struct tegra_clk_super_mux *super = to_clk_super_mux(hw);
	struct clk_hw *div_hw = &super->frac_div.hw;

	__clk_hw_set_clk(div_hw, hw);

	return super->div_ops->round_rate(div_hw, rate, parent_rate);
}

static unsigned long clk_super_recalc_rate(struct clk_hw *hw,
					   unsigned long parent_rate)
{
	struct tegra_clk_super_mux *super = to_clk_super_mux(hw);
	struct clk_hw *div_hw = &super->frac_div.hw;

	__clk_hw_set_clk(div_hw, hw);

	return super->div_ops->recalc_rate(div_hw, parent_rate);
}

static int clk_super_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct tegra_clk_super_mux *super = to_clk_super_mux(hw);
	struct clk_hw *div_hw = &super->frac_div.hw;

	__clk_hw_set_clk(div_hw, hw);

	return super->div_ops->set_rate(div_hw, rate, parent_rate);
}

static void clk_super_restore_context(struct clk_hw *hw)
{
	struct tegra_clk_super_mux *super = to_clk_super_mux(hw);
	struct clk_hw *div_hw = &super->frac_div.hw;
	int parent_id;

	parent_id = clk_hw_get_parent_index(hw);
	if (WARN_ON(parent_id < 0))
		return;

	super->div_ops->restore_context(div_hw);
	clk_super_set_parent(hw, parent_id);
}

const struct clk_ops tegra_clk_super_ops = {
	.get_parent = clk_super_get_parent,
	.set_parent = clk_super_set_parent,
	.set_rate = clk_super_set_rate,
	.round_rate = clk_super_round_rate,
	.recalc_rate = clk_super_recalc_rate,
	.restore_context = clk_super_restore_context,
};

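/*
 * Register a super-clock mux without the built-in fractional divider.
 * width is the width in bits of one per-state source field, and
 * pllx_index/div2_index identify the PLLX and PLLX/2 inputs used for
 * the TEGRA_DIVIDER_2 handling above. lock may be NULL if the register
 * is not shared.
 */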
struct clk *tegra_clk_register_super_mux(const char *name,
		const char **parent_names, u8 num_parents,
		unsigned long flags, void __iomem *reg, u8 clk_super_flags,
		u8 width, u8 pllx_index, u8 div2_index, spinlock_t *lock)
{
	struct tegra_clk_super_mux *super;
	struct clk *clk;
	struct clk_init_data init;

	super = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &tegra_clk_super_mux_ops;
	init.flags = flags;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	super->reg = reg;
	super->pllx_index = pllx_index;
	super->div2_index = div2_index;
	super->lock = lock;
	super->width = width;
	super->flags = clk_super_flags;

	/* Data in .init is copied by clk_register(), so stack variable OK */
	super->hw.init = &init;

	clk = clk_register(NULL, &super->hw);
	if (IS_ERR(clk))
		kfree(super);

	return clk;
}

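/*
 * Register a super-clock mux together with the built-in fractional
 * divider used by the rate operations; the divider register is taken
 * to be at reg + 4, with a 7.1-format divisor field at bit 16.
 */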
struct clk *tegra_clk_register_super_clk(const char *name,
		const char * const *parent_names, u8 num_parents,
		unsigned long flags, void __iomem *reg, u8 clk_super_flags,
		spinlock_t *lock)
{
	struct tegra_clk_super_mux *super;
	struct clk *clk;
	struct clk_init_data init;

	super = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &tegra_clk_super_ops;
	init.flags = flags;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	super->reg = reg;
	super->lock = lock;
	super->width = 4;
	super->flags = clk_super_flags;
	super->frac_div.reg = reg + 4;
	super->frac_div.shift = 16;
	super->frac_div.width = 8;
	super->frac_div.frac_width = 1;
	super->frac_div.lock = lock;
	super->div_ops = &tegra_clk_frac_div_ops;

	/* Data in .init is copied by clk_register(), so stack variable OK */
	super->hw.init = &init;

	clk = clk_register(NULL, &super->hw);
	if (IS_ERR(clk))
		kfree(super);

	return clk;
}