xref: /kernel/linux/linux-5.10/drivers/clk/tegra/clk.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
 */

#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clk/tegra.h>
#include <linux/reset-controller.h>

#include <soc/tegra/fuse.h>

#include "clk.h"

/* Global data of Tegra CPU CAR ops */
static struct tegra_cpu_car_ops dummy_car_ops;
struct tegra_cpu_car_ops *tegra_cpu_car_ops = &dummy_car_ops;

int *periph_clk_enb_refcnt;
static int periph_banks;
static u32 *periph_state_ctx;
static struct clk **clks;
static int clk_num;
static struct clk_onecell_data clk_data;

/* Handlers for SoC-specific reset lines */
static int (*special_reset_assert)(unsigned long);
static int (*special_reset_deassert)(unsigned long);
static unsigned int num_special_reset;

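/*
 * Per-bank CAR register offsets. Each bank covers 32 peripherals and
 * provides the direct, set and clear variants of both the clock-enable
 * (CLK_OUT_ENB_*) and reset (RST_DEVICES_*) registers for banks L, H, U,
 * V, W, X and Y.
 */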
static const struct tegra_clk_periph_regs periph_regs[] = {
	[0] = {
		.enb_reg = CLK_OUT_ENB_L,
		.enb_set_reg = CLK_OUT_ENB_SET_L,
		.enb_clr_reg = CLK_OUT_ENB_CLR_L,
		.rst_reg = RST_DEVICES_L,
		.rst_set_reg = RST_DEVICES_SET_L,
		.rst_clr_reg = RST_DEVICES_CLR_L,
	},
	[1] = {
		.enb_reg = CLK_OUT_ENB_H,
		.enb_set_reg = CLK_OUT_ENB_SET_H,
		.enb_clr_reg = CLK_OUT_ENB_CLR_H,
		.rst_reg = RST_DEVICES_H,
		.rst_set_reg = RST_DEVICES_SET_H,
		.rst_clr_reg = RST_DEVICES_CLR_H,
	},
	[2] = {
		.enb_reg = CLK_OUT_ENB_U,
		.enb_set_reg = CLK_OUT_ENB_SET_U,
		.enb_clr_reg = CLK_OUT_ENB_CLR_U,
		.rst_reg = RST_DEVICES_U,
		.rst_set_reg = RST_DEVICES_SET_U,
		.rst_clr_reg = RST_DEVICES_CLR_U,
	},
	[3] = {
		.enb_reg = CLK_OUT_ENB_V,
		.enb_set_reg = CLK_OUT_ENB_SET_V,
		.enb_clr_reg = CLK_OUT_ENB_CLR_V,
		.rst_reg = RST_DEVICES_V,
		.rst_set_reg = RST_DEVICES_SET_V,
		.rst_clr_reg = RST_DEVICES_CLR_V,
	},
	[4] = {
		.enb_reg = CLK_OUT_ENB_W,
		.enb_set_reg = CLK_OUT_ENB_SET_W,
		.enb_clr_reg = CLK_OUT_ENB_CLR_W,
		.rst_reg = RST_DEVICES_W,
		.rst_set_reg = RST_DEVICES_SET_W,
		.rst_clr_reg = RST_DEVICES_CLR_W,
	},
	[5] = {
		.enb_reg = CLK_OUT_ENB_X,
		.enb_set_reg = CLK_OUT_ENB_SET_X,
		.enb_clr_reg = CLK_OUT_ENB_CLR_X,
		.rst_reg = RST_DEVICES_X,
		.rst_set_reg = RST_DEVICES_SET_X,
		.rst_clr_reg = RST_DEVICES_CLR_X,
	},
	[6] = {
		.enb_reg = CLK_OUT_ENB_Y,
		.enb_set_reg = CLK_OUT_ENB_SET_Y,
		.enb_clr_reg = CLK_OUT_ENB_CLR_Y,
		.rst_reg = RST_DEVICES_Y,
		.rst_set_reg = RST_DEVICES_SET_Y,
		.rst_clr_reg = RST_DEVICES_CLR_Y,
	},
};

static void __iomem *clk_base;

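/*
 * Reset controller .assert callback: IDs below periph_banks * 32 map to
 * bit (id % 32) of the RST_DEVICES_SET register of bank (id / 32); IDs
 * directly above that range are handed to the SoC-specific special reset
 * handler registered via tegra_init_special_resets().
 */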
static int tegra_clk_rst_assert(struct reset_controller_dev *rcdev,
		unsigned long id)
{
	/*
	 * If the peripheral is on the APB bus, we must read from the APB
	 * bus to flush the write operation; this avoids the peripheral
	 * being accessed after its clock has been disabled. Since the
	 * reset driver has no knowledge of which reset IDs represent
	 * which devices, simply do this all the time.
	 */
	tegra_read_chipid();

	if (id < periph_banks * 32) {
		writel_relaxed(BIT(id % 32),
			       clk_base + periph_regs[id / 32].rst_set_reg);
		return 0;
	} else if (id < periph_banks * 32 + num_special_reset) {
		return special_reset_assert(id);
	}

	return -EINVAL;
}

static int tegra_clk_rst_deassert(struct reset_controller_dev *rcdev,
		unsigned long id)
{
	if (id < periph_banks * 32) {
		writel_relaxed(BIT(id % 32),
			       clk_base + periph_regs[id / 32].rst_clr_reg);
		return 0;
	} else if (id < periph_banks * 32 + num_special_reset) {
		return special_reset_deassert(id);
	}

	return -EINVAL;
}

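/* Pulse a reset line: assert, wait 1us for propagation, then de-assert. */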
static int tegra_clk_rst_reset(struct reset_controller_dev *rcdev,
		unsigned long id)
{
	int err;

	err = tegra_clk_rst_assert(rcdev, id);
	if (err)
		return err;

	udelay(1);

	return tegra_clk_rst_deassert(rcdev, id);
}

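/*
 * Look up the register bank for a peripheral clock ID (bank = clkid / 32).
 * Out-of-range IDs trigger a WARN and return NULL.
 */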
const struct tegra_clk_periph_regs *get_reg_bank(int clkid)
{
	int reg_bank = clkid / 32;

	if (reg_bank < periph_banks)
		return &periph_regs[reg_bank];

	WARN_ON(1);
	return NULL;
}

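/*
 * Gate or ungate the PLLP output feeding the CPU complex by toggling the
 * CLK_ENB_PLLP_OUT_CPU bit in the CLK_OUT_ENB_Y bank.
 */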
void tegra_clk_set_pllp_out_cpu(bool enable)
{
	u32 val;

	val = readl_relaxed(clk_base + CLK_OUT_ENB_Y);
	if (enable)
		val |= CLK_ENB_PLLP_OUT_CPU;
	else
		val &= ~CLK_ENB_PLLP_OUT_CPU;

	writel_relaxed(val, clk_base + CLK_OUT_ENB_Y);
}

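/*
 * Save the clock-enable state of every peripheral bank into the first half
 * of periph_state_ctx and the reset state into the second half, so that
 * tegra_clk_periph_resume() can restore both after a system suspend.
 */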
void tegra_clk_periph_suspend(void)
{
	unsigned int i, idx;

	idx = 0;
	for (i = 0; i < periph_banks; i++, idx++)
		periph_state_ctx[idx] =
			readl_relaxed(clk_base + periph_regs[i].enb_reg);

	for (i = 0; i < periph_banks; i++, idx++)
		periph_state_ctx[idx] =
			readl_relaxed(clk_base + periph_regs[i].rst_reg);
}

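/*
 * Restore the saved clock-enable state first, then the saved reset state.
 * fence_udelay() (see clk.h) reads the CAR back so the preceding writes
 * are flushed before each delay elapses.
 */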
void tegra_clk_periph_resume(void)
{
	unsigned int i, idx;

	idx = 0;
	for (i = 0; i < periph_banks; i++, idx++)
		writel_relaxed(periph_state_ctx[idx],
			       clk_base + periph_regs[i].enb_reg);
	/*
	 * All non-boot peripherals will be in reset state on resume.
	 * Wait for 5us of reset propagation delay before de-asserting
	 * the peripherals based on the saved context.
	 */
	fence_udelay(5, clk_base);

	for (i = 0; i < periph_banks; i++, idx++)
		writel_relaxed(periph_state_ctx[idx],
			       clk_base + periph_regs[i].rst_reg);

	fence_udelay(2, clk_base);
}

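/* Allocate two state words per bank: one for enables, one for resets. */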
static int tegra_clk_periph_ctx_init(int banks)
{
	periph_state_ctx = kcalloc(2 * banks, sizeof(*periph_state_ctx),
				   GFP_KERNEL);
	if (!periph_state_ctx)
		return -ENOMEM;

	return 0;
}

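/*
 * tegra_clk_init - allocate the common clock bookkeeping
 *
 * Called by the SoC-specific driver with the mapped CAR registers, the
 * number of clock IDs and the number of peripheral register banks.
 * Allocates the peripheral enable refcounts, the clks[] array handed back
 * to the caller and, with CONFIG_PM_SLEEP, the suspend context.
 *
 * Illustrative call only (the constants are the Tegra20 ones and the bank
 * count is an assumption; the real call sites live in the SoC drivers):
 *
 *	clks = tegra_clk_init(clk_base, TEGRA20_CLK_CLK_MAX, 3);
 */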
struct clk ** __init tegra_clk_init(void __iomem *regs, int num, int banks)
{
	clk_base = regs;

	if (WARN_ON(banks > ARRAY_SIZE(periph_regs)))
		return NULL;

	periph_clk_enb_refcnt = kcalloc(32 * banks,
					sizeof(*periph_clk_enb_refcnt),
					GFP_KERNEL);
	if (!periph_clk_enb_refcnt)
		return NULL;

	periph_banks = banks;

	clks = kcalloc(num, sizeof(struct clk *), GFP_KERNEL);
	if (!clks) {
		kfree(periph_clk_enb_refcnt);
		return NULL;
	}

	clk_num = num;

	if (IS_ENABLED(CONFIG_PM_SLEEP)) {
		if (tegra_clk_periph_ctx_init(banks)) {
			kfree(periph_clk_enb_refcnt);
			kfree(clks);
			return NULL;
		}
	}

	return clks;
}

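/*
 * Add extra clkdev lookups for clocks that need to be visible under more
 * than one (dev_id, con_id) pair. The list is terminated by an entry whose
 * clk_id is >= clk_max.
 */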
void __init tegra_init_dup_clks(struct tegra_clk_duplicate *dup_list,
				struct clk *clks[], int clk_max)
{
	struct clk *clk;

	for (; dup_list->clk_id < clk_max; dup_list++) {
		clk = clks[dup_list->clk_id];
		dup_list->lookup.clk = clk;
		clkdev_add(&dup_list->lookup);
	}
}

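/*
 * Apply a SoC-provided init table: optionally reparent, set the rate and
 * enable each listed clock. The table is terminated by an entry whose
 * clk_id is >= clk_max.
 *
 * Purely illustrative sketch of such a table, loosely modelled on the
 * Tegra20 driver (the IDs and rates are examples, not taken from this
 * file); the fields are { clk_id, parent_id, rate, state }:
 *
 *	static struct tegra_clk_init_table init_table[] __initdata = {
 *		{ TEGRA20_CLK_PLL_P, TEGRA20_CLK_CLK_MAX, 216000000, 1 },
 *		{ TEGRA20_CLK_UARTD, TEGRA20_CLK_PLL_P, 0, 0 },
 *		{ TEGRA20_CLK_CLK_MAX, TEGRA20_CLK_CLK_MAX, 0, 0 },
 *	};
 *
 *	tegra_init_from_table(init_table, clks, TEGRA20_CLK_CLK_MAX);
 */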
void __init tegra_init_from_table(struct tegra_clk_init_table *tbl,
				  struct clk *clks[], int clk_max)
{
	struct clk *clk;

	for (; tbl->clk_id < clk_max; tbl++) {
		clk = clks[tbl->clk_id];
		if (IS_ERR_OR_NULL(clk)) {
			pr_err("%s: invalid entry %ld in clks array for id %d\n",
			       __func__, PTR_ERR(clk), tbl->clk_id);
			WARN_ON(1);

			continue;
		}

		if (tbl->parent_id < clk_max) {
			struct clk *parent = clks[tbl->parent_id];

			if (clk_set_parent(clk, parent)) {
				pr_err("%s: Failed to set parent %s of %s\n",
				       __func__, __clk_get_name(parent),
				       __clk_get_name(clk));
				WARN_ON(1);
			}
		}

		if (tbl->rate)
			if (clk_set_rate(clk, tbl->rate)) {
				pr_err("%s: Failed to set rate %lu of %s\n",
				       __func__, tbl->rate,
				       __clk_get_name(clk));
				WARN_ON(1);
			}

		if (tbl->state)
			if (clk_prepare_enable(clk)) {
				pr_err("%s: Failed to enable %s\n", __func__,
				       __clk_get_name(clk));
				WARN_ON(1);
			}
	}
}

static const struct reset_control_ops rst_ops = {
	.assert = tegra_clk_rst_assert,
	.deassert = tegra_clk_rst_deassert,
	.reset = tegra_clk_rst_reset,
};

static struct reset_controller_dev rst_ctlr = {
	.ops = &rst_ops,
	.owner = THIS_MODULE,
	.of_reset_n_cells = 1,
};

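/*
 * Publish the clocks and resets to the rest of the kernel: flag clocks
 * that failed to register or were never registered, install the onecell
 * clock provider on the CAR node and register the reset controller
 * covering periph_banks * 32 peripheral resets plus the special resets.
 */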
void __init tegra_add_of_provider(struct device_node *np,
				  void *clk_src_onecell_get)
{
	int i;

	for (i = 0; i < clk_num; i++) {
		if (IS_ERR(clks[i]))
			pr_err("Tegra clk %d: register failed with %ld\n",
			       i, PTR_ERR(clks[i]));

		if (!clks[i])
			clks[i] = ERR_PTR(-EINVAL);
	}

	clk_data.clks = clks;
	clk_data.clk_num = clk_num;
	of_clk_add_provider(np, clk_src_onecell_get, &clk_data);

	rst_ctlr.of_node = np;
	rst_ctlr.nr_resets = periph_banks * 32 + num_special_reset;
	reset_controller_register(&rst_ctlr);
}

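/*
 * Let the SoC driver install handlers for reset lines that are not plain
 * RST_DEVICES bits; their IDs follow immediately after the peripheral
 * reset range.
 */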
void __init tegra_init_special_resets(unsigned int num,
				      int (*assert)(unsigned long),
				      int (*deassert)(unsigned long))
{
	num_special_reset = num;
	special_reset_assert = assert;
	special_reset_deassert = deassert;
}

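/*
 * Register clkdev lookups for the given device clocks and add a
 * "tegra-clk-debug" lookup for every successfully registered clock.
 */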
void __init tegra_register_devclks(struct tegra_devclk *dev_clks, int num)
{
	int i;

	for (i = 0; i < num; i++, dev_clks++)
		clk_register_clkdev(clks[dev_clks->dt_id], dev_clks->con_id,
				dev_clks->dev_id);

	for (i = 0; i < clk_num; i++) {
		if (!IS_ERR_OR_NULL(clks[i]))
			clk_register_clkdev(clks[i], __clk_get_name(clks[i]),
				"tegra-clk-debug");
	}
}

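/*
 * Return the clks[] slot for a clock that is marked as present on this
 * SoC, or NULL if it is not.
 */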
struct clk ** __init tegra_lookup_dt_id(int clk_id,
					struct tegra_clk *tegra_clk)
{
	if (tegra_clk[clk_id].present)
		return &clks[tegra_clk[clk_id].dt_id];
	else
		return NULL;
}

tegra_clk_apply_init_table_func tegra_clk_apply_init_table;

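/*
 * SoC drivers point tegra_clk_apply_init_table at their init-table handler
 * during early clock registration; the arch_initcall below invokes it once
 * the clock framework is up.
 */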
static int __init tegra_clocks_apply_init_table(void)
{
	if (!tegra_clk_apply_init_table)
		return 0;

	tegra_clk_apply_init_table();

	return 0;
}
arch_initcall(tegra_clocks_apply_init_table);