// SPDX-License-Identifier: GPL-2.0
/*
 * RZ/G2L Clock Pulse Generator
 *
 * Copyright (C) 2021 Renesas Electronics Corp.
 *
 * Based on renesas-cpg-mssr.c
 *
 * Copyright (C) 2015 Glider bvba
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/units.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "rzg2l-cpg.h"

#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif

#define GET_SHIFT(val)		((val >> 12) & 0xff)
#define GET_WIDTH(val)		((val >> 8) & 0xf)

#define KDIV(val)		((s16)FIELD_GET(GENMASK(31, 16), val))
#define MDIV(val)		FIELD_GET(GENMASK(15, 6), val)
#define PDIV(val)		FIELD_GET(GENMASK(5, 0), val)
#define SDIV(val)		FIELD_GET(GENMASK(2, 0), val)

#define CLK_ON_R(reg)		(reg)
#define CLK_MON_R(reg)		(0x180 + (reg))
#define CLK_RST_R(reg)		(reg)
#define CLK_MRST_R(reg)		(0x180 + (reg))

#define GET_REG_OFFSET(val)		((val >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val)	((val >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val)	((val >> 12) & 0xfff)
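
/*
 * Layout of the core clock "conf" word, as decoded by the helpers above
 * (the packing itself lives in rzg2l-cpg.h):
 *
 *   [31:20]  register offset	(GET_REG_OFFSET)
 *   [19:12]  bit shift		(GET_SHIFT)
 *   [11:8]   field width	(GET_WIDTH)
 *
 * For CLK_TYPE_SAM_PLL clocks the conf word instead packs the two SAMPLL
 * register offsets, extracted by GET_REG_SAMPLL_CLK1()/GET_REG_SAMPLL_CLK2().
 */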

#define MAX_VCLK_FREQ		(148500000)

struct sd_hw_data {
	struct clk_hw hw;
	u32 conf;
	struct rzg2l_cpg_priv *priv;
};

#define to_sd_hw_data(_hw)	container_of(_hw, struct sd_hw_data, hw)

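/**
 * struct rzg2l_pll5_param - PLL5 settings used to generate FOUTPOSTDIV
 * @pl5_fracin: fractional part of the multiplication ratio (24-bit fraction)
 * @pl5_refdiv: reference clock divider
 * @pl5_intin: integer part of the multiplication ratio
 * @pl5_postdiv1: post divider 1
 * @pl5_postdiv2: post divider 2
 * @pl5_spread: spread spectrum (SSCG) modulation setting
 */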
struct rzg2l_pll5_param {
	u32 pl5_fracin;
	u8 pl5_refdiv;
	u8 pl5_intin;
	u8 pl5_postdiv1;
	u8 pl5_postdiv2;
	u8 pl5_spread;
};

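/**
 * struct rzg2l_pll5_mux_dsi_div_param - PLL5 MUX and DSI divider settings
 * @clksrc: PLL5_4 MUX clock source selection
 * @dsi_div_a: DIV_DSI_A setting (output is divided by 1 << @dsi_div_a)
 * @dsi_div_b: DIV_DSI_B setting (output is divided by @dsi_div_b + 1)
 */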
struct rzg2l_pll5_mux_dsi_div_param {
	u8 clksrc;
	u8 dsi_div_a;
	u8 dsi_div_b;
};

/**
 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
 *
 * @rcdev: Reset controller entity
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @info: Pointer to platform data
 * @genpd: PM domain
 * @mux_dsi_div_params: pll5 mux and dsi div parameters
 */
struct rzg2l_cpg_priv {
	struct reset_controller_dev rcdev;
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;

	struct clk **clks;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;

	const struct rzg2l_cpg_info *info;

	struct generic_pm_domain genpd;

	struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
};

static void rzg2l_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}

static struct clk * __init
rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
			   struct clk **clks,
			   void __iomem *base,
			   struct rzg2l_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	const struct clk *parent;
	const char *parent_name;
	struct clk_hw *clk_hw;

	parent = clks[core->parent & 0xffff];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	if (core->dtable)
		clk_hw = clk_hw_register_divider_table(dev, core->name,
						       parent_name, 0,
						       base + GET_REG_OFFSET(core->conf),
						       GET_SHIFT(core->conf),
						       GET_WIDTH(core->conf),
						       core->flag,
						       core->dtable,
						       &priv->rmw_lock);
	else
		clk_hw = clk_hw_register_divider(dev, core->name,
						 parent_name, 0,
						 base + GET_REG_OFFSET(core->conf),
						 GET_SHIFT(core->conf),
						 GET_WIDTH(core->conf),
						 core->flag, &priv->rmw_lock);

	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}

static struct clk * __init
rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
			   void __iomem *base,
			   struct rzg2l_cpg_priv *priv)
{
	const struct clk_hw *clk_hw;

	clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
					  core->parent_names, core->num_parents,
					  core->flag,
					  base + GET_REG_OFFSET(core->conf),
					  GET_SHIFT(core->conf),
					  GET_WIDTH(core->conf),
					  core->mux_flags, &priv->rmw_lock);
	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}

static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct sd_hw_data *hwdata = to_sd_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;
	u32 off = GET_REG_OFFSET(hwdata->conf);
	u32 shift = GET_SHIFT(hwdata->conf);
	const u32 clk_src_266 = 2;
	u32 msk, val, bitmask;
	unsigned long flags;
	int ret;

	/*
	 * As per the HW manual, we should not switch directly between 533 MHz
	 * and 400 MHz. To change the setting from 2'b01 (533 MHz) to 2'b10
	 * (400 MHz) or vice versa, switch to 2'b11 (266 MHz) first, and then
	 * switch to the target setting (2'b01 (533 MHz) or 2'b10 (400 MHz)).
	 * Writing a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
	 * switching register is prohibited.
	 * The clock mux has 3 input clocks (533 MHz, 400 MHz, and 266 MHz),
	 * and the index-to-value mapping is done by adding 1 to the index.
	 */
	bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16;
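	/*
	 * The upper halfword of the SEL_SDHIx_SET registers acts as a per-bit
	 * write enable for the lower halfword (the same convention the CLK_ON
	 * registers use, see rzg2l_mod_clock_endisable()), hence "bitmask" is
	 * shifted up by 16 above.
	 */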
	msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;
	spin_lock_irqsave(&priv->rmw_lock, flags);
	if (index != clk_src_266) {
		writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off);

		ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
						!(val & msk), 10,
						CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
		if (ret)
			goto unlock;
	}

	writel(bitmask | ((index + 1) << shift), priv->base + off);

	ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
					!(val & msk), 10,
					CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
unlock:
	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "failed to switch clk source\n");

	return ret;
}

static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
{
	struct sd_hw_data *hwdata = to_sd_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;
	u32 val = readl(priv->base + GET_REG_OFFSET(hwdata->conf));

	val >>= GET_SHIFT(hwdata->conf);
	val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0);

	return val ? val - 1 : 0;
}

static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
	.determine_rate = __clk_mux_determine_rate_closest,
	.set_parent	= rzg2l_cpg_sd_clk_mux_set_parent,
	.get_parent	= rzg2l_cpg_sd_clk_mux_get_parent,
};

static struct clk * __init
rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
			      void __iomem *base,
			      struct rzg2l_cpg_priv *priv)
{
	struct sd_hw_data *clk_hw_data;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;
	clk_hw_data->conf = core->conf;

	init.name = GET_SHIFT(core->conf) ? "sd1" : "sd0";
	init.ops = &rzg2l_cpg_sd_clk_mux_ops;
	init.flags = 0;
	init.num_parents = core->num_parents;
	init.parent_names = core->parent_names;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw->clk;
}

static unsigned long
rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
			       unsigned long rate)
{
	unsigned long foutpostdiv_rate;

	params->pl5_intin = rate / MEGA;
	params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
	params->pl5_refdiv = 2;
	params->pl5_postdiv1 = 1;
	params->pl5_postdiv2 = 1;
	params->pl5_spread = 0x16;

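	/*
	 * FOUTPOSTDIV = (EXTAL_FREQ_IN_MEGA_HZ * MEGA / REFDIV) *
	 *		 (INTIN + FRACIN / 2^24) / (POSTDIV1 * POSTDIV2)
	 *
	 * The expression below evaluates this in integer arithmetic, with
	 * FRACIN treated as a 24-bit fixed-point fraction.
	 */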
	foutpostdiv_rate =
		EXTAL_FREQ_IN_MEGA_HZ * MEGA / params->pl5_refdiv *
		((((params->pl5_intin << 24) + params->pl5_fracin)) >> 24) /
		(params->pl5_postdiv1 * params->pl5_postdiv2);

	return foutpostdiv_rate;
}

struct dsi_div_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_dsi_div_hw_data(_hw)	container_of(_hw, struct dsi_div_hw_data, hw)

static unsigned long rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	unsigned long rate = dsi_div->rate;

	if (!rate)
		rate = parent_rate;

	return rate;
}

static unsigned long rzg2l_cpg_get_vclk_parent_rate(struct clk_hw *hw,
						    unsigned long rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;
	struct rzg2l_pll5_param params;
	unsigned long parent_rate;

	parent_rate = rzg2l_cpg_get_foutpostdiv_rate(&params, rate);

	if (priv->mux_dsi_div_params.clksrc)
		parent_rate /= 2;

	return parent_rate;
}

static int rzg2l_cpg_dsi_div_determine_rate(struct clk_hw *hw,
					    struct clk_rate_request *req)
{
	if (req->rate > MAX_VCLK_FREQ)
		req->rate = MAX_VCLK_FREQ;

	req->best_parent_rate = rzg2l_cpg_get_vclk_parent_rate(hw, req->rate);

	return 0;
}

static int rzg2l_cpg_dsi_div_set_rate(struct clk_hw *hw,
				      unsigned long rate,
				      unsigned long parent_rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;

	/*
	 * MUX -->DIV_DSI_{A,B} -->M3 -->VCLK
	 *
	 * Based on the dot clock, the DSI divider clock sets the divider value,
	 * calculates the PLL parameters for generating FOUTPOSTDIV and the clock
	 * source for the MUX, and propagates that info to the parents.
	 */

	if (!rate || rate > MAX_VCLK_FREQ)
		return -EINVAL;

	dsi_div->rate = rate;
	writel(CPG_PL5_SDIV_DIV_DSI_A_WEN | CPG_PL5_SDIV_DIV_DSI_B_WEN |
	       (priv->mux_dsi_div_params.dsi_div_a << 0) |
	       (priv->mux_dsi_div_params.dsi_div_b << 8),
	       priv->base + CPG_PL5_SDIV);

	return 0;
}

static const struct clk_ops rzg2l_cpg_dsi_div_ops = {
	.recalc_rate = rzg2l_cpg_dsi_div_recalc_rate,
	.determine_rate = rzg2l_cpg_dsi_div_determine_rate,
	.set_rate = rzg2l_cpg_dsi_div_set_rate,
};

static struct clk * __init
rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
			       struct clk **clks,
			       struct rzg2l_cpg_priv *priv)
{
	struct dsi_div_hw_data *clk_hw_data;
	const struct clk *parent;
	const char *parent_name;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	parent = clks[core->parent & 0xffff];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = &rzg2l_cpg_dsi_div_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw->clk;
}

struct pll5_mux_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll5_mux_hw_data(_hw)	container_of(_hw, struct pll5_mux_hw_data, hw)

static int rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw *hw,
						   struct clk_rate_request *req)
{
	struct clk_hw *parent;
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	parent = clk_hw_get_parent_by_index(hw, priv->mux_dsi_div_params.clksrc);
	req->best_parent_hw = parent;
	req->best_parent_rate = req->rate;

	return 0;
}

static int rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	/*
	 * FOUTPOSTDIV--->|
	 *  |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 *  |--FOUT1PH0-->|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and the clock source for the MUX. It propagates that info to
	 * pll5_4_clk_mux, which sets the clock source for the DSI divider clock.
	 */

	writel(CPG_OTHERFUNC1_REG_RES0_ON_WEN | index,
	       priv->base + CPG_OTHERFUNC1_REG);

	return 0;
}

static u8 rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw *hw)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	return readl(priv->base + GET_REG_OFFSET(hwdata->conf));
}

static const struct clk_ops rzg2l_cpg_pll5_4_clk_mux_ops = {
	.determine_rate = rzg2l_cpg_pll5_4_clk_mux_determine_rate,
	.set_parent	= rzg2l_cpg_pll5_4_clk_mux_set_parent,
	.get_parent	= rzg2l_cpg_pll5_4_clk_mux_get_parent,
};

static struct clk * __init
rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk *core,
				  struct rzg2l_cpg_priv *priv)
{
	struct pll5_mux_hw_data *clk_hw_data;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;
	clk_hw_data->conf = core->conf;

	init.name = core->name;
	init.ops = &rzg2l_cpg_pll5_4_clk_mux_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.num_parents = core->num_parents;
	init.parent_names = core->parent_names;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw->clk;
}

struct sipll5 {
	struct clk_hw hw;
	u32 conf;
	unsigned long foutpostdiv_rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_sipll5(_hw)	container_of(_hw, struct sipll5, hw)

static unsigned long rzg2l_cpg_get_vclk_rate(struct clk_hw *hw,
					     unsigned long rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	struct rzg2l_cpg_priv *priv = sipll5->priv;
	unsigned long vclk;

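	/*
	 * VCLK = rate / (2^dsi_div_a * (dsi_div_b + 1)), halved once more
	 * when the PLL5_4 MUX clock source (clksrc) is selected, mirroring
	 * rzg2l_cpg_get_vclk_parent_rate().
	 */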
	vclk = rate / ((1 << priv->mux_dsi_div_params.dsi_div_a) *
		       (priv->mux_dsi_div_params.dsi_div_b + 1));

	if (priv->mux_dsi_div_params.clksrc)
		vclk /= 2;

	return vclk;
}

static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	unsigned long pll5_rate = sipll5->foutpostdiv_rate;

	if (!pll5_rate)
		pll5_rate = parent_rate;

	return pll5_rate;
}

static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
					unsigned long rate,
					unsigned long *parent_rate)
{
	return rate;
}

static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
				     unsigned long rate,
				     unsigned long parent_rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	struct rzg2l_cpg_priv *priv = sipll5->priv;
	struct rzg2l_pll5_param params;
	unsigned long vclk_rate;
	int ret;
	u32 val;

	/*
	 *  OSC --> PLL5 --> FOUTPOSTDIV-->|
	 *                   |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 *                   |--FOUT1PH0-->|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and the PLL5 parameters for generating FOUTPOSTDIV. It propagates
	 * that info to sipll5, which programs those parameters into the PLL.
	 *
	 * OSC --> PLL5 --> FOUTPOSTDIV
	 */

	if (!rate)
		return -EINVAL;

	vclk_rate = rzg2l_cpg_get_vclk_rate(hw, rate);
	sipll5->foutpostdiv_rate =
		rzg2l_cpg_get_foutpostdiv_rate(&params, vclk_rate);

	/* Put PLL5 into standby mode */
	writel(CPG_SIPLL5_STBY_RESETB_WEN, priv->base + CPG_SIPLL5_STBY);
	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
				 !(val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
	if (ret) {
		dev_err(priv->dev, "failed to release pll5 lock\n");
		return ret;
	}

	/* Output clock setting 1 */
	writel((params.pl5_postdiv1 << 0) | (params.pl5_postdiv2 << 4) |
	       (params.pl5_refdiv << 8), priv->base + CPG_SIPLL5_CLK1);

	/* Output clock setting, SSCG modulation value setting 3 */
	writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);

	/* Output clock setting 4 */
	writel(CPG_SIPLL5_CLK4_RESV_LSB | (params.pl5_intin << 16),
	       priv->base + CPG_SIPLL5_CLK4);

	/* Output clock setting 5 */
	writel(params.pl5_spread, priv->base + CPG_SIPLL5_CLK5);

	/* PLL normal mode setting */
	writel(CPG_SIPLL5_STBY_DOWNSPREAD_WEN | CPG_SIPLL5_STBY_SSCG_EN_WEN |
	       CPG_SIPLL5_STBY_RESETB_WEN | CPG_SIPLL5_STBY_RESETB,
	       priv->base + CPG_SIPLL5_STBY);

	/* PLL normal mode transition, output clock stability check */
	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
				 (val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
	if (ret) {
		dev_err(priv->dev, "failed to lock pll5\n");
		return ret;
	}

	return 0;
}

static const struct clk_ops rzg2l_cpg_sipll5_ops = {
	.recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
	.round_rate = rzg2l_cpg_sipll5_round_rate,
	.set_rate = rzg2l_cpg_sipll5_set_rate,
};

static struct clk * __init
rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
			  struct clk **clks,
			  struct rzg2l_cpg_priv *priv)
{
	const struct clk *parent;
	struct clk_init_data init;
	const char *parent_name;
	struct sipll5 *sipll5;
	struct clk_hw *clk_hw;
	int ret;

	parent = clks[core->parent & 0xffff];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	sipll5 = devm_kzalloc(priv->dev, sizeof(*sipll5), GFP_KERNEL);
	if (!sipll5)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	parent_name = __clk_get_name(parent);
	init.ops = &rzg2l_cpg_sipll5_ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	sipll5->hw.init = &init;
	sipll5->conf = core->conf;
	sipll5->priv = priv;

	writel(CPG_SIPLL5_STBY_SSCG_EN_WEN | CPG_SIPLL5_STBY_RESETB_WEN |
	       CPG_SIPLL5_STBY_RESETB, priv->base + CPG_SIPLL5_STBY);

	clk_hw = &sipll5->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	priv->mux_dsi_div_params.clksrc = 1; /* Use clk src 1 for DSI */
	priv->mux_dsi_div_params.dsi_div_a = 1; /* Divided by 2 */
	priv->mux_dsi_div_params.dsi_div_b = 2; /* Divided by 3 */

	return clk_hw->clk;
}

struct pll_clk {
	struct clk_hw hw;
	unsigned int conf;
	unsigned int type;
	void __iomem *base;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)

static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzg2l_cpg_priv *priv = pll_clk->priv;
	unsigned int val1, val2;
	u64 rate;

	if (pll_clk->type != CLK_TYPE_SAM_PLL)
		return parent_rate;

	val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
	val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));

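	/*
	 * rate = parent_rate * (M + K / 65536) / (P * 2^S)
	 *
	 * with M, K and P taken from SAMPLL CLK1 (K is a signed 16-bit value)
	 * and S from CLK2; the multiply-shift below evaluates this in 16.16
	 * fixed point.
	 */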
	rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1),
			       16 + SDIV(val2));

	return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1));
}

static const struct clk_ops rzg2l_cpg_pll_ops = {
	.recalc_rate = rzg2l_cpg_pll_clk_recalc_rate,
};

static struct clk * __init
rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
			   struct clk **clks,
			   void __iomem *base,
			   struct rzg2l_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	const struct clk *parent;
	struct clk_init_data init;
	const char *parent_name;
	struct pll_clk *pll_clk;

	parent = clks[core->parent & 0xffff];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return ERR_PTR(-ENOMEM);

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = &rzg2l_cpg_pll_ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll_clk->hw.init = &init;
	pll_clk->conf = core->conf;
	pll_clk->base = base;
	pll_clk->priv = priv;
	pll_clk->type = core->type;

	return clk_register(NULL, &pll_clk->hw);
}

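/*
 * Clock consumers reference CPG clocks with a two-cell specifier: the first
 * cell selects the group (CPG_CORE or CPG_MOD from
 * <dt-bindings/clock/renesas-cpg-mssr.h>), the second the clock index within
 * that group, e.g. (illustrative DT snippet, identifiers as defined in the
 * SoC's dt-binding header):
 *
 *	clocks = <&cpg CPG_MOD R9A07G044_SCIF0_CLK_PCK>;
 */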
static struct clk
*rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
			       void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct rzg2l_cpg_priv *priv = data;
	struct device *dev = priv->dev;
	const char *type;
	struct clk *clk;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (clkidx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + clkidx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld\n", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}

static void __init
rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
			    const struct rzg2l_cpg_info *info,
			    struct rzg2l_cpg_priv *priv)
{
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!core->name) {
		/* Skip NULLified clock */
		return;
	}

	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk = clk_register_fixed_factor(NULL, core->name,
						parent_name, CLK_SET_RATE_PARENT,
						core->mult, div);
		break;
	case CLK_TYPE_SAM_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv->clks,
						 priv->base, priv);
		break;
	case CLK_TYPE_SIPLL5:
		clk = rzg2l_cpg_sipll5_register(core, priv->clks, priv);
		break;
	case CLK_TYPE_DIV:
		clk = rzg2l_cpg_div_clk_register(core, priv->clks,
						 priv->base, priv);
		break;
	case CLK_TYPE_MUX:
		clk = rzg2l_cpg_mux_clk_register(core, priv->base, priv);
		break;
	case CLK_TYPE_SD_MUX:
		clk = rzg2l_cpg_sd_mux_clk_register(core, priv->base, priv);
		break;
	case CLK_TYPE_PLL5_4_MUX:
		clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_DSI_DIV:
		clk = rzg2l_cpg_dsi_div_clk_register(core, priv->clks, priv);
		break;
	default:
		goto fail;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}

/**
 * struct mstp_clock - MSTP gating clock
 *
 * @hw: handle between common and hardware-specific interfaces
 * @off: register offset
 * @bit: ON/MON bit
 * @enabled: soft state of the clock, if it is coupled with another clock
 * @priv: CPG/MSTP private data
 * @sibling: pointer to the other coupled clock
 */
struct mstp_clock {
	struct clk_hw hw;
	u16 off;
	u8 bit;
	bool enabled;
	struct rzg2l_cpg_priv *priv;
	struct mstp_clock *sibling;
};

#define to_mod_clock(_hw) container_of(_hw, struct mstp_clock, hw)

static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	unsigned int reg = clock->off;
	struct device *dev = priv->dev;
	unsigned long flags;
	u32 bitmask = BIT(clock->bit);
	u32 value;
	int error;

	if (!clock->off) {
		dev_dbg(dev, "%pC does not support ON/OFF\n", hw->clk);
		return 0;
	}

	dev_dbg(dev, "CLK_ON %u/%pC %s\n", CLK_ON_R(reg), hw->clk,
		enable ? "ON" : "OFF");
	spin_lock_irqsave(&priv->rmw_lock, flags);

	if (enable)
		value = (bitmask << 16) | bitmask;
	else
		value = bitmask << 16;
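	/* The upper 16 bits of CLK_ON are write enables for the lower 16 bits. */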
	writel(value, priv->base + CLK_ON_R(reg));

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (!enable)
		return 0;

	if (!priv->info->has_clk_mon_regs)
		return 0;

	error = readl_poll_timeout_atomic(priv->base + CLK_MON_R(reg), value,
					  value & bitmask, 0, 10);
	if (error)
		dev_err(dev, "Failed to enable CLK_ON %p\n",
			priv->base + CLK_ON_R(reg));

	return error;
}

static int rzg2l_mod_clock_enable(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);

	if (clock->sibling) {
		struct rzg2l_cpg_priv *priv = clock->priv;
		unsigned long flags;
		bool enabled;

		spin_lock_irqsave(&priv->rmw_lock, flags);
		enabled = clock->sibling->enabled;
		clock->enabled = true;
		spin_unlock_irqrestore(&priv->rmw_lock, flags);
		if (enabled)
			return 0;
	}

	return rzg2l_mod_clock_endisable(hw, true);
}

static void rzg2l_mod_clock_disable(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);

	if (clock->sibling) {
		struct rzg2l_cpg_priv *priv = clock->priv;
		unsigned long flags;
		bool enabled;

		spin_lock_irqsave(&priv->rmw_lock, flags);
		enabled = clock->sibling->enabled;
		clock->enabled = false;
		spin_unlock_irqrestore(&priv->rmw_lock, flags);
		if (enabled)
			return;
	}

	rzg2l_mod_clock_endisable(hw, false);
}

static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	u32 bitmask = BIT(clock->bit);
	u32 value;

	if (!clock->off) {
		dev_dbg(priv->dev, "%pC does not support ON/OFF\n", hw->clk);
		return 1;
	}

	if (clock->sibling)
		return clock->enabled;

	if (priv->info->has_clk_mon_regs)
		value = readl(priv->base + CLK_MON_R(clock->off));
	else
		value = readl(priv->base + clock->off);

	return value & bitmask;
}

static const struct clk_ops rzg2l_mod_clock_ops = {
	.enable = rzg2l_mod_clock_enable,
	.disable = rzg2l_mod_clock_disable,
	.is_enabled = rzg2l_mod_clock_is_enabled,
};

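/*
 * Coupled module clocks share a single ON/MON bit: the hardware bit is only
 * touched on the first enable and the last disable of the pair, based on the
 * software state tracked in struct mstp_clock (see rzg2l_mod_clock_enable()
 * and rzg2l_mod_clock_disable()). rzg2l_mod_clock_get_sibling() finds the
 * other half of such a pair by matching the register offset and bit.
 */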
static struct mstp_clock
*rzg2l_mod_clock_get_sibling(struct mstp_clock *clock,
			     struct rzg2l_cpg_priv *priv)
{
	struct clk_hw *hw;
	unsigned int i;

	for (i = 0; i < priv->num_mod_clks; i++) {
		struct mstp_clock *clk;

		if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
			continue;

		hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
		clk = to_mod_clock(hw);
		if (clock->off == clk->off && clock->bit == clk->bit)
			return clk;
	}

	return NULL;
}

static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
			   const struct rzg2l_cpg_info *info,
			   struct rzg2l_cpg_priv *priv)
{
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int i;

	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!mod->name) {
		/* Skip NULLified clock */
		return;
	}

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &rzg2l_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i]) {
			dev_dbg(dev, "CPG %s setting CLK_IS_CRITICAL\n",
				mod->name);
			init.flags |= CLK_IS_CRITICAL;
			break;
		}

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->off = mod->off;
	clock->bit = mod->bit;
	clock->priv = priv;
	clock->hw.init = &init;

	clk = clk_register(NULL, &clock->hw);
	if (IS_ERR(clk))
		goto fail;

	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;

	if (mod->is_coupled) {
		struct mstp_clock *sibling;

		clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
		sibling = rzg2l_mod_clock_get_sibling(clock, priv);
		if (sibling) {
			clock->sibling = sibling;
			sibling->sibling = clock;
		}
	}

	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
}

#define rcdev_to_priv(x)	container_of(x, struct rzg2l_cpg_priv, rcdev)

static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 mask = BIT(info->resets[id].bit);
	s8 monbit = info->resets[id].monbit;
	u32 value = mask << 16;

	dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));

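	/*
	 * The upper 16 bits of the CLK_RST registers are write enables for the
	 * lower 16 bits; writing the enable bit with a data bit of 0 asserts
	 * the reset.
	 */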
	writel(value, priv->base + CLK_RST_R(reg));

	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(reg);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		mask = BIT(monbit);
	} else {
		/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
		udelay(35);
		return 0;
	}

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 value & mask, 10, 200);
}

static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 mask = BIT(info->resets[id].bit);
	s8 monbit = info->resets[id].monbit;
	u32 value = (mask << 16) | mask;

	dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
		CLK_RST_R(reg));

	writel(value, priv->base + CLK_RST_R(reg));

	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(reg);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		mask = BIT(monbit);
	} else {
		/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
		udelay(35);
		return 0;
	}

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 !(value & mask), 10, 200);
}

static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	int ret;

	ret = rzg2l_cpg_assert(rcdev, id);
	if (ret)
		return ret;

	return rzg2l_cpg_deassert(rcdev, id);
}

static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	s8 monbit = info->resets[id].monbit;
	unsigned int reg;
	u32 bitmask;

	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(info->resets[id].off);
		bitmask = BIT(info->resets[id].bit);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		bitmask = BIT(monbit);
	} else {
		return -ENOTSUPP;
	}

	return !!(readl(priv->base + reg) & bitmask);
}

static const struct reset_control_ops rzg2l_cpg_reset_ops = {
	.reset = rzg2l_cpg_reset,
	.assert = rzg2l_cpg_assert,
	.deassert = rzg2l_cpg_deassert,
	.status = rzg2l_cpg_status,
};

static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
				 const struct of_phandle_args *reset_spec)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int id = reset_spec->args[0];

	if (id >= rcdev->nr_resets || !info->resets[id].off) {
		dev_err(rcdev->dev, "Invalid reset index %u\n", id);
		return -EINVAL;
	}

	return id;
}

static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
{
	priv->rcdev.ops = &rzg2l_cpg_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.dev = priv->dev;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
	priv->rcdev.nr_resets = priv->num_resets;

	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}

static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv,
				const struct of_phandle_args *clkspec)
{
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int id;
	unsigned int i;

	if (clkspec->args_count != 2)
		return false;

	if (clkspec->args[0] != CPG_MOD)
		return false;

	id = clkspec->args[1] + info->num_total_core_clks;
	for (i = 0; i < info->num_no_pm_mod_clks; i++) {
		if (info->no_pm_mod_clks[i] == id)
			return false;
	}

	return true;
}

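/*
 * When a device joins the CPG power domain, walk its "clocks" property and
 * register every PM-manageable CPG_MOD clock with the PM clock framework so
 * that runtime PM can gate them.
 */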
static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
	struct rzg2l_cpg_priv *priv = container_of(domain, struct rzg2l_cpg_priv, genpd);
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	bool once = true;
	struct clk *clk;
	int error;
	int i = 0;

	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (rzg2l_cpg_is_pm_clk(priv, &clkspec)) {
			if (once) {
				once = false;
				error = pm_clk_create(dev);
				if (error) {
					of_node_put(clkspec.np);
					goto err;
				}
			}
			clk = of_clk_get_from_provider(&clkspec);
			of_node_put(clkspec.np);
			if (IS_ERR(clk)) {
				error = PTR_ERR(clk);
				goto fail_destroy;
			}

			error = pm_clk_add_clk(dev, clk);
			if (error) {
				dev_err(dev, "pm_clk_add_clk failed %d\n",
					error);
				goto fail_put;
			}
		} else {
			of_node_put(clkspec.np);
		}
		i++;
	}

	return 0;

fail_put:
	clk_put(clk);

fail_destroy:
	pm_clk_destroy(dev);
err:
	return error;
}

static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}

static void rzg2l_cpg_genpd_remove(void *data)
{
	pm_genpd_remove(data);
}

static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct generic_pm_domain *genpd = &priv->genpd;
	int ret;

	genpd->name = np->name;
	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
		       GENPD_FLAG_ACTIVE_WAKEUP;
	genpd->attach_dev = rzg2l_cpg_attach_dev;
	genpd->detach_dev = rzg2l_cpg_detach_dev;
	ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, genpd);
	if (ret)
		return ret;

	return of_genpd_add_provider_simple(np, genpd);
}

static int __init rzg2l_cpg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzg2l_cpg_info *info;
	struct rzg2l_cpg_priv *priv;
	unsigned int nclks, i;
	struct clk **clks;
	int error;

	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->info = info;
	spin_lock_init(&priv->rmw_lock);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->clks = clks;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->num_resets = info->num_resets;
	priv->last_dt_core_clk = info->last_dt_core_clk;

	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < info->num_core_clks; i++)
		rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);

	error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
	if (error)
		return error;

	error = devm_add_action_or_reset(dev, rzg2l_cpg_del_clk_provider, np);
	if (error)
		return error;

	error = rzg2l_cpg_add_clk_domain(priv);
	if (error)
		return error;

	error = rzg2l_cpg_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}

static const struct of_device_id rzg2l_cpg_match[] = {
#ifdef CONFIG_CLK_R9A07G043
	{
		.compatible = "renesas,r9a07g043-cpg",
		.data = &r9a07g043_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G044
	{
		.compatible = "renesas,r9a07g044-cpg",
		.data = &r9a07g044_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G054
	{
		.compatible = "renesas,r9a07g054-cpg",
		.data = &r9a07g054_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G011
	{
		.compatible = "renesas,r9a09g011-cpg",
		.data = &r9a09g011_cpg_info,
	},
#endif
	{ /* sentinel */ }
};

static struct platform_driver rzg2l_cpg_driver = {
	.driver		= {
		.name	= "rzg2l-cpg",
		.of_match_table = rzg2l_cpg_match,
	},
};

static int __init rzg2l_cpg_init(void)
{
	return platform_driver_probe(&rzg2l_cpg_driver, rzg2l_cpg_probe);
}

subsys_initcall(rzg2l_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");