1// SPDX-License-Identifier: GPL-2.0
2/*
3 * R-Car Gen3 Clock Pulse Generator
4 *
5 * Copyright (C) 2015-2018 Glider bvba
6 * Copyright (C) 2019 Renesas Electronics Corp.
7 *
8 * Based on clk-rcar-gen3.c
9 *
10 * Copyright (C) 2015 Renesas Electronics Corp.
11 */
12
13#include <linux/bug.h>
14#include <linux/bitfield.h>
15#include <linux/clk.h>
16#include <linux/clk-provider.h>
17#include <linux/device.h>
18#include <linux/err.h>
19#include <linux/init.h>
20#include <linux/io.h>
21#include <linux/pm.h>
22#include <linux/slab.h>
23#include <linux/sys_soc.h>
24
25#include "renesas-cpg-mssr.h"
26#include "rcar-gen3-cpg.h"
27
/* Offsets of the PLLn Control Registers within the CPG register block */
#define CPG_PLL0CR		0x00d8
#define CPG_PLL2CR		0x002c
#define CPG_PLL4CR		0x01f4

#define CPG_RCKCR_CKSEL	BIT(15)	/* RCLK Clock Source Select */

/* Serializes all read-modify-write cycles on shared CPG registers */
static spinlock_t cpg_lock;
35
36static void cpg_reg_modify(void __iomem *reg, u32 clear, u32 set)
37{
38	unsigned long flags;
39	u32 val;
40
41	spin_lock_irqsave(&cpg_lock, flags);
42	val = readl(reg);
43	val &= ~clear;
44	val |= set;
45	writel(val, reg);
46	spin_unlock_irqrestore(&cpg_lock, flags);
47};
48
/*
 * Simple save/restore helper for a single CPG register across
 * system suspend/resume, driven by the CPG/MSSR notifier chain.
 */
struct cpg_simple_notifier {
	struct notifier_block nb;	/* hook into the notifier chain */
	void __iomem *reg;		/* register to save/restore */
	u32 saved;			/* value captured at suspend time */
};
54
55static int cpg_simple_notifier_call(struct notifier_block *nb,
56				    unsigned long action, void *data)
57{
58	struct cpg_simple_notifier *csn =
59		container_of(nb, struct cpg_simple_notifier, nb);
60
61	switch (action) {
62	case PM_EVENT_SUSPEND:
63		csn->saved = readl(csn->reg);
64		return NOTIFY_OK;
65
66	case PM_EVENT_RESUME:
67		writel(csn->saved, csn->reg);
68		return NOTIFY_OK;
69	}
70	return NOTIFY_DONE;
71}
72
/*
 * cpg_simple_notifier_register - arm a cpg_simple_notifier
 * @notifiers: notifier chain to attach to
 * @csn: notifier instance; caller must have set csn->reg beforehand
 */
static void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
					 struct cpg_simple_notifier *csn)
{
	csn->nb.notifier_call = cpg_simple_notifier_call;
	raw_notifier_chain_register(notifiers, &csn->nb);
}
79
80/*
81 * Z Clock & Z2 Clock
82 *
83 * Traits of this clock:
84 * prepare - clk_prepare only ensures that parents are prepared
85 * enable - clk_enable only ensures that parents are enabled
86 * rate - rate is adjustable.  clk->rate = (parent->rate * mult / 32 ) / 2
87 * parent - fixed parent.  No clk_set_parent support
88 */
#define CPG_FRQCRB			0x00000004
#define CPG_FRQCRB_KICK			BIT(31)	/* triggers the rate change */
#define CPG_FRQCRC			0x000000e0

/* Per-instance state for a Z/Z2 CPU clock */
struct cpg_z_clk {
	struct clk_hw hw;		/* CCF handle, embedded */
	void __iomem *reg;		/* FRQCRC: holds the 5-bit divider field */
	void __iomem *kick_reg;		/* FRQCRB: KICK bit latches new setting */
	unsigned long mask;		/* position of this clock's field in FRQCRC */
	unsigned int fixed_div;		/* fixed post-divider from the parent PLL */
};

#define to_z_clk(_hw)	container_of(_hw, struct cpg_z_clk, hw)
102
103static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
104					   unsigned long parent_rate)
105{
106	struct cpg_z_clk *zclk = to_z_clk(hw);
107	unsigned int mult;
108	u32 val;
109
110	val = readl(zclk->reg) & zclk->mask;
111	mult = 32 - (val >> __ffs(zclk->mask));
112
113	return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
114				     32 * zclk->fixed_div);
115}
116
/*
 * Round a rate request to what the 5-bit multiplier can produce.
 *
 * Achievable rates are (parent_rate / fixed_div) * mult / 32 with
 * mult in [1, 32].  Pick the multiplier closest to the requested rate
 * while honouring the request's min/max constraints.
 */
static int cpg_z_clk_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct cpg_z_clk *zclk = to_z_clk(hw);
	unsigned int min_mult, max_mult, mult;
	unsigned long prate;

	prate = req->best_parent_rate / zclk->fixed_div;
	/* Smallest/largest multiplier that keeps the rate inside the limits */
	min_mult = max(div64_ul(req->min_rate * 32ULL, prate), 1ULL);
	max_mult = min(div64_ul(req->max_rate * 32ULL, prate), 32ULL);
	if (max_mult < min_mult)
		return -EINVAL;	/* no multiplier satisfies min/max */

	mult = div64_ul(req->rate * 32ULL, prate);
	mult = clamp(mult, min_mult, max_mult);

	/* Report the exact rate the hardware will generate */
	req->rate = div_u64((u64)prate * mult, 32);
	return 0;
}
136
/*
 * Program a new Z clock multiplier and kick the hardware.
 *
 * Sequence required by the CPG: the divider field in FRQCRC may only be
 * rewritten while no transition is pending (KICK clear); setting KICK
 * then makes the hardware apply the new value.
 */
static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct cpg_z_clk *zclk = to_z_clk(hw);
	unsigned int mult;
	unsigned int i;

	mult = DIV64_U64_ROUND_CLOSEST(rate * 32ULL * zclk->fixed_div,
				       parent_rate);
	mult = clamp(mult, 1U, 32U);

	/* A pending transition means the divider field must not be touched */
	if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
		return -EBUSY;

	/* Field stores (32 - mult); see cpg_z_clk_recalc_rate() */
	cpg_reg_modify(zclk->reg, zclk->mask,
		       ((32 - mult) << __ffs(zclk->mask)) & zclk->mask);

	/*
	 * Set KICK bit in FRQCRB to update hardware setting and wait for
	 * clock change completion.
	 */
	cpg_reg_modify(zclk->kick_reg, 0, CPG_FRQCRB_KICK);

	/*
	 * Note: There is no HW information about the worst case latency.
	 *
	 * Using experimental measurements, it seems that no more than
	 * ~10 iterations are needed, independently of the CPU rate.
	 * Since this value might be dependent of external xtal rate, pll1
	 * rate or even the other emulation clocks rate, use 1000 as a
	 * "super" safe value.
	 */
	for (i = 1000; i; i--) {
		if (!(readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
			return 0;

		cpu_relax();
	}

	return -ETIMEDOUT;
}
178
/* clk_ops for the Z/Z2 clocks: adjustable rate, fixed parent */
static const struct clk_ops cpg_z_clk_ops = {
	.recalc_rate = cpg_z_clk_recalc_rate,
	.determine_rate = cpg_z_clk_determine_rate,
	.set_rate = cpg_z_clk_set_rate,
};
184
/*
 * cpg_z_clk_register - allocate and register one Z/Z2 clock
 * @name: clock name
 * @parent_name: name of the (single, fixed) parent clock
 * @base: CPG register block base; FRQCRB/FRQCRC offsets are added here
 * @div: fixed post-divider between the PLL VCO and this clock
 * @offset: bit position of this clock's 5-bit field in FRQCRC
 *
 * Returns the registered clk, or an ERR_PTR; the cpg_z_clk allocation is
 * freed again on registration failure.
 */
static struct clk * __init cpg_z_clk_register(const char *name,
					      const char *parent_name,
					      void __iomem *reg,
					      unsigned int div,
					      unsigned int offset)
{
	struct clk_init_data init;
	struct cpg_z_clk *zclk;
	struct clk *clk;

	zclk = kzalloc(sizeof(*zclk), GFP_KERNEL);
	if (!zclk)
		return ERR_PTR(-ENOMEM);

	/* init lives on the stack; clk_register() copies what it needs */
	init.name = name;
	init.ops = &cpg_z_clk_ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	zclk->reg = reg + CPG_FRQCRC;
	zclk->kick_reg = reg + CPG_FRQCRB;
	zclk->hw.init = &init;
	zclk->mask = GENMASK(offset + 4, offset);	/* 5-bit field */
	zclk->fixed_div = div; /* PLLVCO x 1/div x SYS-CPU divider */

	clk = clk_register(NULL, &zclk->hw);
	if (IS_ERR(clk))
		kfree(zclk);

	return clk;
}
217
218/*
219 * SDn Clock
220 */
#define CPG_SD_STP_HCK		BIT(9)	/* stop bit for the SDnH clock */
#define CPG_SD_STP_CK		BIT(8)	/* stop bit for the SDn clock */

#define CPG_SD_STP_MASK		(CPG_SD_STP_HCK | CPG_SD_STP_CK)
#define CPG_SD_FC_MASK		(0x7 << 2 | 0x3 << 0)	/* SRCFC + FC fields */

/* Build one SDnCKCR register value + resulting total divider */
#define CPG_SD_DIV_TABLE_DATA(stp_hck, stp_ck, sd_srcfc, sd_fc, sd_div) \
{ \
	.val = ((stp_hck) ? CPG_SD_STP_HCK : 0) | \
	       ((stp_ck) ? CPG_SD_STP_CK : 0) | \
	       ((sd_srcfc) << 2) | \
	       ((sd_fc) << 0), \
	.div = (sd_div), \
}

/* One SDn divider setting: raw register value and effective divider */
struct sd_div_table {
	u32 val;		/* STP bits + SRCFC + FC fields */
	unsigned int div;	/* total division ratio (srcfc * fc) */
};

/* Per-instance state for an SDn clock */
struct sd_clock {
	struct clk_hw hw;			/* CCF handle, embedded */
	const struct sd_div_table *div_table;	/* available divider settings */
	struct cpg_simple_notifier csn;		/* suspend/resume save/restore */
	unsigned int div_num;			/* number of table entries */
	unsigned int cur_div_idx;		/* index of the active setting */
};
248
249/* SDn divider
250 *                     sd_srcfc   sd_fc   div
251 * stp_hck   stp_ck    (div)      (div)     = sd_srcfc x sd_fc
252 *-------------------------------------------------------------------
253 *  0         0         0 (1)      1 (4)      4 : SDR104 / HS200 / HS400 (8 TAP)
254 *  0         0         1 (2)      1 (4)      8 : SDR50
255 *  1         0         2 (4)      1 (4)     16 : HS / SDR25
256 *  1         0         3 (8)      1 (4)     32 : NS / SDR12
257 *  1         0         4 (16)     1 (4)     64
258 *  0         0         0 (1)      0 (2)      2
259 *  0         0         1 (2)      0 (2)      4 : SDR104 / HS200 / HS400 (4 TAP)
260 *  1         0         2 (4)      0 (2)      8
261 *  1         0         3 (8)      0 (2)     16
262 *  1         0         4 (16)     0 (2)     32
263 *
264 *  NOTE: There is a quirk option to ignore the first row of the dividers
265 *  table when searching for suitable settings. This is because HS400 on
266 *  early ES versions of H3 and M3-W requires a specific setting to work.
267 */
/* Divider settings; ordering matters: set_rate() picks the first match */
static const struct sd_div_table cpg_sd_div_table[] = {
/*	CPG_SD_DIV_TABLE_DATA(stp_hck,  stp_ck,   sd_srcfc,   sd_fc,  sd_div) */
	CPG_SD_DIV_TABLE_DATA(0,        0,        0,          1,        4),
	CPG_SD_DIV_TABLE_DATA(0,        0,        1,          1,        8),
	CPG_SD_DIV_TABLE_DATA(1,        0,        2,          1,       16),
	CPG_SD_DIV_TABLE_DATA(1,        0,        3,          1,       32),
	CPG_SD_DIV_TABLE_DATA(1,        0,        4,          1,       64),
	CPG_SD_DIV_TABLE_DATA(0,        0,        0,          0,        2),
	CPG_SD_DIV_TABLE_DATA(0,        0,        1,          0,        4),
	CPG_SD_DIV_TABLE_DATA(1,        0,        2,          0,        8),
	CPG_SD_DIV_TABLE_DATA(1,        0,        3,          0,       16),
	CPG_SD_DIV_TABLE_DATA(1,        0,        4,          0,       32),
};

#define to_sd_clock(_hw) container_of(_hw, struct sd_clock, hw)
283
284static int cpg_sd_clock_enable(struct clk_hw *hw)
285{
286	struct sd_clock *clock = to_sd_clock(hw);
287
288	cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK,
289		       clock->div_table[clock->cur_div_idx].val &
290		       CPG_SD_STP_MASK);
291
292	return 0;
293}
294
295static void cpg_sd_clock_disable(struct clk_hw *hw)
296{
297	struct sd_clock *clock = to_sd_clock(hw);
298
299	cpg_reg_modify(clock->csn.reg, 0, CPG_SD_STP_MASK);
300}
301
302static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
303{
304	struct sd_clock *clock = to_sd_clock(hw);
305
306	return !(readl(clock->csn.reg) & CPG_SD_STP_MASK);
307}
308
309static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
310						unsigned long parent_rate)
311{
312	struct sd_clock *clock = to_sd_clock(hw);
313
314	return DIV_ROUND_CLOSEST(parent_rate,
315				 clock->div_table[clock->cur_div_idx].div);
316}
317
318static int cpg_sd_clock_determine_rate(struct clk_hw *hw,
319				       struct clk_rate_request *req)
320{
321	unsigned long best_rate = ULONG_MAX, diff_min = ULONG_MAX;
322	struct sd_clock *clock = to_sd_clock(hw);
323	unsigned long calc_rate, diff;
324	unsigned int i;
325
326	for (i = 0; i < clock->div_num; i++) {
327		calc_rate = DIV_ROUND_CLOSEST(req->best_parent_rate,
328					      clock->div_table[i].div);
329		if (calc_rate < req->min_rate || calc_rate > req->max_rate)
330			continue;
331
332		diff = calc_rate > req->rate ? calc_rate - req->rate
333					     : req->rate - calc_rate;
334		if (diff < diff_min) {
335			best_rate = calc_rate;
336			diff_min = diff;
337		}
338	}
339
340	if (best_rate == ULONG_MAX)
341		return -EINVAL;
342
343	req->rate = best_rate;
344	return 0;
345}
346
347static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
348				 unsigned long parent_rate)
349{
350	struct sd_clock *clock = to_sd_clock(hw);
351	unsigned int i;
352
353	for (i = 0; i < clock->div_num; i++)
354		if (rate == DIV_ROUND_CLOSEST(parent_rate,
355					      clock->div_table[i].div))
356			break;
357
358	if (i >= clock->div_num)
359		return -EINVAL;
360
361	clock->cur_div_idx = i;
362
363	cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK | CPG_SD_FC_MASK,
364		       clock->div_table[i].val &
365		       (CPG_SD_STP_MASK | CPG_SD_FC_MASK));
366
367	return 0;
368}
369
/* clk_ops for the SDn clocks: gateable, table-based divider */
static const struct clk_ops cpg_sd_clock_ops = {
	.enable = cpg_sd_clock_enable,
	.disable = cpg_sd_clock_disable,
	.is_enabled = cpg_sd_clock_is_enabled,
	.recalc_rate = cpg_sd_clock_recalc_rate,
	.determine_rate = cpg_sd_clock_determine_rate,
	.set_rate = cpg_sd_clock_set_rate,
};
378
/* SoC-revision specific quirk flags, filled in by rcar_gen3_cpg_init() */
static u32 cpg_quirks __initdata;

#define PLL_ERRATA	BIT(0)		/* Missing PLL0/2/4 post-divider */
#define RCKCR_CKSEL	BIT(1)		/* Manual RCLK parent selection */
#define SD_SKIP_FIRST	BIT(2)		/* Skip first clock in SD table */
384
/*
 * cpg_sd_clk_register - allocate and register one SDn clock
 * @name: clock name
 * @base: CPG register block base
 * @offset: offset of this clock's SDnCKCR register
 * @parent_name: name of the (single) parent clock
 * @notifiers: chain used to save/restore SDnCKCR across suspend
 *
 * The SD_SKIP_FIRST quirk drops the first table row (see the comment
 * above cpg_sd_div_table).  The register is initialized to the first
 * usable entry, with both stop bits set, before the clk is registered.
 */
static struct clk * __init cpg_sd_clk_register(const char *name,
	void __iomem *base, unsigned int offset, const char *parent_name,
	struct raw_notifier_head *notifiers)
{
	struct clk_init_data init;
	struct sd_clock *clock;
	struct clk *clk;
	u32 val;

	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
	if (!clock)
		return ERR_PTR(-ENOMEM);

	/* init lives on the stack; clk_register() copies what it needs */
	init.name = name;
	init.ops = &cpg_sd_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->csn.reg = base + offset;
	clock->hw.init = &init;
	clock->div_table = cpg_sd_div_table;
	clock->div_num = ARRAY_SIZE(cpg_sd_div_table);

	if (cpg_quirks & SD_SKIP_FIRST) {
		clock->div_table++;
		clock->div_num--;
	}

	/* Program the first table entry's divider, clock stopped */
	val = readl(clock->csn.reg) & ~CPG_SD_FC_MASK;
	val |= CPG_SD_STP_MASK | (clock->div_table[0].val & CPG_SD_FC_MASK);
	writel(val, clock->csn.reg);

	clk = clk_register(NULL, &clock->hw);
	if (IS_ERR(clk))
		goto free_clock;

	cpg_simple_notifier_register(notifiers, &clock->csn);
	return clk;

free_clock:
	kfree(clock);
	return clk;
}
429
/* Composite RPC clock: divider + gate sharing the RPCCKCR register */
struct rpc_clock {
	struct clk_divider div;		/* RPC divider field */
	struct clk_gate gate;		/* RPC gate bit */
	/*
	 * One notifier covers both RPC and RPCD2 clocks as they are both
	 * controlled by the same RPCCKCR register...
	 */
	struct cpg_simple_notifier csn;
};
439
/* RPCSRC divider values (register value, division ratio); {0,0} terminates */
static const struct clk_div_table cpg_rpcsrc_div_table[] = {
	{ 2, 5 }, { 3, 6 }, { 0, 0 },
};

/* RPC divider values (register value, division ratio); {0,0} terminates */
static const struct clk_div_table cpg_rpc_div_table[] = {
	{ 1, 2 }, { 3, 4 }, { 5, 6 }, { 7, 8 }, { 0, 0 },
};
447
448static struct clk * __init cpg_rpc_clk_register(const char *name,
449	void __iomem *base, const char *parent_name,
450	struct raw_notifier_head *notifiers)
451{
452	struct rpc_clock *rpc;
453	struct clk *clk;
454
455	rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
456	if (!rpc)
457		return ERR_PTR(-ENOMEM);
458
459	rpc->div.reg = base + CPG_RPCCKCR;
460	rpc->div.width = 3;
461	rpc->div.table = cpg_rpc_div_table;
462	rpc->div.lock = &cpg_lock;
463
464	rpc->gate.reg = base + CPG_RPCCKCR;
465	rpc->gate.bit_idx = 8;
466	rpc->gate.flags = CLK_GATE_SET_TO_DISABLE;
467	rpc->gate.lock = &cpg_lock;
468
469	rpc->csn.reg = base + CPG_RPCCKCR;
470
471	clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
472				     &rpc->div.hw,  &clk_divider_ops,
473				     &rpc->gate.hw, &clk_gate_ops,
474				     CLK_SET_RATE_PARENT);
475	if (IS_ERR(clk)) {
476		kfree(rpc);
477		return clk;
478	}
479
480	cpg_simple_notifier_register(notifiers, &rpc->csn);
481	return clk;
482}
483
/* Composite RPCD2 clock: fixed 1/2 divider + gate in RPCCKCR */
struct rpcd2_clock {
	struct clk_fixed_factor fixed;	/* fixed 1/2 division */
	struct clk_gate gate;		/* RPCD2 gate bit */
};
488
489static struct clk * __init cpg_rpcd2_clk_register(const char *name,
490						  void __iomem *base,
491						  const char *parent_name)
492{
493	struct rpcd2_clock *rpcd2;
494	struct clk *clk;
495
496	rpcd2 = kzalloc(sizeof(*rpcd2), GFP_KERNEL);
497	if (!rpcd2)
498		return ERR_PTR(-ENOMEM);
499
500	rpcd2->fixed.mult = 1;
501	rpcd2->fixed.div = 2;
502
503	rpcd2->gate.reg = base + CPG_RPCCKCR;
504	rpcd2->gate.bit_idx = 9;
505	rpcd2->gate.flags = CLK_GATE_SET_TO_DISABLE;
506	rpcd2->gate.lock = &cpg_lock;
507
508	clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
509				     &rpcd2->fixed.hw, &clk_fixed_factor_ops,
510				     &rpcd2->gate.hw, &clk_gate_ops,
511				     CLK_SET_RATE_PARENT);
512	if (IS_ERR(clk))
513		kfree(rpcd2);
514
515	return clk;
516}
517
518
/* Configuration captured by rcar_gen3_cpg_init() for later clk registration */
static const struct rcar_gen3_cpg_pll_config *cpg_pll_config __initdata;
static unsigned int cpg_clk_extalr __initdata;	/* index of the EXTALR clock */
static u32 cpg_mode __initdata;			/* MD mode pin states */

/* Quirk flags per SoC revision; presumably first match wins — see soc_device_match() */
static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
	{
		.soc_id = "r8a7795", .revision = "ES1.0",
		.data = (void *)(PLL_ERRATA | RCKCR_CKSEL | SD_SKIP_FIRST),
	},
	{
		.soc_id = "r8a7795", .revision = "ES1.*",
		.data = (void *)(RCKCR_CKSEL | SD_SKIP_FIRST),
	},
	{
		.soc_id = "r8a7795", .revision = "ES2.0",
		.data = (void *)SD_SKIP_FIRST,
	},
	{
		.soc_id = "r8a7796", .revision = "ES1.0",
		.data = (void *)(RCKCR_CKSEL | SD_SKIP_FIRST),
	},
	{
		.soc_id = "r8a7796", .revision = "ES1.1",
		.data = (void *)SD_SKIP_FIRST,
	},
	{ /* sentinel */ }
};
546
/*
 * rcar_gen3_cpg_clk_register - register one R-Car Gen3 core clock
 * @dev: CPG/MSSR device (unused here)
 * @core: clock description from the per-SoC table
 * @info: CPG/MSSR driver info (unused here)
 * @clks: array of already-registered clocks, indexed by core clock id
 * @base: CPG register block base
 * @notifiers: chain for suspend/resume register save/restore
 *
 * Dispatches on core->type: most types reduce to a fixed-factor clock
 * (mult/div below), while SD, Z, RPC, RPCD2 and RPCSRC get their own
 * dedicated registration helpers.  Returns the clk or an ERR_PTR.
 */
struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
	const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
	struct clk **clks, void __iomem *base,
	struct raw_notifier_head *notifiers)
{
	const struct clk *parent;
	unsigned int mult = 1;
	unsigned int div = 1;
	u32 value;

	parent = clks[core->parent & 0xffff];	/* some types use high bits */
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	switch (core->type) {
	case CLK_TYPE_GEN3_MAIN:
		div = cpg_pll_config->extal_div;
		break;

	case CLK_TYPE_GEN3_PLL0:
		/*
		 * PLL0 is a configurable multiplier clock. Register it as a
		 * fixed factor clock for now as there's no generic multiplier
		 * clock implementation and we currently have no need to change
		 * the multiplier value.
		 */
		value = readl(base + CPG_PLL0CR);
		mult = (((value >> 24) & 0x7f) + 1) * 2;
		if (cpg_quirks & PLL_ERRATA)
			mult *= 2;
		break;

	case CLK_TYPE_GEN3_PLL1:
		mult = cpg_pll_config->pll1_mult;
		div = cpg_pll_config->pll1_div;
		break;

	case CLK_TYPE_GEN3_PLL2:
		/*
		 * PLL2 is a configurable multiplier clock. Register it as a
		 * fixed factor clock for now as there's no generic multiplier
		 * clock implementation and we currently have no need to change
		 * the multiplier value.
		 */
		value = readl(base + CPG_PLL2CR);
		mult = (((value >> 24) & 0x7f) + 1) * 2;
		if (cpg_quirks & PLL_ERRATA)
			mult *= 2;
		break;

	case CLK_TYPE_GEN3_PLL3:
		mult = cpg_pll_config->pll3_mult;
		div = cpg_pll_config->pll3_div;
		break;

	case CLK_TYPE_GEN3_PLL4:
		/*
		 * PLL4 is a configurable multiplier clock. Register it as a
		 * fixed factor clock for now as there's no generic multiplier
		 * clock implementation and we currently have no need to change
		 * the multiplier value.
		 */
		value = readl(base + CPG_PLL4CR);
		mult = (((value >> 24) & 0x7f) + 1) * 2;
		if (cpg_quirks & PLL_ERRATA)
			mult *= 2;
		break;

	case CLK_TYPE_GEN3_SD:
		return cpg_sd_clk_register(core->name, base, core->offset,
					   __clk_get_name(parent), notifiers);

	case CLK_TYPE_GEN3_R:
		if (cpg_quirks & RCKCR_CKSEL) {
			struct cpg_simple_notifier *csn;

			csn = kzalloc(sizeof(*csn), GFP_KERNEL);
			if (!csn)
				return ERR_PTR(-ENOMEM);

			csn->reg = base + CPG_RCKCR;

			/*
			 * RINT is default.
			 * Only if EXTALR is populated, we switch to it.
			 */
			value = readl(csn->reg) & 0x3f;

			if (clk_get_rate(clks[cpg_clk_extalr])) {
				parent = clks[cpg_clk_extalr];
				value |= CPG_RCKCR_CKSEL;
			}

			writel(value, csn->reg);
			cpg_simple_notifier_register(notifiers, csn);
			break;
		}

		/* Select parent clock of RCLK by MD28 */
		if (cpg_mode & BIT(28))
			parent = clks[cpg_clk_extalr];
		break;

	case CLK_TYPE_GEN3_MDSEL:
		/*
		 * Clock selectable between two parents and two fixed dividers
		 * using a mode pin
		 */
		if (cpg_mode & BIT(core->offset)) {
			div = core->div & 0xffff;
		} else {
			/* Alternative parent is encoded in the high bits */
			parent = clks[core->parent >> 16];
			if (IS_ERR(parent))
				return ERR_CAST(parent);
			div = core->div >> 16;
		}
		mult = 1;
		break;

	case CLK_TYPE_GEN3_Z:
		return cpg_z_clk_register(core->name, __clk_get_name(parent),
					  base, core->div, core->offset);

	case CLK_TYPE_GEN3_OSC:
		/*
		 * Clock combining OSC EXTAL predivider and a fixed divider
		 */
		div = cpg_pll_config->osc_prediv * core->div;
		break;

	case CLK_TYPE_GEN3_RCKSEL:
		/*
		 * Clock selectable between two parents and two fixed dividers
		 * using RCKCR.CKSEL
		 */
		if (readl(base + CPG_RCKCR) & CPG_RCKCR_CKSEL) {
			div = core->div & 0xffff;
		} else {
			/* Alternative parent is encoded in the high bits */
			parent = clks[core->parent >> 16];
			if (IS_ERR(parent))
				return ERR_CAST(parent);
			div = core->div >> 16;
		}
		break;

	case CLK_TYPE_GEN3_RPCSRC:
		return clk_register_divider_table(NULL, core->name,
						  __clk_get_name(parent), 0,
						  base + CPG_RPCCKCR, 3, 2, 0,
						  cpg_rpcsrc_div_table,
						  &cpg_lock);

	case CLK_TYPE_GEN3_RPC:
		return cpg_rpc_clk_register(core->name, base,
					    __clk_get_name(parent), notifiers);

	case CLK_TYPE_GEN3_RPCD2:
		return cpg_rpcd2_clk_register(core->name, base,
					      __clk_get_name(parent));

	default:
		return ERR_PTR(-EINVAL);
	}

	/* All remaining types are plain fixed-factor clocks */
	return clk_register_fixed_factor(NULL, core->name,
					 __clk_get_name(parent), 0, mult, div);
}
714
715int __init rcar_gen3_cpg_init(const struct rcar_gen3_cpg_pll_config *config,
716			      unsigned int clk_extalr, u32 mode)
717{
718	const struct soc_device_attribute *attr;
719
720	cpg_pll_config = config;
721	cpg_clk_extalr = clk_extalr;
722	cpg_mode = mode;
723	attr = soc_device_match(cpg_quirks_match);
724	if (attr)
725		cpg_quirks = (uintptr_t)attr->data;
726	pr_debug("%s: mode = 0x%x quirks = 0x%x\n", __func__, mode, cpg_quirks);
727
728	spin_lock_init(&cpg_lock);
729
730	return 0;
731}
732