// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP3/4 - specific DPLL control functions
 *
 * Copyright (C) 2009-2010 Texas Instruments, Inc.
 * Copyright (C) 2009-2010 Nokia Corporation
 *
 * Written by Paul Walmsley
 * Testing and integration fixes by Jouni Högander
 *
 * 36xx support added by Vishwanath BS, Richard Woodruff, and Nishanth
 * Menon
 *
 * Parts of this code are based on code written by
 * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/clkdev.h>
#include <linux/clk/ti.h>

#include "clock.h"

/* CM_AUTOIDLE_PLL*.AUTO_* bit values */
#define DPLL_AUTOIDLE_DISABLE			0x0
#define DPLL_AUTOIDLE_LOW_POWER_STOP		0x1

#define MAX_DPLL_WAIT_TRIES		1000000

#define OMAP3XXX_EN_DPLL_LOCKED		0x7

/* Forward declarations */
static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk);
static void omap3_dpll_deny_idle(struct clk_hw_omap *clk);
static void omap3_dpll_allow_idle(struct clk_hw_omap *clk);

/* Private functions */

/* _omap3_dpll_write_clken - write clken_bits arg to a DPLL's enable bits */
static void _omap3_dpll_write_clken(struct clk_hw_omap *clk, u8 clken_bits)
{
	const struct dpll_data *dd;
	u32 v;

	dd = clk->dpll_data;

	v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
	v &= ~dd->enable_mask;
	v |= clken_bits << __ffs(dd->enable_mask);
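	/*
	 * Example (field position assumed for illustration): with an
	 * enable_mask covering bits 2:0, passing DPLL_LOCKED here places
	 * 0x7 in the EN_*_DPLL field, i.e. OMAP3XXX_EN_DPLL_LOCKED.
	 */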
	ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
}

/* _omap3_wait_dpll_status: wait for a DPLL to enter a specific state */
static int _omap3_wait_dpll_status(struct clk_hw_omap *clk, u8 state)
{
	const struct dpll_data *dd;
	int i = 0;
	int ret = -EINVAL;
	const char *clk_name;

	dd = clk->dpll_data;
	clk_name = clk_hw_get_name(&clk->hw);

	state <<= __ffs(dd->idlest_mask);

	while (((ti_clk_ll_ops->clk_readl(&dd->idlest_reg) & dd->idlest_mask)
		!= state) && i < MAX_DPLL_WAIT_TRIES) {
		i++;
		udelay(1);
	}

	if (i == MAX_DPLL_WAIT_TRIES) {
		pr_err("clock: %s failed transition to '%s'\n",
		       clk_name, (state) ? "locked" : "bypassed");
	} else {
		pr_debug("clock: %s transition to '%s' in %d loops\n",
			 clk_name, (state) ? "locked" : "bypassed", i);

		ret = 0;
	}

	return ret;
}
/* From 3430 TRM ES2 4.7.6.2 */
static u16 _omap3_dpll_compute_freqsel(struct clk_hw_omap *clk, u8 n)
{
	unsigned long fint;
	u16 f = 0;

	fint = clk_hw_get_rate(clk->dpll_data->clk_ref) / n;

	pr_debug("clock: fint is %lu\n", fint);

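	/*
	 * Illustrative example (numbers assumed, not from the TRM): a
	 * 26 MHz reference with n = 13 gives fint = 2 MHz, which falls in
	 * the 1.75-2.1 MHz band below and selects FREQSEL 0x7.
	 */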
	if (fint >= 750000 && fint <= 1000000)
		f = 0x3;
	else if (fint > 1000000 && fint <= 1250000)
		f = 0x4;
	else if (fint > 1250000 && fint <= 1500000)
		f = 0x5;
	else if (fint > 1500000 && fint <= 1750000)
		f = 0x6;
	else if (fint > 1750000 && fint <= 2100000)
		f = 0x7;
	else if (fint > 7500000 && fint <= 10000000)
		f = 0xB;
	else if (fint > 10000000 && fint <= 12500000)
		f = 0xC;
	else if (fint > 12500000 && fint <= 15000000)
		f = 0xD;
	else if (fint > 15000000 && fint <= 17500000)
		f = 0xE;
	else if (fint > 17500000 && fint <= 21000000)
		f = 0xF;
	else
		pr_debug("clock: unknown freqsel setting for %d\n", n);

	return f;
}

/*
 * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to lock.  Waits for the DPLL to report
 * readiness before returning.  Will save and restore the DPLL's
 * autoidle state across the enable, per the CDP code.  If the DPLL
 * locked successfully, return 0; if the DPLL did not lock in the time
 * allotted, or DPLL3 was passed in, return -EINVAL.
 */
static int _omap3_noncore_dpll_lock(struct clk_hw_omap *clk)
{
	const struct dpll_data *dd;
	u8 ai;
	u8 state = 1;
	int r = 0;

	pr_debug("clock: locking DPLL %s\n", clk_hw_get_name(&clk->hw));

	dd = clk->dpll_data;
	state <<= __ffs(dd->idlest_mask);

	/* Check if already locked */
	if ((ti_clk_ll_ops->clk_readl(&dd->idlest_reg) & dd->idlest_mask) ==
	    state)
		goto done;

	ai = omap3_dpll_autoidle_read(clk);

	if (ai)
		omap3_dpll_deny_idle(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOCKED);

	r = _omap3_wait_dpll_status(clk, 1);

	if (ai)
		omap3_dpll_allow_idle(clk);

done:
	return r;
}

/*
 * _omap3_noncore_dpll_bypass - instruct a DPLL to bypass and wait for readiness
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enter low-power bypass mode.  In
 * bypass mode, the DPLL's rate is set equal to its parent clock's
 * rate.  Waits for the DPLL to report readiness before returning.
 * Will save and restore the DPLL's autoidle state across the enable,
 * per the CDP code.  If the DPLL entered bypass mode successfully,
 * return 0; if the DPLL did not enter bypass in the time allotted, or
 * DPLL3 was passed in, or the DPLL does not support low-power bypass,
 * return -EINVAL.
 */
static int _omap3_noncore_dpll_bypass(struct clk_hw_omap *clk)
{
	int r;
	u8 ai;

	if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS)))
		return -EINVAL;

	pr_debug("clock: configuring DPLL %s for low-power bypass\n",
		 clk_hw_get_name(&clk->hw));

	ai = omap3_dpll_autoidle_read(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOW_POWER_BYPASS);

	r = _omap3_wait_dpll_status(clk, 0);

	if (ai)
		omap3_dpll_allow_idle(clk);

	return r;
}

/*
 * _omap3_noncore_dpll_stop - instruct a DPLL to stop
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enter low-power stop. Will save and
 * restore the DPLL's autoidle state across the stop, per the CDP
 * code.  If DPLL3 was passed in, or the DPLL does not support
 * low-power stop, return -EINVAL; otherwise, return 0.
 */
static int _omap3_noncore_dpll_stop(struct clk_hw_omap *clk)
{
	u8 ai;

	if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP)))
		return -EINVAL;

	pr_debug("clock: stopping DPLL %s\n", clk_hw_get_name(&clk->hw));

	ai = omap3_dpll_autoidle_read(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOW_POWER_STOP);

	if (ai)
		omap3_dpll_allow_idle(clk);

	return 0;
}

/**
 * _lookup_dco - Lookup DCO used by j-type DPLL
 * @clk: pointer to a DPLL struct clk
 * @dco: digital control oscillator selector
 * @m: DPLL multiplier to set
 * @n: DPLL divider to set
 *
 * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
 *
 * XXX This code is not needed for 3430/AM35xx; can it be optimized
 * out in non-multi-OMAP builds for those chips?
 */
static void _lookup_dco(struct clk_hw_omap *clk, u8 *dco, u16 m, u8 n)
{
	unsigned long fint, clkinp; /* watch out for overflow */

	clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));
	fint = (clkinp / n) * m;

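	/*
	 * Illustrative example (numbers assumed): clkinp = 26 MHz, n = 12
	 * and m = 443 give an fint of roughly 959.8 MHz, which is below
	 * 1 GHz, so DCO 2 is selected.
	 */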
	if (fint < 1000000000)
		*dco = 2;
	else
		*dco = 4;
}

/**
 * _lookup_sddiv - Calculate sigma delta divider for j-type DPLL
 * @clk: pointer to a DPLL struct clk
 * @sd_div: target sigma-delta divider
 * @m: DPLL multiplier to set
 * @n: DPLL divider to set
 *
 * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
 *
 * XXX This code is not needed for 3430/AM35xx; can it be optimized
 * out in non-multi-OMAP builds for those chips?
 */
static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n)
{
	unsigned long clkinp, sd; /* watch out for overflow */
	int mod1, mod2;

	clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));

	/*
	 * target sigma-delta to near 250MHz
	 * sd = ceil[(m/(n+1)) * (clkinp_MHz / 250)]
	 */
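	/*
	 * Illustrative example (numbers assumed): clkinp = 19.2 MHz, m = 443
	 * and a real divider n = 12 give sd = ceil(19.2 * 443 / (250 * 12))
	 * = ceil(2.84) = 3, matching the fixed-point math below.
	 */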
	clkinp /= 100000; /* shift from MHz to 10*Hz for 38.4 and 19.2 */
	mod1 = (clkinp * m) % (250 * n);
	sd = (clkinp * m) / (250 * n);
	mod2 = sd % 10;
	sd /= 10;

	if (mod1 || mod2)
		sd++;
	*sd_div = sd;
}

/*
 * omap3_noncore_dpll_program - set non-core DPLL M,N values directly
 * @clk:	struct clk * of DPLL to set
 * @freqsel:	FREQSEL value to set
 *
 * Program the DPLL with the last M, N values calculated, and wait for
 * the DPLL to lock. Returns -EINVAL upon error, or 0 upon success.
 */
static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
{
	struct dpll_data *dd = clk->dpll_data;
	u8 dco, sd_div, ai = 0;
	u32 v;
	bool errata_i810;

	/* 3430 ES2 TRM: 4.7.6.9 DPLL Programming Sequence */
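	/*
	 * Summary of the sequence implemented below (derived from this
	 * function, not quoted from the TRM): put the DPLL in bypass,
	 * program FREQSEL (343x only), program M/N together with
	 * DCC/DCO/SDDIV where those fields exist, program M4XEN/LPMODE
	 * where present, then relock.
	 */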
	_omap3_noncore_dpll_bypass(clk);

	/*
	 * Set jitter correction. Jitter correction is applicable only for
	 * OMAP343X, since the FREQSEL field is no longer present on other
	 * devices.
	 */
	if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) {
		v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
		v &= ~dd->freqsel_mask;
		v |= freqsel << __ffs(dd->freqsel_mask);
		ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
	}

	/* Set DPLL multiplier, divider */
	v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);

	/* Handle Duty Cycle Correction */
	if (dd->dcc_mask) {
		if (dd->last_rounded_rate >= dd->dcc_rate)
			v |= dd->dcc_mask; /* Enable DCC */
		else
			v &= ~dd->dcc_mask; /* Disable DCC */
	}

	v &= ~(dd->mult_mask | dd->div1_mask);
	v |= dd->last_rounded_m << __ffs(dd->mult_mask);
	v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);

	/* Configure dco and sd_div for dplls that have these fields */
	if (dd->dco_mask) {
		_lookup_dco(clk, &dco, dd->last_rounded_m, dd->last_rounded_n);
		v &= ~(dd->dco_mask);
		v |= dco << __ffs(dd->dco_mask);
	}
	if (dd->sddiv_mask) {
		_lookup_sddiv(clk, &sd_div, dd->last_rounded_m,
			      dd->last_rounded_n);
		v &= ~(dd->sddiv_mask);
		v |= sd_div << __ffs(dd->sddiv_mask);
	}

	/*
	 * Errata i810 - The DPLL controller can get stuck while
	 * transitioning to a power-saving state.  Software must ensure the
	 * DPLL cannot transition to a low-power state while changing M/N
	 * values.  The easiest way to accomplish this is to prevent DPLL
	 * autoidle before doing the M/N re-program.
	 */
	errata_i810 = ti_clk_get_features()->flags & TI_CLK_ERRATA_I810;

	if (errata_i810) {
		ai = omap3_dpll_autoidle_read(clk);
		if (ai) {
			omap3_dpll_deny_idle(clk);

			/* OCP barrier */
			omap3_dpll_autoidle_read(clk);
		}
	}

	ti_clk_ll_ops->clk_writel(v, &dd->mult_div1_reg);

	/* Set 4X multiplier and low-power mode */
	if (dd->m4xen_mask || dd->lpmode_mask) {
		v = ti_clk_ll_ops->clk_readl(&dd->control_reg);

		if (dd->m4xen_mask) {
			if (dd->last_rounded_m4xen)
				v |= dd->m4xen_mask;
			else
				v &= ~dd->m4xen_mask;
		}

		if (dd->lpmode_mask) {
			if (dd->last_rounded_lpmode)
				v |= dd->lpmode_mask;
			else
				v &= ~dd->lpmode_mask;
		}

		ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
	}

	/* We let the clock framework set the other output dividers later */

	/* REVISIT: Set ramp-up delay? */

	_omap3_noncore_dpll_lock(clk);

	if (errata_i810 && ai)
		omap3_dpll_allow_idle(clk);

	return 0;
}

/* Public functions */

/**
 * omap3_dpll_recalc - recalculate DPLL rate
 * @hw: struct clk_hw containing the DPLL
 * @parent_rate: rate of the DPLL's parent clock
 *
 * Recalculate and propagate the DPLL rate.
 */
unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);

	return omap2_get_dpll_rate(clk);
}

/* Non-CORE DPLL (e.g., DPLLs that do not control SDRC) clock functions */

/**
 * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode
 * @hw: struct clk_hw of the DPLL to enable
 *
 * Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock.
 * The choice of modes depends on the DPLL's programmed rate: if it is
 * the same as the DPLL's parent clock's rate, the DPLL will enter
 * bypass; otherwise, it will enter lock.  This code will wait for the
 * DPLL to indicate readiness before returning, unless the DPLL takes
 * too long to enter the target state.  Intended to be used as the
 * struct clk's enable function.  If DPLL3 was passed in, or the DPLL
 * does not support low-power stop, or if the DPLL took too long to
 * enter bypass or lock, return -EINVAL; otherwise, return 0.
 */
int omap3_noncore_dpll_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	int r;
	struct dpll_data *dd;
	struct clk_hw *parent;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (clk->clkdm) {
		r = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
		if (r) {
			WARN(1,
			     "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, clk_hw_get_name(hw),
			     clk->clkdm_name, r);
			return r;
		}
	}

	parent = clk_hw_get_parent(hw);

	if (clk_hw_get_rate(hw) == clk_hw_get_rate(dd->clk_bypass)) {
		WARN_ON(parent != dd->clk_bypass);
		r = _omap3_noncore_dpll_bypass(clk);
	} else {
		WARN_ON(parent != dd->clk_ref);
		r = _omap3_noncore_dpll_lock(clk);
	}

	return r;
}

/**
 * omap3_noncore_dpll_disable - instruct a DPLL to enter low-power stop
 * @hw: struct clk_hw of the DPLL to disable
 *
 * Instructs a non-CORE DPLL to enter low-power stop.  This function is
 * intended for use in struct clkops.  No return value.
 */
void omap3_noncore_dpll_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);

	_omap3_noncore_dpll_stop(clk);
	if (clk->clkdm)
		ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
}

/* Non-CORE DPLL rate set code */

/**
 * omap3_noncore_dpll_determine_rate - determine rate for a DPLL
 * @hw: pointer to the clock to determine rate for
 * @req: target rate request
 *
 * Determines which DPLL mode to use for reaching a desired target rate.
 * Checks whether the DPLL shall be in bypass or locked mode, and if
 * locked, calculates the M,N values for the DPLL via round-rate.
 * Returns 0 on success or a negative error value on failure.
 */
int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;

	if (!req->rate)
		return -EINVAL;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (clk_hw_get_rate(dd->clk_bypass) == req->rate &&
	    (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
		req->best_parent_hw = dd->clk_bypass;
	} else {
		req->rate = omap2_dpll_round_rate(hw, req->rate,
					  &req->best_parent_rate);
		req->best_parent_hw = dd->clk_ref;
	}

	req->best_parent_rate = req->rate;

	return 0;
}

/**
 * omap3_noncore_dpll_set_parent - set parent for a DPLL clock
 * @hw: pointer to the clock to set parent for
 * @index: parent index to select
 *
 * Sets parent for a DPLL clock. This sets the DPLL into bypass or
 * locked mode. Returns 0 on success, negative error value otherwise.
 */
int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	int ret;

	if (!hw)
		return -EINVAL;

	if (index)
		ret = _omap3_noncore_dpll_bypass(clk);
	else
		ret = _omap3_noncore_dpll_lock(clk);

	return ret;
}

/**
 * omap3_noncore_dpll_set_rate - set rate for a DPLL clock
 * @hw: pointer to the clock to set rate for
 * @rate: target rate for the clock
 * @parent_rate: rate of the parent clock
 *
 * Sets rate for a DPLL clock. First checks that the clock's parent is
 * the reference clock (the rate cannot be changed while in bypass
 * mode), then proceeds with the rate change operation. Returns 0 on
 * success, negative error value otherwise.
 */
int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;
	u16 freqsel = 0;
	int ret;

	if (!hw || !rate)
		return -EINVAL;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (clk_hw_get_parent(hw) != dd->clk_ref)
		return -EINVAL;

	if (dd->last_rounded_rate == 0)
		return -EINVAL;

	/* Freqsel is available only on OMAP343X devices */
	if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) {
		freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n);
		WARN_ON(!freqsel);
	}

	pr_debug("%s: %s: set rate: locking rate to %lu.\n", __func__,
		 clk_hw_get_name(hw), rate);

	ret = omap3_noncore_dpll_program(clk, freqsel);

	return ret;
}

/**
 * omap3_noncore_dpll_set_rate_and_parent - set rate and parent for a DPLL clock
 * @hw: pointer to the clock to set rate and parent for
 * @rate: target rate for the DPLL
 * @parent_rate: clock rate of the DPLL parent
 * @index: new parent index for the DPLL, 0 - reference, 1 - bypass
 *
 * Sets rate and parent for a DPLL clock. If the new parent is the
 * bypass clock, only selects the parent. Otherwise proceeds with a
 * rate change, as this will effectively also change the parent as the
 * DPLL is put into locked mode. Returns 0 on success, negative error
 * value otherwise.
 */
int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
					   unsigned long rate,
					   unsigned long parent_rate,
					   u8 index)
{
	int ret;

	if (!hw || !rate)
		return -EINVAL;

	/*
	 * clk-ref is at index 0, in which case we only need to set the
	 * rate; the parent is changed automatically by the lock sequence.
	 * In the clk-bypass case we only need to change the parent.
	 */
	if (index)
		ret = omap3_noncore_dpll_set_parent(hw, index);
	else
		ret = omap3_noncore_dpll_set_rate(hw, rate, parent_rate);

	return ret;
}
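
/*
 * The enable/disable and rate operations above are typically wired into a
 * struct clk_ops table elsewhere in the TI clock code; a rough sketch of
 * such a table (contents assumed here for illustration) would be:
 *
 *	static const struct clk_ops dpll_ck_ops = {
 *		.enable			= &omap3_noncore_dpll_enable,
 *		.disable		= &omap3_noncore_dpll_disable,
 *		.determine_rate		= &omap3_noncore_dpll_determine_rate,
 *		.set_rate		= &omap3_noncore_dpll_set_rate,
 *		.set_parent		= &omap3_noncore_dpll_set_parent,
 *		.set_rate_and_parent	= &omap3_noncore_dpll_set_rate_and_parent,
 *	};
 */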

/* DPLL autoidle read/set code */

/**
 * omap3_dpll_autoidle_read - read a DPLL's autoidle bits
 * @clk: struct clk_hw_omap * of the DPLL to read
 *
 * Return the DPLL's autoidle bits, shifted down to bit 0.  Returns
 * -EINVAL if passed a null pointer or if the struct clk does not
 * appear to refer to a DPLL.
 */
static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk)
{
	const struct dpll_data *dd;
	u32 v;

	if (!clk || !clk->dpll_data)
		return -EINVAL;

	dd = clk->dpll_data;

	if (!dd->autoidle_mask)
		return -EINVAL;

	v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
	v &= dd->autoidle_mask;
	v >>= __ffs(dd->autoidle_mask);

	return v;
}

/**
 * omap3_dpll_allow_idle - enable DPLL autoidle bits
 * @clk: struct clk_hw_omap * of the DPLL to operate on
 *
 * Enable DPLL automatic idle control.  This automatic idle mode
 * switching takes effect only when the DPLL is locked, at least on
 * OMAP3430.  The DPLL will enter low-power stop when its downstream
 * clocks are gated.  No return value.
 */
static void omap3_dpll_allow_idle(struct clk_hw_omap *clk)
{
	const struct dpll_data *dd;
	u32 v;

	if (!clk || !clk->dpll_data)
		return;

	dd = clk->dpll_data;

	if (!dd->autoidle_mask)
		return;

	/*
	 * REVISIT: CORE DPLL can optionally enter low-power bypass
	 * by writing 0x5 instead of 0x1.  Add some mechanism to
	 * optionally enter this mode.
	 */
	v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
	v &= ~dd->autoidle_mask;
	v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask);
	ti_clk_ll_ops->clk_writel(v, &dd->autoidle_reg);
}

/**
 * omap3_dpll_deny_idle - prevent DPLL from automatically idling
 * @clk: struct clk_hw_omap * of the DPLL to operate on
 *
 * Disable DPLL automatic idle control.  No return value.
 */
static void omap3_dpll_deny_idle(struct clk_hw_omap *clk)
{
	const struct dpll_data *dd;
	u32 v;

	if (!clk || !clk->dpll_data)
		return;

	dd = clk->dpll_data;

	if (!dd->autoidle_mask)
		return;

	v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
	v &= ~dd->autoidle_mask;
	v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask);
	ti_clk_ll_ops->clk_writel(v, &dd->autoidle_reg);
}
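
/*
 * Example (field position assumed for illustration): with an
 * autoidle_mask covering bits 2:0, omap3_dpll_allow_idle() leaves the
 * AUTO_* field at DPLL_AUTOIDLE_LOW_POWER_STOP (0x1), while
 * omap3_dpll_deny_idle() resets it to DPLL_AUTOIDLE_DISABLE (0x0).
 */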

/* Clock control for DPLL outputs */

/* Find the parent DPLL for the given clkoutx2 clock */
static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
{
	struct clk_hw_omap *pclk = NULL;

	/* Walk up the parents of clk, looking for a DPLL */
	do {
		do {
			hw = clk_hw_get_parent(hw);
		} while (hw && (!omap2_clk_is_hw_omap(hw)));
		if (!hw)
			break;
		pclk = to_clk_hw_omap(hw);
	} while (pclk && !pclk->dpll_data);

	/* clk does not have a DPLL as a parent?  error in the clock data */
	if (!pclk) {
		WARN_ON(1);
		return NULL;
	}

	return pclk;
}

/**
 * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
 * @hw: pointer to the clkoutx2 struct clk_hw
 * @parent_rate: rate of the parent DPLL clock
 *
 * Using parent clock DPLL data, look up DPLL state.  If locked, set our
 * rate to the dpll_clk * 2; otherwise, just use dpll_clk.
 */
unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
				    unsigned long parent_rate)
{
	const struct dpll_data *dd;
	unsigned long rate;
	u32 v;
	struct clk_hw_omap *pclk = NULL;

	if (!parent_rate)
		return 0;

	pclk = omap3_find_clkoutx2_dpll(hw);

	if (!pclk)
		return 0;

	dd = pclk->dpll_data;

	WARN_ON(!dd->enable_mask);

	v = ti_clk_ll_ops->clk_readl(&dd->control_reg) & dd->enable_mask;
	v >>= __ffs(dd->enable_mask);
	if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE))
		rate = parent_rate;
	else
		rate = parent_rate * 2;
	return rate;
}

/**
 * omap3_core_dpll_save_context - Save the m and n values of the divider
 * @hw: pointer to struct clk_hw
 *
 * Before the DPLL registers are lost, save the last rounded m and n
 * values and the enable mask.
 */
int omap3_core_dpll_save_context(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;
	u32 v;

	dd = clk->dpll_data;

	v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
	clk->context = (v & dd->enable_mask) >> __ffs(dd->enable_mask);

	if (clk->context == DPLL_LOCKED) {
		v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
		dd->last_rounded_m = (v & dd->mult_mask) >>
						__ffs(dd->mult_mask);
		dd->last_rounded_n = ((v & dd->div1_mask) >>
						__ffs(dd->div1_mask)) + 1;
	}

	return 0;
}

/**
 * omap3_core_dpll_restore_context - restore the m and n values of the divider
 * @hw: pointer to struct clk_hw
 *
 * Restore the last rounded m and n values and the enable mask.
 */
void omap3_core_dpll_restore_context(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	const struct dpll_data *dd;
	u32 v;

	dd = clk->dpll_data;

	if (clk->context == DPLL_LOCKED) {
		_omap3_dpll_write_clken(clk, 0x4);
		_omap3_wait_dpll_status(clk, 0);

		v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
		v &= ~(dd->mult_mask | dd->div1_mask);
		v |= dd->last_rounded_m << __ffs(dd->mult_mask);
		v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);
		ti_clk_ll_ops->clk_writel(v, &dd->mult_div1_reg);

		_omap3_dpll_write_clken(clk, DPLL_LOCKED);
		_omap3_wait_dpll_status(clk, 1);
	} else {
		_omap3_dpll_write_clken(clk, clk->context);
	}
}

/**
 * omap3_noncore_dpll_save_context - Save the m and n values of the divider
 * @hw: pointer to struct clk_hw
 *
 * Before the DPLL registers are lost, save the last rounded m and n
 * values and the enable mask.
 */
int omap3_noncore_dpll_save_context(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;
	u32 v;

	dd = clk->dpll_data;

	v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
	clk->context = (v & dd->enable_mask) >> __ffs(dd->enable_mask);

	if (clk->context == DPLL_LOCKED) {
		v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
		dd->last_rounded_m = (v & dd->mult_mask) >>
						__ffs(dd->mult_mask);
		dd->last_rounded_n = ((v & dd->div1_mask) >>
						__ffs(dd->div1_mask)) + 1;
	}

	return 0;
}

/**
 * omap3_noncore_dpll_restore_context - restore the m and n values of the divider
 * @hw: pointer to struct clk_hw
 *
 * Restore the last rounded m and n values and the enable mask.
 */
void omap3_noncore_dpll_restore_context(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	const struct dpll_data *dd;
	u32 ctrl, mult_div1;

	dd = clk->dpll_data;

	ctrl = ti_clk_ll_ops->clk_readl(&dd->control_reg);
	mult_div1 = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);

	if (clk->context == ((ctrl & dd->enable_mask) >>
			     __ffs(dd->enable_mask)) &&
	    dd->last_rounded_m == ((mult_div1 & dd->mult_mask) >>
				   __ffs(dd->mult_mask)) &&
	    dd->last_rounded_n == ((mult_div1 & dd->div1_mask) >>
				   __ffs(dd->div1_mask)) + 1) {
		/* nothing to be done */
		return;
	}

	if (clk->context == DPLL_LOCKED)
		omap3_noncore_dpll_program(clk, 0);
	else
		_omap3_dpll_write_clken(clk, clk->context);
}

/* OMAP3/4 non-CORE DPLL clkops */
const struct clk_hw_omap_ops clkhwops_omap3_dpll = {
	.allow_idle	= omap3_dpll_allow_idle,
	.deny_idle	= omap3_dpll_deny_idle,
};

/**
 * omap3_dpll4_set_rate - set rate for omap3 per-dpll
 * @hw: clock to change
 * @rate: target rate for clock
 * @parent_rate: rate of the parent clock
 *
 * Check whether the current SoC supports the per-dpll reprogram
 * operation, and then do the rate change if supported. Returns
 * -EINVAL if not supported, 0 for success, and potential error codes
 * from the clock rate change.
 */
int omap3_dpll4_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long parent_rate)
{
	/*
	 * According to the 12-5 CDP code from TI, "Limitation 2.5"
	 * on 3430ES1 prevents us from changing DPLL multipliers or dividers
	 * on DPLL4.
	 */
	if (ti_clk_get_features()->flags & TI_CLK_DPLL4_DENY_REPROGRAM) {
		pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n");
		return -EINVAL;
	}

	return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
}

/**
 * omap3_dpll4_set_rate_and_parent - set rate and parent for omap3 per-dpll
 * @hw: clock to change
 * @rate: target rate for clock
 * @parent_rate: rate of the parent clock
 * @index: parent index, 0 - reference clock, 1 - bypass clock
 *
 * Check whether the current SoC supports the per-dpll reprogram
 * operation, and then do the rate + parent change if supported.
 * Returns -EINVAL if not supported, 0 for success, and potential error
 * codes from the clock rate change.
 */
int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate, u8 index)
{
	if (ti_clk_get_features()->flags & TI_CLK_DPLL4_DENY_REPROGRAM) {
		pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n");
		return -EINVAL;
	}

	return omap3_noncore_dpll_set_rate_and_parent(hw, rate, parent_rate,
						      index);
}

/* Apply DM3730 errata sprz319 advisory 2.1. */
static bool omap3_dpll5_apply_errata(struct clk_hw *hw,
				     unsigned long parent_rate)
{
	struct omap3_dpll5_settings {
		unsigned int rate, m, n;
	};

	static const struct omap3_dpll5_settings precomputed[] = {
		/*
		 * From DM3730 errata advisory 2.1, tables 35 and 36.
		 * The N value is increased by 1 compared to the tables, as
		 * the errata lists register values while last_rounded_n is
		 * the real divider value.
		 */
		{ 12000000,  80,  0 + 1 },
		{ 13000000, 443,  5 + 1 },
		{ 19200000,  50,  0 + 1 },
		{ 26000000, 443, 11 + 1 },
		{ 38400000,  25,  0 + 1 }
	};
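
	/*
	 * Each entry above yields a DPLL5 rate of roughly 960 MHz, i.e.
	 * 8 * OMAP3_DPLL5_FREQ_FOR_USBHOST (for example, 12 MHz * 80 / 1 =
	 * 960 MHz and 13 MHz * 443 / 6 is approximately 959.83 MHz).
	 */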

	const struct omap3_dpll5_settings *d;
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(precomputed); ++i) {
		if (parent_rate == precomputed[i].rate)
			break;
	}

	if (i == ARRAY_SIZE(precomputed))
		return false;

	d = &precomputed[i];

	/* Update the M, N and rounded rate values and program the DPLL. */
	dd = clk->dpll_data;
	dd->last_rounded_m = d->m;
	dd->last_rounded_n = d->n;
	dd->last_rounded_rate = div_u64((u64)parent_rate * d->m, d->n);
	omap3_noncore_dpll_program(clk, 0);

	return true;
}

/**
 * omap3_dpll5_set_rate - set rate for omap3 dpll5
 * @hw: clock to change
 * @rate: target rate for clock
 * @parent_rate: rate of the parent clock
 *
 * Set rate for the DPLL5 clock. Apply the sprz319 advisory 2.1 on OMAP36xx if
 * the DPLL is used for USB host (detected through the requested rate).
 */
int omap3_dpll5_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long parent_rate)
{
	if (rate == OMAP3_DPLL5_FREQ_FOR_USBHOST * 8) {
		if (omap3_dpll5_apply_errata(hw, parent_rate))
			return 0;
	}

	return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
}