// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mach-omap1/clock.c
 *
 *  Copyright (C) 2004 - 2005, 2009-2010 Nokia Corporation
 *  Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified to use omap shared clock framework by
 *  Tony Lindgren <tony@atomide.com>
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>

#include <asm/mach-types.h>

#include <mach/hardware.h>

#include "soc.h"
#include "iomap.h"
#include "clock.h"
#include "opp.h"
#include "sram.h"

__u32 arm_idlect1_mask;
struct clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;

static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);

/*
 * Omap1 specific clock functions
 */

unsigned long omap1_uart_recalc(struct clk *clk)
{
	unsigned int val = __raw_readl(clk->enable_reg);

	return val & (1 << clk->enable_bit) ? 48000000 : 12000000;
}

unsigned long omap1_sossi_recalc(struct clk *clk)
{
	u32 div = omap_readl(MOD_CONF_CTRL_1);

	div = (div >> 17) & 0x7;
	div++;

	return clk->parent->rate / div;
}

static void omap1_clk_allow_idle(struct clk *clk)
{
	struct arm_idlect1_clk *iclk = (struct arm_idlect1_clk *)clk;

	if (!(clk->flags & CLOCK_IDLE_CONTROL))
		return;

	if (iclk->no_idle_count > 0 && !(--iclk->no_idle_count))
		arm_idlect1_mask |= 1 << iclk->idlect_shift;
}

static void omap1_clk_deny_idle(struct clk *clk)
{
	struct arm_idlect1_clk *iclk = (struct arm_idlect1_clk *)clk;

	if (!(clk->flags & CLOCK_IDLE_CONTROL))
		return;

	if (iclk->no_idle_count++ == 0)
		arm_idlect1_mask &= ~(1 << iclk->idlect_shift);
}

static __u16 verify_ckctl_value(__u16 newval)
{
	/* This function checks for the following limitations set
	 * by the hardware (all conditions must be true):
	 * DSPMMU_CK == DSP_CK  or  DSPMMU_CK == DSP_CK/2
	 * ARM_CK >= TC_CK
	 * DSP_CK >= TC_CK
	 * DSPMMU_CK >= TC_CK
	 *
	 * In addition, the following rules are enforced:
	 * LCD_CK <= TC_CK
	 * ARMPER_CK <= TC_CK
	 *
	 * However, maximum frequencies are not checked for!
	 */
	__u8 per_exp;
	__u8 lcd_exp;
	__u8 arm_exp;
	__u8 dsp_exp;
	__u8 tc_exp;
	__u8 dspmmu_exp;

	per_exp = (newval >> CKCTL_PERDIV_OFFSET) & 3;
	lcd_exp = (newval >> CKCTL_LCDDIV_OFFSET) & 3;
	arm_exp = (newval >> CKCTL_ARMDIV_OFFSET) & 3;
	dsp_exp = (newval >> CKCTL_DSPDIV_OFFSET) & 3;
	tc_exp = (newval >> CKCTL_TCDIV_OFFSET) & 3;
	dspmmu_exp = (newval >> CKCTL_DSPMMUDIV_OFFSET) & 3;

	if (dspmmu_exp < dsp_exp)
		dspmmu_exp = dsp_exp;
	if (dspmmu_exp > dsp_exp + 1)
		dspmmu_exp = dsp_exp + 1;
	if (tc_exp < arm_exp)
		tc_exp = arm_exp;
	if (tc_exp < dspmmu_exp)
		tc_exp = dspmmu_exp;
	if (tc_exp > lcd_exp)
		lcd_exp = tc_exp;
	if (tc_exp > per_exp)
		per_exp = tc_exp;

	newval &= 0xf000;
	newval |= per_exp << CKCTL_PERDIV_OFFSET;
	newval |= lcd_exp << CKCTL_LCDDIV_OFFSET;
	newval |= arm_exp << CKCTL_ARMDIV_OFFSET;
	newval |= dsp_exp << CKCTL_DSPDIV_OFFSET;
	newval |= tc_exp << CKCTL_TCDIV_OFFSET;
	newval |= dspmmu_exp << CKCTL_DSPMMUDIV_OFFSET;

	return newval;
}
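
/*
 * Illustrative example (editor's note, not from the original source): the
 * clamping above only ever slows clocks down to satisfy the constraints.
 * If the caller requests arm_exp = 1 (ARM_CK = CK_DPLL1/2) while tc_exp = 0
 * (TC_CK = CK_DPLL1), TC_CK would exceed ARM_CK, so tc_exp is raised to 1;
 * lcd_exp and per_exp are then raised to at least tc_exp so that LCD_CK and
 * ARMPER_CK stay at or below TC_CK.
 */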

static int calc_dsor_exp(struct clk *clk, unsigned long rate)
{
	/* Note: If the target frequency is too low, this function will
	 * return 4, which is an invalid value. The caller must check for
	 * this value and act accordingly.
	 *
	 * Note: This function does not check for the following limitations
	 * set by the hardware (all conditions must be true):
	 * DSPMMU_CK == DSP_CK  or  DSPMMU_CK == DSP_CK/2
	 * ARM_CK >= TC_CK
	 * DSP_CK >= TC_CK
	 * DSPMMU_CK >= TC_CK
	 */
	unsigned long realrate;
	struct clk *parent;
	unsigned dsor_exp;

	parent = clk->parent;
	if (unlikely(parent == NULL))
		return -EIO;

	realrate = parent->rate;
	for (dsor_exp = 0; dsor_exp < 4; dsor_exp++) {
		if (realrate <= rate)
			break;

		realrate /= 2;
	}

	return dsor_exp;
}
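
/*
 * Illustrative example (hypothetical numbers): with a 192 MHz parent and a
 * requested rate of 60 MHz, the loop above tries exponents 0 (192 MHz) and
 * 1 (96 MHz), both too fast, and stops at exponent 2 (48 MHz <= 60 MHz).
 * The resulting divisor is 1 << 2 = 4, i.e. the clock runs at 48 MHz.
 */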

unsigned long omap1_ckctl_recalc(struct clk *clk)
{
	/* Calculate divisor encoded as 2-bit exponent */
	int dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset));

	return clk->parent->rate / dsor;
}

unsigned long omap1_ckctl_recalc_dsp_domain(struct clk *clk)
{
	int dsor;

	/* Calculate divisor encoded as 2-bit exponent
	 *
	 * The clock control bits are in the DSP domain,
	 * so api_ck is needed for access.
	 * Note that DSP_CKCTL virt addr = phys addr, so
	 * we must use __raw_readw() instead of omap_readw().
	 */
	omap1_clk_enable(api_ck_p);
	dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));
	omap1_clk_disable(api_ck_p);

	return clk->parent->rate / dsor;
}

/* MPU virtual clock functions */
int omap1_select_table_rate(struct clk *clk, unsigned long rate)
{
	/* Find the highest supported frequency <= rate and switch to it */
	struct mpu_rate *ptr;
	unsigned long ref_rate;

	ref_rate = ck_ref_p->rate;

	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
		if (!(ptr->flags & cpu_mask))
			continue;

		if (ptr->xtal != ref_rate)
			continue;

		/* Can check only after xtal frequency check */
		if (ptr->rate <= rate)
			break;
	}

	if (!ptr->rate)
		return -EINVAL;

	/*
	 * In most cases we should not need to reprogram the DPLL.
	 * Reprogramming the DPLL is tricky; it must be done from SRAM.
	 */
	omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);

	/* XXX Do we need to recalculate the tree below DPLL1 at this point? */
	ck_dpll1_p->rate = ptr->pll_rate;

	return 0;
}
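
/*
 * Illustrative example (editor's note, hypothetical request): the rate table
 * is walked from the fastest entry down, so with a 12 MHz ck_ref and a
 * request for 168 MHz the loop above skips entries for other SoCs (cpu_mask)
 * and other crystal frequencies, then stops at the first remaining entry
 * whose rate is <= 168 MHz; that entry's DPLLCTL and CKCTL values are then
 * programmed from SRAM.
 */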

int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate)
{
	int dsor_exp;
	u16 regval;

	dsor_exp = calc_dsor_exp(clk, rate);
	if (dsor_exp > 3)
		dsor_exp = -EINVAL;
	if (dsor_exp < 0)
		return dsor_exp;

	regval = __raw_readw(DSP_CKCTL);
	regval &= ~(3 << clk->rate_offset);
	regval |= dsor_exp << clk->rate_offset;
	__raw_writew(regval, DSP_CKCTL);
	clk->rate = clk->parent->rate / (1 << dsor_exp);

	return 0;
}

long omap1_clk_round_rate_ckctl_arm(struct clk *clk, unsigned long rate)
{
	int dsor_exp = calc_dsor_exp(clk, rate);

	if (dsor_exp < 0)
		return dsor_exp;
	if (dsor_exp > 3)
		dsor_exp = 3;
	return clk->parent->rate / (1 << dsor_exp);
}

int omap1_clk_set_rate_ckctl_arm(struct clk *clk, unsigned long rate)
{
	int dsor_exp;
	u16 regval;

	dsor_exp = calc_dsor_exp(clk, rate);
	if (dsor_exp > 3)
		dsor_exp = -EINVAL;
	if (dsor_exp < 0)
		return dsor_exp;

	regval = omap_readw(ARM_CKCTL);
	regval &= ~(3 << clk->rate_offset);
	regval |= dsor_exp << clk->rate_offset;
	regval = verify_ckctl_value(regval);
	omap_writew(regval, ARM_CKCTL);
	clk->rate = clk->parent->rate / (1 << dsor_exp);
	return 0;
}

long omap1_round_to_table_rate(struct clk *clk, unsigned long rate)
{
	/* Find the highest supported frequency <= rate */
	struct mpu_rate *ptr;
	long highest_rate;
	unsigned long ref_rate;

	ref_rate = ck_ref_p->rate;

	highest_rate = -EINVAL;

	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
		if (!(ptr->flags & cpu_mask))
			continue;

		if (ptr->xtal != ref_rate)
			continue;

		highest_rate = ptr->rate;

		/* Can check only after xtal frequency check */
		if (ptr->rate <= rate)
			break;
	}

	return highest_rate;
}

static unsigned calc_ext_dsor(unsigned long rate)
{
	unsigned dsor;

	/* MCLK and BCLK divisor selection is not linear:
	 * freq = 96MHz / dsor
	 *
	 * RATIO_SEL range: dsor <-> RATIO_SEL
	 * 0..6: (RATIO_SEL+2) <-> (dsor-2)
	 * 6..48:  (8+(RATIO_SEL-6)*2) <-> ((dsor-8)/2+6)
	 * Minimum dsor is 2 and maximum is 96. Odd divisors starting from 9
	 * cannot be used.
	 */
	for (dsor = 2; dsor < 96; ++dsor) {
		if ((dsor & 1) && dsor > 8)
			continue;
		if (rate >= 96000000 / dsor)
			break;
	}
	return dsor;
}
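
/*
 * Illustrative example (hypothetical rates): a request for 24 MHz selects
 * dsor = 4 (96 MHz / 4 = 24 MHz).  A request for 11 MHz skips dsor = 9
 * (odd divisors above 8 are not supported) and settles on dsor = 10,
 * giving 9.6 MHz, the closest available rate not above the request.
 */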

/* XXX Only needed on 1510 */
int omap1_set_uart_rate(struct clk *clk, unsigned long rate)
{
	unsigned int val;

	val = __raw_readl(clk->enable_reg);
	if (rate == 12000000)
		val &= ~(1 << clk->enable_bit);
	else if (rate == 48000000)
		val |= (1 << clk->enable_bit);
	else
		return -EINVAL;
	__raw_writel(val, clk->enable_reg);
	clk->rate = rate;

	return 0;
}

/* External clock (MCLK & BCLK) functions */
int omap1_set_ext_clk_rate(struct clk *clk, unsigned long rate)
{
	unsigned dsor;
	__u16 ratio_bits;

	dsor = calc_ext_dsor(rate);
	clk->rate = 96000000 / dsor;
	if (dsor > 8)
		ratio_bits = ((dsor - 8) / 2 + 6) << 2;
	else
		ratio_bits = (dsor - 2) << 2;

	ratio_bits |= __raw_readw(clk->enable_reg) & ~0xfd;
	__raw_writew(ratio_bits, clk->enable_reg);

	return 0;
}
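
/*
 * Illustrative example (editor's note, not from the original source):
 * dsor = 4 encodes as RATIO_SEL = 4 - 2 = 2, i.e. ratio_bits = 2 << 2 = 0x08;
 * dsor = 10 encodes as RATIO_SEL = (10 - 8) / 2 + 6 = 7, i.e. 7 << 2 = 0x1c.
 * The read-modify-write above preserves the register bits outside the
 * RATIO_SEL field, while the ~0xfd mask keeps bit 0 cleared, matching the
 * 96 MHz APLL selection made in omap1_init_ext_clk() below.
 */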

int omap1_set_sossi_rate(struct clk *clk, unsigned long rate)
{
	u32 l;
	int div;
	unsigned long p_rate;

	p_rate = clk->parent->rate;
	/* Round towards slower frequency */
	div = (p_rate + rate - 1) / rate;
	div--;
	if (div < 0 || div > 7)
		return -EINVAL;

	l = omap_readl(MOD_CONF_CTRL_1);
	l &= ~(7 << 17);
	l |= div << 17;
	omap_writel(l, MOD_CONF_CTRL_1);

	clk->rate = p_rate / (div + 1);

	return 0;
}
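
/*
 * Illustrative example (hypothetical rates): with a 48 MHz parent and a
 * request for 13 MHz, the round-up division gives div = 4 - 1 = 3, so the
 * MOD_CONF_CTRL_1 field is programmed with 3 and the resulting rate is
 * 48 MHz / 4 = 12 MHz, rounded down towards the slower frequency.
 */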

long omap1_round_ext_clk_rate(struct clk *clk, unsigned long rate)
{
	return 96000000 / calc_ext_dsor(rate);
}

void omap1_init_ext_clk(struct clk *clk)
{
	unsigned dsor;
	__u16 ratio_bits;

	/* Determine current rate and ensure clock is based on 96MHz APLL */
	ratio_bits = __raw_readw(clk->enable_reg) & ~1;
	__raw_writew(ratio_bits, clk->enable_reg);

	ratio_bits = (ratio_bits & 0xfc) >> 2;
	if (ratio_bits > 6)
		dsor = (ratio_bits - 6) * 2 + 8;
	else
		dsor = ratio_bits + 2;

	clk->rate = 96000000 / dsor;
}

int omap1_clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = omap1_clk_enable(clk->parent);
			if (ret)
				goto err;

			if (clk->flags & CLOCK_NO_IDLE_PARENT)
				omap1_clk_deny_idle(clk->parent);
		}

		ret = clk->ops->enable(clk);
		if (ret) {
			if (clk->parent)
				omap1_clk_disable(clk->parent);
			goto err;
		}
	}
	return ret;

err:
	clk->usecount--;
	return ret;
}

void omap1_clk_disable(struct clk *clk)
{
	if (clk->usecount > 0 && !(--clk->usecount)) {
		clk->ops->disable(clk);
		if (likely(clk->parent)) {
			omap1_clk_disable(clk->parent);
			if (clk->flags & CLOCK_NO_IDLE_PARENT)
				omap1_clk_allow_idle(clk->parent);
		}
	}
}
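
/*
 * Illustrative note (editor's note, not from the original source): the
 * usecount handling above means only the first enable and the last disable
 * touch the hardware.  If two drivers enable the same clock, the enable
 * register is written once and the parent chain is enabled recursively; the
 * clock is only gated again once both drivers have called
 * omap1_clk_disable().
 */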

static int omap1_clk_enable_generic(struct clk *clk)
{
	__u16 regval16;
	__u32 regval32;

	if (unlikely(clk->enable_reg == NULL)) {
		printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
		       clk->name);
		return -EINVAL;
	}

	if (clk->flags & ENABLE_REG_32BIT) {
		regval32 = __raw_readl(clk->enable_reg);
		regval32 |= (1 << clk->enable_bit);
		__raw_writel(regval32, clk->enable_reg);
	} else {
		regval16 = __raw_readw(clk->enable_reg);
		regval16 |= (1 << clk->enable_bit);
		__raw_writew(regval16, clk->enable_reg);
	}

	return 0;
}

static void omap1_clk_disable_generic(struct clk *clk)
{
	__u16 regval16;
	__u32 regval32;

	if (clk->enable_reg == NULL)
		return;

	if (clk->flags & ENABLE_REG_32BIT) {
		regval32 = __raw_readl(clk->enable_reg);
		regval32 &= ~(1 << clk->enable_bit);
		__raw_writel(regval32, clk->enable_reg);
	} else {
		regval16 = __raw_readw(clk->enable_reg);
		regval16 &= ~(1 << clk->enable_bit);
		__raw_writew(regval16, clk->enable_reg);
	}
}

const struct clkops clkops_generic = {
	.enable		= omap1_clk_enable_generic,
	.disable	= omap1_clk_disable_generic,
};

static int omap1_clk_enable_dsp_domain(struct clk *clk)
{
	int retval;

	retval = omap1_clk_enable(api_ck_p);
	if (!retval) {
		retval = omap1_clk_enable_generic(clk);
		omap1_clk_disable(api_ck_p);
	}

	return retval;
}

static void omap1_clk_disable_dsp_domain(struct clk *clk)
{
	if (omap1_clk_enable(api_ck_p) == 0) {
		omap1_clk_disable_generic(clk);
		omap1_clk_disable(api_ck_p);
	}
}

const struct clkops clkops_dspck = {
	.enable		= omap1_clk_enable_dsp_domain,
	.disable	= omap1_clk_disable_dsp_domain,
};

/* XXX SYSC register handling does not belong in the clock framework */
static int omap1_clk_enable_uart_functional_16xx(struct clk *clk)
{
	int ret;
	struct uart_clk *uclk;

	ret = omap1_clk_enable_generic(clk);
	if (ret == 0) {
		/* Set smart idle acknowledgement mode */
		uclk = (struct uart_clk *)clk;
		omap_writeb((omap_readb(uclk->sysc_addr) & ~0x10) | 8,
			    uclk->sysc_addr);
	}

	return ret;
}

/* XXX SYSC register handling does not belong in the clock framework */
static void omap1_clk_disable_uart_functional_16xx(struct clk *clk)
{
	struct uart_clk *uclk;

	/* Set force idle acknowledgement mode */
	uclk = (struct uart_clk *)clk;
	omap_writeb((omap_readb(uclk->sysc_addr) & ~0x18), uclk->sysc_addr);

	omap1_clk_disable_generic(clk);
}

/* XXX SYSC register handling does not belong in the clock framework */
const struct clkops clkops_uart_16xx = {
	.enable		= omap1_clk_enable_uart_functional_16xx,
	.disable	= omap1_clk_disable_uart_functional_16xx,
};

long omap1_clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (clk->round_rate != NULL)
		return clk->round_rate(clk, rate);

	return clk->rate;
}

int omap1_clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret = -EINVAL;

	if (clk->set_rate)
		ret = clk->set_rate(clk, rate);
	return ret;
}

/*
 * Omap1 clock reset and init functions
 */

#ifdef CONFIG_OMAP_RESET_CLOCKS

void omap1_clk_disable_unused(struct clk *clk)
{
	__u32 regval32;

	/* Clocks in the DSP domain need api_ck. Just assume the bootloader
	 * has not enabled any DSP clocks */
	if (clk->enable_reg == DSP_IDLECT2) {
		pr_info("Skipping reset check for DSP domain clock \"%s\"\n",
			clk->name);
		return;
	}

	/* Is the clock already disabled? */
	if (clk->flags & ENABLE_REG_32BIT)
		regval32 = __raw_readl(clk->enable_reg);
	else
		regval32 = __raw_readw(clk->enable_reg);

	if ((regval32 & (1 << clk->enable_bit)) == 0)
		return;

	printk(KERN_INFO "Disabling unused clock \"%s\"... ", clk->name);
	clk->ops->disable(clk);
	printk(" done\n");
}

#endif

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap1_clk_enable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (clk->usecount == 0) {
		pr_err("Trying to disable clock %s with 0 usecount\n",
		       clk->name);
		WARN_ON(1);
		goto out;
	}

	omap1_clk_disable(clk);

out:
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long flags;
	unsigned long ret;

	if (clk == NULL || IS_ERR(clk))
		return 0;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = clk->rate;
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_get_rate);

/*
 * Optional clock functions defined in include/linux/clk.h
 */

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	long ret;

	if (clk == NULL || IS_ERR(clk))
		return 0;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap1_clk_round_rate(clk, rate);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (clk == NULL || IS_ERR(clk))
		return ret;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap1_clk_set_rate(clk, rate);
	if (ret == 0)
		propagate_rate(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	WARN_ONCE(1, "clk_set_parent() not implemented for OMAP1\n");

	return -EINVAL;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL(clk_get_parent);
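
/*
 * Illustrative consumer sequence (editor's note; hypothetical device and
 * clock name, not from the original source): a driver would typically look
 * up and control a clock through the clkdev interface, e.g.
 *
 *	struct clk *uart_ck = clk_get(dev, "uart1_ck");
 *
 *	if (!IS_ERR(uart_ck)) {
 *		clk_enable(uart_ck);
 *		clk_set_rate(uart_ck, 48000000);
 *		...
 *		clk_disable(uart_ck);
 *		clk_put(uart_ck);
 *	}
 *
 * clk_get()/clk_put() come from the common clkdev code; the enable, disable
 * and set_rate calls land in the wrappers above.
 */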

/*
 * OMAP specific clock functions shared between omap1 and omap2
 */

/* Used for clocks that always have the same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent->rate;
}

/*
 * Used for clocks that have the same value as the parent clock,
 * divided by some factor
 */
unsigned long omap_fixed_divisor_recalc(struct clk *clk)
{
	WARN_ON(!clk->fixed_div);

	return clk->parent->rate / clk->fixed_div;
}

void clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/* now do the debugfs renaming to reattach the child
	   to the proper parent */
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->recalc)
			clkp->rate = clkp->recalc(clkp);
		propagate_rate(clkp);
	}
}
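
/*
 * Illustrative example (editor's note, not from the original source): after
 * omap1_select_table_rate() updates ck_dpll1's rate, propagate_rate() walks
 * the child list recursively, so a child whose .recalc is
 * omap1_ckctl_recalc() picks up the new parent rate, and its own children
 * are updated in turn.
 */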

static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent); if each clock's
 * .recalc is set correctly, the new rates are also propagated to the
 * clock's children.  Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->recalc)
			clkp->rate = clkp->recalc(clkp);
		propagate_rate(clkp);
	}
}

/**
 * clk_preinit - initialize any fields in the struct clk before clk init
 * @clk: struct clk * to initialize
 *
 * Initialize any struct clk fields needed before normal clk initialization
 * can run.  No return value.
 */
void clk_preinit(struct clk *clk)
{
	INIT_LIST_HEAD(&clk->children);
}

int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clocks_mutex);
	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clocks);
	if (clk->init)
		clk->init(clk);
	mutex_unlock(&clocks_mutex);

	return 0;
}
EXPORT_SYMBOL(clk_register);

void clk_unregister(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return;

	mutex_lock(&clocks_mutex);
	list_del(&clk->sibling);
	list_del(&clk->node);
	mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);

void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clocks, node)
		if (clkp->flags & ENABLE_ON_INIT)
			clk_enable(clkp);
}

/**
 * omap_clk_get_by_name - locate OMAP struct clk by its name
 * @name: name of the struct clk to locate
 *
 * Locate an OMAP struct clk by its name.  Assumes that struct clk
 * names are unique.  Returns NULL if not found or a pointer to the
 * struct clk if found.
 */
struct clk *omap_clk_get_by_name(const char *name)
{
	struct clk *c;
	struct clk *ret = NULL;

	mutex_lock(&clocks_mutex);

	list_for_each_entry(c, &clocks, node) {
		if (!strcmp(c->name, name)) {
			ret = c;
			break;
		}
	}

	mutex_unlock(&clocks_mutex);

	return ret;
}

int omap_clk_enable_autoidle_all(void)
{
	struct clk *c;
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);

	list_for_each_entry(c, &clocks, node)
		if (c->ops->allow_idle)
			c->ops->allow_idle(c);

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}

int omap_clk_disable_autoidle_all(void)
{
	struct clk *c;
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);

	list_for_each_entry(c, &clocks, node)
		if (c->ops->deny_idle)
			c->ops->deny_idle(c);

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}

/*
 * Low level helpers
 */
static int clkll_enable_null(struct clk *clk)
{
	return 0;
}

static void clkll_disable_null(struct clk *clk)
{
}

const struct clkops clkops_null = {
	.enable		= clkll_enable_null,
	.disable	= clkll_disable_null,
};

/*
 * Dummy clock
 *
 * Used for clock aliases that are needed on some OMAPs, but not others
 */
struct clk dummy_ck = {
	.name	= "dummy",
	.ops	= &clkops_null,
};

#ifdef CONFIG_OMAP_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
static int __init clk_disable_unused(void)
{
	struct clk *ck;
	unsigned long flags;

	pr_info("clock: disabling unused clocks to save power\n");

	spin_lock_irqsave(&clockfw_lock, flags);
	list_for_each_entry(ck, &clocks, node) {
		if (ck->ops == &clkops_null)
			continue;

		if (ck->usecount > 0 || !ck->enable_reg)
			continue;

		omap1_clk_disable_unused(ck);
	}
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
late_initcall(clk_disable_unused);
late_initcall(omap_clk_enable_autoidle_all);
#endif

#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
/*
 *	debugfs support to trace clock tree hierarchy and attributes
 */

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *clk_debugfs_root;

static int debug_clock_show(struct seq_file *s, void *unused)
{
	struct clk *c;
	struct clk *pa;

	mutex_lock(&clocks_mutex);
	seq_printf(s, "%-30s %-30s %-10s %s\n",
		   "clock-name", "parent-name", "rate", "use-count");

	list_for_each_entry(c, &clocks, node) {
		pa = c->parent;
		seq_printf(s, "%-30s %-30s %-10lu %d\n",
			   c->name, pa ? pa->name : "none", c->rate,
			   c->usecount);
	}
	mutex_unlock(&clocks_mutex);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(debug_clock);

static void clk_debugfs_register_one(struct clk *c)
{
	struct dentry *d;
	struct clk *pa = c->parent;

	d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root);
	c->dent = d;

	debugfs_create_u8("usecount", S_IRUGO, c->dent, &c->usecount);
	debugfs_create_ulong("rate", S_IRUGO, c->dent, &c->rate);
	debugfs_create_x8("flags", S_IRUGO, c->dent, &c->flags);
}

static void clk_debugfs_register(struct clk *c)
{
	struct clk *pa = c->parent;

	if (pa && !pa->dent)
		clk_debugfs_register(pa);

	if (!c->dent)
		clk_debugfs_register_one(c);
}
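
/*
 * Illustrative layout (editor's note, hypothetical clock names): the
 * registration above mirrors the clock tree under debugfs, e.g.
 *
 *	/sys/kernel/debug/clock/summary
 *	/sys/kernel/debug/clock/ck_ref/usecount, rate, flags
 *	/sys/kernel/debug/clock/ck_ref/ck_dpll1/...
 *
 * with one directory per clock nested under its parent's directory, plus
 * the "summary" file created in clk_debugfs_init() below.
 */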

static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;

	d = debugfs_create_dir("clock", NULL);
	clk_debugfs_root = d;

	list_for_each_entry(c, &clocks, node)
		clk_debugfs_register(c);

	debugfs_create_file("summary", S_IRUGO, d, NULL, &debug_clock_fops);

	return 0;
}
late_initcall(clk_debugfs_init);

#endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */