// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/drivers/clocksource/arm_arch_timer.c
 *
 *  Copyright (C) 2011 ARM Ltd.
 *  All Rights Reserved
 */

#define pr_fmt(fmt) "arch_timer: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched_clock.h>
#include <linux/acpi.h>

#include <asm/arch_timer.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

#define CNTTIDR		0x08
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))
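/*
 * CNTTIDR allocates a 4-bit field to each frame; bit 1 of frame n's
 * field (hence BIT(1) << (n * 4)) advertises whether the frame has a
 * virtual timer. See the memory-mapped counter module description in
 * the Arm architecture reference manual.
 */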

#define CNTACR(n)	(0x40 + ((n) * 4))
#define CNTACR_RPCT	BIT(0)
#define CNTACR_RVCT	BIT(1)
#define CNTACR_RFRQ	BIT(2)
#define CNTACR_RVOFF	BIT(3)
#define CNTACR_RWVT	BIT(4)
#define CNTACR_RWPT	BIT(5)

#define CNTVCT_LO	0x08
#define CNTVCT_HI	0x0c
#define CNTFRQ		0x10
#define CNTP_TVAL	0x28
#define CNTP_CTL	0x2c
#define CNTV_TVAL	0x38
#define CNTV_CTL	0x3c

static unsigned arch_timers_present __initdata;

static void __iomem *arch_counter_base;

struct arch_timer {
	void __iomem *base;
	struct clock_event_device evt;
};

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

static u32 arch_timer_rate;
static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI];

static struct clock_event_device __percpu *arch_timer_evt;

static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
static bool arch_counter_suspend_stop;
#ifdef CONFIG_GENERIC_GETTIMEOFDAY
static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_ARCHTIMER;
#else
static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_NONE;
#endif /* CONFIG_GENERIC_GETTIMEOFDAY */

static cpumask_t evtstrm_available = CPU_MASK_NONE;
static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);

static int __init early_evtstrm_cfg(char *buf)
{
	return strtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);

/*
 * Architected system timer support.
 */

static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
			  struct clock_event_device *clk)
{
	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTV_TVAL);
			break;
		}
	} else {
		arch_timer_reg_write_cp15(access, reg, val);
	}
}

static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
			struct clock_event_device *clk)
{
	u32 val;

	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTV_TVAL);
			break;
		}
	} else {
		val = arch_timer_reg_read_cp15(access, reg);
	}

	return val;
}

static notrace u64 arch_counter_get_cntpct_stable(void)
{
	return __arch_counter_get_cntpct_stable();
}

static notrace u64 arch_counter_get_cntpct(void)
{
	return __arch_counter_get_cntpct();
}

static notrace u64 arch_counter_get_cntvct_stable(void)
{
	return __arch_counter_get_cntvct_stable();
}

static notrace u64 arch_counter_get_cntvct(void)
{
	return __arch_counter_get_cntvct();
}

/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
EXPORT_SYMBOL_GPL(arch_timer_read_counter);

static u64 arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}

static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct cyclecounter cyclecounter __ro_after_init = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

struct ate_acpi_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};

#ifdef CONFIG_FSL_ERRATUM_A008585
/*
 * The number of retries is an arbitrary value well beyond the highest number
 * of iterations the loop has been observed to take.
 */
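/*
 * For example, a single read of CNTVCT_EL0 can intermittently return a
 * corrupted value here; two back-to-back reads returning the same value
 * are taken as proof of a good read, hence the loop below.
 */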
#define __fsl_a008585_read_reg(reg) ({			\
	u64 _old, _new;					\
	int _retries = 200;				\
							\
	do {						\
		_old = read_sysreg(reg);		\
		_new = read_sysreg(reg);		\
		_retries--;				\
	} while (unlikely(_old != _new) && _retries);	\
							\
	WARN_ON_ONCE(!_retries);			\
	_new;						\
})

static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntp_tval_el0);
}

static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntv_tval_el0);
}

static u64 notrace fsl_a008585_read_cntpct_el0(void)
{
	return __fsl_a008585_read_reg(cntpct_el0);
}

static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
#endif

#ifdef CONFIG_HISILICON_ERRATUM_161010101
/*
 * The only way to confirm that a read is correct is to verify that the
 * second of two back-to-back reads is larger than the first by less than
 * 32, so clear the lower 5 bits and check whether the difference is 32 or
 * more. Theoretically the erratum should not occur more than twice in
 * succession when reading the system counter, but interrupts may widen the
 * window and cause additional bad reads, triggering the warning, so set
 * the number of retries far beyond the number of iterations the loop has
 * been observed to take.
 */
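/*
 * Worked example for the check below: with _old = 0x1000 and
 * _new = 0x101f the difference is 31, so (_new - _old) >> 5 is zero and
 * the value is accepted; with _new = 0x1020 the difference is 32, the
 * shift is non-zero and the read is retried.
 */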
#define __hisi_161010101_read_reg(reg) ({				\
	u64 _old, _new;						\
	int _retries = 50;					\
								\
	do {							\
		_old = read_sysreg(reg);			\
		_new = read_sysreg(reg);			\
		_retries--;					\
	} while (unlikely((_new - _old) >> 5) && _retries);	\
								\
	WARN_ON_ONCE(!_retries);				\
	_new;							\
})

static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntp_tval_el0);
}

static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntv_tval_el0);
}

static u64 notrace hisi_161010101_read_cntpct_el0(void)
{
	return __hisi_161010101_read_reg(cntpct_el0);
}

static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
	return __hisi_161010101_read_reg(cntvct_el0);
}

static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
	/*
	 * Note that trailing spaces are required to properly match
	 * the OEM table information.
	 */
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP05   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP06   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP07   ",
		.oem_revision	= 0,
	},
	{ /* Sentinel indicating the end of the OEM array */ },
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_858921
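/*
 * Erratum 858921: a counter read can return a wrong value when the
 * counter crosses a 32-bit boundary. The workaround reads the counter
 * twice: if bit 32 differs between the reads, the rollover happened in
 * between and the first value is used; otherwise the second, most
 * recent, value is returned.
 */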
static u64 notrace arm64_858921_read_cntpct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntpct_el0);
	new = read_sysreg(cntpct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}

static u64 notrace arm64_858921_read_cntvct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntvct_el0);
	new = read_sysreg(cntvct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}
#endif

#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
/*
 * The low bits of the counter registers are indeterminate while bit 10 or
 * greater is rolling over. Since the counter value can jump both backward
 * (7ff -> 000 -> 800) and forward (7ff -> fff -> 800), ignore register values
 * with all ones or all zeros in the low bits. Bound the loop by the maximum
 * number of CPU cycles in 3 consecutive 24 MHz counter periods.
 */
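/*
 * Worked example for the check below: a value whose low nine bits are
 * all ones makes ((_val + 1) & GENMASK(8, 0)) == 0, and one whose low
 * nine bits are all zeros makes it 1; both are <= 1 and are re-read. A
 * value ending in ...001 yields 2 and is accepted.
 */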
#define __sun50i_a64_read_reg(reg) ({					\
	u64 _val;							\
	int _retries = 150;						\
									\
	do {								\
		_val = read_sysreg(reg);				\
		_retries--;						\
	} while (((_val + 1) & GENMASK(8, 0)) <= 1 && _retries);	\
									\
	WARN_ON_ONCE(!_retries);					\
	_val;								\
})

static u64 notrace sun50i_a64_read_cntpct_el0(void)
{
	return __sun50i_a64_read_reg(cntpct_el0);
}

static u64 notrace sun50i_a64_read_cntvct_el0(void)
{
	return __sun50i_a64_read_reg(cntvct_el0);
}

static u32 notrace sun50i_a64_read_cntp_tval_el0(void)
{
	return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0();
}

static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
{
	return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0();
}
#endif

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);

static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
						struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cval;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS) {
		cval = evt + arch_counter_get_cntpct_stable();
		write_sysreg(cval, cntp_cval_el0);
	} else {
		cval = evt + arch_counter_get_cntvct_stable();
		write_sysreg(cval, cntv_cval_el0);
	}

	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static __maybe_unused int erratum_set_next_event_tval_virt(unsigned long evt,
					    struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static __maybe_unused int erratum_set_next_event_tval_phys(unsigned long evt,
					    struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static const struct arch_timer_erratum_workaround ool_workarounds[] = {
#ifdef CONFIG_FSL_ERRATUM_A008585
	{
		.match_type = ate_match_dt,
		.id = "fsl,erratum-a008585",
		.desc = "Freescale erratum a008585",
		.read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
		.read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
		.read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
		.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
	{
		.match_type = ate_match_dt,
		.id = "hisilicon,erratum-161010101",
		.desc = "HiSilicon erratum 161010101",
		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
	{
		.match_type = ate_match_acpi_oem_info,
		.id = hisi_161010101_oem_info,
		.desc = "HiSilicon erratum 161010101",
		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		.match_type = ate_match_local_cap_id,
		.id = (void *)ARM64_WORKAROUND_858921,
		.desc = "ARM erratum 858921",
		.read_cntpct_el0 = arm64_858921_read_cntpct_el0,
		.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
	},
#endif
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
	{
		.match_type = ate_match_dt,
		.id = "allwinner,erratum-unknown1",
		.desc = "Allwinner erratum UNKNOWN1",
		.read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0,
		.read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0,
		.read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
		.read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.match_type = ate_match_local_cap_id,
		.id = (void *)ARM64_WORKAROUND_1418040,
		.desc = "ARM erratum 1418040",
		.disable_compat_vdso = true,
	},
#endif
};

typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
			       const void *);

static
bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
				 const void *arg)
{
	const struct device_node *np = arg;

	return of_property_read_bool(np, wa->id);
}

static
bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
					const void *arg)
{
	return this_cpu_has_cap((uintptr_t)wa->id);
}

static
bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa,
				       const void *arg)
{
	static const struct ate_acpi_oem_info empty_oem_info = {};
	const struct ate_acpi_oem_info *info = wa->id;
	const struct acpi_table_header *table = arg;

	/* Iterate over the ACPI OEM info array, looking for a match */
	while (memcmp(info, &empty_oem_info, sizeof(*info))) {
		if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) &&
		    !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
		    info->oem_revision == table->oem_revision)
			return true;

		info++;
	}

	return false;
}

static const struct arch_timer_erratum_workaround *
arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
			  ate_match_fn_t match_fn,
			  void *arg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
		if (ool_workarounds[i].match_type != type)
			continue;

		if (match_fn(&ool_workarounds[i], arg))
			return &ool_workarounds[i];
	}

	return NULL;
}

static
void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
				  bool local)
{
	int i;

	if (local) {
		__this_cpu_write(timer_unstable_counter_workaround, wa);
	} else {
		for_each_possible_cpu(i)
			per_cpu(timer_unstable_counter_workaround, i) = wa;
	}

	if (wa->read_cntvct_el0 || wa->read_cntpct_el0)
		atomic_set(&timer_unstable_counter_workaround_in_use, 1);

	/*
	 * Don't use the vdso fastpath if errata require using the
	 * out-of-line counter accessor. We may change our mind pretty
	 * late in the game (with a per-CPU erratum, for example), so
	 * change both the default value and the vdso itself.
	 */
	if (wa->read_cntvct_el0) {
		clocksource_counter.vdso_clock_mode = VDSO_CLOCKMODE_NONE;
		vdso_default = VDSO_CLOCKMODE_NONE;
	} else if (wa->disable_compat_vdso && vdso_default != VDSO_CLOCKMODE_NONE) {
		vdso_default = VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT;
		clocksource_counter.vdso_clock_mode = vdso_default;
	}
}

static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
					    void *arg)
{
	const struct arch_timer_erratum_workaround *wa, *__wa;
	ate_match_fn_t match_fn = NULL;
	bool local = false;

	switch (type) {
	case ate_match_dt:
		match_fn = arch_timer_check_dt_erratum;
		break;
	case ate_match_local_cap_id:
		match_fn = arch_timer_check_local_cap_erratum;
		local = true;
		break;
	case ate_match_acpi_oem_info:
		match_fn = arch_timer_check_acpi_oem_erratum;
		break;
	default:
		WARN_ON(1);
		return;
	}

	wa = arch_timer_iterate_errata(type, match_fn, arg);
	if (!wa)
		return;

	__wa = __this_cpu_read(timer_unstable_counter_workaround);
	if (__wa && wa != __wa)
		pr_warn("Can't enable workaround for %s (clashes with %s)\n",
			wa->desc, __wa->desc);

	if (__wa)
		return;

	arch_timer_enable_workaround(wa, local);
	pr_info("Enabling %s workaround for %s\n",
		local ? "local" : "global", wa->desc);
}

static bool arch_timer_this_cpu_has_cntvct_wa(void)
{
	return has_erratum_handler(read_cntvct_el0);
}

static bool arch_timer_counter_has_wa(void)
{
	return atomic_read(&timer_unstable_counter_workaround_in_use);
}
#else
#define arch_timer_check_ool_workaround(t,a)		do { } while(0)
#define arch_timer_this_cpu_has_cntvct_wa()		({false;})
#define arch_timer_counter_has_wa()			({false;})
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */

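/*
 * Common interrupt handling: if ISTATUS shows the timer condition has
 * been met, mask the interrupt so it doesn't fire again before the next
 * event is programmed, then run the clockevent handler.
 */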
static __always_inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}

static __always_inline int timer_shutdown(const int access,
					  struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);

	return 0;
}

static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}

static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
}

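/*
 * TVAL is a signed 32-bit down-counter: writing 'evt' arms the timer to
 * fire 'evt' ticks from now, as the hardware translates the write into
 * CVAL = current count + TVAL.
 */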
static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}

static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_TIMER_TYPE_CP15) {
		typeof(clk->set_next_event) sne;

		arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);

		if (arch_timer_c3stop)
			clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->rating = 450;
		clk->cpumask = cpumask_of(smp_processor_id());
		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
		switch (arch_timer_uses_ppi) {
		case ARCH_TIMER_VIRT_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_virt;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
			sne = erratum_handler(set_next_event_virt);
			break;
		case ARCH_TIMER_PHYS_SECURE_PPI:
		case ARCH_TIMER_PHYS_NONSECURE_PPI:
		case ARCH_TIMER_HYP_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_phys;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
			sne = erratum_handler(set_next_event_phys);
			break;
		default:
			BUG();
		}

		clk->set_next_event = sne;
	} else {
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->rating = 400;
		clk->cpumask = cpu_possible_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}
	}

	clk->set_state_shutdown(clk);

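	/*
	 * The 0xf minimum delta is presumably a small safety margin against
	 * programming an event in the past; the 0x7fffffff maximum keeps the
	 * delta within the positive range of the signed 32-bit TVAL register.
	 */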
	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}

static void arch_timer_evtstrm_enable(int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	arch_timer_set_evtstrm_feature();
	cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}

static void arch_timer_configure_evtstream(void)
{
	int evt_stream_div, lsb;

	/*
	 * As the event stream can at most be generated at half the frequency
	 * of the counter, use half the frequency when computing the divider.
	 */
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ / 2;

	/*
	 * Find the closest power of two to the divisor. If the adjacent bit
	 * of lsb (last set bit, starting from 0) is set, then we use (lsb + 1).
	 */
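	/*
	 * Worked example, assuming the default 10 kHz event stream target:
	 * with a 24 MHz counter, evt_stream_div = 24000000 / 10000 / 2 = 1200,
	 * fls(1200) - 1 = 10 and bit 9 of 1200 is clear, so the divider stays
	 * 10 and events are generated at 24 MHz / 2^11, roughly 11.7 kHz.
	 */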
	lsb = fls(evt_stream_div) - 1;
	if (lsb > 0 && (evt_stream_div & BIT(lsb - 1)))
		lsb++;

	/* enable event stream */
	arch_timer_evtstrm_enable(max(0, min(lsb, 15)));
}

static void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	/* Disable user access to the timers and both counters */
	/* Also disable virtual event stream */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_USR_VCT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);

	/*
	 * Enable user access to the virtual counter if it doesn't need
	 * to be worked around. The vdso may already have been disabled,
	 * though.
	 */
	if (arch_timer_this_cpu_has_cntvct_wa())
		pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
	else
		cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;

	arch_timer_set_cntkctl(cntkctl);
}

static bool arch_timer_has_nonsecure_ppi(void)
{
	return (arch_timer_uses_ppi == ARCH_TIMER_PHYS_SECURE_PPI &&
		arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
}

static u32 check_ppi_trigger(int irq)
{
	u32 flags = irq_get_trigger_type(irq);

	if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
		pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
		pr_warn("WARNING: Please fix your firmware\n");
		flags = IRQF_TRIGGER_LOW;
	}

	return flags;
}

static int arch_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
	u32 flags;

	__arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);

	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

	if (arch_timer_has_nonsecure_ppi()) {
		flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
		enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
				  flags);
	}

	arch_counter_set_user_access();
	if (evtstrm_enable)
		arch_timer_configure_evtstream();

	return 0;
}

static int validate_timer_rate(void)
{
	if (!arch_timer_rate)
		return -EINVAL;

	/* Arch timer frequency < 1MHz can cause trouble */
	WARN_ON(arch_timer_rate < 1000000);

	return 0;
}

/*
 * For historical reasons, when probing with DT we use whichever (non-zero)
 * rate was probed first, and don't verify that others match. If the first node
 * probed has a clock-frequency property, this overrides the HW register.
 */
static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
{
	/* Who has more than one independent system counter? */
	if (arch_timer_rate)
		return;

	if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate))
		arch_timer_rate = rate;

	/* Check the timer frequency. */
	if (validate_timer_rate())
		pr_warn("frequency not available\n");
}

static void arch_timer_banner(unsigned type)
{
	pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
		type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
		type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ?
			" and " : "",
		type & ARCH_TIMER_TYPE_MEM ? "mmio" : "",
		(unsigned long)arch_timer_rate / 1000000,
		(unsigned long)(arch_timer_rate / 10000) % 100,
		type & ARCH_TIMER_TYPE_CP15 ?
			(arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" :
			"",
		type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "",
		type & ARCH_TIMER_TYPE_MEM ?
			arch_timer_mem_use_virtual ? "virt" : "phys" :
			"");
}

u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}

bool arch_timer_evtstrm_available(void)
{
	/*
	 * We might get called from a preemptible context. This is fine
	 * because the availability of the event stream should always be
	 * the same for a preemptible context and the context where we
	 * might resume a task.
	 */
	return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
}

static u64 arch_counter_get_cntvct_mem(void)
{
	u32 vct_lo, vct_hi, tmp_hi;

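	/*
	 * Classic hi/lo/hi sequence for reading a 64-bit value through two
	 * 32-bit registers: if the high word changed while the low word was
	 * being read, a rollover intervened and the read is retried.
	 */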
	do {
		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
	} while (vct_hi != tmp_hi);

	return ((u64) vct_hi << 32) | vct_lo;
}

static struct arch_timer_kvm_info arch_timer_kvm_info;

struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
	return &arch_timer_kvm_info;
}

static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_TIMER_TYPE_CP15) {
		u64 (*rd)(void);

		if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
		    arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
			if (arch_timer_counter_has_wa())
				rd = arch_counter_get_cntvct_stable;
			else
				rd = arch_counter_get_cntvct;
		} else {
			if (arch_timer_counter_has_wa())
				rd = arch_counter_get_cntpct_stable;
			else
				rd = arch_counter_get_cntpct;
		}

		arch_timer_read_counter = rd;
		clocksource_counter.vdso_clock_mode = vdso_default;
	} else {
		arch_timer_read_counter = arch_counter_get_cntvct_mem;
	}

	if (!arch_counter_suspend_stop)
		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&arch_timer_kvm_info.timecounter,
			 &cyclecounter, start_count);

	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}

static void arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id());

	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
	if (arch_timer_has_nonsecure_ppi())
		disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);

	clk->set_state_shutdown(clk);
}

static int arch_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);

	cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);

	arch_timer_stop(clk);
	return 0;
}

#ifdef CONFIG_CPU_PM
static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER) {
		__this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());

		cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
	} else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
		arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));

		if (arch_timer_have_evtstrm_feature())
			cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
	}
	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
	.notifier_call = arch_timer_cpu_pm_notify,
};

static int __init arch_timer_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}

static void __init arch_timer_cpu_pm_deinit(void)
{
	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
}

#else
static int __init arch_timer_cpu_pm_init(void)
{
	return 0;
}

static void __init arch_timer_cpu_pm_deinit(void)
{
}
#endif

static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	ppi = arch_timer_ppi[arch_timer_uses_ppi];
	switch (arch_timer_uses_ppi) {
	case ARCH_TIMER_VIRT_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
		break;
	case ARCH_TIMER_PHYS_SECURE_PPI:
	case ARCH_TIMER_PHYS_NONSECURE_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_has_nonsecure_ppi()) {
			ppi = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI],
						arch_timer_evt);
		}
		break;
	case ARCH_TIMER_HYP_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		break;
	default:
		BUG();
	}

	if (err) {
		pr_err("can't register interrupt %d (%d)\n", ppi, err);
		goto out_free;
	}

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;

	/* Register and immediately configure the timer on the boot CPU */
	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
				"clockevents/arm/arch_timer:starting",
				arch_timer_starting_cpu, arch_timer_dying_cpu);
	if (err)
		goto out_unreg_cpupm;
	return 0;

out_unreg_cpupm:
	arch_timer_cpu_pm_deinit();

out_unreg_notify:
	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
	if (arch_timer_has_nonsecure_ppi())
		free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
				arch_timer_evt);

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}

static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
	int ret;
	irq_handler_t func;
	struct arch_timer *t;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	t->base = base;
	t->evt.irq = irq;
	__arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt);

	if (arch_timer_mem_use_virtual)
		func = arch_timer_handler_virt_mem;
	else
		func = arch_timer_handler_phys_mem;

	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
	if (ret) {
		pr_err("Failed to request mem timer irq\n");
		kfree(t);
	}

	return ret;
}

static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible   = "arm,armv7-timer",    },
	{ .compatible   = "arm,armv8-timer",    },
	{},
};

static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible   = "arm,armv7-timer-mem", },
	{},
};

static bool __init arch_timer_needs_of_probing(void)
{
	struct device_node *dn;
	bool needs_probing = false;
	unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM;

	/* We have two timers, and both device-tree nodes are probed. */
	if ((arch_timers_present & mask) == mask)
		return false;

	/*
	 * Only one type of timer is probed; check whether the device tree
	 * has a node for the other type.
	 */
	if (arch_timers_present & ARCH_TIMER_TYPE_CP15)
		dn = of_find_matching_node(NULL, arch_timer_mem_of_match);
	else
		dn = of_find_matching_node(NULL, arch_timer_of_match);

	if (dn && of_device_is_available(dn))
		needs_probing = true;

	of_node_put(dn);

	return needs_probing;
}

static int __init arch_timer_common_init(void)
{
	arch_timer_banner(arch_timers_present);
	arch_counter_register(arch_timers_present);
	return arch_timer_arch_init();
}

/**
 * arch_timer_select_ppi() - Select suitable PPI for the current system.
 *
 * If HYP mode is available, we know that the physical timer
 * has been configured to be accessible from PL1. Use it, so
 * that a guest can use the virtual timer instead.
 *
 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
 * accesses to CNTP_*_EL1 registers are silently redirected to
 * their CNTHP_*_EL2 counterparts, and use a different PPI
 * number.
 *
 * If no interrupt is provided for the virtual timer, we'll have to
 * stick to the physical timer. It had better be accessible...
 * For arm64 we never use the secure interrupt.
 *
 * Return: a suitable PPI type for the current system.
 */
static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void)
{
	if (is_kernel_in_hyp_mode())
		return ARCH_TIMER_HYP_PPI;

	if (!is_hyp_mode_available() && arch_timer_ppi[ARCH_TIMER_VIRT_PPI])
		return ARCH_TIMER_VIRT_PPI;

	if (IS_ENABLED(CONFIG_ARM64))
		return ARCH_TIMER_PHYS_NONSECURE_PPI;

	return ARCH_TIMER_PHYS_SECURE_PPI;
}

static void __init arch_timer_populate_kvm_info(void)
{
	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
	if (is_kernel_in_hyp_mode())
		arch_timer_kvm_info.physical_irq = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
}

static int __init arch_timer_of_init(struct device_node *np)
{
	int i, ret;
	u32 rate;

	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
		pr_warn("multiple nodes in dt, skipping\n");
		return 0;
	}

	arch_timers_present |= ARCH_TIMER_TYPE_CP15;
	for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	arch_timer_populate_kvm_info();

	rate = arch_timer_get_cntfrq();
	arch_timer_of_configure_rate(rate, np);

	arch_timer_c3stop = !of_property_read_bool(np, "always-on");

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_dt, np);

	/*
	 * If we cannot rely on firmware initializing the timer registers then
	 * we should use the physical timers instead.
	 */
	if (IS_ENABLED(CONFIG_ARM) &&
	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
		arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI;
	else
		arch_timer_uses_ppi = arch_timer_select_ppi();

	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
		pr_err("No interrupt available, giving up\n");
		return -EINVAL;
	}

	/* On some systems, the counter stops ticking when in suspend. */
	arch_counter_suspend_stop = of_property_read_bool(np,
							 "arm,no-tick-in-suspend");

	ret = arch_timer_register();
	if (ret)
		return ret;

	if (arch_timer_needs_of_probing())
		return 0;

	return arch_timer_common_init();
}
TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);

static u32 __init
arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
{
	void __iomem *base;
	u32 rate;

	base = ioremap(frame->cntbase, frame->size);
	if (!base) {
		pr_err("Unable to map frame @ %pa\n", &frame->cntbase);
		return 0;
	}

	rate = readl_relaxed(base + CNTFRQ);

	iounmap(base);

	return rate;
}

static struct arch_timer_mem_frame * __init
arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
{
	struct arch_timer_mem_frame *frame, *best_frame = NULL;
	void __iomem *cntctlbase;
	u32 cnttidr;
	int i;

	cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size);
	if (!cntctlbase) {
		pr_err("Can't map CNTCTLBase @ %pa\n",
			&timer_mem->cntctlbase);
		return NULL;
	}

	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);

	/*
	 * Try to find a virtual capable frame. Otherwise fall back to a
	 * physical capable frame.
	 */
	for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
		u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
			     CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;

		frame = &timer_mem->frame[i];
		if (!frame->valid)
			continue;

		/* Try enabling everything, and see what sticks */
		writel_relaxed(cntacr, cntctlbase + CNTACR(i));
		cntacr = readl_relaxed(cntctlbase + CNTACR(i));

		if ((cnttidr & CNTTIDR_VIRT(i)) &&
		    !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
			best_frame = frame;
			arch_timer_mem_use_virtual = true;
			break;
		}

		if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
			continue;

		best_frame = frame;
	}

	iounmap(cntctlbase);

	return best_frame;
}

static int __init
arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
{
	void __iomem *base;
	int ret, irq = 0;

	if (arch_timer_mem_use_virtual)
		irq = frame->virt_irq;
	else
		irq = frame->phys_irq;

	if (!irq) {
		pr_err("Frame missing %s irq.\n",
		       arch_timer_mem_use_virtual ? "virt" : "phys");
		return -EINVAL;
	}

	if (!request_mem_region(frame->cntbase, frame->size,
				"arch_mem_timer"))
		return -EBUSY;

	base = ioremap(frame->cntbase, frame->size);
	if (!base) {
		pr_err("Can't map frame's registers\n");
		return -ENXIO;
	}

	ret = arch_timer_mem_register(base, irq);
	if (ret) {
		iounmap(base);
		return ret;
	}

	arch_counter_base = base;
	arch_timers_present |= ARCH_TIMER_TYPE_MEM;

	return 0;
}

static int __init arch_timer_mem_of_init(struct device_node *np)
{
	struct arch_timer_mem *timer_mem;
	struct arch_timer_mem_frame *frame;
	struct device_node *frame_node;
	struct resource res;
	int ret = -EINVAL;
	u32 rate;

	timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL);
	if (!timer_mem)
		return -ENOMEM;

	if (of_address_to_resource(np, 0, &res))
		goto out;
	timer_mem->cntctlbase = res.start;
	timer_mem->size = resource_size(&res);

	for_each_available_child_of_node(np, frame_node) {
		u32 n;
		struct arch_timer_mem_frame *frame;

		if (of_property_read_u32(frame_node, "frame-number", &n)) {
			pr_err(FW_BUG "Missing frame-number.\n");
			of_node_put(frame_node);
			goto out;
		}
		if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
			pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
			       ARCH_TIMER_MEM_MAX_FRAMES - 1);
			of_node_put(frame_node);
			goto out;
		}
		frame = &timer_mem->frame[n];

		if (frame->valid) {
			pr_err(FW_BUG "Duplicated frame-number.\n");
			of_node_put(frame_node);
			goto out;
		}

		if (of_address_to_resource(frame_node, 0, &res)) {
			of_node_put(frame_node);
			goto out;
		}
		frame->cntbase = res.start;
		frame->size = resource_size(&res);

		frame->virt_irq = irq_of_parse_and_map(frame_node,
						       ARCH_TIMER_VIRT_SPI);
		frame->phys_irq = irq_of_parse_and_map(frame_node,
						       ARCH_TIMER_PHYS_SPI);

		frame->valid = true;
	}

	frame = arch_timer_mem_find_best_frame(timer_mem);
	if (!frame) {
		pr_err("Unable to find a suitable frame in timer @ %pa\n",
			&timer_mem->cntctlbase);
		ret = -EINVAL;
		goto out;
	}

	rate = arch_timer_mem_frame_get_cntfrq(frame);
	arch_timer_of_configure_rate(rate, np);

	ret = arch_timer_mem_frame_register(frame);
	if (!ret && !arch_timer_needs_of_probing())
		ret = arch_timer_common_init();
out:
	kfree(timer_mem);
	return ret;
}
TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
		       arch_timer_mem_of_init);

#ifdef CONFIG_ACPI_GTDT
static int __init
arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
{
	struct arch_timer_mem_frame *frame;
	u32 rate;
	int i;

	for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
		frame = &timer_mem->frame[i];

		if (!frame->valid)
			continue;

		rate = arch_timer_mem_frame_get_cntfrq(frame);
		if (rate == arch_timer_rate)
			continue;

		pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n",
			&frame->cntbase,
			(unsigned long)rate, (unsigned long)arch_timer_rate);

		return -EINVAL;
	}

	return 0;
}

static int __init arch_timer_mem_acpi_init(int platform_timer_count)
{
	struct arch_timer_mem *timers, *timer;
	struct arch_timer_mem_frame *frame, *best_frame = NULL;
	int timer_count, i, ret = 0;

	timers = kcalloc(platform_timer_count, sizeof(*timers),
			    GFP_KERNEL);
	if (!timers)
		return -ENOMEM;

	ret = acpi_arch_timer_mem_init(timers, &timer_count);
	if (ret || !timer_count)
		goto out;

	/*
	 * While unlikely, it's theoretically possible that none of the frames
	 * in a timer expose the combination of features we want.
	 */
	for (i = 0; i < timer_count; i++) {
		timer = &timers[i];

		frame = arch_timer_mem_find_best_frame(timer);
		if (!best_frame)
			best_frame = frame;

		ret = arch_timer_mem_verify_cntfrq(timer);
		if (ret) {
			pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
			goto out;
		}

		if (!best_frame) /* implies !frame */
			/*
			 * Only complain about missing suitable frames if we
			 * haven't already found one in a previous iteration.
			 */
			pr_err("Unable to find a suitable frame in timer @ %pa\n",
				&timer->cntctlbase);
	}

	if (best_frame)
		ret = arch_timer_mem_frame_register(best_frame);
out:
	kfree(timers);
	return ret;
}

/* Initialize the per-processor generic timer and memory-mapped timer (if present) */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
	int ret, platform_timer_count;

	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
		pr_warn("already initialized, skipping\n");
		return -EINVAL;
	}

	arch_timers_present |= ARCH_TIMER_TYPE_CP15;

	ret = acpi_gtdt_init(table, &platform_timer_count);
	if (ret)
		return ret;

	arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI);

	arch_timer_ppi[ARCH_TIMER_VIRT_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_VIRT_PPI);

	arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI);

	arch_timer_populate_kvm_info();

	/*
	 * When probing via ACPI, we have no mechanism to override the sysreg
	 * CNTFRQ value. This *must* be correct.
	 */
	arch_timer_rate = arch_timer_get_cntfrq();
	ret = validate_timer_rate();
	if (ret) {
		pr_err(FW_BUG "frequency not available.\n");
		return ret;
	}

	arch_timer_uses_ppi = arch_timer_select_ppi();
	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
		pr_err("No interrupt available, giving up\n");
		return -EINVAL;
	}

	/* Always-on capability */
	arch_timer_c3stop = acpi_gtdt_c3stop(arch_timer_uses_ppi);

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table);

	ret = arch_timer_register();
	if (ret)
		return ret;

	if (platform_timer_count &&
	    arch_timer_mem_acpi_init(platform_timer_count))
		pr_err("Failed to initialize memory-mapped timer.\n");

	return arch_timer_common_init();
}
TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
#endif