1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (C) 2018 ARM Limited
4 */
5#ifndef __ASM_VDSO_GETTIMEOFDAY_H
6#define __ASM_VDSO_GETTIMEOFDAY_H
7
8#ifndef __ASSEMBLY__
9
10#include <asm/alternative.h>
11#include <asm/barrier.h>
12#include <asm/unistd.h>
13#include <asm/sysreg.h>
14
15#define VDSO_HAS_CLOCK_GETRES		1
16
static __always_inline
int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
			  struct timezone *_tz)
{
	/*
	 * Syscall fallback used when the vDSO fast path cannot service the
	 * request. arm64 syscall convention: number in x8, arguments in
	 * x0/x1, return value in x0 — the register variables pin each
	 * operand to the register the kernel expects. Note ret aliases tv
	 * in x0: the syscall return overwrites the first argument register.
	 */
	register struct timezone *tz asm("x1") = _tz;
	register struct __kernel_old_timeval *tv asm("x0") = _tv;
	register long ret asm ("x0");
	register long nr asm("x8") = __NR_gettimeofday;

	/* "memory" clobber: the kernel writes through tv/tz */
	asm volatile(
	"       svc #0\n"
	: "=r" (ret)
	: "r" (tv), "r" (tz), "r" (nr)
	: "memory");

	return ret;
}
34
static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	/*
	 * Syscall fallback for clock_gettime() when the vDSO fast path
	 * cannot be used. Arguments in x0 (clockid) and x1 (timespec),
	 * syscall number in x8, return value in x0 (ret aliases clkid).
	 */
	register struct __kernel_timespec *ts asm("x1") = _ts;
	register clockid_t clkid asm("x0") = _clkid;
	register long ret asm ("x0");
	register long nr asm("x8") = __NR_clock_gettime;

	/* "memory" clobber: the kernel writes through ts */
	asm volatile(
	"       svc #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}
51
static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	/*
	 * Syscall fallback for clock_getres() (exposed to the generic vDSO
	 * code via VDSO_HAS_CLOCK_GETRES above). Same register convention
	 * as the other fallbacks: x0/x1 arguments, x8 syscall number,
	 * return in x0.
	 */
	register struct __kernel_timespec *ts asm("x1") = _ts;
	register clockid_t clkid asm("x0") = _clkid;
	register long ret asm ("x0");
	register long nr asm("x8") = __NR_clock_getres;

	/* "memory" clobber: the kernel writes through ts */
	asm volatile(
	"       svc #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}
68
/*
 * Read the raw hardware counter backing the vDSO clocks.
 *
 * @clock_mode:	clocksource mode published by the timekeeping core
 * @vd:		vDSO data page (unused here; part of the generic interface)
 *
 * Returns the current virtual counter value, or 0 if the clocksource is
 * not vDSO-capable (see comment below).
 */
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
						 const struct vdso_data *vd)
{
	u64 res;

	/*
	 * Core checks for mode already, so this raced against a concurrent
	 * update. Return something. Core will do another round and then
	 * see the mode change and fallback to the syscall.
	 */
	if (clock_mode == VDSO_CLOCKMODE_NONE)
		return 0;

	/*
	 * If FEAT_ECV is available, use the self-synchronizing counter.
	 * Otherwise the isb is required to prevent that the counter value
	 * is speculated.
	 */
	asm volatile(
	ALTERNATIVE("isb\n"
		    "mrs %0, cntvct_el0",
		    "nop\n"
		    __mrs_s("%0", SYS_CNTVCTSS_EL0),
		    ARM64_HAS_ECV)
	: "=r" (res)
	:
	: "memory");

	/* Prevent the load above being reordered after later memory accesses */
	arch_counter_enforce_ordering(res);

	return res;
}
101
/* Return the arm64 vDSO data page for the generic vDSO library. */
static __always_inline
const struct vdso_data *__arch_get_vdso_data(void)
{
	return _vdso_data;
}
107
108#ifdef CONFIG_TIME_NS
/*
 * Return the time-namespace vDSO data page. @vd is the regular vDSO data
 * pointer, unused here; it is part of the generic interface.
 */
static __always_inline
const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
{
	return _timens_data;
}
114#endif
115
116#endif /* !__ASSEMBLY__ */
117
118#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
119