/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_IRQFLAGS_H
#define __ASM_IRQFLAGS_H

#include <asm/alternative.h>
#include <asm/barrier.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

/*
 * AArch64 has flags for masking: Debug, Asynchronous (SError), Interrupts and
 * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'daif'
 * order:
 * Masking debug exceptions causes all other exceptions to be masked too.
 * Masking SError masks IRQ/FIQ, but not debug exceptions. IRQ and FIQ are
 * always masked and unmasked together, and have no side effects for other
 * flags. Keeping to this order makes it easier for entry.S to know which
 * exceptions should be unmasked.
 */

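/*
 * When CONFIG_ARM64_PSEUDO_NMI is enabled and the ARM64_HAS_GIC_PRIO_MASKING
 * capability was detected at boot, normal IRQs are masked by adjusting the
 * GIC priority mask (ICC_PMR_EL1) instead of PSTATE.I/PSTATE.F, so that
 * NMI-priority (pseudo-NMI) interrupts remain deliverable. The helper below
 * selects between the two schemes; the rest of this file provides a DAIF
 * and a PMR flavour of each operation.
 */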
static __always_inline bool __irqflags_uses_pmr(void)
{
	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
	       alternative_has_cap_unlikely(ARM64_HAS_GIC_PRIO_MASKING);
}

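/*
 * In the DAIFSet/DAIFClr immediate, bit 0 is F (FIQ) and bit 1 is I (IRQ),
 * so "msr daifclr, #3" unmasks IRQ and FIQ together while leaving the A
 * (SError) and D (Debug) bits untouched. The barrier() calls stop the
 * compiler from moving memory accesses across the mask change.
 */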
static __always_inline void __daif_local_irq_enable(void)
{
	barrier();
	asm volatile("msr daifclr, #3");
	barrier();
}

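/*
 * With priority masking, "enable" means restoring the priority mask to
 * GIC_PRIO_IRQON so that normal-priority IRQs are accepted again.
 * pmr_sync() (from <asm/barrier.h>) provides any synchronisation the GIC
 * needs for the new mask to be observed, so newly unmasked interrupts are
 * taken without unbounded delay. The WARN_ON_ONCE() only exists under
 * CONFIG_ARM64_DEBUG_PRIORITY_MASKING and checks that the PMR holds one of
 * the two values expected at this point.
 */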
static __always_inline void __pmr_local_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING)) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	barrier();
	write_sysreg_s(GIC_PRIO_IRQON, SYS_ICC_PMR_EL1);
	pmr_sync();
	barrier();
}

static inline void arch_local_irq_enable(void)
{
	if (__irqflags_uses_pmr()) {
		__pmr_local_irq_enable();
	} else {
		__daif_local_irq_enable();
	}
}

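/*
 * The mirror image of the enable path: "msr daifset, #3" sets PSTATE.I and
 * PSTATE.F, masking IRQ and FIQ while leaving SError and debug exceptions
 * alone.
 */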
static __always_inline void __daif_local_irq_disable(void)
{
	barrier();
	asm volatile("msr daifset, #3");
	barrier();
}

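/*
 * Masking via the PMR writes GIC_PRIO_IRQOFF, which blocks normal IRQs but
 * still lets pseudo-NMIs through. Unlike the enable path there is no
 * pmr_sync() here; that synchronisation is only needed when unmasking, to
 * guarantee that newly allowed interrupts are delivered promptly.
 */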
static __always_inline void __pmr_local_irq_disable(void)
{
	if (IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING)) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	barrier();
	write_sysreg_s(GIC_PRIO_IRQOFF, SYS_ICC_PMR_EL1);
	barrier();
}

static inline void arch_local_irq_disable(void)
{
	if (__irqflags_uses_pmr()) {
		__pmr_local_irq_disable();
	} else {
		__daif_local_irq_disable();
	}
}

static __always_inline unsigned long __daif_local_save_flags(void)
{
	return read_sysreg(daif);
}

static __always_inline unsigned long __pmr_local_save_flags(void)
{
	return read_sysreg_s(SYS_ICC_PMR_EL1);
}

/*
 * Save the current interrupt enable state.
 */
static inline unsigned long arch_local_save_flags(void)
{
	if (__irqflags_uses_pmr()) {
		return __pmr_local_save_flags();
	} else {
		return __daif_local_save_flags();
	}
}

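/*
 * "Disabled" means different things for the two schemes: with DAIF the
 * saved flags are a PSTATE snapshot and PSR_I_BIT indicates that IRQs are
 * masked; with priority masking the saved flags are the old PMR value, and
 * anything other than GIC_PRIO_IRQON counts as masked.
 */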
static __always_inline bool __daif_irqs_disabled_flags(unsigned long flags)
{
	return flags & PSR_I_BIT;
}

static __always_inline bool __pmr_irqs_disabled_flags(unsigned long flags)
{
	return flags != GIC_PRIO_IRQON;
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	if (__irqflags_uses_pmr()) {
		return __pmr_irqs_disabled_flags(flags);
	} else {
		return __daif_irqs_disabled_flags(flags);
	}
}

static __always_inline bool __daif_irqs_disabled(void)
{
	return __daif_irqs_disabled_flags(__daif_local_save_flags());
}

static __always_inline bool __pmr_irqs_disabled(void)
{
	return __pmr_irqs_disabled_flags(__pmr_local_save_flags());
}

static inline bool arch_irqs_disabled(void)
{
	if (__irqflags_uses_pmr()) {
		return __pmr_irqs_disabled();
	} else {
		return __daif_irqs_disabled();
	}
}

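/*
 * arch_local_irq_save() is save-then-disable. The DAIF variant can disable
 * unconditionally, while the PMR variant only writes GIC_PRIO_IRQOFF when
 * interrupts are currently unmasked, so that any other already-masked PMR
 * state is left untouched.
 */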
static __always_inline unsigned long __daif_local_irq_save(void)
{
	unsigned long flags = __daif_local_save_flags();

	__daif_local_irq_disable();

	return flags;
}

static __always_inline unsigned long __pmr_local_irq_save(void)
{
	unsigned long flags = __pmr_local_save_flags();

	/*
	 * There are too many states with IRQs disabled, just keep the current
	 * state if interrupts are already disabled/masked.
	 */
	if (!__pmr_irqs_disabled_flags(flags))
		__pmr_local_irq_disable();

	return flags;
}

static inline unsigned long arch_local_irq_save(void)
{
	if (__irqflags_uses_pmr()) {
		return __pmr_local_irq_save();
	} else {
		return __daif_local_irq_save();
	}
}

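/*
 * Restoring a saved flags value may unmask interrupts, so the PMR variant
 * needs the same pmr_sync() as the enable path; the DAIF variant simply
 * writes the saved PSTATE.DAIF bits back.
 */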
static __always_inline void __daif_local_irq_restore(unsigned long flags)
{
	barrier();
	write_sysreg(flags, daif);
	barrier();
}

static __always_inline void __pmr_local_irq_restore(unsigned long flags)
{
	barrier();
	write_sysreg_s(flags, SYS_ICC_PMR_EL1);
	pmr_sync();
	barrier();
}

/*
 * Restore the saved IRQ state.
 */
static inline void arch_local_irq_restore(unsigned long flags)
{
	if (__irqflags_uses_pmr()) {
		__pmr_local_irq_restore(flags);
	} else {
		__daif_local_irq_restore(flags);
	}
}

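/*
 * These arch_* helpers are not normally called directly; code goes through
 * the generic wrappers in <linux/irqflags.h>, roughly:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		ends up in arch_local_irq_save()
 *	...critical section...
 *	local_irq_restore(flags);	ends up in arch_local_irq_restore()
 *
 * The same flags value must be handed back to the restore side: with
 * pseudo-NMIs it holds a PMR value rather than a DAIF snapshot.
 */
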
#endif /* __ASM_IRQFLAGS_H */