/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
 * is set whenever we manually hard disable.
 */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08 /* Or FIT */
#define PACA_IRQ_HMI		0x10
#define PACA_IRQ_PMI		0x20

/*
 * Some soft-masked interrupts must be hard masked until they are replayed
 * (e.g., because the soft-masked handler does not clear the exception).
 */
#ifdef CONFIG_PPC_BOOK3S
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI)
#else
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE)
#endif

#endif /* CONFIG_PPC64 */

/*
 * flags for paca->irq_soft_mask
 */
#define IRQS_ENABLED		0
#define IRQS_DISABLED		1 /* local_irq_disable() interrupts */
#define IRQS_PMI_DISABLED	2
#define IRQS_ALL_DISABLED	(IRQS_DISABLED | IRQS_PMI_DISABLED)
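
/*
 * paca->irq_soft_mask holds a combination of the flags above:
 * local_irq_disable() sets IRQS_DISABLED, while the PMI-safe
 * powerpc_local_irq_pmu_save() below masks both regular interrupts
 * and PMIs by setting IRQS_ALL_DISABLED.
 */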

#ifndef __ASSEMBLY__

extern void replay_system_reset(void);
extern void replay_soft_interrupts(void);

extern void timer_interrupt(struct pt_regs *);
extern void timer_broadcast_interrupt(void);
extern void performance_monitor_exception(struct pt_regs *regs);
extern void WatchdogException(struct pt_regs *regs);
extern void unknown_exception(struct pt_regs *regs);

#ifdef CONFIG_PPC64
#include <asm/paca.h>

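/*
 * Read the current soft-mask state from paca->irq_soft_mask. The PACA
 * is always addressable via r13, so this is a single lbz with no
 * barrier; callers needing ordering use the setters below, which
 * clobber "memory".
 */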
static inline notrace unsigned long irq_soft_mask_return(void)
{
	unsigned long flags;

	asm volatile(
		"lbz %0,%1(13)"
		: "=r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)));

	return flags;
}

/*
 * The "memory" clobber acts as both a compiler barrier
 * for the critical section and as a clobber because
 * we changed paca->irq_soft_mask.
 */
static inline notrace void irq_soft_mask_set(unsigned long mask)
{
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	/*
	 * The irq mask must always include the STD bit if any are set,
	 * and interrupts don't get replayed until the standard
	 * interrupt (local_irq_disable()) is unmasked.
	 *
	 * Other masks must only provide additional masking beyond
	 * the standard, and they are also not replayed until the
	 * standard interrupt becomes unmasked.
	 *
	 * This could be changed, but it will require partial
	 * unmasks to be replayed, among other things. For now, take
	 * the simple approach.
	 */
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

	asm volatile(
		"stb %0,%1(13)"
		:
		: "r" (mask),
		  "i" (offsetof(struct paca_struct, irq_soft_mask))
		: "memory");
}

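/*
 * Replace the soft mask and return the previous value: the
 * save-and-disable pattern used by arch_local_irq_save() and
 * hard_irq_disable() below.
 */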
static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
	unsigned long flags;

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

	asm volatile(
		"lbz %0,%1(13); stb %2,%1(13)"
		: "=&r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
		  "r" (mask)
		: "memory");

	return flags;
}

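/*
 * OR additional bits into the soft mask and return the previous value.
 * Used by raw_local_irq_pmu_save() below, so an already-disabled state
 * is preserved across the PMI-safe critical section.
 */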
static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
	unsigned long flags, tmp;

	asm volatile(
		"lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
		: "=&r" (flags), "=r" (tmp)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
		  "r" (mask)
		: "memory");

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
#endif

	return flags;
}

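/*
 * The generic irqflags API operates only on the soft mask. MSR[EE] is
 * left alone on the disable side; arch_local_irq_restore() takes care
 * of hard-enabling and replaying anything recorded in
 * paca->irq_happened while we were soft-disabled.
 */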
static inline unsigned long arch_local_save_flags(void)
{
	return irq_soft_mask_return();
}

static inline void arch_local_irq_disable(void)
{
	irq_soft_mask_set(IRQS_DISABLED);
}

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(IRQS_ENABLED);
}

static inline unsigned long arch_local_irq_save(void)
{
	return irq_soft_mask_set_return(IRQS_DISABLED);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags & IRQS_DISABLED;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

static inline void set_pmi_irq_pending(void)
{
	/*
	 * Invoked from PMU callback functions to set the PMI bit in the paca.
	 * This has to be called with irqs disabled (via hard_irq_disable()).
	 */
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(mfmsr() & MSR_EE);

	get_paca()->irq_happened |= PACA_IRQ_PMI;
}

static inline void clear_pmi_irq_pending(void)
{
	/*
	 * Invoked from PMU callback functions to clear the pending PMI bit
	 * in the paca.
	 */
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(mfmsr() & MSR_EE);

	get_paca()->irq_happened &= ~PACA_IRQ_PMI;
}

static inline bool pmi_irq_pending(void)
{
	/*
	 * Invoked from PMU callback functions to check if there is a pending
	 * PMI bit in the paca.
	 */
	if (get_paca()->irq_happened & PACA_IRQ_PMI)
		return true;

	return false;
}

#ifdef CONFIG_PPC_BOOK3S
/*
 * To support disabling and enabling of irqs together with PMIs, a set
 * of new powerpc_local_irq_pmu_save() and powerpc_local_irq_pmu_restore()
 * macros is added. They are implemented using the generic
 * local_irq_* code from include/linux/irqflags.h.
 */
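/*
 * Typical (illustrative) use from a PMU callback; the names below are
 * examples only:
 *
 *	unsigned long flags;
 *
 *	powerpc_local_irq_pmu_save(flags);
 *	... update state that a PMI handler might also touch ...
 *	powerpc_local_irq_pmu_restore(flags);
 */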
#define raw_local_irq_pmu_save(flags)					\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = irq_soft_mask_or_return(IRQS_DISABLED |		\
				IRQS_PMI_DISABLED);			\
	} while (0)

#define raw_local_irq_pmu_restore(flags)				\
	do {								\
		typecheck(unsigned long, flags);			\
		arch_local_irq_restore(flags);				\
	} while (0)

#ifdef CONFIG_TRACE_IRQFLAGS
#define powerpc_local_irq_pmu_save(flags)			\
	do {							\
		raw_local_irq_pmu_save(flags);			\
		if (!raw_irqs_disabled_flags(flags))		\
			trace_hardirqs_off();			\
	} while (0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		if (!raw_irqs_disabled_flags(flags))		\
			trace_hardirqs_on();			\
		raw_local_irq_pmu_restore(flags);		\
	} while (0)
#else
#define powerpc_local_irq_pmu_save(flags)			\
	do {							\
		raw_local_irq_pmu_save(flags);			\
	} while (0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		raw_local_irq_pmu_restore(flags);		\
	} while (0)
#endif /* CONFIG_TRACE_IRQFLAGS */

#endif /* CONFIG_PPC_BOOK3S */

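/*
 * The __hard_* helpers flip the MSR directly: BookE uses wrtee(i),
 * which only affects MSR[EE], while the 64-bit Book3S variants use
 * mtmsrd with L=1 so that only the EE and RI bits are changed.
 */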
#ifdef CONFIG_PPC_BOOK3E
#define __hard_irq_enable()	wrtee(MSR_EE)
#define __hard_irq_disable()	wrtee(0)
#define __hard_EE_RI_disable()	wrtee(0)
#define __hard_RI_enable()	do { } while (0)
#else
#define __hard_irq_enable()	__mtmsrd(MSR_EE|MSR_RI, 1)
#define __hard_irq_disable()	__mtmsrd(MSR_RI, 1)
#define __hard_EE_RI_disable()	__mtmsrd(0, 1)
#define __hard_RI_enable()	__mtmsrd(MSR_RI, 1)
#endif

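/*
 * Fully disable interrupts: clear MSR[EE], soft-disable everything
 * (including PMIs), and record PACA_IRQ_HARD_DIS so a later
 * arch_local_irq_restore() knows EE must be turned back on. On an
 * enabled->disabled transition, the current r1 is saved in
 * paca->saved_r1 and trace_hardirqs_off() is called.
 */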
#define hard_irq_disable()	do {					\
	unsigned long flags;						\
	__hard_irq_disable();						\
	flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);		\
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;			\
	if (!arch_irqs_disabled_flags(flags)) {				\
		asm ("stdx %%r1, 0, %1 ;"				\
		     : "=m" (local_paca->saved_r1)			\
		     : "b" (&local_paca->saved_r1));			\
		trace_hardirqs_off();					\
	}								\
} while (0)

static inline bool __lazy_irq_pending(u8 irq_happened)
{
	return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
}

/*
 * Check if a lazy IRQ is pending. Should be called with IRQs hard disabled.
 */
static inline bool lazy_irq_pending(void)
{
	return __lazy_irq_pending(get_paca()->irq_happened);
}

/*
 * Check if a lazy IRQ is pending, with no debugging checks.
 * Should be called with IRQs hard disabled.
 * For use in RI disabled code or other constrained situations.
 */
static inline bool lazy_irq_pending_nocheck(void)
{
	return __lazy_irq_pending(local_paca->irq_happened);
}

/*
 * This is called by asynchronous interrupts to conditionally
 * re-enable hard interrupts after having cleared the source
 * of the interrupt. They are kept disabled if there is a different
 * soft-masked interrupt pending that requires hard masking.
 */
static inline void may_hard_irq_enable(void)
{
	if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
		get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
		__hard_irq_enable();
	}
}

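/*
 * Report whether interrupts were soft-disabled in the context described
 * by regs; regs->softe carries the soft-mask state captured for that
 * register frame.
 */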
static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return (regs->softe & IRQS_DISABLED);
}

extern bool prep_irq_for_idle(void);
extern bool prep_irq_for_idle_irqsoff(void);
extern void irq_set_pending_from_srr1(unsigned long srr1);

#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();

extern void force_external_irq_replay(void);

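/*
 * Record a given soft-mask state in the register frame (regs->softe)
 * so that arch_irq_disabled_regs() sees the intended state. The 32-bit
 * variant below is a no-op, since there is no soft mask there.
 */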
static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
	regs->softe = val;
}
#else /* CONFIG_PPC64 */

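/*
 * 32-bit has no lazy/soft masking: the irqflags API manipulates
 * MSR[EE] directly, via wrtee(i) on BookE, the SPRN_EIE/SPRN_EID
 * special registers on 8xx, and plain mtmsr elsewhere.
 */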
static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(flags);
	else
		mtmsr(flags);
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else
		mtmsr(flags & ~MSR_EE);

	return flags;
}

static inline void arch_local_irq_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else
		mtmsr(mfmsr() & ~MSR_EE);
}

static inline void arch_local_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(MSR_EE);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EIE);
	else
		mtmsr(mfmsr() | MSR_EE);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#define hard_irq_disable()		arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}

static inline void may_hard_irq_enable(void) { }

static inline void clear_pmi_irq_pending(void) { }
static inline void set_pmi_irq_pending(void) { }
static inline bool pmi_irq_pending(void) { return false; }

static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
}
#endif /* CONFIG_PPC64 */

#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */