/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__

#include <asm/nospec-branch.h>

/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __section(".cpuidle.text")

/*
 * Interrupt control:
 */

/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
extern __always_inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}
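
/*
 * Example (illustrative sketch, not kernel code): callers typically
 * test the IF bit of the returned value using X86_EFLAGS_IF from
 * <asm/processor-flags.h>:
 *
 *	unsigned long flags = native_save_fl();
 *	bool irqs_on = !!(flags & X86_EFLAGS_IF);
 */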

extern inline void native_restore_fl(unsigned long flags);
extern __always_inline void native_restore_fl(unsigned long flags)
{
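	/*
	 * "popf" rewrites all of EFLAGS, including IF, so this helper
	 * can both enable and disable interrupts depending on @flags;
	 * the "cc" clobber reflects that the arithmetic flags change.
	 */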
	asm volatile("push %0 ; popf"
		     : /* no output */
		     :"g" (flags)
		     :"memory", "cc");
}

static __always_inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}

static __always_inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}

static inline __cpuidle void native_safe_halt(void)
{
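	/* Mitigate MDS: clear CPU buffers before going idle. */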
	mds_idle_clear_cpu_buffers();
	asm volatile("sti; hlt": : :"memory");
}

static inline __cpuidle void native_halt(void)
{
	mds_idle_clear_cpu_buffers();
	asm volatile("hlt": : :"memory");
}

#endif

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

static __always_inline unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}

static __always_inline void arch_local_irq_restore(unsigned long flags)
{
	native_restore_fl(flags);
}

static __always_inline void arch_local_irq_disable(void)
{
	native_irq_disable();
}

static __always_inline void arch_local_irq_enable(void)
{
	native_irq_enable();
}

/*
 * Used in the idle loop; sti takes effect only after the instruction
 * that follows it, so "sti; hlt" halts with interrupts enabled while
 * leaving no window for a wakeup interrupt to be missed:
 */
static inline __cpuidle void arch_safe_halt(void)
{
	native_safe_halt();
}
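
/*
 * Why sti and hlt must be back to back (illustrative sketch, not
 * kernel code): with a separate enable, the wakeup interrupt could be
 * delivered and handled in the window before hlt, which would then
 * sleep with no wakeup pending:
 *
 *	arch_local_irq_enable();
 *	// wakeup interrupt delivered and handled here ...
 *	native_halt();		// ... so hlt waits forever
 *
 * Because sti only takes effect after the following instruction, the
 * combined "sti; hlt" leaves no such window.
 */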

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static inline __cpuidle void halt(void)
{
	native_halt();
}

/*
 * For spinlocks, etc:
 */
static __always_inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
	arch_local_irq_disable();
	return flags;
}
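
/*
 * Typical usage (sketch): these are normally reached through the
 * generic wrappers in <linux/irqflags.h> rather than called directly:
 *
 *	unsigned long flags = arch_local_irq_save();
 *	// ... critical section with interrupts off ...
 *	arch_local_irq_restore(flags);
 */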
#else

#define ENABLE_INTERRUPTS(x)	sti
#define DISABLE_INTERRUPTS(x)	cli

#ifdef CONFIG_X86_64
#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(x)		pushfq; popq %rax
#endif
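
/*
 * SAVE_FLAGS is consumed by the entry code's IRQs-off assertions;
 * roughly (sketch based on DEBUG_ENTRY_ASSERT_IRQS_OFF in
 * arch/x86/entry/entry_64.S):
 *
 *	pushq	%rax
 *	SAVE_FLAGS(CLBR_RAX)
 *	testl	$X86_EFLAGS_IF, %eax
 *	jz	.Lokay
 *	ud2
 */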

#define INTERRUPT_RETURN	jmp native_iret
#define USERGS_SYSRET64				\
	swapgs;					\
	sysretq;
#define USERGS_SYSRET32				\
	swapgs;					\
	sysretl

#else
#define INTERRUPT_RETURN		iret
#endif

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT_XXL */

#ifndef __ASSEMBLY__
static __always_inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & X86_EFLAGS_IF);
}

static __always_inline int arch_irqs_disabled(void)
{
	unsigned long flags = arch_local_save_flags();

	return arch_irqs_disabled_flags(flags);
}
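
/*
 * Example (sketch): debug code commonly asserts the IRQ state with
 * these helpers, e.g. lockdep-style checks of the form:
 *
 *	WARN_ON_ONCE(!arch_irqs_disabled());
 */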
#else
#ifdef CONFIG_X86_64
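/*
 * On Xen PV, swapgs must not be executed (the hypervisor manages the
 * GS base for the guest), so ALTERNATIVE patches the instruction out
 * at boot on X86_FEATURE_XENPV systems.
 */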
#ifdef CONFIG_XEN_PV
#define SWAPGS	ALTERNATIVE "swapgs", "", X86_FEATURE_XENPV
#else
#define SWAPGS	swapgs
#endif
#endif
#endif /* !__ASSEMBLY__ */

#endif