/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MWAIT_H
#define _ASM_X86_MWAIT_H

#include <linux/sched.h>
#include <linux/sched/idle.h>

#include <asm/cpufeature.h>
#include <asm/nospec-branch.h>

#define MWAIT_SUBSTATE_MASK		0xf
#define MWAIT_CSTATE_MASK		0xf
#define MWAIT_SUBSTATE_SIZE		4
#define MWAIT_HINT2CSTATE(hint)		(((hint) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK)
#define MWAIT_HINT2SUBSTATE(hint)	((hint) & MWAIT_SUBSTATE_MASK)

#define CPUID_MWAIT_LEAF		5
#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
#define CPUID5_ECX_INTERRUPT_BREAK	0x2

#define MWAIT_ECX_INTERRUPT_BREAK	0x1
#define MWAITX_ECX_TIMER_ENABLE		BIT(1)
#define MWAITX_MAX_WAIT_CYCLES		UINT_MAX
#define MWAITX_DISABLE_CSTATES		0xf0
#define TPAUSE_C01_STATE		1
#define TPAUSE_C02_STATE		0

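/*
 * Example (illustrative sketch, not kernel API): an MWAIT hint packs the
 * target C-state in bits [7:4] and the sub-state in bits [3:0]; the
 * HINT2* macros above simply invert this packing. The helper name is an
 * assumption made for illustration only.
 */
static inline unsigned int example_mwait_hint(unsigned int cstate,
					      unsigned int substate)
{
	return ((cstate & MWAIT_CSTATE_MASK) << MWAIT_SUBSTATE_SIZE) |
	       (substate & MWAIT_SUBSTATE_MASK);
}
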
static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/*
	 * "monitor %eax, %ecx, %edx;"
	 * EAX holds the address to monitor; ECX (extensions) and EDX
	 * (hints) are zero in all current callers.
	 */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     :: "a" (eax), "c" (ecx), "d" (edx));
}

static inline void __monitorx(const void *eax, unsigned long ecx,
			      unsigned long edx)
{
	/* "monitorx %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xfa;"
		     :: "a" (eax), "c" (ecx), "d" (edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* Flush potentially stale CPU buffers before idling (MDS mitigation). */
	mds_idle_clear_cpu_buffers();

	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

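/*
 * Example (illustrative sketch, not kernel API): the canonical
 * arm-check-wait sequence for MONITOR/MWAIT. The helper name and the
 * monitored flag are assumptions for illustration; the real idle path is
 * mwait_idle_with_hints() below.
 */
static inline void example_mwait_on(unsigned long *flag)
{
	/* Arm the monitor; a later store to *flag will break the wait. */
	__monitor(flag, 0, 0);

	/* Re-check after arming to close the race with an earlier store. */
	if (!READ_ONCE(*flag))
		__mwait(0, MWAIT_ECX_INTERRUPT_BREAK);
}
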
/*
 * MWAITX allows for a timer expiration to get the core out of a wait state
 * in addition to the default MWAIT exit condition of a store appearing at
 * a monitored virtual address.
 *
 * Registers:
 *
 * MWAITX ECX[1]: enable timer if set
 * MWAITX EBX[31:0]: max wait time expressed in SW P0 clocks. The software P0
 * frequency is the same as the TSC frequency.
 *
 * Below is a comparison between MWAIT and MWAITX on AMD processors:
 *
 *                 MWAIT                           MWAITX
 * opcode          0f 01 c9           |            0f 01 fb
 * ECX[0]                  value of RFLAGS.IF seen by instruction
 * ECX[1]          unused/#GP if set  |            enable timer if set
 * ECX[31:2]                     unused/#GP if set
 * EAX                           unused (reserved for hint)
 * EBX[31:0]       unused             |            max wait time (P0 clocks)
 *
 *                 MONITOR                         MONITORX
 * opcode          0f 01 c8           |            0f 01 fa
 * EAX                     (logical) address to monitor
 * ECX                     #GP if not zero
 */
static inline void __mwaitx(unsigned long eax, unsigned long ebx,
			    unsigned long ecx)
{
	/* No MDS buffer clear as this is AMD/HYGON only */

	/* "mwaitx %eax, %ebx, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xfb;"
		     :: "a" (eax), "b" (ebx), "c" (ecx));
}

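/*
 * Example (illustrative sketch, not kernel API): a single timer-bounded
 * wait, modelled on the MWAITX-based delay loop in arch/x86/lib/delay.c.
 * The helper name, the 'cycles' parameter, and monitoring a stack
 * variable are assumptions for illustration.
 */
static inline void example_mwaitx_delay(u32 cycles)
{
	u8 dummy;

	/* Arm the monitor; any cacheable address works for a pure delay. */
	__monitorx(&dummy, 0, 0);

	/*
	 * Wait for up to 'cycles' P0 (TSC-rate) clocks in EBX, with the
	 * timer enabled in ECX and all C-states disabled via the EAX hint.
	 */
	__mwaitx(MWAITX_DISABLE_CSTATES, cycles, MWAITX_ECX_TIMER_ENABLE);
}
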
/*
 * Re-enable interrupts right before MWAIT. The one-instruction interrupt
 * shadow of STI ensures MWAIT executes before any pending interrupt is
 * serviced, so the interrupt wakes MWAIT instead of being delivered in
 * the window before it.
 */
static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	/* Flush potentially stale CPU buffers before idling (MDS mitigation). */
	mds_idle_clear_cpu_buffers();

	/* "sti; mwait %eax, %ecx;" */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

/*
 * This uses the MONITOR/MWAIT instructions (first introduced on P4
 * processors with PNI/SSE3), which obviate the IPI otherwise needed to
 * trigger a need_resched check. We execute MONITOR against the task's
 * flags word and enter an optimized wait state through MWAIT; whenever
 * someone sets TIF_NEED_RESCHED, the store wakes us from MWAIT without
 * an IPI.
 *
 * Starting with Core Duo processors, MWAIT also accepts hints selecting
 * the target C-state, subject to CPU capability.
 */
static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
	if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) {
		if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) {
			/* Erratum workaround: flush the monitored cache line. */
			mb();
			clflush((void *)&current_thread_info()->flags);
			mb();
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__mwait(eax, ecx);
	}
	current_clr_polling();
}

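/*
 * Example (illustrative sketch): an idle driver would pass a hardware
 * C-state hint plus the interrupt-break extension, roughly like this.
 * The hint value 0x10 and the helper name are assumptions for
 * illustration; real hint values are model-specific and enumerated via
 * CPUID leaf 5.
 */
static inline void example_idle_enter(void)
{
	mwait_idle_with_hints(0x10, MWAIT_ECX_INTERRUPT_BREAK);
}
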
/*
 * Caller can specify whether to enter C0.1 (low latency, less
 * power saving) or C0.2 state (saves more power, but longer wakeup
 * latency). This may be overridden by the IA32_UMWAIT_CONTROL MSR
 * which can force requests for C0.2 to be downgraded to C0.1.
 */
static inline void __tpause(u32 ecx, u32 edx, u32 eax)
{
	/* "tpause %ecx, %edx, %eax;" */
	#ifdef CONFIG_AS_TPAUSE
	asm volatile("tpause %%ecx\n"
		     :
		     : "c"(ecx), "d"(edx), "a"(eax));
	#else
	asm volatile(".byte 0x66, 0x0f, 0xae, 0xf1\n"
		     :
		     : "c"(ecx), "d"(edx), "a"(eax));
	#endif
}
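
/*
 * Example (illustrative sketch): spin in C0.2 until an absolute TSC
 * deadline, similar in spirit to the TPAUSE-based delay helpers in
 * arch/x86/lib/delay.c. The helper name and 'cycles' parameter are
 * assumptions; rdtsc() and the 32-bit split helpers come from other
 * kernel headers not included here.
 */
static inline void example_tpause_delay(u64 cycles)
{
	u64 until = rdtsc() + cycles;

	/* TPAUSE takes the 64-bit TSC deadline in EDX:EAX. */
	__tpause(TPAUSE_C02_STATE, upper_32_bits(until), lower_32_bits(until));
}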

#endif /* _ASM_X86_MWAIT_H */