/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include <asm/cpu.h>
#include <asm/processor.h>
#include <uapi/asm/kvm_para.h>

extern u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
void kvm_set_cpu_caps(void);

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);

static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return (gpa >= BIT_ULL(cpuid_maxphyaddr(vcpu)));
}

struct cpuid_reg {
	u32 function;
	u32 index;
	int reg;
};

static const struct cpuid_reg reverse_cpuid[] = {
	[CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
	[CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
	[CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
	[CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
	[CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
	[CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
	[CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
	[CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
	[CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
	[CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
	[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
	[CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
	[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
	[CPUID_7_EDX]         = {         7, 0, CPUID_EDX},
	[CPUID_7_1_EAX]       = {         7, 1, CPUID_EAX},
	[CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
};

/*
 * Reverse CPUID and its derivatives can only be used for hardware-defined
 * feature words, i.e. words whose bits directly correspond to a CPUID leaf.
 * Retrieving a feature bit or masking guest CPUID from a Linux-defined word
 * is nonsensical as the bit number/mask is an arbitrary software-defined value
 * and can't be used by KVM to query/control guest capabilities.  And obviously
 * the leaf being queried must have an entry in the lookup table.
 */
static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
{
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
}
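/*
 * Worked example of the reverse lookup, assuming the standard cpufeatures.h
 * encoding where X86_FEATURE_XSAVES is word 10 (CPUID_D_1_EAX), bit 3:
 *
 *	x86_feature_cpuid(X86_FEATURE_XSAVES)
 *		-> word = (10 * 32 + 3) / 32 = 10
 *		-> reverse_cpuid[CPUID_D_1_EAX] = {0xd, 1, CPUID_EAX}
 *
 * i.e. the feature lives in CPUID.0xD, subleaf 1, register EAX.  Passing a
 * Linux-defined word such as CPUID_LNX_1 instead trips the BUILD_BUG_ON()s
 * above at compile time.
 */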

/*
 * Retrieve the bit mask from an X86_FEATURE_* definition.  Features contain
 * the hardware defined bit number (stored in bits 4:0) and a software defined
 * "word" (stored in bits 31:5).  The word is used to index into arrays of
 * bit masks that hold the per-cpu feature capabilities, e.g. this_cpu_has().
 */
static __always_inline u32 __feature_bit(int x86_feature)
{
	reverse_cpuid_check(x86_feature / 32);
	return 1 << (x86_feature & 31);
}

#define feature_bit(name)  __feature_bit(X86_FEATURE_##name)
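/*
 * Illustrative arithmetic, assuming the cpufeatures.h definition of
 * X86_FEATURE_RDTSCP as (1 * 32 + 27), i.e. word 1, bit 27:
 *
 *	__feature_bit(X86_FEATURE_RDTSCP)
 *		-> reverse_cpuid_check(59 / 32 = 1)	(word must be in the table)
 *		-> 1 << (59 & 31) = 1 << 27 = 0x08000000
 *
 * feature_bit(RDTSCP) expands to the same call via token pasting.
 */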

static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_feature)
{
	unsigned int x86_leaf = x86_feature / 32;

	reverse_cpuid_check(x86_leaf);
	return reverse_cpuid[x86_leaf];
}

static __always_inline u32 *__cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
						  u32 reg)
{
	switch (reg) {
	case CPUID_EAX:
		return &entry->eax;
	case CPUID_EBX:
		return &entry->ebx;
	case CPUID_ECX:
		return &entry->ecx;
	case CPUID_EDX:
		return &entry->edx;
	default:
		BUILD_BUG();
		return NULL;
	}
}

static __always_inline u32 *cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
						unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

static __always_inline u32 cpuid_entry_get(struct kvm_cpuid_entry2 *entry,
					   unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	return *reg & __feature_bit(x86_feature);
}

static __always_inline bool cpuid_entry_has(struct kvm_cpuid_entry2 *entry,
					    unsigned int x86_feature)
{
	return cpuid_entry_get(entry, x86_feature);
}

static __always_inline void cpuid_entry_clear(struct kvm_cpuid_entry2 *entry,
					      unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	*reg &= ~__feature_bit(x86_feature);
}

static __always_inline void cpuid_entry_set(struct kvm_cpuid_entry2 *entry,
					    unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	*reg |= __feature_bit(x86_feature);
}

static __always_inline void cpuid_entry_change(struct kvm_cpuid_entry2 *entry,
					       unsigned int x86_feature,
					       bool set)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	/*
	 * Open coded instead of using cpuid_entry_{clear,set}() to coerce the
	 * compiler into using CMOV instead of Jcc when possible.
	 */
	if (set)
		*reg |= __feature_bit(x86_feature);
	else
		*reg &= ~__feature_bit(x86_feature);
}

static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
						 enum cpuid_leafs leaf)
{
	u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);

	BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
	*reg = kvm_cpu_caps[leaf];
}
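/*
 * Illustrative use of cpuid_entry_override(), modeled on how KVM's common
 * CPUID code fills guest entries from kvm_cpu_caps (a sketch, not a verbatim
 * call site): replace the entire EBX output of CPUID.7.0 with KVM's
 * capability mask for that word.
 *
 *	cpuid_entry_override(entry, CPUID_7_0_EBX);
 */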

static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
	struct kvm_cpuid_entry2 *entry;

	entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
	if (!entry)
		return NULL;

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
					    unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (!reg)
		return false;

	return *reg & __feature_bit(x86_feature);
}
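/*
 * Typical caller pattern (a sketch of how vendor code gates an MSR on guest
 * CPUID, not a verbatim call site): refuse a guest-initiated access if
 * userspace never exposed the corresponding feature to the guest.
 *
 *	if (!msr_info->host_initiated &&
 *	    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
 *		return 1;
 */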

static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
					      unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (reg)
		*reg &= ~__feature_bit(x86_feature);
}

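/*
 * The vendor check below keys off the 12-byte vendor string returned in
 * EBX/EDX/ECX of CPUID leaf 0 ("AuthenticAMD" for AMD, "HygonGenuine" for
 * Hygon); is_guest_vendor_amd()/is_guest_vendor_hygon() compare the three
 * registers against those signatures.
 */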
static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best &&
	       (is_guest_vendor_amd(best->ebx, best->ecx, best->edx) ||
		is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
}

static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}
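/*
 * Family/model/stepping are decoded from the guest's CPUID.1 EAX by the
 * x86_family()/x86_model()/x86_stepping() helpers from <asm/cpu.h>.
 * Worked example with an assumed EAX of 0x000906EA:
 *
 *	stepping = bits  3:0 = 0xA
 *	model    = bits  7:4 = 0xE, extended model bits 19:16 = 0x9,
 *		   giving an effective model of 0x9E (family >= 6)
 *	family   = bits 11:8 = 0x6 (extended family is only added when
 *		   the base family is 0xF)
 */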

static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
}

static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB));
}

static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}

static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_misc_features_enables &
		  MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
{
	unsigned int x86_leaf = x86_feature / 32;

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
}

static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
{
	unsigned int x86_leaf = x86_feature / 32;

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
}

static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
{
	unsigned int x86_leaf = x86_feature / 32;

	reverse_cpuid_check(x86_leaf);
	return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
}

static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
{
	return !!kvm_cpu_cap_get(x86_feature);
}

static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
{
	if (boot_cpu_has(x86_feature))
		kvm_cpu_cap_set(x86_feature);
}
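/*
 * Illustrative use of the kvm_cpu_cap_* accessors (a sketch of the pattern
 * vendor modules use when building KVM's capability mask, not a verbatim
 * call site): advertise a feature only if the boot CPU has it, then query
 * the result elsewhere.
 *
 *	kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
 *	...
 *	if (kvm_cpu_cap_has(X86_FEATURE_WAITPKG))
 *		enable_waitpkg_exiting();	// hypothetical helper
 */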

static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
}

static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
					 unsigned int kvm_feature)
{
	if (!vcpu->arch.pv_cpuid.enforce)
		return true;

	return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
}
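/*
 * guest_pv_has() gates KVM's paravirtual interfaces on the KVM_FEATURE_*
 * bits of CPUID leaf KVM_CPUID_FEATURES, but only when userspace opted in
 * to enforcement; otherwise all PV features are assumed available for
 * backwards compatibility.  Illustrative caller pattern (a sketch, not a
 * verbatim call site):
 *
 *	if (!guest_pv_has(vcpu, KVM_FEATURE_PV_UNHALT))
 *		return -KVM_ENOSYS;	// assumed error-handling convention
 */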

#endif