// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/sched/stat.h>

#include <asm/processor.h>
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
#include "pmu.h"

/*
 * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be
 * aligned to sizeof(unsigned long) because it's not accessed via bitops.
 */
u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
EXPORT_SYMBOL_GPL(kvm_cpu_caps);

static u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
        int feature_bit = 0;
        u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

        xstate_bv &= XFEATURE_MASK_EXTEND;
        while (xstate_bv) {
                if (xstate_bv & 0x1) {
                        u32 eax, ebx, ecx, edx, offset;

                        cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
                        offset = compacted ? ret : ebx;
                        ret = max(ret, offset + eax);
                }

                xstate_bv >>= 1;
                feature_bit++;
        }

        return ret;
}
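/*
 * Illustrative sizing example (typical values, not guaranteed on all CPUs):
 * with xstate_bv enumerating only AVX state (bit 2), many CPUs report
 * CPUID.0xD.2 as EAX = 256 (size) and EBX = 576 (offset).  Starting from
 * ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET = 64 + 512 = 576, the standard
 * (non-compacted) format yields max(576, 576 + 256) = 832 bytes; the
 * compacted format packs the area at the running offset and happens to
 * yield the same 832 bytes here because AVX is the only extended feature.
 */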
#define F feature_bit

static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
        struct kvm_cpuid_entry2 *entries, int nent, u32 function, u32 index)
{
        struct kvm_cpuid_entry2 *e;
        int i;

        for (i = 0; i < nent; i++) {
                e = &entries[i];

                if (e->function == function && (e->index == index ||
                    !(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX)))
                        return e;
        }

        return NULL;
}

static int kvm_check_cpuid(struct kvm_cpuid_entry2 *entries, int nent)
{
        struct kvm_cpuid_entry2 *best;

        /*
         * The existing code assumes virtual address is 48-bit or 57-bit in the
         * canonical address checks; exit if it is ever changed.
         */
        best = cpuid_entry2_find(entries, nent, 0x80000008, 0);
        if (best) {
                int vaddr_bits = (best->eax & 0xff00) >> 8;

                if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
                        return -EINVAL;
        }

        return 0;
}

void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);

        /*
         * Save the feature bitmap to avoid a CPUID lookup on every PV
         * operation.
         */
        if (best)
                vcpu->arch.pv_cpuid.features = best->eax;
}

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        if (best) {
                /* Update OSXSAVE bit */
                if (boot_cpu_has(X86_FEATURE_XSAVE))
                        cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
                                           kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE));

                cpuid_entry_change(best, X86_FEATURE_APIC,
                                   vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
        }

        best = kvm_find_cpuid_entry(vcpu, 7, 0);
        if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
                cpuid_entry_change(best, X86_FEATURE_OSPKE,
                                   kvm_read_cr4_bits(vcpu, X86_CR4_PKE));

        best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
        if (best)
                best->ebx = xstate_required_size(vcpu->arch.xcr0, false);

        best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
        if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
                     cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
                best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

        best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
        if (kvm_hlt_in_guest(vcpu->kvm) && best &&
            (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
                best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);

        if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
                best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
                if (best)
                        cpuid_entry_change(best, X86_FEATURE_MWAIT,
                                           vcpu->arch.ia32_misc_enable_msr &
                                           MSR_IA32_MISC_ENABLE_MWAIT);
        }
}
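/*
 * Runtime-bit example (behavior of the function above, values from the
 * architectural CPUID layout): if the guest sets CR4.OSXSAVE, the next call
 * here flips CPUID.01H:ECX[27] (OSXSAVE) on in the cached entry, so a
 * subsequent guest CPUID sees the bit without any userspace update of the
 * CPUID table.  Clearing CR4.PKE likewise clears CPUID.07H:ECX[4] (OSPKE).
 */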
static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        if (best && apic) {
                if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
                        apic->lapic_timer.timer_mode_mask = 3 << 17;
                else
                        apic->lapic_timer.timer_mode_mask = 1 << 17;

                kvm_apic_set_version(vcpu);
        }

        best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
        if (!best)
                vcpu->arch.guest_supported_xcr0 = 0;
        else
                vcpu->arch.guest_supported_xcr0 =
                        (best->eax | ((u64)best->edx << 32)) & supported_xcr0;

        kvm_update_pv_runtime(vcpu);

        vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
        kvm_mmu_reset_context(vcpu);

        kvm_pmu_refresh(vcpu);
        vcpu->arch.cr4_guest_rsvd_bits =
                __cr4_reserved_bits(guest_cpuid_has, vcpu);

        vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63);

        /* Invoke the vendor callback only after the above state is updated. */
        kvm_x86_ops.vcpu_after_set_cpuid(vcpu);
}

static int is_efer_nx(void)
{
        return host_efer & EFER_NX;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_cpuid_entry2 *e, *entry;

        entry = NULL;
        for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
                e = &vcpu->arch.cpuid_entries[i];
                if (e->function == 0x80000001) {
                        entry = e;
                        break;
                }
        }
        if (entry && cpuid_entry_has(entry, X86_FEATURE_NX) && !is_efer_nx()) {
                cpuid_entry_clear(entry, X86_FEATURE_NX);
                printk(KERN_INFO "kvm: guest NX capability removed\n");
        }
}

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
        if (!best || best->eax < 0x80000008)
                goto not_found;
        best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
        if (best)
                return best->eax & 0xff;
not_found:
        return 36;
}

/* Used when old userspace passes the legacy KVM_SET_CPUID format to a new kernel. */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                             struct kvm_cpuid *cpuid,
                             struct kvm_cpuid_entry __user *entries)
{
        int r, i;
        struct kvm_cpuid_entry *e = NULL;
        struct kvm_cpuid_entry2 *e2 = NULL;

        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                return -E2BIG;

        if (cpuid->nent) {
                e = vmemdup_user(entries, array_size(sizeof(*e), cpuid->nent));
                if (IS_ERR(e))
                        return PTR_ERR(e);

                e2 = kvmalloc_array(cpuid->nent, sizeof(*e2), GFP_KERNEL_ACCOUNT);
                if (!e2) {
                        r = -ENOMEM;
                        goto out_free_cpuid;
                }
        }
        for (i = 0; i < cpuid->nent; i++) {
                e2[i].function = e[i].function;
                e2[i].eax = e[i].eax;
                e2[i].ebx = e[i].ebx;
                e2[i].ecx = e[i].ecx;
                e2[i].edx = e[i].edx;
                e2[i].index = 0;
                e2[i].flags = 0;
                e2[i].padding[0] = 0;
                e2[i].padding[1] = 0;
                e2[i].padding[2] = 0;
        }

        r = kvm_check_cpuid(e2, cpuid->nent);
        if (r) {
                kvfree(e2);
                goto out_free_cpuid;
        }

        kvfree(vcpu->arch.cpuid_entries);
        vcpu->arch.cpuid_entries = e2;
        vcpu->arch.cpuid_nent = cpuid->nent;

        cpuid_fix_nx_cap(vcpu);
        kvm_update_cpuid_runtime(vcpu);
        kvm_vcpu_after_set_cpuid(vcpu);

out_free_cpuid:
        kvfree(e);

        return r;
}

int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)
{
        struct kvm_cpuid_entry2 *e2 = NULL;
        int r;

        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                return -E2BIG;

        if (cpuid->nent) {
                e2 = vmemdup_user(entries, array_size(sizeof(*e2), cpuid->nent));
                if (IS_ERR(e2))
                        return PTR_ERR(e2);
        }

        r = kvm_check_cpuid(e2, cpuid->nent);
        if (r) {
                kvfree(e2);
                return r;
        }

        kvfree(vcpu->arch.cpuid_entries);
        vcpu->arch.cpuid_entries = e2;
        vcpu->arch.cpuid_nent = cpuid->nent;

        kvm_update_cpuid_runtime(vcpu);
        kvm_vcpu_after_set_cpuid(vcpu);

        return 0;
}

int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent < vcpu->arch.cpuid_nent)
                goto out;
        r = -EFAULT;
        if (copy_to_user(entries, vcpu->arch.cpuid_entries,
                         vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        return 0;

out:
        cpuid->nent = vcpu->arch.cpuid_nent;
        return r;
}
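/*
 * Typical userspace flow (hypothetical VMM snippet, shown for context; the
 * ioctl names are real, the surrounding code is not part of this file):
 *
 *      struct kvm_cpuid2 *cpuid = calloc(1, sizeof(*cpuid) +
 *                      N * sizeof(struct kvm_cpuid_entry2));
 *      cpuid->nent = N;
 *      ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);  // system-wide caps
 *      // ... trim/adjust entries for this VM ...
 *      ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);          // per-vCPU table
 *
 * KVM_SET_CPUID2 lands in kvm_vcpu_ioctl_set_cpuid2() above.
 */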
static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
{
        const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32);
        struct kvm_cpuid_entry2 entry;

        reverse_cpuid_check(leaf);
        kvm_cpu_caps[leaf] &= mask;

        cpuid_count(cpuid.function, cpuid.index,
                    &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);

        kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg);
}

void kvm_set_cpu_caps(void)
{
        unsigned int f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
        unsigned int f_gbpages = F(GBPAGES);
        unsigned int f_lm = F(LM);
#else
        unsigned int f_gbpages = 0;
        unsigned int f_lm = 0;
#endif

        BUILD_BUG_ON(sizeof(kvm_cpu_caps) >
                     sizeof(boot_cpu_data.x86_capability));

        memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability,
               sizeof(kvm_cpu_caps));

        kvm_cpu_cap_mask(CPUID_1_ECX,
                /*
                 * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not*
                 * advertised to guests via CPUID!
                 */
                F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
                0 /* DS-CPL, VMX, SMX, EST */ |
                0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
                F(FMA) | F(CX16) | 0 /* xTPR Update */ | F(PDCM) |
                F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
                F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
                0 /* Reserved */ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
                F(F16C) | F(RDRAND)
        );
        /* KVM emulates x2apic in software irrespective of host support. */
        kvm_cpu_cap_set(X86_FEATURE_X2APIC);

        kvm_cpu_cap_mask(CPUID_1_EDX,
                F(FPU) | F(VME) | F(DE) | F(PSE) |
                F(TSC) | F(MSR) | F(PAE) | F(MCE) |
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
                0 /* Reserved, DS, ACPI */ | F(MMX) |
                F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
                0 /* HTT, TM, Reserved, PBE */
        );

        kvm_cpu_cap_mask(CPUID_7_0_EBX,
                F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
                F(BMI2) | F(ERMS) | 0 /* INVPCID */ | F(RTM) | 0 /* MPX */ | F(RDSEED) |
                F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
                F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
                F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | 0 /* INTEL_PT */
        );

        kvm_cpu_cap_mask(CPUID_7_ECX,
                F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /* OSPKE */ | F(RDPID) |
                F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
                F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
                F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /* WAITPKG */
        );
        /* Set LA57 based on hardware capability. */
        if (cpuid_ecx(7) & F(LA57))
                kvm_cpu_cap_set(X86_FEATURE_LA57);

        /*
         * PKU is not yet implemented for shadow paging and requires OSPKE
         * to be set on the host.  Clear it if either condition is missing.
         */
        if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
                kvm_cpu_cap_clear(X86_FEATURE_PKU);

        kvm_cpu_cap_mask(CPUID_7_EDX,
                F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
                F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
                F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
                F(SERIALIZE) | F(TSXLDTRK)
        );
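        /*
         * Note the pattern here: kvm_cpu_cap_mask() ANDs a leaf with the
         * host's raw CPUID, so features KVM emulates in software disappear
         * on hosts that lack them and must be re-set afterwards with
         * kvm_cpu_cap_set() (as X2APIC was above and as done below).
         */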
        /* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
        kvm_cpu_cap_set(X86_FEATURE_TSC_ADJUST);
        kvm_cpu_cap_set(X86_FEATURE_ARCH_CAPABILITIES);

        if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))
                kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL);
        if (boot_cpu_has(X86_FEATURE_STIBP))
                kvm_cpu_cap_set(X86_FEATURE_INTEL_STIBP);
        if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
                kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD);

        kvm_cpu_cap_mask(CPUID_7_1_EAX,
                F(AVX512_BF16)
        );

        kvm_cpu_cap_mask(CPUID_D_1_EAX,
                F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES)
        );

        kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
                F(LAHF_LM) | F(CMP_LEGACY) | 0 /* SVM */ | 0 /* ExtApicSpace */ |
                F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
                F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
                0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
                F(TOPOEXT) | F(PERFCTR_CORE)
        );

        kvm_cpu_cap_mask(CPUID_8000_0001_EDX,
                F(FPU) | F(VME) | F(DE) | F(PSE) |
                F(TSC) | F(MSR) | F(PAE) | F(MCE) |
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* Reserved */ |
                f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
                F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
                0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
        );

        if (!tdp_enabled && IS_ENABLED(CONFIG_X86_64))
                kvm_cpu_cap_set(X86_FEATURE_GBPAGES);

        kvm_cpu_cap_mask(CPUID_8000_0008_EBX,
                F(CLZERO) | F(XSAVEERPTR) |
                F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
                F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON)
        );

        /*
         * AMD has separate bits for each SPEC_CTRL bit.
         * arch/x86/kernel/cpu/bugs.c is kind enough to
         * record that in cpufeatures so use them.
         */
        if (boot_cpu_has(X86_FEATURE_IBPB))
                kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB);
        if (boot_cpu_has(X86_FEATURE_IBRS))
                kvm_cpu_cap_set(X86_FEATURE_AMD_IBRS);
        if (boot_cpu_has(X86_FEATURE_STIBP))
                kvm_cpu_cap_set(X86_FEATURE_AMD_STIBP);
        if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
                kvm_cpu_cap_set(X86_FEATURE_AMD_SSBD);
        if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                kvm_cpu_cap_set(X86_FEATURE_AMD_SSB_NO);

        /*
         * The preference is to use the SPEC_CTRL MSR instead of the
         * VIRT_SPEC MSR.
         */
        if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
            !boot_cpu_has(X86_FEATURE_AMD_SSBD))
                kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);

        if (cpu_feature_enabled(X86_FEATURE_SRSO_NO))
                kvm_cpu_cap_set(X86_FEATURE_SRSO_NO);
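        /*
         * Concrete case for the VIRT_SSBD fallback above (illustrative):
         * first-generation Zen hosts expose LS_CFG_SSBD but not AMD_SSBD,
         * so their guests are offered VIRT_SSBD and toggle speculative
         * store bypass via MSR_AMD64_VIRT_SPEC_CTRL; on hosts with AMD_SSBD
         * the architectural SPEC_CTRL interface is preferred instead.
         */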
        /*
         * Hide all SVM features by default, SVM will set the cap bits for
         * features it emulates and/or exposes for L1.
         */
        kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);

        kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
                F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
                F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
                F(PMM) | F(PMM_EN)
        );
}
EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);

struct kvm_cpuid_array {
        struct kvm_cpuid_entry2 *entries;
        int maxnent;
        int nent;
};

static struct kvm_cpuid_entry2 *get_next_cpuid(struct kvm_cpuid_array *array)
{
        if (array->nent >= array->maxnent)
                return NULL;

        return &array->entries[array->nent++];
}

static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
                                              u32 function, u32 index)
{
        struct kvm_cpuid_entry2 *entry = get_next_cpuid(array);

        if (!entry)
                return NULL;

        entry->function = function;
        entry->index = index;
        entry->flags = 0;

        cpuid_count(entry->function, entry->index,
                    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);

        switch (function) {
        case 4:
        case 7:
        case 0xb:
        case 0xd:
        case 0xf:
        case 0x10:
        case 0x12:
        case 0x14:
        case 0x17:
        case 0x18:
        case 0x1f:
        case 0x8000001d:
                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                break;
        }

        return entry;
}

static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
{
        struct kvm_cpuid_entry2 *entry;

        if (array->nent >= array->maxnent)
                return -E2BIG;

        entry = &array->entries[array->nent];
        entry->function = func;
        entry->index = 0;
        entry->flags = 0;

        switch (func) {
        case 0:
                entry->eax = 7;
                ++array->nent;
                break;
        case 1:
                entry->ecx = F(MOVBE);
                ++array->nent;
                break;
        case 7:
                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                entry->eax = 0;
                if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
                        entry->ecx = F(RDPID);
                ++array->nent;
                break;
        default:
                break;
        }

        return 0;
}
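/*
 * Example of the emulated view (the values follow directly from the code
 * above): KVM_GET_EMULATED_CPUID reports only features KVM can emulate when
 * the host lacks them, e.g. leaf 1 carries just F(MOVBE) in ECX because
 * MOVBE can be handled by the instruction emulator, and leaf 7 advertises
 * RDPID only when RDTSCP is supported, since RDPID is then emulated via the
 * guest's TSC_AUX value.
 */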
static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
{
        struct kvm_cpuid_entry2 *entry;
        int r, i, max_idx;

        /* All calls to cpuid_count() should be made on the same cpu. */
        get_cpu();

        r = -E2BIG;

        entry = do_host_cpuid(array, function, 0);
        if (!entry)
                goto out;

        switch (function) {
        case 0:
                /* Limited to the highest leaf implemented in KVM. */
                entry->eax = min(entry->eax, 0x1fU);
                break;
        case 1:
                cpuid_entry_override(entry, CPUID_1_EDX);
                cpuid_entry_override(entry, CPUID_1_ECX);
                break;
        case 2:
                /*
                 * On ancient CPUs, function 2 entries are STATEFUL.  That is,
                 * CPUID(function=2, index=0) may return different results each
                 * time, with the least-significant byte in EAX enumerating the
                 * number of times software should do CPUID(2, 0).
                 *
                 * Modern CPUs, i.e. every CPU KVM has *ever* run on, are less
                 * idiotic.  Intel's SDM states that EAX & 0xff "will always
                 * return 01H. Software should ignore this value and not
                 * interpret it as an informational descriptor", while AMD's
                 * APM states that CPUID(2) is reserved.
                 *
                 * WARN if a frankenstein CPU that supports virtualization and
                 * a stateful CPUID.0x2 is encountered.
                 */
                WARN_ON_ONCE((entry->eax & 0xff) > 1);
                break;
        /* functions 4 and 0x8000001d have additional index. */
        case 4:
        case 0x8000001d:
                /*
                 * Read entries until the cache type in the previous entry is
                 * zero, i.e. indicates an invalid entry.
                 */
                for (i = 1; entry->eax & 0x1f; ++i) {
                        entry = do_host_cpuid(array, function, i);
                        if (!entry)
                                goto out;
                }
                break;
        case 6: /* Thermal management */
                entry->eax = 0x4; /* allow ARAT */
                entry->ebx = 0;
                entry->ecx = 0;
                entry->edx = 0;
                break;
        /* function 7 has additional index. */
        case 7:
                entry->eax = min(entry->eax, 1u);
                cpuid_entry_override(entry, CPUID_7_0_EBX);
                cpuid_entry_override(entry, CPUID_7_ECX);
                cpuid_entry_override(entry, CPUID_7_EDX);

                /* KVM only supports 0x7.0 and 0x7.1, capped above via min(). */
                if (entry->eax == 1) {
                        entry = do_host_cpuid(array, function, 1);
                        if (!entry)
                                goto out;

                        cpuid_entry_override(entry, CPUID_7_1_EAX);
                        entry->ebx = 0;
                        entry->ecx = 0;
                        entry->edx = 0;
                }
                break;
        case 0xa: { /* Architectural Performance Monitoring */
                struct x86_pmu_capability cap;
                union cpuid10_eax eax;
                union cpuid10_edx edx;

                if (!static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
                        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                        break;
                }

                perf_get_x86_pmu_capability(&cap);

                /*
                 * Only support guest architectural pmu on a host
                 * with architectural pmu.
                 */
                if (!cap.version)
                        memset(&cap, 0, sizeof(cap));

                eax.split.version_id = min(cap.version, 2);
                eax.split.num_counters = cap.num_counters_gp;
                eax.split.bit_width = cap.bit_width_gp;
                eax.split.mask_length = cap.events_mask_len;

                edx.split.num_counters_fixed = min(cap.num_counters_fixed, MAX_FIXED_COUNTERS);
                edx.split.bit_width_fixed = cap.bit_width_fixed;
                if (cap.version)
                        edx.split.anythread_deprecated = 1;
                edx.split.reserved1 = 0;
                edx.split.reserved2 = 0;

                entry->eax = eax.full;
                entry->ebx = cap.events_mask;
                entry->ecx = 0;
                entry->edx = edx.full;
                break;
        }
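        /*
         * Worked packing example for case 0xa above (illustrative numbers):
         * a host PMU with version 2, 8 GP counters of width 48 and an event
         * mask length of 8 yields EAX = (8 << 24) | (48 << 16) | (8 << 8) |
         * 2 = 0x08300802, matching the union cpuid10_eax bit layout.
         */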
        case 0x1f:
        case 0xb:
                /*
                 * No topology; a valid topology is indicated by the presence
                 * of subleaf 1.
                 */
                entry->eax = entry->ebx = entry->ecx = 0;
                break;
        case 0xd:
                entry->eax &= supported_xcr0;
                entry->ebx = xstate_required_size(supported_xcr0, false);
                entry->ecx = entry->ebx;
                entry->edx &= supported_xcr0 >> 32;
                if (!supported_xcr0)
                        break;

                entry = do_host_cpuid(array, function, 1);
                if (!entry)
                        goto out;

                cpuid_entry_override(entry, CPUID_D_1_EAX);
                if (entry->eax & (F(XSAVES)|F(XSAVEC)))
                        entry->ebx = xstate_required_size(supported_xcr0 | supported_xss,
                                                          true);
                else {
                        WARN_ON_ONCE(supported_xss != 0);
                        entry->ebx = 0;
                }
                entry->ecx &= supported_xss;
                entry->edx &= supported_xss >> 32;

                for (i = 2; i < 64; ++i) {
                        bool s_state;

                        if (supported_xcr0 & BIT_ULL(i))
                                s_state = false;
                        else if (supported_xss & BIT_ULL(i))
                                s_state = true;
                        else
                                continue;

                        entry = do_host_cpuid(array, function, i);
                        if (!entry)
                                goto out;

                        /*
                         * The supported check above should have filtered out
                         * invalid sub-leafs.  Only valid sub-leafs should
                         * reach this point, and they should have a non-zero
                         * save state size.  Furthermore, check whether the
                         * processor agrees with supported_xcr0/supported_xss
                         * on whether this is an XCR0- or IA32_XSS-managed area.
                         */
                        if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {
                                --array->nent;
                                continue;
                        }
                        entry->edx = 0;
                }
                break;
        /* Intel PT */
        case 0x14:
                if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) {
                        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                        break;
                }

                for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
                        if (!do_host_cpuid(array, function, i))
                                goto out;
                }
                break;
        case KVM_CPUID_SIGNATURE: {
                static const char signature[12] = "KVMKVMKVM\0\0";
                const u32 *sigptr = (const u32 *)signature;

                entry->eax = KVM_CPUID_FEATURES;
                entry->ebx = sigptr[0];
                entry->ecx = sigptr[1];
                entry->edx = sigptr[2];
                break;
        }
        case KVM_CPUID_FEATURES:
                entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
                             (1 << KVM_FEATURE_NOP_IO_DELAY) |
                             (1 << KVM_FEATURE_CLOCKSOURCE2) |
                             (1 << KVM_FEATURE_ASYNC_PF) |
                             (1 << KVM_FEATURE_PV_EOI) |
                             (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
                             (1 << KVM_FEATURE_PV_UNHALT) |
                             (1 << KVM_FEATURE_PV_TLB_FLUSH) |
                             (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
                             (1 << KVM_FEATURE_PV_SEND_IPI) |
                             (1 << KVM_FEATURE_POLL_CONTROL) |
                             (1 << KVM_FEATURE_PV_SCHED_YIELD) |
                             (1 << KVM_FEATURE_ASYNC_PF_INT);

                if (sched_info_on())
                        entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

                entry->ebx = 0;
                entry->ecx = 0;
                entry->edx = 0;
                break;
        case 0x80000000:
                entry->eax = min(entry->eax, 0x8000001f);
                break;
        case 0x80000001:
                entry->ebx &= ~GENMASK(27, 16);
                cpuid_entry_override(entry, CPUID_8000_0001_EDX);
                cpuid_entry_override(entry, CPUID_8000_0001_ECX);
                break;
        case 0x80000006:
                /* Drop reserved bits, pass host L2 cache and TLB info. */
                entry->edx &= ~GENMASK(17, 16);
                break;
        case 0x80000007: /* Advanced power management */
                /* invariant TSC is CPUID.80000007H:EDX[8] */
                entry->edx &= (1 << 8);
                /* mask against host */
                entry->edx &= boot_cpu_data.x86_power;
                entry->eax = entry->ebx = entry->ecx = 0;
                break;
        case 0x80000008: {
                unsigned g_phys_as = (entry->eax >> 16) & 0xff;
                unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
                unsigned phys_as = entry->eax & 0xff;

                /*
                 * Use bare metal's MAXPHYADDR if the CPU doesn't report guest
                 * MAXPHYADDR separately, or if TDP (NPT) is disabled, as the
                 * guest version "applies only to guests using nested paging".
                 */
                if (!g_phys_as || !tdp_enabled)
                        g_phys_as = phys_as;

                entry->eax = g_phys_as | (virt_as << 8);
                entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
                entry->edx = 0;
                cpuid_entry_override(entry, CPUID_8000_0008_EBX);
                break;
        }
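        /*
         * Example encoding for case 0x80000008 (illustrative): a host that
         * reports 46 physical and 48 virtual address bits and no separate
         * guest MAXPHYADDR produces EAX = 46 | (48 << 8) = 0x302e.
         */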
        case 0x8000000A:
                if (!kvm_cpu_cap_has(X86_FEATURE_SVM)) {
                        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                        break;
                }
                entry->eax = 1; /* SVM revision 1 */
                entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
                                   ASID emulation to nested SVM */
                entry->ecx = 0; /* Reserved */
                cpuid_entry_override(entry, CPUID_8000_000A_EDX);
                break;
        case 0x80000019:
                entry->ecx = entry->edx = 0;
                break;
        case 0x8000001a:
                entry->eax &= GENMASK(2, 0);
                entry->ebx = entry->ecx = entry->edx = 0;
                break;
        case 0x8000001e:
                /* Do not return host topology information. */
                entry->eax = entry->ebx = entry->ecx = 0;
                entry->edx = 0; /* reserved */
                break;
        /* Support memory encryption cpuid if host supports it */
        case 0x8000001F:
                if (!boot_cpu_has(X86_FEATURE_SEV))
                        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                break;
        /* Add support for Centaur's CPUID instruction. */
        case 0xC0000000:
                /* Just support up to 0xC0000004 now. */
                entry->eax = min(entry->eax, 0xC0000004);
                break;
        case 0xC0000001:
                cpuid_entry_override(entry, CPUID_C000_0001_EDX);
                break;
        case 3: /* Processor serial number */
        case 5: /* MONITOR/MWAIT */
        case 0xC0000002:
        case 0xC0000003:
        case 0xC0000004:
        default:
                entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
                break;
        }

        r = 0;

out:
        put_cpu();

        return r;
}

static int do_cpuid_func(struct kvm_cpuid_array *array, u32 func,
                         unsigned int type)
{
        if (type == KVM_GET_EMULATED_CPUID)
                return __do_cpuid_func_emulated(array, func);

        return __do_cpuid_func(array, func);
}

#define CENTAUR_CPUID_SIGNATURE 0xC0000000

static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func,
                          unsigned int type)
{
        u32 limit;
        int r;

        if (func == CENTAUR_CPUID_SIGNATURE &&
            boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR)
                return 0;

        r = do_cpuid_func(array, func, type);
        if (r)
                return r;

        limit = array->entries[array->nent - 1].eax;
        for (func = func + 1; func <= limit; ++func) {
                r = do_cpuid_func(array, func, type);
                if (r)
                        break;
        }

        return r;
}
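/*
 * Enumeration example for get_cpuid_func() (illustrative): called with
 * func == 0x80000000, do_cpuid_func() first fills the base leaf; if that
 * leaf's EAX reports a limit of 0x8000001f, the loop then emits every
 * extended leaf from 0x80000001 through 0x8000001f in order.  The Centaur
 * range is skipped entirely on non-Centaur hosts.
 */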
static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
                                 __u32 num_entries, unsigned int ioctl_type)
{
        int i;
        __u32 pad[3];

        if (ioctl_type != KVM_GET_EMULATED_CPUID)
                return false;

        /*
         * We want to make sure that ->padding is being passed clean from
         * userspace in case we want to use it for something in the future.
         *
         * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID, so we
         * have to settle for enforcing it only on the emulated side.  /me
         * sheds a tear.
         */
        for (i = 0; i < num_entries; i++) {
                if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
                        return true;

                if (pad[0] || pad[1] || pad[2])
                        return true;
        }
        return false;
}

int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
                            struct kvm_cpuid_entry2 __user *entries,
                            unsigned int type)
{
        static const u32 funcs[] = {
                0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE,
        };

        struct kvm_cpuid_array array = {
                .nent = 0,
        };
        int r, i;

        if (cpuid->nent < 1)
                return -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                cpuid->nent = KVM_MAX_CPUID_ENTRIES;

        if (sanity_check_entries(entries, cpuid->nent, type))
                return -EINVAL;

        array.entries = vzalloc(array_size(sizeof(struct kvm_cpuid_entry2),
                                           cpuid->nent));
        if (!array.entries)
                return -ENOMEM;

        array.maxnent = cpuid->nent;

        for (i = 0; i < ARRAY_SIZE(funcs); i++) {
                r = get_cpuid_func(&array, funcs[i], type);
                if (r)
                        goto out_free;
        }
        cpuid->nent = array.nent;

        if (copy_to_user(entries, array.entries,
                         array.nent * sizeof(struct kvm_cpuid_entry2)))
                r = -EFAULT;

out_free:
        vfree(array.entries);
        return r;
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
                                              u32 function, u32 index)
{
        return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
                                 function, index);
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
/*
 * Intel CPUID semantics treats any query for an out-of-range leaf as if the
 * highest basic leaf (i.e. CPUID.0H:EAX) were requested.  AMD CPUID semantics
 * returns all zeroes for any undefined leaf, whether or not the leaf is in
 * range.  Centaur/VIA follows Intel semantics.
 *
 * A leaf is considered out-of-range if its function is higher than the maximum
 * supported leaf of its associated class or if its associated class does not
 * exist.
 *
 * There are four primary classes to be considered, with their respective
 * ranges described as "<base> - <top>[,<base2> - <top2>]" inclusive.  A
 * primary class exists if a guest CPUID entry for its <base> leaf exists.
 * For a given class, CPUID.<base>.EAX contains the max supported leaf for
 * the class.
 *
 *  - Basic:      0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff
 *  - Hypervisor: 0x40000000 - 0x4fffffff
 *  - Extended:   0x80000000 - 0xbfffffff
 *  - Centaur:    0xc0000000 - 0xcfffffff
 *
 * The Hypervisor class is further subdivided into sub-classes that each act as
 * their own independent class associated with a 0x100 byte range.  E.g. if Qemu
 * is advertising support for both HyperV and KVM, the resulting Hypervisor
 * CPUID sub-classes are:
 *
 *  - HyperV:     0x40000000 - 0x400000ff
 *  - KVM:        0x40000100 - 0x400001ff
 */
static struct kvm_cpuid_entry2 *
get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index)
{
        struct kvm_cpuid_entry2 *basic, *class;
        u32 function = *fn_ptr;

        basic = kvm_find_cpuid_entry(vcpu, 0, 0);
        if (!basic)
                return NULL;

        if (is_guest_vendor_amd(basic->ebx, basic->ecx, basic->edx) ||
            is_guest_vendor_hygon(basic->ebx, basic->ecx, basic->edx))
                return NULL;

        if (function >= 0x40000000 && function <= 0x4fffffff)
                class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00, 0);
        else if (function >= 0xc0000000)
                class = kvm_find_cpuid_entry(vcpu, 0xc0000000, 0);
        else
                class = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);

        if (class && function <= class->eax)
                return NULL;

        /*
         * Leaf specific adjustments are also applied when redirecting to the
         * max basic entry, e.g. if the max basic leaf is 0xb but there is no
         * entry for CPUID.0xb.index (see below), then the output value for EDX
         * needs to be pulled from CPUID.0xb.1.
         */
        *fn_ptr = basic->eax;

        /*
         * The class does not exist or the requested function is out of range;
         * the effective CPUID entry is the max basic leaf.  Note, the index of
         * the original requested leaf is observed!
         */
        return kvm_find_cpuid_entry(vcpu, basic->eax, index);
}
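/*
 * Redirection example (illustrative): an Intel-flavored guest whose max
 * basic leaf is 0xd executes CPUID(0x15, 0).  No entry and no class covers
 * 0x15, so the helper above rewrites the function to 0xd and the guest sees
 * the output of CPUID(0xd, 0), mirroring Intel's out-of-range behavior.
 */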
1120 */ 1121 if (function == 0xb || function == 0x1f) { 1122 entry = kvm_find_cpuid_entry(vcpu, function, 1); 1123 if (entry) { 1124 *ecx = index & 0xff; 1125 *edx = entry->edx; 1126 } 1127 } 1128 } 1129 trace_kvm_cpuid(orig_function, index, *eax, *ebx, *ecx, *edx, exact, 1130 used_max_basic); 1131 return exact; 1132} 1133EXPORT_SYMBOL_GPL(kvm_cpuid); 1134 1135int kvm_emulate_cpuid(struct kvm_vcpu *vcpu) 1136{ 1137 u32 eax, ebx, ecx, edx; 1138 1139 if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0)) 1140 return 1; 1141 1142 eax = kvm_rax_read(vcpu); 1143 ecx = kvm_rcx_read(vcpu); 1144 kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false); 1145 kvm_rax_write(vcpu, eax); 1146 kvm_rbx_write(vcpu, ebx); 1147 kvm_rcx_write(vcpu, ecx); 1148 kvm_rdx_write(vcpu, edx); 1149 return kvm_skip_emulated_instruction(vcpu); 1150} 1151EXPORT_SYMBOL_GPL(kvm_emulate_cpuid); 1152