/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/fpu/xstate.h>
#include <asm/mce.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"

struct kvm_caps {
	/* control of guest tsc rate supported? */
	bool has_tsc_control;
	/* maximum supported tsc_khz for guests */
	u32 max_guest_tsc_khz;
	/* number of bits of the fractional part of the TSC scaling ratio */
	u8 tsc_scaling_ratio_frac_bits;
	/* maximum allowed value of TSC scaling ratio */
	u64 max_tsc_scaling_ratio;
	/* 1ull << kvm_caps.tsc_scaling_ratio_frac_bits */
	u64 default_tsc_scaling_ratio;
	/* bus lock detection supported? */
	bool has_bus_lock_exit;
	/* notify VM exit supported? */
	bool has_notify_vmexit;

	u64 supported_mce_cap;
	u64 supported_xcr0;
	u64 supported_xss;
	u64 supported_perf_cap;
};

void kvm_spurious_fault(void);

#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check)	\
({									\
	bool failed = (consistency_check);				\
	if (failed)							\
		trace_kvm_nested_vmenter_failed(#consistency_check, 0);	\
	failed;								\
})

/*
 * The first...last VMX feature MSRs that are emulated by KVM.  This may or may
 * not cover all known VMX MSRs, as KVM doesn't emulate an MSR until there's an
 * associated feature that KVM supports for nested virtualization.
 */
#define KVM_FIRST_EMULATED_VMX_MSR	MSR_IA32_VMX_BASIC
#define KVM_LAST_EMULATED_VMX_MSR	MSR_IA32_VMX_VMFUNC

#define KVM_DEFAULT_PLE_GAP		128
#define KVM_VMX_DEFAULT_PLE_WINDOW	4096
#define KVM_DEFAULT_PLE_WINDOW_GROW	2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK	0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX	UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX	USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW	3000

static inline unsigned int __grow_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int max)
{
	u64 ret = val;

	if (modifier < 1)
		return base;

	if (modifier < base)
		ret *= modifier;
	else
		ret += modifier;

	return min(ret, (u64)max);
}

static inline unsigned int __shrink_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int min)
{
	if (modifier < 1)
		return base;

	if (modifier < base)
		val /= modifier;
	else
		val -= modifier;

	return max(val, min);
}
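/*
 * Worked example (illustrative only, not used by KVM itself): with the VMX
 * defaults above, the grow modifier (2) is less than the base window (4096),
 * so growth is multiplicative, and the shrink modifier (0) resets the window
 * to its base value:
 *
 *	__grow_ple_window(4096, 4096, 2, UINT_MAX)	-> 8192
 *	__grow_ple_window(8192, 4096, 2, UINT_MAX)	-> 16384
 *	__shrink_ple_window(16384, 4096, 0, 0)		-> 4096
 *
 * A modifier >= base switches both helpers to additive adjustment instead.
 */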
#define MSR_IA32_CR_PAT_DEFAULT	0x0007040600070406ULL

void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);

static inline bool kvm_vcpu_has_run(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.last_vmentry_cpu != -1;
}

static inline bool kvm_is_exception_pending(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending ||
	       vcpu->arch.exception_vmexit.pending ||
	       kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu);
}

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
	vcpu->arch.exception.injected = false;
	vcpu->arch.exception_vmexit.pending = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
				       bool soft)
{
	vcpu->arch.interrupt.injected = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
	       vcpu->arch.nmi_injected;
}

static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr0_bit_set(vcpu, X86_CR0_PE);
}

static inline bool is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return !!(vcpu->arch.efer & EFER_LMA);
#else
	return false;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	WARN_ON_ONCE(vcpu->arch.guest_state_protected);

	if (!is_long_mode(vcpu))
		return false;
	static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool is_64_bit_hypercall(struct kvm_vcpu *vcpu)
{
	/*
	 * If running with protected guest state, the CS register is not
	 * accessible.  The hypercall register values will have had to be
	 * provided in 64-bit mode, so assume the guest is in 64-bit mode.
	 */
	return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu);
}

static inline bool x86_exception_has_error_code(unsigned int vector)
{
	static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
			BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
			BIT(PF_VECTOR) | BIT(AC_VECTOR);

	return (1U << vector) & exception_has_error_code;
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline bool is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PAE);
}

static inline bool is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PSE);
}

static inline bool is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_is_cr0_bit_set(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
	return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
}

static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	u64 gen = kvm_memslots(vcpu->kvm)->generation;

	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return;

	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually a nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.mmio_access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = gen;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva.  If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	    vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	    vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}
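/*
 * Sketch of how the one-entry MMIO cache above is meant to be consumed (the
 * real consumers live outside this header, e.g. in x86.c; the snippet is
 * illustrative only):
 *
 *	if (vcpu_match_mmio_gva(vcpu, gva)) {
 *		// cache hit: skip the page table walk entirely
 *		gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT;
 *		gpa |= gva & ~PAGE_MASK;
 *	}
 *
 * A memslot generation bump invalidates the entry implicitly via
 * vcpu_match_mmio_gen(), without requiring an explicit flush.
 */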
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	unsigned long val = kvm_register_read_raw(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      int reg, unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write_raw(vcpu, reg, val);
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

u64 get_kvmclock_ns(struct kvm *kvm);

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
			gva_t addr, void *val, unsigned int bytes,
			struct x86_exception *exception);

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
				gva_t addr, void *val, unsigned int bytes,
				struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
				   struct kvm_queued_exception *ex);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
bool kvm_vector_hashing_enabled(void);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
				    void *insn, int insn_len);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);

extern u64 host_xcr0;
extern u64 host_xss;
extern u64 host_arch_capabilities;

extern struct kvm_caps kvm_caps;

extern bool enable_pmu;

/*
 * Get a filtered version of KVM's supported XCR0 that strips out dynamic
 * features for which the current process doesn't (yet) have permission to use.
 * This is intended to be used only when enumerating support to userspace,
 * e.g. in KVM_GET_SUPPORTED_CPUID and KVM_CAP_XSAVE2; it does NOT need to be
 * used to check/restrict guest behavior, as KVM rejects KVM_SET_CPUID{2} if
 * userspace attempts to enable unpermitted features.
 */
static inline u64 kvm_get_filtered_xcr0(void)
{
	u64 permitted_xcr0 = kvm_caps.supported_xcr0;

	BUILD_BUG_ON(XFEATURE_MASK_USER_DYNAMIC != XFEATURE_MASK_XTILE_DATA);

	if (permitted_xcr0 & XFEATURE_MASK_USER_DYNAMIC) {
		permitted_xcr0 &= xstate_get_guest_group_perm();

		/*
		 * Treat XTILE_CFG as unsupported if the current process isn't
		 * allowed to use XTILE_DATA, as attempting to set XTILE_CFG in
		 * XCR0 without setting XTILE_DATA is architecturally illegal.
		 */
		if (!(permitted_xcr0 & XFEATURE_MASK_XTILE_DATA))
			permitted_xcr0 &= ~XFEATURE_MASK_XTILE_CFG;
	}
	return permitted_xcr0;
}
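/*
 * Illustrative scenario (assumes an AMX-capable host): if
 * kvm_caps.supported_xcr0 includes XTILE_DATA | XTILE_CFG but the VMM process
 * has not yet requested guest AMX permission, e.g. via
 *
 *	arch_prctl(ARCH_REQ_XCOMP_GUEST_PERM, XFEATURE_XTILE_DATA);
 *
 * then xstate_get_guest_group_perm() masks off XTILE_DATA, and the helper
 * above strips XTILE_CFG as well, so neither tile feature is enumerated to
 * userspace.
 */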
static inline bool kvm_mpx_supported(void)
{
	return (kvm_caps.supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		== (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
}

extern unsigned int min_timer_period_us;

extern bool enable_vmware_backdoor;

extern int pi_inject_timer;

extern bool report_ignored_msrs;

extern bool eager_page_split;

static inline void kvm_pr_unimpl_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	if (report_ignored_msrs)
		vcpu_unimpl(vcpu, "Unhandled WRMSR(0x%x) = 0x%llx\n", msr, data);
}

static inline void kvm_pr_unimpl_rdmsr(struct kvm_vcpu *vcpu, u32 msr)
{
	if (report_ignored_msrs)
		vcpu_unimpl(vcpu, "Unhandled RDMSR(0x%x)\n", msr);
}

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

/*
 * Same "calling convention" as do_div:
 *  - divide (n << 32) by base
 *  - put result in n
 *  - return remainder
 */
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
			  : "rm" (base), "0" (0), "1" ((u32) n)); \
	    n = __quot;						\
	    __rem;						\
	 })
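/*
 * Example (illustrative): converting a ratio to 32.32 fixed point.
 * do_shl32_div32() divides (n << 32) by base, stores the quotient back in n
 * and returns the remainder:
 *
 *	u32 n = 1, rem;
 *
 *	rem = do_shl32_div32(n, 4);	// n == 0x40000000, rem == 0
 *
 * Note the quotient must fit in 32 bits (i.e. base > n), otherwise the divl
 * instruction raises #DE.
 */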
static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
	return kvm->arch.mwait_in_guest;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
	return kvm->arch.hlt_in_guest;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
	return kvm->arch.pause_in_guest;
}

static inline bool kvm_cstate_in_guest(struct kvm *kvm)
{
	return kvm->arch.cstate_in_guest;
}

static inline bool kvm_notify_vmexit_enabled(struct kvm *kvm)
{
	return kvm->arch.notify_vmexit_flags & KVM_X86_NOTIFY_VMEXIT_ENABLED;
}

enum kvm_intr_type {
	/* Values are arbitrary, but must be non-zero. */
	KVM_HANDLING_IRQ = 1,
	KVM_HANDLING_NMI,
};

static __always_inline void kvm_before_interrupt(struct kvm_vcpu *vcpu,
						 enum kvm_intr_type intr)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, (u8)intr);
}

static __always_inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 0);
}

static inline bool kvm_handling_nmi_from_guest(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.handling_intr_from_guest == KVM_HANDLING_NMI;
}

static inline bool kvm_pat_valid(u64 data)
{
	if (data & 0xF8F8F8F8F8F8F8F8ull)
		return false;
	/*
	 * 0, 1, 4, 5, 6 and 7 are valid values for each PAT entry; 2 and 3
	 * are reserved.  The check below ORs bit 2 into every entry whose
	 * bit 1 is set; if that changes the value, some entry was 2 or 3.
	 */
	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}

static inline bool kvm_dr7_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

static inline bool kvm_dr6_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

/*
 * Trigger machine check on the host.  We assume all the MSRs are already set
 * up by the CPU and that we still run on the same CPU as the MCE occurred on.
 * We pass a fake environment to the machine check handler because we want
 * the guest to be always treated like user space, no matter what context
 * it used internally.
 */
static inline void kvm_machine_check(void)
{
#if defined(CONFIG_X86_MCE)
	struct pt_regs regs = {
		.cs = 3, /* Fake ring 3 no matter what the guest ran on */
		.flags = X86_EFLAGS_IF,
	};

	do_machine_check(&regs);
#endif
}

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
int kvm_spec_ctrl_test_value(u64 value);
bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);

/*
 * Internal error codes that are used to indicate that MSR emulation
 * encountered an error that should result in #GP in the guest, unless
 * userspace handles it.
 */
#define KVM_MSR_RET_INVALID	2	/* in-kernel MSR emulation #GP condition */
#define KVM_MSR_RET_FILTERED	3	/* #GP due to userspace MSR filter */

#define __cr4_reserved_bits(__cpu_has, __c)             \
({                                                      \
	u64 __reserved_bits = CR4_RESERVED_BITS;        \
                                                        \
	if (!__cpu_has(__c, X86_FEATURE_XSAVE))         \
		__reserved_bits |= X86_CR4_OSXSAVE;     \
	if (!__cpu_has(__c, X86_FEATURE_SMEP))          \
		__reserved_bits |= X86_CR4_SMEP;        \
	if (!__cpu_has(__c, X86_FEATURE_SMAP))          \
		__reserved_bits |= X86_CR4_SMAP;        \
	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))      \
		__reserved_bits |= X86_CR4_FSGSBASE;    \
	if (!__cpu_has(__c, X86_FEATURE_PKU))           \
		__reserved_bits |= X86_CR4_PKE;         \
	if (!__cpu_has(__c, X86_FEATURE_LA57))          \
		__reserved_bits |= X86_CR4_LA57;        \
	if (!__cpu_has(__c, X86_FEATURE_UMIP))          \
		__reserved_bits |= X86_CR4_UMIP;        \
	if (!__cpu_has(__c, X86_FEATURE_VMX))           \
		__reserved_bits |= X86_CR4_VMXE;        \
	if (!__cpu_has(__c, X86_FEATURE_PCID))          \
		__reserved_bits |= X86_CR4_PCIDE;       \
	__reserved_bits;                                \
})

int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			  void *dst);
int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			 void *dst);
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
			 unsigned int port, void *data, unsigned int count,
			 int in);

#endif