xref: /kernel/linux/linux-6.6/arch/x86/kvm/vmx/vmx.c (revision 62306a36)
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Kernel-based Virtual Machine driver for Linux
4 *
5 * This module enables machines with Intel VT-x extensions to run virtual
6 * machines without emulation or binary translation.
7 *
8 * Copyright (C) 2006 Qumranet, Inc.
9 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10 *
11 * Authors:
12 *   Avi Kivity   <avi@qumranet.com>
13 *   Yaniv Kamay  <yaniv@qumranet.com>
14 */
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/highmem.h>
18#include <linux/hrtimer.h>
19#include <linux/kernel.h>
20#include <linux/kvm_host.h>
21#include <linux/module.h>
22#include <linux/moduleparam.h>
23#include <linux/mod_devicetable.h>
24#include <linux/mm.h>
25#include <linux/objtool.h>
26#include <linux/sched.h>
27#include <linux/sched/smt.h>
28#include <linux/slab.h>
29#include <linux/tboot.h>
30#include <linux/trace_events.h>
31#include <linux/entry-kvm.h>
32
33#include <asm/apic.h>
34#include <asm/asm.h>
35#include <asm/cpu.h>
36#include <asm/cpu_device_id.h>
37#include <asm/debugreg.h>
38#include <asm/desc.h>
39#include <asm/fpu/api.h>
40#include <asm/fpu/xstate.h>
41#include <asm/idtentry.h>
42#include <asm/io.h>
43#include <asm/irq_remapping.h>
44#include <asm/reboot.h>
45#include <asm/perf_event.h>
46#include <asm/mmu_context.h>
47#include <asm/mshyperv.h>
48#include <asm/mwait.h>
49#include <asm/spec-ctrl.h>
50#include <asm/vmx.h>
51
52#include "capabilities.h"
53#include "cpuid.h"
54#include "hyperv.h"
55#include "kvm_onhyperv.h"
56#include "irq.h"
57#include "kvm_cache_regs.h"
58#include "lapic.h"
59#include "mmu.h"
60#include "nested.h"
61#include "pmu.h"
62#include "sgx.h"
63#include "trace.h"
64#include "vmcs.h"
65#include "vmcs12.h"
66#include "vmx.h"
67#include "x86.h"
68#include "smm.h"
69
70MODULE_AUTHOR("Qumranet");
71MODULE_LICENSE("GPL");
72
73#ifdef MODULE
74static const struct x86_cpu_id vmx_cpu_id[] = {
75	X86_MATCH_FEATURE(X86_FEATURE_VMX, NULL),
76	{}
77};
78MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
79#endif
80
81bool __read_mostly enable_vpid = 1;
82module_param_named(vpid, enable_vpid, bool, 0444);
83
84static bool __read_mostly enable_vnmi = 1;
85module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);
86
87bool __read_mostly flexpriority_enabled = 1;
88module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
89
90bool __read_mostly enable_ept = 1;
91module_param_named(ept, enable_ept, bool, S_IRUGO);
92
93bool __read_mostly enable_unrestricted_guest = 1;
94module_param_named(unrestricted_guest,
95			enable_unrestricted_guest, bool, S_IRUGO);
96
97bool __read_mostly enable_ept_ad_bits = 1;
98module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
99
100static bool __read_mostly emulate_invalid_guest_state = true;
101module_param(emulate_invalid_guest_state, bool, S_IRUGO);
102
103static bool __read_mostly fasteoi = 1;
104module_param(fasteoi, bool, S_IRUGO);
105
106module_param(enable_apicv, bool, S_IRUGO);
107
108bool __read_mostly enable_ipiv = true;
109module_param(enable_ipiv, bool, 0444);
110
111/*
112 * If nested=1, nested virtualization is supported, i.e., guests may use
113 * VMX and be a hypervisor for its own guests. If nested=0, guests may not
114 * use VMX instructions.
115 */
116static bool __read_mostly nested = 1;
117module_param(nested, bool, S_IRUGO);
118
119bool __read_mostly enable_pml = 1;
120module_param_named(pml, enable_pml, bool, S_IRUGO);
121
122static bool __read_mostly error_on_inconsistent_vmcs_config = true;
123module_param(error_on_inconsistent_vmcs_config, bool, 0444);
124
125static bool __read_mostly dump_invalid_vmcs = 0;
126module_param(dump_invalid_vmcs, bool, 0644);
127
128#define MSR_BITMAP_MODE_X2APIC		1
129#define MSR_BITMAP_MODE_X2APIC_APICV	2
130
131#define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL
132
133/* Guest_tsc -> host_tsc conversion requires 64-bit division.  */
134static int __read_mostly cpu_preemption_timer_multi;
135static bool __read_mostly enable_preemption_timer = 1;
136#ifdef CONFIG_X86_64
137module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
138#endif
139
140extern bool __read_mostly allow_smaller_maxphyaddr;
141module_param(allow_smaller_maxphyaddr, bool, S_IRUGO);
142
143#define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
144#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
145#define KVM_VM_CR0_ALWAYS_ON				\
146	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
147
148#define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
149#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
150#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
151
152#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
153
154#define MSR_IA32_RTIT_STATUS_MASK (~(RTIT_STATUS_FILTEREN | \
155	RTIT_STATUS_CONTEXTEN | RTIT_STATUS_TRIGGEREN | \
156	RTIT_STATUS_ERROR | RTIT_STATUS_STOPPED | \
157	RTIT_STATUS_BYTECNT))
158
159/*
160 * List of MSRs that can be directly passed to the guest.
161 * In addition to these, x2apic and PT MSRs are handled specially.
162 */
163static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
164	MSR_IA32_SPEC_CTRL,
165	MSR_IA32_PRED_CMD,
166	MSR_IA32_FLUSH_CMD,
167	MSR_IA32_TSC,
168#ifdef CONFIG_X86_64
169	MSR_FS_BASE,
170	MSR_GS_BASE,
171	MSR_KERNEL_GS_BASE,
172	MSR_IA32_XFD,
173	MSR_IA32_XFD_ERR,
174#endif
175	MSR_IA32_SYSENTER_CS,
176	MSR_IA32_SYSENTER_ESP,
177	MSR_IA32_SYSENTER_EIP,
178	MSR_CORE_C1_RES,
179	MSR_CORE_C3_RESIDENCY,
180	MSR_CORE_C6_RESIDENCY,
181	MSR_CORE_C7_RESIDENCY,
182};
183
184/*
185 * These two parameters are used to configure the controls for Pause-Loop Exiting:
186 * ple_gap:    upper bound on the amount of time between two successive
187 *             executions of PAUSE in a loop. Also indicates whether PLE is enabled.
188 *             Testing shows this time is usually smaller than 128 cycles.
189 * ple_window: upper bound on the amount of time a guest is allowed to execute
190 *             in a PAUSE loop. Tests indicate that most spinlocks are held for
191 *             less than 2^12 cycles.
192 * Time is measured based on a counter that runs at the same rate as the TSC;
193 * see SDM volume 3B, sections 21.6.13 and 22.1.3.
194 */
195static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;
196module_param(ple_gap, uint, 0444);
197
198static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
199module_param(ple_window, uint, 0444);
200
201/* Default doubles per-vcpu window every exit. */
202static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
203module_param(ple_window_grow, uint, 0444);
204
205/* Default resets per-vcpu window every exit to ple_window. */
206static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
207module_param(ple_window_shrink, uint, 0444);
208
209/* Default is to compute the maximum so we can never overflow. */
210static unsigned int ple_window_max        = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
211module_param(ple_window_max, uint, 0444);
212
213/* Default is SYSTEM mode, 1 for host-guest mode */
214int __read_mostly pt_mode = PT_MODE_SYSTEM;
215module_param(pt_mode, int, S_IRUGO);
216
217static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
218static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
219static DEFINE_MUTEX(vmx_l1d_flush_mutex);
220
221/* Storage for pre module init parameter parsing */
222static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
223
224static const struct {
225	const char *option;
226	bool for_parse;
227} vmentry_l1d_param[] = {
228	[VMENTER_L1D_FLUSH_AUTO]	 = {"auto", true},
229	[VMENTER_L1D_FLUSH_NEVER]	 = {"never", true},
230	[VMENTER_L1D_FLUSH_COND]	 = {"cond", true},
231	[VMENTER_L1D_FLUSH_ALWAYS]	 = {"always", true},
232	[VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
233	[VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
234};
235
236#define L1D_CACHE_ORDER 4
237static void *vmx_l1d_flush_pages;
238
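/*
 * Resolve the requested L1D flush mode for the L1TF mitigation: translate
 * "auto" into a concrete mode, allocate the software flush pages when a
 * flush may be needed, and flip the static branches that gate the flush on
 * VM-Entry.
 */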
239static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
240{
241	struct page *page;
242	unsigned int i;
243
244	if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
245		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
246		return 0;
247	}
248
249	if (!enable_ept) {
250		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
251		return 0;
252	}
253
254	if (host_arch_capabilities & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
255		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
256		return 0;
257	}
258
259	/* If set to auto, use the default l1tf mitigation method */
260	if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
261		switch (l1tf_mitigation) {
262		case L1TF_MITIGATION_OFF:
263			l1tf = VMENTER_L1D_FLUSH_NEVER;
264			break;
265		case L1TF_MITIGATION_FLUSH_NOWARN:
266		case L1TF_MITIGATION_FLUSH:
267		case L1TF_MITIGATION_FLUSH_NOSMT:
268			l1tf = VMENTER_L1D_FLUSH_COND;
269			break;
270		case L1TF_MITIGATION_FULL:
271		case L1TF_MITIGATION_FULL_FORCE:
272			l1tf = VMENTER_L1D_FLUSH_ALWAYS;
273			break;
274		}
275	} else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
276		l1tf = VMENTER_L1D_FLUSH_ALWAYS;
277	}
278
279	if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
280	    !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
281		/*
282		 * This allocation for vmx_l1d_flush_pages is not tied to a VM
283		 * lifetime and so should not be charged to a memcg.
284		 */
285		page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
286		if (!page)
287			return -ENOMEM;
288		vmx_l1d_flush_pages = page_address(page);
289
290		/*
291		 * Initialize each page with a different pattern in
292		 * order to protect against KSM in the nested
293		 * virtualization case.
294		 */
295		for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
296			memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
297			       PAGE_SIZE);
298		}
299	}
300
301	l1tf_vmx_mitigation = l1tf;
302
303	if (l1tf != VMENTER_L1D_FLUSH_NEVER)
304		static_branch_enable(&vmx_l1d_should_flush);
305	else
306		static_branch_disable(&vmx_l1d_should_flush);
307
308	if (l1tf == VMENTER_L1D_FLUSH_COND)
309		static_branch_enable(&vmx_l1d_flush_cond);
310	else
311		static_branch_disable(&vmx_l1d_flush_cond);
312	return 0;
313}
314
315static int vmentry_l1d_flush_parse(const char *s)
316{
317	unsigned int i;
318
319	if (s) {
320		for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
321			if (vmentry_l1d_param[i].for_parse &&
322			    sysfs_streq(s, vmentry_l1d_param[i].option))
323				return i;
324		}
325	}
326	return -EINVAL;
327}
328
329static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
330{
331	int l1tf, ret;
332
333	l1tf = vmentry_l1d_flush_parse(s);
334	if (l1tf < 0)
335		return l1tf;
336
337	if (!boot_cpu_has(X86_BUG_L1TF))
338		return 0;
339
340	/*
341	 * Has vmx_init() run already? If not then this is the pre init
342	 * parameter parsing. In that case just store the value and let
343	 * vmx_init() do the proper setup after enable_ept has been
344	 * established.
345	 */
346	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
347		vmentry_l1d_flush_param = l1tf;
348		return 0;
349	}
350
351	mutex_lock(&vmx_l1d_flush_mutex);
352	ret = vmx_setup_l1d_flush(l1tf);
353	mutex_unlock(&vmx_l1d_flush_mutex);
354	return ret;
355}
356
357static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
358{
359	if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
360		return sysfs_emit(s, "???\n");
361
362	return sysfs_emit(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
363}
364
365static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx)
366{
367	u64 msr;
368
369	if (!vmx->disable_fb_clear)
370		return;
371
372	msr = __rdmsr(MSR_IA32_MCU_OPT_CTRL);
373	msr |= FB_CLEAR_DIS;
374	native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
375	/* Cache the MSR value to avoid reading it later */
376	vmx->msr_ia32_mcu_opt_ctrl = msr;
377}
378
379static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
380{
381	if (!vmx->disable_fb_clear)
382		return;
383
384	vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS;
385	native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl);
386}
387
388static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
389{
390	/*
391	 * Disable VERW's behavior of clearing CPU buffers for the guest if the
392	 * CPU isn't affected by MDS/TAA, and the host hasn't forcefully enabled
393	 * the mitigation. Disabling the clearing behavior provides a
394	 * performance boost for guests that aren't aware that manually clearing
395	 * CPU buffers is unnecessary, at the cost of MSR accesses on VM-Entry
396	 * and VM-Exit.
397	 */
398	vmx->disable_fb_clear = !cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF) &&
399				(host_arch_capabilities & ARCH_CAP_FB_CLEAR_CTRL) &&
400				!boot_cpu_has_bug(X86_BUG_MDS) &&
401				!boot_cpu_has_bug(X86_BUG_TAA);
402
403	/*
404	 * If guest will not execute VERW, there is no need to set FB_CLEAR_DIS
405	 * at VMEntry. Skip the MSR read/write when a guest has no use case to
406	 * execute VERW.
407	 */
408	if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) ||
409	   ((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) &&
410	    (vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) &&
411	    (vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) &&
412	    (vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) &&
413	    (vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO)))
414		vmx->disable_fb_clear = false;
415}
416
417static const struct kernel_param_ops vmentry_l1d_flush_ops = {
418	.set = vmentry_l1d_flush_set,
419	.get = vmentry_l1d_flush_get,
420};
421module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
422
423static u32 vmx_segment_access_rights(struct kvm_segment *var);
424
425void vmx_vmexit(void);
426
427#define vmx_insn_failed(fmt...)		\
428do {					\
429	WARN_ONCE(1, fmt);		\
430	pr_warn_ratelimited(fmt);	\
431} while (0)
432
433noinline void vmread_error(unsigned long field)
434{
435	vmx_insn_failed("vmread failed: field=%lx\n", field);
436}
437
438#ifndef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
439noinstr void vmread_error_trampoline2(unsigned long field, bool fault)
440{
441	if (fault) {
442		kvm_spurious_fault();
443	} else {
444		instrumentation_begin();
445		vmread_error(field);
446		instrumentation_end();
447	}
448}
449#endif
450
451noinline void vmwrite_error(unsigned long field, unsigned long value)
452{
453	vmx_insn_failed("vmwrite failed: field=%lx val=%lx err=%u\n",
454			field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
455}
456
457noinline void vmclear_error(struct vmcs *vmcs, u64 phys_addr)
458{
459	vmx_insn_failed("vmclear failed: %p/%llx err=%u\n",
460			vmcs, phys_addr, vmcs_read32(VM_INSTRUCTION_ERROR));
461}
462
463noinline void vmptrld_error(struct vmcs *vmcs, u64 phys_addr)
464{
465	vmx_insn_failed("vmptrld failed: %p/%llx err=%u\n",
466			vmcs, phys_addr, vmcs_read32(VM_INSTRUCTION_ERROR));
467}
468
469noinline void invvpid_error(unsigned long ext, u16 vpid, gva_t gva)
470{
471	vmx_insn_failed("invvpid failed: ext=0x%lx vpid=%u gva=0x%lx\n",
472			ext, vpid, gva);
473}
474
475noinline void invept_error(unsigned long ext, u64 eptp, gpa_t gpa)
476{
477	vmx_insn_failed("invept failed: ext=0x%lx eptp=%llx gpa=0x%llx\n",
478			ext, eptp, gpa);
479}
480
481static DEFINE_PER_CPU(struct vmcs *, vmxarea);
482DEFINE_PER_CPU(struct vmcs *, current_vmcs);
483/*
484 * We maintain a per-CPU linked list of VMCSs loaded on that CPU. This is needed
485 * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
486 */
487static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
488
489static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
490static DEFINE_SPINLOCK(vmx_vpid_lock);
491
492struct vmcs_config vmcs_config __ro_after_init;
493struct vmx_capability vmx_capability __ro_after_init;
494
495#define VMX_SEGMENT_FIELD(seg)					\
496	[VCPU_SREG_##seg] = {                                   \
497		.selector = GUEST_##seg##_SELECTOR,		\
498		.base = GUEST_##seg##_BASE,		   	\
499		.limit = GUEST_##seg##_LIMIT,		   	\
500		.ar_bytes = GUEST_##seg##_AR_BYTES,	   	\
501	}
502
503static const struct kvm_vmx_segment_field {
504	unsigned selector;
505	unsigned base;
506	unsigned limit;
507	unsigned ar_bytes;
508} kvm_vmx_segment_fields[] = {
509	VMX_SEGMENT_FIELD(CS),
510	VMX_SEGMENT_FIELD(DS),
511	VMX_SEGMENT_FIELD(ES),
512	VMX_SEGMENT_FIELD(FS),
513	VMX_SEGMENT_FIELD(GS),
514	VMX_SEGMENT_FIELD(SS),
515	VMX_SEGMENT_FIELD(TR),
516	VMX_SEGMENT_FIELD(LDTR),
517};
518
519static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
520{
521	vmx->segment_cache.bitmask = 0;
522}
523
524static unsigned long host_idt_base;
525
526#if IS_ENABLED(CONFIG_HYPERV)
527static struct kvm_x86_ops vmx_x86_ops __initdata;
528
529static bool __read_mostly enlightened_vmcs = true;
530module_param(enlightened_vmcs, bool, 0444);
531
532static int hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu)
533{
534	struct hv_enlightened_vmcs *evmcs;
535	struct hv_partition_assist_pg **p_hv_pa_pg =
536			&to_kvm_hv(vcpu->kvm)->hv_pa_pg;
537	/*
538	 * Synthetic VM-Exit is not enabled in the current code, so all
539	 * eVMCSes in a single VM share the same assist page.
540	 */
541	if (!*p_hv_pa_pg)
542		*p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
543
544	if (!*p_hv_pa_pg)
545		return -ENOMEM;
546
547	evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs;
548
549	evmcs->partition_assist_page =
550		__pa(*p_hv_pa_pg);
551	evmcs->hv_vm_id = (unsigned long)vcpu->kvm;
552	evmcs->hv_enlightenments_control.nested_flush_hypercall = 1;
553
554	return 0;
555}
556
557static __init void hv_init_evmcs(void)
558{
559	int cpu;
560
561	if (!enlightened_vmcs)
562		return;
563
564	/*
565	 * Enlightened VMCS usage should be recommended by Hyper-V, and the host
566	 * needs to support eVMCS v1 or above.
567	 */
568	if (ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED &&
569	    (ms_hyperv.nested_features & HV_X64_ENLIGHTENED_VMCS_VERSION) >=
570	     KVM_EVMCS_VERSION) {
571
572		/* Check that we have assist pages on all online CPUs */
573		for_each_online_cpu(cpu) {
574			if (!hv_get_vp_assist_page(cpu)) {
575				enlightened_vmcs = false;
576				break;
577			}
578		}
579
580		if (enlightened_vmcs) {
581			pr_info("Using Hyper-V Enlightened VMCS\n");
582			static_branch_enable(&__kvm_is_using_evmcs);
583		}
584
585		if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH)
586			vmx_x86_ops.enable_l2_tlb_flush
587				= hv_enable_l2_tlb_flush;
588
589	} else {
590		enlightened_vmcs = false;
591	}
592}
593
594static void hv_reset_evmcs(void)
595{
596	struct hv_vp_assist_page *vp_ap;
597
598	if (!kvm_is_using_evmcs())
599		return;
600
601	/*
602	 * KVM should enable eVMCS if and only if all CPUs have a VP assist
603	 * page, and should reject CPU onlining if eVMCS is enabled but the CPU
604	 * doesn't have a VP assist page allocated.
605	 */
606	vp_ap = hv_get_vp_assist_page(smp_processor_id());
607	if (WARN_ON_ONCE(!vp_ap))
608		return;
609
610	/*
611	 * Reset everything to support using non-enlightened VMCS access later
612	 * (e.g. when we reload the module with enlightened_vmcs=0)
613	 */
614	vp_ap->nested_control.features.directhypercall = 0;
615	vp_ap->current_nested_vmcs = 0;
616	vp_ap->enlighten_vmentry = 0;
617}
618
619#else /* IS_ENABLED(CONFIG_HYPERV) */
620static void hv_init_evmcs(void) {}
621static void hv_reset_evmcs(void) {}
622#endif /* IS_ENABLED(CONFIG_HYPERV) */
623
624/*
625 * Comment format: document - errata name - stepping - processor name.
626 * Taken from
627 * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
628 */
629static u32 vmx_preemption_cpu_tfms[] = {
630/* 323344.pdf - BA86   - D0 - Xeon 7500 Series */
6310x000206E6,
632/* 323056.pdf - AAX65  - C2 - Xeon L3406 */
633/* 322814.pdf - AAT59  - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
634/* 322911.pdf - AAU65  - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
6350x00020652,
636/* 322911.pdf - AAU65  - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
6370x00020655,
638/* 322373.pdf - AAO95  - B1 - Xeon 3400 Series */
639/* 322166.pdf - AAN92  - B1 - i7-800 and i5-700 Desktop */
640/*
641 * 320767.pdf - AAP86  - B1 -
642 * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
643 */
6440x000106E5,
645/* 321333.pdf - AAM126 - C0 - Xeon 3500 */
6460x000106A0,
647/* 321333.pdf - AAM126 - C1 - Xeon 3500 */
6480x000106A1,
649/* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
6500x000106A4,
651 /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
652 /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
653 /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
6540x000106A5,
655 /* Xeon E3-1220 V2 */
6560x000306A8,
657};
658
659static inline bool cpu_has_broken_vmx_preemption_timer(void)
660{
661	u32 eax = cpuid_eax(0x00000001), i;
662
663	/* Clear the reserved bits */
664	eax &= ~(0x3U << 14 | 0xfU << 28);
665	for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
666		if (eax == vmx_preemption_cpu_tfms[i])
667			return true;
668
669	return false;
670}
671
672static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
673{
674	return flexpriority_enabled && lapic_in_kernel(vcpu);
675}
676
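/*
 * Return the index of @msr in vmx_possible_passthrough_msrs[], or -ENOENT if
 * the MSR is not in the list.
 */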
677static int possible_passthrough_msr_slot(u32 msr)
678{
679	u32 i;
680
681	for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++)
682		if (vmx_possible_passthrough_msrs[i] == msr)
683			return i;
684
685	return -ENOENT;
686}
687
688static bool is_valid_passthrough_msr(u32 msr)
689{
690	bool r;
691
692	switch (msr) {
693	case 0x800 ... 0x8ff:
694		/* x2APIC MSRs. These are handled in vmx_update_msr_bitmap_x2apic() */
695		return true;
696	case MSR_IA32_RTIT_STATUS:
697	case MSR_IA32_RTIT_OUTPUT_BASE:
698	case MSR_IA32_RTIT_OUTPUT_MASK:
699	case MSR_IA32_RTIT_CR3_MATCH:
700	case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
701		/* PT MSRs. These are handled in pt_update_intercept_for_msr() */
702	case MSR_LBR_SELECT:
703	case MSR_LBR_TOS:
704	case MSR_LBR_INFO_0 ... MSR_LBR_INFO_0 + 31:
705	case MSR_LBR_NHM_FROM ... MSR_LBR_NHM_FROM + 31:
706	case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 31:
707	case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
708	case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
709		/* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
710		return true;
711	}
712
713	r = possible_passthrough_msr_slot(msr) != -ENOENT;
714
715	WARN(!r, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);
716
717	return r;
718}
719
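/*
 * Find the vCPU's shadow of a user return MSR, or NULL if the MSR isn't
 * tracked by the common user return MSR framework.
 */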
720struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
721{
722	int i;
723
724	i = kvm_find_user_return_msr(msr);
725	if (i >= 0)
726		return &vmx->guest_uret_msrs[i];
727	return NULL;
728}
729
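/*
 * Update the guest's shadow value of a user return MSR, and propagate the new
 * value to hardware via the user return MSR framework if the MSR is marked
 * for loading into hardware.
 */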
730static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
731				  struct vmx_uret_msr *msr, u64 data)
732{
733	unsigned int slot = msr - vmx->guest_uret_msrs;
734	int ret = 0;
735
736	if (msr->load_into_hardware) {
737		preempt_disable();
738		ret = kvm_set_user_return_msr(slot, data, msr->mask);
739		preempt_enable();
740	}
741	if (!ret)
742		msr->data = data;
743	return ret;
744}
745
746/*
747 * Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
748 *
749 * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
750 * atomically track post-VMXON state, e.g. this may be called in NMI context.
751 * Eat all faults, as all other faults on VMXOFF are mode related, i.e.
752 * faults are guaranteed to be due to the !post-VMXON check unless the CPU is
753 * magically in RM, VM86, compat mode, or at CPL>0.
754 */
755static int kvm_cpu_vmxoff(void)
756{
757	asm goto("1: vmxoff\n\t"
758			  _ASM_EXTABLE(1b, %l[fault])
759			  ::: "cc", "memory" : fault);
760
761	cr4_clear_bits(X86_CR4_VMXE);
762	return 0;
763
764fault:
765	cr4_clear_bits(X86_CR4_VMXE);
766	return -EIO;
767}
768
769static void vmx_emergency_disable(void)
770{
771	int cpu = raw_smp_processor_id();
772	struct loaded_vmcs *v;
773
774	kvm_rebooting = true;
775
776	/*
777	 * Note, CR4.VMXE can be _cleared_ in NMI context, but it can only be
778	 * set in task context.  If this races with VMX being disabled by an NMI,
779	 * VMCLEAR and VMXOFF may #UD, but KVM will eat those faults because
780	 * kvm_rebooting is set.
781	 */
782	if (!(__read_cr4() & X86_CR4_VMXE))
783		return;
784
785	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
786			    loaded_vmcss_on_cpu_link)
787		vmcs_clear(v->vmcs);
788
789	kvm_cpu_vmxoff();
790}
791
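/*
 * VMCLEAR the given loaded_vmcs and remove it from the current CPU's list of
 * loaded VMCSs.  Runs on the target CPU, via IPI from loaded_vmcs_clear().
 */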
792static void __loaded_vmcs_clear(void *arg)
793{
794	struct loaded_vmcs *loaded_vmcs = arg;
795	int cpu = raw_smp_processor_id();
796
797	if (loaded_vmcs->cpu != cpu)
798		return; /* vcpu migration can race with cpu offline */
799	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
800		per_cpu(current_vmcs, cpu) = NULL;
801
802	vmcs_clear(loaded_vmcs->vmcs);
803	if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
804		vmcs_clear(loaded_vmcs->shadow_vmcs);
805
806	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
807
808	/*
809	 * Ensure all writes to loaded_vmcs, including deleting it from its
810	 * current percpu list, complete before setting loaded_vmcs->cpu to
811	 * -1, otherwise a different cpu can see loaded_vmcs->cpu == -1 first
812	 * and add loaded_vmcs to its percpu list before it's deleted from this
813	 * cpu's list. Pairs with the smp_rmb() in vmx_vcpu_load_vmcs().
814	 */
815	smp_wmb();
816
817	loaded_vmcs->cpu = -1;
818	loaded_vmcs->launched = 0;
819}
820
821void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
822{
823	int cpu = loaded_vmcs->cpu;
824
825	if (cpu != -1)
826		smp_call_function_single(cpu,
827			 __loaded_vmcs_clear, loaded_vmcs, 1);
828}
829
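/*
 * Test whether a guest segment field is already cached and mark it as cached
 * either way, so that a subsequent read can skip the VMREAD.
 */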
830static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
831				       unsigned field)
832{
833	bool ret;
834	u32 mask = 1 << (seg * SEG_FIELD_NR + field);
835
836	if (!kvm_register_is_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS)) {
837		kvm_register_mark_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS);
838		vmx->segment_cache.bitmask = 0;
839	}
840	ret = vmx->segment_cache.bitmask & mask;
841	vmx->segment_cache.bitmask |= mask;
842	return ret;
843}
844
845static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
846{
847	u16 *p = &vmx->segment_cache.seg[seg].selector;
848
849	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
850		*p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
851	return *p;
852}
853
854static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
855{
856	ulong *p = &vmx->segment_cache.seg[seg].base;
857
858	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
859		*p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
860	return *p;
861}
862
863static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
864{
865	u32 *p = &vmx->segment_cache.seg[seg].limit;
866
867	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
868		*p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
869	return *p;
870}
871
872static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
873{
874	u32 *p = &vmx->segment_cache.seg[seg].ar;
875
876	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
877		*p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
878	return *p;
879}
880
881void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
882{
883	u32 eb;
884
885	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
886	     (1u << DB_VECTOR) | (1u << AC_VECTOR);
887	/*
888	 * Guest access to VMware backdoor ports could legitimately
889	 * trigger #GP because of TSS I/O permission bitmap.
890	 * We intercept those #GP and allow access to them anyway
891	 * as VMware does.
892	 */
893	if (enable_vmware_backdoor)
894		eb |= (1u << GP_VECTOR);
895	if ((vcpu->guest_debug &
896	     (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
897	    (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
898		eb |= 1u << BP_VECTOR;
899	if (to_vmx(vcpu)->rmode.vm86_active)
900		eb = ~0;
901	if (!vmx_need_pf_intercept(vcpu))
902		eb &= ~(1u << PF_VECTOR);
903
904	/* When we are running a nested L2 guest and L1 specified for it a
905	 * certain exception bitmap, we must trap the same exceptions and pass
906	 * them to L1. When running L2, we will only handle the exceptions
907	 * specified above if L1 did not want them.
908	 */
909	if (is_guest_mode(vcpu))
910		eb |= get_vmcs12(vcpu)->exception_bitmap;
911	else {
912		int mask = 0, match = 0;
913
914		if (enable_ept && (eb & (1u << PF_VECTOR))) {
915			/*
916			 * If EPT is enabled, #PF is currently only intercepted
917			 * if MAXPHYADDR is smaller on the guest than on the
918			 * host.  In that case we only care about present,
919			 * non-reserved faults.  For vmcs02, however, PFEC_MASK
920			 * and PFEC_MATCH are set in prepare_vmcs02_rare.
921			 */
922			mask = PFERR_PRESENT_MASK | PFERR_RSVD_MASK;
923			match = PFERR_PRESENT_MASK;
924		}
925		vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, mask);
926		vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, match);
927	}
928
929	/*
930	 * Disabling xfd interception indicates that dynamic xfeatures
931	 * might be used in the guest. Always trap #NM in this case
932	 * so that the guest's xfd_err is saved in a timely manner.
933	 */
934	if (vcpu->arch.xfd_no_write_intercept)
935		eb |= (1u << NM_VECTOR);
936
937	vmcs_write32(EXCEPTION_BITMAP, eb);
938}
939
940/*
941 * Check if MSR is intercepted for currently loaded MSR bitmap.
942 */
943static bool msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr)
944{
945	if (!(exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS))
946		return true;
947
948	return vmx_test_msr_bitmap_write(vmx->loaded_vmcs->msr_bitmap, msr);
949}
950
951unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
952{
953	unsigned int flags = 0;
954
955	if (vmx->loaded_vmcs->launched)
956		flags |= VMX_RUN_VMRESUME;
957
958	/*
959	 * If writes to the SPEC_CTRL MSR aren't intercepted, the guest is free
960	 * to change it directly without causing a vmexit.  In that case read
961	 * it after vmexit and store it in vmx->spec_ctrl.
962	 */
963	if (!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))
964		flags |= VMX_RUN_SAVE_SPEC_CTRL;
965
966	return flags;
967}
968
969static __always_inline void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
970		unsigned long entry, unsigned long exit)
971{
972	vm_entry_controls_clearbit(vmx, entry);
973	vm_exit_controls_clearbit(vmx, exit);
974}
975
976int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr)
977{
978	unsigned int i;
979
980	for (i = 0; i < m->nr; ++i) {
981		if (m->val[i].index == msr)
982			return i;
983	}
984	return -ENOENT;
985}
986
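/*
 * Remove @msr from the VM-Entry/VM-Exit MSR autoload lists.  For EFER and
 * PERF_GLOBAL_CTRL, clear the dedicated VM-Entry/VM-Exit load controls
 * instead when the CPU supports them.
 */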
987static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
988{
989	int i;
990	struct msr_autoload *m = &vmx->msr_autoload;
991
992	switch (msr) {
993	case MSR_EFER:
994		if (cpu_has_load_ia32_efer()) {
995			clear_atomic_switch_msr_special(vmx,
996					VM_ENTRY_LOAD_IA32_EFER,
997					VM_EXIT_LOAD_IA32_EFER);
998			return;
999		}
1000		break;
1001	case MSR_CORE_PERF_GLOBAL_CTRL:
1002		if (cpu_has_load_perf_global_ctrl()) {
1003			clear_atomic_switch_msr_special(vmx,
1004					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1005					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
1006			return;
1007		}
1008		break;
1009	}
1010	i = vmx_find_loadstore_msr_slot(&m->guest, msr);
1011	if (i < 0)
1012		goto skip_guest;
1013	--m->guest.nr;
1014	m->guest.val[i] = m->guest.val[m->guest.nr];
1015	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
1016
1017skip_guest:
1018	i = vmx_find_loadstore_msr_slot(&m->host, msr);
1019	if (i < 0)
1020		return;
1021
1022	--m->host.nr;
1023	m->host.val[i] = m->host.val[m->host.nr];
1024	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
1025}
1026
1027static __always_inline void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
1028		unsigned long entry, unsigned long exit,
1029		unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
1030		u64 guest_val, u64 host_val)
1031{
1032	vmcs_write64(guest_val_vmcs, guest_val);
1033	if (host_val_vmcs != HOST_IA32_EFER)
1034		vmcs_write64(host_val_vmcs, host_val);
1035	vm_entry_controls_setbit(vmx, entry);
1036	vm_exit_controls_setbit(vmx, exit);
1037}
1038
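/*
 * Add @msr to the VM-Entry (and, unless @entry_only, VM-Exit) MSR autoload
 * lists so that hardware atomically switches between @guest_val and
 * @host_val.  For EFER and PERF_GLOBAL_CTRL, use the dedicated VMCS controls
 * when the CPU supports them.
 */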
1039static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
1040				  u64 guest_val, u64 host_val, bool entry_only)
1041{
1042	int i, j = 0;
1043	struct msr_autoload *m = &vmx->msr_autoload;
1044
1045	switch (msr) {
1046	case MSR_EFER:
1047		if (cpu_has_load_ia32_efer()) {
1048			add_atomic_switch_msr_special(vmx,
1049					VM_ENTRY_LOAD_IA32_EFER,
1050					VM_EXIT_LOAD_IA32_EFER,
1051					GUEST_IA32_EFER,
1052					HOST_IA32_EFER,
1053					guest_val, host_val);
1054			return;
1055		}
1056		break;
1057	case MSR_CORE_PERF_GLOBAL_CTRL:
1058		if (cpu_has_load_perf_global_ctrl()) {
1059			add_atomic_switch_msr_special(vmx,
1060					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1061					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
1062					GUEST_IA32_PERF_GLOBAL_CTRL,
1063					HOST_IA32_PERF_GLOBAL_CTRL,
1064					guest_val, host_val);
1065			return;
1066		}
1067		break;
1068	case MSR_IA32_PEBS_ENABLE:
1069		/* PEBS needs a quiescent period after being disabled (to write
1070		 * a record).  Disabling PEBS through VMX MSR swapping doesn't
1071		 * provide that period, so a CPU could write host's record into
1072		 * guest's memory.
1073		 */
1074		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
1075	}
1076
1077	i = vmx_find_loadstore_msr_slot(&m->guest, msr);
1078	if (!entry_only)
1079		j = vmx_find_loadstore_msr_slot(&m->host, msr);
1080
1081	if ((i < 0 && m->guest.nr == MAX_NR_LOADSTORE_MSRS) ||
1082	    (j < 0 &&  m->host.nr == MAX_NR_LOADSTORE_MSRS)) {
1083		printk_once(KERN_WARNING "Not enough msr switch entries. "
1084				"Can't add msr %x\n", msr);
1085		return;
1086	}
1087	if (i < 0) {
1088		i = m->guest.nr++;
1089		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
1090	}
1091	m->guest.val[i].index = msr;
1092	m->guest.val[i].value = guest_val;
1093
1094	if (entry_only)
1095		return;
1096
1097	if (j < 0) {
1098		j = m->host.nr++;
1099		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
1100	}
1101	m->host.val[j].index = msr;
1102	m->host.val[j].value = host_val;
1103}
1104
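/*
 * Decide how EFER is switched across VM-Entry/VM-Exit: either atomically via
 * the MSR autoload lists (or the dedicated EFER controls), or lazily via the
 * user return MSR mechanism.  Returns true if the user return MSR path is
 * used.
 */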
1105static bool update_transition_efer(struct vcpu_vmx *vmx)
1106{
1107	u64 guest_efer = vmx->vcpu.arch.efer;
1108	u64 ignore_bits = 0;
1109	int i;
1110
1111	/* Shadow paging assumes NX to be available.  */
1112	if (!enable_ept)
1113		guest_efer |= EFER_NX;
1114
1115	/*
1116	 * LMA and LME handled by hardware; SCE meaningless outside long mode.
1117	 */
1118	ignore_bits |= EFER_SCE;
1119#ifdef CONFIG_X86_64
1120	ignore_bits |= EFER_LMA | EFER_LME;
1121	/* SCE is meaningful only in long mode on Intel */
1122	if (guest_efer & EFER_LMA)
1123		ignore_bits &= ~(u64)EFER_SCE;
1124#endif
1125
1126	/*
1127	 * On EPT, we can't emulate NX, so we must switch EFER atomically.
1128	 * On CPUs that support "load IA32_EFER", always switch EFER
1129	 * atomically, since it's faster than switching it manually.
1130	 */
1131	if (cpu_has_load_ia32_efer() ||
1132	    (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
1133		if (!(guest_efer & EFER_LMA))
1134			guest_efer &= ~EFER_LME;
1135		if (guest_efer != host_efer)
1136			add_atomic_switch_msr(vmx, MSR_EFER,
1137					      guest_efer, host_efer, false);
1138		else
1139			clear_atomic_switch_msr(vmx, MSR_EFER);
1140		return false;
1141	}
1142
1143	i = kvm_find_user_return_msr(MSR_EFER);
1144	if (i < 0)
1145		return false;
1146
1147	clear_atomic_switch_msr(vmx, MSR_EFER);
1148
1149	guest_efer &= ~ignore_bits;
1150	guest_efer |= host_efer & ignore_bits;
1151
1152	vmx->guest_uret_msrs[i].data = guest_efer;
1153	vmx->guest_uret_msrs[i].mask = ~ignore_bits;
1154
1155	return true;
1156}
1157
1158#ifdef CONFIG_X86_32
1159/*
1160 * On 32-bit kernels, VM exits still load the FS and GS bases from the
1161 * VMCS rather than the segment table.  KVM uses this helper to figure
1162 * out the current bases to poke them into the VMCS before entry.
1163 */
1164static unsigned long segment_base(u16 selector)
1165{
1166	struct desc_struct *table;
1167	unsigned long v;
1168
1169	if (!(selector & ~SEGMENT_RPL_MASK))
1170		return 0;
1171
1172	table = get_current_gdt_ro();
1173
1174	if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
1175		u16 ldt_selector = kvm_read_ldt();
1176
1177		if (!(ldt_selector & ~SEGMENT_RPL_MASK))
1178			return 0;
1179
1180		table = (struct desc_struct *)segment_base(ldt_selector);
1181	}
1182	v = get_desc_base(&table[selector >> 3]);
1183	return v;
1184}
1185#endif
1186
1187static inline bool pt_can_write_msr(struct vcpu_vmx *vmx)
1188{
1189	return vmx_pt_mode_is_host_guest() &&
1190	       !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
1191}
1192
1193static inline bool pt_output_base_valid(struct kvm_vcpu *vcpu, u64 base)
1194{
1195	/* The base must be 128-byte aligned and a legal physical address. */
1196	return kvm_vcpu_is_legal_aligned_gpa(vcpu, base, 128);
1197}
1198
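/*
 * Load/save the Intel PT context (everything except RTIT_CTL, which is
 * handled separately) to/from the RTIT MSRs.
 */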
1199static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_range)
1200{
1201	u32 i;
1202
1203	wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
1204	wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
1205	wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
1206	wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
1207	for (i = 0; i < addr_range; i++) {
1208		wrmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
1209		wrmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
1210	}
1211}
1212
1213static inline void pt_save_msr(struct pt_ctx *ctx, u32 addr_range)
1214{
1215	u32 i;
1216
1217	rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
1218	rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
1219	rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
1220	rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
1221	for (i = 0; i < addr_range; i++) {
1222		rdmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
1223		rdmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
1224	}
1225}
1226
1227static void pt_guest_enter(struct vcpu_vmx *vmx)
1228{
1229	if (vmx_pt_mode_is_system())
1230		return;
1231
1232	/*
1233	 * GUEST_IA32_RTIT_CTL is already set in the VMCS.
1234	 * Save host state before VM entry.
1235	 */
1236	rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
1237	if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
1238		wrmsrl(MSR_IA32_RTIT_CTL, 0);
1239		pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges);
1240		pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges);
1241	}
1242}
1243
1244static void pt_guest_exit(struct vcpu_vmx *vmx)
1245{
1246	if (vmx_pt_mode_is_system())
1247		return;
1248
1249	if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
1250		pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges);
1251		pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges);
1252	}
1253
1254	/*
1255	 * KVM requires VM_EXIT_CLEAR_IA32_RTIT_CTL to expose PT to the guest,
1256	 * i.e. RTIT_CTL is always cleared on VM-Exit.  Restore it if necessary.
1257	 */
1258	if (vmx->pt_desc.host.ctl)
1259		wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
1260}
1261
1262void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
1263			unsigned long fs_base, unsigned long gs_base)
1264{
1265	if (unlikely(fs_sel != host->fs_sel)) {
1266		if (!(fs_sel & 7))
1267			vmcs_write16(HOST_FS_SELECTOR, fs_sel);
1268		else
1269			vmcs_write16(HOST_FS_SELECTOR, 0);
1270		host->fs_sel = fs_sel;
1271	}
1272	if (unlikely(gs_sel != host->gs_sel)) {
1273		if (!(gs_sel & 7))
1274			vmcs_write16(HOST_GS_SELECTOR, gs_sel);
1275		else
1276			vmcs_write16(HOST_GS_SELECTOR, 0);
1277		host->gs_sel = gs_sel;
1278	}
1279	if (unlikely(fs_base != host->fs_base)) {
1280		vmcs_writel(HOST_FS_BASE, fs_base);
1281		host->fs_base = fs_base;
1282	}
1283	if (unlikely(gs_base != host->gs_base)) {
1284		vmcs_writel(HOST_GS_BASE, gs_base);
1285		host->gs_base = gs_base;
1286	}
1287}
1288
1289void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
1290{
1291	struct vcpu_vmx *vmx = to_vmx(vcpu);
1292	struct vmcs_host_state *host_state;
1293#ifdef CONFIG_X86_64
1294	int cpu = raw_smp_processor_id();
1295#endif
1296	unsigned long fs_base, gs_base;
1297	u16 fs_sel, gs_sel;
1298	int i;
1299
1300	vmx->req_immediate_exit = false;
1301
1302	/*
1303	 * Note that guest MSRs to be saved/restored can also be changed
1304	 * when guest state is loaded. This happens when guest transitions
1305	 * to/from long-mode by setting MSR_EFER.LMA.
1306	 */
1307	if (!vmx->guest_uret_msrs_loaded) {
1308		vmx->guest_uret_msrs_loaded = true;
1309		for (i = 0; i < kvm_nr_uret_msrs; ++i) {
1310			if (!vmx->guest_uret_msrs[i].load_into_hardware)
1311				continue;
1312
1313			kvm_set_user_return_msr(i,
1314						vmx->guest_uret_msrs[i].data,
1315						vmx->guest_uret_msrs[i].mask);
1316		}
1317	}
1318
1319	if (vmx->nested.need_vmcs12_to_shadow_sync)
1320		nested_sync_vmcs12_to_shadow(vcpu);
1321
1322	if (vmx->guest_state_loaded)
1323		return;
1324
1325	host_state = &vmx->loaded_vmcs->host_state;
1326
1327	/*
1328	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
1329	 * allow segment selectors with cpl > 0 or ti == 1.
1330	 */
1331	host_state->ldt_sel = kvm_read_ldt();
1332
1333#ifdef CONFIG_X86_64
1334	savesegment(ds, host_state->ds_sel);
1335	savesegment(es, host_state->es_sel);
1336
1337	gs_base = cpu_kernelmode_gs_base(cpu);
1338	if (likely(is_64bit_mm(current->mm))) {
1339		current_save_fsgs();
1340		fs_sel = current->thread.fsindex;
1341		gs_sel = current->thread.gsindex;
1342		fs_base = current->thread.fsbase;
1343		vmx->msr_host_kernel_gs_base = current->thread.gsbase;
1344	} else {
1345		savesegment(fs, fs_sel);
1346		savesegment(gs, gs_sel);
1347		fs_base = read_msr(MSR_FS_BASE);
1348		vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
1349	}
1350
1351	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1352#else
1353	savesegment(fs, fs_sel);
1354	savesegment(gs, gs_sel);
1355	fs_base = segment_base(fs_sel);
1356	gs_base = segment_base(gs_sel);
1357#endif
1358
1359	vmx_set_host_fs_gs(host_state, fs_sel, gs_sel, fs_base, gs_base);
1360	vmx->guest_state_loaded = true;
1361}
1362
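/*
 * Undo vmx_prepare_switch_to_guest(): restore the host segment and MSR state
 * whose reload was deferred while the guest state was loaded.
 */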
1363static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
1364{
1365	struct vmcs_host_state *host_state;
1366
1367	if (!vmx->guest_state_loaded)
1368		return;
1369
1370	host_state = &vmx->loaded_vmcs->host_state;
1371
1372	++vmx->vcpu.stat.host_state_reload;
1373
1374#ifdef CONFIG_X86_64
1375	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1376#endif
1377	if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
1378		kvm_load_ldt(host_state->ldt_sel);
1379#ifdef CONFIG_X86_64
1380		load_gs_index(host_state->gs_sel);
1381#else
1382		loadsegment(gs, host_state->gs_sel);
1383#endif
1384	}
1385	if (host_state->fs_sel & 7)
1386		loadsegment(fs, host_state->fs_sel);
1387#ifdef CONFIG_X86_64
1388	if (unlikely(host_state->ds_sel | host_state->es_sel)) {
1389		loadsegment(ds, host_state->ds_sel);
1390		loadsegment(es, host_state->es_sel);
1391	}
1392#endif
1393	invalidate_tss_limit();
1394#ifdef CONFIG_X86_64
1395	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
1396#endif
1397	load_fixmap_gdt(raw_smp_processor_id());
1398	vmx->guest_state_loaded = false;
1399	vmx->guest_uret_msrs_loaded = false;
1400}
1401
1402#ifdef CONFIG_X86_64
1403static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
1404{
1405	preempt_disable();
1406	if (vmx->guest_state_loaded)
1407		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1408	preempt_enable();
1409	return vmx->msr_guest_kernel_gs_base;
1410}
1411
1412static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
1413{
1414	preempt_disable();
1415	if (vmx->guest_state_loaded)
1416		wrmsrl(MSR_KERNEL_GS_BASE, data);
1417	preempt_enable();
1418	vmx->msr_guest_kernel_gs_base = data;
1419}
1420#endif
1421
1422void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
1423			struct loaded_vmcs *buddy)
1424{
1425	struct vcpu_vmx *vmx = to_vmx(vcpu);
1426	bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
1427	struct vmcs *prev;
1428
1429	if (!already_loaded) {
1430		loaded_vmcs_clear(vmx->loaded_vmcs);
1431		local_irq_disable();
1432
1433		/*
1434		 * Ensure loaded_vmcs->cpu is read before adding loaded_vmcs to
1435		 * this cpu's percpu list, otherwise it may not yet be deleted
1436		 * from its previous cpu's percpu list.  Pairs with the
1437		 * smp_wmb() in __loaded_vmcs_clear().
1438		 */
1439		smp_rmb();
1440
1441		list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
1442			 &per_cpu(loaded_vmcss_on_cpu, cpu));
1443		local_irq_enable();
1444	}
1445
1446	prev = per_cpu(current_vmcs, cpu);
1447	if (prev != vmx->loaded_vmcs->vmcs) {
1448		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
1449		vmcs_load(vmx->loaded_vmcs->vmcs);
1450
1451		/*
1452		 * No indirect branch prediction barrier needed when switching
1453		 * the active VMCS within a vCPU, unless IBRS is advertised to
1454		 * the vCPU.  To minimize the number of IBPBs executed, KVM
1455		 * performs IBPB on nested VM-Exit (a single nested transition
1456		 * may switch the active VMCS multiple times).
1457		 */
1458		if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
1459			indirect_branch_prediction_barrier();
1460	}
1461
1462	if (!already_loaded) {
1463		void *gdt = get_current_gdt_ro();
1464
1465		/*
1466		 * Flush all EPTP/VPID contexts, the new pCPU may have stale
1467		 * TLB entries from its previous association with the vCPU.
1468		 */
1469		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1470
1471		/*
1472		 * Linux uses per-cpu TSS and GDT, so set these when switching
1473		 * processors.  See 22.2.4.
1474		 */
1475		vmcs_writel(HOST_TR_BASE,
1476			    (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
1477		vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */
1478
1479		if (IS_ENABLED(CONFIG_IA32_EMULATION) || IS_ENABLED(CONFIG_X86_32)) {
1480			/* 22.2.3 */
1481			vmcs_writel(HOST_IA32_SYSENTER_ESP,
1482				    (unsigned long)(cpu_entry_stack(cpu) + 1));
1483		}
1484
1485		vmx->loaded_vmcs->cpu = cpu;
1486	}
1487}
1488
1489/*
1490 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
1491 * vcpu mutex is already taken.
1492 */
1493static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1494{
1495	struct vcpu_vmx *vmx = to_vmx(vcpu);
1496
1497	vmx_vcpu_load_vmcs(vcpu, cpu, NULL);
1498
1499	vmx_vcpu_pi_load(vcpu, cpu);
1500
1501	vmx->host_debugctlmsr = get_debugctlmsr();
1502}
1503
1504static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
1505{
1506	vmx_vcpu_pi_put(vcpu);
1507
1508	vmx_prepare_switch_to_host(to_vmx(vcpu));
1509}
1510
1511bool vmx_emulation_required(struct kvm_vcpu *vcpu)
1512{
1513	return emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu);
1514}
1515
1516unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
1517{
1518	struct vcpu_vmx *vmx = to_vmx(vcpu);
1519	unsigned long rflags, save_rflags;
1520
1521	if (!kvm_register_is_available(vcpu, VCPU_EXREG_RFLAGS)) {
1522		kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
1523		rflags = vmcs_readl(GUEST_RFLAGS);
1524		if (vmx->rmode.vm86_active) {
1525			rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
1526			save_rflags = vmx->rmode.save_rflags;
1527			rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
1528		}
1529		vmx->rflags = rflags;
1530	}
1531	return vmx->rflags;
1532}
1533
1534void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1535{
1536	struct vcpu_vmx *vmx = to_vmx(vcpu);
1537	unsigned long old_rflags;
1538
1539	/*
1540	 * Unlike CR0 and CR4, RFLAGS handling requires checking if the vCPU
1541	 * is an unrestricted guest in order to mark L2 as needing emulation
1542	 * if L1 runs L2 as a restricted guest.
1543	 */
1544	if (is_unrestricted_guest(vcpu)) {
1545		kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
1546		vmx->rflags = rflags;
1547		vmcs_writel(GUEST_RFLAGS, rflags);
1548		return;
1549	}
1550
1551	old_rflags = vmx_get_rflags(vcpu);
1552	vmx->rflags = rflags;
1553	if (vmx->rmode.vm86_active) {
1554		vmx->rmode.save_rflags = rflags;
1555		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1556	}
1557	vmcs_writel(GUEST_RFLAGS, rflags);
1558
1559	if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
1560		vmx->emulation_required = vmx_emulation_required(vcpu);
1561}
1562
1563static bool vmx_get_if_flag(struct kvm_vcpu *vcpu)
1564{
1565	return vmx_get_rflags(vcpu) & X86_EFLAGS_IF;
1566}
1567
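/*
 * Translate the VMCS guest interruptibility state into KVM's generic
 * interrupt shadow flags (STI and MOV SS blocking).
 */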
1568u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
1569{
1570	u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1571	int ret = 0;
1572
1573	if (interruptibility & GUEST_INTR_STATE_STI)
1574		ret |= KVM_X86_SHADOW_INT_STI;
1575	if (interruptibility & GUEST_INTR_STATE_MOV_SS)
1576		ret |= KVM_X86_SHADOW_INT_MOV_SS;
1577
1578	return ret;
1579}
1580
1581void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
1582{
1583	u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1584	u32 interruptibility = interruptibility_old;
1585
1586	interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
1587
1588	if (mask & KVM_X86_SHADOW_INT_MOV_SS)
1589		interruptibility |= GUEST_INTR_STATE_MOV_SS;
1590	else if (mask & KVM_X86_SHADOW_INT_STI)
1591		interruptibility |= GUEST_INTR_STATE_STI;
1592
1593	if ((interruptibility != interruptibility_old))
1594		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
1595}
1596
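/*
 * Validate a guest WRMSR to IA32_RTIT_CTL against the PT capabilities exposed
 * to the guest.  Returns 1 if the write should #GP, 0 if it is legal.
 */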
1597static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
1598{
1599	struct vcpu_vmx *vmx = to_vmx(vcpu);
1600	unsigned long value;
1601
1602	/*
1603	 * Any MSR write that attempts to change bits marked reserved will
1604	 * cause a #GP fault.
1605	 */
1606	if (data & vmx->pt_desc.ctl_bitmask)
1607		return 1;
1608
1609	/*
1610	 * Any attempt to modify IA32_RTIT_CTL while TraceEn is set will
1611	 * result in a #GP unless the same write also clears TraceEn.
1612	 */
1613	if ((vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) &&
1614		((vmx->pt_desc.guest.ctl ^ data) & ~RTIT_CTL_TRACEEN))
1615		return 1;
1616
1617	/*
1618	 * WRMSR to IA32_RTIT_CTL that sets TraceEn but clears ToPA
1619	 * and FabricEn will cause a #GP if
1620	 * CPUID.(EAX=14H, ECX=0):ECX.SNGLRGNOUT[bit 2] = 0
1621	 */
1622	if ((data & RTIT_CTL_TRACEEN) && !(data & RTIT_CTL_TOPA) &&
1623		!(data & RTIT_CTL_FABRIC_EN) &&
1624		!intel_pt_validate_cap(vmx->pt_desc.caps,
1625					PT_CAP_single_range_output))
1626		return 1;
1627
1628	/*
1629	 * MTCFreq, CycThresh and PSBFreq encoding checks: any MSR write that
1630	 * utilizes encodings marked reserved will cause a #GP fault.
1631	 */
1632	value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods);
1633	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) &&
1634			!test_bit((data & RTIT_CTL_MTC_RANGE) >>
1635			RTIT_CTL_MTC_RANGE_OFFSET, &value))
1636		return 1;
1637	value = intel_pt_validate_cap(vmx->pt_desc.caps,
1638						PT_CAP_cycle_thresholds);
1639	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
1640			!test_bit((data & RTIT_CTL_CYC_THRESH) >>
1641			RTIT_CTL_CYC_THRESH_OFFSET, &value))
1642		return 1;
1643	value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_periods);
1644	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
1645			!test_bit((data & RTIT_CTL_PSB_FREQ) >>
1646			RTIT_CTL_PSB_FREQ_OFFSET, &value))
1647		return 1;
1648
1649	/*
1650	 * If an ADDRx_CFG encoding is reserved or is >2, the write will
1651	 * cause a #GP fault.
1652	 */
1653	value = (data & RTIT_CTL_ADDR0) >> RTIT_CTL_ADDR0_OFFSET;
1654	if ((value && (vmx->pt_desc.num_address_ranges < 1)) || (value > 2))
1655		return 1;
1656	value = (data & RTIT_CTL_ADDR1) >> RTIT_CTL_ADDR1_OFFSET;
1657	if ((value && (vmx->pt_desc.num_address_ranges < 2)) || (value > 2))
1658		return 1;
1659	value = (data & RTIT_CTL_ADDR2) >> RTIT_CTL_ADDR2_OFFSET;
1660	if ((value && (vmx->pt_desc.num_address_ranges < 3)) || (value > 2))
1661		return 1;
1662	value = (data & RTIT_CTL_ADDR3) >> RTIT_CTL_ADDR3_OFFSET;
1663	if ((value && (vmx->pt_desc.num_address_ranges < 4)) || (value > 2))
1664		return 1;
1665
1666	return 0;
1667}
1668
1669static bool vmx_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
1670					void *insn, int insn_len)
1671{
1672	/*
1673	 * Emulation of instructions in SGX enclaves is impossible as RIP does
1674	 * not point at the failing instruction, and even if it did, the code
1675	 * stream is inaccessible.  Inject #UD instead of exiting to userspace
1676	 * so that guest userspace can't DoS the guest simply by triggering
1677	 * emulation (enclaves are CPL3 only).
1678	 */
1679	if (to_vmx(vcpu)->exit_reason.enclave_mode) {
1680		kvm_queue_exception(vcpu, UD_VECTOR);
1681		return false;
1682	}
1683	return true;
1684}
1685
1686static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
1687{
1688	union vmx_exit_reason exit_reason = to_vmx(vcpu)->exit_reason;
1689	unsigned long rip, orig_rip;
1690	u32 instr_len;
1691
1692	/*
1693	 * Using VMCS.VM_EXIT_INSTRUCTION_LEN on EPT misconfig depends on
1694	 * undefined behavior: Intel's SDM doesn't mandate the VMCS field be
1695	 * set when EPT misconfig occurs.  In practice, real hardware updates
1696	 * VM_EXIT_INSTRUCTION_LEN on EPT misconfig, but other hypervisors
1697	 * (namely Hyper-V) don't set it due to it being undefined behavior,
1698	 * i.e. we end up advancing IP with some random value.
1699	 */
1700	if (!static_cpu_has(X86_FEATURE_HYPERVISOR) ||
1701	    exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) {
1702		instr_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
1703
1704		/*
1705		 * Emulating an enclave's instructions isn't supported as KVM
1706		 * cannot access the enclave's memory or its true RIP, e.g. the
1707		 * vmcs.GUEST_RIP points at the exit point of the enclave, not
1708		 * the RIP that actually triggered the VM-Exit.  But, because
1709		 * most instructions that cause VM-Exit will #UD in an enclave,
1710		 * most instruction-based VM-Exits simply do not occur.
1711		 *
1712		 * There are a few exceptions, notably the debug instructions
1713		 * INT1ICEBRK and INT3, as they are allowed in debug enclaves
1714		 * and generate #DB/#BP as expected, which KVM might intercept.
1715		 * But again, the CPU does the dirty work and saves an instr
1716		 * length of zero so VMMs don't shoot themselves in the foot.
1717		 * WARN if KVM tries to skip a non-zero length instruction on
1718		 * a VM-Exit from an enclave.
1719		 */
1720		if (!instr_len)
1721			goto rip_updated;
1722
1723		WARN_ONCE(exit_reason.enclave_mode,
1724			  "skipping instruction after SGX enclave VM-Exit");
1725
1726		orig_rip = kvm_rip_read(vcpu);
1727		rip = orig_rip + instr_len;
1728#ifdef CONFIG_X86_64
1729		/*
1730		 * We need to mask out the high 32 bits of RIP if not in 64-bit
1731		 * mode, but just finding out that we are in 64-bit mode is
1732		 * quite expensive.  Only do it if there was a carry.
1733		 */
1734		if (unlikely(((rip ^ orig_rip) >> 31) == 3) && !is_64_bit_mode(vcpu))
1735			rip = (u32)rip;
1736#endif
1737		kvm_rip_write(vcpu, rip);
1738	} else {
1739		if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
1740			return 0;
1741	}
1742
1743rip_updated:
1744	/* skipping an emulated instruction also counts */
1745	vmx_set_interrupt_shadow(vcpu, 0);
1746
1747	return 1;
1748}
1749
1750/*
1751 * Recognizes a pending MTF VM-exit and records the nested state for later
1752 * delivery.
1753 */
1754static void vmx_update_emulated_instruction(struct kvm_vcpu *vcpu)
1755{
1756	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1757	struct vcpu_vmx *vmx = to_vmx(vcpu);
1758
1759	if (!is_guest_mode(vcpu))
1760		return;
1761
1762	/*
1763	 * Per the SDM, MTF takes priority over debug-trap exceptions besides
1764	 * TSS T-bit traps and ICEBP (INT1).  KVM doesn't emulate T-bit traps
1765	 * or ICEBP (in the emulator proper), and skipping of ICEBP after an
1766	 * intercepted #DB deliberately avoids single-step #DB and MTF updates
1767	 * as ICEBP is higher priority than both.  As instruction emulation is
1768	 * completed at this point (i.e. KVM is at the instruction boundary),
1769	 * any #DB exception pending delivery must be a debug-trap of lower
1770	 * priority than MTF.  Record the pending MTF state to be delivered in
1771	 * vmx_check_nested_events().
1772	 */
1773	if (nested_cpu_has_mtf(vmcs12) &&
1774	    (!vcpu->arch.exception.pending ||
1775	     vcpu->arch.exception.vector == DB_VECTOR) &&
1776	    (!vcpu->arch.exception_vmexit.pending ||
1777	     vcpu->arch.exception_vmexit.vector == DB_VECTOR)) {
1778		vmx->nested.mtf_pending = true;
1779		kvm_make_request(KVM_REQ_EVENT, vcpu);
1780	} else {
1781		vmx->nested.mtf_pending = false;
1782	}
1783}
1784
1785static int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu)
1786{
1787	vmx_update_emulated_instruction(vcpu);
1788	return skip_emulated_instruction(vcpu);
1789}
1790
1791static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
1792{
1793	/*
1794	 * Ensure that we clear the HLT state in the VMCS.  We don't need to
1795	 * explicitly skip the instruction because if the HLT state is set,
1796	 * then the instruction is already executing and RIP has already been
1797	 * advanced.
1798	 */
1799	if (kvm_hlt_in_guest(vcpu->kvm) &&
1800			vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
1801		vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
1802}
1803
1804static void vmx_inject_exception(struct kvm_vcpu *vcpu)
1805{
1806	struct kvm_queued_exception *ex = &vcpu->arch.exception;
1807	u32 intr_info = ex->vector | INTR_INFO_VALID_MASK;
1808	struct vcpu_vmx *vmx = to_vmx(vcpu);
1809
1810	kvm_deliver_exception_payload(vcpu, ex);
1811
1812	if (ex->has_error_code) {
1813		/*
1814		 * Despite the error code being architecturally defined as 32
1815		 * bits, and the VMCS field being 32 bits, Intel CPUs and thus
1816		 * VMX don't actually support setting bits 31:16.  Hardware
1817		 * will (should) never provide a bogus error code, but AMD CPUs
1818		 * do generate error codes with bits 31:16 set, and so KVM's
1819		 * ABI lets userspace shove in arbitrary 32-bit values.  Drop
1820		 * the upper bits to avoid VM-Fail; losing information that
1821		 * doesn't really exist is preferable to killing the VM.
1822		 */
1823		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, (u16)ex->error_code);
1824		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
1825	}
1826
1827	if (vmx->rmode.vm86_active) {
1828		int inc_eip = 0;
1829		if (kvm_exception_is_soft(ex->vector))
1830			inc_eip = vcpu->arch.event_exit_inst_len;
1831		kvm_inject_realmode_interrupt(vcpu, ex->vector, inc_eip);
1832		return;
1833	}
1834
1835	WARN_ON_ONCE(vmx->emulation_required);
1836
1837	if (kvm_exception_is_soft(ex->vector)) {
1838		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
1839			     vmx->vcpu.arch.event_exit_inst_len);
1840		intr_info |= INTR_TYPE_SOFT_EXCEPTION;
1841	} else
1842		intr_info |= INTR_TYPE_HARD_EXCEPTION;
1843
1844	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
1845
1846	vmx_clear_hlt(vcpu);
1847}
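
/*
 * Editor's illustration, not part of upstream vmx.c: how the pieces above
 * combine.  Assuming the standard VM-entry interruption-information layout
 * (vector in bits 7:0, type in bits 10:8, "deliver error code" in bit 11,
 * "valid" in bit 31), a hardware #GP carrying an error code is encoded as
 * 0x80000b0d, and the (u16) cast above is what truncates the error code to
 * the usable low 16 bits.
 */
static inline u32 example_hard_gp_intr_info(void)
{
	return GP_VECTOR | INTR_TYPE_HARD_EXCEPTION |
	       INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK;
}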
1848
1849static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr,
1850			       bool load_into_hardware)
1851{
1852	struct vmx_uret_msr *uret_msr;
1853
1854	uret_msr = vmx_find_uret_msr(vmx, msr);
1855	if (!uret_msr)
1856		return;
1857
1858	uret_msr->load_into_hardware = load_into_hardware;
1859}
1860
1861/*
1862 * Configure user return MSRs to automatically save, load, and restore MSRs
1863 * that need to be shoved into hardware when running the guest.  Note, omitting
1864 * an MSR here does _NOT_ mean it's not emulated, only that it will not be
1865 * loaded into hardware when running the guest.
1866 */
1867static void vmx_setup_uret_msrs(struct vcpu_vmx *vmx)
1868{
1869#ifdef CONFIG_X86_64
1870	bool load_syscall_msrs;
1871
1872	/*
1873	 * The SYSCALL MSRs are only needed on long mode guests, and only
1874	 * when EFER.SCE is set.
1875	 */
1876	load_syscall_msrs = is_long_mode(&vmx->vcpu) &&
1877			    (vmx->vcpu.arch.efer & EFER_SCE);
1878
1879	vmx_setup_uret_msr(vmx, MSR_STAR, load_syscall_msrs);
1880	vmx_setup_uret_msr(vmx, MSR_LSTAR, load_syscall_msrs);
1881	vmx_setup_uret_msr(vmx, MSR_SYSCALL_MASK, load_syscall_msrs);
1882#endif
1883	vmx_setup_uret_msr(vmx, MSR_EFER, update_transition_efer(vmx));
1884
1885	vmx_setup_uret_msr(vmx, MSR_TSC_AUX,
1886			   guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP) ||
1887			   guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDPID));
1888
1889	/*
1890	 * hle=0, rtm=0, tsx_ctrl=1 can be found with some combinations of new
1891	 * kernel and old userspace.  If those guests run on a tsx=off host, do
1892	 * allow guests to use TSX_CTRL, but don't change the value in hardware
1893	 * so that TSX remains always disabled.
1894	 */
1895	vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL, boot_cpu_has(X86_FEATURE_RTM));
1896
1897	/*
1898	 * The set of MSRs to load may have changed, reload MSRs before the
1899	 * next VM-Enter.
1900	 */
1901	vmx->guest_uret_msrs_loaded = false;
1902}
1903
1904u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
1905{
1906	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1907
1908	if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING))
1909		return vmcs12->tsc_offset;
1910
1911	return 0;
1912}
1913
1914u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
1915{
1916	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1917
1918	if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING) &&
1919	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
1920		return vmcs12->tsc_multiplier;
1921
1922	return kvm_caps.default_tsc_scaling_ratio;
1923}
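
/*
 * Editor's illustration, not part of upstream vmx.c: the offset and
 * multiplier returned above are combined by common code into what the CPU
 * does when both TSC offsetting and TSC scaling are enabled -- scale first,
 * then add the offset.  Sketch, assuming the 48 fractional bits used for
 * VMX TSC scaling and mul_u64_u64_shr() from <linux/math64.h>:
 */
static inline u64 example_scaled_guest_tsc(u64 host_tsc, u64 multiplier,
					   u64 offset)
{
	return mul_u64_u64_shr(host_tsc, multiplier, 48) + offset;
}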
1924
1925static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu)
1926{
1927	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
1928}
1929
1930static void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu)
1931{
1932	vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
1933}
1934
1935/*
1936 * Userspace is allowed to set any supported IA32_FEATURE_CONTROL regardless of
1937 * guest CPUID.  Note, KVM allows userspace to set "VMX in SMX" to maintain
1938 * backwards compatibility even though KVM doesn't support emulating SMX.  And
1939 * because userspace set "VMX in SMX", the guest must also be allowed to set it,
1940 * e.g. if the MSR is left unlocked and the guest does a RMW operation.
1941 */
1942#define KVM_SUPPORTED_FEATURE_CONTROL  (FEAT_CTL_LOCKED			 | \
1943					FEAT_CTL_VMX_ENABLED_INSIDE_SMX	 | \
1944					FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX | \
1945					FEAT_CTL_SGX_LC_ENABLED		 | \
1946					FEAT_CTL_SGX_ENABLED		 | \
1947					FEAT_CTL_LMCE_ENABLED)
1948
1949static inline bool is_vmx_feature_control_msr_valid(struct vcpu_vmx *vmx,
1950						    struct msr_data *msr)
1951{
1952	uint64_t valid_bits;
1953
1954	/*
1955	 * Ensure KVM_SUPPORTED_FEATURE_CONTROL is updated when new bits are
1956	 * exposed to the guest.
1957	 */
1958	WARN_ON_ONCE(vmx->msr_ia32_feature_control_valid_bits &
1959		     ~KVM_SUPPORTED_FEATURE_CONTROL);
1960
1961	if (!msr->host_initiated &&
1962	    (vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED))
1963		return false;
1964
1965	if (msr->host_initiated)
1966		valid_bits = KVM_SUPPORTED_FEATURE_CONTROL;
1967	else
1968		valid_bits = vmx->msr_ia32_feature_control_valid_bits;
1969
1970	return !(msr->data & ~valid_bits);
1971}
1972
1973static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
1974{
1975	switch (msr->index) {
1976	case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
1977		if (!nested)
1978			return 1;
1979		return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
1980	default:
1981		return KVM_MSR_RET_INVALID;
1982	}
1983}
1984
1985/*
1986 * Reads an msr value (of 'msr_info->index') into 'msr_info->data'.
1987 * Returns 0 on success, non-0 otherwise.
1988 * Assumes vcpu_load() was already called.
1989 */
1990static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1991{
1992	struct vcpu_vmx *vmx = to_vmx(vcpu);
1993	struct vmx_uret_msr *msr;
1994	u32 index;
1995
1996	switch (msr_info->index) {
1997#ifdef CONFIG_X86_64
1998	case MSR_FS_BASE:
1999		msr_info->data = vmcs_readl(GUEST_FS_BASE);
2000		break;
2001	case MSR_GS_BASE:
2002		msr_info->data = vmcs_readl(GUEST_GS_BASE);
2003		break;
2004	case MSR_KERNEL_GS_BASE:
2005		msr_info->data = vmx_read_guest_kernel_gs_base(vmx);
2006		break;
2007#endif
2008	case MSR_EFER:
2009		return kvm_get_msr_common(vcpu, msr_info);
2010	case MSR_IA32_TSX_CTRL:
2011		if (!msr_info->host_initiated &&
2012		    !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
2013			return 1;
2014		goto find_uret_msr;
2015	case MSR_IA32_UMWAIT_CONTROL:
2016		if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
2017			return 1;
2018
2019		msr_info->data = vmx->msr_ia32_umwait_control;
2020		break;
2021	case MSR_IA32_SPEC_CTRL:
2022		if (!msr_info->host_initiated &&
2023		    !guest_has_spec_ctrl_msr(vcpu))
2024			return 1;
2025
2026		msr_info->data = to_vmx(vcpu)->spec_ctrl;
2027		break;
2028	case MSR_IA32_SYSENTER_CS:
2029		msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
2030		break;
2031	case MSR_IA32_SYSENTER_EIP:
2032		msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
2033		break;
2034	case MSR_IA32_SYSENTER_ESP:
2035		msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
2036		break;
2037	case MSR_IA32_BNDCFGS:
2038		if (!kvm_mpx_supported() ||
2039		    (!msr_info->host_initiated &&
2040		     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
2041			return 1;
2042		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
2043		break;
2044	case MSR_IA32_MCG_EXT_CTL:
2045		if (!msr_info->host_initiated &&
2046		    !(vmx->msr_ia32_feature_control &
2047		      FEAT_CTL_LMCE_ENABLED))
2048			return 1;
2049		msr_info->data = vcpu->arch.mcg_ext_ctl;
2050		break;
2051	case MSR_IA32_FEAT_CTL:
2052		msr_info->data = vmx->msr_ia32_feature_control;
2053		break;
2054	case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
2055		if (!msr_info->host_initiated &&
2056		    !guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC))
2057			return 1;
2058		msr_info->data = to_vmx(vcpu)->msr_ia32_sgxlepubkeyhash
2059			[msr_info->index - MSR_IA32_SGXLEPUBKEYHASH0];
2060		break;
2061	case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
2062		if (!guest_can_use(vcpu, X86_FEATURE_VMX))
2063			return 1;
2064		if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
2065				    &msr_info->data))
2066			return 1;
2067		/*
2068		 * Enlightened VMCS v1 doesn't have certain VMCS fields but
2069		 * instead of just ignoring the features, different Hyper-V
2070		 * versions are either trying to use them and fail or do some
2071		 * sanity checking and refuse to boot. Filter all unsupported
2072		 * features out.
2073		 */
2074		if (!msr_info->host_initiated && guest_cpuid_has_evmcs(vcpu))
2075			nested_evmcs_filter_control_msr(vcpu, msr_info->index,
2076							&msr_info->data);
2077		break;
2078	case MSR_IA32_RTIT_CTL:
2079		if (!vmx_pt_mode_is_host_guest())
2080			return 1;
2081		msr_info->data = vmx->pt_desc.guest.ctl;
2082		break;
2083	case MSR_IA32_RTIT_STATUS:
2084		if (!vmx_pt_mode_is_host_guest())
2085			return 1;
2086		msr_info->data = vmx->pt_desc.guest.status;
2087		break;
2088	case MSR_IA32_RTIT_CR3_MATCH:
2089		if (!vmx_pt_mode_is_host_guest() ||
2090			!intel_pt_validate_cap(vmx->pt_desc.caps,
2091						PT_CAP_cr3_filtering))
2092			return 1;
2093		msr_info->data = vmx->pt_desc.guest.cr3_match;
2094		break;
2095	case MSR_IA32_RTIT_OUTPUT_BASE:
2096		if (!vmx_pt_mode_is_host_guest() ||
2097			(!intel_pt_validate_cap(vmx->pt_desc.caps,
2098					PT_CAP_topa_output) &&
2099			 !intel_pt_validate_cap(vmx->pt_desc.caps,
2100					PT_CAP_single_range_output)))
2101			return 1;
2102		msr_info->data = vmx->pt_desc.guest.output_base;
2103		break;
2104	case MSR_IA32_RTIT_OUTPUT_MASK:
2105		if (!vmx_pt_mode_is_host_guest() ||
2106			(!intel_pt_validate_cap(vmx->pt_desc.caps,
2107					PT_CAP_topa_output) &&
2108			 !intel_pt_validate_cap(vmx->pt_desc.caps,
2109					PT_CAP_single_range_output)))
2110			return 1;
2111		msr_info->data = vmx->pt_desc.guest.output_mask;
2112		break;
2113	case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
2114		index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
2115		if (!vmx_pt_mode_is_host_guest() ||
2116		    (index >= 2 * vmx->pt_desc.num_address_ranges))
2117			return 1;
2118		if (index % 2)
2119			msr_info->data = vmx->pt_desc.guest.addr_b[index / 2];
2120		else
2121			msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
2122		break;
2123	case MSR_IA32_DEBUGCTLMSR:
2124		msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL);
2125		break;
2126	default:
2127	find_uret_msr:
2128		msr = vmx_find_uret_msr(vmx, msr_info->index);
2129		if (msr) {
2130			msr_info->data = msr->data;
2131			break;
2132		}
2133		return kvm_get_msr_common(vcpu, msr_info);
2134	}
2135
2136	return 0;
2137}
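
/*
 * Editor's illustration, not part of upstream vmx.c: the RTIT address-range
 * MSRs are laid out as {ADDR0_A, ADDR0_B, ADDR1_A, ADDR1_B, ...}, which is
 * why the RTIT_ADDR handlers above derive both the range number and the
 * A/B half from a single linear index.
 */
static inline void example_rtit_addr_decode(u32 msr, u32 *range, bool *is_b)
{
	u32 index = msr - MSR_IA32_RTIT_ADDR0_A;

	*range = index / 2;		/* address range 0..3 */
	*is_b = index % 2;		/* even MSRs are _A, odd MSRs are _B */
}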
2138
2139static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu,
2140						    u64 data)
2141{
2142#ifdef CONFIG_X86_64
2143	if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
2144		return (u32)data;
2145#endif
2146	return (unsigned long)data;
2147}
2148
2149static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated)
2150{
2151	u64 debugctl = 0;
2152
2153	if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) &&
2154	    (host_initiated || guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)))
2155		debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;
2156
2157	if ((kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT) &&
2158	    (host_initiated || intel_pmu_lbr_is_enabled(vcpu)))
2159		debugctl |= DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
2160
2161	return debugctl;
2162}
2163
2164/*
2165 * Writes msr value into the appropriate "register".
2166 * Returns 0 on success, non-0 otherwise.
2167 * Assumes vcpu_load() was already called.
2168 */
2169static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2170{
2171	struct vcpu_vmx *vmx = to_vmx(vcpu);
2172	struct vmx_uret_msr *msr;
2173	int ret = 0;
2174	u32 msr_index = msr_info->index;
2175	u64 data = msr_info->data;
2176	u32 index;
2177
2178	switch (msr_index) {
2179	case MSR_EFER:
2180		ret = kvm_set_msr_common(vcpu, msr_info);
2181		break;
2182#ifdef CONFIG_X86_64
2183	case MSR_FS_BASE:
2184		vmx_segment_cache_clear(vmx);
2185		vmcs_writel(GUEST_FS_BASE, data);
2186		break;
2187	case MSR_GS_BASE:
2188		vmx_segment_cache_clear(vmx);
2189		vmcs_writel(GUEST_GS_BASE, data);
2190		break;
2191	case MSR_KERNEL_GS_BASE:
2192		vmx_write_guest_kernel_gs_base(vmx, data);
2193		break;
2194	case MSR_IA32_XFD:
2195		ret = kvm_set_msr_common(vcpu, msr_info);
2196		/*
2197		 * Always intercepting WRMSR could incur non-negligible
2198		 * overhead given xfd might be changed frequently in
2199		 * guest context switch. Disable write interception
2200		 * upon the first write with a non-zero value (indicating
2201		 * potential usage on dynamic xfeatures). Also update
2202		 * exception bitmap to trap #NM for proper virtualization
2203		 * of guest xfd_err.
2204		 */
2205		if (!ret && data) {
2206			vmx_disable_intercept_for_msr(vcpu, MSR_IA32_XFD,
2207						      MSR_TYPE_RW);
2208			vcpu->arch.xfd_no_write_intercept = true;
2209			vmx_update_exception_bitmap(vcpu);
2210		}
2211		break;
2212#endif
2213	case MSR_IA32_SYSENTER_CS:
2214		if (is_guest_mode(vcpu))
2215			get_vmcs12(vcpu)->guest_sysenter_cs = data;
2216		vmcs_write32(GUEST_SYSENTER_CS, data);
2217		break;
2218	case MSR_IA32_SYSENTER_EIP:
2219		if (is_guest_mode(vcpu)) {
2220			data = nested_vmx_truncate_sysenter_addr(vcpu, data);
2221			get_vmcs12(vcpu)->guest_sysenter_eip = data;
2222		}
2223		vmcs_writel(GUEST_SYSENTER_EIP, data);
2224		break;
2225	case MSR_IA32_SYSENTER_ESP:
2226		if (is_guest_mode(vcpu)) {
2227			data = nested_vmx_truncate_sysenter_addr(vcpu, data);
2228			get_vmcs12(vcpu)->guest_sysenter_esp = data;
2229		}
2230		vmcs_writel(GUEST_SYSENTER_ESP, data);
2231		break;
2232	case MSR_IA32_DEBUGCTLMSR: {
2233		u64 invalid;
2234
2235		invalid = data & ~vmx_get_supported_debugctl(vcpu, msr_info->host_initiated);
2236		if (invalid & (DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR)) {
2237			kvm_pr_unimpl_wrmsr(vcpu, msr_index, data);
2238			data &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR);
2239			invalid &= ~(DEBUGCTLMSR_BTF|DEBUGCTLMSR_LBR);
2240		}
2241
2242		if (invalid)
2243			return 1;
2244
2245		if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls &
2246						VM_EXIT_SAVE_DEBUG_CONTROLS)
2247			get_vmcs12(vcpu)->guest_ia32_debugctl = data;
2248
2249		vmcs_write64(GUEST_IA32_DEBUGCTL, data);
2250		if (intel_pmu_lbr_is_enabled(vcpu) && !to_vmx(vcpu)->lbr_desc.event &&
2251		    (data & DEBUGCTLMSR_LBR))
2252			intel_pmu_create_guest_lbr_event(vcpu);
2253		return 0;
2254	}
2255	case MSR_IA32_BNDCFGS:
2256		if (!kvm_mpx_supported() ||
2257		    (!msr_info->host_initiated &&
2258		     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
2259			return 1;
2260		if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
2261		    (data & MSR_IA32_BNDCFGS_RSVD))
2262			return 1;
2263
2264		if (is_guest_mode(vcpu) &&
2265		    ((vmx->nested.msrs.entry_ctls_high & VM_ENTRY_LOAD_BNDCFGS) ||
2266		     (vmx->nested.msrs.exit_ctls_high & VM_EXIT_CLEAR_BNDCFGS)))
2267			get_vmcs12(vcpu)->guest_bndcfgs = data;
2268
2269		vmcs_write64(GUEST_BNDCFGS, data);
2270		break;
2271	case MSR_IA32_UMWAIT_CONTROL:
2272		if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
2273			return 1;
2274
2275		/* Reserved bit 1 and the upper bits [63:32] must be zero. */
2276		if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32)))
2277			return 1;
2278
2279		vmx->msr_ia32_umwait_control = data;
2280		break;
2281	case MSR_IA32_SPEC_CTRL:
2282		if (!msr_info->host_initiated &&
2283		    !guest_has_spec_ctrl_msr(vcpu))
2284			return 1;
2285
2286		if (kvm_spec_ctrl_test_value(data))
2287			return 1;
2288
2289		vmx->spec_ctrl = data;
2290		if (!data)
2291			break;
2292
2293		/*
2294		 * For non-nested:
2295		 * When it's written (to non-zero) for the first time, pass
2296		 * it through.
2297		 *
2298		 * For nested:
2299		 * The handling of the MSR bitmap for L2 guests is done in
2300		 * nested_vmx_prepare_msr_bitmap. We should not touch the
2301		 * vmcs02.msr_bitmap here since it gets completely overwritten
2302		 * in the merging. We update the vmcs01 here for L1 as well
2303		 * since it will end up touching the MSR anyway now.
2304		 */
2305		vmx_disable_intercept_for_msr(vcpu,
2306					      MSR_IA32_SPEC_CTRL,
2307					      MSR_TYPE_RW);
2308		break;
2309	case MSR_IA32_TSX_CTRL:
2310		if (!msr_info->host_initiated &&
2311		    !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
2312			return 1;
2313		if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR))
2314			return 1;
2315		goto find_uret_msr;
2316	case MSR_IA32_CR_PAT:
2317		ret = kvm_set_msr_common(vcpu, msr_info);
2318		if (ret)
2319			break;
2320
2321		if (is_guest_mode(vcpu) &&
2322		    get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
2323			get_vmcs12(vcpu)->guest_ia32_pat = data;
2324
2325		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
2326			vmcs_write64(GUEST_IA32_PAT, data);
2327		break;
2328	case MSR_IA32_MCG_EXT_CTL:
2329		if ((!msr_info->host_initiated &&
2330		     !(to_vmx(vcpu)->msr_ia32_feature_control &
2331		       FEAT_CTL_LMCE_ENABLED)) ||
2332		    (data & ~MCG_EXT_CTL_LMCE_EN))
2333			return 1;
2334		vcpu->arch.mcg_ext_ctl = data;
2335		break;
2336	case MSR_IA32_FEAT_CTL:
2337		if (!is_vmx_feature_control_msr_valid(vmx, msr_info))
2338			return 1;
2339
2340		vmx->msr_ia32_feature_control = data;
2341		if (msr_info->host_initiated && data == 0)
2342			vmx_leave_nested(vcpu);
2343
2344		/* SGX may be enabled/disabled by guest's firmware */
2345		vmx_write_encls_bitmap(vcpu, NULL);
2346		break;
2347	case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
2348		/*
2349		 * On real hardware, the LE hash MSRs are writable before
2350		 * the firmware sets bit 0 in MSR 0x7a ("activating" SGX),
2351		 * at which point SGX related bits in IA32_FEATURE_CONTROL
2352		 * become writable.
2353		 *
2354		 * KVM does not emulate SGX activation for simplicity, so
2355		 * allow writes to the LE hash MSRs if IA32_FEATURE_CONTROL
2356		 * is unlocked.  This is technically not architectural
2357		 * behavior, but it's close enough.
2358		 */
2359		if (!msr_info->host_initiated &&
2360		    (!guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC) ||
2361		    ((vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED) &&
2362		    !(vmx->msr_ia32_feature_control & FEAT_CTL_SGX_LC_ENABLED))))
2363			return 1;
2364		vmx->msr_ia32_sgxlepubkeyhash
2365			[msr_index - MSR_IA32_SGXLEPUBKEYHASH0] = data;
2366		break;
2367	case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
2368		if (!msr_info->host_initiated)
2369			return 1; /* they are read-only */
2370		if (!guest_can_use(vcpu, X86_FEATURE_VMX))
2371			return 1;
2372		return vmx_set_vmx_msr(vcpu, msr_index, data);
2373	case MSR_IA32_RTIT_CTL:
2374		if (!vmx_pt_mode_is_host_guest() ||
2375			vmx_rtit_ctl_check(vcpu, data) ||
2376			vmx->nested.vmxon)
2377			return 1;
2378		vmcs_write64(GUEST_IA32_RTIT_CTL, data);
2379		vmx->pt_desc.guest.ctl = data;
2380		pt_update_intercept_for_msr(vcpu);
2381		break;
2382	case MSR_IA32_RTIT_STATUS:
2383		if (!pt_can_write_msr(vmx))
2384			return 1;
2385		if (data & MSR_IA32_RTIT_STATUS_MASK)
2386			return 1;
2387		vmx->pt_desc.guest.status = data;
2388		break;
2389	case MSR_IA32_RTIT_CR3_MATCH:
2390		if (!pt_can_write_msr(vmx))
2391			return 1;
2392		if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2393					   PT_CAP_cr3_filtering))
2394			return 1;
2395		vmx->pt_desc.guest.cr3_match = data;
2396		break;
2397	case MSR_IA32_RTIT_OUTPUT_BASE:
2398		if (!pt_can_write_msr(vmx))
2399			return 1;
2400		if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2401					   PT_CAP_topa_output) &&
2402		    !intel_pt_validate_cap(vmx->pt_desc.caps,
2403					   PT_CAP_single_range_output))
2404			return 1;
2405		if (!pt_output_base_valid(vcpu, data))
2406			return 1;
2407		vmx->pt_desc.guest.output_base = data;
2408		break;
2409	case MSR_IA32_RTIT_OUTPUT_MASK:
2410		if (!pt_can_write_msr(vmx))
2411			return 1;
2412		if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2413					   PT_CAP_topa_output) &&
2414		    !intel_pt_validate_cap(vmx->pt_desc.caps,
2415					   PT_CAP_single_range_output))
2416			return 1;
2417		vmx->pt_desc.guest.output_mask = data;
2418		break;
2419	case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
2420		if (!pt_can_write_msr(vmx))
2421			return 1;
2422		index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
2423		if (index >= 2 * vmx->pt_desc.num_address_ranges)
2424			return 1;
2425		if (is_noncanonical_address(data, vcpu))
2426			return 1;
2427		if (index % 2)
2428			vmx->pt_desc.guest.addr_b[index / 2] = data;
2429		else
2430			vmx->pt_desc.guest.addr_a[index / 2] = data;
2431		break;
2432	case MSR_IA32_PERF_CAPABILITIES:
2433		if (data && !vcpu_to_pmu(vcpu)->version)
2434			return 1;
2435		if (data & PMU_CAP_LBR_FMT) {
2436			if ((data & PMU_CAP_LBR_FMT) !=
2437			    (kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT))
2438				return 1;
2439			if (!cpuid_model_is_consistent(vcpu))
2440				return 1;
2441		}
2442		if (data & PERF_CAP_PEBS_FORMAT) {
2443			if ((data & PERF_CAP_PEBS_MASK) !=
2444			    (kvm_caps.supported_perf_cap & PERF_CAP_PEBS_MASK))
2445				return 1;
2446			if (!guest_cpuid_has(vcpu, X86_FEATURE_DS))
2447				return 1;
2448			if (!guest_cpuid_has(vcpu, X86_FEATURE_DTES64))
2449				return 1;
2450			if (!cpuid_model_is_consistent(vcpu))
2451				return 1;
2452		}
2453		ret = kvm_set_msr_common(vcpu, msr_info);
2454		break;
2455
2456	default:
2457	find_uret_msr:
2458		msr = vmx_find_uret_msr(vmx, msr_index);
2459		if (msr)
2460			ret = vmx_set_guest_uret_msr(vmx, msr, data);
2461		else
2462			ret = kvm_set_msr_common(vcpu, msr_info);
2463	}
2464
2465	/* FB_CLEAR may have changed, also update the FB_CLEAR_DIS behavior */
2466	if (msr_index == MSR_IA32_ARCH_CAPABILITIES)
2467		vmx_update_fb_clear_dis(vcpu, vmx);
2468
2469	return ret;
2470}
2471
2472static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
2473{
2474	unsigned long guest_owned_bits;
2475
2476	kvm_register_mark_available(vcpu, reg);
2477
2478	switch (reg) {
2479	case VCPU_REGS_RSP:
2480		vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
2481		break;
2482	case VCPU_REGS_RIP:
2483		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
2484		break;
2485	case VCPU_EXREG_PDPTR:
2486		if (enable_ept)
2487			ept_save_pdptrs(vcpu);
2488		break;
2489	case VCPU_EXREG_CR0:
2490		guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
2491
2492		vcpu->arch.cr0 &= ~guest_owned_bits;
2493		vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & guest_owned_bits;
2494		break;
2495	case VCPU_EXREG_CR3:
2496		/*
2497		 * When intercepting CR3 loads, e.g. for shadow paging, KVM's
2498		 * CR3 is loaded into hardware, not the guest's CR3.
2499		 */
2500		if (!(exec_controls_get(to_vmx(vcpu)) & CPU_BASED_CR3_LOAD_EXITING))
2501			vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
2502		break;
2503	case VCPU_EXREG_CR4:
2504		guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
2505
2506		vcpu->arch.cr4 &= ~guest_owned_bits;
2507		vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits;
2508		break;
2509	default:
2510		KVM_BUG_ON(1, vcpu->kvm);
2511		break;
2512	}
2513}
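
/*
 * Editor's illustration, not part of upstream vmx.c: for CR0/CR4 the cases
 * above refresh only the guest-owned bits from the VMCS; all other bits trap
 * to KVM on writes, so the values KVM already has cached remain
 * authoritative.
 */
static inline unsigned long example_refresh_cr(unsigned long cached,
					       unsigned long vmcs_val,
					       unsigned long guest_owned)
{
	return (cached & ~guest_owned) | (vmcs_val & guest_owned);
}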
2514
2515/*
2516 * There is no X86_FEATURE for SGX yet, so query CPUID directly instead of
2517 * going through cpu_has(), to ensure KVM is trapping
2518 * ENCLS whenever it's supported in hardware.  It does not matter whether
2519 * the host OS supports or has enabled SGX.
2520 */
2521static bool cpu_has_sgx(void)
2522{
2523	return cpuid_eax(0) >= 0x12 && (cpuid_eax(0x12) & BIT(0));
2524}
2525
2526/*
2527 * Some CPUs support VM_{ENTRY,EXIT}_IA32_PERF_GLOBAL_CTRL, but the controls
2528 * can't be used due to errata where a VM-Exit may incorrectly clear
2529 * IA32_PERF_GLOBAL_CTRL[34:32].  Work around the errata by using the
2530 * MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL instead.
2531 */
2532static bool cpu_has_perf_global_ctrl_bug(void)
2533{
2534	if (boot_cpu_data.x86 == 0x6) {
2535		switch (boot_cpu_data.x86_model) {
2536		case INTEL_FAM6_NEHALEM_EP:	/* AAK155 */
2537		case INTEL_FAM6_NEHALEM:	/* AAP115 */
2538		case INTEL_FAM6_WESTMERE:	/* AAT100 */
2539		case INTEL_FAM6_WESTMERE_EP:	/* BC86,AAY89,BD102 */
2540		case INTEL_FAM6_NEHALEM_EX:	/* BA97 */
2541			return true;
2542		default:
2543			break;
2544		}
2545	}
2546
2547	return false;
2548}
2549
2550static int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, u32 msr, u32 *result)
2551{
2552	u32 vmx_msr_low, vmx_msr_high;
2553	u32 ctl = ctl_min | ctl_opt;
2554
2555	rdmsr(msr, vmx_msr_low, vmx_msr_high);
2556
2557	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
2558	ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */
2559
2560	/* Ensure minimum (required) set of control bits are supported. */
2561	if (ctl_min & ~ctl)
2562		return -EIO;
2563
2564	*result = ctl;
2565	return 0;
2566}
2567
2568static u64 adjust_vmx_controls64(u64 ctl_opt, u32 msr)
2569{
2570	u64 allowed;
2571
2572	rdmsrl(msr, allowed);
2573
2574	return  ctl_opt & allowed;
2575}
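
/*
 * Editor's illustration, not part of upstream vmx.c: the VMX capability MSRs
 * report "allowed-0" settings in the low dword (a 1 there means the control
 * must be set) and "allowed-1" settings in the high dword (a 0 there means
 * the control must be clear).  adjust_vmx_controls() above therefore keeps a
 * requested bit only if the high dword permits it, force-sets whatever the
 * low dword mandates, and then fails if a required bit was lost.
 */
static inline u32 example_adjust_ctls(u32 wanted, u32 allowed0, u32 allowed1)
{
	return (wanted & allowed1) | allowed0;
}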
2576
2577static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
2578			     struct vmx_capability *vmx_cap)
2579{
2580	u32 vmx_msr_low, vmx_msr_high;
2581	u32 _pin_based_exec_control = 0;
2582	u32 _cpu_based_exec_control = 0;
2583	u32 _cpu_based_2nd_exec_control = 0;
2584	u64 _cpu_based_3rd_exec_control = 0;
2585	u32 _vmexit_control = 0;
2586	u32 _vmentry_control = 0;
2587	u64 misc_msr;
2588	int i;
2589
2590	/*
2591	 * LOAD/SAVE_DEBUG_CONTROLS are absent because both are mandatory.
2592	 * SAVE_IA32_PAT and SAVE_IA32_EFER are absent because KVM always
2593	 * intercepts writes to PAT and EFER, i.e. never enables those controls.
2594	 */
2595	struct {
2596		u32 entry_control;
2597		u32 exit_control;
2598	} const vmcs_entry_exit_pairs[] = {
2599		{ VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,	VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL },
2600		{ VM_ENTRY_LOAD_IA32_PAT,		VM_EXIT_LOAD_IA32_PAT },
2601		{ VM_ENTRY_LOAD_IA32_EFER,		VM_EXIT_LOAD_IA32_EFER },
2602		{ VM_ENTRY_LOAD_BNDCFGS,		VM_EXIT_CLEAR_BNDCFGS },
2603		{ VM_ENTRY_LOAD_IA32_RTIT_CTL,		VM_EXIT_CLEAR_IA32_RTIT_CTL },
2604	};
2605
2606	memset(vmcs_conf, 0, sizeof(*vmcs_conf));
2607
2608	if (adjust_vmx_controls(KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL,
2609				KVM_OPTIONAL_VMX_CPU_BASED_VM_EXEC_CONTROL,
2610				MSR_IA32_VMX_PROCBASED_CTLS,
2611				&_cpu_based_exec_control))
2612		return -EIO;
2613	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
2614		if (adjust_vmx_controls(KVM_REQUIRED_VMX_SECONDARY_VM_EXEC_CONTROL,
2615					KVM_OPTIONAL_VMX_SECONDARY_VM_EXEC_CONTROL,
2616					MSR_IA32_VMX_PROCBASED_CTLS2,
2617					&_cpu_based_2nd_exec_control))
2618			return -EIO;
2619	}
2620#ifndef CONFIG_X86_64
2621	if (!(_cpu_based_2nd_exec_control &
2622				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
2623		_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
2624#endif
2625
2626	if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
2627		_cpu_based_2nd_exec_control &= ~(
2628				SECONDARY_EXEC_APIC_REGISTER_VIRT |
2629				SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2630				SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
2631
2632	rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP,
2633		&vmx_cap->ept, &vmx_cap->vpid);
2634
2635	if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
2636	    vmx_cap->ept) {
2637		pr_warn_once("EPT capabilities advertised even though the \"enable EPT\" "
2638				"VM-execution control cannot be set\n");
2639
2640		if (error_on_inconsistent_vmcs_config)
2641			return -EIO;
2642
2643		vmx_cap->ept = 0;
2644	}
2645	if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) &&
2646	    vmx_cap->vpid) {
2647		pr_warn_once("VPID capabilities advertised even though the \"enable VPID\" "
2648				"VM-execution control cannot be set\n");
2649
2650		if (error_on_inconsistent_vmcs_config)
2651			return -EIO;
2652
2653		vmx_cap->vpid = 0;
2654	}
2655
2656	if (!cpu_has_sgx())
2657		_cpu_based_2nd_exec_control &= ~SECONDARY_EXEC_ENCLS_EXITING;
2658
2659	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_TERTIARY_CONTROLS)
2660		_cpu_based_3rd_exec_control =
2661			adjust_vmx_controls64(KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL,
2662					      MSR_IA32_VMX_PROCBASED_CTLS3);
2663
2664	if (adjust_vmx_controls(KVM_REQUIRED_VMX_VM_EXIT_CONTROLS,
2665				KVM_OPTIONAL_VMX_VM_EXIT_CONTROLS,
2666				MSR_IA32_VMX_EXIT_CTLS,
2667				&_vmexit_control))
2668		return -EIO;
2669
2670	if (adjust_vmx_controls(KVM_REQUIRED_VMX_PIN_BASED_VM_EXEC_CONTROL,
2671				KVM_OPTIONAL_VMX_PIN_BASED_VM_EXEC_CONTROL,
2672				MSR_IA32_VMX_PINBASED_CTLS,
2673				&_pin_based_exec_control))
2674		return -EIO;
2675
2676	if (cpu_has_broken_vmx_preemption_timer())
2677		_pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
2678	if (!(_cpu_based_2nd_exec_control &
2679		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY))
2680		_pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
2681
2682	if (adjust_vmx_controls(KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS,
2683				KVM_OPTIONAL_VMX_VM_ENTRY_CONTROLS,
2684				MSR_IA32_VMX_ENTRY_CTLS,
2685				&_vmentry_control))
2686		return -EIO;
2687
2688	for (i = 0; i < ARRAY_SIZE(vmcs_entry_exit_pairs); i++) {
2689		u32 n_ctrl = vmcs_entry_exit_pairs[i].entry_control;
2690		u32 x_ctrl = vmcs_entry_exit_pairs[i].exit_control;
2691
2692		if (!(_vmentry_control & n_ctrl) == !(_vmexit_control & x_ctrl))
2693			continue;
2694
2695		pr_warn_once("Inconsistent VM-Entry/VM-Exit pair, entry = %x, exit = %x\n",
2696			     _vmentry_control & n_ctrl, _vmexit_control & x_ctrl);
2697
2698		if (error_on_inconsistent_vmcs_config)
2699			return -EIO;
2700
2701		_vmentry_control &= ~n_ctrl;
2702		_vmexit_control &= ~x_ctrl;
2703	}
2704
2705	rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
2706
2707	/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
2708	if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
2709		return -EIO;
2710
2711#ifdef CONFIG_X86_64
2712	/* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
2713	if (vmx_msr_high & (1u<<16))
2714		return -EIO;
2715#endif
2716
2717	/* Require Write-Back (WB) memory type for VMCS accesses. */
2718	if (((vmx_msr_high >> 18) & 15) != 6)
2719		return -EIO;
2720
2721	rdmsrl(MSR_IA32_VMX_MISC, misc_msr);
2722
2723	vmcs_conf->size = vmx_msr_high & 0x1fff;
2724	vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
2725
2726	vmcs_conf->revision_id = vmx_msr_low;
2727
2728	vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
2729	vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
2730	vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
2731	vmcs_conf->cpu_based_3rd_exec_ctrl = _cpu_based_3rd_exec_control;
2732	vmcs_conf->vmexit_ctrl         = _vmexit_control;
2733	vmcs_conf->vmentry_ctrl        = _vmentry_control;
2734	vmcs_conf->misc	= misc_msr;
2735
2736#if IS_ENABLED(CONFIG_HYPERV)
2737	if (enlightened_vmcs)
2738		evmcs_sanitize_exec_ctrls(vmcs_conf);
2739#endif
2740
2741	return 0;
2742}
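
/*
 * Editor's illustration, not part of upstream vmx.c: the MSR_IA32_VMX_BASIC
 * fields consumed above, extracted from the raw 64-bit value.  Bits 44:32
 * give the VMCS region size, bit 48 is set if VMX structures are limited to
 * 32-bit physical addresses, and bits 53:50 encode the required VMCS memory
 * type (6 == write-back).
 */
static inline void example_decode_vmx_basic(u64 basic, u32 *size,
					    bool *paddr_32bit_only,
					    u32 *mem_type)
{
	*size = (basic >> 32) & 0x1fff;
	*paddr_32bit_only = (basic >> 48) & 1;
	*mem_type = (basic >> 50) & 0xf;
}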
2743
2744static bool __kvm_is_vmx_supported(void)
2745{
2746	int cpu = smp_processor_id();
2747
2748	if (!(cpuid_ecx(1) & feature_bit(VMX))) {
2749		pr_err("VMX not supported by CPU %d\n", cpu);
2750		return false;
2751	}
2752
2753	if (!this_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
2754	    !this_cpu_has(X86_FEATURE_VMX)) {
2755		pr_err("VMX not enabled (by BIOS) in MSR_IA32_FEAT_CTL on CPU %d\n", cpu);
2756		return false;
2757	}
2758
2759	return true;
2760}
2761
2762static bool kvm_is_vmx_supported(void)
2763{
2764	bool supported;
2765
2766	migrate_disable();
2767	supported = __kvm_is_vmx_supported();
2768	migrate_enable();
2769
2770	return supported;
2771}
2772
2773static int vmx_check_processor_compat(void)
2774{
2775	int cpu = raw_smp_processor_id();
2776	struct vmcs_config vmcs_conf;
2777	struct vmx_capability vmx_cap;
2778
2779	if (!__kvm_is_vmx_supported())
2780		return -EIO;
2781
2782	if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0) {
2783		pr_err("Failed to setup VMCS config on CPU %d\n", cpu);
2784		return -EIO;
2785	}
2786	if (nested)
2787		nested_vmx_setup_ctls_msrs(&vmcs_conf, vmx_cap.ept);
2788	if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config))) {
2789		pr_err("Inconsistent VMCS config on CPU %d\n", cpu);
2790		return -EIO;
2791	}
2792	return 0;
2793}
2794
2795static int kvm_cpu_vmxon(u64 vmxon_pointer)
2796{
2797	u64 msr;
2798
2799	cr4_set_bits(X86_CR4_VMXE);
2800
2801	asm goto("1: vmxon %[vmxon_pointer]\n\t"
2802			  _ASM_EXTABLE(1b, %l[fault])
2803			  : : [vmxon_pointer] "m"(vmxon_pointer)
2804			  : : fault);
2805	return 0;
2806
2807fault:
2808	WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n",
2809		  rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);
2810	cr4_clear_bits(X86_CR4_VMXE);
2811
2812	return -EFAULT;
2813}
2814
2815static int vmx_hardware_enable(void)
2816{
2817	int cpu = raw_smp_processor_id();
2818	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
2819	int r;
2820
2821	if (cr4_read_shadow() & X86_CR4_VMXE)
2822		return -EBUSY;
2823
2824	/*
2825	 * This can happen if we hot-added a CPU but failed to allocate
2826	 * VP assist page for it.
2827	 */
2828	if (kvm_is_using_evmcs() && !hv_get_vp_assist_page(cpu))
2829		return -EFAULT;
2830
2831	intel_pt_handle_vmx(1);
2832
2833	r = kvm_cpu_vmxon(phys_addr);
2834	if (r) {
2835		intel_pt_handle_vmx(0);
2836		return r;
2837	}
2838
2839	if (enable_ept)
2840		ept_sync_global();
2841
2842	return 0;
2843}
2844
2845static void vmclear_local_loaded_vmcss(void)
2846{
2847	int cpu = raw_smp_processor_id();
2848	struct loaded_vmcs *v, *n;
2849
2850	list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
2851				 loaded_vmcss_on_cpu_link)
2852		__loaded_vmcs_clear(v);
2853}
2854
2855static void vmx_hardware_disable(void)
2856{
2857	vmclear_local_loaded_vmcss();
2858
2859	if (kvm_cpu_vmxoff())
2860		kvm_spurious_fault();
2861
2862	hv_reset_evmcs();
2863
2864	intel_pt_handle_vmx(0);
2865}
2866
2867struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
2868{
2869	int node = cpu_to_node(cpu);
2870	struct page *pages;
2871	struct vmcs *vmcs;
2872
2873	pages = __alloc_pages_node(node, flags, 0);
2874	if (!pages)
2875		return NULL;
2876	vmcs = page_address(pages);
2877	memset(vmcs, 0, vmcs_config.size);
2878
2879	/* KVM supports Enlightened VMCS v1 only */
2880	if (kvm_is_using_evmcs())
2881		vmcs->hdr.revision_id = KVM_EVMCS_VERSION;
2882	else
2883		vmcs->hdr.revision_id = vmcs_config.revision_id;
2884
2885	if (shadow)
2886		vmcs->hdr.shadow_vmcs = 1;
2887	return vmcs;
2888}
2889
2890void free_vmcs(struct vmcs *vmcs)
2891{
2892	free_page((unsigned long)vmcs);
2893}
2894
2895/*
2896 * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
2897 */
2898void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
2899{
2900	if (!loaded_vmcs->vmcs)
2901		return;
2902	loaded_vmcs_clear(loaded_vmcs);
2903	free_vmcs(loaded_vmcs->vmcs);
2904	loaded_vmcs->vmcs = NULL;
2905	if (loaded_vmcs->msr_bitmap)
2906		free_page((unsigned long)loaded_vmcs->msr_bitmap);
2907	WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
2908}
2909
2910int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
2911{
2912	loaded_vmcs->vmcs = alloc_vmcs(false);
2913	if (!loaded_vmcs->vmcs)
2914		return -ENOMEM;
2915
2916	vmcs_clear(loaded_vmcs->vmcs);
2917
2918	loaded_vmcs->shadow_vmcs = NULL;
2919	loaded_vmcs->hv_timer_soft_disabled = false;
2920	loaded_vmcs->cpu = -1;
2921	loaded_vmcs->launched = 0;
2922
2923	if (cpu_has_vmx_msr_bitmap()) {
2924		loaded_vmcs->msr_bitmap = (unsigned long *)
2925				__get_free_page(GFP_KERNEL_ACCOUNT);
2926		if (!loaded_vmcs->msr_bitmap)
2927			goto out_vmcs;
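		/*
		 * A set bit in the MSR bitmap causes a VM-Exit for the
		 * corresponding MSR access, so start by intercepting
		 * everything and let vmx_disable_intercept_for_msr() punch
		 * holes as needed.
		 */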
2928		memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
2929	}
2930
2931	memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));
2932	memset(&loaded_vmcs->controls_shadow, 0,
2933		sizeof(struct vmcs_controls_shadow));
2934
2935	return 0;
2936
2937out_vmcs:
2938	free_loaded_vmcs(loaded_vmcs);
2939	return -ENOMEM;
2940}
2941
2942static void free_kvm_area(void)
2943{
2944	int cpu;
2945
2946	for_each_possible_cpu(cpu) {
2947		free_vmcs(per_cpu(vmxarea, cpu));
2948		per_cpu(vmxarea, cpu) = NULL;
2949	}
2950}
2951
2952static __init int alloc_kvm_area(void)
2953{
2954	int cpu;
2955
2956	for_each_possible_cpu(cpu) {
2957		struct vmcs *vmcs;
2958
2959		vmcs = alloc_vmcs_cpu(false, cpu, GFP_KERNEL);
2960		if (!vmcs) {
2961			free_kvm_area();
2962			return -ENOMEM;
2963		}
2964
2965		/*
2966		 * When eVMCS is enabled, alloc_vmcs_cpu() sets
2967		 * vmcs->revision_id to KVM_EVMCS_VERSION instead of
2968		 * revision_id reported by MSR_IA32_VMX_BASIC.
2969		 *
2970		 * However, even though it is not explicitly documented by
2971		 * the TLFS, the VMXON region (vmxarea) must still be
2972		 * marked with the revision_id reported by the
2973		 * physical CPU.
2974		 */
2975		if (kvm_is_using_evmcs())
2976			vmcs->hdr.revision_id = vmcs_config.revision_id;
2977
2978		per_cpu(vmxarea, cpu) = vmcs;
2979	}
2980	return 0;
2981}
2982
2983static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
2984		struct kvm_segment *save)
2985{
2986	if (!emulate_invalid_guest_state) {
2987		/*
2988		 * CS and SS RPL should be equal during guest entry according
2989		 * to the VMX spec, but in reality that is not always the case.
2990		 * Since the vCPU is in the middle of the transition from real
2991		 * mode to protected mode, it is safe to assume that RPL 0 is a
2992		 * good default value.
2993		 */
2994		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
2995			save->selector &= ~SEGMENT_RPL_MASK;
2996		save->dpl = save->selector & SEGMENT_RPL_MASK;
2997		save->s = 1;
2998	}
2999	__vmx_set_segment(vcpu, save, seg);
3000}
3001
3002static void enter_pmode(struct kvm_vcpu *vcpu)
3003{
3004	unsigned long flags;
3005	struct vcpu_vmx *vmx = to_vmx(vcpu);
3006
3007	/*
3008	 * Update the real mode segment cache.  It may not be up to date if a
3009	 * segment register was written while the vCPU was in guest mode.
3010	 */
3011	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
3012	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
3013	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
3014	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
3015	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
3016	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
3017
3018	vmx->rmode.vm86_active = 0;
3019
3020	__vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
3021
3022	flags = vmcs_readl(GUEST_RFLAGS);
3023	flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
3024	flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
3025	vmcs_writel(GUEST_RFLAGS, flags);
3026
3027	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
3028			(vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
3029
3030	vmx_update_exception_bitmap(vcpu);
3031
3032	fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
3033	fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
3034	fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
3035	fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
3036	fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
3037	fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
3038}
3039
3040static void fix_rmode_seg(int seg, struct kvm_segment *save)
3041{
3042	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3043	struct kvm_segment var = *save;
3044
3045	var.dpl = 0x3;
3046	if (seg == VCPU_SREG_CS)
3047		var.type = 0x3;
3048
3049	if (!emulate_invalid_guest_state) {
3050		var.selector = var.base >> 4;
3051		var.base = var.base & 0xffff0;
3052		var.limit = 0xffff;
3053		var.g = 0;
3054		var.db = 0;
3055		var.present = 1;
3056		var.s = 1;
3057		var.l = 0;
3058		var.unusable = 0;
3059		var.type = 0x3;
3060		var.avl = 0;
3061		if (save->base & 0xf)
3062			pr_warn_once("segment base is not paragraph aligned "
3063				     "when entering protected mode (seg=%d)", seg);
3064	}
3065
3066	vmcs_write16(sf->selector, var.selector);
3067	vmcs_writel(sf->base, var.base);
3068	vmcs_write32(sf->limit, var.limit);
3069	vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
3070}
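
/*
 * Editor's illustration, not part of upstream vmx.c: in real mode a segment's
 * base and selector are tied together by base = selector << 4, which is why
 * the fixup above derives the selector from the base and warns when the base
 * is not paragraph (16-byte) aligned.  E.g. base 0xb8000 yields selector
 * 0xb800.
 */
static inline u16 example_rmode_selector(u32 base)
{
	return base >> 4;
}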
3071
3072static void enter_rmode(struct kvm_vcpu *vcpu)
3073{
3074	unsigned long flags;
3075	struct vcpu_vmx *vmx = to_vmx(vcpu);
3076	struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);
3077
3078	/*
3079	 * KVM should never use VM86 to virtualize Real Mode when L2 is active,
3080	 * as using VM86 is unnecessary if unrestricted guest is enabled, and
3081	 * if unrestricted guest is disabled, VM-Enter (from L1) with CR0.PG=0
3082	 * should VM-Fail and KVM should reject userspace attempts to stuff
3083	 * CR0.PG=0 when L2 is active.
3084	 */
3085	WARN_ON_ONCE(is_guest_mode(vcpu));
3086
3087	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
3088	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
3089	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
3090	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
3091	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
3092	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
3093	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
3094
3095	vmx->rmode.vm86_active = 1;
3096
3097	vmx_segment_cache_clear(vmx);
3098
3099	vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr);
3100	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
3101	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
3102
3103	flags = vmcs_readl(GUEST_RFLAGS);
3104	vmx->rmode.save_rflags = flags;
3105
3106	flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
3107
3108	vmcs_writel(GUEST_RFLAGS, flags);
3109	vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
3110	vmx_update_exception_bitmap(vcpu);
3111
3112	fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
3113	fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
3114	fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
3115	fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
3116	fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
3117	fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
3118}
3119
3120int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
3121{
3122	struct vcpu_vmx *vmx = to_vmx(vcpu);
3123
3124	/* Nothing to do if hardware doesn't support EFER. */
3125	if (!vmx_find_uret_msr(vmx, MSR_EFER))
3126		return 0;
3127
3128	vcpu->arch.efer = efer;
3129#ifdef CONFIG_X86_64
3130	if (efer & EFER_LMA)
3131		vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE);
3132	else
3133		vm_entry_controls_clearbit(vmx, VM_ENTRY_IA32E_MODE);
3134#else
3135	if (KVM_BUG_ON(efer & EFER_LMA, vcpu->kvm))
3136		return 1;
3137#endif
3138
3139	vmx_setup_uret_msrs(vmx);
3140	return 0;
3141}
3142
3143#ifdef CONFIG_X86_64
3144
3145static void enter_lmode(struct kvm_vcpu *vcpu)
3146{
3147	u32 guest_tr_ar;
3148
3149	vmx_segment_cache_clear(to_vmx(vcpu));
3150
3151	guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
3152	if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) {
3153		pr_debug_ratelimited("%s: tss fixup for long mode. \n",
3154				     __func__);
3155		vmcs_write32(GUEST_TR_AR_BYTES,
3156			     (guest_tr_ar & ~VMX_AR_TYPE_MASK)
3157			     | VMX_AR_TYPE_BUSY_64_TSS);
3158	}
3159	vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
3160}
3161
3162static void exit_lmode(struct kvm_vcpu *vcpu)
3163{
3164	vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
3165}
3166
3167#endif
3168
3169static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
3170{
3171	struct vcpu_vmx *vmx = to_vmx(vcpu);
3172
3173	/*
3174	 * INVEPT must be issued when EPT is enabled, irrespective of VPID, as
3175	 * the CPU is not required to invalidate guest-physical mappings on
3176	 * VM-Entry, even if VPID is disabled.  Guest-physical mappings are
3177	 * associated with the root EPT structure and not any particular VPID
3178	 * (INVVPID also isn't required to invalidate guest-physical mappings).
3179	 */
3180	if (enable_ept) {
3181		ept_sync_global();
3182	} else if (enable_vpid) {
3183		if (cpu_has_vmx_invvpid_global()) {
3184			vpid_sync_vcpu_global();
3185		} else {
3186			vpid_sync_vcpu_single(vmx->vpid);
3187			vpid_sync_vcpu_single(vmx->nested.vpid02);
3188		}
3189	}
3190}
3191
3192static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu)
3193{
3194	if (is_guest_mode(vcpu))
3195		return nested_get_vpid02(vcpu);
3196	return to_vmx(vcpu)->vpid;
3197}
3198
3199static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
3200{
3201	struct kvm_mmu *mmu = vcpu->arch.mmu;
3202	u64 root_hpa = mmu->root.hpa;
3203
3204	/* No flush required if the current context is invalid. */
3205	if (!VALID_PAGE(root_hpa))
3206		return;
3207
3208	if (enable_ept)
3209		ept_sync_context(construct_eptp(vcpu, root_hpa,
3210						mmu->root_role.level));
3211	else
3212		vpid_sync_context(vmx_get_current_vpid(vcpu));
3213}
3214
3215static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
3216{
3217	/*
3218	 * vpid_sync_vcpu_addr() is a nop if vpid==0, see the comment in
3219	 * vmx_flush_tlb_guest() for an explanation of why this is ok.
3220	 */
3221	vpid_sync_vcpu_addr(vmx_get_current_vpid(vcpu), addr);
3222}
3223
3224static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
3225{
3226	/*
3227	 * vpid_sync_context() is a nop if vpid==0, e.g. if enable_vpid==0 or a
3228	 * vpid couldn't be allocated for this vCPU.  VM-Enter and VM-Exit are
3229	 * required to flush GVA->{G,H}PA mappings from the TLB if vpid is
3230	 * disabled (VM-Enter with vpid enabled and vpid==0 is disallowed),
3231	 * i.e. no explicit INVVPID is necessary.
3232	 */
3233	vpid_sync_context(vmx_get_current_vpid(vcpu));
3234}
3235
3236void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
3237{
3238	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3239
3240	if (!kvm_register_is_dirty(vcpu, VCPU_EXREG_PDPTR))
3241		return;
3242
3243	if (is_pae_paging(vcpu)) {
3244		vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
3245		vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
3246		vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
3247		vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
3248	}
3249}
3250
3251void ept_save_pdptrs(struct kvm_vcpu *vcpu)
3252{
3253	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3254
3255	if (WARN_ON_ONCE(!is_pae_paging(vcpu)))
3256		return;
3257
3258	mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
3259	mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
3260	mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
3261	mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
3262
3263	kvm_register_mark_available(vcpu, VCPU_EXREG_PDPTR);
3264}
3265
3266#define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \
3267			  CPU_BASED_CR3_STORE_EXITING)
3268
3269static bool vmx_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
3270{
3271	if (is_guest_mode(vcpu))
3272		return nested_guest_cr0_valid(vcpu, cr0);
3273
3274	if (to_vmx(vcpu)->nested.vmxon)
3275		return nested_host_cr0_valid(vcpu, cr0);
3276
3277	return true;
3278}
3279
3280void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
3281{
3282	struct vcpu_vmx *vmx = to_vmx(vcpu);
3283	unsigned long hw_cr0, old_cr0_pg;
3284	u32 tmp;
3285
3286	old_cr0_pg = kvm_read_cr0_bits(vcpu, X86_CR0_PG);
3287
3288	hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF);
3289	if (enable_unrestricted_guest)
3290		hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
3291	else {
3292		hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
3293		if (!enable_ept)
3294			hw_cr0 |= X86_CR0_WP;
3295
3296		if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
3297			enter_pmode(vcpu);
3298
3299		if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
3300			enter_rmode(vcpu);
3301	}
3302
3303	vmcs_writel(CR0_READ_SHADOW, cr0);
3304	vmcs_writel(GUEST_CR0, hw_cr0);
3305	vcpu->arch.cr0 = cr0;
3306	kvm_register_mark_available(vcpu, VCPU_EXREG_CR0);
3307
3308#ifdef CONFIG_X86_64
3309	if (vcpu->arch.efer & EFER_LME) {
3310		if (!old_cr0_pg && (cr0 & X86_CR0_PG))
3311			enter_lmode(vcpu);
3312		else if (old_cr0_pg && !(cr0 & X86_CR0_PG))
3313			exit_lmode(vcpu);
3314	}
3315#endif
3316
3317	if (enable_ept && !enable_unrestricted_guest) {
3318		/*
3319		 * Ensure KVM has an up-to-date snapshot of the guest's CR3.  If
3320		 * the below code _enables_ CR3 exiting, vmx_cache_reg() will
3321		 * (correctly) stop reading vmcs.GUEST_CR3 because it thinks
3322		 * KVM's CR3 is installed.
3323		 */
3324		if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
3325			vmx_cache_reg(vcpu, VCPU_EXREG_CR3);
3326
3327		/*
3328		 * When running with EPT but not unrestricted guest, KVM must
3329		 * intercept CR3 accesses when paging is _disabled_.  This is
3330		 * necessary because restricted guests can't actually run with
3331		 * paging disabled, and so KVM stuffs its own CR3 in order to
3332		 * run the guest with identity mapped page tables.
3333		 *
3334		 * Do _NOT_ check the old CR0.PG, e.g. to optimize away the
3335		 * update; it may be stale with respect to CR3 interception,
3336		 * e.g. after nested VM-Enter.
3337		 *
3338		 * Lastly, honor L1's desires, i.e. intercept CR3 loads and/or
3339		 * stores to forward them to L1, even if KVM does not need to
3340		 * intercept them to preserve its identity mapped page tables.
3341		 */
3342		if (!(cr0 & X86_CR0_PG)) {
3343			exec_controls_setbit(vmx, CR3_EXITING_BITS);
3344		} else if (!is_guest_mode(vcpu)) {
3345			exec_controls_clearbit(vmx, CR3_EXITING_BITS);
3346		} else {
3347			tmp = exec_controls_get(vmx);
3348			tmp &= ~CR3_EXITING_BITS;
3349			tmp |= get_vmcs12(vcpu)->cpu_based_vm_exec_control & CR3_EXITING_BITS;
3350			exec_controls_set(vmx, tmp);
3351		}
3352
3353		/* Note, vmx_set_cr4() consumes the new vcpu->arch.cr0. */
3354		if ((old_cr0_pg ^ cr0) & X86_CR0_PG)
3355			vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
3356
3357		/*
3358		 * When !CR0_PG -> CR0_PG, vcpu->arch.cr3 becomes active, but
3359		 * GUEST_CR3 is still vmx->ept_identity_map_addr if EPT + !URG.
3360		 */
3361		if (!(old_cr0_pg & X86_CR0_PG) && (cr0 & X86_CR0_PG))
3362			kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
3363	}
3364
3365	/* depends on vcpu->arch.cr0 to be set to a new value */
3366	vmx->emulation_required = vmx_emulation_required(vcpu);
3367}
3368
3369static int vmx_get_max_ept_level(void)
3370{
3371	if (cpu_has_vmx_ept_5levels())
3372		return 5;
3373	return 4;
3374}
3375
3376u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level)
3377{
3378	u64 eptp = VMX_EPTP_MT_WB;
3379
3380	eptp |= (root_level == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4;
3381
3382	if (enable_ept_ad_bits &&
3383	    (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
3384		eptp |= VMX_EPTP_AD_ENABLE_BIT;
3385	eptp |= root_hpa;
3386
3387	return eptp;
3388}
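
/*
 * Editor's illustration, not part of upstream vmx.c: a worked example of the
 * EPTP encoding built above.  For a 4-level walk with A/D bits enabled and a
 * root table at 0x12345000, the memory type lands in bits 2:0 (6 == WB), the
 * walk length minus one in bits 5:3, the A/D enable in bit 6, and the root
 * physical address in the upper bits: 0x12345000 | 0x18 | 0x40 | 0x6 ==
 * 0x1234505e.
 */
static inline u64 example_eptp_4level_ad(u64 root_hpa)
{
	return root_hpa | VMX_EPTP_PWL_4 | VMX_EPTP_AD_ENABLE_BIT |
	       VMX_EPTP_MT_WB;
}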
3389
3390static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
3391			     int root_level)
3392{
3393	struct kvm *kvm = vcpu->kvm;
3394	bool update_guest_cr3 = true;
3395	unsigned long guest_cr3;
3396	u64 eptp;
3397
3398	if (enable_ept) {
3399		eptp = construct_eptp(vcpu, root_hpa, root_level);
3400		vmcs_write64(EPT_POINTER, eptp);
3401
3402		hv_track_root_tdp(vcpu, root_hpa);
3403
3404		if (!enable_unrestricted_guest && !is_paging(vcpu))
3405			guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
3406		else if (kvm_register_is_dirty(vcpu, VCPU_EXREG_CR3))
3407			guest_cr3 = vcpu->arch.cr3;
3408		else /* vmcs.GUEST_CR3 is already up-to-date. */
3409			update_guest_cr3 = false;
3410		vmx_ept_load_pdptrs(vcpu);
3411	} else {
3412		guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu);
3413	}
3414
3415	if (update_guest_cr3)
3416		vmcs_writel(GUEST_CR3, guest_cr3);
3417}
3418
3419
3420static bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
3421{
3422	/*
3423	 * We operate under the default treatment of SMM, so VMX cannot be
3424	 * enabled under SMM.  Note, whether or not VMXE is allowed at all,
3425	 * i.e. is a reserved bit, is handled by common x86 code.
3426	 */
3427	if ((cr4 & X86_CR4_VMXE) && is_smm(vcpu))
3428		return false;
3429
3430	if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
3431		return false;
3432
3433	return true;
3434}
3435
3436void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
3437{
3438	unsigned long old_cr4 = kvm_read_cr4(vcpu);
3439	struct vcpu_vmx *vmx = to_vmx(vcpu);
3440	unsigned long hw_cr4;
3441
3442	/*
3443	 * Pass through host's Machine Check Enable value to hw_cr4, which
3444	 * is in force while we are in guest mode.  Do not let guests control
3445	 * this bit, even if host CR4.MCE == 0.
3446	 */
3447	hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE);
3448	if (enable_unrestricted_guest)
3449		hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST;
3450	else if (vmx->rmode.vm86_active)
3451		hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON;
3452	else
3453		hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON;
3454
3455	if (vmx_umip_emulated()) {
3456		if (cr4 & X86_CR4_UMIP) {
3457			secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC);
3458			hw_cr4 &= ~X86_CR4_UMIP;
3459		} else if (!is_guest_mode(vcpu) ||
3460			!nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) {
3461			secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_DESC);
3462		}
3463	}
3464
3465	vcpu->arch.cr4 = cr4;
3466	kvm_register_mark_available(vcpu, VCPU_EXREG_CR4);
3467
3468	if (!enable_unrestricted_guest) {
3469		if (enable_ept) {
3470			if (!is_paging(vcpu)) {
3471				hw_cr4 &= ~X86_CR4_PAE;
3472				hw_cr4 |= X86_CR4_PSE;
3473			} else if (!(cr4 & X86_CR4_PAE)) {
3474				hw_cr4 &= ~X86_CR4_PAE;
3475			}
3476		}
3477
3478		/*
3479		 * SMEP/SMAP/PKU are disabled by hardware when the CPU is in
3480		 * non-paging mode.  To emulate this behavior, SMEP/SMAP/PKU
3481		 * need to be manually disabled when the guest switches to
3482		 * non-paging mode.
3483		 *
3484		 * If !enable_unrestricted_guest, the CPU is always running
3485		 * with CR0.PG=1 and CR4 needs to be modified.
3486		 * If enable_unrestricted_guest, the CPU automatically
3487		 * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0.
3488		 */
3489		if (!is_paging(vcpu))
3490			hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
3491	}
3492
3493	vmcs_writel(CR4_READ_SHADOW, cr4);
3494	vmcs_writel(GUEST_CR4, hw_cr4);
3495
3496	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
3497		kvm_update_cpuid_runtime(vcpu);
3498}
3499
3500void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
3501{
3502	struct vcpu_vmx *vmx = to_vmx(vcpu);
3503	u32 ar;
3504
3505	if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3506		*var = vmx->rmode.segs[seg];
3507		if (seg == VCPU_SREG_TR
3508		    || var->selector == vmx_read_guest_seg_selector(vmx, seg))
3509			return;
3510		var->base = vmx_read_guest_seg_base(vmx, seg);
3511		var->selector = vmx_read_guest_seg_selector(vmx, seg);
3512		return;
3513	}
3514	var->base = vmx_read_guest_seg_base(vmx, seg);
3515	var->limit = vmx_read_guest_seg_limit(vmx, seg);
3516	var->selector = vmx_read_guest_seg_selector(vmx, seg);
3517	ar = vmx_read_guest_seg_ar(vmx, seg);
3518	var->unusable = (ar >> 16) & 1;
3519	var->type = ar & 15;
3520	var->s = (ar >> 4) & 1;
3521	var->dpl = (ar >> 5) & 3;
3522	/*
3523	 * Some userspaces do not preserve the unusable property.  Since a usable
3524	 * segment has to be present according to the VMX spec, use the present
3525	 * property to work around the userspace bug by making an unusable segment
3526	 * always nonpresent.  vmx_segment_access_rights() already marks a
3527	 * nonpresent segment as unusable.
3528	 */
3529	var->present = !var->unusable;
3530	var->avl = (ar >> 12) & 1;
3531	var->l = (ar >> 13) & 1;
3532	var->db = (ar >> 14) & 1;
3533	var->g = (ar >> 15) & 1;
3534}
3535
3536static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
3537{
3538	struct kvm_segment s;
3539
3540	if (to_vmx(vcpu)->rmode.vm86_active) {
3541		vmx_get_segment(vcpu, &s, seg);
3542		return s.base;
3543	}
3544	return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
3545}
3546
3547int vmx_get_cpl(struct kvm_vcpu *vcpu)
3548{
3549	struct vcpu_vmx *vmx = to_vmx(vcpu);
3550
3551	if (unlikely(vmx->rmode.vm86_active))
3552		return 0;
3553	else {
3554		int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
3555		return VMX_AR_DPL(ar);
3556	}
3557}
3558
3559static u32 vmx_segment_access_rights(struct kvm_segment *var)
3560{
3561	u32 ar;
3562
3563	ar = var->type & 15;
3564	ar |= (var->s & 1) << 4;
3565	ar |= (var->dpl & 3) << 5;
3566	ar |= (var->present & 1) << 7;
3567	ar |= (var->avl & 1) << 12;
3568	ar |= (var->l & 1) << 13;
3569	ar |= (var->db & 1) << 14;
3570	ar |= (var->g & 1) << 15;
3571	ar |= (var->unusable || !var->present) << 16;
3572
3573	return ar;
3574}
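
/*
 * For illustration: a usable ring-3 data segment with type = 3, s = 1,
 * dpl = 3 and present = 1 (all other attribute bits clear) encodes, per the
 * bit layout above, as
 *
 *	0x3 | (1 << 4) | (3 << 5) | (1 << 7) = 0xf3
 *
 * which is the value rmode_segment_valid() below compares against when
 * sanity-checking the segments of a virtual-8086 guest.
 */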
3575
3576void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
3577{
3578	struct vcpu_vmx *vmx = to_vmx(vcpu);
3579	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3580
3581	vmx_segment_cache_clear(vmx);
3582
3583	if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3584		vmx->rmode.segs[seg] = *var;
3585		if (seg == VCPU_SREG_TR)
3586			vmcs_write16(sf->selector, var->selector);
3587		else if (var->s)
3588			fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
3589		return;
3590	}
3591
3592	vmcs_writel(sf->base, var->base);
3593	vmcs_write32(sf->limit, var->limit);
3594	vmcs_write16(sf->selector, var->selector);
3595
3596	/*
3597	 * Fix the "Accessed" bit in the AR field of segment registers for
3598	 * older qemu binaries.
3599	 * The IA-32 architecture specifies that, at processor reset, the
3600	 * "Accessed" bit in the AR field of segment registers is 1, but qemu
3601	 * sets it to 0 in its userland code.  This causes an invalid guest
3602	 * state vmexit when "unrestricted guest" mode is turned on.
3603	 * A fix for this setup issue in cpu_reset has been pushed to the
3604	 * qemu tree; newer qemu binaries with that fix do not need this
3605	 * kvm hack.
3606	 */
3607	if (is_unrestricted_guest(vcpu) && (seg != VCPU_SREG_LDTR))
3608		var->type |= 0x1; /* Accessed */
3609
3610	vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
3611}
3612
3613static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
3614{
3615	__vmx_set_segment(vcpu, var, seg);
3616
3617	to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
3618}
3619
3620static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
3621{
3622	u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
3623
3624	*db = (ar >> 14) & 1;
3625	*l = (ar >> 13) & 1;
3626}
3627
3628static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3629{
3630	dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
3631	dt->address = vmcs_readl(GUEST_IDTR_BASE);
3632}
3633
3634static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3635{
3636	vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
3637	vmcs_writel(GUEST_IDTR_BASE, dt->address);
3638}
3639
3640static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3641{
3642	dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
3643	dt->address = vmcs_readl(GUEST_GDTR_BASE);
3644}
3645
3646static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3647{
3648	vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
3649	vmcs_writel(GUEST_GDTR_BASE, dt->address);
3650}
3651
3652static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
3653{
3654	struct kvm_segment var;
3655	u32 ar;
3656
3657	vmx_get_segment(vcpu, &var, seg);
3658	var.dpl = 0x3;
3659	if (seg == VCPU_SREG_CS)
3660		var.type = 0x3;
3661	ar = vmx_segment_access_rights(&var);
3662
3663	if (var.base != (var.selector << 4))
3664		return false;
3665	if (var.limit != 0xffff)
3666		return false;
3667	if (ar != 0xf3)
3668		return false;
3669
3670	return true;
3671}
3672
3673static bool code_segment_valid(struct kvm_vcpu *vcpu)
3674{
3675	struct kvm_segment cs;
3676	unsigned int cs_rpl;
3677
3678	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
3679	cs_rpl = cs.selector & SEGMENT_RPL_MASK;
3680
3681	if (cs.unusable)
3682		return false;
3683	if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK))
3684		return false;
3685	if (!cs.s)
3686		return false;
3687	if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) {
3688		if (cs.dpl > cs_rpl)
3689			return false;
3690	} else {
3691		if (cs.dpl != cs_rpl)
3692			return false;
3693	}
3694	if (!cs.present)
3695		return false;
3696
3697	/* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
3698	return true;
3699}
3700
3701static bool stack_segment_valid(struct kvm_vcpu *vcpu)
3702{
3703	struct kvm_segment ss;
3704	unsigned int ss_rpl;
3705
3706	vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
3707	ss_rpl = ss.selector & SEGMENT_RPL_MASK;
3708
3709	if (ss.unusable)
3710		return true;
3711	if (ss.type != 3 && ss.type != 7)
3712		return false;
3713	if (!ss.s)
3714		return false;
3715	if (ss.dpl != ss_rpl) /* DPL != RPL */
3716		return false;
3717	if (!ss.present)
3718		return false;
3719
3720	return true;
3721}
3722
3723static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
3724{
3725	struct kvm_segment var;
3726	unsigned int rpl;
3727
3728	vmx_get_segment(vcpu, &var, seg);
3729	rpl = var.selector & SEGMENT_RPL_MASK;
3730
3731	if (var.unusable)
3732		return true;
3733	if (!var.s)
3734		return false;
3735	if (!var.present)
3736		return false;
3737	if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) {
3738		if (var.dpl < rpl) /* DPL < RPL */
3739			return false;
3740	}
3741
3742	/* TODO: Add other members to kvm_segment_field to allow checking
3743	 * for other access rights flags.
3744	 */
3745	return true;
3746}
3747
3748static bool tr_valid(struct kvm_vcpu *vcpu)
3749{
3750	struct kvm_segment tr;
3751
3752	vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
3753
3754	if (tr.unusable)
3755		return false;
3756	if (tr.selector & SEGMENT_TI_MASK)	/* TI = 1 */
3757		return false;
3758	if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
3759		return false;
3760	if (!tr.present)
3761		return false;
3762
3763	return true;
3764}
3765
3766static bool ldtr_valid(struct kvm_vcpu *vcpu)
3767{
3768	struct kvm_segment ldtr;
3769
3770	vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
3771
3772	if (ldtr.unusable)
3773		return true;
3774	if (ldtr.selector & SEGMENT_TI_MASK)	/* TI = 1 */
3775		return false;
3776	if (ldtr.type != 2)
3777		return false;
3778	if (!ldtr.present)
3779		return false;
3780
3781	return true;
3782}
3783
3784static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
3785{
3786	struct kvm_segment cs, ss;
3787
3788	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
3789	vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
3790
3791	return ((cs.selector & SEGMENT_RPL_MASK) ==
3792		 (ss.selector & SEGMENT_RPL_MASK));
3793}
3794
3795/*
3796 * Check if the guest state is valid.  Returns true if valid, false if
3797 * not.
3798 * We assume that registers are always usable.
3799 */
3800bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu)
3801{
3802	/* real mode guest state checks */
3803	if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
3804		if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
3805			return false;
3806		if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
3807			return false;
3808		if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
3809			return false;
3810		if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
3811			return false;
3812		if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
3813			return false;
3814		if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
3815			return false;
3816	} else {
3817	/* protected mode guest state checks */
3818		if (!cs_ss_rpl_check(vcpu))
3819			return false;
3820		if (!code_segment_valid(vcpu))
3821			return false;
3822		if (!stack_segment_valid(vcpu))
3823			return false;
3824		if (!data_segment_valid(vcpu, VCPU_SREG_DS))
3825			return false;
3826		if (!data_segment_valid(vcpu, VCPU_SREG_ES))
3827			return false;
3828		if (!data_segment_valid(vcpu, VCPU_SREG_FS))
3829			return false;
3830		if (!data_segment_valid(vcpu, VCPU_SREG_GS))
3831			return false;
3832		if (!tr_valid(vcpu))
3833			return false;
3834		if (!ldtr_valid(vcpu))
3835			return false;
3836	}
3837	/* TODO:
3838	 * - Add checks on RIP
3839	 * - Add checks on RFLAGS
3840	 */
3841
3842	return true;
3843}
3844
3845static int init_rmode_tss(struct kvm *kvm, void __user *ua)
3846{
3847	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
3848	u16 data;
3849	int i;
3850
3851	for (i = 0; i < 3; i++) {
3852		if (__copy_to_user(ua + PAGE_SIZE * i, zero_page, PAGE_SIZE))
3853			return -EFAULT;
3854	}
3855
3856	data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
3857	if (__copy_to_user(ua + TSS_IOPB_BASE_OFFSET, &data, sizeof(u16)))
3858		return -EFAULT;
3859
3860	data = ~0;
3861	if (__copy_to_user(ua + RMODE_TSS_SIZE - 1, &data, sizeof(u8)))
3862		return -EFAULT;
3863
3864	return 0;
3865}
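
/*
 * For illustration, the layout produced above is roughly (assuming the usual
 * meaning of the TSS constants, which are defined elsewhere in KVM):
 *
 *	ua + 0                     three zeroed pages
 *	ua + TSS_IOPB_BASE_OFFSET  I/O bitmap base = TSS_BASE_SIZE +
 *	                           TSS_REDIRECTION_SIZE, i.e. the bitmap
 *	                           follows the base TSS and redirection map
 *	ua + RMODE_TSS_SIZE - 1    a trailing 0xff byte terminating the
 *	                           I/O bitmap
 */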
3866
3867static int init_rmode_identity_map(struct kvm *kvm)
3868{
3869	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
3870	int i, r = 0;
3871	void __user *uaddr;
3872	u32 tmp;
3873
3874	/* Protect kvm_vmx->ept_identity_pagetable_done. */
3875	mutex_lock(&kvm->slots_lock);
3876
3877	if (likely(kvm_vmx->ept_identity_pagetable_done))
3878		goto out;
3879
3880	if (!kvm_vmx->ept_identity_map_addr)
3881		kvm_vmx->ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR;
3882
3883	uaddr = __x86_set_memory_region(kvm,
3884					IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
3885					kvm_vmx->ept_identity_map_addr,
3886					PAGE_SIZE);
3887	if (IS_ERR(uaddr)) {
3888		r = PTR_ERR(uaddr);
3889		goto out;
3890	}
3891
3892	/* Set up identity-mapping pagetable for EPT in real mode */
3893	for (i = 0; i < (PAGE_SIZE / sizeof(tmp)); i++) {
3894		tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
3895			_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
3896		if (__copy_to_user(uaddr + i * sizeof(tmp), &tmp, sizeof(tmp))) {
3897			r = -EFAULT;
3898			goto out;
3899		}
3900	}
3901	kvm_vmx->ept_identity_pagetable_done = true;
3902
3903out:
3904	mutex_unlock(&kvm->slots_lock);
3905	return r;
3906}
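
/*
 * For illustration: with 4 KiB pages the loop above writes PAGE_SIZE / 4 =
 * 1024 32-bit page-directory entries with the PSE bit set, so entry i maps
 * the 4 MiB range starting at i << 22 onto itself.  Entry 1, for example, is
 *
 *	(1 << 22) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
 *	_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE
 *
 * i.e. a writable, user-accessible 4 MiB page covering guest addresses
 * 0x400000 - 0x7fffff with an identity translation.
 */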
3907
3908static void seg_setup(int seg)
3909{
3910	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3911	unsigned int ar;
3912
3913	vmcs_write16(sf->selector, 0);
3914	vmcs_writel(sf->base, 0);
3915	vmcs_write32(sf->limit, 0xffff);
3916	ar = 0x93;
3917	if (seg == VCPU_SREG_CS)
3918		ar |= 0x08; /* code segment */
3919
3920	vmcs_write32(sf->ar_bytes, ar);
3921}
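
/*
 * For illustration, the access-rights values written above decode (using the
 * same bit layout as vmx_segment_access_rights()) as:
 *
 *	0x93 = present | s | type 3   read/write data segment, accessed
 *	0x9b = present | s | type 11  execute/read code segment, accessed
 *
 * i.e. the "ar |= 0x08" for VCPU_SREG_CS merely sets the "code" bit in the
 * segment type.
 */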
3922
3923int allocate_vpid(void)
3924{
3925	int vpid;
3926
3927	if (!enable_vpid)
3928		return 0;
3929	spin_lock(&vmx_vpid_lock);
3930	vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
3931	if (vpid < VMX_NR_VPIDS)
3932		__set_bit(vpid, vmx_vpid_bitmap);
3933	else
3934		vpid = 0;
3935	spin_unlock(&vmx_vpid_lock);
3936	return vpid;
3937}
3938
3939void free_vpid(int vpid)
3940{
3941	if (!enable_vpid || vpid == 0)
3942		return;
3943	spin_lock(&vmx_vpid_lock);
3944	__clear_bit(vpid, vmx_vpid_bitmap);
3945	spin_unlock(&vmx_vpid_lock);
3946}
3947
3948static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx)
3949{
3950	/*
3951	 * When KVM is a nested hypervisor on top of Hyper-V and uses
3952	 * 'Enlightened MSR Bitmap' feature L0 needs to know that MSR
3953	 * bitmap has changed.
3954	 */
3955	if (kvm_is_using_evmcs()) {
3956		struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
3957
3958		if (evmcs->hv_enlightenments_control.msr_bitmap)
3959			evmcs->hv_clean_fields &=
3960				~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
3961	}
3962
3963	vmx->nested.force_msr_bitmap_recalc = true;
3964}
3965
3966void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
3967{
3968	struct vcpu_vmx *vmx = to_vmx(vcpu);
3969	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
3970
3971	if (!cpu_has_vmx_msr_bitmap())
3972		return;
3973
3974	vmx_msr_bitmap_l01_changed(vmx);
3975
3976	/*
3977	 * Mark the desired intercept state in the shadow bitmap; this is
3978	 * needed for resync when the MSR filters change.
3979	 */
3980	if (is_valid_passthrough_msr(msr)) {
3981		int idx = possible_passthrough_msr_slot(msr);
3982
3983		if (idx != -ENOENT) {
3984			if (type & MSR_TYPE_R)
3985				clear_bit(idx, vmx->shadow_msr_intercept.read);
3986			if (type & MSR_TYPE_W)
3987				clear_bit(idx, vmx->shadow_msr_intercept.write);
3988		}
3989	}
3990
3991	if ((type & MSR_TYPE_R) &&
3992	    !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ)) {
3993		vmx_set_msr_bitmap_read(msr_bitmap, msr);
3994		type &= ~MSR_TYPE_R;
3995	}
3996
3997	if ((type & MSR_TYPE_W) &&
3998	    !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE)) {
3999		vmx_set_msr_bitmap_write(msr_bitmap, msr);
4000		type &= ~MSR_TYPE_W;
4001	}
4002
4003	if (type & MSR_TYPE_R)
4004		vmx_clear_msr_bitmap_read(msr_bitmap, msr);
4005
4006	if (type & MSR_TYPE_W)
4007		vmx_clear_msr_bitmap_write(msr_bitmap, msr);
4008}
4009
4010void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
4011{
4012	struct vcpu_vmx *vmx = to_vmx(vcpu);
4013	unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
4014
4015	if (!cpu_has_vmx_msr_bitmap())
4016		return;
4017
4018	vmx_msr_bitmap_l01_changed(vmx);
4019
4020	/*
4021	 * Mark the desired intercept state in the shadow bitmap; this is
4022	 * needed for resync when the MSR filter changes.
4023	 */
4024	if (is_valid_passthrough_msr(msr)) {
4025		int idx = possible_passthrough_msr_slot(msr);
4026
4027		if (idx != -ENOENT) {
4028			if (type & MSR_TYPE_R)
4029				set_bit(idx, vmx->shadow_msr_intercept.read);
4030			if (type & MSR_TYPE_W)
4031				set_bit(idx, vmx->shadow_msr_intercept.write);
4032		}
4033	}
4034
4035	if (type & MSR_TYPE_R)
4036		vmx_set_msr_bitmap_read(msr_bitmap, msr);
4037
4038	if (type & MSR_TYPE_W)
4039		vmx_set_msr_bitmap_write(msr_bitmap, msr);
4040}
4041
4042static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu *vcpu)
4043{
4044	/*
4045	 * x2APIC indices for 64-bit accesses into the RDMSR and WRMSR halves
4046	 * of the MSR bitmap.  KVM emulates APIC registers up through 0x3f0,
4047	 * i.e. MSR 0x83f, and so only needs to dynamically manipulate 64 bits.
4048	 */
4049	const int read_idx = APIC_BASE_MSR / BITS_PER_LONG_LONG;
4050	const int write_idx = read_idx + (0x800 / sizeof(u64));
4051	struct vcpu_vmx *vmx = to_vmx(vcpu);
4052	u64 *msr_bitmap = (u64 *)vmx->vmcs01.msr_bitmap;
4053	u8 mode;
4054
4055	if (!cpu_has_vmx_msr_bitmap() || WARN_ON_ONCE(!lapic_in_kernel(vcpu)))
4056		return;
4057
4058	if (cpu_has_secondary_exec_ctrls() &&
4059	    (secondary_exec_controls_get(vmx) &
4060	     SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
4061		mode = MSR_BITMAP_MODE_X2APIC;
4062		if (enable_apicv && kvm_vcpu_apicv_active(vcpu))
4063			mode |= MSR_BITMAP_MODE_X2APIC_APICV;
4064	} else {
4065		mode = 0;
4066	}
4067
4068	if (mode == vmx->x2apic_msr_bitmap_mode)
4069		return;
4070
4071	vmx->x2apic_msr_bitmap_mode = mode;
4072
4073	/*
4074	 * Reset the bitmap for MSRs 0x800 - 0x83f.  Leave AMD's uber-extended
4075	 * registers (0x840 and above) intercepted, as KVM doesn't support them.
4076	 * Intercept all writes by default and poke holes as needed.  Pass
4077	 * through reads for all valid registers by default in x2APIC+APICv
4078	 * mode, only the current timer count needs on-demand emulation by KVM.
4079	 */
4080	if (mode & MSR_BITMAP_MODE_X2APIC_APICV)
4081		msr_bitmap[read_idx] = ~kvm_lapic_readable_reg_mask(vcpu->arch.apic);
4082	else
4083		msr_bitmap[read_idx] = ~0ull;
4084	msr_bitmap[write_idx] = ~0ull;
4085
4086	/*
4087	 * TPR reads and writes can be virtualized even if virtual interrupt
4088	 * delivery is not in use.
4089	 */
4090	vmx_set_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW,
4091				  !(mode & MSR_BITMAP_MODE_X2APIC));
4092
4093	if (mode & MSR_BITMAP_MODE_X2APIC_APICV) {
4094		vmx_enable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_RW);
4095		vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_EOI), MSR_TYPE_W);
4096		vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W);
4097		if (enable_ipiv)
4098			vmx_disable_intercept_for_msr(vcpu, X2APIC_MSR(APIC_ICR), MSR_TYPE_RW);
4099	}
4100}
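
/*
 * For illustration, assuming the architectural 4 KiB MSR bitmap layout (read
 * bitmap for low MSRs at offset 0, write bitmap for low MSRs at offset
 * 0x800) and APIC_BASE_MSR == 0x800, the indices above work out to
 *
 *	read_idx  = 0x800 / 64     = 32   (u64 covering reads of 0x800-0x83f)
 *	write_idx = 32 + 0x800 / 8 = 288  (same 64 MSRs in the write half)
 *
 * so the two u64 stores in the function cover exactly the x2APIC MSR range
 * 0x800 - 0x83f that the function manages.
 */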
4101
4102void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu)
4103{
4104	struct vcpu_vmx *vmx = to_vmx(vcpu);
4105	bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
4106	u32 i;
4107
4108	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_STATUS, MSR_TYPE_RW, flag);
4109	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_BASE, MSR_TYPE_RW, flag);
4110	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_OUTPUT_MASK, MSR_TYPE_RW, flag);
4111	vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_CR3_MATCH, MSR_TYPE_RW, flag);
4112	for (i = 0; i < vmx->pt_desc.num_address_ranges; i++) {
4113		vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_A + i * 2, MSR_TYPE_RW, flag);
4114		vmx_set_intercept_for_msr(vcpu, MSR_IA32_RTIT_ADDR0_B + i * 2, MSR_TYPE_RW, flag);
4115	}
4116}
4117
4118static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
4119{
4120	struct vcpu_vmx *vmx = to_vmx(vcpu);
4121	void *vapic_page;
4122	u32 vppr;
4123	int rvi;
4124
4125	if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
4126		!nested_cpu_has_vid(get_vmcs12(vcpu)) ||
4127		WARN_ON_ONCE(!vmx->nested.virtual_apic_map.gfn))
4128		return false;
4129
4130	rvi = vmx_get_rvi();
4131
4132	vapic_page = vmx->nested.virtual_apic_map.hva;
4133	vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
4134
4135	return ((rvi & 0xf0) > (vppr & 0xf0));
4136}
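
/*
 * For illustration: the "& 0xf0" comparison above compares 16-vector
 * priority classes, as the APIC does.  With RVI = 0x51 (pending vector in
 * class 5) and VPPR = 0x40 (vCPU running at class 4), 0x50 > 0x40 and a
 * deliverable interrupt is reported; with VPPR = 0x50 or higher it is not.
 */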
4137
4138static void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
4139{
4140	struct vcpu_vmx *vmx = to_vmx(vcpu);
4141	u32 i;
4142
4143	/*
4144	 * Redo intercept permissions for MSRs that KVM is passing through to
4145	 * the guest.  Disabling interception will check the new MSR filter and
4146	 * ensure that KVM enables interception if userspace wants to filter
4147	 * the MSR.  MSRs that KVM is already intercepting don't need to be
4148	 * refreshed since KVM is going to intercept them regardless of what
4149	 * userspace wants.
4150	 */
4151	for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++) {
4152		u32 msr = vmx_possible_passthrough_msrs[i];
4153
4154		if (!test_bit(i, vmx->shadow_msr_intercept.read))
4155			vmx_disable_intercept_for_msr(vcpu, msr, MSR_TYPE_R);
4156
4157		if (!test_bit(i, vmx->shadow_msr_intercept.write))
4158			vmx_disable_intercept_for_msr(vcpu, msr, MSR_TYPE_W);
4159	}
4160
4161	/* PT MSRs can be passed through iff PT is exposed to the guest. */
4162	if (vmx_pt_mode_is_host_guest())
4163		pt_update_intercept_for_msr(vcpu);
4164}
4165
4166static inline void kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
4167						     int pi_vec)
4168{
4169#ifdef CONFIG_SMP
4170	if (vcpu->mode == IN_GUEST_MODE) {
4171		/*
4172		 * The vector of the virtual interrupt has already been set in the PIR.
4173		 * Send a notification event to deliver the virtual interrupt
4174		 * unless the vCPU is the currently running vCPU, i.e. the
4175		 * event is being sent from a fastpath VM-Exit handler, in
4176		 * which case the PIR will be synced to the vIRR before
4177		 * re-entering the guest.
4178		 *
4179		 * When the target is not the running vCPU, the following
4180		 * possibilities emerge:
4181		 *
4182		 * Case 1: vCPU stays in non-root mode. Sending a notification
4183		 * event posts the interrupt to the vCPU.
4184		 *
4185		 * Case 2: vCPU exits to root mode and is still runnable. The
4186		 * PIR will be synced to the vIRR before re-entering the guest.
4187		 * Sending a notification event is ok as the host IRQ handler
4188		 * will ignore the spurious event.
4189		 *
4190		 * Case 3: vCPU exits to root mode and is blocked. vcpu_block()
4191		 * has already synced PIR to vIRR and never blocks the vCPU if
4192		 * the vIRR is not empty. Therefore, a blocked vCPU here does
4193		 * not wait for any requested interrupts in PIR, and sending a
4194		 * notification event also results in a benign, spurious event.
4195		 */
4196
4197		if (vcpu != kvm_get_running_vcpu())
4198			__apic_send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
4199		return;
4200	}
4201#endif
4202	/*
4203	 * The vCPU isn't in the guest; wake the vCPU in case it is blocking,
4204	 * otherwise do nothing as KVM will grab the highest priority pending
4205	 * IRQ via ->sync_pir_to_irr() in vcpu_enter_guest().
4206	 */
4207	kvm_vcpu_wake_up(vcpu);
4208}
4209
4210static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
4211						int vector)
4212{
4213	struct vcpu_vmx *vmx = to_vmx(vcpu);
4214
4215	if (is_guest_mode(vcpu) &&
4216	    vector == vmx->nested.posted_intr_nv) {
4217		/*
4218		 * If a posted interrupt is not recognized by hardware,
4219		 * it will be delivered on the next vmentry.
4220		 */
4221		vmx->nested.pi_pending = true;
4222		kvm_make_request(KVM_REQ_EVENT, vcpu);
4223
4224		/*
4225		 * This pairs with the smp_mb_*() after setting vcpu->mode in
4226		 * vcpu_enter_guest() to guarantee the vCPU sees the event
4227		 * request if triggering a posted interrupt "fails" because
4228		 * vcpu->mode != IN_GUEST_MODE.  The extra barrier is needed as
4229		 * the smp_wmb() in kvm_make_request() only ensures everything
4230		 * done before making the request is visible when the request
4231		 * is visible, it doesn't ensure ordering between the store to
4232		 * vcpu->requests and the load from vcpu->mode.
4233		 */
4234		smp_mb__after_atomic();
4235
4236		/* the PIR and ON have been set by L1. */
4237		kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_NESTED_VECTOR);
4238		return 0;
4239	}
4240	return -1;
4241}
4242/*
4243 * Send an interrupt to a vcpu via posted interrupts.
4244 * 1. If the target vcpu is running (non-root mode), send a posted interrupt
4245 * notification and hardware will sync the PIR to the vIRR atomically.
4246 * 2. If the target vcpu isn't running (root mode), kick it to pick up the
4247 * interrupt from the PIR on the next vmentry.
4248 */
4249static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
4250{
4251	struct vcpu_vmx *vmx = to_vmx(vcpu);
4252	int r;
4253
4254	r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
4255	if (!r)
4256		return 0;
4257
4258	/* Note, this is called iff the local APIC is in-kernel. */
4259	if (!vcpu->arch.apic->apicv_active)
4260		return -1;
4261
4262	if (pi_test_and_set_pir(vector, &vmx->pi_desc))
4263		return 0;
4264
4265	/* If a previous notification has sent the IPI, nothing to do.  */
4266	if (pi_test_and_set_on(&vmx->pi_desc))
4267		return 0;
4268
4269	/*
4270	 * The implied barrier in pi_test_and_set_on() pairs with the smp_mb_*()
4271	 * after setting vcpu->mode in vcpu_enter_guest(), thus the vCPU is
4272	 * guaranteed to see PID.ON=1 and sync the PIR to IRR if triggering a
4273	 * posted interrupt "fails" because vcpu->mode != IN_GUEST_MODE.
4274	 */
4275	kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_VECTOR);
4276	return 0;
4277}
4278
4279static void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
4280				  int trig_mode, int vector)
4281{
4282	struct kvm_vcpu *vcpu = apic->vcpu;
4283
4284	if (vmx_deliver_posted_interrupt(vcpu, vector)) {
4285		kvm_lapic_set_irr(vector, apic);
4286		kvm_make_request(KVM_REQ_EVENT, vcpu);
4287		kvm_vcpu_kick(vcpu);
4288	} else {
4289		trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
4290					   trig_mode, vector);
4291	}
4292}
4293
4294/*
4295 * Set up the vmcs's constant host-state fields, i.e., host-state fields that
4296 * will not change in the lifetime of the guest.
4297 * Note that host-state that does change is set elsewhere. E.g., host-state
4298 * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
4299 */
4300void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
4301{
4302	u32 low32, high32;
4303	unsigned long tmpl;
4304	unsigned long cr0, cr3, cr4;
4305
4306	cr0 = read_cr0();
4307	WARN_ON(cr0 & X86_CR0_TS);
4308	vmcs_writel(HOST_CR0, cr0);  /* 22.2.3 */
4309
4310	/*
4311	 * Save the most likely value for this task's CR3 in the VMCS.
4312	 * We can't use __get_current_cr3_fast() because we're not atomic.
4313	 */
4314	cr3 = __read_cr3();
4315	vmcs_writel(HOST_CR3, cr3);		/* 22.2.3  FIXME: shadow tables */
4316	vmx->loaded_vmcs->host_state.cr3 = cr3;
4317
4318	/* Save the most likely value for this task's CR4 in the VMCS. */
4319	cr4 = cr4_read_shadow();
4320	vmcs_writel(HOST_CR4, cr4);			/* 22.2.3, 22.2.5 */
4321	vmx->loaded_vmcs->host_state.cr4 = cr4;
4322
4323	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
4324#ifdef CONFIG_X86_64
4325	/*
4326	 * Load null selectors, so we can avoid reloading them in
4327	 * vmx_prepare_switch_to_host(), in case userspace uses
4328	 * the null selectors too (the expected case).
4329	 */
4330	vmcs_write16(HOST_DS_SELECTOR, 0);
4331	vmcs_write16(HOST_ES_SELECTOR, 0);
4332#else
4333	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
4334	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
4335#endif
4336	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
4337	vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
4338
4339	vmcs_writel(HOST_IDTR_BASE, host_idt_base);   /* 22.2.4 */
4340
4341	vmcs_writel(HOST_RIP, (unsigned long)vmx_vmexit); /* 22.2.5 */
4342
4343	rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
4344	vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
4345
4346	/*
4347	 * SYSENTER is used for 32-bit system calls on either 32-bit or
4348	 * 64-bit kernels.  It is always zero if neither is allowed, otherwise
4349	 * vmx_vcpu_load_vmcs loads it with the per-CPU entry stack (and may
4350	 * have already done so!).
4351	 */
4352	if (!IS_ENABLED(CONFIG_IA32_EMULATION) && !IS_ENABLED(CONFIG_X86_32))
4353		vmcs_writel(HOST_IA32_SYSENTER_ESP, 0);
4354
4355	rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
4356	vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl);   /* 22.2.3 */
4357
4358	if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
4359		rdmsr(MSR_IA32_CR_PAT, low32, high32);
4360		vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
4361	}
4362
4363	if (cpu_has_load_ia32_efer())
4364		vmcs_write64(HOST_IA32_EFER, host_efer);
4365}
4366
4367void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
4368{
4369	struct kvm_vcpu *vcpu = &vmx->vcpu;
4370
4371	vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS &
4372					  ~vcpu->arch.cr4_guest_rsvd_bits;
4373	if (!enable_ept) {
4374		vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_TLBFLUSH_BITS;
4375		vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PDPTR_BITS;
4376	}
4377	if (is_guest_mode(&vmx->vcpu))
4378		vcpu->arch.cr4_guest_owned_bits &=
4379			~get_vmcs12(vcpu)->cr4_guest_host_mask;
4380	vmcs_writel(CR4_GUEST_HOST_MASK, ~vcpu->arch.cr4_guest_owned_bits);
4381}
4382
4383static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
4384{
4385	u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;
4386
4387	if (!kvm_vcpu_apicv_active(&vmx->vcpu))
4388		pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
4389
4390	if (!enable_vnmi)
4391		pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS;
4392
4393	if (!enable_preemption_timer)
4394		pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
4395
4396	return pin_based_exec_ctrl;
4397}
4398
4399static u32 vmx_vmentry_ctrl(void)
4400{
4401	u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;
4402
4403	if (vmx_pt_mode_is_system())
4404		vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
4405				  VM_ENTRY_LOAD_IA32_RTIT_CTL);
4406	/*
4407	 * IA32e mode, and loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically.
4408	 */
4409	vmentry_ctrl &= ~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |
4410			  VM_ENTRY_LOAD_IA32_EFER |
4411			  VM_ENTRY_IA32E_MODE);
4412
4413	if (cpu_has_perf_global_ctrl_bug())
4414		vmentry_ctrl &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
4415
4416	return vmentry_ctrl;
4417}
4418
4419static u32 vmx_vmexit_ctrl(void)
4420{
4421	u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;
4422
4423	/*
4424	 * Not used by KVM and never set in vmcs01 or vmcs02, but emulated for
4425	 * nested virtualization and thus allowed to be set in vmcs12.
4426	 */
4427	vmexit_ctrl &= ~(VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER |
4428			 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER);
4429
4430	if (vmx_pt_mode_is_system())
4431		vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
4432				 VM_EXIT_CLEAR_IA32_RTIT_CTL);
4433
4434	if (cpu_has_perf_global_ctrl_bug())
4435		vmexit_ctrl &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
4436
4437	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
4438	return vmexit_ctrl &
4439		~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
4440}
4441
4442static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
4443{
4444	struct vcpu_vmx *vmx = to_vmx(vcpu);
4445
4446	if (is_guest_mode(vcpu)) {
4447		vmx->nested.update_vmcs01_apicv_status = true;
4448		return;
4449	}
4450
4451	pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
4452
4453	if (kvm_vcpu_apicv_active(vcpu)) {
4454		secondary_exec_controls_setbit(vmx,
4455					       SECONDARY_EXEC_APIC_REGISTER_VIRT |
4456					       SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4457		if (enable_ipiv)
4458			tertiary_exec_controls_setbit(vmx, TERTIARY_EXEC_IPI_VIRT);
4459	} else {
4460		secondary_exec_controls_clearbit(vmx,
4461						 SECONDARY_EXEC_APIC_REGISTER_VIRT |
4462						 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4463		if (enable_ipiv)
4464			tertiary_exec_controls_clearbit(vmx, TERTIARY_EXEC_IPI_VIRT);
4465	}
4466
4467	vmx_update_msr_bitmap_x2apic(vcpu);
4468}
4469
4470static u32 vmx_exec_control(struct vcpu_vmx *vmx)
4471{
4472	u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
4473
4474	/*
4475	 * Not used by KVM, but fully supported for nesting, i.e. are allowed in
4476	 * vmcs12 and propagated to vmcs02 when set in vmcs12.
4477	 */
4478	exec_control &= ~(CPU_BASED_RDTSC_EXITING |
4479			  CPU_BASED_USE_IO_BITMAPS |
4480			  CPU_BASED_MONITOR_TRAP_FLAG |
4481			  CPU_BASED_PAUSE_EXITING);
4482
4483	/* INTR_WINDOW_EXITING and NMI_WINDOW_EXITING are toggled dynamically */
4484	exec_control &= ~(CPU_BASED_INTR_WINDOW_EXITING |
4485			  CPU_BASED_NMI_WINDOW_EXITING);
4486
4487	if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
4488		exec_control &= ~CPU_BASED_MOV_DR_EXITING;
4489
4490	if (!cpu_need_tpr_shadow(&vmx->vcpu))
4491		exec_control &= ~CPU_BASED_TPR_SHADOW;
4492
4493#ifdef CONFIG_X86_64
4494	if (exec_control & CPU_BASED_TPR_SHADOW)
4495		exec_control &= ~(CPU_BASED_CR8_LOAD_EXITING |
4496				  CPU_BASED_CR8_STORE_EXITING);
4497	else
4498		exec_control |= CPU_BASED_CR8_STORE_EXITING |
4499				CPU_BASED_CR8_LOAD_EXITING;
4500#endif
4501	/* No need to intercept CR3 access or INVLPG when using EPT. */
4502	if (enable_ept)
4503		exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
4504				  CPU_BASED_CR3_STORE_EXITING |
4505				  CPU_BASED_INVLPG_EXITING);
4506	if (kvm_mwait_in_guest(vmx->vcpu.kvm))
4507		exec_control &= ~(CPU_BASED_MWAIT_EXITING |
4508				CPU_BASED_MONITOR_EXITING);
4509	if (kvm_hlt_in_guest(vmx->vcpu.kvm))
4510		exec_control &= ~CPU_BASED_HLT_EXITING;
4511	return exec_control;
4512}
4513
4514static u64 vmx_tertiary_exec_control(struct vcpu_vmx *vmx)
4515{
4516	u64 exec_control = vmcs_config.cpu_based_3rd_exec_ctrl;
4517
4518	/*
4519	 * IPI virtualization relies on APICv. Disable IPI virtualization if
4520	 * APICv is inhibited.
4521	 */
4522	if (!enable_ipiv || !kvm_vcpu_apicv_active(&vmx->vcpu))
4523		exec_control &= ~TERTIARY_EXEC_IPI_VIRT;
4524
4525	return exec_control;
4526}
4527
4528/*
4529 * Adjust a single secondary execution control bit to intercept/allow an
4530 * instruction in the guest.  This is usually done based on whether or not a
4531 * feature has been exposed to the guest in order to correctly emulate faults.
4532 */
4533static inline void
4534vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control,
4535				  u32 control, bool enabled, bool exiting)
4536{
4537	/*
4538	 * If the control is for an opt-in feature, clear the control if the
4539	 * feature is not exposed to the guest, i.e. not enabled.  If the
4540	 * control is opt-out, i.e. an exiting control, clear the control if
4541	 * the feature _is_ exposed to the guest, i.e. exiting/interception is
4542	 * disabled for the associated instruction.  Note, the caller is
4543	 * responsible for presetting exec_control to set all supported bits.
4544	 */
4545	if (enabled == exiting)
4546		*exec_control &= ~control;
4547
4548	/*
4549	 * Update the nested MSR settings so that a nested VMM can/can't set
4550	 * controls for features that are/aren't exposed to the guest.
4551	 */
4552	if (nested) {
4553		/*
4554		 * All features that can be added or removed to VMX MSRs must
4555		 * be supported in the first place for nested virtualization.
4556		 */
4557		if (WARN_ON_ONCE(!(vmcs_config.nested.secondary_ctls_high & control)))
4558			enabled = false;
4559
4560		if (enabled)
4561			vmx->nested.msrs.secondary_ctls_high |= control;
4562		else
4563			vmx->nested.msrs.secondary_ctls_high &= ~control;
4564	}
4565}
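
/*
 * For illustration, the "enabled == exiting" test above reduces to this
 * truth table for when the control bit is cleared:
 *
 *	                   opt-in (exiting=false)   opt-out (exiting=true)
 *	feature enabled    keep control             clear control
 *	feature disabled   clear control            keep control
 */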
4566
4567/*
4568 * Wrapper macro for the common case of adjusting a secondary execution control
4569 * based on a single guest CPUID bit, with a dedicated feature bit.  This also
4570 * verifies that the control is actually supported by KVM and hardware.
4571 */
4572#define vmx_adjust_sec_exec_control(vmx, exec_control, name, feat_name, ctrl_name, exiting)	\
4573({												\
4574	struct kvm_vcpu *__vcpu = &(vmx)->vcpu;							\
4575	bool __enabled;										\
4576												\
4577	if (cpu_has_vmx_##name()) {								\
4578		if (kvm_is_governed_feature(X86_FEATURE_##feat_name))				\
4579			__enabled = guest_can_use(__vcpu, X86_FEATURE_##feat_name);		\
4580		else										\
4581			__enabled = guest_cpuid_has(__vcpu, X86_FEATURE_##feat_name);		\
4582		vmx_adjust_secondary_exec_control(vmx, exec_control, SECONDARY_EXEC_##ctrl_name,\
4583						  __enabled, exiting);				\
4584	}											\
4585})
4586
4587/* More macro magic for ENABLE_/opt-in versus _EXITING/opt-out controls. */
4588#define vmx_adjust_sec_exec_feature(vmx, exec_control, lname, uname) \
4589	vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, ENABLE_##uname, false)
4590
4591#define vmx_adjust_sec_exec_exiting(vmx, exec_control, lname, uname) \
4592	vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, uname##_EXITING, true)
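
/*
 * For illustration: vmx_adjust_sec_exec_feature(vmx, &exec_control, xsaves,
 * XSAVES), used below, expands to vmx_adjust_sec_exec_control(vmx,
 * &exec_control, xsaves, XSAVES, ENABLE_XSAVES, false), i.e. it checks
 * cpu_has_vmx_xsaves(), derives __enabled from whether XSAVES is exposed to
 * the guest, and adjusts SECONDARY_EXEC_ENABLE_XSAVES as an opt-in control.
 * The _exiting variant instead pastes uname##_EXITING (e.g.
 * SECONDARY_EXEC_RDRAND_EXITING) and treats the bit as opt-out.
 */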
4593
4594static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
4595{
4596	struct kvm_vcpu *vcpu = &vmx->vcpu;
4597
4598	u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
4599
4600	if (vmx_pt_mode_is_system())
4601		exec_control &= ~(SECONDARY_EXEC_PT_USE_GPA | SECONDARY_EXEC_PT_CONCEAL_VMX);
4602	if (!cpu_need_virtualize_apic_accesses(vcpu))
4603		exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
4604	if (vmx->vpid == 0)
4605		exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
4606	if (!enable_ept) {
4607		exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
4608		enable_unrestricted_guest = 0;
4609	}
4610	if (!enable_unrestricted_guest)
4611		exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
4612	if (kvm_pause_in_guest(vmx->vcpu.kvm))
4613		exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
4614	if (!kvm_vcpu_apicv_active(vcpu))
4615		exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
4616				  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
4617	exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
4618
4619	/*
4620	 * KVM doesn't support VMFUNC for L1, but the control is set in KVM's
4621	 * base configuration as KVM emulates VMFUNC[EPTP_SWITCHING] for L2.
4622	 */
4623	exec_control &= ~SECONDARY_EXEC_ENABLE_VMFUNC;
4624
4625	/* SECONDARY_EXEC_DESC is enabled/disabled on writes to CR4.UMIP,
4626	 * in vmx_set_cr4.  */
4627	exec_control &= ~SECONDARY_EXEC_DESC;
4628
4629	/*
4630	 * SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD
4631	 * (handle_vmptrld).  We can NOT enable shadow_vmcs here because we
4632	 * don't yet have a current VMCS12.
4633	 */
4634	exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
4635
4636	/*
4637	 * PML is enabled/disabled when dirty logging of memslots changes, but
4638	 * it needs to be set here when dirty logging is already active, e.g.
4639	 * if this vCPU was created after dirty logging was enabled.
4640	 */
4641	if (!enable_pml || !atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
4642		exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
4643
4644	vmx_adjust_sec_exec_feature(vmx, &exec_control, xsaves, XSAVES);
4645
4646	/*
4647	 * RDPID is also gated by ENABLE_RDTSCP, turn on the control if either
4648	 * feature is exposed to the guest.  This creates a virtualization hole
4649	 * if both are supported in hardware but only one is exposed to the
4650	 * guest, but letting the guest execute RDTSCP or RDPID when either one
4651	 * is advertised is preferable to emulating the advertised instruction
4652	 * in KVM on #UD, and obviously better than incorrectly injecting #UD.
4653	 */
4654	if (cpu_has_vmx_rdtscp()) {
4655		bool rdpid_or_rdtscp_enabled =
4656			guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
4657			guest_cpuid_has(vcpu, X86_FEATURE_RDPID);
4658
4659		vmx_adjust_secondary_exec_control(vmx, &exec_control,
4660						  SECONDARY_EXEC_ENABLE_RDTSCP,
4661						  rdpid_or_rdtscp_enabled, false);
4662	}
4663
4664	vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID);
4665
4666	vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND);
4667	vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdseed, RDSEED);
4668
4669	vmx_adjust_sec_exec_control(vmx, &exec_control, waitpkg, WAITPKG,
4670				    ENABLE_USR_WAIT_PAUSE, false);
4671
4672	if (!vcpu->kvm->arch.bus_lock_detection_enabled)
4673		exec_control &= ~SECONDARY_EXEC_BUS_LOCK_DETECTION;
4674
4675	if (!kvm_notify_vmexit_enabled(vcpu->kvm))
4676		exec_control &= ~SECONDARY_EXEC_NOTIFY_VM_EXITING;
4677
4678	return exec_control;
4679}
4680
4681static inline int vmx_get_pid_table_order(struct kvm *kvm)
4682{
4683	return get_order(kvm->arch.max_vcpu_ids * sizeof(*to_kvm_vmx(kvm)->pid_table));
4684}
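
/*
 * For illustration, assuming 8-byte PID-pointer table entries and 4 KiB
 * pages: a VM with kvm->arch.max_vcpu_ids == 1024 needs 1024 * 8 = 8 KiB, so
 * the order is get_order(8192) == 1 and vmx_alloc_ipiv_pid_table() below
 * allocates two zeroed pages.
 */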
4685
4686static int vmx_alloc_ipiv_pid_table(struct kvm *kvm)
4687{
4688	struct page *pages;
4689	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
4690
4691	if (!irqchip_in_kernel(kvm) || !enable_ipiv)
4692		return 0;
4693
4694	if (kvm_vmx->pid_table)
4695		return 0;
4696
4697	pages = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO,
4698			    vmx_get_pid_table_order(kvm));
4699	if (!pages)
4700		return -ENOMEM;
4701
4702	kvm_vmx->pid_table = (void *)page_address(pages);
4703	return 0;
4704}
4705
4706static int vmx_vcpu_precreate(struct kvm *kvm)
4707{
4708	return vmx_alloc_ipiv_pid_table(kvm);
4709}
4710
4711#define VMX_XSS_EXIT_BITMAP 0
4712
4713static void init_vmcs(struct vcpu_vmx *vmx)
4714{
4715	struct kvm *kvm = vmx->vcpu.kvm;
4716	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
4717
4718	if (nested)
4719		nested_vmx_set_vmcs_shadowing_bitmap();
4720
4721	if (cpu_has_vmx_msr_bitmap())
4722		vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap));
4723
4724	vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA); /* 22.3.1.5 */
4725
4726	/* Control */
4727	pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
4728
4729	exec_controls_set(vmx, vmx_exec_control(vmx));
4730
4731	if (cpu_has_secondary_exec_ctrls())
4732		secondary_exec_controls_set(vmx, vmx_secondary_exec_control(vmx));
4733
4734	if (cpu_has_tertiary_exec_ctrls())
4735		tertiary_exec_controls_set(vmx, vmx_tertiary_exec_control(vmx));
4736
4737	if (enable_apicv && lapic_in_kernel(&vmx->vcpu)) {
4738		vmcs_write64(EOI_EXIT_BITMAP0, 0);
4739		vmcs_write64(EOI_EXIT_BITMAP1, 0);
4740		vmcs_write64(EOI_EXIT_BITMAP2, 0);
4741		vmcs_write64(EOI_EXIT_BITMAP3, 0);
4742
4743		vmcs_write16(GUEST_INTR_STATUS, 0);
4744
4745		vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR);
4746		vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
4747	}
4748
4749	if (vmx_can_use_ipiv(&vmx->vcpu)) {
4750		vmcs_write64(PID_POINTER_TABLE, __pa(kvm_vmx->pid_table));
4751		vmcs_write16(LAST_PID_POINTER_INDEX, kvm->arch.max_vcpu_ids - 1);
4752	}
4753
4754	if (!kvm_pause_in_guest(kvm)) {
4755		vmcs_write32(PLE_GAP, ple_gap);
4756		vmx->ple_window = ple_window;
4757		vmx->ple_window_dirty = true;
4758	}
4759
4760	if (kvm_notify_vmexit_enabled(kvm))
4761		vmcs_write32(NOTIFY_WINDOW, kvm->arch.notify_window);
4762
4763	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
4764	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
4765	vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */
4766
4767	vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
4768	vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
4769	vmx_set_constant_host_state(vmx);
4770	vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
4771	vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
4772
4773	if (cpu_has_vmx_vmfunc())
4774		vmcs_write64(VM_FUNCTION_CONTROL, 0);
4775
4776	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
4777	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
4778	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
4779	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
4780	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
4781
4782	if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
4783		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
4784
4785	vm_exit_controls_set(vmx, vmx_vmexit_ctrl());
4786
4787	/* 22.2.1, 20.8.1 */
4788	vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
4789
4790	vmx->vcpu.arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
4791	vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
4792
4793	set_cr4_guest_host_mask(vmx);
4794
4795	if (vmx->vpid != 0)
4796		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
4797
4798	if (cpu_has_vmx_xsaves())
4799		vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
4800
4801	if (enable_pml) {
4802		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
4803		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
4804	}
4805
4806	vmx_write_encls_bitmap(&vmx->vcpu, NULL);
4807
4808	if (vmx_pt_mode_is_host_guest()) {
4809		memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc));
4810		/* Bits[6:0] are forced to 1; writes are ignored. */
4811		vmx->pt_desc.guest.output_mask = 0x7F;
4812		vmcs_write64(GUEST_IA32_RTIT_CTL, 0);
4813	}
4814
4815	vmcs_write32(GUEST_SYSENTER_CS, 0);
4816	vmcs_writel(GUEST_SYSENTER_ESP, 0);
4817	vmcs_writel(GUEST_SYSENTER_EIP, 0);
4818	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
4819
4820	if (cpu_has_vmx_tpr_shadow()) {
4821		vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
4822		if (cpu_need_tpr_shadow(&vmx->vcpu))
4823			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
4824				     __pa(vmx->vcpu.arch.apic->regs));
4825		vmcs_write32(TPR_THRESHOLD, 0);
4826	}
4827
4828	vmx_setup_uret_msrs(vmx);
4829}
4830
4831static void __vmx_vcpu_reset(struct kvm_vcpu *vcpu)
4832{
4833	struct vcpu_vmx *vmx = to_vmx(vcpu);
4834
4835	init_vmcs(vmx);
4836
4837	if (nested)
4838		memcpy(&vmx->nested.msrs, &vmcs_config.nested, sizeof(vmx->nested.msrs));
4839
4840	vcpu_setup_sgx_lepubkeyhash(vcpu);
4841
4842	vmx->nested.posted_intr_nv = -1;
4843	vmx->nested.vmxon_ptr = INVALID_GPA;
4844	vmx->nested.current_vmptr = INVALID_GPA;
4845	vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
4846
4847	vcpu->arch.microcode_version = 0x100000000ULL;
4848	vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED;
4849
4850	/*
4851	 * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
4852	 * or POSTED_INTR_WAKEUP_VECTOR.
4853	 */
4854	vmx->pi_desc.nv = POSTED_INTR_VECTOR;
4855	vmx->pi_desc.sn = 1;
4856}
4857
4858static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
4859{
4860	struct vcpu_vmx *vmx = to_vmx(vcpu);
4861
4862	if (!init_event)
4863		__vmx_vcpu_reset(vcpu);
4864
4865	vmx->rmode.vm86_active = 0;
4866	vmx->spec_ctrl = 0;
4867
4868	vmx->msr_ia32_umwait_control = 0;
4869
4870	vmx->hv_deadline_tsc = -1;
4871	kvm_set_cr8(vcpu, 0);
4872
4873	vmx_segment_cache_clear(vmx);
4874	kvm_register_mark_available(vcpu, VCPU_EXREG_SEGMENTS);
4875
4876	seg_setup(VCPU_SREG_CS);
4877	vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
4878	vmcs_writel(GUEST_CS_BASE, 0xffff0000ul);
4879
4880	seg_setup(VCPU_SREG_DS);
4881	seg_setup(VCPU_SREG_ES);
4882	seg_setup(VCPU_SREG_FS);
4883	seg_setup(VCPU_SREG_GS);
4884	seg_setup(VCPU_SREG_SS);
4885
4886	vmcs_write16(GUEST_TR_SELECTOR, 0);
4887	vmcs_writel(GUEST_TR_BASE, 0);
4888	vmcs_write32(GUEST_TR_LIMIT, 0xffff);
4889	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
4890
4891	vmcs_write16(GUEST_LDTR_SELECTOR, 0);
4892	vmcs_writel(GUEST_LDTR_BASE, 0);
4893	vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
4894	vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
4895
4896	vmcs_writel(GUEST_GDTR_BASE, 0);
4897	vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
4898
4899	vmcs_writel(GUEST_IDTR_BASE, 0);
4900	vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
4901
4902	vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
4903	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
4904	vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0);
4905	if (kvm_mpx_supported())
4906		vmcs_write64(GUEST_BNDCFGS, 0);
4907
4908	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */
4909
4910	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
4911
4912	vpid_sync_context(vmx->vpid);
4913
4914	vmx_update_fb_clear_dis(vcpu, vmx);
4915}
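
/*
 * For illustration: the CS values written above (selector 0xf000, base
 * 0xffff0000) reproduce the architectural reset state.  Together with the
 * reset RIP of 0xfff0 established by common KVM code, the first instruction
 * is fetched from 0xffff0000 + 0xfff0 = 0xfffffff0, the usual x86 reset
 * vector.
 */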
4916
4917static void vmx_enable_irq_window(struct kvm_vcpu *vcpu)
4918{
4919	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
4920}
4921
4922static void vmx_enable_nmi_window(struct kvm_vcpu *vcpu)
4923{
4924	if (!enable_vnmi ||
4925	    vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
4926		vmx_enable_irq_window(vcpu);
4927		return;
4928	}
4929
4930	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
4931}
4932
4933static void vmx_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
4934{
4935	struct vcpu_vmx *vmx = to_vmx(vcpu);
4936	uint32_t intr;
4937	int irq = vcpu->arch.interrupt.nr;
4938
4939	trace_kvm_inj_virq(irq, vcpu->arch.interrupt.soft, reinjected);
4940
4941	++vcpu->stat.irq_injections;
4942	if (vmx->rmode.vm86_active) {
4943		int inc_eip = 0;
4944		if (vcpu->arch.interrupt.soft)
4945			inc_eip = vcpu->arch.event_exit_inst_len;
4946		kvm_inject_realmode_interrupt(vcpu, irq, inc_eip);
4947		return;
4948	}
4949	intr = irq | INTR_INFO_VALID_MASK;
4950	if (vcpu->arch.interrupt.soft) {
4951		intr |= INTR_TYPE_SOFT_INTR;
4952		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
4953			     vmx->vcpu.arch.event_exit_inst_len);
4954	} else
4955		intr |= INTR_TYPE_EXT_INTR;
4956	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
4957
4958	vmx_clear_hlt(vcpu);
4959}
4960
4961static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
4962{
4963	struct vcpu_vmx *vmx = to_vmx(vcpu);
4964
4965	if (!enable_vnmi) {
4966		/*
4967		 * Tracking the NMI-blocked state in software is built upon
4968		 * finding the next open IRQ window. This, in turn, depends on
4969		 * well-behaving guests: They have to keep IRQs disabled at
4970		 * least as long as the NMI handler runs. Otherwise we may
4971		 * cause NMI nesting, maybe breaking the guest. But as this is
4972		 * highly unlikely, we can live with the residual risk.
4973		 */
4974		vmx->loaded_vmcs->soft_vnmi_blocked = 1;
4975		vmx->loaded_vmcs->vnmi_blocked_time = 0;
4976	}
4977
4978	++vcpu->stat.nmi_injections;
4979	vmx->loaded_vmcs->nmi_known_unmasked = false;
4980
4981	if (vmx->rmode.vm86_active) {
4982		kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0);
4983		return;
4984	}
4985
4986	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
4987			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
4988
4989	vmx_clear_hlt(vcpu);
4990}
4991
4992bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
4993{
4994	struct vcpu_vmx *vmx = to_vmx(vcpu);
4995	bool masked;
4996
4997	if (!enable_vnmi)
4998		return vmx->loaded_vmcs->soft_vnmi_blocked;
4999	if (vmx->loaded_vmcs->nmi_known_unmasked)
5000		return false;
5001	masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
5002	vmx->loaded_vmcs->nmi_known_unmasked = !masked;
5003	return masked;
5004}
5005
5006void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
5007{
5008	struct vcpu_vmx *vmx = to_vmx(vcpu);
5009
5010	if (!enable_vnmi) {
5011		if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
5012			vmx->loaded_vmcs->soft_vnmi_blocked = masked;
5013			vmx->loaded_vmcs->vnmi_blocked_time = 0;
5014		}
5015	} else {
5016		vmx->loaded_vmcs->nmi_known_unmasked = !masked;
5017		if (masked)
5018			vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
5019				      GUEST_INTR_STATE_NMI);
5020		else
5021			vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
5022					GUEST_INTR_STATE_NMI);
5023	}
5024}
5025
5026bool vmx_nmi_blocked(struct kvm_vcpu *vcpu)
5027{
5028	if (is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
5029		return false;
5030
5031	if (!enable_vnmi && to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
5032		return true;
5033
5034	return (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
5035		(GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI |
5036		 GUEST_INTR_STATE_NMI));
5037}
5038
5039static int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
5040{
5041	if (to_vmx(vcpu)->nested.nested_run_pending)
5042		return -EBUSY;
5043
5044	/* An NMI must not be injected into L2 if it's supposed to VM-Exit.  */
5045	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
5046		return -EBUSY;
5047
5048	return !vmx_nmi_blocked(vcpu);
5049}
5050
5051bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
5052{
5053	if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
5054		return false;
5055
5056	return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) ||
5057	       (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
5058		(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
5059}
5060
5061static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
5062{
5063	if (to_vmx(vcpu)->nested.nested_run_pending)
5064		return -EBUSY;
5065
5066	/*
5067	 * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
5068	 * e.g. if the IRQ arrived asynchronously after checking nested events.
5069	 */
5070	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
5071		return -EBUSY;
5072
5073	return !vmx_interrupt_blocked(vcpu);
5074}
5075
5076static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
5077{
5078	void __user *ret;
5079
5080	if (enable_unrestricted_guest)
5081		return 0;
5082
5083	mutex_lock(&kvm->slots_lock);
5084	ret = __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
5085				      PAGE_SIZE * 3);
5086	mutex_unlock(&kvm->slots_lock);
5087
5088	if (IS_ERR(ret))
5089		return PTR_ERR(ret);
5090
5091	to_kvm_vmx(kvm)->tss_addr = addr;
5092
5093	return init_rmode_tss(kvm, ret);
5094}
5095
5096static int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
5097{
5098	to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr;
5099	return 0;
5100}
5101
5102static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
5103{
5104	switch (vec) {
5105	case BP_VECTOR:
5106		/*
5107		 * Update instruction length as we may reinject the exception
5108		 * from user space while in guest debugging mode.
5109		 */
5110		to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
5111			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
5112		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
5113			return false;
5114		fallthrough;
5115	case DB_VECTOR:
5116		return !(vcpu->guest_debug &
5117			(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP));
5118	case DE_VECTOR:
5119	case OF_VECTOR:
5120	case BR_VECTOR:
5121	case UD_VECTOR:
5122	case DF_VECTOR:
5123	case SS_VECTOR:
5124	case GP_VECTOR:
5125	case MF_VECTOR:
5126		return true;
5127	}
5128	return false;
5129}
5130
5131static int handle_rmode_exception(struct kvm_vcpu *vcpu,
5132				  int vec, u32 err_code)
5133{
5134	/*
5135	 * An instruction with the address-size override prefix (opcode 0x67)
5136	 * causes a #SS fault with a 0 error code in VM86 mode.
5137	 */
5138	if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
5139		if (kvm_emulate_instruction(vcpu, 0)) {
5140			if (vcpu->arch.halt_request) {
5141				vcpu->arch.halt_request = 0;
5142				return kvm_emulate_halt_noskip(vcpu);
5143			}
5144			return 1;
5145		}
5146		return 0;
5147	}
5148
5149	/*
5150	 * Forward all other exceptions that are valid in real mode.
5151	 * FIXME: Breaks guest debugging in real mode, needs to be fixed with
5152	 *        the required debugging infrastructure rework.
5153	 */
5154	kvm_queue_exception(vcpu, vec);
5155	return 1;
5156}
5157
5158static int handle_machine_check(struct kvm_vcpu *vcpu)
5159{
5160	/* handled by vmx_vcpu_run() */
5161	return 1;
5162}
5163
5164/*
5165 * If the host has split lock detection disabled, then #AC is
5166 * unconditionally injected into the guest, which is the pre split lock
5167 * detection behaviour.
5168 *
5169 * If the host has split lock detection enabled then #AC is
5170 * only injected into the guest when:
5171 *  - Guest CPL == 3 (user mode)
5172 *  - Guest has #AC detection enabled in CR0
5173 *  - Guest EFLAGS has AC bit set
5174 */
5175bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu)
5176{
5177	if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
5178		return true;
5179
5180	return vmx_get_cpl(vcpu) == 3 && kvm_is_cr0_bit_set(vcpu, X86_CR0_AM) &&
5181	       (kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
5182}
5183
5184static int handle_exception_nmi(struct kvm_vcpu *vcpu)
5185{
5186	struct vcpu_vmx *vmx = to_vmx(vcpu);
5187	struct kvm_run *kvm_run = vcpu->run;
5188	u32 intr_info, ex_no, error_code;
5189	unsigned long cr2, dr6;
5190	u32 vect_info;
5191
5192	vect_info = vmx->idt_vectoring_info;
5193	intr_info = vmx_get_intr_info(vcpu);
5194
5195	/*
5196	 * Machine checks are handled by handle_exception_irqoff(), or by
5197	 * vmx_vcpu_run() if a #MC occurs on VM-Entry.  NMIs are handled by
5198	 * vmx_vcpu_enter_exit().
5199	 */
5200	if (is_machine_check(intr_info) || is_nmi(intr_info))
5201		return 1;
5202
5203	/*
5204	 * Queue the exception here instead of in handle_nm_fault_irqoff().
5205	 * This ensures the nested_vmx check is not skipped so vmexit can
5206	 * be reflected to L1 (when it intercepts #NM) before reaching this
5207	 * point.
5208	 */
5209	if (is_nm_fault(intr_info)) {
5210		kvm_queue_exception(vcpu, NM_VECTOR);
5211		return 1;
5212	}
5213
5214	if (is_invalid_opcode(intr_info))
5215		return handle_ud(vcpu);
5216
5217	error_code = 0;
5218	if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
5219		error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
5220
5221	if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
5222		WARN_ON_ONCE(!enable_vmware_backdoor);
5223
5224		/*
5225		 * VMware backdoor emulation on #GP interception only handles
5226		 * IN{S}, OUT{S}, and RDPMC, none of which generate a non-zero
5227		 * error code on #GP.
5228		 */
5229		if (error_code) {
5230			kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
5231			return 1;
5232		}
5233		return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP);
5234	}
5235
5236	/*
5237	 * The #PF with PFEC.RSVD = 1 indicates the guest is accessing
5238	 * MMIO; it is better to report an internal error.
5239	 * See the comments in vmx_handle_exit.
5240	 */
5241	if ((vect_info & VECTORING_INFO_VALID_MASK) &&
5242	    !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
5243		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
5244		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
5245		vcpu->run->internal.ndata = 4;
5246		vcpu->run->internal.data[0] = vect_info;
5247		vcpu->run->internal.data[1] = intr_info;
5248		vcpu->run->internal.data[2] = error_code;
5249		vcpu->run->internal.data[3] = vcpu->arch.last_vmentry_cpu;
5250		return 0;
5251	}
5252
5253	if (is_page_fault(intr_info)) {
5254		cr2 = vmx_get_exit_qual(vcpu);
5255		if (enable_ept && !vcpu->arch.apf.host_apf_flags) {
5256			/*
5257			 * EPT will cause page fault only if we need to
5258			 * detect illegal GPAs.
5259			 */
5260			WARN_ON_ONCE(!allow_smaller_maxphyaddr);
5261			kvm_fixup_and_inject_pf_error(vcpu, cr2, error_code);
5262			return 1;
5263		} else
5264			return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
5265	}
5266
5267	ex_no = intr_info & INTR_INFO_VECTOR_MASK;
5268
5269	if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
5270		return handle_rmode_exception(vcpu, ex_no, error_code);
5271
5272	switch (ex_no) {
5273	case DB_VECTOR:
5274		dr6 = vmx_get_exit_qual(vcpu);
5275		if (!(vcpu->guest_debug &
5276		      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
5277			/*
5278			 * If the #DB was due to ICEBP, a.k.a. INT1, skip the
5279			 * instruction.  ICEBP generates a trap-like #DB, but
5280			 * despite its interception control being tied to #DB,
5281			 * is an instruction intercept, i.e. the VM-Exit occurs
5282			 * on the ICEBP itself.  Use the inner "skip" helper to
5283			 * avoid single-step #DB and MTF updates, as ICEBP is
5284			 * higher priority.  Note, skipping ICEBP still clears
5285			 * STI and MOVSS blocking.
5286			 *
5287			 * For all other #DBs, set vmcs.PENDING_DBG_EXCEPTIONS.BS
5288			 * if single-step is enabled in RFLAGS and STI or MOVSS
5289			 * blocking is active, as the CPU doesn't set the bit
5290			 * on VM-Exit due to #DB interception.  VM-Entry has a
5291			 * consistency check that a single-step #DB is pending
5292			 * in this scenario as the previous instruction cannot
5293			 * have toggled RFLAGS.TF 0=>1 (because STI and POP/MOV
5294			 * don't modify RFLAGS), therefore the one instruction
5295			 * delay when activating single-step breakpoints must
5296			 * have already expired.  Note, the CPU sets/clears BS
5297			 * as appropriate for all other VM-Exits types.
5298			 */
5299			if (is_icebp(intr_info))
5300				WARN_ON(!skip_emulated_instruction(vcpu));
5301			else if ((vmx_get_rflags(vcpu) & X86_EFLAGS_TF) &&
5302				 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
5303				  (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)))
5304				vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
5305					    vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS) | DR6_BS);
5306
5307			kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
5308			return 1;
5309		}
5310		kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW;
5311		kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
5312		fallthrough;
5313	case BP_VECTOR:
5314		/*
5315		 * Update instruction length as we may reinject #BP from
5316		 * user space while in guest debugging mode. Reading it for
5317		 * #DB as well causes no harm; it is not used in that case.
5318		 */
5319		vmx->vcpu.arch.event_exit_inst_len =
5320			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
5321		kvm_run->exit_reason = KVM_EXIT_DEBUG;
5322		kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
5323		kvm_run->debug.arch.exception = ex_no;
5324		break;
5325	case AC_VECTOR:
5326		if (vmx_guest_inject_ac(vcpu)) {
5327			kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
5328			return 1;
5329		}
5330
5331		/*
5332		 * Handle split lock. Depending on detection mode this will
5333		 * either warn and disable split lock detection for this
5334		 * task or force SIGBUS on it.
5335		 */
5336		if (handle_guest_split_lock(kvm_rip_read(vcpu)))
5337			return 1;
5338		fallthrough;
5339	default:
5340		kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
5341		kvm_run->ex.exception = ex_no;
5342		kvm_run->ex.error_code = error_code;
5343		break;
5344	}
5345	return 0;
5346}
5347
5348static __always_inline int handle_external_interrupt(struct kvm_vcpu *vcpu)
5349{
5350	++vcpu->stat.irq_exits;
5351	return 1;
5352}
5353
5354static int handle_triple_fault(struct kvm_vcpu *vcpu)
5355{
5356	vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
5357	vcpu->mmio_needed = 0;
5358	return 0;
5359}
5360
5361static int handle_io(struct kvm_vcpu *vcpu)
5362{
5363	unsigned long exit_qualification;
5364	int size, in, string;
5365	unsigned port;
5366
5367	exit_qualification = vmx_get_exit_qual(vcpu);
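	/*
	 * Per the SDM, the I/O exit qualification encodes: bits 2:0 =
	 * access size - 1, bit 3 = direction (1 = IN), bit 4 = string
	 * instruction, and bits 31:16 = port number.
	 */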
5368	string = (exit_qualification & 16) != 0;
5369
5370	++vcpu->stat.io_exits;
5371
5372	if (string)
5373		return kvm_emulate_instruction(vcpu, 0);
5374
5375	port = exit_qualification >> 16;
5376	size = (exit_qualification & 7) + 1;
5377	in = (exit_qualification & 8) != 0;
5378
5379	return kvm_fast_pio(vcpu, size, port, in);
5380}
5381
5382static void
5383vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
5384{
5385	/*
5386	 * Patch in the VMCALL instruction:
5387	 */
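	/* 0F 01 C1 is the instruction encoding of VMCALL. */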
5388	hypercall[0] = 0x0f;
5389	hypercall[1] = 0x01;
5390	hypercall[2] = 0xc1;
5391}
5392
5393/* called to set cr0 as appropriate for a mov-to-cr0 exit. */
5394static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
5395{
5396	if (is_guest_mode(vcpu)) {
5397		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5398		unsigned long orig_val = val;
5399
5400		/*
5401		 * We get here when L2 changed cr0 in a way that did not change
5402		 * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
5403		 * but did change L0 shadowed bits. So we first calculate the
5404		 * effective cr0 value that L1 would like to write into the
5405		 * hardware. It consists of the L2-owned bits from the new
5406		 * value combined with the L1-owned bits from L1's guest_cr0.
5407		 */
5408		val = (val & ~vmcs12->cr0_guest_host_mask) |
5409			(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
5410
5411		if (kvm_set_cr0(vcpu, val))
5412			return 1;
5413		vmcs_writel(CR0_READ_SHADOW, orig_val);
5414		return 0;
5415	} else {
5416		return kvm_set_cr0(vcpu, val);
5417	}
5418}
5419
5420static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
5421{
5422	if (is_guest_mode(vcpu)) {
5423		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5424		unsigned long orig_val = val;
5425
5426		/* analogously to handle_set_cr0 */
5427		val = (val & ~vmcs12->cr4_guest_host_mask) |
5428			(vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask);
5429		if (kvm_set_cr4(vcpu, val))
5430			return 1;
5431		vmcs_writel(CR4_READ_SHADOW, orig_val);
5432		return 0;
5433	} else
5434		return kvm_set_cr4(vcpu, val);
5435}
5436
5437static int handle_desc(struct kvm_vcpu *vcpu)
5438{
5439	/*
5440	 * UMIP emulation relies on intercepting writes to CR4.UMIP, i.e. this
5441	 * and other code needs to be updated if UMIP can be guest owned.
5442	 */
5443	BUILD_BUG_ON(KVM_POSSIBLE_CR4_GUEST_BITS & X86_CR4_UMIP);
5444
5445	WARN_ON_ONCE(!kvm_is_cr4_bit_set(vcpu, X86_CR4_UMIP));
5446	return kvm_emulate_instruction(vcpu, 0);
5447}
5448
5449static int handle_cr(struct kvm_vcpu *vcpu)
5450{
5451	unsigned long exit_qualification, val;
5452	int cr;
5453	int reg;
5454	int err;
5455	int ret;
5456
5457	exit_qualification = vmx_get_exit_qual(vcpu);
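	/*
	 * CR-access exit qualification: bits 3:0 = control register number,
	 * bits 5:4 = access type (0 = MOV to CR, 1 = MOV from CR, 2 = CLTS,
	 * 3 = LMSW), bits 11:8 = source/destination GPR.
	 */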
5458	cr = exit_qualification & 15;
5459	reg = (exit_qualification >> 8) & 15;
5460	switch ((exit_qualification >> 4) & 3) {
5461	case 0: /* mov to cr */
5462		val = kvm_register_read(vcpu, reg);
5463		trace_kvm_cr_write(cr, val);
5464		switch (cr) {
5465		case 0:
5466			err = handle_set_cr0(vcpu, val);
5467			return kvm_complete_insn_gp(vcpu, err);
5468		case 3:
5469			WARN_ON_ONCE(enable_unrestricted_guest);
5470
5471			err = kvm_set_cr3(vcpu, val);
5472			return kvm_complete_insn_gp(vcpu, err);
5473		case 4:
5474			err = handle_set_cr4(vcpu, val);
5475			return kvm_complete_insn_gp(vcpu, err);
5476		case 8: {
5477				u8 cr8_prev = kvm_get_cr8(vcpu);
5478				u8 cr8 = (u8)val;
5479				err = kvm_set_cr8(vcpu, cr8);
5480				ret = kvm_complete_insn_gp(vcpu, err);
5481				if (lapic_in_kernel(vcpu))
5482					return ret;
5483				if (cr8_prev <= cr8)
5484					return ret;
5485				/*
5486				 * TODO: we might be squashing a
5487				 * KVM_GUESTDBG_SINGLESTEP-triggered
5488				 * KVM_EXIT_DEBUG here.
5489				 */
5490				vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
5491				return 0;
5492			}
5493		}
5494		break;
5495	case 2: /* clts */
5496		KVM_BUG(1, vcpu->kvm, "Guest always owns CR0.TS");
5497		return -EIO;
5498	case 1: /* mov from cr */
5499		switch (cr) {
5500		case 3:
5501			WARN_ON_ONCE(enable_unrestricted_guest);
5502
5503			val = kvm_read_cr3(vcpu);
5504			kvm_register_write(vcpu, reg, val);
5505			trace_kvm_cr_read(cr, val);
5506			return kvm_skip_emulated_instruction(vcpu);
5507		case 8:
5508			val = kvm_get_cr8(vcpu);
5509			kvm_register_write(vcpu, reg, val);
5510			trace_kvm_cr_read(cr, val);
5511			return kvm_skip_emulated_instruction(vcpu);
5512		}
5513		break;
5514	case 3: /* lmsw */
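		/*
		 * The LMSW source data is in bits 31:16 of the exit
		 * qualification; only the low four bits of CR0 (PE, MP, EM
		 * and TS) can be affected by LMSW.
		 */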
5515		val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
5516		trace_kvm_cr_write(0, (kvm_read_cr0_bits(vcpu, ~0xful) | val));
5517		kvm_lmsw(vcpu, val);
5518
5519		return kvm_skip_emulated_instruction(vcpu);
5520	default:
5521		break;
5522	}
5523	vcpu->run->exit_reason = 0;
5524	vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
5525	       (int)(exit_qualification >> 4) & 3, cr);
5526	return 0;
5527}
5528
5529static int handle_dr(struct kvm_vcpu *vcpu)
5530{
5531	unsigned long exit_qualification;
5532	int dr, dr7, reg;
5533	int err = 1;
5534
5535	exit_qualification = vmx_get_exit_qual(vcpu);
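	/*
	 * DR-access exit qualification: bits 2:0 = debug register number,
	 * bit 4 = direction (1 = MOV from DR), bits 11:8 = GPR operand.
	 */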
5536	dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
5537
5538	/* First, if DR does not exist, trigger UD */
5539	if (!kvm_require_dr(vcpu, dr))
5540		return 1;
5541
5542	if (vmx_get_cpl(vcpu) > 0)
5543		goto out;
5544
5545	dr7 = vmcs_readl(GUEST_DR7);
5546	if (dr7 & DR7_GD) {
5547		/*
5548		 * As the vm-exit takes precedence over the debug trap, we
5549		 * need to emulate the latter, either for the host or the
5550		 * guest debugging itself.
5551		 */
5552		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
5553			vcpu->run->debug.arch.dr6 = DR6_BD | DR6_ACTIVE_LOW;
5554			vcpu->run->debug.arch.dr7 = dr7;
5555			vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
5556			vcpu->run->debug.arch.exception = DB_VECTOR;
5557			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
5558			return 0;
5559		} else {
5560			kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BD);
5561			return 1;
5562		}
5563	}
5564
5565	if (vcpu->guest_debug == 0) {
5566		exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);
5567
5568		/*
5569		 * No more DR vmexits; force a reload of the debug registers
5570		 * and reenter on this instruction.  The next vmexit will
5571		 * retrieve the full state of the debug registers.
5572		 */
5573		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
5574		return 1;
5575	}
5576
5577	reg = DEBUG_REG_ACCESS_REG(exit_qualification);
5578	if (exit_qualification & TYPE_MOV_FROM_DR) {
5579		unsigned long val;
5580
5581		kvm_get_dr(vcpu, dr, &val);
5582		kvm_register_write(vcpu, reg, val);
5583		err = 0;
5584	} else {
5585		err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg));
5586	}
5587
5588out:
5589	return kvm_complete_insn_gp(vcpu, err);
5590}
5591
5592static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
5593{
5594	get_debugreg(vcpu->arch.db[0], 0);
5595	get_debugreg(vcpu->arch.db[1], 1);
5596	get_debugreg(vcpu->arch.db[2], 2);
5597	get_debugreg(vcpu->arch.db[3], 3);
5598	get_debugreg(vcpu->arch.dr6, 6);
5599	vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);
5600
5601	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
5602	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);
5603
5604	/*
5605	 * exc_debug expects dr6 to be cleared after it runs; make sure it does
5606	 * not see a stale dr6 from the guest.
5607	 */
5608	set_debugreg(DR6_RESERVED, 6);
5609}
5610
5611static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
5612{
5613	vmcs_writel(GUEST_DR7, val);
5614}
5615
5616static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
5617{
5618	kvm_apic_update_ppr(vcpu);
5619	return 1;
5620}
5621
5622static int handle_interrupt_window(struct kvm_vcpu *vcpu)
5623{
5624	exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
5625
5626	kvm_make_request(KVM_REQ_EVENT, vcpu);
5627
5628	++vcpu->stat.irq_window_exits;
5629	return 1;
5630}
5631
5632static int handle_invlpg(struct kvm_vcpu *vcpu)
5633{
5634	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5635
5636	kvm_mmu_invlpg(vcpu, exit_qualification);
5637	return kvm_skip_emulated_instruction(vcpu);
5638}
5639
5640static int handle_apic_access(struct kvm_vcpu *vcpu)
5641{
5642	if (likely(fasteoi)) {
5643		unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5644		int access_type, offset;
5645
5646		access_type = exit_qualification & APIC_ACCESS_TYPE;
5647		offset = exit_qualification & APIC_ACCESS_OFFSET;
5648		/*
5649		 * A sane guest uses MOV to write the EOI register, and the
5650		 * written value is ignored. Short-circuit here to avoid
5651		 * heavy instruction emulation.
5652		 */
5653		if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
5654		    (offset == APIC_EOI)) {
5655			kvm_lapic_set_eoi(vcpu);
5656			return kvm_skip_emulated_instruction(vcpu);
5657		}
5658	}
5659	return kvm_emulate_instruction(vcpu, 0);
5660}
5661
5662static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
5663{
5664	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5665	int vector = exit_qualification & 0xff;
5666
5667	/* EOI-induced VM exit is trap-like and thus no need to adjust IP */
5668	kvm_apic_set_eoi_accelerated(vcpu, vector);
5669	return 1;
5670}
5671
5672static int handle_apic_write(struct kvm_vcpu *vcpu)
5673{
5674	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5675
5676	/*
5677	 * The APIC-write VM-Exit is trap-like, so KVM doesn't need to advance
5678	 * RIP, and hardware has done any necessary aliasing, offset adjustments,
5679	 * etc... for the access.  I.e. the correct value has already been
5680	 * written to the vAPIC page for the correct 16-byte chunk.  KVM needs only to
5681	 * retrieve the register value and emulate the access.
5682	 */
5683	u32 offset = exit_qualification & 0xff0;
5684
5685	kvm_apic_write_nodecode(vcpu, offset);
5686	return 1;
5687}
5688
5689static int handle_task_switch(struct kvm_vcpu *vcpu)
5690{
5691	struct vcpu_vmx *vmx = to_vmx(vcpu);
5692	unsigned long exit_qualification;
5693	bool has_error_code = false;
5694	u32 error_code = 0;
5695	u16 tss_selector;
5696	int reason, type, idt_v, idt_index;
5697
5698	idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
5699	idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
5700	type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
5701
5702	exit_qualification = vmx_get_exit_qual(vcpu);
5703
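	/*
	 * Task-switch exit qualification: bits 15:0 hold the selector of the
	 * target TSS, bits 31:30 the source of the task switch (CALL, IRET,
	 * JMP or a task gate in the IDT).
	 */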
5704	reason = (u32)exit_qualification >> 30;
5705	if (reason == TASK_SWITCH_GATE && idt_v) {
5706		switch (type) {
5707		case INTR_TYPE_NMI_INTR:
5708			vcpu->arch.nmi_injected = false;
5709			vmx_set_nmi_mask(vcpu, true);
5710			break;
5711		case INTR_TYPE_EXT_INTR:
5712		case INTR_TYPE_SOFT_INTR:
5713			kvm_clear_interrupt_queue(vcpu);
5714			break;
5715		case INTR_TYPE_HARD_EXCEPTION:
5716			if (vmx->idt_vectoring_info &
5717			    VECTORING_INFO_DELIVER_CODE_MASK) {
5718				has_error_code = true;
5719				error_code =
5720					vmcs_read32(IDT_VECTORING_ERROR_CODE);
5721			}
5722			fallthrough;
5723		case INTR_TYPE_SOFT_EXCEPTION:
5724			kvm_clear_exception_queue(vcpu);
5725			break;
5726		default:
5727			break;
5728		}
5729	}
5730	tss_selector = exit_qualification;
5731
5732	if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
5733		       type != INTR_TYPE_EXT_INTR &&
5734		       type != INTR_TYPE_NMI_INTR))
5735		WARN_ON(!skip_emulated_instruction(vcpu));
5736
5737	/*
5738	 * TODO: What about debug traps on tss switch?
5739	 *       Are we supposed to inject them and update dr6?
5740	 */
5741	return kvm_task_switch(vcpu, tss_selector,
5742			       type == INTR_TYPE_SOFT_INTR ? idt_index : -1,
5743			       reason, has_error_code, error_code);
5744}
5745
5746static int handle_ept_violation(struct kvm_vcpu *vcpu)
5747{
5748	unsigned long exit_qualification;
5749	gpa_t gpa;
5750	u64 error_code;
5751
5752	exit_qualification = vmx_get_exit_qual(vcpu);
5753
5754	/*
5755	 * If the EPT violation happened while executing IRET from NMI, the
5756	 * "blocked by NMI" bit has to be set before the next VM entry.
5757	 * There are errata that may cause this bit to not be set:
5758	 * AAK134, BY25.
5759	 */
5760	if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
5761			enable_vnmi &&
5762			(exit_qualification & INTR_INFO_UNBLOCK_NMI))
5763		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
5764
5765	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
5766	trace_kvm_page_fault(vcpu, gpa, exit_qualification);
5767
5768	/* Is it a read fault? */
5769	error_code = (exit_qualification & EPT_VIOLATION_ACC_READ)
5770		     ? PFERR_USER_MASK : 0;
5771	/* Is it a write fault? */
5772	error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE)
5773		      ? PFERR_WRITE_MASK : 0;
5774	/* Is it a fetch fault? */
5775	error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
5776		      ? PFERR_FETCH_MASK : 0;
5777	/* Is the EPT page-table entry present? */
5778	error_code |= (exit_qualification & EPT_VIOLATION_RWX_MASK)
5779		      ? PFERR_PRESENT_MASK : 0;
5780
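	/* Did the fault hit the final GPA or a guest paging-structure entry? */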
5781	error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) != 0 ?
5782	       PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
5783
5784	vcpu->arch.exit_qualification = exit_qualification;
5785
5786	/*
5787	 * Check that the GPA doesn't exceed physical memory limits, as that is
5788	 * a guest page fault.  We have to emulate the instruction here, because
5789	 * if the illegal address is that of a paging structure, then
5790	 * the EPT_VIOLATION_ACC_WRITE bit is set.  Alternatively, if supported we
5791	 * would also use advanced VM-exit information for EPT violations to
5792	 * reconstruct the page fault error code.
5793	 */
5794	if (unlikely(allow_smaller_maxphyaddr && kvm_vcpu_is_illegal_gpa(vcpu, gpa)))
5795		return kvm_emulate_instruction(vcpu, 0);
5796
5797	return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
5798}
5799
5800static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
5801{
5802	gpa_t gpa;
5803
5804	if (!vmx_can_emulate_instruction(vcpu, EMULTYPE_PF, NULL, 0))
5805		return 1;
5806
5807	/*
5808	 * A nested guest cannot optimize MMIO vmexits, because we have an
5809	 * nGPA here instead of the required GPA.
5810	 */
5811	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
5812	if (!is_guest_mode(vcpu) &&
5813	    !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
5814		trace_kvm_fast_mmio(gpa);
5815		return kvm_skip_emulated_instruction(vcpu);
5816	}
5817
5818	return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
5819}
5820
5821static int handle_nmi_window(struct kvm_vcpu *vcpu)
5822{
5823	if (KVM_BUG_ON(!enable_vnmi, vcpu->kvm))
5824		return -EIO;
5825
5826	exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
5827	++vcpu->stat.nmi_window_exits;
5828	kvm_make_request(KVM_REQ_EVENT, vcpu);
5829
5830	return 1;
5831}
5832
5833static bool vmx_emulation_required_with_pending_exception(struct kvm_vcpu *vcpu)
5834{
5835	struct vcpu_vmx *vmx = to_vmx(vcpu);
5836
5837	return vmx->emulation_required && !vmx->rmode.vm86_active &&
5838	       (kvm_is_exception_pending(vcpu) || vcpu->arch.exception.injected);
5839}
5840
5841static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
5842{
5843	struct vcpu_vmx *vmx = to_vmx(vcpu);
5844	bool intr_window_requested;
5845	unsigned count = 130;
5846
5847	intr_window_requested = exec_controls_get(vmx) &
5848				CPU_BASED_INTR_WINDOW_EXITING;
5849
5850	while (vmx->emulation_required && count-- != 0) {
5851		if (intr_window_requested && !vmx_interrupt_blocked(vcpu))
5852			return handle_interrupt_window(&vmx->vcpu);
5853
5854		if (kvm_test_request(KVM_REQ_EVENT, vcpu))
5855			return 1;
5856
5857		if (!kvm_emulate_instruction(vcpu, 0))
5858			return 0;
5859
5860		if (vmx_emulation_required_with_pending_exception(vcpu)) {
5861			kvm_prepare_emulation_failure_exit(vcpu);
5862			return 0;
5863		}
5864
5865		if (vcpu->arch.halt_request) {
5866			vcpu->arch.halt_request = 0;
5867			return kvm_emulate_halt_noskip(vcpu);
5868		}
5869
5870		/*
5871		 * Note, return 1 and not 0; vcpu_run() will invoke
5872		 * xfer_to_guest_mode(), which will create a proper return
5873		 * code.
5874		 */
5875		if (__xfer_to_guest_mode_work_pending())
5876			return 1;
5877	}
5878
5879	return 1;
5880}
5881
5882static int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu)
5883{
5884	if (vmx_emulation_required_with_pending_exception(vcpu)) {
5885		kvm_prepare_emulation_failure_exit(vcpu);
5886		return 0;
5887	}
5888
5889	return 1;
5890}
5891
5892static void grow_ple_window(struct kvm_vcpu *vcpu)
5893{
5894	struct vcpu_vmx *vmx = to_vmx(vcpu);
5895	unsigned int old = vmx->ple_window;
5896
5897	vmx->ple_window = __grow_ple_window(old, ple_window,
5898					    ple_window_grow,
5899					    ple_window_max);
5900
5901	if (vmx->ple_window != old) {
5902		vmx->ple_window_dirty = true;
5903		trace_kvm_ple_window_update(vcpu->vcpu_id,
5904					    vmx->ple_window, old);
5905	}
5906}
5907
5908static void shrink_ple_window(struct kvm_vcpu *vcpu)
5909{
5910	struct vcpu_vmx *vmx = to_vmx(vcpu);
5911	unsigned int old = vmx->ple_window;
5912
5913	vmx->ple_window = __shrink_ple_window(old, ple_window,
5914					      ple_window_shrink,
5915					      ple_window);
5916
5917	if (vmx->ple_window != old) {
5918		vmx->ple_window_dirty = true;
5919		trace_kvm_ple_window_update(vcpu->vcpu_id,
5920					    vmx->ple_window, old);
5921	}
5922}
5923
5924/*
5925 * Indicate a vcpu that is busy-waiting on a spinlock. We do not enable
5926 * PAUSE exiting, so we only get here on a CPU with PAUSE-loop exiting.
5927 */
5928static int handle_pause(struct kvm_vcpu *vcpu)
5929{
5930	if (!kvm_pause_in_guest(vcpu->kvm))
5931		grow_ple_window(vcpu);
5932
5933	/*
5934	 * Intel SDM vol. 3, ch. 25.1.3 says: the "PAUSE-loop exiting"
5935	 * VM-execution control is ignored if CPL > 0. OTOH, KVM
5936	 * never sets PAUSE_EXITING and only sets PLE if supported,
5937	 * so the vcpu must be at CPL=0 if it gets a PAUSE exit.
5938	 */
5939	kvm_vcpu_on_spin(vcpu, true);
5940	return kvm_skip_emulated_instruction(vcpu);
5941}
5942
5943static int handle_monitor_trap(struct kvm_vcpu *vcpu)
5944{
5945	return 1;
5946}
5947
5948static int handle_invpcid(struct kvm_vcpu *vcpu)
5949{
5950	u32 vmx_instruction_info;
5951	unsigned long type;
5952	gva_t gva;
5953	struct {
5954		u64 pcid;
5955		u64 gla;
5956	} operand;
5957	int gpr_index;
5958
5959	if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
5960		kvm_queue_exception(vcpu, UD_VECTOR);
5961		return 1;
5962	}
5963
5964	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5965	gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info);
5966	type = kvm_register_read(vcpu, gpr_index);
5967
5968	/* According to the Intel instruction reference, the memory operand
5969	 * is read even if it isn't needed (e.g., for type==all)
5970	 */
5971	if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
5972				vmx_instruction_info, false,
5973				sizeof(operand), &gva))
5974		return 1;
5975
5976	return kvm_handle_invpcid(vcpu, type, gva);
5977}
5978
5979static int handle_pml_full(struct kvm_vcpu *vcpu)
5980{
5981	unsigned long exit_qualification;
5982
5983	trace_kvm_pml_full(vcpu->vcpu_id);
5984
5985	exit_qualification = vmx_get_exit_qual(vcpu);
5986
5987	/*
5988	 * If the PML buffer FULL exit happened while executing IRET from NMI,
5989	 * the "blocked by NMI" bit has to be set before the next VM entry.
5990	 */
5991	if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
5992			enable_vnmi &&
5993			(exit_qualification & INTR_INFO_UNBLOCK_NMI))
5994		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
5995				GUEST_INTR_STATE_NMI);
5996
5997	/*
5998	 * The PML buffer was already flushed at the beginning of the VM-Exit.
5999	 * Nothing to do here, and there's no userspace involvement needed for PML.
6000	 */
6001	return 1;
6002}
6003
6004static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu)
6005{
6006	struct vcpu_vmx *vmx = to_vmx(vcpu);
6007
6008	if (!vmx->req_immediate_exit &&
6009	    !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) {
6010		kvm_lapic_expired_hv_timer(vcpu);
6011		return EXIT_FASTPATH_REENTER_GUEST;
6012	}
6013
6014	return EXIT_FASTPATH_NONE;
6015}
6016
6017static int handle_preemption_timer(struct kvm_vcpu *vcpu)
6018{
6019	handle_fastpath_preemption_timer(vcpu);
6020	return 1;
6021}
6022
6023/*
6024 * When nested=0, all VMX instruction VM Exits filter here.  The handlers
6025 * are overwritten by nested_vmx_setup() when nested=1.
6026 */
6027static int handle_vmx_instruction(struct kvm_vcpu *vcpu)
6028{
6029	kvm_queue_exception(vcpu, UD_VECTOR);
6030	return 1;
6031}
6032
6033#ifndef CONFIG_X86_SGX_KVM
6034static int handle_encls(struct kvm_vcpu *vcpu)
6035{
6036	/*
6037	 * SGX virtualization is disabled.  There is no software enable bit for
6038	 * SGX, so KVM intercepts all ENCLS leafs and injects a #UD to prevent
6039	 * the guest from executing ENCLS (when SGX is supported by hardware).
6040	 */
6041	kvm_queue_exception(vcpu, UD_VECTOR);
6042	return 1;
6043}
6044#endif /* CONFIG_X86_SGX_KVM */
6045
6046static int handle_bus_lock_vmexit(struct kvm_vcpu *vcpu)
6047{
6048	/*
6049	 * Hardware may or may not set the BUS_LOCK_DETECTED flag on BUS_LOCK
6050	 * VM-Exits. Unconditionally set the flag here and leave the handling to
6051	 * vmx_handle_exit().
6052	 */
6053	to_vmx(vcpu)->exit_reason.bus_lock_detected = true;
6054	return 1;
6055}
6056
6057static int handle_notify(struct kvm_vcpu *vcpu)
6058{
6059	unsigned long exit_qual = vmx_get_exit_qual(vcpu);
6060	bool context_invalid = exit_qual & NOTIFY_VM_CONTEXT_INVALID;
6061
6062	++vcpu->stat.notify_window_exits;
6063
6064	/*
6065	 * If the Notify VM exit happened while executing IRET from NMI, the
6066	 * "blocked by NMI" bit has to be set before the next VM entry.
6067	 */
6068	if (enable_vnmi && (exit_qual & INTR_INFO_UNBLOCK_NMI))
6069		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
6070			      GUEST_INTR_STATE_NMI);
6071
6072	if (vcpu->kvm->arch.notify_vmexit_flags & KVM_X86_NOTIFY_VMEXIT_USER ||
6073	    context_invalid) {
6074		vcpu->run->exit_reason = KVM_EXIT_NOTIFY;
6075		vcpu->run->notify.flags = context_invalid ?
6076					  KVM_NOTIFY_CONTEXT_INVALID : 0;
6077		return 0;
6078	}
6079
6080	return 1;
6081}
6082
6083/*
6084 * The exit handlers return 1 if the exit was handled fully and guest execution
6085 * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
6086 * to be done to userspace and return 0.
6087 */
6088static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
6089	[EXIT_REASON_EXCEPTION_NMI]           = handle_exception_nmi,
6090	[EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
6091	[EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
6092	[EXIT_REASON_NMI_WINDOW]	      = handle_nmi_window,
6093	[EXIT_REASON_IO_INSTRUCTION]          = handle_io,
6094	[EXIT_REASON_CR_ACCESS]               = handle_cr,
6095	[EXIT_REASON_DR_ACCESS]               = handle_dr,
6096	[EXIT_REASON_CPUID]                   = kvm_emulate_cpuid,
6097	[EXIT_REASON_MSR_READ]                = kvm_emulate_rdmsr,
6098	[EXIT_REASON_MSR_WRITE]               = kvm_emulate_wrmsr,
6099	[EXIT_REASON_INTERRUPT_WINDOW]        = handle_interrupt_window,
6100	[EXIT_REASON_HLT]                     = kvm_emulate_halt,
6101	[EXIT_REASON_INVD]		      = kvm_emulate_invd,
6102	[EXIT_REASON_INVLPG]		      = handle_invlpg,
6103	[EXIT_REASON_RDPMC]                   = kvm_emulate_rdpmc,
6104	[EXIT_REASON_VMCALL]                  = kvm_emulate_hypercall,
6105	[EXIT_REASON_VMCLEAR]		      = handle_vmx_instruction,
6106	[EXIT_REASON_VMLAUNCH]		      = handle_vmx_instruction,
6107	[EXIT_REASON_VMPTRLD]		      = handle_vmx_instruction,
6108	[EXIT_REASON_VMPTRST]		      = handle_vmx_instruction,
6109	[EXIT_REASON_VMREAD]		      = handle_vmx_instruction,
6110	[EXIT_REASON_VMRESUME]		      = handle_vmx_instruction,
6111	[EXIT_REASON_VMWRITE]		      = handle_vmx_instruction,
6112	[EXIT_REASON_VMOFF]		      = handle_vmx_instruction,
6113	[EXIT_REASON_VMON]		      = handle_vmx_instruction,
6114	[EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
6115	[EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
6116	[EXIT_REASON_APIC_WRITE]              = handle_apic_write,
6117	[EXIT_REASON_EOI_INDUCED]             = handle_apic_eoi_induced,
6118	[EXIT_REASON_WBINVD]                  = kvm_emulate_wbinvd,
6119	[EXIT_REASON_XSETBV]                  = kvm_emulate_xsetbv,
6120	[EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
6121	[EXIT_REASON_MCE_DURING_VMENTRY]      = handle_machine_check,
6122	[EXIT_REASON_GDTR_IDTR]		      = handle_desc,
6123	[EXIT_REASON_LDTR_TR]		      = handle_desc,
6124	[EXIT_REASON_EPT_VIOLATION]	      = handle_ept_violation,
6125	[EXIT_REASON_EPT_MISCONFIG]           = handle_ept_misconfig,
6126	[EXIT_REASON_PAUSE_INSTRUCTION]       = handle_pause,
6127	[EXIT_REASON_MWAIT_INSTRUCTION]	      = kvm_emulate_mwait,
6128	[EXIT_REASON_MONITOR_TRAP_FLAG]       = handle_monitor_trap,
6129	[EXIT_REASON_MONITOR_INSTRUCTION]     = kvm_emulate_monitor,
6130	[EXIT_REASON_INVEPT]                  = handle_vmx_instruction,
6131	[EXIT_REASON_INVVPID]                 = handle_vmx_instruction,
6132	[EXIT_REASON_RDRAND]                  = kvm_handle_invalid_op,
6133	[EXIT_REASON_RDSEED]                  = kvm_handle_invalid_op,
6134	[EXIT_REASON_PML_FULL]		      = handle_pml_full,
6135	[EXIT_REASON_INVPCID]                 = handle_invpcid,
6136	[EXIT_REASON_VMFUNC]		      = handle_vmx_instruction,
6137	[EXIT_REASON_PREEMPTION_TIMER]	      = handle_preemption_timer,
6138	[EXIT_REASON_ENCLS]		      = handle_encls,
6139	[EXIT_REASON_BUS_LOCK]                = handle_bus_lock_vmexit,
6140	[EXIT_REASON_NOTIFY]		      = handle_notify,
6141};
6142
6143static const int kvm_vmx_max_exit_handlers =
6144	ARRAY_SIZE(kvm_vmx_exit_handlers);
6145
6146static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
6147			      u64 *info1, u64 *info2,
6148			      u32 *intr_info, u32 *error_code)
6149{
6150	struct vcpu_vmx *vmx = to_vmx(vcpu);
6151
6152	*reason = vmx->exit_reason.full;
6153	*info1 = vmx_get_exit_qual(vcpu);
6154	if (!(vmx->exit_reason.failed_vmentry)) {
6155		*info2 = vmx->idt_vectoring_info;
6156		*intr_info = vmx_get_intr_info(vcpu);
6157		if (is_exception_with_error_code(*intr_info))
6158			*error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
6159		else
6160			*error_code = 0;
6161	} else {
6162		*info2 = 0;
6163		*intr_info = 0;
6164		*error_code = 0;
6165	}
6166}
6167
6168static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
6169{
6170	if (vmx->pml_pg) {
6171		__free_page(vmx->pml_pg);
6172		vmx->pml_pg = NULL;
6173	}
6174}
6175
6176static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
6177{
6178	struct vcpu_vmx *vmx = to_vmx(vcpu);
6179	u64 *pml_buf;
6180	u16 pml_idx;
6181
6182	pml_idx = vmcs_read16(GUEST_PML_INDEX);
6183
6184	/* Do nothing if PML buffer is empty */
6185	if (pml_idx == (PML_ENTITY_NUM - 1))
6186		return;
6187
6188	/* PML index always points to next available PML buffer entity */
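	/*
	 * Hardware decrements the index from PML_ENTITY_NUM - 1 as it logs
	 * entries, so an out-of-range index means the buffer is completely
	 * full; otherwise only entries pml_idx + 1 .. PML_ENTITY_NUM - 1
	 * are valid.
	 */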
6189	if (pml_idx >= PML_ENTITY_NUM)
6190		pml_idx = 0;
6191	else
6192		pml_idx++;
6193
6194	pml_buf = page_address(vmx->pml_pg);
6195	for (; pml_idx < PML_ENTITY_NUM; pml_idx++) {
6196		u64 gpa;
6197
6198		gpa = pml_buf[pml_idx];
6199		WARN_ON(gpa & (PAGE_SIZE - 1));
6200		kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
6201	}
6202
6203	/* reset PML index */
6204	vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
6205}
6206
6207static void vmx_dump_sel(char *name, uint32_t sel)
6208{
6209	pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
6210	       name, vmcs_read16(sel),
6211	       vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
6212	       vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
6213	       vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
6214}
6215
6216static void vmx_dump_dtsel(char *name, uint32_t limit)
6217{
6218	pr_err("%s                           limit=0x%08x, base=0x%016lx\n",
6219	       name, vmcs_read32(limit),
6220	       vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
6221}
6222
6223static void vmx_dump_msrs(char *name, struct vmx_msrs *m)
6224{
6225	unsigned int i;
6226	struct vmx_msr_entry *e;
6227
6228	pr_err("MSR %s:\n", name);
6229	for (i = 0, e = m->val; i < m->nr; ++i, ++e)
6230		pr_err("  %2d: msr=0x%08x value=0x%016llx\n", i, e->index, e->value);
6231}
6232
6233void dump_vmcs(struct kvm_vcpu *vcpu)
6234{
6235	struct vcpu_vmx *vmx = to_vmx(vcpu);
6236	u32 vmentry_ctl, vmexit_ctl;
6237	u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control;
6238	u64 tertiary_exec_control;
6239	unsigned long cr4;
6240	int efer_slot;
6241
6242	if (!dump_invalid_vmcs) {
6243		pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n");
6244		return;
6245	}
6246
6247	vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
6248	vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
6249	cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
6250	pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
6251	cr4 = vmcs_readl(GUEST_CR4);
6252
6253	if (cpu_has_secondary_exec_ctrls())
6254		secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
6255	else
6256		secondary_exec_control = 0;
6257
6258	if (cpu_has_tertiary_exec_ctrls())
6259		tertiary_exec_control = vmcs_read64(TERTIARY_VM_EXEC_CONTROL);
6260	else
6261		tertiary_exec_control = 0;
6262
6263	pr_err("VMCS %p, last attempted VM-entry on CPU %d\n",
6264	       vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu);
6265	pr_err("*** Guest State ***\n");
6266	pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
6267	       vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW),
6268	       vmcs_readl(CR0_GUEST_HOST_MASK));
6269	pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
6270	       cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
6271	pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
6272	if (cpu_has_vmx_ept()) {
6273		pr_err("PDPTR0 = 0x%016llx  PDPTR1 = 0x%016llx\n",
6274		       vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
6275		pr_err("PDPTR2 = 0x%016llx  PDPTR3 = 0x%016llx\n",
6276		       vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3));
6277	}
6278	pr_err("RSP = 0x%016lx  RIP = 0x%016lx\n",
6279	       vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP));
6280	pr_err("RFLAGS=0x%08lx         DR7 = 0x%016lx\n",
6281	       vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7));
6282	pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
6283	       vmcs_readl(GUEST_SYSENTER_ESP),
6284	       vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP));
6285	vmx_dump_sel("CS:  ", GUEST_CS_SELECTOR);
6286	vmx_dump_sel("DS:  ", GUEST_DS_SELECTOR);
6287	vmx_dump_sel("SS:  ", GUEST_SS_SELECTOR);
6288	vmx_dump_sel("ES:  ", GUEST_ES_SELECTOR);
6289	vmx_dump_sel("FS:  ", GUEST_FS_SELECTOR);
6290	vmx_dump_sel("GS:  ", GUEST_GS_SELECTOR);
6291	vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT);
6292	vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR);
6293	vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT);
6294	vmx_dump_sel("TR:  ", GUEST_TR_SELECTOR);
6295	efer_slot = vmx_find_loadstore_msr_slot(&vmx->msr_autoload.guest, MSR_EFER);
6296	if (vmentry_ctl & VM_ENTRY_LOAD_IA32_EFER)
6297		pr_err("EFER= 0x%016llx\n", vmcs_read64(GUEST_IA32_EFER));
6298	else if (efer_slot >= 0)
6299		pr_err("EFER= 0x%016llx (autoload)\n",
6300		       vmx->msr_autoload.guest.val[efer_slot].value);
6301	else if (vmentry_ctl & VM_ENTRY_IA32E_MODE)
6302		pr_err("EFER= 0x%016llx (effective)\n",
6303		       vcpu->arch.efer | (EFER_LMA | EFER_LME));
6304	else
6305		pr_err("EFER= 0x%016llx (effective)\n",
6306		       vcpu->arch.efer & ~(EFER_LMA | EFER_LME));
6307	if (vmentry_ctl & VM_ENTRY_LOAD_IA32_PAT)
6308		pr_err("PAT = 0x%016llx\n", vmcs_read64(GUEST_IA32_PAT));
6309	pr_err("DebugCtl = 0x%016llx  DebugExceptions = 0x%016lx\n",
6310	       vmcs_read64(GUEST_IA32_DEBUGCTL),
6311	       vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
6312	if (cpu_has_load_perf_global_ctrl() &&
6313	    vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
6314		pr_err("PerfGlobCtl = 0x%016llx\n",
6315		       vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL));
6316	if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS)
6317		pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS));
6318	pr_err("Interruptibility = %08x  ActivityState = %08x\n",
6319	       vmcs_read32(GUEST_INTERRUPTIBILITY_INFO),
6320	       vmcs_read32(GUEST_ACTIVITY_STATE));
6321	if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
6322		pr_err("InterruptStatus = %04x\n",
6323		       vmcs_read16(GUEST_INTR_STATUS));
6324	if (vmcs_read32(VM_ENTRY_MSR_LOAD_COUNT) > 0)
6325		vmx_dump_msrs("guest autoload", &vmx->msr_autoload.guest);
6326	if (vmcs_read32(VM_EXIT_MSR_STORE_COUNT) > 0)
6327		vmx_dump_msrs("guest autostore", &vmx->msr_autostore.guest);
6328
6329	pr_err("*** Host State ***\n");
6330	pr_err("RIP = 0x%016lx  RSP = 0x%016lx\n",
6331	       vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP));
6332	pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n",
6333	       vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR),
6334	       vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR),
6335	       vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR),
6336	       vmcs_read16(HOST_TR_SELECTOR));
6337	pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n",
6338	       vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE),
6339	       vmcs_readl(HOST_TR_BASE));
6340	pr_err("GDTBase=%016lx IDTBase=%016lx\n",
6341	       vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE));
6342	pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n",
6343	       vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3),
6344	       vmcs_readl(HOST_CR4));
6345	pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
6346	       vmcs_readl(HOST_IA32_SYSENTER_ESP),
6347	       vmcs_read32(HOST_IA32_SYSENTER_CS),
6348	       vmcs_readl(HOST_IA32_SYSENTER_EIP));
6349	if (vmexit_ctl & VM_EXIT_LOAD_IA32_EFER)
6350		pr_err("EFER= 0x%016llx\n", vmcs_read64(HOST_IA32_EFER));
6351	if (vmexit_ctl & VM_EXIT_LOAD_IA32_PAT)
6352		pr_err("PAT = 0x%016llx\n", vmcs_read64(HOST_IA32_PAT));
6353	if (cpu_has_load_perf_global_ctrl() &&
6354	    vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
6355		pr_err("PerfGlobCtl = 0x%016llx\n",
6356		       vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL));
6357	if (vmcs_read32(VM_EXIT_MSR_LOAD_COUNT) > 0)
6358		vmx_dump_msrs("host autoload", &vmx->msr_autoload.host);
6359
6360	pr_err("*** Control State ***\n");
6361	pr_err("CPUBased=0x%08x SecondaryExec=0x%08x TertiaryExec=0x%016llx\n",
6362	       cpu_based_exec_ctrl, secondary_exec_control, tertiary_exec_control);
6363	pr_err("PinBased=0x%08x EntryControls=%08x ExitControls=%08x\n",
6364	       pin_based_exec_ctrl, vmentry_ctl, vmexit_ctl);
6365	pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n",
6366	       vmcs_read32(EXCEPTION_BITMAP),
6367	       vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK),
6368	       vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH));
6369	pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
6370	       vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
6371	       vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE),
6372	       vmcs_read32(VM_ENTRY_INSTRUCTION_LEN));
6373	pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
6374	       vmcs_read32(VM_EXIT_INTR_INFO),
6375	       vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
6376	       vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
6377	pr_err("        reason=%08x qualification=%016lx\n",
6378	       vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION));
6379	pr_err("IDTVectoring: info=%08x errcode=%08x\n",
6380	       vmcs_read32(IDT_VECTORING_INFO_FIELD),
6381	       vmcs_read32(IDT_VECTORING_ERROR_CODE));
6382	pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET));
6383	if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
6384		pr_err("TSC Multiplier = 0x%016llx\n",
6385		       vmcs_read64(TSC_MULTIPLIER));
6386	if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW) {
6387		if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) {
6388			u16 status = vmcs_read16(GUEST_INTR_STATUS);
6389			pr_err("SVI|RVI = %02x|%02x ", status >> 8, status & 0xff);
6390		}
6391		pr_cont("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
6392		if (secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
6393			pr_err("APIC-access addr = 0x%016llx ", vmcs_read64(APIC_ACCESS_ADDR));
6394		pr_cont("virt-APIC addr = 0x%016llx\n", vmcs_read64(VIRTUAL_APIC_PAGE_ADDR));
6395	}
6396	if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
6397		pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
6398	if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
6399		pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER));
6400	if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
6401		pr_err("PLE Gap=%08x Window=%08x\n",
6402		       vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW));
6403	if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
6404		pr_err("Virtual processor ID = 0x%04x\n",
6405		       vmcs_read16(VIRTUAL_PROCESSOR_ID));
6406}
6407
6408/*
6409 * The guest has exited.  See if we can fix it or if we need userspace
6410 * assistance.
6411 */
6412static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
6413{
6414	struct vcpu_vmx *vmx = to_vmx(vcpu);
6415	union vmx_exit_reason exit_reason = vmx->exit_reason;
6416	u32 vectoring_info = vmx->idt_vectoring_info;
6417	u16 exit_handler_index;
6418
6419	/*
6420	 * Flush the PML buffer of logged GPAs; this keeps dirty_bitmap more up
6421	 * to date. Another benefit is that, in kvm_vm_ioctl_get_dirty_log, before
6422	 * querying dirty_bitmap, we only need to kick all vcpus out of guest
6423	 * mode, as once a vcpu is back in root mode its PML buffer must have
6424	 * been flushed already.  Note, PML is never enabled in hardware while
6425	 * running L2.
6426	 */
6427	if (enable_pml && !is_guest_mode(vcpu))
6428		vmx_flush_pml_buffer(vcpu);
6429
6430	/*
6431	 * KVM should never reach this point with a pending nested VM-Enter.
6432	 * More specifically, short-circuiting VM-Entry to emulate L2 due to
6433	 * invalid guest state should never happen as that means KVM knowingly
6434	 * allowed a nested VM-Enter with an invalid vmcs12.  More below.
6435	 */
6436	if (KVM_BUG_ON(vmx->nested.nested_run_pending, vcpu->kvm))
6437		return -EIO;
6438
6439	if (is_guest_mode(vcpu)) {
6440		/*
6441		 * PML is never enabled when running L2, bail immediately if a
6442		 * PML full exit occurs as something is horribly wrong.
6443		 */
6444		if (exit_reason.basic == EXIT_REASON_PML_FULL)
6445			goto unexpected_vmexit;
6446
6447		/*
6448		 * The host physical addresses of some pages of guest memory
6449		 * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
6450		 * Page). The CPU may write to these pages via their host
6451		 * physical address while L2 is running, bypassing any
6452		 * address-translation-based dirty tracking (e.g. EPT write
6453		 * protection).
6454		 *
6455		 * Mark them dirty on every exit from L2 to prevent them from
6456		 * getting out of sync with dirty tracking.
6457		 */
6458		nested_mark_vmcs12_pages_dirty(vcpu);
6459
6460		/*
6461		 * Synthesize a triple fault if L2 state is invalid.  In normal
6462		 * operation, nested VM-Enter rejects any attempt to enter L2
6463		 * with invalid state.  However, those checks are skipped if
6464		 * state is being stuffed via RSM or KVM_SET_NESTED_STATE.  If
6465		 * L2 state is invalid, it means either L1 modified SMRAM state
6466		 * or userspace provided bad state.  Synthesize TRIPLE_FAULT as
6467		 * doing so is architecturally allowed in the RSM case, and is
6468		 * the least awful solution for the userspace case without
6469		 * risking false positives.
6470		 */
6471		if (vmx->emulation_required) {
6472			nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0);
6473			return 1;
6474		}
6475
6476		if (nested_vmx_reflect_vmexit(vcpu))
6477			return 1;
6478	}
6479
6480	/* If guest state is invalid, start emulating.  L2 is handled above. */
6481	if (vmx->emulation_required)
6482		return handle_invalid_guest_state(vcpu);
6483
6484	if (exit_reason.failed_vmentry) {
6485		dump_vmcs(vcpu);
6486		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
6487		vcpu->run->fail_entry.hardware_entry_failure_reason
6488			= exit_reason.full;
6489		vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
6490		return 0;
6491	}
6492
6493	if (unlikely(vmx->fail)) {
6494		dump_vmcs(vcpu);
6495		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
6496		vcpu->run->fail_entry.hardware_entry_failure_reason
6497			= vmcs_read32(VM_INSTRUCTION_ERROR);
6498		vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
6499		return 0;
6500	}
6501
6502	/*
6503	 * Note:
6504	 * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused by an
6505	 * event delivery, since that indicates the guest is accessing MMIO.
6506	 * The VM-Exit would be triggered again after returning to the guest,
6507	 * causing an infinite loop.
6508	 */
6509	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
6510	    (exit_reason.basic != EXIT_REASON_EXCEPTION_NMI &&
6511	     exit_reason.basic != EXIT_REASON_EPT_VIOLATION &&
6512	     exit_reason.basic != EXIT_REASON_PML_FULL &&
6513	     exit_reason.basic != EXIT_REASON_APIC_ACCESS &&
6514	     exit_reason.basic != EXIT_REASON_TASK_SWITCH &&
6515	     exit_reason.basic != EXIT_REASON_NOTIFY)) {
6516		int ndata = 3;
6517
6518		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
6519		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
6520		vcpu->run->internal.data[0] = vectoring_info;
6521		vcpu->run->internal.data[1] = exit_reason.full;
6522		vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
6523		if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) {
6524			vcpu->run->internal.data[ndata++] =
6525				vmcs_read64(GUEST_PHYSICAL_ADDRESS);
6526		}
6527		vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
6528		vcpu->run->internal.ndata = ndata;
6529		return 0;
6530	}
6531
6532	if (unlikely(!enable_vnmi &&
6533		     vmx->loaded_vmcs->soft_vnmi_blocked)) {
6534		if (!vmx_interrupt_blocked(vcpu)) {
6535			vmx->loaded_vmcs->soft_vnmi_blocked = 0;
6536		} else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
6537			   vcpu->arch.nmi_pending) {
6538			/*
6539			 * This CPU doesn't support us in finding the end of an
6540			 * NMI-blocked window if the guest runs with IRQs
6541			 * disabled. So we pull the trigger after 1 s of
6542			 * futile waiting, but inform the user about this.
6543			 */
6544			printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
6545			       "state on VCPU %d after 1 s timeout\n",
6546			       __func__, vcpu->vcpu_id);
6547			vmx->loaded_vmcs->soft_vnmi_blocked = 0;
6548		}
6549	}
6550
6551	if (exit_fastpath != EXIT_FASTPATH_NONE)
6552		return 1;
6553
6554	if (exit_reason.basic >= kvm_vmx_max_exit_handlers)
6555		goto unexpected_vmexit;
6556#ifdef CONFIG_RETPOLINE
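	/*
	 * Open-code the dispatch of the hottest exit reasons to avoid a
	 * retpolined indirect call through kvm_vmx_exit_handlers[].
	 */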
6557	if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
6558		return kvm_emulate_wrmsr(vcpu);
6559	else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER)
6560		return handle_preemption_timer(vcpu);
6561	else if (exit_reason.basic == EXIT_REASON_INTERRUPT_WINDOW)
6562		return handle_interrupt_window(vcpu);
6563	else if (exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
6564		return handle_external_interrupt(vcpu);
6565	else if (exit_reason.basic == EXIT_REASON_HLT)
6566		return kvm_emulate_halt(vcpu);
6567	else if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG)
6568		return handle_ept_misconfig(vcpu);
6569#endif
6570
6571	exit_handler_index = array_index_nospec((u16)exit_reason.basic,
6572						kvm_vmx_max_exit_handlers);
6573	if (!kvm_vmx_exit_handlers[exit_handler_index])
6574		goto unexpected_vmexit;
6575
6576	return kvm_vmx_exit_handlers[exit_handler_index](vcpu);
6577
6578unexpected_vmexit:
6579	vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
6580		    exit_reason.full);
6581	dump_vmcs(vcpu);
6582	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
6583	vcpu->run->internal.suberror =
6584			KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
6585	vcpu->run->internal.ndata = 2;
6586	vcpu->run->internal.data[0] = exit_reason.full;
6587	vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
6588	return 0;
6589}
6590
6591static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
6592{
6593	int ret = __vmx_handle_exit(vcpu, exit_fastpath);
6594
6595	/*
6596	 * Exit to user space when a bus lock is detected, to inform userspace
6597	 * that there is a bus lock in the guest.
6598	 */
6599	if (to_vmx(vcpu)->exit_reason.bus_lock_detected) {
6600		if (ret > 0)
6601			vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK;
6602
6603		vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK;
6604		return 0;
6605	}
6606	return ret;
6607}
6608
6609/*
6610 * Software-based L1D cache flush which is used when microcode providing
6611 * the cache control MSR is not loaded.
6612 *
6613 * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but
6614 * flushing it requires reading in 64 KiB because the replacement algorithm
6615 * is not exactly LRU. This could be sized at runtime via topology
6616 * information but as all relevant affected CPUs have 32KiB L1D cache size
6617 * there is no point in doing so.
6618 */
6619static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu)
6620{
6621	int size = PAGE_SIZE << L1D_CACHE_ORDER;
6622
6623	/*
6624	 * This code is only executed when the flush mode is 'cond' or
6625	 * 'always'
6626	 */
6627	if (static_branch_likely(&vmx_l1d_flush_cond)) {
6628		bool flush_l1d;
6629
6630		/*
6631		 * Clear the per-vcpu flush bit; it gets set again
6632		 * either from vcpu_run() or from one of the unsafe
6633		 * VMEXIT handlers.
6634		 */
6635		flush_l1d = vcpu->arch.l1tf_flush_l1d;
6636		vcpu->arch.l1tf_flush_l1d = false;
6637
6638		/*
6639		 * Clear the per-cpu flush bit; it gets set again from
6640		 * the interrupt handlers.
6641		 */
6642		flush_l1d |= kvm_get_cpu_l1tf_flush_l1d();
6643		kvm_clear_cpu_l1tf_flush_l1d();
6644
6645		if (!flush_l1d)
6646			return;
6647	}
6648
6649	vcpu->stat.l1d_flush++;
6650
6651	if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
6652		native_wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
6653		return;
6654	}
6655
6656	asm volatile(
6657		/* First ensure the pages are in the TLB */
6658		"xorl	%%eax, %%eax\n"
6659		".Lpopulate_tlb:\n\t"
6660		"movzbl	(%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
6661		"addl	$4096, %%eax\n\t"
6662		"cmpl	%%eax, %[size]\n\t"
6663		"jne	.Lpopulate_tlb\n\t"
6664		"xorl	%%eax, %%eax\n\t"
6665		"cpuid\n\t"
6666		/* Now fill the cache */
6667		"xorl	%%eax, %%eax\n"
6668		".Lfill_cache:\n"
6669		"movzbl	(%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
6670		"addl	$64, %%eax\n\t"
6671		"cmpl	%%eax, %[size]\n\t"
6672		"jne	.Lfill_cache\n\t"
6673		"lfence\n"
6674		:: [flush_pages] "r" (vmx_l1d_flush_pages),
6675		    [size] "r" (size)
6676		: "eax", "ebx", "ecx", "edx");
6677}
6678
6679static void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
6680{
6681	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6682	int tpr_threshold;
6683
6684	if (is_guest_mode(vcpu) &&
6685		nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
6686		return;
6687
6688	tpr_threshold = (irr == -1 || tpr < irr) ? 0 : irr;
6689	if (is_guest_mode(vcpu))
6690		to_vmx(vcpu)->nested.l1_tpr_threshold = tpr_threshold;
6691	else
6692		vmcs_write32(TPR_THRESHOLD, tpr_threshold);
6693}
6694
6695void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
6696{
6697	struct vcpu_vmx *vmx = to_vmx(vcpu);
6698	u32 sec_exec_control;
6699
6700	if (!lapic_in_kernel(vcpu))
6701		return;
6702
6703	if (!flexpriority_enabled &&
6704	    !cpu_has_vmx_virtualize_x2apic_mode())
6705		return;
6706
6707	/* Postpone execution until vmcs01 is the current VMCS. */
6708	if (is_guest_mode(vcpu)) {
6709		vmx->nested.change_vmcs01_virtual_apic_mode = true;
6710		return;
6711	}
6712
6713	sec_exec_control = secondary_exec_controls_get(vmx);
6714	sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
6715			      SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
6716
6717	switch (kvm_get_apic_mode(vcpu)) {
6718	case LAPIC_MODE_INVALID:
6719		WARN_ONCE(true, "Invalid local APIC state");
6720		break;
6721	case LAPIC_MODE_DISABLED:
6722		break;
6723	case LAPIC_MODE_XAPIC:
6724		if (flexpriority_enabled) {
6725			sec_exec_control |=
6726				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
6727			kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
6728
6729			/*
6730			 * Flush the TLB: reloading the APIC access page will
6731			 * only do so if its physical address has changed, but
6732			 * the guest may have inserted a non-APIC mapping into
6733			 * the TLB while the APIC access page was disabled.
6734			 */
6735			kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
6736		}
6737		break;
6738	case LAPIC_MODE_X2APIC:
6739		if (cpu_has_vmx_virtualize_x2apic_mode())
6740			sec_exec_control |=
6741				SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
6742		break;
6743	}
6744	secondary_exec_controls_set(vmx, sec_exec_control);
6745
6746	vmx_update_msr_bitmap_x2apic(vcpu);
6747}
6748
6749static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
6750{
6751	const gfn_t gfn = APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT;
6752	struct kvm *kvm = vcpu->kvm;
6753	struct kvm_memslots *slots = kvm_memslots(kvm);
6754	struct kvm_memory_slot *slot;
6755	unsigned long mmu_seq;
6756	kvm_pfn_t pfn;
6757
6758	/* Defer reload until vmcs01 is the current VMCS. */
6759	if (is_guest_mode(vcpu)) {
6760		to_vmx(vcpu)->nested.reload_vmcs01_apic_access_page = true;
6761		return;
6762	}
6763
6764	if (!(secondary_exec_controls_get(to_vmx(vcpu)) &
6765	    SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
6766		return;
6767
6768	/*
6769	 * Grab the memslot so that the hva lookup for the mmu_notifier retry
6770	 * is guaranteed to use the same memslot as the pfn lookup, i.e. rely
6771	 * on the pfn lookup's validation of the memslot to ensure a valid hva
6772	 * is used for the retry check.
6773	 */
6774	slot = id_to_memslot(slots, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT);
6775	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
6776		return;
6777
6778	/*
6779	 * Ensure that the mmu_notifier sequence count is read before KVM
6780	 * retrieves the pfn from the primary MMU.  Note, the memslot is
6781	 * protected by SRCU, not the mmu_notifier.  Pairs with the smp_wmb()
6782	 * in kvm_mmu_invalidate_end().
6783	 */
6784	mmu_seq = kvm->mmu_invalidate_seq;
6785	smp_rmb();
6786
6787	/*
6788	 * No need to retry if the memslot does not exist or is invalid.  KVM
6789	 * controls the APIC-access page memslot, and only deletes the memslot
6790	 * if APICv is permanently inhibited, i.e. the memslot won't reappear.
6791	 */
6792	pfn = gfn_to_pfn_memslot(slot, gfn);
6793	if (is_error_noslot_pfn(pfn))
6794		return;
6795
6796	read_lock(&vcpu->kvm->mmu_lock);
6797	if (mmu_invalidate_retry_hva(kvm, mmu_seq,
6798				     gfn_to_hva_memslot(slot, gfn))) {
6799		kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
6800		read_unlock(&vcpu->kvm->mmu_lock);
6801		goto out;
6802	}
6803
6804	vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(pfn));
6805	read_unlock(&vcpu->kvm->mmu_lock);
6806
6807	/*
6808	 * No need for a manual TLB flush at this point, KVM has already done a
6809	 * flush if there were SPTEs pointing at the previous page.
6810	 */
6811out:
6812	/*
6813	 * Do not pin the APIC access page in memory; the MMU notifier
6814	 * will call us again if it is migrated or swapped out.
6815	 */
6816	kvm_release_pfn_clean(pfn);
6817}
6818
6819static void vmx_hwapic_isr_update(int max_isr)
6820{
6821	u16 status;
6822	u8 old;
6823
6824	if (max_isr == -1)
6825		max_isr = 0;
6826
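	/* Guest interrupt status: RVI in the low byte, SVI in the high byte. */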
6827	status = vmcs_read16(GUEST_INTR_STATUS);
6828	old = status >> 8;
6829	if (max_isr != old) {
6830		status &= 0xff;
6831		status |= max_isr << 8;
6832		vmcs_write16(GUEST_INTR_STATUS, status);
6833	}
6834}
6835
6836static void vmx_set_rvi(int vector)
6837{
6838	u16 status;
6839	u8 old;
6840
6841	if (vector == -1)
6842		vector = 0;
6843
6844	status = vmcs_read16(GUEST_INTR_STATUS);
6845	old = (u8)status & 0xff;
6846	if ((u8)vector != old) {
6847		status &= ~0xff;
6848		status |= (u8)vector;
6849		vmcs_write16(GUEST_INTR_STATUS, status);
6850	}
6851}
6852
6853static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
6854{
6855	/*
6856	 * When running L2, updating RVI is only relevant when
6857	 * vmcs12 virtual-interrupt-delivery is enabled.
6858	 * However, it can be enabled only when L1 also
6859	 * intercepts external interrupts, and in that case
6860	 * we should not update vmcs02 RVI but instead intercept
6861	 * the interrupt. Therefore, do nothing when running L2.
6862	 */
6863	if (!is_guest_mode(vcpu))
6864		vmx_set_rvi(max_irr);
6865}
6866
6867static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
6868{
6869	struct vcpu_vmx *vmx = to_vmx(vcpu);
6870	int max_irr;
6871	bool got_posted_interrupt;
6872
6873	if (KVM_BUG_ON(!enable_apicv, vcpu->kvm))
6874		return -EIO;
6875
6876	if (pi_test_on(&vmx->pi_desc)) {
6877		pi_clear_on(&vmx->pi_desc);
6878		/*
6879		 * IOMMU can write to PID.ON, so the barrier matters even on UP.
6880		 * But on x86 this is just a compiler barrier anyway.
6881		 */
6882		smp_mb__after_atomic();
6883		got_posted_interrupt =
6884			kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
6885	} else {
6886		max_irr = kvm_lapic_find_highest_irr(vcpu);
6887		got_posted_interrupt = false;
6888	}
6889
6890	/*
6891	 * Newly recognized interrupts are injected via either virtual interrupt
6892	 * delivery (RVI) or KVM_REQ_EVENT.  Virtual interrupt delivery is
6893	 * disabled in two cases:
6894	 *
6895	 * 1) If L2 is running and the vCPU has a new pending interrupt.  If L1
6896	 * wants to exit on interrupts, KVM_REQ_EVENT is needed to synthesize a
6897	 * VM-Exit to L1.  If L1 doesn't want to exit, the interrupt is injected
6898	 * into L2, but KVM doesn't use virtual interrupt delivery to inject
6899	 * interrupts into L2, and so KVM_REQ_EVENT is again needed.
6900	 *
6901	 * 2) If APICv is disabled for this vCPU, assigned devices may still
6902	 * attempt to post interrupts.  The posted interrupt vector will cause
6903	 * a VM-Exit and the subsequent entry will call sync_pir_to_irr.
6904	 */
6905	if (!is_guest_mode(vcpu) && kvm_vcpu_apicv_active(vcpu))
6906		vmx_set_rvi(max_irr);
6907	else if (got_posted_interrupt)
6908		kvm_make_request(KVM_REQ_EVENT, vcpu);
6909
6910	return max_irr;
6911}
6912
6913static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
6914{
6915	if (!kvm_vcpu_apicv_active(vcpu))
6916		return;
6917
6918	vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
6919	vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
6920	vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
6921	vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
6922}
6923
6924static void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
6925{
6926	struct vcpu_vmx *vmx = to_vmx(vcpu);
6927
6928	pi_clear_on(&vmx->pi_desc);
6929	memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir));
6930}
6931
6932void vmx_do_interrupt_irqoff(unsigned long entry);
6933void vmx_do_nmi_irqoff(void);
6934
6935static void handle_nm_fault_irqoff(struct kvm_vcpu *vcpu)
6936{
6937	/*
6938	 * Save xfd_err to guest_fpu before interrupts are enabled, so the
6939	 * MSR value is not clobbered by host activity before the guest
6940	 * has a chance to consume it.
6941	 *
6942	 * Do not blindly read xfd_err here, since this exception might
6943	 * be caused by L1 interception on a platform which doesn't
6944	 * support xfd at all.
6945	 *
6946	 * Do it conditionally upon guest_fpu::xfd. xfd_err matters
6947	 * only when xfd contains a non-zero value.
6948	 *
6949	 * Queuing exception is done in vmx_handle_exit. See comment there.
6950	 */
6951	if (vcpu->arch.guest_fpu.fpstate->xfd)
6952		rdmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
6953}
6954
6955static void handle_exception_irqoff(struct vcpu_vmx *vmx)
6956{
6957	u32 intr_info = vmx_get_intr_info(&vmx->vcpu);
6958
6959	/* if exit due to PF check for async PF */
6960	if (is_page_fault(intr_info))
6961		vmx->vcpu.arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
6962	/* if exit due to NM, handle before interrupts are enabled */
6963	else if (is_nm_fault(intr_info))
6964		handle_nm_fault_irqoff(&vmx->vcpu);
6965	/* Handle machine checks before interrupts are enabled */
6966	else if (is_machine_check(intr_info))
6967		kvm_machine_check();
6968}
6969
6970static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
6971{
6972	u32 intr_info = vmx_get_intr_info(vcpu);
6973	unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
6974	gate_desc *desc = (gate_desc *)host_idt_base + vector;
6975
6976	if (KVM_BUG(!is_external_intr(intr_info), vcpu->kvm,
6977	    "unexpected VM-Exit interrupt info: 0x%x", intr_info))
6978		return;
6979
6980	kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
6981	vmx_do_interrupt_irqoff(gate_offset(desc));
6982	kvm_after_interrupt(vcpu);
6983
6984	vcpu->arch.at_instruction_boundary = true;
6985}
6986
6987static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
6988{
6989	struct vcpu_vmx *vmx = to_vmx(vcpu);
6990
6991	if (vmx->emulation_required)
6992		return;
6993
6994	if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
6995		handle_external_interrupt_irqoff(vcpu);
6996	else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
6997		handle_exception_irqoff(vmx);
6998}
6999
7000/*
7001 * The kvm parameter can be NULL (module initialization, or invocation before
7002 * VM creation). Be sure to check the kvm parameter before using it.
7003 */
7004static bool vmx_has_emulated_msr(struct kvm *kvm, u32 index)
7005{
7006	switch (index) {
7007	case MSR_IA32_SMBASE:
7008		if (!IS_ENABLED(CONFIG_KVM_SMM))
7009			return false;
7010		/*
7011		 * We cannot do SMM unless we can run the guest in big
7012		 * real mode.
7013		 */
7014		return enable_unrestricted_guest || emulate_invalid_guest_state;
7015	case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
7016		return nested;
7017	case MSR_AMD64_VIRT_SPEC_CTRL:
7018	case MSR_AMD64_TSC_RATIO:
7019		/* This is AMD only.  */
7020		return false;
7021	default:
7022		return true;
7023	}
7024}
7025
7026static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
7027{
7028	u32 exit_intr_info;
7029	bool unblock_nmi;
7030	u8 vector;
7031	bool idtv_info_valid;
7032
7033	idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
7034
7035	if (enable_vnmi) {
7036		if (vmx->loaded_vmcs->nmi_known_unmasked)
7037			return;
7038
7039		exit_intr_info = vmx_get_intr_info(&vmx->vcpu);
7040		unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
7041		vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
7042		/*
7043		 * SDM 3: 27.7.1.2 (September 2008)
7044		 * Re-set bit "block by NMI" before VM entry if vmexit caused by
7045		 * a guest IRET fault.
7046		 * SDM 3: 23.2.2 (September 2008)
7047		 * Bit 12 is undefined in any of the following cases:
7048		 *  If the VM exit sets the valid bit in the IDT-vectoring
7049		 *   information field.
7050		 *  If the VM exit is due to a double fault.
7051		 */
7052		if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
7053		    vector != DF_VECTOR && !idtv_info_valid)
7054			vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
7055				      GUEST_INTR_STATE_NMI);
7056		else
7057			vmx->loaded_vmcs->nmi_known_unmasked =
7058				!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
7059				  & GUEST_INTR_STATE_NMI);
7060	} else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
7061		vmx->loaded_vmcs->vnmi_blocked_time +=
7062			ktime_to_ns(ktime_sub(ktime_get(),
7063					      vmx->loaded_vmcs->entry_time));
7064}
7065
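/*
 * Re-queue any event whose delivery was cut short by the VM-Exit (per the
 * IDT-vectoring info, or the VM-Entry interruption info when an injection
 * is being cancelled) so that it is injected again on the next VM-Entry.
 */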
7066static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
7067				      u32 idt_vectoring_info,
7068				      int instr_len_field,
7069				      int error_code_field)
7070{
7071	u8 vector;
7072	int type;
7073	bool idtv_info_valid;
7074
7075	idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
7076
7077	vcpu->arch.nmi_injected = false;
7078	kvm_clear_exception_queue(vcpu);
7079	kvm_clear_interrupt_queue(vcpu);
7080
7081	if (!idtv_info_valid)
7082		return;
7083
7084	kvm_make_request(KVM_REQ_EVENT, vcpu);
7085
7086	vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
7087	type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
7088
7089	switch (type) {
7090	case INTR_TYPE_NMI_INTR:
7091		vcpu->arch.nmi_injected = true;
7092		/*
7093		 * SDM 3: 27.7.1.2 (September 2008)
7094		 * Clear bit "block by NMI" before VM entry if a NMI
7095		 * Clear bit "block by NMI" before VM entry if an NMI
7096		 */
7097		vmx_set_nmi_mask(vcpu, false);
7098		break;
7099	case INTR_TYPE_SOFT_EXCEPTION:
7100		vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
7101		fallthrough;
7102	case INTR_TYPE_HARD_EXCEPTION:
7103		if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
7104			u32 err = vmcs_read32(error_code_field);
7105			kvm_requeue_exception_e(vcpu, vector, err);
7106		} else
7107			kvm_requeue_exception(vcpu, vector);
7108		break;
7109	case INTR_TYPE_SOFT_INTR:
7110		vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
7111		fallthrough;
7112	case INTR_TYPE_EXT_INTR:
7113		kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
7114		break;
7115	default:
7116		break;
7117	}
7118}
7119
7120static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
7121{
7122	__vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
7123				  VM_EXIT_INSTRUCTION_LEN,
7124				  IDT_VECTORING_ERROR_CODE);
7125}
7126
7127static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
7128{
7129	__vmx_complete_interrupts(vcpu,
7130				  vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
7131				  VM_ENTRY_INSTRUCTION_LEN,
7132				  VM_ENTRY_EXCEPTION_ERROR_CODE);
7133
7134	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
7135}
7136
7137static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
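/*
 * Put perf MSRs whose guest and host values differ onto the VM-Entry/
 * VM-Exit autoload lists so the CPU switches them atomically; MSRs whose
 * values already match are removed from the lists.
 */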
7138{
7139	int i, nr_msrs;
7140	struct perf_guest_switch_msr *msrs;
7141	struct kvm_pmu *pmu = vcpu_to_pmu(&vmx->vcpu);
7142
7143	pmu->host_cross_mapped_mask = 0;
7144	if (pmu->pebs_enable & pmu->global_ctrl)
7145		intel_pmu_cross_mapped_check(pmu);
7146
7147	/* Note, nr_msrs may be garbage if perf_guest_get_msrs() returns NULL. */
7148	msrs = perf_guest_get_msrs(&nr_msrs, (void *)pmu);
7149	if (!msrs)
7150		return;
7151
7152	for (i = 0; i < nr_msrs; i++)
7153		if (msrs[i].host == msrs[i].guest)
7154			clear_atomic_switch_msr(vmx, msrs[i].msr);
7155		else
7156			add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
7157					msrs[i].host, false);
7158}
7159
7160static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
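/*
 * Program the VMX preemption timer for this VM-Entry: 0 forces an exit
 * immediately after entry (req_immediate_exit), a finite value covers the
 * remaining TSC delta scaled by the preemption timer rate, and -1 (the
 * 32-bit maximum) effectively disables the timer, with
 * hv_timer_soft_disabled avoiding the rewrite on every entry in that case.
 */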
7161{
7162	struct vcpu_vmx *vmx = to_vmx(vcpu);
7163	u64 tscl;
7164	u32 delta_tsc;
7165
7166	if (vmx->req_immediate_exit) {
7167		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, 0);
7168		vmx->loaded_vmcs->hv_timer_soft_disabled = false;
7169	} else if (vmx->hv_deadline_tsc != -1) {
7170		tscl = rdtsc();
7171		if (vmx->hv_deadline_tsc > tscl)
7172			/* set_hv_timer ensures the delta fits in 32 bits */
7173			delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
7174				cpu_preemption_timer_multi);
7175		else
7176			delta_tsc = 0;
7177
7178		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
7179		vmx->loaded_vmcs->hv_timer_soft_disabled = false;
7180	} else if (!vmx->loaded_vmcs->hv_timer_soft_disabled) {
7181		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, -1);
7182		vmx->loaded_vmcs->hv_timer_soft_disabled = true;
7183	}
7184}
7185
7186void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
7187{
7188	if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) {
7189		vmx->loaded_vmcs->host_state.rsp = host_rsp;
7190		vmcs_writel(HOST_RSP, host_rsp);
7191	}
7192}
7193
7194void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
7195					unsigned int flags)
7196{
7197	u64 hostval = this_cpu_read(x86_spec_ctrl_current);
7198
7199	if (!cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL))
7200		return;
7201
7202	if (flags & VMX_RUN_SAVE_SPEC_CTRL)
7203		vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL);
7204
7205	/*
7206	 * If the guest/host SPEC_CTRL values differ, restore the host value.
7207	 *
7208	 * For legacy IBRS, the IBRS bit always needs to be written after
7209	 * transitioning from a less privileged predictor mode, regardless of
7210	 * whether the guest/host values differ.
7211	 */
7212	if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) ||
7213	    vmx->spec_ctrl != hostval)
7214		native_wrmsrl(MSR_IA32_SPEC_CTRL, hostval);
7215
7216	barrier_nospec();
7217}
7218
7219static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
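/*
 * Fastpath exits are handled directly on the VM-Exit path, before
 * interrupts are re-enabled and without going through the full exit
 * handler; anything not listed here returns EXIT_FASTPATH_NONE and takes
 * the normal vmx_handle_exit() path.
 */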
7220{
7221	switch (to_vmx(vcpu)->exit_reason.basic) {
7222	case EXIT_REASON_MSR_WRITE:
7223		return handle_fastpath_set_msr_irqoff(vcpu);
7224	case EXIT_REASON_PREEMPTION_TIMER:
7225		return handle_fastpath_preemption_timer(vcpu);
7226	default:
7227		return EXIT_FASTPATH_NONE;
7228	}
7229}
7230
7231static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
7232					unsigned int flags)
7233{
7234	struct vcpu_vmx *vmx = to_vmx(vcpu);
7235
7236	guest_state_enter_irqoff();
7237
7238	/*
7239	 * L1D Flush includes CPU buffer clear to mitigate MDS, but VERW
7240	 * mitigation for MDS is done late in VMentry and is still
7241	 * executed in spite of L1D Flush. This is because an extra VERW
7242	 * should not matter much after the big hammer L1D Flush.
7243	 */
7244	if (static_branch_unlikely(&vmx_l1d_should_flush))
7245		vmx_l1d_flush(vcpu);
7246	else if (static_branch_unlikely(&mmio_stale_data_clear) &&
7247		 kvm_arch_has_assigned_device(vcpu->kvm))
7248		mds_clear_cpu_buffers();
7249
7250	vmx_disable_fb_clear(vmx);
7251
7252	if (vcpu->arch.cr2 != native_read_cr2())
7253		native_write_cr2(vcpu->arch.cr2);
7254
7255	vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
7256				   flags);
7257
7258	vcpu->arch.cr2 = native_read_cr2();
7259	vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;
7260
7261	vmx->idt_vectoring_info = 0;
7262
7263	vmx_enable_fb_clear(vmx);
7264
7265	if (unlikely(vmx->fail)) {
7266		vmx->exit_reason.full = 0xdead;
7267		goto out;
7268	}
7269
7270	vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
7271	if (likely(!vmx->exit_reason.failed_vmentry))
7272		vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
7273
7274	if ((u16)vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI &&
7275	    is_nmi(vmx_get_intr_info(vcpu))) {
7276		kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
7277		vmx_do_nmi_irqoff();
7278		kvm_after_interrupt(vcpu);
7279	}
7280
7281out:
7282	guest_state_exit_irqoff();
7283}
7284
7285static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
7286{
7287	struct vcpu_vmx *vmx = to_vmx(vcpu);
7288	unsigned long cr3, cr4;
7289
7290	/* Record the guest's net vcpu time for enforced NMI injections. */
7291	if (unlikely(!enable_vnmi &&
7292		     vmx->loaded_vmcs->soft_vnmi_blocked))
7293		vmx->loaded_vmcs->entry_time = ktime_get();
7294
7295	/*
7296	 * Don't enter VMX if guest state is invalid; let the exit handler
7297	 * start emulation until we arrive back at a valid state.  Synthesize a
7298	 * consistency check VM-Exit due to invalid guest state and bail.
7299	 */
7300	if (unlikely(vmx->emulation_required)) {
7301		vmx->fail = 0;
7302
7303		vmx->exit_reason.full = EXIT_REASON_INVALID_STATE;
7304		vmx->exit_reason.failed_vmentry = 1;
7305		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
7306		vmx->exit_qualification = ENTRY_FAIL_DEFAULT;
7307		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
7308		vmx->exit_intr_info = 0;
7309		return EXIT_FASTPATH_NONE;
7310	}
7311
7312	trace_kvm_entry(vcpu);
7313
7314	if (vmx->ple_window_dirty) {
7315		vmx->ple_window_dirty = false;
7316		vmcs_write32(PLE_WINDOW, vmx->ple_window);
7317	}
7318
7319	/*
7320	 * This was done in prepare_switch_to_guest(), because it needs to
7321	 * be done within the srcu_read_lock critical section.
7322	 */
7323	WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync);
7324
7325	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP))
7326		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
7327	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP))
7328		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
7329	vcpu->arch.regs_dirty = 0;
7330
7331	/*
7332	 * Refresh vmcs.HOST_CR3 if necessary.  This must be done immediately
7333	 * prior to VM-Enter, as the kernel may load a new ASID (PCID) any time
7334	 * it switches back to the current->mm, which can occur in KVM context
7335	 * when switching to a temporary mm to patch kernel code, e.g. if KVM
7336	 * toggles a static key while handling a VM-Exit.
7337	 */
7338	cr3 = __get_current_cr3_fast();
7339	if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
7340		vmcs_writel(HOST_CR3, cr3);
7341		vmx->loaded_vmcs->host_state.cr3 = cr3;
7342	}
7343
7344	cr4 = cr4_read_shadow();
7345	if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
7346		vmcs_writel(HOST_CR4, cr4);
7347		vmx->loaded_vmcs->host_state.cr4 = cr4;
7348	}
7349
7350	/* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */
7351	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
7352		set_debugreg(vcpu->arch.dr6, 6);
7353
7354	/* When single-stepping over STI and MOV SS, we must clear the
7355	 * corresponding interruptibility bits in the guest state. Otherwise
7356	 * vmentry fails as it then expects bit 14 (BS) in pending debug
7357	 * exceptions being set, but that's not correct for the guest debugging
7358	 * case. */
7359	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
7360		vmx_set_interrupt_shadow(vcpu, 0);
7361
7362	kvm_load_guest_xsave_state(vcpu);
7363
7364	pt_guest_enter(vmx);
7365
7366	atomic_switch_perf_msrs(vmx);
7367	if (intel_pmu_lbr_is_enabled(vcpu))
7368		vmx_passthrough_lbr_msrs(vcpu);
7369
7370	if (enable_preemption_timer)
7371		vmx_update_hv_timer(vcpu);
7372
7373	kvm_wait_lapic_expire(vcpu);
7374
7375	/* The actual VMENTER/EXIT is in the .noinstr.text section. */
7376	vmx_vcpu_enter_exit(vcpu, __vmx_vcpu_run_flags(vmx));
7377
7378	/* All fields are clean at this point */
7379	if (kvm_is_using_evmcs()) {
7380		current_evmcs->hv_clean_fields |=
7381			HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
7382
7383		current_evmcs->hv_vp_id = kvm_hv_get_vpindex(vcpu);
7384	}
7385
7386	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
7387	if (vmx->host_debugctlmsr)
7388		update_debugctlmsr(vmx->host_debugctlmsr);
7389
7390#ifndef CONFIG_X86_64
7391	/*
7392	 * The sysexit path does not restore ds/es, so we must set them to
7393	 * a reasonable value ourselves.
7394	 *
7395	 * We can't defer this to vmx_prepare_switch_to_host() since that
7396	 * function may be executed in interrupt context, which saves and
7397	 * restores segments around it, nullifying its effect.
7398	 */
7399	loadsegment(ds, __USER_DS);
7400	loadsegment(es, __USER_DS);
7401#endif
7402
7403	pt_guest_exit(vmx);
7404
7405	kvm_load_host_xsave_state(vcpu);
7406
7407	if (is_guest_mode(vcpu)) {
7408		/*
7409		 * Track VMLAUNCH/VMRESUME that have made it past guest state
7410		 * checking.
7411		 */
7412		if (vmx->nested.nested_run_pending &&
7413		    !vmx->exit_reason.failed_vmentry)
7414			++vcpu->stat.nested_run;
7415
7416		vmx->nested.nested_run_pending = 0;
7417	}
7418
7419	if (unlikely(vmx->fail))
7420		return EXIT_FASTPATH_NONE;
7421
7422	if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY))
7423		kvm_machine_check();
7424
7425	trace_kvm_exit(vcpu, KVM_ISA_VMX);
7426
7427	if (unlikely(vmx->exit_reason.failed_vmentry))
7428		return EXIT_FASTPATH_NONE;
7429
7430	vmx->loaded_vmcs->launched = 1;
7431
7432	vmx_recover_nmi_blocking(vmx);
7433	vmx_complete_interrupts(vmx);
7434
7435	if (is_guest_mode(vcpu))
7436		return EXIT_FASTPATH_NONE;
7437
7438	return vmx_exit_handlers_fastpath(vcpu);
7439}
7440
7441static void vmx_vcpu_free(struct kvm_vcpu *vcpu)
7442{
7443	struct vcpu_vmx *vmx = to_vmx(vcpu);
7444
7445	if (enable_pml)
7446		vmx_destroy_pml_buffer(vmx);
7447	free_vpid(vmx->vpid);
7448	nested_vmx_free_vcpu(vcpu);
7449	free_loaded_vmcs(vmx->loaded_vmcs);
7450}
7451
7452static int vmx_vcpu_create(struct kvm_vcpu *vcpu)
7453{
7454	struct vmx_uret_msr *tsx_ctrl;
7455	struct vcpu_vmx *vmx;
7456	int i, err;
7457
7458	BUILD_BUG_ON(offsetof(struct vcpu_vmx, vcpu) != 0);
7459	vmx = to_vmx(vcpu);
7460
7461	INIT_LIST_HEAD(&vmx->pi_wakeup_list);
7462
7463	err = -ENOMEM;
7464
7465	vmx->vpid = allocate_vpid();
7466
7467	/*
7468	 * If PML is turned on, failure to enable PML simply results in failure
7469	 * to create the vCPU, which lets the PML logic stay simple (no need to
7470	 * deal with cases such as PML being enabled on only some of a guest's
7471	 * vCPUs).
7472	 */
7473	if (enable_pml) {
7474		vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
7475		if (!vmx->pml_pg)
7476			goto free_vpid;
7477	}
7478
7479	for (i = 0; i < kvm_nr_uret_msrs; ++i)
7480		vmx->guest_uret_msrs[i].mask = -1ull;
7481	if (boot_cpu_has(X86_FEATURE_RTM)) {
7482		/*
7483		 * TSX_CTRL_CPUID_CLEAR is handled in the CPUID interception.
7484		 * Keep the host value unchanged to avoid changing CPUID bits
7485		 * under the host kernel's feet.
7486		 */
7487		tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
7488		if (tsx_ctrl)
7489			tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
7490	}
7491
7492	err = alloc_loaded_vmcs(&vmx->vmcs01);
7493	if (err < 0)
7494		goto free_pml;
7495
7496	/*
7497	 * Use Hyper-V 'Enlightened MSR Bitmap' feature when KVM runs as a
7498	 * nested (L1) hypervisor and Hyper-V in L0 supports it. Enable the
7499	 * feature only for vmcs01, KVM currently isn't equipped to realize any
7500	 * performance benefits from enabling it for vmcs02.
7501	 */
7502	if (kvm_is_using_evmcs() &&
7503	    (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
7504		struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
7505
7506		evmcs->hv_enlightenments_control.msr_bitmap = 1;
7507	}
7508
7509	/* The MSR bitmap starts with all ones */
7510	bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
7511	bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
7512
7513	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
7514#ifdef CONFIG_X86_64
7515	vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
7516	vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
7517	vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
7518#endif
7519	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
7520	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
7521	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
7522	if (kvm_cstate_in_guest(vcpu->kvm)) {
7523		vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C1_RES, MSR_TYPE_R);
7524		vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R);
7525		vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R);
7526		vmx_disable_intercept_for_msr(vcpu, MSR_CORE_C7_RESIDENCY, MSR_TYPE_R);
7527	}
7528
7529	vmx->loaded_vmcs = &vmx->vmcs01;
7530
7531	if (cpu_need_virtualize_apic_accesses(vcpu)) {
7532		err = kvm_alloc_apic_access_page(vcpu->kvm);
7533		if (err)
7534			goto free_vmcs;
7535	}
7536
7537	if (enable_ept && !enable_unrestricted_guest) {
7538		err = init_rmode_identity_map(vcpu->kvm);
7539		if (err)
7540			goto free_vmcs;
7541	}
7542
7543	if (vmx_can_use_ipiv(vcpu))
7544		WRITE_ONCE(to_kvm_vmx(vcpu->kvm)->pid_table[vcpu->vcpu_id],
7545			   __pa(&vmx->pi_desc) | PID_TABLE_ENTRY_VALID);
7546
7547	return 0;
7548
7549free_vmcs:
7550	free_loaded_vmcs(vmx->loaded_vmcs);
7551free_pml:
7552	vmx_destroy_pml_buffer(vmx);
7553free_vpid:
7554	free_vpid(vmx->vpid);
7555	return err;
7556}
7557
7558#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
7559#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
7560
7561static int vmx_vm_init(struct kvm *kvm)
7562{
7563	if (!ple_gap)
7564		kvm->arch.pause_in_guest = true;
7565
7566	if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
7567		switch (l1tf_mitigation) {
7568		case L1TF_MITIGATION_OFF:
7569		case L1TF_MITIGATION_FLUSH_NOWARN:
7570			/* 'I explicitly don't care' is set */
7571			break;
7572		case L1TF_MITIGATION_FLUSH:
7573		case L1TF_MITIGATION_FLUSH_NOSMT:
7574		case L1TF_MITIGATION_FULL:
7575			/*
7576			 * Warn upon starting the first VM in a potentially
7577			 * insecure environment.
7578			 */
7579			if (sched_smt_active())
7580				pr_warn_once(L1TF_MSG_SMT);
7581			if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
7582				pr_warn_once(L1TF_MSG_L1D);
7583			break;
7584		case L1TF_MITIGATION_FULL_FORCE:
7585			/* Flush is enforced */
7586			break;
7587		}
7588	}
7589	return 0;
7590}
7591
7592static u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
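/*
 * The value returned here is folded into leaf EPT entries (SPTEs): bits 5:3
 * hold the EPT memory type (hence VMX_EPT_MT_EPTE_SHIFT) and the IPAT bit
 * makes the CPU ignore guest PAT so the chosen type cannot be overridden.
 */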
7593{
7594	u8 cache;
7595
7596	/* We wanted to honor guest CD/MTRR/PAT, but doing so could result in
7597	 * memory aliases with conflicting memory types and sometimes MCEs.
7598	 * We have to be careful about what is honored and when.
7599	 *
7600	 * For MMIO, guest CD/MTRR are ignored.  The EPT memory type is set to
7601	 * UC.  The effective memory type is UC or WC depending on guest PAT.
7602	 * This was historically the source of MCEs and we want to be
7603	 * conservative.
7604	 *
7605	 * When there is no need to deal with noncoherent DMA (e.g., no VT-d
7606	 * or VT-d has snoop control), guest CD/MTRR/PAT are all ignored.  The
7607	 * EPT memory type is set to WB.  The effective memory type is forced
7608	 * WB.
7609	 *
7610	 * Otherwise, we trust guest.  Guest CD/MTRR/PAT are all honored.  The
7611	 * Otherwise, we trust the guest.  Guest CD/MTRR/PAT are all honored.  The
7612	 */
7613
7614	if (is_mmio)
7615		return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
7616
7617	if (!kvm_arch_has_noncoherent_dma(vcpu->kvm))
7618		return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
7619
7620	if (kvm_read_cr0_bits(vcpu, X86_CR0_CD)) {
7621		if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
7622			cache = MTRR_TYPE_WRBACK;
7623		else
7624			cache = MTRR_TYPE_UNCACHABLE;
7625
7626		return (cache << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
7627	}
7628
7629	return kvm_mtrr_get_guest_memory_type(vcpu, gfn) << VMX_EPT_MT_EPTE_SHIFT;
7630}
7631
7632static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx, u32 new_ctl)
7633{
7634	/*
7635	 * These bits in the secondary execution controls field
7636	 * are dynamic; the others are mostly based on the hypervisor
7637	 * architecture and the guest's CPUID.  Do not touch the
7638	 * dynamic bits.
7639	 */
7640	u32 mask =
7641		SECONDARY_EXEC_SHADOW_VMCS |
7642		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
7643		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
7644		SECONDARY_EXEC_DESC;
7645
7646	u32 cur_ctl = secondary_exec_controls_get(vmx);
7647
7648	secondary_exec_controls_set(vmx, (new_ctl & ~mask) | (cur_ctl & mask));
7649}
7650
7651/*
7652 * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits
7653 * (indicating "allowed-1") if they are supported in the guest's CPUID.
7654 */
7655static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
7656{
7657	struct vcpu_vmx *vmx = to_vmx(vcpu);
7658	struct kvm_cpuid_entry2 *entry;
7659
7660	vmx->nested.msrs.cr0_fixed1 = 0xffffffff;
7661	vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE;
7662
7663#define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do {		\
7664	if (entry && (entry->_reg & (_cpuid_mask)))			\
7665		vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask);	\
7666} while (0)
7667
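	/*
	 * For example, cr4_fixed1_update(X86_CR4_PCIDE, ecx, feature_bit(PCID))
	 * below marks CR4.PCIDE as allowed-1 for the nested guest only if
	 * CPUID.1:ECX.PCID is exposed to this vCPU.
	 */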
7668	entry = kvm_find_cpuid_entry(vcpu, 0x1);
7669	cr4_fixed1_update(X86_CR4_VME,        edx, feature_bit(VME));
7670	cr4_fixed1_update(X86_CR4_PVI,        edx, feature_bit(VME));
7671	cr4_fixed1_update(X86_CR4_TSD,        edx, feature_bit(TSC));
7672	cr4_fixed1_update(X86_CR4_DE,         edx, feature_bit(DE));
7673	cr4_fixed1_update(X86_CR4_PSE,        edx, feature_bit(PSE));
7674	cr4_fixed1_update(X86_CR4_PAE,        edx, feature_bit(PAE));
7675	cr4_fixed1_update(X86_CR4_MCE,        edx, feature_bit(MCE));
7676	cr4_fixed1_update(X86_CR4_PGE,        edx, feature_bit(PGE));
7677	cr4_fixed1_update(X86_CR4_OSFXSR,     edx, feature_bit(FXSR));
7678	cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, feature_bit(XMM));
7679	cr4_fixed1_update(X86_CR4_VMXE,       ecx, feature_bit(VMX));
7680	cr4_fixed1_update(X86_CR4_SMXE,       ecx, feature_bit(SMX));
7681	cr4_fixed1_update(X86_CR4_PCIDE,      ecx, feature_bit(PCID));
7682	cr4_fixed1_update(X86_CR4_OSXSAVE,    ecx, feature_bit(XSAVE));
7683
7684	entry = kvm_find_cpuid_entry_index(vcpu, 0x7, 0);
7685	cr4_fixed1_update(X86_CR4_FSGSBASE,   ebx, feature_bit(FSGSBASE));
7686	cr4_fixed1_update(X86_CR4_SMEP,       ebx, feature_bit(SMEP));
7687	cr4_fixed1_update(X86_CR4_SMAP,       ebx, feature_bit(SMAP));
7688	cr4_fixed1_update(X86_CR4_PKE,        ecx, feature_bit(PKU));
7689	cr4_fixed1_update(X86_CR4_UMIP,       ecx, feature_bit(UMIP));
7690	cr4_fixed1_update(X86_CR4_LA57,       ecx, feature_bit(LA57));
7691
7692#undef cr4_fixed1_update
7693}
7694
7695static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
7696{
7697	struct vcpu_vmx *vmx = to_vmx(vcpu);
7698	struct kvm_cpuid_entry2 *best = NULL;
7699	int i;
7700
7701	for (i = 0; i < PT_CPUID_LEAVES; i++) {
7702		best = kvm_find_cpuid_entry_index(vcpu, 0x14, i);
7703		if (!best)
7704			return;
7705		vmx->pt_desc.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM] = best->eax;
7706		vmx->pt_desc.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM] = best->ebx;
7707		vmx->pt_desc.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM] = best->ecx;
7708		vmx->pt_desc.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM] = best->edx;
7709	}
7710
7711	/* Get the number of configurable Address Ranges for filtering */
7712	vmx->pt_desc.num_address_ranges = intel_pt_validate_cap(vmx->pt_desc.caps,
7713						PT_CAP_num_address_ranges);
7714
7715	/* Initialize the bitmask; bits with no CPUID dependency are left clear */
7716	vmx->pt_desc.ctl_bitmask = ~(RTIT_CTL_TRACEEN | RTIT_CTL_OS |
7717			RTIT_CTL_USR | RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC |
7718			RTIT_CTL_BRANCH_EN);
7719
7720	/*
7721	 * If CPUID.(EAX=14H,ECX=0):EBX[0]=1, CR3Filter can be set; otherwise
7722	 * setting it will inject a #GP
7723	 */
7724	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_cr3_filtering))
7725		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_CR3EN;
7726
7727	/*
7728	 * If CPUID.(EAX=14H,ECX=0):EBX[1]=1 CYCEn, CycThresh and
7729	 * PSBFreq can be set
7730	 */
7731	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc))
7732		vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_CYCLEACC |
7733				RTIT_CTL_CYC_THRESH | RTIT_CTL_PSB_FREQ);
7734
7735	/*
7736	 * If CPUID.(EAX=14H,ECX=0):EBX[3]=1 MTCEn and MTCFreq can be set
7737	 */
7738	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc))
7739		vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_MTC_EN |
7740					      RTIT_CTL_MTC_RANGE);
7741
7742	/* If CPUID.(EAX=14H,ECX=0):EBX[4]=1 FUPonPTW and PTWEn can be set */
7743	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_ptwrite))
7744		vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_FUP_ON_PTW |
7745							RTIT_CTL_PTW_EN);
7746
7747	/* If CPUID.(EAX=14H,ECX=0):EBX[5]=1 PwrEvEn can be set */
7748	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_power_event_trace))
7749		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_PWR_EVT_EN;
7750
7751	/* If CPUID.(EAX=14H,ECX=0):ECX[0]=1 ToPA can be set */
7752	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output))
7753		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA;
7754
7755	/* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabricEn can be set */
7756	if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys))
7757		vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN;
7758
7759	/* Unmask the 4-bit ADDRn_CFG field for each supported address range */
7760	for (i = 0; i < vmx->pt_desc.num_address_ranges; i++)
7761		vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4));
7762}
7763
7764static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
7765{
7766	struct vcpu_vmx *vmx = to_vmx(vcpu);
7767
7768	/*
7769	 * XSAVES is effectively enabled if and only if XSAVE is also exposed
7770	 * to the guest.  XSAVES depends on CR4.OSXSAVE, and CR4.OSXSAVE can be
7771	 * set if and only if XSAVE is supported.
7772	 */
7773	if (boot_cpu_has(X86_FEATURE_XSAVE) &&
7774	    guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
7775		kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_XSAVES);
7776
7777	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VMX);
7778
7779	vmx_setup_uret_msrs(vmx);
7780
7781	if (cpu_has_secondary_exec_ctrls())
7782		vmcs_set_secondary_exec_control(vmx,
7783						vmx_secondary_exec_control(vmx));
7784
7785	if (guest_can_use(vcpu, X86_FEATURE_VMX))
7786		vmx->msr_ia32_feature_control_valid_bits |=
7787			FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
7788			FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
7789	else
7790		vmx->msr_ia32_feature_control_valid_bits &=
7791			~(FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
7792			  FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX);
7793
7794	if (guest_can_use(vcpu, X86_FEATURE_VMX))
7795		nested_vmx_cr_fixed1_bits_update(vcpu);
7796
7797	if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
7798			guest_cpuid_has(vcpu, X86_FEATURE_INTEL_PT))
7799		update_intel_pt_cfg(vcpu);
7800
7801	if (boot_cpu_has(X86_FEATURE_RTM)) {
7802		struct vmx_uret_msr *msr;
7803		msr = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
7804		if (msr) {
7805			bool enabled = guest_cpuid_has(vcpu, X86_FEATURE_RTM);
7806			vmx_set_guest_uret_msr(vmx, msr, enabled ? 0 : TSX_CTRL_RTM_DISABLE);
7807		}
7808	}
7809
7810	if (kvm_cpu_cap_has(X86_FEATURE_XFD))
7811		vmx_set_intercept_for_msr(vcpu, MSR_IA32_XFD_ERR, MSR_TYPE_R,
7812					  !guest_cpuid_has(vcpu, X86_FEATURE_XFD));
7813
7814	if (boot_cpu_has(X86_FEATURE_IBPB))
7815		vmx_set_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W,
7816					  !guest_has_pred_cmd_msr(vcpu));
7817
7818	if (boot_cpu_has(X86_FEATURE_FLUSH_L1D))
7819		vmx_set_intercept_for_msr(vcpu, MSR_IA32_FLUSH_CMD, MSR_TYPE_W,
7820					  !guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D));
7821
7822	set_cr4_guest_host_mask(vmx);
7823
7824	vmx_write_encls_bitmap(vcpu, NULL);
7825	if (guest_cpuid_has(vcpu, X86_FEATURE_SGX))
7826		vmx->msr_ia32_feature_control_valid_bits |= FEAT_CTL_SGX_ENABLED;
7827	else
7828		vmx->msr_ia32_feature_control_valid_bits &= ~FEAT_CTL_SGX_ENABLED;
7829
7830	if (guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC))
7831		vmx->msr_ia32_feature_control_valid_bits |=
7832			FEAT_CTL_SGX_LC_ENABLED;
7833	else
7834		vmx->msr_ia32_feature_control_valid_bits &=
7835			~FEAT_CTL_SGX_LC_ENABLED;
7836
7837	/* Refresh #PF interception to account for MAXPHYADDR changes. */
7838	vmx_update_exception_bitmap(vcpu);
7839}
7840
7841static u64 vmx_get_perf_capabilities(void)
7842{
7843	u64 perf_cap = PMU_CAP_FW_WRITES;
7844	struct x86_pmu_lbr lbr;
7845	u64 host_perf_cap = 0;
7846
7847	if (!enable_pmu)
7848		return 0;
7849
7850	if (boot_cpu_has(X86_FEATURE_PDCM))
7851		rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
7852
7853	if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR)) {
7854		x86_perf_get_lbr(&lbr);
7855		if (lbr.nr)
7856			perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
7857	}
7858
7859	if (vmx_pebs_supported()) {
7860		perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
7861		if ((perf_cap & PERF_CAP_PEBS_FORMAT) < 4)
7862			perf_cap &= ~PERF_CAP_PEBS_BASELINE;
7863	}
7864
7865	return perf_cap;
7866}
7867
7868static __init void vmx_set_cpu_caps(void)
7869{
7870	kvm_set_cpu_caps();
7871
7872	/* CPUID 0x1 */
7873	if (nested)
7874		kvm_cpu_cap_set(X86_FEATURE_VMX);
7875
7876	/* CPUID 0x7 */
7877	if (kvm_mpx_supported())
7878		kvm_cpu_cap_check_and_set(X86_FEATURE_MPX);
7879	if (!cpu_has_vmx_invpcid())
7880		kvm_cpu_cap_clear(X86_FEATURE_INVPCID);
7881	if (vmx_pt_mode_is_host_guest())
7882		kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT);
7883	if (vmx_pebs_supported()) {
7884		kvm_cpu_cap_check_and_set(X86_FEATURE_DS);
7885		kvm_cpu_cap_check_and_set(X86_FEATURE_DTES64);
7886	}
7887
7888	if (!enable_pmu)
7889		kvm_cpu_cap_clear(X86_FEATURE_PDCM);
7890	kvm_caps.supported_perf_cap = vmx_get_perf_capabilities();
7891
7892	if (!enable_sgx) {
7893		kvm_cpu_cap_clear(X86_FEATURE_SGX);
7894		kvm_cpu_cap_clear(X86_FEATURE_SGX_LC);
7895		kvm_cpu_cap_clear(X86_FEATURE_SGX1);
7896		kvm_cpu_cap_clear(X86_FEATURE_SGX2);
7897	}
7898
7899	if (vmx_umip_emulated())
7900		kvm_cpu_cap_set(X86_FEATURE_UMIP);
7901
7902	/* CPUID 0xD.1 */
7903	kvm_caps.supported_xss = 0;
7904	if (!cpu_has_vmx_xsaves())
7905		kvm_cpu_cap_clear(X86_FEATURE_XSAVES);
7906
7907	/* CPUID 0x80000001 and 0x7 (RDPID) */
7908	if (!cpu_has_vmx_rdtscp()) {
7909		kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
7910		kvm_cpu_cap_clear(X86_FEATURE_RDPID);
7911	}
7912
7913	if (cpu_has_vmx_waitpkg())
7914		kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
7915}
7916
7917static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
7918{
7919	to_vmx(vcpu)->req_immediate_exit = true;
7920}
7921
7922static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
7923				  struct x86_instruction_info *info)
7924{
7925	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
7926	unsigned short port;
7927	bool intercept;
7928	int size;
7929
7930	if (info->intercept == x86_intercept_in ||
7931	    info->intercept == x86_intercept_ins) {
7932		port = info->src_val;
7933		size = info->dst_bytes;
7934	} else {
7935		port = info->dst_val;
7936		size = info->src_bytes;
7937	}
7938
7939	/*
7940	 * If the 'use IO bitmaps' VM-execution control is 0, IO instruction
7941	 * VM-exits depend on the 'unconditional IO exiting' VM-execution
7942	 * control.
7943	 *
7944	 * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps.
7945	 */
7946	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
7947		intercept = nested_cpu_has(vmcs12,
7948					   CPU_BASED_UNCOND_IO_EXITING);
7949	else
7950		intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
7951
7952	/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED.  */
7953	return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
7954}
7955
7956static int vmx_check_intercept(struct kvm_vcpu *vcpu,
7957			       struct x86_instruction_info *info,
7958			       enum x86_intercept_stage stage,
7959			       struct x86_exception *exception)
7960{
7961	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
7962
7963	switch (info->intercept) {
7964	/*
7965	 * RDPID causes #UD if disabled through secondary execution controls.
7966	 * Because it is marked as EmulateOnUD, we need to intercept it here.
7967	 * Note, RDPID is hidden behind ENABLE_RDTSCP.
7968	 */
7969	case x86_intercept_rdpid:
7970		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_RDTSCP)) {
7971			exception->vector = UD_VECTOR;
7972			exception->error_code_valid = false;
7973			return X86EMUL_PROPAGATE_FAULT;
7974		}
7975		break;
7976
7977	case x86_intercept_in:
7978	case x86_intercept_ins:
7979	case x86_intercept_out:
7980	case x86_intercept_outs:
7981		return vmx_check_intercept_io(vcpu, info);
7982
7983	case x86_intercept_lgdt:
7984	case x86_intercept_lidt:
7985	case x86_intercept_lldt:
7986	case x86_intercept_ltr:
7987	case x86_intercept_sgdt:
7988	case x86_intercept_sidt:
7989	case x86_intercept_sldt:
7990	case x86_intercept_str:
7991		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC))
7992			return X86EMUL_CONTINUE;
7993
7994		/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED.  */
7995		break;
7996
7997	case x86_intercept_pause:
7998		/*
7999		 * PAUSE is a single-byte NOP with a REPE prefix, i.e. collides
8000		 * with vanilla NOPs in the emulator.  Apply the interception
8001		 * check only to actual PAUSE instructions.  Don't check
8002		 * PAUSE-loop-exiting, software can't expect a given PAUSE to
8003		 * exit, i.e. KVM is within its rights to allow L2 to execute
8004		 * the PAUSE.
8005		 */
8006		if ((info->rep_prefix != REPE_PREFIX) ||
8007		    !nested_cpu_has2(vmcs12, CPU_BASED_PAUSE_EXITING))
8008			return X86EMUL_CONTINUE;
8009
8010		break;
8011
8012	/* TODO: check more intercepts... */
8013	default:
8014		break;
8015	}
8016
8017	return X86EMUL_UNHANDLEABLE;
8018}
8019
8020#ifdef CONFIG_X86_64
8021/* (a << shift) / divisor, return 1 if overflow otherwise 0 */
8022static inline int u64_shl_div_u64(u64 a, unsigned int shift,
8023				  u64 divisor, u64 *result)
8024{
8025	u64 low = a << shift, high = a >> (64 - shift);
8026
8027	/* Avoid overflow on divq: the quotient must fit in 64 bits */
8028	if (high >= divisor)
8029		return 1;
8030
8031	/* low holds the result, high holds the remainder, which is discarded */
8032	asm("divq %2\n\t" : "=a" (low), "=d" (high) :
8033	    "rm" (divisor), "0" (low), "1" (high));
8034	*result = low;
8035
8036	return 0;
8037}
8038
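/*
 * vmx_set_hv_timer() converts an L1 TSC delta into a host TSC delta via
 * u64_shl_div_u64(), i.e. (delta << tsc_scaling_ratio_frac_bits) /
 * l1_tsc_scaling_ratio with frac_bits = 48; the "high >= divisor" check in
 * the helper rejects inputs whose 128-bit quotient would not fit in 64
 * bits and would otherwise raise #DE on the divq.
 */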
8039static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
8040			    bool *expired)
8041{
8042	struct vcpu_vmx *vmx;
8043	u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles;
8044	struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer;
8045
8046	vmx = to_vmx(vcpu);
8047	tscl = rdtsc();
8048	guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
8049	delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
8050	lapic_timer_advance_cycles = nsec_to_cycles(vcpu,
8051						    ktimer->timer_advance_ns);
8052
8053	if (delta_tsc > lapic_timer_advance_cycles)
8054		delta_tsc -= lapic_timer_advance_cycles;
8055	else
8056		delta_tsc = 0;
8057
8058	/* Convert to host delta tsc if tsc scaling is enabled */
8059	if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio &&
8060	    delta_tsc && u64_shl_div_u64(delta_tsc,
8061				kvm_caps.tsc_scaling_ratio_frac_bits,
8062				vcpu->arch.l1_tsc_scaling_ratio, &delta_tsc))
8063		return -ERANGE;
8064
8065	/*
8066	 * If the delta tsc can't fit in 32 bits after the multiplier shift,
8067	 * we can't use the preemption timer.
8068	 * It's possible that it fits on later vmentries, but checking
8069	 * on every vmentry is costly so we just use an hrtimer.
8070	 */
8071	if (delta_tsc >> (cpu_preemption_timer_multi + 32))
8072		return -ERANGE;
8073
8074	vmx->hv_deadline_tsc = tscl + delta_tsc;
8075	*expired = !delta_tsc;
8076	return 0;
8077}
8078
8079static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
8080{
8081	to_vmx(vcpu)->hv_deadline_tsc = -1;
8082}
8083#endif
8084
8085static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
8086{
8087	if (!kvm_pause_in_guest(vcpu->kvm))
8088		shrink_ple_window(vcpu);
8089}
8090
8091void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
8092{
8093	struct vcpu_vmx *vmx = to_vmx(vcpu);
8094
8095	if (WARN_ON_ONCE(!enable_pml))
8096		return;
8097
8098	if (is_guest_mode(vcpu)) {
8099		vmx->nested.update_vmcs01_cpu_dirty_logging = true;
8100		return;
8101	}
8102
8103	/*
8104	 * Note, nr_memslots_dirty_logging can be changed concurrently with this
8105	 * code, but in that case another update request will be made and so
8106	 * the guest will never run with a stale PML value.
8107	 */
8108	if (atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
8109		secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_ENABLE_PML);
8110	else
8111		secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_ENABLE_PML);
8112}
8113
8114static void vmx_setup_mce(struct kvm_vcpu *vcpu)
8115{
8116	if (vcpu->arch.mcg_cap & MCG_LMCE_P)
8117		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
8118			FEAT_CTL_LMCE_ENABLED;
8119	else
8120		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
8121			~FEAT_CTL_LMCE_ENABLED;
8122}
8123
8124#ifdef CONFIG_KVM_SMM
8125static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
8126{
8127	/* we need a nested vmexit to enter SMM, postpone if run is pending */
8128	if (to_vmx(vcpu)->nested.nested_run_pending)
8129		return -EBUSY;
8130	return !is_smm(vcpu);
8131}
8132
8133static int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
8134{
8135	struct vcpu_vmx *vmx = to_vmx(vcpu);
8136
8137	/*
8138	 * TODO: Implement custom flows for forcing the vCPU out/in of L2 on
8139	 * SMI and RSM.  Using the common VM-Exit + VM-Enter routines is wrong;
8140	 * SMI and RSM only modify state that is saved and restored via SMRAM.
8141	 * E.g. most MSRs are left untouched, but many are modified by VM-Exit
8142	 * and VM-Enter, and thus L2's values may be corrupted on SMI+RSM.
8143	 */
8144	vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
8145	if (vmx->nested.smm.guest_mode)
8146		nested_vmx_vmexit(vcpu, -1, 0, 0);
8147
8148	vmx->nested.smm.vmxon = vmx->nested.vmxon;
8149	vmx->nested.vmxon = false;
8150	vmx_clear_hlt(vcpu);
8151	return 0;
8152}
8153
8154static int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
8155{
8156	struct vcpu_vmx *vmx = to_vmx(vcpu);
8157	int ret;
8158
8159	if (vmx->nested.smm.vmxon) {
8160		vmx->nested.vmxon = true;
8161		vmx->nested.smm.vmxon = false;
8162	}
8163
8164	if (vmx->nested.smm.guest_mode) {
8165		ret = nested_vmx_enter_non_root_mode(vcpu, false);
8166		if (ret)
8167			return ret;
8168
8169		vmx->nested.nested_run_pending = 1;
8170		vmx->nested.smm.guest_mode = false;
8171	}
8172	return 0;
8173}
8174
8175static void vmx_enable_smi_window(struct kvm_vcpu *vcpu)
8176{
8177	/* RSM will cause a vmexit anyway.  */
8178}
8179#endif
8180
8181static bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
8182{
8183	return to_vmx(vcpu)->nested.vmxon && !is_guest_mode(vcpu);
8184}
8185
8186static void vmx_migrate_timers(struct kvm_vcpu *vcpu)
8187{
8188	if (is_guest_mode(vcpu)) {
8189		struct hrtimer *timer = &to_vmx(vcpu)->nested.preemption_timer;
8190
8191		if (hrtimer_try_to_cancel(timer) == 1)
8192			hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
8193	}
8194}
8195
8196static void vmx_hardware_unsetup(void)
8197{
8198	kvm_set_posted_intr_wakeup_handler(NULL);
8199
8200	if (nested)
8201		nested_vmx_hardware_unsetup();
8202
8203	free_kvm_area();
8204}
8205
8206#define VMX_REQUIRED_APICV_INHIBITS			\
8207(							\
8208	BIT(APICV_INHIBIT_REASON_DISABLE)|		\
8209	BIT(APICV_INHIBIT_REASON_ABSENT) |		\
8210	BIT(APICV_INHIBIT_REASON_HYPERV) |		\
8211	BIT(APICV_INHIBIT_REASON_BLOCKIRQ) |		\
8212	BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED) |	\
8213	BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) |	\
8214	BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED)	\
8215)
8216
8217static void vmx_vm_destroy(struct kvm *kvm)
8218{
8219	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
8220
8221	free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm));
8222}
8223
8224static struct kvm_x86_ops vmx_x86_ops __initdata = {
8225	.name = KBUILD_MODNAME,
8226
8227	.check_processor_compatibility = vmx_check_processor_compat,
8228
8229	.hardware_unsetup = vmx_hardware_unsetup,
8230
8231	.hardware_enable = vmx_hardware_enable,
8232	.hardware_disable = vmx_hardware_disable,
8233	.has_emulated_msr = vmx_has_emulated_msr,
8234
8235	.vm_size = sizeof(struct kvm_vmx),
8236	.vm_init = vmx_vm_init,
8237	.vm_destroy = vmx_vm_destroy,
8238
8239	.vcpu_precreate = vmx_vcpu_precreate,
8240	.vcpu_create = vmx_vcpu_create,
8241	.vcpu_free = vmx_vcpu_free,
8242	.vcpu_reset = vmx_vcpu_reset,
8243
8244	.prepare_switch_to_guest = vmx_prepare_switch_to_guest,
8245	.vcpu_load = vmx_vcpu_load,
8246	.vcpu_put = vmx_vcpu_put,
8247
8248	.update_exception_bitmap = vmx_update_exception_bitmap,
8249	.get_msr_feature = vmx_get_msr_feature,
8250	.get_msr = vmx_get_msr,
8251	.set_msr = vmx_set_msr,
8252	.get_segment_base = vmx_get_segment_base,
8253	.get_segment = vmx_get_segment,
8254	.set_segment = vmx_set_segment,
8255	.get_cpl = vmx_get_cpl,
8256	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
8257	.is_valid_cr0 = vmx_is_valid_cr0,
8258	.set_cr0 = vmx_set_cr0,
8259	.is_valid_cr4 = vmx_is_valid_cr4,
8260	.set_cr4 = vmx_set_cr4,
8261	.set_efer = vmx_set_efer,
8262	.get_idt = vmx_get_idt,
8263	.set_idt = vmx_set_idt,
8264	.get_gdt = vmx_get_gdt,
8265	.set_gdt = vmx_set_gdt,
8266	.set_dr7 = vmx_set_dr7,
8267	.sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
8268	.cache_reg = vmx_cache_reg,
8269	.get_rflags = vmx_get_rflags,
8270	.set_rflags = vmx_set_rflags,
8271	.get_if_flag = vmx_get_if_flag,
8272
8273	.flush_tlb_all = vmx_flush_tlb_all,
8274	.flush_tlb_current = vmx_flush_tlb_current,
8275	.flush_tlb_gva = vmx_flush_tlb_gva,
8276	.flush_tlb_guest = vmx_flush_tlb_guest,
8277
8278	.vcpu_pre_run = vmx_vcpu_pre_run,
8279	.vcpu_run = vmx_vcpu_run,
8280	.handle_exit = vmx_handle_exit,
8281	.skip_emulated_instruction = vmx_skip_emulated_instruction,
8282	.update_emulated_instruction = vmx_update_emulated_instruction,
8283	.set_interrupt_shadow = vmx_set_interrupt_shadow,
8284	.get_interrupt_shadow = vmx_get_interrupt_shadow,
8285	.patch_hypercall = vmx_patch_hypercall,
8286	.inject_irq = vmx_inject_irq,
8287	.inject_nmi = vmx_inject_nmi,
8288	.inject_exception = vmx_inject_exception,
8289	.cancel_injection = vmx_cancel_injection,
8290	.interrupt_allowed = vmx_interrupt_allowed,
8291	.nmi_allowed = vmx_nmi_allowed,
8292	.get_nmi_mask = vmx_get_nmi_mask,
8293	.set_nmi_mask = vmx_set_nmi_mask,
8294	.enable_nmi_window = vmx_enable_nmi_window,
8295	.enable_irq_window = vmx_enable_irq_window,
8296	.update_cr8_intercept = vmx_update_cr8_intercept,
8297	.set_virtual_apic_mode = vmx_set_virtual_apic_mode,
8298	.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
8299	.refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
8300	.load_eoi_exitmap = vmx_load_eoi_exitmap,
8301	.apicv_pre_state_restore = vmx_apicv_pre_state_restore,
8302	.required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
8303	.hwapic_irr_update = vmx_hwapic_irr_update,
8304	.hwapic_isr_update = vmx_hwapic_isr_update,
8305	.guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
8306	.sync_pir_to_irr = vmx_sync_pir_to_irr,
8307	.deliver_interrupt = vmx_deliver_interrupt,
8308	.dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,
8309
8310	.set_tss_addr = vmx_set_tss_addr,
8311	.set_identity_map_addr = vmx_set_identity_map_addr,
8312	.get_mt_mask = vmx_get_mt_mask,
8313
8314	.get_exit_info = vmx_get_exit_info,
8315
8316	.vcpu_after_set_cpuid = vmx_vcpu_after_set_cpuid,
8317
8318	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
8319
8320	.get_l2_tsc_offset = vmx_get_l2_tsc_offset,
8321	.get_l2_tsc_multiplier = vmx_get_l2_tsc_multiplier,
8322	.write_tsc_offset = vmx_write_tsc_offset,
8323	.write_tsc_multiplier = vmx_write_tsc_multiplier,
8324
8325	.load_mmu_pgd = vmx_load_mmu_pgd,
8326
8327	.check_intercept = vmx_check_intercept,
8328	.handle_exit_irqoff = vmx_handle_exit_irqoff,
8329
8330	.request_immediate_exit = vmx_request_immediate_exit,
8331
8332	.sched_in = vmx_sched_in,
8333
8334	.cpu_dirty_log_size = PML_ENTITY_NUM,
8335	.update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,
8336
8337	.nested_ops = &vmx_nested_ops,
8338
8339	.pi_update_irte = vmx_pi_update_irte,
8340	.pi_start_assignment = vmx_pi_start_assignment,
8341
8342#ifdef CONFIG_X86_64
8343	.set_hv_timer = vmx_set_hv_timer,
8344	.cancel_hv_timer = vmx_cancel_hv_timer,
8345#endif
8346
8347	.setup_mce = vmx_setup_mce,
8348
8349#ifdef CONFIG_KVM_SMM
8350	.smi_allowed = vmx_smi_allowed,
8351	.enter_smm = vmx_enter_smm,
8352	.leave_smm = vmx_leave_smm,
8353	.enable_smi_window = vmx_enable_smi_window,
8354#endif
8355
8356	.can_emulate_instruction = vmx_can_emulate_instruction,
8357	.apic_init_signal_blocked = vmx_apic_init_signal_blocked,
8358	.migrate_timers = vmx_migrate_timers,
8359
8360	.msr_filter_changed = vmx_msr_filter_changed,
8361	.complete_emulated_msr = kvm_complete_insn_gp,
8362
8363	.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
8364};
8365
8366static unsigned int vmx_handle_intel_pt_intr(void)
8367{
8368	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
8369
8370	/* '0' on failure so that the !PT case can use a RET0 static call. */
8371	if (!vcpu || !kvm_handling_nmi_from_guest(vcpu))
8372		return 0;
8373
8374	kvm_make_request(KVM_REQ_PMI, vcpu);
8375	__set_bit(MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT,
8376		  (unsigned long *)&vcpu->arch.pmu.global_status);
8377	return 1;
8378}
8379
8380static __init void vmx_setup_user_return_msrs(void)
8381{
8382
8383	/*
8384	 * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm
8385	 * will emulate SYSCALL in legacy mode if the vendor string in guest
8386	 * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!" To
8387	 * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!".  To
8388	 * but is never loaded into hardware.  MSR_CSTAR is also never loaded
8389	 * into hardware and is here purely for emulation purposes.
8390	 */
8391	const u32 vmx_uret_msrs_list[] = {
8392	#ifdef CONFIG_X86_64
8393		MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
8394	#endif
8395		MSR_EFER, MSR_TSC_AUX, MSR_STAR,
8396		MSR_IA32_TSX_CTRL,
8397	};
8398	int i;
8399
8400	BUILD_BUG_ON(ARRAY_SIZE(vmx_uret_msrs_list) != MAX_NR_USER_RETURN_MSRS);
8401
8402	for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i)
8403		kvm_add_user_return_msr(vmx_uret_msrs_list[i]);
8404}
8405
8406static void __init vmx_setup_me_spte_mask(void)
8407{
8408	u64 me_mask = 0;
8409
8410	/*
8411	 * kvm_get_shadow_phys_bits() returns shadow_phys_bits.  Use
8412	 * the former to avoid exposing shadow_phys_bits.
8413	 *
8414	 * On pre-MKTME systems, boot_cpu_data.x86_phys_bits equals
8415	 * shadow_phys_bits.  On MKTME and/or TDX capable systems,
8416	 * boot_cpu_data.x86_phys_bits holds the number of physical address
8417	 * bits w/o the KeyID bits, and shadow_phys_bits equals MAXPHYADDR
8418	 * reported by CPUID.  The bits in between are the KeyID bits.
8419	 */
8420	if (boot_cpu_data.x86_phys_bits != kvm_get_shadow_phys_bits())
8421		me_mask = rsvd_bits(boot_cpu_data.x86_phys_bits,
8422			kvm_get_shadow_phys_bits() - 1);
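	/*
	 * E.g. a (hypothetical) part reporting MAXPHYADDR = 46 with
	 * x86_phys_bits = 39 has 7 KeyID bits, so me_mask covers bits 45:39.
	 */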
8423	/*
8424	 * Unlike SME, the host kernel doesn't support setting up any
8425	 * MKTME KeyID on Intel platforms.  No memory encryption
8426	 * bits should be included in the SPTE.
8427	 */
8428	kvm_mmu_set_me_spte_mask(0, me_mask);
8429}
8430
8431static struct kvm_x86_init_ops vmx_init_ops __initdata;
8432
8433static __init int hardware_setup(void)
8434{
8435	unsigned long host_bndcfgs;
8436	struct desc_ptr dt;
8437	int r;
8438
8439	store_idt(&dt);
8440	host_idt_base = dt.address;
8441
8442	vmx_setup_user_return_msrs();
8443
8444	if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0)
8445		return -EIO;
8446
8447	if (cpu_has_perf_global_ctrl_bug())
8448		pr_warn_once("VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
8449			     "does not work properly. Using workaround\n");
8450
8451	if (boot_cpu_has(X86_FEATURE_NX))
8452		kvm_enable_efer_bits(EFER_NX);
8453
8454	if (boot_cpu_has(X86_FEATURE_MPX)) {
8455		rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs);
8456		WARN_ONCE(host_bndcfgs, "BNDCFGS in host will be lost");
8457	}
8458
8459	if (!cpu_has_vmx_mpx())
8460		kvm_caps.supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS |
8461					     XFEATURE_MASK_BNDCSR);
8462
8463	if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
8464	    !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
8465		enable_vpid = 0;
8466
8467	if (!cpu_has_vmx_ept() ||
8468	    !cpu_has_vmx_ept_4levels() ||
8469	    !cpu_has_vmx_ept_mt_wb() ||
8470	    !cpu_has_vmx_invept_global())
8471		enable_ept = 0;
8472
8473	/* NX support is required for shadow paging. */
8474	if (!enable_ept && !boot_cpu_has(X86_FEATURE_NX)) {
8475		pr_err_ratelimited("NX (Execute Disable) not supported\n");
8476		return -EOPNOTSUPP;
8477	}
8478
8479	if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
8480		enable_ept_ad_bits = 0;
8481
8482	if (!cpu_has_vmx_unrestricted_guest() || !enable_ept)
8483		enable_unrestricted_guest = 0;
8484
8485	if (!cpu_has_vmx_flexpriority())
8486		flexpriority_enabled = 0;
8487
8488	if (!cpu_has_virtual_nmis())
8489		enable_vnmi = 0;
8490
8491#ifdef CONFIG_X86_SGX_KVM
8492	if (!cpu_has_vmx_encls_vmexit())
8493		enable_sgx = false;
8494#endif
8495
8496	/*
8497	 * set_apic_access_page_addr() is used to reload the APIC access
8498	 * page upon invalidation.  No need to do anything if not
8499	 * using the APIC_ACCESS_ADDR VMCS field.
8500	 */
8501	if (!flexpriority_enabled)
8502		vmx_x86_ops.set_apic_access_page_addr = NULL;
8503
8504	if (!cpu_has_vmx_tpr_shadow())
8505		vmx_x86_ops.update_cr8_intercept = NULL;
8506
8507#if IS_ENABLED(CONFIG_HYPERV)
8508	if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
8509	    && enable_ept) {
8510		vmx_x86_ops.flush_remote_tlbs = hv_flush_remote_tlbs;
8511		vmx_x86_ops.flush_remote_tlbs_range = hv_flush_remote_tlbs_range;
8512	}
8513#endif
8514
8515	if (!cpu_has_vmx_ple()) {
8516		ple_gap = 0;
8517		ple_window = 0;
8518		ple_window_grow = 0;
8519		ple_window_max = 0;
8520		ple_window_shrink = 0;
8521	}
8522
8523	if (!cpu_has_vmx_apicv())
8524		enable_apicv = 0;
8525	if (!enable_apicv)
8526		vmx_x86_ops.sync_pir_to_irr = NULL;
8527
8528	if (!enable_apicv || !cpu_has_vmx_ipiv())
8529		enable_ipiv = false;
8530
8531	if (cpu_has_vmx_tsc_scaling())
8532		kvm_caps.has_tsc_control = true;
8533
8534	kvm_caps.max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
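	/*
	 * The VMX TSC multiplier is a fixed-point value with 48 fractional
	 * bits, i.e. guest TSC = (host TSC * ratio) >> 48.
	 */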
8535	kvm_caps.tsc_scaling_ratio_frac_bits = 48;
8536	kvm_caps.has_bus_lock_exit = cpu_has_vmx_bus_lock_detection();
8537	kvm_caps.has_notify_vmexit = cpu_has_notify_vmexit();
8538
8539	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
8540
8541	if (enable_ept)
8542		kvm_mmu_set_ept_masks(enable_ept_ad_bits,
8543				      cpu_has_vmx_ept_execute_only());
8544
8545	/*
8546	 * Set up shadow_me_value/shadow_me_mask so that the MKTME KeyID
8547	 * bits are included in shadow_zero_check.
8548	 */
8549	vmx_setup_me_spte_mask();
8550
8551	kvm_configure_mmu(enable_ept, 0, vmx_get_max_ept_level(),
8552			  ept_caps_to_lpage_level(vmx_capability.ept));
8553
8554	/*
8555	 * Only enable PML when hardware supports PML feature, and both EPT
8556	 * and EPT A/D bit features are enabled -- PML depends on them to work.
8557	 */
8558	if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
8559		enable_pml = 0;
8560
8561	if (!enable_pml)
8562		vmx_x86_ops.cpu_dirty_log_size = 0;
8563
8564	if (!cpu_has_vmx_preemption_timer())
8565		enable_preemption_timer = false;
8566
8567	if (enable_preemption_timer) {
8568		u64 use_timer_freq = 5000ULL * 1000 * 1000;
8569
8570		cpu_preemption_timer_multi =
8571			vmcs_config.misc & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
8572
8573		if (tsc_khz)
8574			use_timer_freq = (u64)tsc_khz * 1000;
8575		use_timer_freq >>= cpu_preemption_timer_multi;
8576
8577		/*
8578		 * KVM "disables" the preemption timer by setting it to its max
8579		 * value.  Don't use the timer if it might cause spurious exits
8580		 * at a rate faster than 0.1 Hz (of uninterrupted guest time).
8581		 */
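		/*
		 * E.g. a 3 GHz TSC with a rate divider of 2^5 gives a timer
		 * frequency of ~93.75 MHz, so the 32-bit maximum value covers
		 * roughly 45 seconds and the timer remains usable.
		 */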
8582		if (use_timer_freq > 0xffffffffu / 10)
8583			enable_preemption_timer = false;
8584	}
8585
8586	if (!enable_preemption_timer) {
8587		vmx_x86_ops.set_hv_timer = NULL;
8588		vmx_x86_ops.cancel_hv_timer = NULL;
8589		vmx_x86_ops.request_immediate_exit = __kvm_request_immediate_exit;
8590	}
8591
8592	kvm_caps.supported_mce_cap |= MCG_LMCE_P;
8593	kvm_caps.supported_mce_cap |= MCG_CMCI_P;
8594
8595	if (pt_mode != PT_MODE_SYSTEM && pt_mode != PT_MODE_HOST_GUEST)
8596		return -EINVAL;
8597	if (!enable_ept || !enable_pmu || !cpu_has_vmx_intel_pt())
8598		pt_mode = PT_MODE_SYSTEM;
8599	if (pt_mode == PT_MODE_HOST_GUEST)
8600		vmx_init_ops.handle_intel_pt_intr = vmx_handle_intel_pt_intr;
8601	else
8602		vmx_init_ops.handle_intel_pt_intr = NULL;
8603
8604	setup_default_sgx_lepubkeyhash();
8605
8606	if (nested) {
8607		nested_vmx_setup_ctls_msrs(&vmcs_config, vmx_capability.ept);
8608
8609		r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers);
8610		if (r)
8611			return r;
8612	}
8613
8614	vmx_set_cpu_caps();
8615
8616	r = alloc_kvm_area();
8617	if (r && nested)
8618		nested_vmx_hardware_unsetup();
8619
8620	kvm_set_posted_intr_wakeup_handler(pi_wakeup_handler);
8621
8622	return r;
8623}
8624
8625static struct kvm_x86_init_ops vmx_init_ops __initdata = {
8626	.hardware_setup = hardware_setup,
8627	.handle_intel_pt_intr = NULL,
8628
8629	.runtime_ops = &vmx_x86_ops,
8630	.pmu_ops = &intel_pmu_ops,
8631};
8632
8633static void vmx_cleanup_l1d_flush(void)
8634{
8635	if (vmx_l1d_flush_pages) {
8636		free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
8637		vmx_l1d_flush_pages = NULL;
8638	}
8639	/* Restore state so sysfs ignores VMX */
8640	l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
8641}
8642
8643static void __vmx_exit(void)
8644{
8645	allow_smaller_maxphyaddr = false;
8646
8647	cpu_emergency_unregister_virt_callback(vmx_emergency_disable);
8648
8649	vmx_cleanup_l1d_flush();
8650}
8651
8652static void vmx_exit(void)
8653{
8654	kvm_exit();
8655	kvm_x86_vendor_exit();
8656
8657	__vmx_exit();
8658}
8659module_exit(vmx_exit);
8660
8661static int __init vmx_init(void)
8662{
8663	int r, cpu;
8664
8665	if (!kvm_is_vmx_supported())
8666		return -EOPNOTSUPP;
8667
8668	/*
8669	 * Note, hv_init_evmcs() touches only VMX knobs, i.e. there's nothing
8670	 * to unwind if a later step fails.
8671	 */
8672	hv_init_evmcs();
8673
8674	r = kvm_x86_vendor_init(&vmx_init_ops);
8675	if (r)
8676		return r;
8677
8678	/*
8679	 * Must be called after common x86 init so enable_ept is properly set
8680	 * up. Hand in the mitigation parameter value that was stored by
8681	 * the pre-module-init parser. If no parameter was given, it will
8682	 * contain 'auto' which will be turned into the default 'cond'
8683	 * mitigation mode.
8684	 */
8685	r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
8686	if (r)
8687		goto err_l1d_flush;
8688
8689	for_each_possible_cpu(cpu) {
8690		INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
8691
8692		pi_init_cpu(cpu);
8693	}
8694
8695	cpu_emergency_register_virt_callback(vmx_emergency_disable);
8696
8697	vmx_check_vmcs12_offsets();
8698
8699	/*
8700	 * Shadow paging doesn't have a (further) performance penalty
8701	 * from GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable it
8702	 * by default.
8703	 */
8704	if (!enable_ept)
8705		allow_smaller_maxphyaddr = true;
8706
8707	/*
8708	 * Common KVM initialization _must_ come last, after this, /dev/kvm is
8709	 * exposed to userspace!
8710	 */
8711	r = kvm_init(sizeof(struct vcpu_vmx), __alignof__(struct vcpu_vmx),
8712		     THIS_MODULE);
8713	if (r)
8714		goto err_kvm_init;
8715
8716	return 0;
8717
8718err_kvm_init:
8719	__vmx_exit();
8720err_l1d_flush:
8721	kvm_x86_vendor_exit();
8722	return r;
8723}
8724module_init(vmx_init);
8725