xref: /kernel/linux/linux-5.10/arch/x86/kvm/svm/nested.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"

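/*
 * Reflect a nested page fault that KVM's MMU raised while running L2 back
 * into L1 as an SVM_EXIT_NPF vmexit.
 */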
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	nested_svm_vmexit(svm);
}

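/*
 * Deliver a #PF that was generated while running L2: either reflect it to
 * L1 as an exception vmexit if L1 intercepts #PF, or inject it into L2.
 */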
static void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON(!is_guest_mode(vcpu));

	if (vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
	    !svm->nested.nested_run_pending) {
		svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = fault->error_code;
		svm->vmcb->control.exit_info_2 = fault->address;
		nested_svm_vmexit(svm);
	} else {
		kvm_inject_page_fault(vcpu, fault);
	}
}

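/*
 * Read PDPTE @index from the PAE page-directory-pointer table addressed
 * by the nested_cr3 that L1 provided in vmcb12's control area.
 */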
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.ctl.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.nested_cr3;
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *hsave = svm->nested.hsave;

	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, hsave->save.cr4, hsave->save.efer,
				svm->nested.ctl.nested_cr3);
	vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

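/*
 * Recompute svm->vmcb's intercept bitmap.  Outside guest mode the VMCB
 * already holds L1's intercepts; in guest mode the host (L1) and guest
 * (L2) intercepts are merged, with a few bits adjusted because L0
 * handles them itself.
 */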
void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h, *g;
	unsigned int i;

	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested.ctl;

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] = h->intercepts[i];

	if (g->int_ctl & V_INTR_MASKING_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
		vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		vmcb_clr_intercept(c, INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	vmcb_clr_intercept(c, INTERCEPT_VMMCALL);

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] |= g->intercepts[i];

	vmcb_set_intercept(c, INTERCEPT_VMLOAD);
	vmcb_set_intercept(c, INTERCEPT_VMSAVE);
}

static void copy_vmcb_control_area(struct vmcb_control_area *dst,
				   struct vmcb_control_area *from)
{
	unsigned int i;

	for (i = 0; i < MAX_INTERCEPT; i++)
		dst->intercepts[i] = from->intercepts[i];

	dst->iopm_base_pa         = from->iopm_base_pa;
	dst->msrpm_base_pa        = from->msrpm_base_pa;
	dst->tsc_offset           = from->tsc_offset;
	/* asid not copied, it is handled manually for svm->vmcb.  */
	dst->tlb_ctl              = from->tlb_ctl;
	dst->int_ctl              = from->int_ctl;
	dst->int_vector           = from->int_vector;
	dst->int_state            = from->int_state;
	dst->exit_code            = from->exit_code;
	dst->exit_code_hi         = from->exit_code_hi;
	dst->exit_info_1          = from->exit_info_1;
	dst->exit_info_2          = from->exit_info_2;
	dst->exit_int_info        = from->exit_int_info;
	dst->exit_int_info_err    = from->exit_int_info_err;
	dst->nested_ctl           = from->nested_ctl;
	dst->event_inj            = from->event_inj;
	dst->event_inj_err        = from->event_inj_err;
	dst->nested_cr3           = from->nested_cr3;
	dst->virt_ext             = from->virt_ext;
	dst->pause_filter_count   = from->pause_filter_count;
	dst->pause_filter_thresh  = from->pause_filter_thresh;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the MSR permission bitmaps of KVM and the
	 * nested VMCB.  It is optimized in that it only merges the parts
	 * where the KVM MSR permission bitmap may contain zero bits.
	 */
	int i;

	if (!vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p      = msrpm_offsets[i];
		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}

static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (WARN_ON(!is_guest_mode(vcpu)))
		return true;

	if (!nested_svm_vmrun_msrpm(svm)) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror =
			KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		return false;
	}

	return true;
}

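/*
 * Consistency checks on vmcb12's control area; on an emulated VMRUN a
 * failed check makes the VMRUN exit with SVM_EXIT_ERR (see
 * nested_svm_vmrun).
 */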
static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
{
	if (!vmcb_is_intercept(control, INTERCEPT_VMRUN))
		return false;

	if (control->asid == 0)
		return false;

	if ((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
	    !npt_enabled)
		return false;

	return true;
}

static bool nested_vmcb_check_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	bool vmcb12_lma;

	/*
	 * FIXME: these should be done after copying the fields,
	 * to avoid TOCTOU (time-of-check to time-of-use) races.  For
	 * these save area checks the possible damage is limited since
	 * kvm_set_cr0 and kvm_set_cr4 handle failure; EFER_SVME is an
	 * exception, so it is force-set later in nested_prepare_vmcb_save.
	 */
	if ((vmcb12->save.efer & EFER_SVME) == 0)
		return false;

	if (((vmcb12->save.cr0 & X86_CR0_CD) == 0) && (vmcb12->save.cr0 & X86_CR0_NW))
		return false;

	if (!kvm_dr6_valid(vmcb12->save.dr6) || !kvm_dr7_valid(vmcb12->save.dr7))
		return false;

	vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);

	if (vmcb12_lma) {
		if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
		    !(vmcb12->save.cr0 & X86_CR0_PE) ||
		    (vmcb12->save.cr3 & vcpu->arch.cr3_lm_rsvd_bits))
			return false;
	}
	if (kvm_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
		return false;

	return true;
}

static void load_nested_vmcb_control(struct vcpu_svm *svm,
				     struct vmcb_control_area *control)
{
	copy_vmcb_control_area(&svm->nested.ctl, control);

	/* Copy it here because nested_vmcb_check_controls will check it.  */
	svm->nested.ctl.asid           = control->asid;
	svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
	svm->nested.ctl.iopm_base_pa  &= ~0x0fffULL;
}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the vmcb12.
 */
void sync_nested_vmcb_control(struct vcpu_svm *svm)
{
	u32 mask;

	svm->nested.ctl.event_inj      = svm->vmcb->control.event_inj;
	svm->nested.ctl.event_inj_err  = svm->vmcb->control.event_inj_err;

	/* Only a few fields of int_ctl are written by the processor.  */
	mask = V_IRQ_MASK | V_TPR_MASK;
	if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
	    svm_is_intercept(svm, INTERCEPT_VINTR)) {
		/*
		 * In order to request an interrupt window, L0 is usurping
		 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
		 * even if it was clear in L1's VMCB.  Restoring it would be
		 * wrong.  However, in this case V_IRQ will remain true until
		 * interrupt_window_interception calls svm_clear_vintr and
		 * restores int_ctl.  We can just leave it aside.
		 */
		mask &= ~V_IRQ_MASK;
	}
	svm->nested.ctl.int_ctl        &= ~mask;
	svm->nested.ctl.int_ctl        |= svm->vmcb->control.int_ctl & mask;
}

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_vmcb_save_pending_event(struct vcpu_svm *svm,
					   struct vmcb *vmcb12)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 exit_int_info = 0;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

		if (vcpu->arch.exception.has_error_code) {
			exit_int_info |= SVM_EVTINJ_VALID_ERR;
			vmcb12->control.exit_int_info_err =
				vcpu->arch.exception.error_code;
		}

	} else if (vcpu->arch.nmi_injected) {
		exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID;

		if (vcpu->arch.interrupt.soft)
			exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
		else
			exit_int_info |= SVM_EVTINJ_TYPE_INTR;
	}

	vmcb12->control.exit_int_info = exit_int_info;
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

/*
 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt)
{
	if (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63))
		return -EINVAL;

	if (!nested_npt && is_pae_paging(vcpu) &&
	    (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) {
		if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
			return -EINVAL;
	}

	/*
	 * TODO: optimize unconditional TLB flush/MMU sync here and in
	 * kvm_init_shadow_npt_mmu().
	 */
	if (!nested_npt)
		kvm_mmu_new_pgd(vcpu, cr3, false, false);

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

	kvm_init_mmu(vcpu, false);

	return 0;
}

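/*
 * Copy the guest (L2) state from vmcb12 into the current VMCB and the
 * vcpu's registers in preparation for running L2.
 */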
static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	/* Load the nested guest state */
	svm->vmcb->save.es = vmcb12->save.es;
	svm->vmcb->save.cs = vmcb12->save.cs;
	svm->vmcb->save.ss = vmcb12->save.ss;
	svm->vmcb->save.ds = vmcb12->save.ds;
	svm->vmcb->save.gdtr = vmcb12->save.gdtr;
	svm->vmcb->save.idtr = vmcb12->save.idtr;
	kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags);

	/*
	 * Force-set EFER_SVME even though it is checked earlier on the
	 * VMCB12, because the guest can flip the bit between the check
	 * and now.  Clearing EFER_SVME would call svm_free_nested.
	 */
	svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);

	svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
	svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
	kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
	kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
	kvm_rip_write(&svm->vcpu, vmcb12->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = vmcb12->save.rax;
	svm->vmcb->save.rsp = vmcb12->save.rsp;
	svm->vmcb->save.rip = vmcb12->save.rip;
	svm->vmcb->save.dr7 = vmcb12->save.dr7;
	svm->vcpu.arch.dr6  = vmcb12->save.dr6;
	svm->vmcb->save.cpl = vmcb12->save.cpl;
}

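/*
 * Build the control area used while running L2 from svm->nested.ctl and
 * the relevant L1 (hsave) bits, then enter guest mode and re-merge the
 * intercepts.
 */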
static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
{
	const u32 int_ctl_vmcb01_bits =
		V_INTR_MASKING_MASK | V_GIF_MASK | V_GIF_ENABLE_MASK;

	const u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;

	if (nested_npt_enabled(svm))
		nested_svm_init_mmu_context(&svm->vcpu);

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;

	svm->vmcb->control.int_ctl =
		(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
		(svm->nested.hsave->control.int_ctl & int_ctl_vmcb01_bits);

	svm->vmcb->control.int_vector          = svm->nested.ctl.int_vector;
	svm->vmcb->control.int_state           = svm->nested.ctl.int_state;
	svm->vmcb->control.event_inj           = svm->nested.ctl.event_inj;
	svm->vmcb->control.event_inj_err       = svm->nested.ctl.event_inj_err;

	svm->vmcb->control.pause_filter_count  = svm->nested.ctl.pause_filter_count;
	svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;

	/* Enter Guest-Mode */
	enter_guest_mode(&svm->vcpu);

	/*
	 * Merge guest and host intercepts - must be called with the vcpu in
	 * guest mode to take effect here.
	 */
	recalc_intercepts(svm);

	vmcb_mark_all_dirty(svm->vmcb);
}

int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
			 struct vmcb *vmcb12)
{
	int ret;

	svm->nested.vmcb12_gpa = vmcb12_gpa;
	nested_prepare_vmcb_save(svm, vmcb12);
	nested_prepare_vmcb_control(svm);

	ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
				  nested_npt_enabled(svm));
	if (ret)
		return ret;

	if (!npt_enabled)
		svm->vcpu.arch.mmu->inject_page_fault = svm_inject_page_fault_nested;

	svm_set_gif(svm, true);

	return 0;
}

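/*
 * Handler for the VMRUN intercept: map and validate vmcb12, save L1 state
 * into hsave, and switch to running L2.
 */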
int nested_svm_vmrun(struct vcpu_svm *svm)
{
	int ret;
	struct vmcb *vmcb12;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;
	u64 vmcb12_gpa;

	if (is_smm(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	vmcb12_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb12_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(&svm->vcpu);
	}

	ret = kvm_skip_emulated_instruction(&svm->vcpu);

	vmcb12 = map.hva;

	if (WARN_ON_ONCE(!svm->nested.initialized)) {
		/* Don't leak the mapped vmcb12 page on the error path. */
		kvm_vcpu_unmap(&svm->vcpu, &map, true);
		return -EINVAL;
	}

	load_nested_vmcb_control(svm, &vmcb12->control);

	if (!nested_vmcb_check_save(svm, vmcb12) ||
	    !nested_vmcb_check_controls(&svm->nested.ctl)) {
		vmcb12->control.exit_code    = SVM_EXIT_ERR;
		vmcb12->control.exit_code_hi = 0;
		vmcb12->control.exit_info_1  = 0;
		vmcb12->control.exit_info_2  = 0;
		goto out;
	}

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
			       vmcb12->save.rip,
			       vmcb12->control.int_ctl,
			       vmcb12->control.event_inj,
			       vmcb12->control.nested_ctl);

	trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
				    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
				    vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
				    vmcb12->control.intercepts[INTERCEPT_WORD3],
				    vmcb12->control.intercepts[INTERCEPT_WORD4],
				    vmcb12->control.intercepts[INTERCEPT_WORD5]);

	/* Clear internal status */
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	/*
	 * Save the current vmcb so we don't need to pick what to save;
	 * everything can be restored when a VMEXIT occurs.
	 */
	hsave->save.es     = vmcb->save.es;
	hsave->save.cs     = vmcb->save.cs;
	hsave->save.ss     = vmcb->save.ss;
	hsave->save.ds     = vmcb->save.ds;
	hsave->save.gdtr   = vmcb->save.gdtr;
	hsave->save.idtr   = vmcb->save.idtr;
	hsave->save.efer   = svm->vcpu.arch.efer;
	hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
	hsave->save.cr4    = svm->vcpu.arch.cr4;
	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
	hsave->save.rip    = kvm_rip_read(&svm->vcpu);
	hsave->save.rsp    = vmcb->save.rsp;
	hsave->save.rax    = vmcb->save.rax;
	if (npt_enabled)
		hsave->save.cr3    = vmcb->save.cr3;
	else
		hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);

	copy_vmcb_control_area(&hsave->control, &vmcb->control);

	svm->nested.nested_run_pending = 1;

	if (enter_svm_guest_mode(svm, vmcb12_gpa, vmcb12))
		goto out_exit_err;

	if (nested_svm_vmrun_msrpm(svm))
		goto out;

out_exit_err:
	svm->nested.nested_run_pending = 0;

	svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1  = 0;
	svm->vmcb->control.exit_info_2  = 0;

	nested_svm_vmexit(svm);

out:
	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	return ret;
}

597
598void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
599{
600	to_vmcb->save.fs = from_vmcb->save.fs;
601	to_vmcb->save.gs = from_vmcb->save.gs;
602	to_vmcb->save.tr = from_vmcb->save.tr;
603	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
604	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
605	to_vmcb->save.star = from_vmcb->save.star;
606	to_vmcb->save.lstar = from_vmcb->save.lstar;
607	to_vmcb->save.cstar = from_vmcb->save.cstar;
608	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
609	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
610	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
611	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
612}
613
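/*
 * Emulate a #VMEXIT from L2 to L1: store the L2 state and exit information
 * into vmcb12, then restore the L1 state that was saved in hsave.
 */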
int nested_svm_vmexit(struct vcpu_svm *svm)
{
	int rc;
	struct vmcb *vmcb12;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;

	rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	vmcb12 = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(&svm->vcpu);
	svm->nested.vmcb12_gpa = 0;
	WARN_ON_ONCE(svm->nested.nested_run_pending);

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

	/* Give the current vmcb to the guest */

	vmcb12->save.es     = vmcb->save.es;
	vmcb12->save.cs     = vmcb->save.cs;
	vmcb12->save.ss     = vmcb->save.ss;
	vmcb12->save.ds     = vmcb->save.ds;
	vmcb12->save.gdtr   = vmcb->save.gdtr;
	vmcb12->save.idtr   = vmcb->save.idtr;
	vmcb12->save.efer   = svm->vcpu.arch.efer;
	vmcb12->save.cr0    = kvm_read_cr0(&svm->vcpu);
	vmcb12->save.cr3    = kvm_read_cr3(&svm->vcpu);
	vmcb12->save.cr2    = vmcb->save.cr2;
	vmcb12->save.cr4    = svm->vcpu.arch.cr4;
	vmcb12->save.rflags = kvm_get_rflags(&svm->vcpu);
	vmcb12->save.rip    = kvm_rip_read(&svm->vcpu);
	vmcb12->save.rsp    = kvm_rsp_read(&svm->vcpu);
	vmcb12->save.rax    = kvm_rax_read(&svm->vcpu);
	vmcb12->save.dr7    = vmcb->save.dr7;
	vmcb12->save.dr6    = svm->vcpu.arch.dr6;
	vmcb12->save.cpl    = vmcb->save.cpl;

	vmcb12->control.int_state         = vmcb->control.int_state;
	vmcb12->control.exit_code         = vmcb->control.exit_code;
	vmcb12->control.exit_code_hi      = vmcb->control.exit_code_hi;
	vmcb12->control.exit_info_1       = vmcb->control.exit_info_1;
	vmcb12->control.exit_info_2       = vmcb->control.exit_info_2;

	if (vmcb12->control.exit_code != SVM_EXIT_ERR)
		nested_vmcb_save_pending_event(svm, vmcb12);

	if (svm->nrips_enabled)
		vmcb12->control.next_rip = vmcb->control.next_rip;

	vmcb12->control.int_ctl           = svm->nested.ctl.int_ctl;
	vmcb12->control.tlb_ctl           = svm->nested.ctl.tlb_ctl;
	vmcb12->control.event_inj         = svm->nested.ctl.event_inj;
	vmcb12->control.event_inj_err     = svm->nested.ctl.event_inj_err;

	vmcb12->control.pause_filter_count =
		svm->vmcb->control.pause_filter_count;
	vmcb12->control.pause_filter_thresh =
		svm->vmcb->control.pause_filter_thresh;

	/* Restore the original control entries */
	copy_vmcb_control_area(&vmcb->control, &hsave->control);

	/* On vmexit the GIF is set to false */
	svm_set_gif(svm, false);

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset;

	svm->nested.ctl.nested_cr3 = 0;

	/* Restore selected save entries */
	svm->vmcb->save.es = hsave->save.es;
	svm->vmcb->save.cs = hsave->save.cs;
	svm->vmcb->save.ss = hsave->save.ss;
	svm->vmcb->save.ds = hsave->save.ds;
	svm->vmcb->save.gdtr = hsave->save.gdtr;
	svm->vmcb->save.idtr = hsave->save.idtr;
	kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
	svm_set_efer(&svm->vcpu, hsave->save.efer);
	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
	kvm_rax_write(&svm->vcpu, hsave->save.rax);
	kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
	kvm_rip_write(&svm->vcpu, hsave->save.rip);
	svm->vmcb->save.dr7 = 0;
	svm->vmcb->save.cpl = 0;
	svm->vmcb->control.exit_int_info = 0;

	vmcb_mark_all_dirty(svm->vmcb);

	trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
				       vmcb12->control.exit_info_1,
				       vmcb12->control.exit_info_2,
				       vmcb12->control.exit_int_info,
				       vmcb12->control.exit_int_info_err,
				       KVM_ISA_SVM);

	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	nested_svm_uninit_mmu_context(&svm->vcpu);

	rc = nested_svm_load_cr3(&svm->vcpu, hsave->save.cr3, false);
	if (rc)
		return 1;

	if (npt_enabled)
		svm->vmcb->save.cr3 = hsave->save.cr3;

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	return 0;
}

int svm_allocate_nested(struct vcpu_svm *svm)
{
	struct page *hsave_page;

	if (svm->nested.initialized)
		return 0;

	hsave_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!hsave_page)
		return -ENOMEM;
	svm->nested.hsave = page_address(hsave_page);

	svm->nested.msrpm = svm_vcpu_alloc_msrpm();
	if (!svm->nested.msrpm)
		goto err_free_hsave;
	svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);

	svm->nested.initialized = true;
	return 0;

err_free_hsave:
	__free_page(hsave_page);
	return -ENOMEM;
}

void svm_free_nested(struct vcpu_svm *svm)
{
	if (!svm->nested.initialized)
		return;

	svm_vcpu_free_msrpm(svm->nested.msrpm);
	svm->nested.msrpm = NULL;

	__free_page(virt_to_page(svm->nested.hsave));
	svm->nested.hsave = NULL;

	svm->nested.initialized = false;
}

/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
void svm_leave_nested(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_guest_mode(vcpu)) {
		struct vmcb *hsave = svm->nested.hsave;
		struct vmcb *vmcb = svm->vmcb;

		svm->nested.nested_run_pending = 0;
		leave_guest_mode(vcpu);
		copy_vmcb_control_area(&vmcb->control, &hsave->control);
		nested_svm_uninit_mmu_context(vcpu);
	}

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
}

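/*
 * Check L1's MSR permission bitmap to decide whether an MSR access by L2
 * should cause a vmexit to L1 (NESTED_EXIT_DONE) or be handled by L0
 * (NESTED_EXIT_HOST).
 */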
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT))
		return NESTED_EXIT_HOST;

	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write  = svm->vmcb->control.exit_info_1 & 1;
	mask   = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32-bit units, but we need it in 8-bit units */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

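/*
 * Check L1's I/O permission bitmap to decide whether an I/O access by L2
 * should cause a vmexit to L1.
 */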
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa  = svm->nested.ctl.iopm_base_pa + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		/*
		 * Host-intercepted exceptions have been checked already in
		 * nested_svm_exit_special.  There is nothing to do here,
		 * the vmexit is injected by svm_check_nested_events.
		 */
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.efer & EFER_SVME) ||
	    !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}

static bool nested_exit_on_exception(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr));
}

static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	svm->vmcb->control.exit_code_hi = 0;

	if (svm->vcpu.arch.exception.has_error_code)
		svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;

	/*
	 * EXITINFO2 is undefined for all exception intercepts other
	 * than #PF.
	 */
	if (nr == PF_VECTOR) {
		if (svm->vcpu.arch.exception.nested_apf)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
		else if (svm->vcpu.arch.exception.has_payload)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
		else
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
	} else if (nr == DB_VECTOR) {
		/* See inject_pending_event.  */
		kvm_deliver_exception_payload(&svm->vcpu);
		if (svm->vcpu.arch.dr7 & DR7_GD) {
			svm->vcpu.arch.dr7 &= ~DR7_GD;
			kvm_update_dr7(&svm->vcpu);
		}
	} else
		WARN_ON(svm->vcpu.arch.exception.has_payload);

	nested_svm_vmexit(svm);
}

static void nested_svm_smi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_SMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_nmi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_intr(struct vcpu_svm *svm)
{
	trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);

	svm->vmcb->control.exit_code   = SVM_EXIT_INTR;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
}

static void nested_svm_init(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code   = SVM_EXIT_INIT;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

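/*
 * Decide whether a pending event (INIT, exception, SMI, NMI or interrupt)
 * must first cause a vmexit from L2 to L1; returns -EBUSY while event
 * injection is blocked.
 */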
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool block_nested_events =
		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_init(svm))
			return 0;
		nested_svm_init(svm);
		return 0;
	}

	if (vcpu->arch.exception.pending) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_exception(svm))
			return 0;
		nested_svm_inject_exception_vmexit(svm);
		return 0;
	}

	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_smi(svm))
			return 0;
		nested_svm_smi(svm);
		return 0;
	}

	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_nmi(svm))
			return 0;
		nested_svm_nmi(svm);
		return 0;
	}

	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_intr(svm))
			return 0;
		nested_svm_intr(svm);
		return 0;
	}

	return 0;
}

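/*
 * First-pass triage of an exit that happened while running L2: decide
 * whether L0 must handle it regardless of L1's intercepts
 * (NESTED_EXIT_HOST), or whether checking against L1's intercepts should
 * continue (NESTED_EXIT_CONTINUE).
 */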
int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_NPF:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

		if (get_host_vmcb(svm)->control.intercepts[INTERCEPT_EXCEPTION] &
				excp_bits)
			return NESTED_EXIT_HOST;
		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
			 svm->vcpu.arch.apf.host_apf_flags)
			/* Trap async PF even if not shadowing */
			return NESTED_EXIT_HOST;
		break;
	}
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

static int svm_get_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				u32 user_data_size)
{
	struct vcpu_svm *svm;
	struct kvm_nested_state kvm_state = {
		.flags = 0,
		.format = KVM_STATE_NESTED_FORMAT_SVM,
		.size = sizeof(kvm_state),
	};
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];

	if (!vcpu)
		return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;

	svm = to_svm(vcpu);

	if (user_data_size < kvm_state.size)
		goto out;

	/* First fill in the header and copy it out.  */
	if (is_guest_mode(vcpu)) {
		kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
		kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
		kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

		if (svm->nested.nested_run_pending)
			kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
	}

	if (gif_set(svm))
		kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;

	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
		return -EFAULT;

	if (!is_guest_mode(vcpu))
		goto out;

	/*
	 * Copy over the full size of the VMCB rather than just the size
	 * of the structs.
	 */
	if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
		return -EFAULT;
	if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
			 sizeof(user_vmcb->control)))
		return -EFAULT;
	if (copy_to_user(&user_vmcb->save, &svm->nested.hsave->save,
			 sizeof(user_vmcb->save)))
		return -EFAULT;

out:
	return kvm_state.size;
}

1150
1151static int svm_set_nested_state(struct kvm_vcpu *vcpu,
1152				struct kvm_nested_state __user *user_kvm_nested_state,
1153				struct kvm_nested_state *kvm_state)
1154{
1155	struct vcpu_svm *svm = to_svm(vcpu);
1156	struct vmcb *hsave = svm->nested.hsave;
1157	struct vmcb __user *user_vmcb = (struct vmcb __user *)
1158		&user_kvm_nested_state->data.svm[0];
1159	struct vmcb_control_area *ctl;
1160	struct vmcb_save_area *save;
1161	int ret;
1162	u32 cr0;
1163
1164	BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
1165		     KVM_STATE_NESTED_SVM_VMCB_SIZE);
1166
1167	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
1168		return -EINVAL;
1169
1170	if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
1171				 KVM_STATE_NESTED_RUN_PENDING |
1172				 KVM_STATE_NESTED_GIF_SET))
1173		return -EINVAL;
1174
1175	/*
1176	 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
1177	 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
1178	 */
1179	if (!(vcpu->arch.efer & EFER_SVME)) {
1180		/* GIF=1 and no guest mode are required if SVME=0.  */
1181		if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
1182			return -EINVAL;
1183	}
1184
1185	/* SMM temporarily disables SVM, so we cannot be in guest mode.  */
1186	if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
1187		return -EINVAL;
1188
1189	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
1190		svm_leave_nested(vcpu);
1191		svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1192		return 0;
1193	}
1194
1195	if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
1196		return -EINVAL;
1197	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
1198		return -EINVAL;
1199
1200	ret  = -ENOMEM;
1201	ctl  = kzalloc(sizeof(*ctl),  GFP_KERNEL_ACCOUNT);
1202	save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
1203	if (!ctl || !save)
1204		goto out_free;
1205
1206	ret = -EFAULT;
1207	if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
1208		goto out_free;
1209	if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
1210		goto out_free;
1211
1212	ret = -EINVAL;
1213	if (!nested_vmcb_check_controls(ctl))
1214		goto out_free;
1215
1216	/*
1217	 * Processor state contains L2 state.  Check that it is
1218	 * valid for guest mode (see nested_vmcb_checks).
1219	 */
1220	cr0 = kvm_read_cr0(vcpu);
1221        if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
1222		goto out_free;
1223
1224	/*
1225	 * Validate host state saved from before VMRUN (see
1226	 * nested_svm_check_permissions).
1227	 * TODO: validate reserved bits for all saved state.
1228	 */
1229	if (!(save->cr0 & X86_CR0_PG))
1230		goto out_free;
1231	if (!(save->efer & EFER_SVME))
1232		goto out_free;
1233
1234	/*
1235	 * All checks done, we can enter guest mode.  L1 control fields
1236	 * come from the nested save state.  Guest state is already
1237	 * in the registers, the save area of the nested state instead
1238	 * contains saved L1 state.
1239	 */
1240	copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
1241	hsave->save = *save;
1242
1243	if (is_guest_mode(vcpu))
1244		svm_leave_nested(vcpu);
1245
1246	svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
1247	load_nested_vmcb_control(svm, ctl);
1248	nested_prepare_vmcb_control(svm);
1249
1250	kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1251	ret = 0;
1252out_free:
1253	kfree(save);
1254	kfree(ctl);
1255
1256	return ret;
1257}
1258
struct kvm_x86_nested_ops svm_nested_ops = {
	.leave_nested = svm_leave_nested,
	.check_events = svm_check_nested_events,
	.get_nested_state_pages = svm_get_nested_state_pages,
	.get_state = svm_get_nested_state,
	.set_state = svm_set_nested_state,
};