Lines matching refs:to_vmx

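For context before the hits below: to_vmx() is KVM's accessor that recovers the vendor-specific struct vcpu_vmx from a generic struct kvm_vcpu pointer via container_of(); the generic vCPU is embedded inside the VMX one, so the conversion is pure pointer arithmetic with no lookup. The accessor itself lives in arch/x86/kvm/vmx/vmx.h. Below is a minimal, self-contained sketch of the pattern; the two structs are simplified stand-ins for illustration (the real ones are far larger), and only the container_of() round trip is the point.

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified stand-ins for the real KVM structs, for illustration only. */
    struct kvm_vcpu {
            int vcpu_id;
    };

    struct vcpu_vmx {
            struct kvm_vcpu vcpu;      /* embedded generic vCPU, as in the real layout */
            unsigned long ept_pointer; /* one of the VMX-specific fields seen in the hits */
    };

    /* container_of: recover the enclosing struct from a pointer to one member. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Mirrors the in-tree accessor. */
    static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
    {
            return container_of(vcpu, struct vcpu_vmx, vcpu);
    }

    int main(void)
    {
            struct vcpu_vmx vmx = { .vcpu = { .vcpu_id = 0 }, .ept_pointer = 0 };
            struct kvm_vcpu *vcpu = &vmx.vcpu; /* the generic handle KVM passes around */

            /* The idiom that dominates the listing below. */
            struct vcpu_vmx *back = to_vmx(vcpu);
            printf("round-trip ok: %d\n", back == &vmx);
            return 0;
    }

This embed-the-base-struct pattern is why so many hits below open a function with "struct vcpu_vmx *vmx = to_vmx(vcpu);": the generic kvm_vcpu pointer crosses the arch-neutral API boundary, and each VMX handler converts it once up front.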
543 			tmp_eptp = to_vmx(vcpu)->ept_pointer;
544 } else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) {
566 u64 ept_pointer = to_vmx(vcpu)->ept_pointer;
594 if (VALID_PAGE(to_vmx(vcpu)->ept_pointer))
626 evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs;
887 if (to_vmx(vcpu)->rmode.vm86_active)
1274 struct vcpu_vmx *vmx = to_vmx(vcpu);
1405 struct vcpu_vmx *vmx = to_vmx(vcpu);
1478 struct vcpu_vmx *vmx = to_vmx(vcpu);
1491 vmx_prepare_switch_to_host(to_vmx(vcpu));
1501 struct vcpu_vmx *vmx = to_vmx(vcpu);
1519 struct vcpu_vmx *vmx = to_vmx(vcpu);
1577 struct vcpu_vmx *vmx = to_vmx(vcpu);
1665 to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) {
1696 struct vcpu_vmx *vmx = to_vmx(vcpu);
1737 struct vcpu_vmx *vmx = to_vmx(vcpu);
1865 uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits;
1892 struct vcpu_vmx *vmx = to_vmx(vcpu);
1926 msr_info->data = to_vmx(vcpu)->spec_ctrl;
2053 struct vcpu_vmx *vmx = to_vmx(vcpu);
2206 !(to_vmx(vcpu)->msr_ia32_feature_control &
2214 (to_vmx(vcpu)->msr_ia32_feature_control &
2808 struct vcpu_vmx *vmx = to_vmx(vcpu);
2879 struct vcpu_vmx *vmx = to_vmx(vcpu);
2927 struct vcpu_vmx *vmx = to_vmx(vcpu);
2936 vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
2939 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
2953 vmx_segment_cache_clear(to_vmx(vcpu));
2968 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
2976 struct vcpu_vmx *vmx = to_vmx(vcpu);
3001 return to_vmx(vcpu)->vpid;
3076 struct vcpu_vmx *vmx = to_vmx(vcpu);
3191 to_vmx(vcpu)->ept_pointer = eptp;
3222 if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
3230 struct vcpu_vmx *vmx = to_vmx(vcpu);
3290 struct vcpu_vmx *vmx = to_vmx(vcpu);
3328 if (to_vmx(vcpu)->rmode.vm86_active) {
3332 return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
3337 struct vcpu_vmx *vmx = to_vmx(vcpu);
3366 struct vcpu_vmx *vmx = to_vmx(vcpu);
3406 u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
3830 struct vcpu_vmx *vmx = to_vmx(vcpu);
3875 struct vcpu_vmx *vmx = to_vmx(vcpu);
3919 (secondary_exec_controls_get(to_vmx(vcpu)) &
3931 unsigned long *msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
3969 struct vcpu_vmx *vmx = to_vmx(vcpu);
3984 struct vcpu_vmx *vmx = to_vmx(vcpu);
4000 struct vcpu_vmx *vmx = to_vmx(vcpu);
4020 struct vcpu_vmx *vmx = to_vmx(vcpu);
4083 struct vcpu_vmx *vmx = to_vmx(vcpu);
4109 struct vcpu_vmx *vmx = to_vmx(vcpu);
4227 struct vcpu_vmx *vmx = to_vmx(vcpu);
4511 struct vcpu_vmx *vmx = to_vmx(vcpu);
4608 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
4619 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
4624 struct vcpu_vmx *vmx = to_vmx(vcpu);
4652 struct vcpu_vmx *vmx = to_vmx(vcpu);
4683 struct vcpu_vmx *vmx = to_vmx(vcpu);
4697 struct vcpu_vmx *vmx = to_vmx(vcpu);
4720 if (!enable_vnmi && to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
4730 if (to_vmx(vcpu)->nested.nested_run_pending)
4752 if (to_vmx(vcpu)->nested.nested_run_pending)
4797 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
4892 struct vcpu_vmx *vmx = to_vmx(vcpu);
5108 if (to_vmx(vcpu)->nested.vmxon &&
5255 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);
5290 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);
5306 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
5400 struct vcpu_vmx *vmx = to_vmx(vcpu);
5469 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
5532 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
5541 struct vcpu_vmx *vmx = to_vmx(vcpu);
5586 struct vcpu_vmx *vmx = to_vmx(vcpu);
5602 struct vcpu_vmx *vmx = to_vmx(vcpu);
5721 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
5736 struct vcpu_vmx *vmx = to_vmx(vcpu);
5838 struct vcpu_vmx *vmx = to_vmx(vcpu);
5865 struct vcpu_vmx *vmx = to_vmx(vcpu);
6081 struct vcpu_vmx *vmx = to_vmx(vcpu);
6313 to_vmx(vcpu)->nested.l1_tpr_threshold = tpr_threshold;
6320 struct vcpu_vmx *vmx = to_vmx(vcpu);
6377 to_vmx(vcpu)->nested.reload_vmcs01_apic_access_page = true;
6381 if (!(secondary_exec_controls_get(to_vmx(vcpu)) &
6449 struct vcpu_vmx *vmx = to_vmx(vcpu);
6493 struct vcpu_vmx *vmx = to_vmx(vcpu);
6541 struct vcpu_vmx *vmx = to_vmx(vcpu);
6702 struct vcpu_vmx *vmx = to_vmx(vcpu);
6761 switch (to_vmx(vcpu)->exit_reason.basic) {
6839 struct vcpu_vmx *vmx = to_vmx(vcpu);
6988 struct vcpu_vmx *vmx = to_vmx(vcpu);
7003 vmx = to_vmx(vcpu);
7280 struct vcpu_vmx *vmx = to_vmx(vcpu);
7320 struct vcpu_vmx *vmx = to_vmx(vcpu);
7337 struct vcpu_vmx *vmx = to_vmx(vcpu);
7406 struct vcpu_vmx *vmx = to_vmx(vcpu);
7417 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
7421 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
7485 to_vmx(vcpu)->req_immediate_exit = true;
7612 vmx = to_vmx(vcpu);
7647 to_vmx(vcpu)->hv_deadline_tsc = -1;
7705 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
7708 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
7715 if (to_vmx(vcpu)->nested.nested_run_pending)
7722 struct vcpu_vmx *vmx = to_vmx(vcpu);
7736 struct vcpu_vmx *vmx = to_vmx(vcpu);
7761 return to_vmx(vcpu)->nested.vmxon;
7767 struct hrtimer *timer = &to_vmx(vcpu)->nested.preemption_timer;