/kernel/linux/linux-6.6/tools/testing/selftests/kvm/lib/x86_64/

vmx.c - nested VMX helper library for the KVM selftests. Matches (file line: symbol):
    78  vcpu_alloc_vmx(): allocates the vmxon and vmcs pages with vm_vaddr_alloc_page() and records each page's host virtual address (addr_gva2hva) and guest physical address (addr_gva2gpa) alongside its guest virtual address
   116  prepare_for_vmx_operation(struct vmx_pages *vmx)
   160  load_vmcs(struct vmx_pages *vmx)
   191  init_vmcs_control_fields(struct vmx_pages *vmx)
   360  prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
   399  __nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, int target_level)
   450  nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr)
   473  __nested_map(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, uint64_t size, int level)
   490  nested_map(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, uint64_t size)
   499  nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm, uint32_t memslot)
   521  nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t addr, uint64_t size)
   539  prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm, uint32_t eptp_memslot)
   549  prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm)
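For orientation, here is a minimal sketch (not the verbatim kernel source) of the allocation pattern vcpu_alloc_vmx() follows per the matches above: each backing page is allocated by guest virtual address, then also recorded by host virtual and guest physical address, since the test harness needs all three views. Helper and field names are taken from the snippet; error handling is omitted.

/* Sketch, assuming the selftest helpers from kvm_util.h and vmx.h. */
#include "kvm_util.h"
#include "vmx.h"

struct vmx_pages *vcpu_alloc_vmx_sketch(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
{
	vm_vaddr_t vmx_gva = vm_vaddr_alloc_page(vm);
	struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);

	/* VMXON region: track guest-virtual, host-virtual, guest-physical. */
	vmx->vmxon = (void *)vm_vaddr_alloc_page(vm);
	vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);
	vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);

	/* VMCS used for L2, tracked the same way. */
	vmx->vmcs = (void *)vm_vaddr_alloc_page(vm);
	vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);
	vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs);

	*p_vmx_gva = vmx_gva;
	return vmx;
}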
memstress.c - nested-virtualization mode of the memstress framework. Matches:
    32  memstress_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id): asserts vmx->vmcs_gpa, then prepare_for_vmx_operation(), load_vmcs(), and prepare_vmcs(vmx, memstress_l2_guest_entry, rsp); a sketch of this flow follows below
    62  memstress_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm): prepare_eptp(vmx, vm, 0), then nested_identity_map_1g(vmx, vm, 0, 0x100000000ULL) to identity-map the low 4 GiB in 1 GiB pages
    82  memstress_setup_nested(): allocates per-vCPU vmx_pages (locals vmx, vmx0)
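The L1 guest bodies in these selftests all repeat the same boilerplate. A hedged sketch of that flow, using the helpers listed above; the stack setup and the l2_guest_code entry point are illustrative stand-ins for what each test supplies.

#include "kvm_util.h"
#include "vmx.h"

#define L2_GUEST_STACK_SIZE 64

/* Illustrative L2 entry point; each real test defines its own. */
extern void l2_guest_code(void);

static void l1_guest_code_sketch(struct vmx_pages *vmx)
{
	unsigned long l2_stack[L2_GUEST_STACK_SIZE];

	GUEST_ASSERT(vmx->vmcs_gpa);                  /* vmcs page was allocated */
	GUEST_ASSERT(prepare_for_vmx_operation(vmx)); /* CR0/CR4/MSR setup + VMXON */
	GUEST_ASSERT(load_vmcs(vmx));                 /* VMCLEAR + VMPTRLD */

	/* Point the VMCS at the L2 entry point and stack, then launch. */
	prepare_vmcs(vmx, l2_guest_code, &l2_stack[L2_GUEST_STACK_SIZE]);
	GUEST_ASSERT(!vmlaunch());
}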
/kernel/linux/linux-5.10/tools/testing/selftests/kvm/lib/x86_64/

vmx.c - the linux-5.10 copy of the library above; note the older allocation API and the extra eptp_memslot parameters. Matches:
    81  vcpu_alloc_vmx(): allocates the vmxon and vmcs pages with vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0) and records each page's HVA and GPA
   133  prepare_for_vmx_operation(struct vmx_pages *vmx)
   177  load_vmcs(struct vmx_pages *vmx)
   206  init_vmcs_control_fields(struct vmx_pages *vmx)
   375  prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
   397  nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot)
   496  nested_map(struct vmx_pages *vmx, struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, uint64_t size, uint32_t eptp_memslot)
   516  nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm, uint32_t memslot, uint32_t eptp_memslot)
   538  prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm, uint32_t eptp_memslot)
   546  prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm, uint32_t eptp_memslot)
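The matched lines make the interface drift between the two trees easy to see. A side-by-side of the vmxon allocation, copied from the snippets above:

/* linux-5.10: explicit size, minimum vaddr, and memslot arguments. */
vmx->vmxon = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);

/* linux-6.6: the common page-sized case was factored into a helper. */
vmx->vmxon = (void *)vm_vaddr_alloc_page(vm);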
/kernel/linux/linux-6.6/arch/x86/kvm/vmx/

nested.c - nested VMX (vmcs12/enlightened VMCS) implementation. Matched definitions (file line: symbol):
   190 nested_vmx_fail, 207 nested_vmx_abort, 220 vmx_disable_shadow_vmcs, 230 nested_release_evmcs,
   246 vmx_sync_vmcs_host_state, 267 vmx_switch_vmcs, 296 free_nested, 382 nested_ept_inject_page_fault,
   413 nested_ept_new_eptp, 551 nested_vmx_set_intercept_for_msr, 571 nested_vmx_prepare_msr_bitmap,
   673 nested_cache_shadow_vmcs12, 692 nested_flush_cached_shadow_vmcs12,
   891 nested_vmx_max_atomic_switch_msrs, 947 nested_vmx_get_vmexit_msr_value,
   1045 prepare_vmx_msr_autostore_list, 1140 nested_vmx_transition_tlb_flush,
   1204 vmx_restore_vmx_basic, 1264 vmx_restore_control_msr, 1287 vmx_restore_vmx_misc,
   1322 vmx_restore_vmx_ept_vpid_cap, 1348 vmx_restore_fixed0_msr, 1370 vmx_set_vmx_msr,
   1518 copy_shadow_to_vmcs12, 1545 copy_vmcs12_to_shadow, 1579 copy_enlightened_to_vmcs12,
   1823 copy_vmcs12_to_enlightened, 2004 nested_vmx_handle_enlightened_vmptrld,
   2089 nested_sync_vmcs12_to_shadow, 2101 vmx_preemption_timer_fn, 2113 vmx_calc_preemption_timer_value,
   2130 vmx_start_preemption_timer, 2152 nested_vmx_calc_efer, 2163 prepare_vmcs02_constant_state,
   2224 prepare_vmcs02_early_rare, 2239 prepare_vmcs02_early, 2404 prepare_vmcs02_rare, 2537 prepare_vmcs02,
   2689 nested_vmx_check_eptp, 2738 nested_check_vm_execution_controls, 2797 nested_check_vm_exit_controls,
   2814 nested_check_vm_entry_controls, 2980 nested_vmx_check_vmcs_link_ptr, 3088 nested_vmx_check_vmentry_hw,
   3166 nested_get_evmcs_page, 3195 nested_get_vmcs12_pages, 3308 nested_vmx_write_pml_buffer,
   3388 nested_vmx_enter_non_root_mode, 3554 nested_vmx_run, 3817 vmx_complete_nested_posted_interrupt,
   4057 vmx_check_nested_events, 4257 sync_vmcs02_to_vmcs12_rare, 4302 copy_vmcs02_to_vmcs12_rare,
   4330 sync_vmcs02_to_vmcs12, 4590 nested_vmx_get_vmcs01_guest_efer, 4616 nested_vmx_restore_host_state,
   4726 nested_vmx_vmexit, 5071 alloc_shadow_vmcs, 5094 enter_vmx_operation, 5148 handle_vmxon,
   5236 nested_release_vmcs12, 5278 handle_vmclear, 5349 handle_vmread, 5455 handle_vmwrite,
   5556 set_current_vmptr, 5572 handle_vmptrld, 5670 handle_invept, 5750 handle_invvpid, 5872 handle_vmfunc,
   6355 nested_vmx_reflect_vmexit, 6409 vmx_get_nested_state, 6541 vmx_set_nested_state
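The condition at lines 196-197 decides which of the two architectural failure modes a VMX instruction reports. A sketch of that dispatch, reconstructed from the matched lines; the failInvalid/failValid helper names are the kernel's, but this body is a reconstruction, not a copy:

static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * With no current VMCS and no enlightened VMCS mapped, the SDM says
	 * the instruction fails with VMfailInvalid (CF=1). Otherwise it
	 * fails with VMfailValid, writing an error code into the
	 * VM-instruction error field of the current VMCS.
	 */
	if (vmx->nested.current_vmptr == INVALID_GPA &&
	    !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
		return nested_vmx_failInvalid(vcpu);

	return nested_vmx_failValid(vcpu, vm_instruction_error);
}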
vmx.c - core Intel VMX support for KVM. Matched definitions (file line: symbol):
   365 vmx_disable_fb_clear, 379 vmx_enable_fb_clear, 388 vmx_update_fb_clear_dis, 519 vmx_segment_cache_clear,
   720 vmx_find_uret_msr, 730 vmx_set_guest_uret_msr, 830 vmx_segment_cache_test_set,
   845 vmx_read_guest_seg_selector, 854 vmx_read_guest_seg_base, 863 vmx_read_guest_seg_limit,
   872 vmx_read_guest_seg_ar, 943 msr_write_intercepted, 951 __vmx_vcpu_run_flags,
   969 clear_atomic_switch_msr_special, 987 clear_atomic_switch_msr, 1027 add_atomic_switch_msr_special,
   1039 add_atomic_switch_msr, 1105 update_transition_efer, 1187 pt_can_write_msr, 1227 pt_guest_enter,
   1244 pt_guest_exit, 1291 vmx_prepare_switch_to_guest, 1363 vmx_prepare_switch_to_host,
   1403 vmx_read_guest_kernel_gs_base, 1412 vmx_write_guest_kernel_gs_base, 1425 vmx_vcpu_load_vmcs,
   1495 vmx_vcpu_load, 1518 vmx_get_rflags, 1536 vmx_set_rflags, 1599 vmx_rtit_ctl_check,
   1757 vmx_update_emulated_instruction, 1808 vmx_inject_exception, 1849 vmx_setup_uret_msr,
   1867 vmx_setup_uret_msrs, 1949 is_vmx_feature_control_msr_valid, 1992 vmx_get_msr, 2171 vmx_set_msr,
   3005 enter_pmode, 3075 enter_rmode, 3122 vmx_set_efer, 3171 vmx_flush_tlb_all, 3282 vmx_set_cr0,
   3439 vmx_set_cr4, 3502 vmx_get_segment, 3549 vmx_get_cpl, 3578 __vmx_set_segment,
   3948 vmx_msr_bitmap_l01_changed, 3968 vmx_disable_intercept_for_msr, 4012 vmx_enable_intercept_for_msr,
   4051 vmx_update_msr_bitmap_x2apic, 4104 pt_update_intercept_for_msr, 4120 vmx_guest_apic_has_interrupt,
   4140 vmx_msr_filter_changed, 4213 vmx_deliver_nested_posted_interrupt, 4251 vmx_deliver_posted_interrupt,
   4300 vmx_set_constant_host_state, 4367 set_cr4_guest_host_mask, 4383 vmx_pin_based_exec_ctrl,
   4444 vmx_refresh_apicv_exec_ctrl, 4470 vmx_exec_control, 4514 vmx_tertiary_exec_control,
   4534 vmx_adjust_secondary_exec_control, 4594 vmx_secondary_exec_control, 4713 init_vmcs,
   4833 __vmx_vcpu_reset, 4860 vmx_vcpu_reset, 4935 vmx_inject_irq, 4963 vmx_inject_nmi,
   4994 vmx_get_nmi_mask, 5008 vmx_set_nmi_mask, 5186 handle_exception_nmi, 5691 handle_task_switch,
   5835 vmx_emulation_required_with_pending_exception, 5843 handle_invalid_guest_state, 5894 grow_ple_window,
   5910 shrink_ple_window, 6006 handle_fastpath_preemption_timer, 6150 vmx_get_exit_info,
   6168 vmx_destroy_pml_buffer, 6178 vmx_flush_pml_buffer, 6235 dump_vmcs, 6414 __vmx_handle_exit,
   6697 vmx_set_virtual_apic_mode, 6869 vmx_sync_pir_to_irr, 6926 vmx_apicv_pre_state_restore,
   6955 handle_exception_irqoff, 6989 vmx_handle_exit_irqoff, 7026 vmx_recover_nmi_blocking,
   7120 vmx_complete_interrupts, 7137 atomic_switch_perf_msrs, 7162 vmx_update_hv_timer,
   7186 vmx_update_host_rsp, 7194 vmx_spec_ctrl_restore_host, 7234 vmx_vcpu_enter_exit, 7287 vmx_vcpu_run,
   7443 vmx_vcpu_free, 7455 vmx_vcpu_create, 7632 vmcs_set_secondary_exec_control,
   7657 nested_vmx_cr_fixed1_bits_update, 7697 update_intel_pt_cfg, 7766 vmx_vcpu_after_set_cpuid,
   8042 vmx_set_hv_timer, 8093 vmx_update_cpu_dirty_logging, 8135 vmx_enter_smm, 8156 vmx_leave_smm
vmx.h - per-vCPU VMX state and accessors. Matches:
   255  comment on host-state caching ("If true, host state has been stored in vmx->loaded_vmcs for ... and vmx->loaded_vmcs->host_state is invalid")
   381  void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
   394  void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
   407  struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
   409  void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
   410  void vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, unsigned int flags);
   411  unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx);
   412  bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, ...);
   582  lname##_controls_set(struct vcpu_vmx *vmx, ...): macro-generated VMCS controls-shadow accessors
   679  vmx_get_exit_qual(), 689 vmx_get_intr_info(), 709 vmx_has_waitpkg(struct vcpu_vmx *vmx)
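The lname##_controls_set entry at 582 is part of a macro family that stamps out shadowed accessors for each VM-execution control field. A hand-expanded sketch of what it generates for one field; the names follow the kernel's pattern, but this is an illustration, not the exact macro output:

/*
 * Sketch of one controls-shadow accessor pair: the cached copy in
 * loaded_vmcs lets repeated bit updates skip redundant VMWRITEs.
 */
static inline void secondary_exec_controls_set(struct vcpu_vmx *vmx, u32 val)
{
	if (vmx->loaded_vmcs->controls_shadow.secondary_exec != val) {
		vmcs_write32(SECONDARY_VM_EXEC_CONTROL, val);
		vmx->loaded_vmcs->controls_shadow.secondary_exec = val;
	}
}

static inline void secondary_exec_controls_clearbit(struct vcpu_vmx *vmx, u32 bit)
{
	/* e.g. nested.c clears SECONDARY_EXEC_SHADOW_VMCS this way. */
	secondary_exec_controls_set(vmx,
		vmx->loaded_vmcs->controls_shadow.secondary_exec & ~bit);
}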
posted_intr.c - VT-d posted-interrupt handling. Matches:
    56  vmx_vcpu_pi_load(): removes the vCPU from the wakeup list (list_del(&vmx->pi_wakeup_list) at 93)
   149  pi_enable_wakeup_handler(): list_add_tail(&vmx->pi_wakeup_list, ...) at 156
   223  pi_wakeup_handler(): walks the per-CPU wakeup_list and, where pi_test_on(&vmx->pi_desc) is set, calls kvm_vcpu_wake_up(&vmx->vcpu)
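A hedged reconstruction of pi_wakeup_handler() from the matched lines; the per-CPU list name is taken from the v6.6 source as best recalled, and locking is reduced to a comment:

/*
 * Sketch of the wakeup path: vCPUs that blocked with a posted-interrupt
 * descriptor are parked on a per-CPU list; the handler wakes any vCPU
 * whose descriptor has the ON (outstanding notification) bit set.
 */
static void pi_wakeup_handler(void)
{
	int cpu = smp_processor_id();
	struct list_head *wakeup_list = &per_cpu(wakeup_vcpus_on_cpu, cpu);
	struct vcpu_vmx *vmx;

	/* The real code holds a per-CPU raw spinlock around this walk. */
	list_for_each_entry(vmx, wakeup_list, pi_wakeup_list) {
		if (pi_test_on(&vmx->pi_desc))
			kvm_vcpu_wake_up(&vmx->vcpu);
	}
}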
/kernel/linux/linux-5.10/arch/x86/kvm/vmx/

nested.c - the linux-5.10 nested VMX implementation (note -1ull instead of INVALID_GPA, and handle_vmon instead of handle_vmxon). Matched definitions (file line: symbol):
   189 nested_vmx_fail, 205 nested_vmx_abort, 218 vmx_disable_shadow_vmcs, 227 nested_release_evmcs,
   237 vmx_sync_vmcs_host_state, 258 vmx_switch_vmcs, 281 free_nested, 337 nested_ept_inject_page_fault,
   685 nested_flush_cached_shadow_vmcs12, 881 nested_vmx_max_atomic_switch_msrs,
   937 nested_vmx_get_vmexit_msr_value, 1035 prepare_vmx_msr_autostore_list,
   1193 nested_vmx_transition_tlb_flush, 1241 vmx_restore_vmx_basic, 1301 vmx_restore_control_msr,
   1324 vmx_restore_vmx_misc, 1359 vmx_restore_vmx_ept_vpid_cap, 1385 vmx_restore_fixed0_msr,
   1407 vmx_set_vmx_msr, 1555 copy_shadow_to_vmcs12, 1582 copy_vmcs12_to_shadow,
   1616 copy_enlightened_to_vmcs12, 1832 copy_vmcs12_to_enlightened,
   2002 nested_vmx_handle_enlightened_vmptrld, 2084 nested_sync_vmcs12_to_shadow,
   2100 vmx_preemption_timer_fn, 2112 vmx_calc_preemption_timer_value, 2129 vmx_start_preemption_timer,
   2151 nested_vmx_calc_efer, 2162 prepare_vmcs02_constant_state, 2220 prepare_vmcs02_early_rare,
   2235 prepare_vmcs02_early, 2398 prepare_vmcs02_rare, 2530 prepare_vmcs02, 2664 nested_vmx_check_eptp,
   2714 nested_check_vm_execution_controls, 2773 nested_check_vm_exit_controls,
   2790 nested_check_vm_entry_controls, 3065 nested_vmx_check_vmentry_hw, 3143 nested_get_evmcs_page,
   3165 nested_get_vmcs12_pages, 3264 nested_vmx_write_pml_buffer, 3344 nested_vmx_enter_non_root_mode,
   3509 nested_vmx_run, 3758 vmx_complete_nested_posted_interrupt, 3857 vmx_check_nested_events,
   4019 sync_vmcs02_to_vmcs12_rare, 4066 copy_vmcs02_to_vmcs12_rare, 4094 sync_vmcs02_to_vmcs12,
   4348 nested_vmx_get_vmcs01_guest_efer, 4374 nested_vmx_restore_host_state, 4487 nested_vmx_vmexit,
   4786 nested_vmx_pmu_entry_exit_ctls_update, 4835 alloc_shadow_vmcs, 4856 enter_vmx_operation,
   4916 handle_vmon, 5004 nested_release_vmcs12, 5046 handle_vmclear, 5107 handle_vmread,
   5191 handle_vmwrite, 5292 set_current_vmptr, 5307 handle_vmptrld, 5406 handle_invept,
   5485 handle_invvpid, 5607 handle_vmfunc, 6050 nested_vmx_reflect_vmexit, 6104 vmx_get_nested_state,
   6231 vmx_set_nested_state
vmx.c - the linux-5.10 core VMX implementation. Matched definitions (file line: symbol):
   375 vmx_disable_fb_clear, 389 vmx_enable_fb_clear, 398 vmx_update_fb_clear_dis, 508 vmx_segment_cache_clear,
   731 __vmx_find_uret_msr, 741 vmx_find_uret_msr, 751 vmx_set_guest_uret_msr, 818 vmx_segment_cache_test_set,
   833 vmx_read_guest_seg_selector, 842 vmx_read_guest_seg_base, 851 vmx_read_guest_seg_limit,
   860 vmx_read_guest_seg_ar, 918 msr_write_intercepted, 938 __vmx_vcpu_run_flags,
   956 clear_atomic_switch_msr_special, 974 clear_atomic_switch_msr, 1014 add_atomic_switch_msr_special,
   1026 add_atomic_switch_msr, 1092 update_transition_efer, 1174 pt_can_write_msr, 1214 pt_guest_enter,
   1231 pt_guest_exit, 1274 vmx_prepare_switch_to_guest, 1343 vmx_prepare_switch_to_host,
   1383 vmx_read_guest_kernel_gs_base, 1392 vmx_write_guest_kernel_gs_base, 1405 vmx_vcpu_load_vmcs,
   1478 vmx_vcpu_load, 1501 vmx_get_rflags, 1519 vmx_set_rflags, 1577 vmx_rtit_ctl_check,
   1696 vmx_update_emulated_instruction, 1737 vmx_queue_exception, 1782 vmx_setup_uret_msr,
   1802 setup_msrs, 1892 vmx_get_msr, 2053 vmx_set_msr, 2808 enter_pmode, 2879 enter_rmode,
   2927 vmx_set_efer, 2976 vmx_flush_tlb_all, 3076 vmx_set_cr0, 3230 vmx_set_cr4, 3290 vmx_get_segment,
   3337 vmx_get_cpl, 3366 vmx_set_segment, 3811 vmx_msr_bitmap_l01_changed,
   3830 vmx_disable_intercept_for_msr, 3875 vmx_enable_intercept_for_msr, 3969 vmx_update_msr_bitmap,
   3984 pt_update_intercept_for_msr, 4000 vmx_guest_apic_has_interrupt, 4020 vmx_msr_filter_changed,
   4083 vmx_deliver_nested_posted_interrupt, 4109 vmx_deliver_posted_interrupt,
   4138 vmx_set_constant_host_state, 4195 set_cr4_guest_host_mask, 4209 vmx_pin_based_exec_ctrl,
   4227 vmx_refresh_apicv_exec_ctrl, 4245 vmx_exec_control, 4277 vmx_adjust_secondary_exec_control,
   4327 vmx_compute_secondary_exec_control, 4417 init_vmcs, 4511 vmx_vcpu_reset, 4624 vmx_inject_irq,
   4652 vmx_inject_nmi, 4683 vmx_get_nmi_mask, 4697 vmx_set_nmi_mask, 4892 handle_exception_nmi,
   5400 handle_task_switch, 5541 handle_invalid_guest_state, 5586 grow_ple_window, 5602 shrink_ple_window,
   5736 handle_fastpath_preemption_timer, 5838 vmx_get_exit_info, 5855 vmx_destroy_pml_buffer,
   5865 vmx_flush_pml_buffer, 6081 vmx_handle_exit, 6320 vmx_set_virtual_apic_mode, 6449 vmx_sync_pir_to_irr,
   6493 vmx_apicv_post_state_restore, 6509 handle_exception_nmi_irqoff, 6541 vmx_handle_exit_irqoff,
   6571 vmx_recover_nmi_blocking, 6665 vmx_complete_interrupts, 6682 atomic_switch_perf_msrs,
   6702 vmx_update_hv_timer, 6726 vmx_update_host_rsp, 6734 vmx_spec_ctrl_restore_host,
   6771 vmx_vcpu_enter_exit, 6839 vmx_vcpu_run, 6988 vmx_free_vcpu, 6999 vmx_create_vcpu,
   7254 vmcs_set_secondary_exec_control, 7280 nested_vmx_cr_fixed1_bits_update,
   7320 nested_vmx_entry_exit_ctls_update, 7337 update_intel_pt_cfg, 7406 vmx_vcpu_after_set_cpuid,
   7608 vmx_set_hv_timer, 7722 vmx_pre_enter_smm, 7736 vmx_pre_leave_smm
vmx.h - the linux-5.10 counterpart of the header above. Matches:
   210  same host-state caching comment for vmx->loaded_vmcs
   339  void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
   351  void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
   366  struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
   368  void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
   369  void vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, unsigned int flags);
   370  unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx);
   371  bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, ...);
   382  lname##_controls_set(struct vcpu_vmx *vmx, ...): controls-shadow accessors
   462  vmx_get_exit_qual(), 473 vmx_get_intr_info(), 494 decache_tsc_multiplier(struct vcpu_vmx *vmx), 500 vmx_has_waitpkg(struct vcpu_vmx *vmx)
nested.h - small inline helpers layered on vmx.h. Matches:
    57  vmx_has_valid_vmcs12(): true while in guest mode, or when current_vmptr != -1ull, or when an enlightened VMCS (hv_evmcs) is mapped
    71  nested_get_vpid02(): returns vmx->nested.vpid02 if nonzero, else vmx->vpid
/kernel/linux/linux-5.10/tools/testing/selftests/kvm/x86_64/

vmx_set_nested_state_test.c - exercises KVM_SET_NESTED_STATE validation. Matches:
    22  comment: mirror of VMCS12_REVISION in arch/x86/kvm/vmx/vmcs12.h
    83  set_default_vmx_state(): hdr.vmx.vmxon_pa = 0x1000, hdr.vmx.vmcs12_pa = 0x2000, hdr.vmx.smm.flags = 0
   114  test_vmx_nested_state(): probes invalid combinations, e.g. vmxon_pa = -1ull and vmcs12_pa = -1ull
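The defaults at 83-85 translate directly into the uAPI structure. A minimal sketch of filling the VMX header the way set_default_vmx_state() does; the 0x1000/0x2000 addresses come from the test itself, and the real test additionally tweaks flags per case:

#include <string.h>
#include <linux/kvm.h>

/* Sketch: populate the VMX portion of KVM_SET_NESTED_STATE input. */
static void set_default_vmx_state_sketch(struct kvm_nested_state *state, int size)
{
	memset(state, 0, size);
	state->format = KVM_STATE_NESTED_FORMAT_VMX;
	state->size = size;
	state->hdr.vmx.vmxon_pa = 0x1000;	/* guest-physical VMXON region */
	state->hdr.vmx.vmcs12_pa = 0x2000;	/* current vmcs12 */
	state->hdr.vmx.smm.flags = 0;		/* vCPU not in SMM */
	/* The test uses -1ull in either _pa field to mark it absent. */
}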
vmx_dirty_log_test.c - dirty-logging under nested EPT. Matches:
    50  l1_guest_code(struct vmx_pages *vmx): the usual prepare_for_vmx_operation()/load_vmcs()/prepare_vmcs(vmx, l2_guest_code, ...) sequence (see the sketch earlier)
    72  main(): vcpu_alloc_vmx(vm, &vmx_pages_gva) at 86, then prepare_eptp(vmx, vm, 0) at 110 and nested_map_memslot(...) at 111
/kernel/linux/linux-6.6/tools/testing/selftests/kvm/x86_64/

vmx_set_nested_state_test.c - same test as the linux-5.10 copy above, at slightly shifted offsets:
    22  comment: mirror of VMCS12_REVISION in arch/x86/kvm/vmx/vmcs12.h
    82  set_default_vmx_state(): vmxon_pa = 0x1000, vmcs12_pa = 0x2000, smm.flags = 0
   113  test_vmx_nested_state(): probes invalid vmxon_pa/vmcs12_pa combinations (-1ull markers)
vmx_dirty_log_test.c - the linux-6.6 copy. Matches:
    48  l1_guest_code(struct vmx_pages *vmx): asserts vmx->vmcs_gpa, then prepare_for_vmx_operation(), load_vmcs(), prepare_vmcs(vmx, l2_guest_code, ...)
    70  main(): vcpu_alloc_vmx(vm, &vmx_pages_gva) at 84, prepare_eptp(vmx, vm, 0) at 107, nested_map_memslot(...) at 108; the nested-EPT setup is sketched below
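Host-side, the dirty-log test wires up nested EPT before running the vCPU. A sketch of that fragment of main(), reconstructed from the matches above; VM and vCPU creation are omitted, and the wrapper function is illustrative:

/* Fragment of main() as reconstructed from the matched lines. */
static void setup_nested_ept(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	vm_vaddr_t vmx_pages_gva;
	struct vmx_pages *vmx;

	vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);   /* see the sketch near the top */
	prepare_eptp(vmx, vm, 0);                   /* allocate the EPT root */
	nested_map_memslot(vmx, vm, 0);             /* identity-map memslot 0 in EPT */
	vcpu_args_set(vcpu, 1, vmx_pages_gva);      /* hand L1 the GVA of vmx_pages */
}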
/kernel/linux/linux-5.10/tools/testing/selftests/powerpc/ptrace/

ptrace-vsx.h - shared helpers for the powerpc VSX/VMX ptrace tests. Matches:
    28  comment: unsigned long vmx[32][2]
    31  validate_vmx(unsigned long vmx[][2], unsigned long *load): compares both halves of every VMX register against the reference buffer; one loop checks load[64 + 2*i]/load[65 + 2*i], another the swapped order, printing any mismatch
   108  load_vsx_vmx(unsigned long *load, unsigned long *vsx, unsigned long vmx[][2])
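A reconstruction of validate_vmx() from the truncated matches above. Each 128-bit VMX register is reported as two unsigned longs starting at load[64]; the two index orders seen in the snippet presumably select on endianness, which is how this sketch interprets them:

#include <stdio.h>

int validate_vmx_sketch(unsigned long vmx[][2], unsigned long *load)
{
	int i;

	for (i = 0; i < 32; i++) {
#if defined(__BIG_ENDIAN__)
		if ((vmx[i][0] != load[64 + 2 * i]) ||
		    (vmx[i][1] != load[65 + 2 * i])) {
#else
		/* Little endian: the two halves arrive swapped. */
		if ((vmx[i][0] != load[65 + 2 * i]) ||
		    (vmx[i][1] != load[64 + 2 * i])) {
#endif
			printf("vmx[%d] mismatch against load[]\n", i);
			return -1;
		}
	}
	return 0;
}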
ptrace-tm-spd-vsx.c - TM suspended-mode VSX/VMX ptrace test. Matches:
    99  trace_tm_spd_vsx(): show_vmx()/validate_vmx() against fp_load, show_vmx_ckpt()/validate_vmx() against fp_load_ckpt, then memset(vmx, 0, sizeof(vmx)), load_vsx_vmx(fp_load_ckpt_new, vsx, vmx), and write_vmx_ckpt()
ptrace-tm-vsx.c - TM VSX/VMX ptrace test. Matches:
    87  trace_tm_vsx(): same pattern - validate live and checkpointed VMX state, then write new checkpointed state via write_vmx_ckpt()
ptrace-vsx.c - non-TM VSX/VMX ptrace test. Matches:
    40  trace_vsx(): show_vmx()/validate_vmx() against fp_load, then load_vsx_vmx(fp_load_new, vsx, vmx) and write_vmx()
/kernel/linux/linux-6.6/tools/testing/selftests/powerpc/ptrace/

ptrace-vsx.h: matches identical to the linux-5.10 copy above (validate_vmx() at 31, load_vsx_vmx() at 108)
ptrace-tm-spd-vsx.c: identical to the linux-5.10 copy (trace_tm_spd_vsx() at 99)
ptrace-tm-vsx.c: identical to the linux-5.10 copy (trace_tm_vsx() at 87)
ptrace-vsx.c: identical to the linux-5.10 copy (trace_vsx() at 40)
/kernel/linux/linux-6.6/arch/x86/kvm/

Makefile - objects composing kvm-intel.ko. Matches:
    25  kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o vmx/hyperv.o vmx/nested.o vmx/posted_intr.o
    27  kvm-intel-$(CONFIG_X86_SGX_KVM) += vmx/sgx.o
    44  $(obj)/vmx/vmenter.o ...
/kernel/linux/linux-5.10/arch/x86/kvm/

Makefile - the linux-5.10 object list. Matches:
    21  kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o vmx/evmcs.o vmx/nested.o vmx/posted_intr.o

Comparing the two Makefiles: by v6.6 the enlightened-VMCS code moved from vmx/evmcs.o to vmx/hyperv.o, and vmx/sgx.o was added under CONFIG_X86_SGX_KVM.