Lines matching refs: kvm_x86_ops

114 struct kvm_x86_ops kvm_x86_ops __read_mostly;
115 EXPORT_SYMBOL_GPL(kvm_x86_ops);
716 if (kvm_x86_ops.get_cpl(vcpu) <= required_cpl)
858 kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
871 kvm_x86_ops.set_cr0(vcpu, cr0);
981 if (kvm_x86_ops.get_cpl(vcpu) != 0 ||
998 if (!kvm_x86_ops.is_valid_cr4(vcpu, cr4))
1035 kvm_x86_ops.set_cr4(vcpu, cr4);
1123 kvm_x86_ops.set_dr7(vcpu, dr7);
1468 return kvm_x86_ops.get_msr_feature(msr);
1544 r = kvm_x86_ops.set_efer(vcpu, efer);
1653 return kvm_x86_ops.set_msr(vcpu, &msr);
1686 ret = kvm_x86_ops.get_msr(vcpu, &msr);
2274 vcpu->arch.tsc_offset = kvm_x86_ops.write_l1_tsc_offset(vcpu, offset);
3023 kvm_x86_ops.tlb_flush_all(vcpu);
3029 kvm_x86_ops.tlb_flush_guest(vcpu);
3899 r = kvm_x86_ops.has_emulated_msr(MSR_IA32_SMBASE);
3902 r = !kvm_x86_ops.cpu_has_accelerated_tpr();
3929 r = kvm_x86_ops.nested_ops->get_state ?
3930 kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0;
3933 r = kvm_x86_ops.enable_direct_tlbflush != NULL;
3936 r = kvm_x86_ops.nested_ops->enable_evmcs != NULL;
4058 if (kvm_x86_ops.has_wbinvd_exit())
4065 kvm_x86_ops.vcpu_load(vcpu, cpu);
4159 vcpu->arch.preempted_in_kernel = !kvm_x86_ops.get_cpl(vcpu);
4170 kvm_x86_ops.vcpu_put(vcpu);
4184 kvm_x86_ops.sync_pir_to_irr(vcpu);
4303 kvm_x86_ops.setup_mce(vcpu);
4410 events->interrupt.shadow = kvm_x86_ops.get_interrupt_shadow(vcpu);
4414 events->nmi.masked = kvm_x86_ops.get_nmi_mask(vcpu);
4481 kvm_x86_ops.set_interrupt_shadow(vcpu,
4487 kvm_x86_ops.set_nmi_mask(vcpu, events->nmi.masked);
4500 kvm_x86_ops.nested_ops->leave_nested(vcpu);
4761 if (!kvm_x86_ops.nested_ops->enable_evmcs)
4763 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version);
4772 if (!kvm_x86_ops.enable_direct_tlbflush)
4775 return kvm_x86_ops.enable_direct_tlbflush(vcpu);
5086 if (!kvm_x86_ops.nested_ops->get_state)
5094 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state,
5116 if (!kvm_x86_ops.nested_ops->set_state)
5139 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state);
5183 ret = kvm_x86_ops.set_tss_addr(kvm, addr);
5190 return kvm_x86_ops.set_identity_map_addr(kvm, ident_addr);
5347 if (kvm_x86_ops.flush_log_dirty)
5348 kvm_x86_ops.flush_log_dirty(kvm);
5890 if (kvm_x86_ops.mem_enc_op)
5891 r = kvm_x86_ops.mem_enc_op(kvm, argp);
5902 if (kvm_x86_ops.mem_enc_reg_region)
5903 r = kvm_x86_ops.mem_enc_reg_region(kvm, &region);
5914 if (kvm_x86_ops.mem_enc_unreg_region)
5915 r = kvm_x86_ops.mem_enc_unreg_region(kvm, &region);
6024 if (!kvm_x86_ops.has_emulated_msr(emulated_msrs_all[i]))
6087 kvm_x86_ops.set_segment(vcpu, var, seg);
6093 kvm_x86_ops.get_segment(vcpu, var, seg);
6113 u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
6120 u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
6128 u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
6177 u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
6202 u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
6223 if (!system && kvm_x86_ops.get_cpl(vcpu) == 3)
6276 if (!system && kvm_x86_ops.get_cpl(vcpu) == 3)
6301 if (unlikely(!kvm_x86_ops.can_emulate_instruction(vcpu, NULL, 0)))
6335 u32 access = ((kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
6743 return kvm_x86_ops.get_segment_base(vcpu, seg);
6756 if (kvm_x86_ops.has_wbinvd_exit()) {
6861 return kvm_x86_ops.get_cpl(emul_to_vcpu(ctxt));
6866 kvm_x86_ops.get_gdt(emul_to_vcpu(ctxt), dt);
6871 kvm_x86_ops.get_idt(emul_to_vcpu(ctxt), dt);
6876 kvm_x86_ops.set_gdt(emul_to_vcpu(ctxt), dt);
6881 kvm_x86_ops.set_idt(emul_to_vcpu(ctxt), dt);
7023 return kvm_x86_ops.check_intercept(emul_to_vcpu(ctxt), info, stage,
7066 kvm_x86_ops.set_nmi_mask(emul_to_vcpu(ctxt), masked);
7085 return kvm_x86_ops.pre_leave_smm(emul_to_vcpu(ctxt), smstate);
7148 u32 int_shadow = kvm_x86_ops.get_interrupt_shadow(vcpu);
7159 kvm_x86_ops.set_interrupt_shadow(vcpu, mask);
7201 kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
7267 if (!is_guest_mode(vcpu) && kvm_x86_ops.get_cpl(vcpu) == 0) {
7448 unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);
7451 r = kvm_x86_ops.skip_emulated_instruction(vcpu);
7574 if (unlikely(!kvm_x86_ops.can_emulate_instruction(vcpu, insn, insn_len)))
7708 unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);
7722 if (kvm_x86_ops.update_emulated_instruction)
7723 kvm_x86_ops.update_emulated_instruction(vcpu);
8052 user_mode = kvm_x86_ops.get_cpl(__this_cpu_read(current_vcpu));
8145 if (kvm_x86_ops.hardware_enable) {
8253 kvm_x86_ops.hardware_enable = NULL;
8394 if (kvm_x86_ops.get_cpl(vcpu) != 0) {
8451 kvm_x86_ops.patch_hypercall(vcpu, instruction);
8480 if (!kvm_x86_ops.update_cr8_intercept)
8499 kvm_x86_ops.update_cr8_intercept(vcpu, tpr, max_irr);
8511 kvm_x86_ops.queue_exception(vcpu);
8541 kvm_x86_ops.set_nmi(vcpu);
8544 kvm_x86_ops.set_irq(vcpu);
8559 r = kvm_x86_ops.nested_ops->check_events(vcpu);
8605 * The kvm_x86_ops hooks communicate this by returning -EBUSY.
8608 r = can_inject ? kvm_x86_ops.smi_allowed(vcpu, true) : -EBUSY;
8617 kvm_x86_ops.enable_smi_window(vcpu);
8621 r = can_inject ? kvm_x86_ops.nmi_allowed(vcpu, true) : -EBUSY;
8627 kvm_x86_ops.set_nmi(vcpu);
8629 WARN_ON(kvm_x86_ops.nmi_allowed(vcpu, true) < 0);
8632 kvm_x86_ops.enable_nmi_window(vcpu);
8636 r = can_inject ? kvm_x86_ops.interrupt_allowed(vcpu, true) : -EBUSY;
8641 kvm_x86_ops.set_irq(vcpu);
8642 WARN_ON(kvm_x86_ops.interrupt_allowed(vcpu, true) < 0);
8645 kvm_x86_ops.enable_irq_window(vcpu);
8649 kvm_x86_ops.nested_ops->hv_timer_pending &&
8650 kvm_x86_ops.nested_ops->hv_timer_pending(vcpu))
8670 if (kvm_x86_ops.get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
8760 kvm_x86_ops.get_gdt(vcpu, &dt);
8764 kvm_x86_ops.get_idt(vcpu, &dt);
8814 kvm_x86_ops.get_idt(vcpu, &dt);
8824 kvm_x86_ops.get_gdt(vcpu, &dt);
8854 kvm_x86_ops.pre_enter_smm(vcpu, buf);
8859 if (kvm_x86_ops.get_nmi_mask(vcpu))
8862 kvm_x86_ops.set_nmi_mask(vcpu, true);
8868 kvm_x86_ops.set_cr0(vcpu, cr0);
8871 kvm_x86_ops.set_cr4(vcpu, 0);
8875 kvm_x86_ops.set_idt(vcpu, &dt);
8906 kvm_x86_ops.set_efer(vcpu, 0);
8944 kvm_x86_ops.refresh_apicv_exec_ctrl(vcpu);
8960 if (!kvm_x86_ops.check_apicv_inhibit_reasons ||
8961 !kvm_x86_ops.check_apicv_inhibit_reasons(bit))
8980 if (kvm_x86_ops.pre_update_apicv_exec_ctrl)
8981 kvm_x86_ops.pre_update_apicv_exec_ctrl(kvm, activate);
9007 kvm_x86_ops.sync_pir_to_irr(vcpu);
9027 kvm_x86_ops.load_eoi_exitmap(vcpu, eoi_exit_bitmap);
9046 if (kvm_x86_ops.guest_memory_reclaimed)
9047 kvm_x86_ops.guest_memory_reclaimed(kvm);
9055 if (!kvm_x86_ops.set_apic_access_page_addr)
9058 kvm_x86_ops.set_apic_access_page_addr(vcpu);
9084 if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
9192 kvm_x86_ops.msr_filter_changed(vcpu);
9205 kvm_x86_ops.enable_irq_window(vcpu);
9220 kvm_x86_ops.prepare_guest_switch(vcpu);
9251 kvm_x86_ops.sync_pir_to_irr(vcpu);
9265 kvm_x86_ops.request_immediate_exit(vcpu);
9286 exit_fastpath = kvm_x86_ops.run(vcpu);
9296 kvm_x86_ops.sync_dirty_debug_regs(vcpu);
9318 kvm_x86_ops.handle_exit_irqoff(vcpu);
9369 r = kvm_x86_ops.handle_exit(vcpu, exit_fastpath);
9375 kvm_x86_ops.cancel_injection(vcpu);
9385 (!kvm_x86_ops.pre_block || kvm_x86_ops.pre_block(vcpu) == 0)) {
9390 if (kvm_x86_ops.post_block)
9391 kvm_x86_ops.post_block(vcpu);
9418 kvm_x86_ops.nested_ops->check_events(vcpu);
9575 /* PKRU is separately restored in kvm_x86_ops.run. */
9779 kvm_x86_ops.get_idt(vcpu, &dt);
9782 kvm_x86_ops.get_gdt(vcpu, &dt);
9931 kvm_x86_ops.set_idt(vcpu, &dt);
9934 kvm_x86_ops.set_gdt(vcpu, &dt);
9944 kvm_x86_ops.set_efer(vcpu, sregs->efer);
9947 kvm_x86_ops.set_cr0(vcpu, sregs->cr0);
9953 kvm_x86_ops.set_cr4(vcpu, sregs->cr4);
10059 kvm_x86_ops.update_exception_bitmap(vcpu);
10269 r = kvm_x86_ops.vcpu_create(vcpu);
10329 kvm_x86_ops.vcpu_free(vcpu);
10418 kvm_x86_ops.vcpu_reset(vcpu, init_event);
10443 ret = kvm_x86_ops.hardware_enable();
10525 kvm_x86_ops.hardware_disable();
10543 memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
10572 kvm_x86_ops.hardware_unsetup();
10612 kvm_x86_ops.sched_in(vcpu, cpu);
10661 return kvm_x86_ops.vm_init(kvm);
10779 if (kvm_x86_ops.vm_destroy)
10780 kvm_x86_ops.vm_destroy(kvm);
10950 * When PML is enabled, the kvm_x86_ops dirty logging hooks are
10969 if (kvm_x86_ops.slot_enable_log_dirty) {
10970 kvm_x86_ops.slot_enable_log_dirty(kvm, new);
10987 if (kvm_x86_ops.slot_disable_log_dirty)
10988 kvm_x86_ops.slot_disable_log_dirty(kvm, new);
11026 kvm_x86_ops.guest_apic_has_interrupt &&
11027 kvm_x86_ops.guest_apic_has_interrupt(vcpu));
11046 kvm_x86_ops.nmi_allowed(vcpu, false)))
11051 kvm_x86_ops.smi_allowed(vcpu, false)))
11063 kvm_x86_ops.nested_ops->hv_timer_pending &&
11064 kvm_x86_ops.nested_ops->hv_timer_pending(vcpu))
11085 if (vcpu->arch.apicv_active && kvm_x86_ops.dy_apicv_has_pending_interrupt(vcpu))
11103 return kvm_x86_ops.interrupt_allowed(vcpu, false);
11125 rflags = kvm_x86_ops.get_rflags(vcpu);
11137 kvm_x86_ops.set_rflags(vcpu, rflags);
11267 (vcpu->arch.apf.send_user_only && kvm_x86_ops.get_cpl(vcpu) == 0))
11412 ret = kvm_x86_ops.update_pi_irte(irqfd->kvm,
11437 ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0);
11448 return kvm_x86_ops.update_pi_irte(kvm, host_irq, guest_irq, set);
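Every hit above goes through the same dispatch pattern: a single global ops table is filled at hardware setup from the vendor implementation's runtime ops (the memcpy at line 10543), after which mandatory hooks are invoked directly (e.g. get_cpl, set_cr0) while optional hooks are guarded by a NULL check (e.g. mem_enc_op at lines 5890-5891, flush_log_dirty at lines 5347-5348). Below is a minimal sketch of that pattern in plain C; the struct hyp_ops, its hook names, and the vmx_ops stand-in are hypothetical illustrations of the shape, not KVM's actual definitions.

/*
 * Minimal sketch (hypothetical names throughout) of the dispatch
 * pattern the listing shows: one global table, vendor ops copied in
 * at init, mandatory hooks called directly, optional hooks NULL-checked.
 */
#include <stdio.h>
#include <string.h>

struct vcpu;                                /* opaque, hypothetical */

struct hyp_ops {                            /* stand-in for kvm_x86_ops */
	int  (*get_cpl)(struct vcpu *v);    /* mandatory: always installed */
	void (*mem_enc_op)(struct vcpu *v); /* optional: may be NULL */
};

static struct hyp_ops hyp_ops;              /* the single global table */

/* One vendor module fills in only the hooks it supports. */
static int vmx_get_cpl(struct vcpu *v)
{
	(void)v;
	return 0;
}

static const struct hyp_ops vmx_ops = {
	.get_cpl    = vmx_get_cpl,
	.mem_enc_op = NULL,                 /* this vendor: no memory encryption */
};

/* Mirrors the setup-time memcpy: copy the vendor table into the global one. */
static void ops_init(const struct hyp_ops *runtime_ops)
{
	memcpy(&hyp_ops, runtime_ops, sizeof(hyp_ops));
}

int main(void)
{
	struct vcpu *v = NULL;

	ops_init(&vmx_ops);

	/* Mandatory hook: called unconditionally, like kvm_x86_ops.get_cpl(). */
	printf("cpl = %d\n", hyp_ops.get_cpl(v));

	/* Optional hook: NULL-checked first, like kvm_x86_ops.mem_enc_op. */
	if (hyp_ops.mem_enc_op)
		hyp_ops.mem_enc_op(v);
	else
		printf("mem_enc_op not supported by this vendor\n");

	return 0;
}

Later kernels convert most of these sites from indirect calls to static_call() dispatch to avoid retpoline overhead, but the split between unconditional mandatory hooks and NULL-checked optional hooks remains the same shape.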