Lines matching refs: fault
92 struct x86_exception fault;
249 ret = __try_cmpxchg_user(ptep_user, &orig_pte, pte, fault);
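These matches are consistent with KVM's shadow-MMU walker template (arch/x86/kvm/mmu/paging_tmpl.h): line 92 is the x86_exception embedded in struct guest_walker, and at line 249 the `fault` token is the error label passed to __try_cmpxchg_user(), jumped to if the user-memory access itself faults while the walker updates guest accessed/dirty bits. The compare-and-exchange is there so a concurrent guest update to the same PTE is never silently overwritten. A minimal userspace model of that idea, assuming nothing from KVM (set_accessed_bit and the retry message are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PT_ACCESSED_MASK (1ULL << 5)	/* A bit: x86 PTE bit 5 */

/*
 * Model of the walker's accessed-bit update: compare-and-exchange
 * against the snapshot taken during the walk, so a concurrent write
 * to the same PTE forces a retry instead of being clobbered.
 */
static bool set_accessed_bit(_Atomic uint64_t *ptep, uint64_t orig_pte)
{
	uint64_t new_pte = orig_pte | PT_ACCESSED_MASK;

	/* Fails if *ptep no longer equals orig_pte. */
	return atomic_compare_exchange_strong(ptep, &orig_pte, new_pte);
}

int main(void)
{
	_Atomic uint64_t pte = 0x1000 | 1;	/* present PTE at 0x1000 */
	uint64_t snapshot = atomic_load(&pte);

	if (!set_accessed_bit(&pte, snapshot))
		puts("PTE changed under us; restart the walk");
	else
		printf("PTE now %#llx\n", (unsigned long long)atomic_load(&pte));
	return 0;
}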
352 * Queue a page fault for injection if this assertion fails, as callers
353 * assume that walker.fault contains sane info on a walk failure. I.e.
380 nested_access, &walker->fault);
384 * instruction) triggers a nested page fault. The exit
386 * "guest page access" as the nested page fault's cause,
448 real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault);
458 * On a write fault, fold the dirty bit into accessed_dirty.
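The comment at line 458 describes a shift trick: on a write fault the leaf PTE's dirty bit must also have been set, so the PTE is shifted right by the distance between the D and A bit positions, landing the dirty bit where the accessed bit sits, and ANDed into the accumulated accessed_dirty mask. A standalone sketch of that fold (the shifts match the architectural A = bit 5, D = bit 6 layout; fold_dirty is an illustrative name):

#include <stdint.h>
#include <stdio.h>

#define PT_GUEST_ACCESSED_SHIFT 5	/* A bit: PTE bit 5 */
#define PT_GUEST_DIRTY_SHIFT    6	/* D bit: PTE bit 6 */
#define PT_GUEST_ACCESSED_MASK  (1ULL << PT_GUEST_ACCESSED_SHIFT)

/*
 * Shifting right by (D - A) moves the dirty bit into the accessed
 * bit's position, so one accumulated mask covers both checks.
 */
static uint64_t fold_dirty(uint64_t accessed_dirty, uint64_t pte)
{
	return accessed_dirty &
	       (pte >> (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT));
}

int main(void)
{
	uint64_t accessed_dirty = PT_GUEST_ACCESSED_MASK; /* all A bits seen set */
	uint64_t leaf_pte_clean = PT_GUEST_ACCESSED_MASK; /* A=1, D=0 */

	/* Prints 0: the leaf D bit is clear, so A/D bits still need updating. */
	printf("%#llx\n", (unsigned long long)fold_dirty(accessed_dirty, leaf_pte_clean));
	return 0;
}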
481 walker->fault.vector = PF_VECTOR;
482 walker->fault.error_code_valid = true;
483 walker->fault.error_code = errcode;
517 walker->fault.address = addr;
518 walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
519 walker->fault.async_page_fault = false;
521 trace_kvm_mmu_walker_error(walker->fault.error_code);
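Lines 481-519 are the walker's failure path filling in walker->fault so a later injection delivers a well-formed #PF (vector 14) to the guest, including whether the fault belongs to a nested (L2) walk. A compact model using the field names from the listing, but with a simplified stand-in for struct x86_exception:

#include <stdbool.h>
#include <stdint.h>

#define PF_VECTOR 14	/* x86 #PF */

/* Simplified stand-in for KVM's struct x86_exception. */
struct x86_exception_model {
	uint8_t  vector;
	bool     error_code_valid;
	uint16_t error_code;
	uint64_t address;		/* faulting GVA (or GPA if nested) */
	bool     nested_page_fault;
	bool     async_page_fault;
};

/*
 * Mirror of the walker-error path: record everything an injector
 * needs to deliver the fault to the guest later.
 */
static void record_walk_fault(struct x86_exception_model *fault,
			      uint64_t addr, uint16_t errcode, bool nested)
{
	fault->vector = PF_VECTOR;
	fault->error_code_valid = true;
	fault->error_code = errcode;
	fault->address = addr;
	fault->nested_page_fault = nested;
	fault->async_page_fault = false;	/* synchronous walk failure */
}

int main(void)
{
	struct x86_exception_model fault;

	/* PFEC 0x2: write to a not-present page. */
	record_walk_fault(&fault, 0xdeadb000, 0x2, false);
	return fault.vector == PF_VECTOR ? 0 : 1;
}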
627 static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
634 gfn_t base_gfn = fault->gfn;
657 * loading a dummy root and handling the resulting page fault, e.g. if
665 for_each_shadow_entry(vcpu, fault->addr, it) {
709 if (fault->write && table_gfn == fault->gfn)
710 fault->write_fault_to_shadow_pgtable = true;
719 kvm_mmu_hugepage_adjust(vcpu, fault);
721 trace_kvm_mmu_spte_requested(fault);
728 if (fault->nx_huge_page_workaround_enabled)
729 disallowed_hugepage_adjust(fault, *it.sptep, it.level);
731 base_gfn = gfn_round_for_level(fault->gfn, it.level);
732 if (it.level == fault->goal_level)
743 if (fault->huge_page_disallowed)
745 fault->req_level >= it.level);
748 if (WARN_ON_ONCE(it.level != fault->goal_level))
751 ret = mmu_set_spte(vcpu, fault->slot, it.sptep, gw->pte_access,
752 base_gfn, fault->pfn, fault);
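Within FNAME(fetch), line 731 rounds the faulting gfn down to the first gfn covered by the current mapping level before the SPTE is installed. Assuming the usual x86 geometry of 9 gfn bits per paging level, a sketch of that rounding (gfn_round_for_level_model is an illustrative stand-in for KVM's helper):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* x86: each paging level maps 512 (2^9) entries of the level below. */
#define LEVEL_SHIFT(level)	(((level) - 1) * 9)
#define PAGES_PER_LEVEL(level)	(1ULL << LEVEL_SHIFT(level))

/* Round a gfn down to the base gfn of its level-sized region. */
static gfn_t gfn_round_for_level_model(gfn_t gfn, int level)
{
	return gfn & ~(PAGES_PER_LEVEL(level) - 1);
}

int main(void)
{
	gfn_t gfn = 0x12345;

	/* Level 2 (2MiB pages): clears the low 9 gfn bits -> 0x12200. */
	printf("%#llx\n", (unsigned long long)gfn_round_for_level_model(gfn, 2));
	return 0;
}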
764 * Page fault handler. There are several causes for a page fault:
771 * - normal guest page fault due to the guest pte marked not present, not
777 static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
782 WARN_ON_ONCE(fault->is_tdp);
786 * If PFEC.RSVD is set, this is a shadow page fault.
789 r = FNAME(walk_addr)(&walker, vcpu, fault->addr,
790 fault->error_code & ~PFERR_RSVD_MASK);
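The comment at line 786 and the masking at line 790 reflect that a PFEC.RSVD fault is generated by KVM's own shadow/MMIO SPTEs rather than by the guest's page tables, so the bit is stripped before walking the guest tables. A small illustration using the architectural PFEC bit positions (which match KVM's PFERR_* defines):

#include <stdint.h>
#include <stdio.h>

/* x86 page-fault error code (PFEC) bits. */
#define PFERR_PRESENT_MASK	(1U << 0)
#define PFERR_WRITE_MASK	(1U << 1)
#define PFERR_USER_MASK		(1U << 2)
#define PFERR_RSVD_MASK		(1U << 3)
#define PFERR_FETCH_MASK	(1U << 4)

int main(void)
{
	/* A reserved-bit fault synthesized via KVM's shadow/MMIO SPTEs. */
	uint32_t error_code = PFERR_RSVD_MASK | PFERR_WRITE_MASK | PFERR_PRESENT_MASK;

	/*
	 * RSVD describes KVM's shadow PTEs, not the guest's, so it is
	 * stripped before the guest page-table walk.
	 */
	printf("walk PFEC: %#x\n", error_code & ~PFERR_RSVD_MASK);
	return 0;
}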
796 if (!fault->prefetch)
797 kvm_inject_emulated_page_fault(vcpu, &walker.fault);
802 fault->gfn = walker.gfn;
803 fault->max_level = walker.level;
804 fault->slot = kvm_vcpu_gfn_to_memslot(vcpu, fault->gfn);
806 if (page_fault_handle_page_track(vcpu, fault)) {
807 shadow_page_table_clear_flood(vcpu, fault->addr);
815 r = kvm_faultin_pfn(vcpu, fault, walker.pte_access);
823 if (fault->write && !(walker.pte_access & ACC_WRITE_MASK) &&
824 !is_cr0_wp(vcpu->arch.mmu) && !fault->user && fault->slot) {
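Lines 823-824 handle the CR0.WP=0 case: a supervisor write to a read-only page must succeed, so the shadow mapping is widened to writable but demoted to kernel-only (and, in KVM, also stripped of exec when SMEP is on, so user mode cannot abuse the widened mapping). A self-contained sketch of that adjustment, with illustrative ACC_* flag values rather than KVM's masks:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative access flags; KVM's ACC_*_MASK values differ. */
#define ACC_EXEC	(1U << 0)
#define ACC_WRITE	(1U << 1)
#define ACC_USER	(1U << 2)

/*
 * CR0.WP=0 rule: supervisor writes ignore the write-protect bit.
 * Emulate it with one shadow mapping by granting write access while
 * dropping user access (and exec under SMEP).
 */
static uint32_t adjust_for_wp0(uint32_t pte_access, bool write_fault,
			       bool user_fault, bool cr0_wp, bool smep)
{
	if (write_fault && !(pte_access & ACC_WRITE) && !cr0_wp && !user_fault) {
		pte_access |= ACC_WRITE;
		pte_access &= ~ACC_USER;
		if (smep)
			pte_access &= ~ACC_EXEC;
	}
	return pte_access;
}

int main(void)
{
	/* Kernel write to a read-only, user-accessible page, CR0.WP=0, SMEP on. */
	uint32_t acc = adjust_for_wp0(ACC_USER | ACC_EXEC, true, false, false, true);

	printf("write=%d user=%d exec=%d\n",
	       !!(acc & ACC_WRITE), !!(acc & ACC_USER), !!(acc & ACC_EXEC));
	return 0;
}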
841 if (is_page_fault_stale(vcpu, fault))
847 r = FNAME(fetch)(vcpu, fault, &walker);
851 kvm_release_pfn_clean(fault->pfn);
887 *exception = walker.fault;