Lines Matching refs:walker
83 * table walker.
234 struct guest_walker *walker,
247 for (level = walker->max_level; level >= walker->level; --level) {
248 pte = orig_pte = walker->ptes[level - 1];
249 table_gfn = walker->table_gfn[level - 1];
250 ptep_user = walker->ptep_user[level - 1];
256 if (level == walker->level && write_fault &&
281 if (unlikely(!walker->pte_writable[level - 1]))
289 walker->ptes[level - 1] = pte;
308 static int FNAME(walk_addr_generic)(struct guest_walker *walker,
332 walker->level = mmu->root_level;
338 if (walker->level == PT32E_ROOT_LEVEL) {
340 trace_kvm_mmu_paging_element(pte, walker->level);
343 --walker->level;
346 walker->max_level = walker->level;
357 ++walker->level;
363 --walker->level;
365 index = PT_INDEX(addr, walker->level);
370 BUG_ON(walker->level < 1);
371 walker->table_gfn[walker->level - 1] = table_gfn;
372 walker->pte_gpa[walker->level - 1] = pte_gpa;
376 &walker->fault);
392 &walker->pte_writable[walker->level - 1]);
399 walker->ptep_user[walker->level - 1] = ptep_user;
401 trace_kvm_mmu_paging_element(pte, walker->level);
412 if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte, walker->level))) {
417 walker->ptes[walker->level - 1] = pte;
420 walker->pt_access[walker->level - 1] = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
421 } while (!is_last_gpte(mmu, walker->level, pte));
427 walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
428 errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
432 gfn = gpte_to_gfn_lvl(pte, walker->level);
433 gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;
435 if (PTTYPE == 32 && walker->level > PG_LEVEL_4K && is_cpuid_PSE36())
438 real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);
442 walker->gfn = real_gpa >> PAGE_SHIFT;
445 FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte);
456 ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
465 __func__, (u64)pte, walker->pte_access,
466 walker->pt_access[walker->level - 1]);
474 walker->fault.vector = PF_VECTOR;
475 walker->fault.error_code_valid = true;
476 walker->fault.error_code = errcode;
503 walker->fault.address = addr;
504 walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
506 trace_kvm_mmu_walker_error(walker->fault.error_code);
510 static int FNAME(walk_addr)(struct guest_walker *walker,
513 return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
518 static int FNAME(walk_addr_nested)(struct guest_walker *walker,
522 return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
750 struct guest_walker *walker, bool user_fault,
754 gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
757 if (!(walker->pte_access & ACC_WRITE_MASK ||
761 for (level = walker->level; level <= walker->max_level; level++) {
762 gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];
790 struct guest_walker walker;
808 r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);
816 kvm_inject_emulated_page_fault(vcpu, &walker.fault);
821 if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) {
833 &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);
838 max_level = walker.level;
843 if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
847 if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r))
854 if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) &&
857 walker.pte_access |= ACC_WRITE_MASK;
858 walker.pte_access &= ~ACC_USER_MASK;
867 walker.pte_access &= ~ACC_EXEC_MASK;
879 r = FNAME(fetch)(vcpu, addr, &walker, error_code, max_level, pfn,
964 struct guest_walker walker;
968 r = FNAME(walk_addr)(&walker, vcpu, addr, access);
971 gpa = gfn_to_gpa(walker.gfn);
974 *exception = walker.fault;
985 struct guest_walker walker;
994 r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);
997 gpa = gfn_to_gpa(walker.gfn);
1000 *exception = walker.fault;
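
All of the references above touch fields of `struct guest_walker`, the per-walk state that `FNAME(walk_addr_generic)` fills in while traversing the guest page tables and that the page-fault and gva-to-gpa paths then consume. As a reading aid, here is a hedged sketch of that structure as the references imply it; the field types and the `PT_MAX_FULL_LEVELS` array bound are assumptions standing in for the per-PTTYPE definitions in paging_tmpl.h and KVM's headers, not verbatim kernel source.

```c
/*
 * Rough sketch (not verbatim kernel source) of the walker state that the
 * references above read and write.  pt_element_t, PT_MAX_FULL_LEVELS,
 * gfn_t/gpa_t and struct x86_exception stand in for KVM's own types and
 * the per-PTTYPE macros in paging_tmpl.h; exact types and the array size
 * here are assumptions for illustration.
 */
struct guest_walker {
	int level;                                  /* level at which the walk ended (leaf PTE) */
	unsigned max_level;                         /* level at which the walk started (root) */
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];        /* gfn of each guest page table visited */
	pt_element_t ptes[PT_MAX_FULL_LEVELS];      /* guest PTE fetched at each level */
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];          /* guest-physical address of each PTE */
	pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS]; /* host-userspace pointer used to
	                                                        write A/D bits back */
	bool pte_writable[PT_MAX_FULL_LEVELS];      /* may the PTE be updated in place? */
	unsigned int pt_access[PT_MAX_FULL_LEVELS]; /* accumulated table-entry permissions */
	unsigned int pte_access;                    /* effective permissions of the translation */
	gfn_t gfn;                                  /* resulting guest frame number */
	struct x86_exception fault;                 /* #PF details injected on a failed walk */
};
```

The per-level arrays are what allow `FNAME(update_accessed_dirty_bits)` (lines 234-289 above) to revisit every table the walk touched and set accessed/dirty bits without re-walking from the root, while `fault` carries the error code and faulting address filled in around lines 474-506 when the walk fails.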