Lines matching refs:walker (references to the guest page-table walker, struct guest_walker, in KVM's arch/x86/kvm/mmu/paging_tmpl.h):
78 * table walker.
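
For orientation, the fields the matches below touch imply walker state of roughly the following shape. This is a minimal sketch reconstructed only from the fields visible in this listing; the array size, field types and ordering are assumptions, not the kernel's struct guest_walker definition.

    /* Hedged sketch of the walker state implied by the references below;
     * only fields that appear in the matched lines are listed. */
    #include <stdbool.h>
    #include <stdint.h>

    #define PT_MAX_FULL_LEVELS 5                    /* assumed: one slot per paging level */

    struct x86_exception_sketch {                   /* assumed shape of walker->fault */
        uint8_t  vector;
        bool     error_code_valid;
        uint16_t error_code;
        bool     nested_page_fault;
        bool     async_page_fault;
        uint64_t address;
    };

    struct guest_walker_sketch {
        int      level;                              /* level the walk stopped at (leaf) */
        int      max_level;                          /* level the walk started from (root) */
        uint64_t ptes[PT_MAX_FULL_LEVELS];           /* guest PTE read at each level */
        uint64_t table_gfn[PT_MAX_FULL_LEVELS];      /* gfn of each guest page-table page */
        uint64_t pte_gpa[PT_MAX_FULL_LEVELS];        /* guest-physical address of each PTE */
        uint64_t *ptep_user[PT_MAX_FULL_LEVELS];     /* host-user mapping used to read/update it */
        bool     pte_writable[PT_MAX_FULL_LEVELS];   /* may the PTE be updated through that mapping */
        unsigned int pt_access[PT_MAX_FULL_LEVELS];  /* accumulated table-level permissions */
        unsigned int pte_access;                     /* effective permissions of the final gpte */
        uint64_t gfn;                                /* translated guest frame number */
        struct x86_exception_sketch fault;           /* filled in when the walk fails */
    };
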
199 struct guest_walker *walker,
212 for (level = walker->max_level; level >= walker->level; --level) {
213 pte = orig_pte = walker->ptes[level - 1];
214 table_gfn = walker->table_gfn[level - 1];
215 ptep_user = walker->ptep_user[level - 1];
221 if (level == walker->level && write_fault &&
246 if (unlikely(!walker->pte_writable[level - 1]))
254 walker->ptes[level - 1] = pte;
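
Lines 212-254 are the accessed/dirty update pass: the walk is replayed from the root level down to the faulting level, each guest PTE gets its Accessed bit set, and the leaf additionally gets Dirty on a write fault. Below is a userspace model of that loop, assuming the architectural x86 A/D bit positions; the struct, the helper name and the plain store standing in for the kernel's cmpxchg are illustrative assumptions.

    #include <stdbool.h>
    #include <stdint.h>

    #define GPTE_ACCESSED (1ull << 5)   /* x86 PTE Accessed bit */
    #define GPTE_DIRTY    (1ull << 6)   /* x86 PTE Dirty bit */
    #define MAX_LEVELS 5

    struct ad_walker_model {
        int level, max_level;
        uint64_t ptes[MAX_LEVELS];
        bool pte_writable[MAX_LEVELS];
    };

    static int update_accessed_dirty_bits_model(struct ad_walker_model *w, bool write_fault)
    {
        for (int level = w->max_level; level >= w->level; --level) {
            uint64_t pte = w->ptes[level - 1], orig_pte = pte;

            pte |= GPTE_ACCESSED;                /* every visited table is "accessed" */
            if (level == w->level && write_fault)
                pte |= GPTE_DIRTY;               /* only the leaf becomes dirty */

            if (pte == orig_pte)
                continue;                        /* nothing changed, nothing to write back */
            if (!w->pte_writable[level - 1])
                return -1;                       /* cannot update the guest PTE in place */

            w->ptes[level - 1] = pte;            /* kernel: cmpxchg the new value into the gpte */
        }
        return 0;
    }

Walking from the root first means the upper-level tables are marked accessed even when the leaf entry already carried both bits.
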
302 static int FNAME(walk_addr_generic)(struct guest_walker *walker,
326 walker->level = mmu->cpu_role.base.level;
332 if (walker->level == PT32E_ROOT_LEVEL) {
334 trace_kvm_mmu_paging_element(pte, walker->level);
337 --walker->level;
340 walker->max_level = walker->level;
353 * assume that walker.fault contains sane info on a walk failure. I.e.
361 ++walker->level;
368 --walker->level;
370 index = PT_INDEX(addr, walker->level);
375 BUG_ON(walker->level < 1);
376 walker->table_gfn[walker->level - 1] = table_gfn;
377 walker->pte_gpa[walker->level - 1] = pte_gpa;
380 nested_access, &walker->fault);
400 &walker->pte_writable[walker->level - 1]);
407 walker->ptep_user[walker->level - 1] = ptep_user;
409 trace_kvm_mmu_paging_element(pte, walker->level);
420 if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte, walker->level))) {
425 walker->ptes[walker->level - 1] = pte;
428 walker->pt_access[walker->level - 1] = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
429 } while (!FNAME(is_last_gpte)(mmu, walker->level, pte));
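
The do/while body above picks the next guest PTE by indexing the current table page with a slice of the address, records where that entry lives (table_gfn, pte_gpa, ptep_user), fetches and validates it, and keeps descending until it reaches a leaf. A compact model of the index arithmetic, assuming 64-bit 4-level paging with 9 index bits per level; the kernel's PT_INDEX and related macros also cover 32-bit and PAE guest formats, and the names below are illustrative.

    #include <stdint.h>

    #define MODEL_PAGE_SHIFT 12
    #define MODEL_PT_BITS    9                       /* 512 entries of 8 bytes per table */

    /* Index selected by addr at a given level (1 = leaf, 4 = root for 4-level paging). */
    static inline unsigned int pt_index_model(uint64_t addr, int level)
    {
        return (addr >> (MODEL_PAGE_SHIFT + (level - 1) * MODEL_PT_BITS)) & 0x1ff;
    }

    /* Guest-physical address of the gpte to read at this level: the table page
     * plus the selected 8-byte entry.  This is the kind of value recorded in
     * walker->pte_gpa[] before the entry is fetched and validated. */
    static inline uint64_t gpte_gpa_model(uint64_t table_gfn, uint64_t addr, int level)
    {
        return (table_gfn << MODEL_PAGE_SHIFT) +
               pt_index_model(addr, level) * sizeof(uint64_t);
    }
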
435 walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
436 errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
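
Lines 428 and 435 turn the bits accumulated over the walk into ACC_* permissions. The exclusive-or with walk_nx_mask flips the NX bit so that executability, like write and user access, can be narrowed across levels with a plain AND. A toy model of that trick; the bit positions assume the 64-bit gpte layout and the helper name is made up.

    #include <stdint.h>

    #define GPTE_WRITE (1ull << 1)    /* R/W */
    #define GPTE_USER  (1ull << 2)    /* U/S */
    #define GPTE_NX    (1ull << 63)   /* XD: set means *not* executable */

    /* Accumulate permissions across levels: XOR-ing NX first turns it into an
     * "may execute" bit, so write/user/exec can all be narrowed with &=. */
    static uint64_t accumulate_access(const uint64_t *gptes, int nlevels)
    {
        uint64_t acc = GPTE_WRITE | GPTE_USER | GPTE_NX;  /* start fully permissive */

        for (int i = 0; i < nlevels; i++)
            acc &= gptes[i] ^ GPTE_NX;                    /* NX flipped into "may exec" */

        return acc;   /* caller flips NX back and translates to ACC_* flags */
    }
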
440 gfn = gpte_to_gfn_lvl(pte, walker->level);
441 gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;
444 if (walker->level > PG_LEVEL_4K && is_cpuid_PSE36())
448 real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault);
452 walker->gfn = real_gpa >> PAGE_SHIFT;
455 FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte);
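
Once the leaf gpte is known, lines 440-441 compute the 4K frame the address maps to: the (large-)page frame taken from the gpte plus the page offset of addr within that large page. A sketch assuming 64-bit paging; the PSE36 adjustment on line 444 for legacy 32-bit gptes is not modeled, and the names are illustrative.

    #include <stdint.h>

    #define MODEL_PAGE_SHIFT 12
    #define MODEL_PT_BITS    9

    /* gfn of the 4K page addr lands in, given the leaf gpte and its level:
     * the aligned frame stored in the gpte plus the 4K-page offset of addr
     * within the 4K/2M/1G region that the leaf maps. */
    static inline uint64_t leaf_gfn_model(uint64_t gpte, uint64_t addr, int level)
    {
        uint64_t lvl_pages = 1ull << ((level - 1) * MODEL_PT_BITS);  /* 4K pages per leaf */
        uint64_t base = (gpte >> MODEL_PAGE_SHIFT) & ~(lvl_pages - 1);

        return base + ((addr >> MODEL_PAGE_SHIFT) & (lvl_pages - 1));
    }
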
466 ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
481 walker->fault.vector = PF_VECTOR;
482 walker->fault.error_code_valid = true;
483 walker->fault.error_code = errcode;
517 walker->fault.address = addr;
518 walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
519 walker->fault.async_page_fault = false;
521 trace_kvm_mmu_walker_error(walker->fault.error_code);
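
The fault description filled in on lines 481-519 carries an architectural x86 page-fault error code, produced by permission_fault() or, for reserved-bit violations, assembled directly. As a reminder, these are the standard bits such an error code can carry; the kernel spells them PFERR_*_MASK, the names below are just labels.

    #define PF_ERR_PRESENT (1u << 0)  /* protection violation on a present translation */
    #define PF_ERR_WRITE   (1u << 1)  /* faulting access was a write */
    #define PF_ERR_USER    (1u << 2)  /* faulting access came from user mode */
    #define PF_ERR_RSVD    (1u << 3)  /* reserved bit set in a paging-structure entry */
    #define PF_ERR_FETCH   (1u << 4)  /* faulting access was an instruction fetch */
    #define PF_ERR_PK      (1u << 5)  /* protection-key violation */
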
525 static int FNAME(walk_addr)(struct guest_walker *walker,
528 return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
779 struct guest_walker walker;
789 r = FNAME(walk_addr)(&walker, vcpu, fault->addr,
797 kvm_inject_emulated_page_fault(vcpu, &walker.fault);
802 fault->gfn = walker.gfn;
803 fault->max_level = walker.level;
815 r = kvm_faultin_pfn(vcpu, fault, walker.pte_access);
823 if (fault->write && !(walker.pte_access & ACC_WRITE_MASK) &&
825 walker.pte_access |= ACC_WRITE_MASK;
826 walker.pte_access &= ~ACC_USER_MASK;
835 walker.pte_access &= ~ACC_EXEC_MASK;
847 r = FNAME(fetch)(vcpu, fault, &walker);
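
Within FNAME(page_fault), lines 823-826 relax the permissions stamped into the shadow mapping for the CR0.WP=0 case: a supervisor write is architecturally allowed through a read-only guest PTE, so write access is granted, and user access is dropped so that user-mode accesses still fault and get re-checked. The guard continues on a line not shown here; the cr0_wp and user_fault parameters below stand in for it as assumptions, and the ACC_* values are assumed to mirror the kernel's.

    #include <stdbool.h>

    #define ACC_WRITE_MASK 0x2u
    #define ACC_USER_MASK  0x4u

    static unsigned int adjust_pte_access_for_wp0(unsigned int pte_access,
                                                  bool write_fault, bool user_fault,
                                                  bool cr0_wp)
    {
        if (write_fault && !(pte_access & ACC_WRITE_MASK) && !user_fault && !cr0_wp) {
            pte_access |= ACC_WRITE_MASK;   /* let the supervisor write succeed */
            pte_access &= ~ACC_USER_MASK;   /* force user accesses to re-fault */
        }
        return pte_access;
    }
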
872 struct guest_walker walker;
881 r = FNAME(walk_addr_generic)(&walker, vcpu, mmu, addr, access);
884 gpa = gfn_to_gpa(walker.gfn);
887 *exception = walker.fault;
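
FNAME(gva_to_gpa) at the end of the listing reuses the same walk for software translations: on success the returned gpa is the translated frame plus the page offset of the original address (the offset add itself is not among the matched lines, so treat it as assumed), and on failure walker.fault is copied out through *exception. A one-line model of the success path:

    #include <stdint.h>

    static inline uint64_t walker_gfn_to_gpa(uint64_t gfn, uint64_t addr)
    {
        return (gfn << 12) | (addr & 0xfffull);   /* gfn_to_gpa() plus the page offset */
    }
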