Lines Matching refs:access
19 * The MMU needs to be able to access/walk 32-bit and 64-bit guest page tables,
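The match at line 19 is from the header comment of KVM's x86 guest-paging template, paging_tmpl.h, which is compiled several times so the same walker code can handle 32-bit, 64-bit and EPT guest page tables; an FNAME() macro decorates every function name so the instantiations do not collide. Below is a minimal stand-alone sketch of that name-pasting idea only; the DEFINE_GPTE_PRESENT macro and the names it produces are illustrative, not the kernel's actual build wiring.

#include <stdio.h>
#include <stdint.h>

/* "Template": one body, instantiated once per guest PTE width. */
#define DEFINE_GPTE_PRESENT(prefix, gpte_t)				\
	static int prefix##_gpte_present(gpte_t gpte)			\
	{								\
		return gpte & 1;	/* x86 present bit */		\
	}

DEFINE_GPTE_PRESENT(paging64, uint64_t)	/* 64-bit guest PTEs */
DEFINE_GPTE_PRESENT(paging32, uint32_t)	/* 32-bit guest PTEs */

int main(void)
{
	printf("64-bit: %d, 32-bit: %d\n",
	       paging64_gpte_present(0x1ULL), paging32_gpte_present(0x0U));
	return 0;
}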
109 static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access,
121 /* Allow write access to dirty gptes */
124 *access &= mask;
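Lines 109-124 are protect_clean_gpte(): unless the guest PTE already has its dirty bit set, write permission is stripped from the accumulated access mask, so the first write to a clean page faults and gives KVM a chance to set the guest dirty bit. A stand-alone sketch of that masking trick for ordinary x86 PTEs (writable = bit 1, dirty = bit 6) follows; the constant and function names are simplified stand-ins, not the kernel's, and the no-A/D-bits early return is omitted.

#include <stdio.h>
#include <stdint.h>

#define PTE_WRITABLE_SHIFT 1
#define PTE_DIRTY_SHIFT    6
#define PTE_WRITABLE_MASK  (1u << PTE_WRITABLE_SHIFT)
#define ACC_WRITE_MASK     PTE_WRITABLE_MASK	/* same encoding, as in KVM */

static void protect_clean_gpte(unsigned *access, uint64_t gpte)
{
	unsigned mask = ~ACC_WRITE_MASK;		/* drop write access...    */

	mask |= (gpte >> (PTE_DIRTY_SHIFT - PTE_WRITABLE_SHIFT)) &
		PTE_WRITABLE_MASK;			/* ...unless gpte is dirty */
	*access &= mask;
}

int main(void)
{
	unsigned acc_clean = ACC_WRITE_MASK, acc_dirty = ACC_WRITE_MASK;

	protect_clean_gpte(&acc_clean, 1u << PTE_WRITABLE_SHIFT);	/* clean */
	protect_clean_gpte(&acc_dirty, (1u << PTE_WRITABLE_SHIFT) |
				       (1u << PTE_DIRTY_SHIFT));	/* dirty */
	printf("clean gpte keeps write: %d, dirty gpte keeps write: %d\n",
	       !!(acc_clean & ACC_WRITE_MASK), !!(acc_dirty & ACC_WRITE_MASK));
	return 0;
}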
181 unsigned access;
183 access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
189 access = gpte & (PT_WRITABLE_MASK | PT_USER_MASK | PT_PRESENT_MASK);
191 access ^= (gpte >> PT64_NX_SHIFT);
194 return access;
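Lines 181-194 are gpte_access(), which turns a guest PTE into an ACC_* permission mask; the non-EPT branch exploits the fact that ACC_EXEC_MASK and the present bit are both bit 0, so XORing in the NX bit (bit 63) turns "present" into "executable" for present, non-NX entries. The stand-alone sketch below reproduces only that non-EPT branch; the constants mirror the architectural x86 PTE layout, but the code itself is illustrative, not the kernel function.

#include <stdio.h>
#include <stdint.h>

#define PT_PRESENT_MASK  (1u << 0)
#define PT_WRITABLE_MASK (1u << 1)
#define PT_USER_MASK     (1u << 2)
#define PT64_NX_SHIFT    63

#define ACC_EXEC_MASK    1u	/* deliberately equal to PT_PRESENT_MASK */

static unsigned gpte_access(uint64_t gpte)
{
	unsigned access = (unsigned)(gpte & (PT_WRITABLE_MASK | PT_USER_MASK |
					     PT_PRESENT_MASK));

	/* Fold NX into bit 0: present && !NX  =>  executable. */
	access ^= (unsigned)(gpte >> PT64_NX_SHIFT);
	return access;
}

int main(void)
{
	uint64_t nx_pte = (1ULL << PT64_NX_SHIFT) | PT_USER_MASK |
			  PT_WRITABLE_MASK | PT_PRESENT_MASK;
	uint64_t x_pte  = PT_USER_MASK | PT_WRITABLE_MASK | PT_PRESENT_MASK;

	printf("NX pte exec: %d, non-NX pte exec: %d\n",
	       !!(gpte_access(nx_pte) & ACC_EXEC_MASK),
	       !!(gpte_access(x_pte)  & ACC_EXEC_MASK));
	return 0;
}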
304 gpa_t addr, u64 access)
317 const int write_fault = access & PFERR_WRITE_MASK;
318 const int user_fault = access & PFERR_USER_MASK;
319 const int fetch_fault = access & PFERR_FETCH_MASK;
324 trace_kvm_mmu_pagetable_walk(addr, access);
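Lines 304-324 are the entry of walk_addr_generic(); the access argument is a PFERR_*-style error code whose low bits follow the x86 #PF error-code layout, so write/user/fetch intent is extracted with simple masks before the walk starts. Below is a small stand-alone sketch of that classification, assuming the architectural bit positions (write = bit 1, user = bit 2, instruction fetch = bit 4); the helper is illustrative, not the kernel function.

#include <stdio.h>
#include <stdint.h>

#define PFERR_WRITE_MASK (1ull << 1)	/* write access      */
#define PFERR_USER_MASK  (1ull << 2)	/* user-mode access  */
#define PFERR_FETCH_MASK (1ull << 4)	/* instruction fetch */

static void classify(uint64_t access)
{
	const int write_fault = !!(access & PFERR_WRITE_MASK);
	const int user_fault  = !!(access & PFERR_USER_MASK);
	const int fetch_fault = !!(access & PFERR_FETCH_MASK);

	printf("write=%d user=%d fetch=%d\n",
	       write_fault, user_fault, fetch_fault);
}

int main(void)
{
	classify(PFERR_WRITE_MASK | PFERR_USER_MASK);	/* user-mode write   */
	classify(PFERR_FETCH_MASK);			/* supervisor ifetch */
	return 0;
}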
386 * "guest page access" as the nested page fault's cause,
387 * instead of "guest page structure access". To fix this,
436 errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
448 real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault);
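At line 436 the accumulated pte_access is checked against the requested access, and at line 448 the final gfn is translated once more, which only matters for nested-guest walks. The sketch below shows a heavily simplified version of the permission idea only: fault if the request needs a right the walk did not grant. The real permission_fault() also folds in CR0.WP, SMEP/SMAP and protection keys through a precomputed bitmap, none of which is modeled here; the function name and logic below are illustrative.

#include <stdio.h>
#include <stdint.h>

#define ACC_EXEC_MASK   1u
#define ACC_WRITE_MASK  2u
#define ACC_USER_MASK   4u

#define PFERR_WRITE_MASK (1ull << 1)
#define PFERR_USER_MASK  (1ull << 2)
#define PFERR_FETCH_MASK (1ull << 4)

/* Non-zero means the requested access is not allowed by pte_access. */
static int simple_permission_fault(unsigned pte_access, uint64_t access)
{
	if ((access & PFERR_WRITE_MASK) && !(pte_access & ACC_WRITE_MASK))
		return 1;
	if ((access & PFERR_USER_MASK) && !(pte_access & ACC_USER_MASK))
		return 1;
	if ((access & PFERR_FETCH_MASK) && !(pte_access & ACC_EXEC_MASK))
		return 1;
	return 0;
}

int main(void)
{
	unsigned ro_user = ACC_USER_MASK | ACC_EXEC_MASK;	/* no write */

	printf("user read faults: %d, user write faults: %d\n",
	       simple_permission_fault(ro_user, PFERR_USER_MASK),
	       simple_permission_fault(ro_user,
				       PFERR_USER_MASK | PFERR_WRITE_MASK));
	return 0;
}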
492 * [2:0] - Derive from the access bits. The exit_qualification might be
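Line 492 is part of the comment describing how an EPT-violation exit qualification is synthesized for L1 when a nested walk fails: bits [2:0] encode the kind of access, which per the Intel SDM are data read (bit 0), data write (bit 1) and instruction fetch (bit 2). A minimal sketch of such a mapping, with illustrative constant names rather than the kernel's, might look like this:

#include <stdio.h>
#include <stdint.h>

#define EPTV_ACC_READ  (1ull << 0)	/* data read caused the violation  */
#define EPTV_ACC_WRITE (1ull << 1)	/* data write caused the violation */
#define EPTV_ACC_INSTR (1ull << 2)	/* instruction fetch caused it     */

static uint64_t ept_access_bits(int write_fault, int fetch_fault)
{
	if (write_fault)
		return EPTV_ACC_WRITE;
	if (fetch_fault)
		return EPTV_ACC_INSTR;
	return EPTV_ACC_READ;		/* neither write nor fetch: a read */
}

int main(void)
{
	printf("write -> %#llx, fetch -> %#llx, read -> %#llx\n",
	       (unsigned long long)ept_access_bits(1, 0),
	       (unsigned long long)ept_access_bits(0, 1),
	       (unsigned long long)ept_access_bits(0, 0));
	return 0;
}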
526 struct kvm_vcpu *vcpu, gpa_t addr, u64 access)
529 access);
545 pte_access = sp->role.access & FNAME(gpte_access)(gpte);
632 unsigned int direct_access, access;
673 access = gw->pt_access[it.level - 2];
675 false, access);
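Lines 632-675 are from fetch(), which builds shadow pages for the path the walker just resolved; the access installed at each level comes from a per-level array the walker filled in (gw->pt_access[it.level - 2]). The stand-alone sketch below illustrates only the underlying idea, assuming a hypothetical layout: permissions are intersected (ANDed) level by level on the way down and the running value is remembered for each level. It is conceptual, not the kernel's exact bookkeeping or indexing.

#include <stdio.h>

#define ACC_EXEC_MASK  1u
#define ACC_WRITE_MASK 2u
#define ACC_USER_MASK  4u
#define ACC_ALL        (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
#define MAX_LEVELS     4

int main(void)
{
	/* Hypothetical ACC_* masks derived from the gpte at levels 1..4. */
	unsigned gpte_acc[MAX_LEVELS + 1] = {
		0,				/* unused                   */
		ACC_ALL,			/* level 1                  */
		ACC_EXEC_MASK | ACC_WRITE_MASK,	/* level 2: no user access  */
		ACC_ALL,			/* level 3                  */
		ACC_USER_MASK | ACC_EXEC_MASK,	/* level 4: not writable    */
	};
	unsigned pt_access[MAX_LEVELS];
	unsigned access = ACC_ALL;
	int level;

	for (level = MAX_LEVELS; level >= 1; level--) {
		access &= gpte_acc[level];	/* permissions intersect */
		pt_access[level - 1] = access;	/* remember per level    */
	}

	for (level = MAX_LEVELS; level >= 1; level--)
		printf("accumulated access at level %d: %#x\n",
		       level, pt_access[level - 1]);
	return 0;
}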
766 * - write access through a shadow pte marked read only so that we can set
768 * - write access to a shadow pte marked read only so we can update the page
770 * - mmio access; in this case we will never install a present shadow pte
821 * we will cache the incorrect access into mmio spte.
869 gpa_t addr, u64 access,
881 r = FNAME(walk_addr_generic)(&walker, vcpu, mmu, addr, access);
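Lines 869-881 are gva_to_gpa(): it runs walk_addr_generic() and, when the walk succeeds, composes the guest physical address from the frame number the walker found plus the page offset of the original address. A stand-alone sketch of that final composition, assuming 4 KiB pages and omitting the failure/exception path:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1ull << PAGE_SHIFT) - 1))

static uint64_t compose_gpa(uint64_t gfn, uint64_t gva)
{
	/* frame number back to an address, plus the in-page offset */
	return (gfn << PAGE_SHIFT) | (gva & ~PAGE_MASK);
}

int main(void)
{
	/* e.g. the walk resolved gva 0x7f0000123456 to gfn 0xabcde */
	printf("gpa = %#llx\n",
	       (unsigned long long)compose_gpa(0xabcdeULL, 0x7f0000123456ULL));
	return 0;
}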
928 pte_access = sp->role.access;
956 /* Update the shadowed access bits in case they changed. */