Lines matching refs:access
19 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
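
The comment above is from the header of a paging template: the same file is compiled once per guest PTE format, with FNAME() expanding to a different per-mode prefix on each inclusion. Below is a minimal standalone sketch of that multiple-inclusion trick; PTTYPE, the typedefs and the example function are simplified stand-ins, not the kernel's real definitions.

    #include <stdint.h>
    #include <stdio.h>

    #define PTTYPE 64                       /* set before each inclusion of the template */

    #if PTTYPE == 64
    typedef uint64_t pt_element_t;
    #define FNAME(name) paging64_##name
    #else
    typedef uint32_t pt_element_t;
    #define FNAME(name) paging32_##name
    #endif

    /* FNAME(gpte_present) expands to paging64_gpte_present in this build */
    static int FNAME(gpte_present)(pt_element_t gpte)
    {
        return gpte & 1;                    /* x86 PTE bit 0 = present */
    }

    int main(void)
    {
        printf("present: %d\n", paging64_gpte_present(0x3));
        return 0;
    }
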
105 static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access,
117 /* Allow write access to dirty gptes */
120 *access &= mask;
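
For context, the function above strips ACC_WRITE_MASK from the computed permissions unless the guest PTE's dirty bit is already set, so clean pages are shadowed read-only and the first guest write faults, letting KVM set the dirty bit. A simplified standalone sketch of the mask computation for ordinary (non-EPT) paging, with the x86 bit positions spelled out rather than taken from kernel headers; the real function also returns early when the MMU does not track accessed/dirty bits.

    #include <stdint.h>

    #define ACC_WRITE_MASK     0x2u         /* same value as PT_WRITABLE_MASK (PTE bit 1) */
    #define PT_WRITABLE_SHIFT  1
    #define PT_DIRTY_SHIFT     6            /* x86 PTE dirty bit */

    /* Clear write permission unless the gpte is already dirty. */
    static void protect_clean_gpte(unsigned *access, uint64_t gpte)
    {
        unsigned mask = ~ACC_WRITE_MASK;

        /* Allow write access to dirty gptes: shift the dirty bit down into
         * the writable position and merge it back into the mask. */
        mask |= (gpte >> (PT_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & ACC_WRITE_MASK;
        *access &= mask;
    }
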
216 unsigned access;
218 access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
224 access = gpte & (PT_WRITABLE_MASK | PT_USER_MASK | PT_PRESENT_MASK);
226 access ^= (gpte >> PT64_NX_SHIFT);
229 return access;
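
gpte_access() maps guest PTE permission bits onto KVM's internal ACC_* bits. The EPT branch tests separate read/write/execute bits, while the legacy branch keeps W and U/S in place and derives execute permission from the inverted NX bit (bit 63); the XOR with gpte >> PT64_NX_SHIFT works because ACC_EXEC_MASK equals the present bit, 1. A self-contained sketch with the bit values written out (taken from the architectural PTE/EPT layouts rather than kernel headers):

    #include <stdint.h>

    /* KVM-style internal permission bits (ACC_EXEC_MASK deliberately equals 1) */
    #define ACC_EXEC_MASK   0x1u
    #define ACC_WRITE_MASK  0x2u
    #define ACC_USER_MASK   0x4u

    /* legacy x86 PTE bits */
    #define PT_PRESENT_MASK   0x1u
    #define PT_WRITABLE_MASK  0x2u
    #define PT_USER_MASK      0x4u
    #define PT64_NX_SHIFT     63

    /* EPT PTE bits */
    #define VMX_EPT_READABLE_MASK    0x1u
    #define VMX_EPT_WRITABLE_MASK    0x2u
    #define VMX_EPT_EXECUTABLE_MASK  0x4u

    static unsigned ept_gpte_access(uint64_t gpte)
    {
        return ((gpte & VMX_EPT_WRITABLE_MASK)   ? ACC_WRITE_MASK : 0) |
               ((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK  : 0) |
               ((gpte & VMX_EPT_READABLE_MASK)   ? ACC_USER_MASK  : 0);
    }

    static unsigned legacy_gpte_access(uint64_t gpte)
    {
        unsigned access = gpte & (PT_WRITABLE_MASK | PT_USER_MASK | PT_PRESENT_MASK);

        /* NX set clears the exec bit, which aliases the present bit (1) */
        access ^= (gpte >> PT64_NX_SHIFT);
        return access;
    }
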
310 gpa_t addr, u32 access)
323 const int write_fault = access & PFERR_WRITE_MASK;
324 const int user_fault = access & PFERR_USER_MASK;
325 const int fetch_fault = access & PFERR_FETCH_MASK;
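
Inside the walker, the access argument doubles as a page-fault error code; the three lines above pull out the write, user and fetch bits using the PFERR_* masks, which mirror the architectural x86 #PF error-code bits (bit 1 = write, bit 2 = user mode, bit 4 = instruction fetch). A small illustrative decoder, with the masks defined inline for the sketch:

    #include <stdint.h>

    /* x86 #PF error-code bits (illustrative copies of the PFERR_* masks) */
    #define PFERR_PRESENT_MASK  (1u << 0)
    #define PFERR_WRITE_MASK    (1u << 1)
    #define PFERR_USER_MASK     (1u << 2)
    #define PFERR_RSVD_MASK     (1u << 3)
    #define PFERR_FETCH_MASK    (1u << 4)

    struct fault_kind {
        int write_fault;
        int user_fault;
        int fetch_fault;
    };

    static struct fault_kind classify_access(uint32_t access)
    {
        return (struct fault_kind) {
            .write_fault = !!(access & PFERR_WRITE_MASK),
            .user_fault  = !!(access & PFERR_USER_MASK),
            .fetch_fault = !!(access & PFERR_FETCH_MASK),
        };
    }
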
330 trace_kvm_mmu_pagetable_walk(addr, access);
382 * "guest page access" as the nested page fault's cause,
383 * instead of "guest page structure access". To fix this,
428 errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
438 real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);
485 * [2:0] - Derive from the access bits. The exit_qualification might be
511 struct kvm_vcpu *vcpu, gpa_t addr, u32 access)
514 access);
520 u32 access)
523 addr, access);
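
The two wrappers above differ only in which MMU context they hand to the generic walker: walk_addr() walks with the vCPU's regular guest MMU, while walk_addr_nested() walks with the nested MMU, i.e. over the L2 guest's page tables. A sketch of the pattern with stand-in types; the struct layouts below are placeholders, not the kernel's.

    #include <stdint.h>

    typedef uint64_t gpa_t;
    typedef uint64_t gva_t;

    /* placeholder types so the sketch is self-contained */
    struct kvm_mmu      { int id; };
    struct guest_walker { int ok; };
    struct kvm_vcpu {
        struct {
            struct kvm_mmu *mmu;            /* current guest MMU */
            struct kvm_mmu nested_mmu;      /* MMU used for nested walks */
        } arch;
    };

    static int walk_addr_generic(struct guest_walker *walker, struct kvm_vcpu *vcpu,
                                 struct kvm_mmu *mmu, gpa_t addr, uint32_t access)
    {
        (void)vcpu; (void)mmu; (void)addr; (void)access;
        walker->ok = 1;                     /* pretend the walk succeeded */
        return walker->ok;
    }

    static int walk_addr(struct guest_walker *walker, struct kvm_vcpu *vcpu,
                         gpa_t addr, uint32_t access)
    {
        return walk_addr_generic(walker, vcpu, vcpu->arch.mmu, addr, access);
    }

    static int walk_addr_nested(struct guest_walker *walker, struct kvm_vcpu *vcpu,
                                gva_t addr, uint32_t access)
    {
        return walk_addr_generic(walker, vcpu, &vcpu->arch.nested_mmu, addr, access);
    }
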
541 pte_access = sp->role.access & FNAME(gpte_access)(gpte);
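
Note the pattern at line 541 (and again at line 1058 near the end of this listing): the effective permission is the intersection of what the shadow page's role permits (sp->role.access) and what the guest PTE itself grants. A toy illustration of that intersection:

    /* toy illustration of the role/gpte permission intersection */
    static unsigned effective_access(unsigned role_access, unsigned gpte_perms)
    {
        return role_access & gpte_perms;    /* e.g. 0x7 & 0x5 -> 0x5, write dropped */
    }
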
637 unsigned int direct_access, access;
669 access = gw->pt_access[it.level - 2];
671 false, access);
774 * - write access through a shadow pte marked read only so that we can set
776 * - write access to a shadow pte marked read only so we can update the page
778 * - mmio access; in this case we will never install a present shadow pte
852 * we will cache the incorrect access into mmio spte.
961 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t addr, u32 access,
968 r = FNAME(walk_addr)(&walker, vcpu, addr, access);
982 u32 access,
994 r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);
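
Both gva_to_gpa variants follow the same shape: run the walker, and on success build the gpa from the resulting gfn plus the offset within the page; on failure, the walker's fault description is propagated to the caller through the exception pointer. A simplified standalone sketch of that composition; gfn_to_gpa is just gfn << PAGE_SHIFT on x86, and UNMAPPED_GVA here stands in for the "no translation" sentinel.

    #include <stdint.h>

    #define PAGE_SHIFT    12
    #define PAGE_MASK     (~(((uint64_t)1 << PAGE_SHIFT) - 1))
    #define UNMAPPED_GVA  (~(uint64_t)0)

    typedef uint64_t gpa_t;
    typedef uint64_t gfn_t;

    static gpa_t gfn_to_gpa(gfn_t gfn)
    {
        return gfn << PAGE_SHIFT;
    }

    /* walk_ok and gfn stand in for the walker's return value and walker.gfn */
    static gpa_t gva_to_gpa_sketch(uint64_t addr, int walk_ok, gfn_t gfn)
    {
        gpa_t gpa = UNMAPPED_GVA;

        if (walk_ok) {
            gpa = gfn_to_gpa(gfn);
            gpa |= addr & ~PAGE_MASK;       /* keep the in-page offset */
        }
        /* on failure the real code copies walker.fault into *exception */
        return gpa;
    }
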
1015 * used by guest then tlbs are not flushed, so guest is allowed to access the
1058 pte_access = sp->role.access;