Lines Matching defs:access
218 unsigned int access)
220 u64 mask = make_mmio_spte(vcpu, gfn, access);
242 kvm_pfn_t pfn, unsigned int access)
245 mark_mmio_spte(vcpu, sptep, gfn, access);
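make_mmio_spte() and mark_mmio_spte() above encode the gfn and the access bits into a special not-present SPTE, so a later fault on the same page can be decoded as MMIO without another memslot lookup. A minimal sketch of the encoding, with hypothetical marker and field positions (the real kernel also folds in a generation number and uses shadow_mmio_value/shadow_mmio_access_mask):

    #include <stdint.h>

    /* Hypothetical layout for this sketch only: a marker bit, the
     * cached access bits in the low bits, the gfn from bit 12 up. */
    #define MMIO_MARKER       0x4ull
    #define MMIO_ACCESS_MASK  0x3ull
    #define MMIO_GFN_SHIFT    12

    static inline uint64_t sketch_make_mmio_spte(uint64_t gfn,
                                                 unsigned int access)
    {
            return MMIO_MARKER | (access & MMIO_ACCESS_MASK) |
                   (gfn << MMIO_GFN_SHIFT);
    }

    static inline uint64_t sketch_get_mmio_gfn(uint64_t spte)
    {
            return spte >> MMIO_GFN_SHIFT;
    }

    static inline unsigned int sketch_get_mmio_access(uint64_t spte)
    {
            return spte & MMIO_ACCESS_MASK;
    }

get_mmio_spte_access() at line 3582 below is the kernel's real decoder; the point is that a reserved-bit fault on such an SPTE carries enough information to replay the access as MMIO.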
267 static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
618 * lost when the SPTE is marked for access tracking.
1187 * do not bother adding back write access to pages marked
2025 unsigned int access)
2042 role.access = access;
2204 * sp's access: allow writable in the read-only sp,
2206 * a new sp with the correct access.
2209 if (child->role.access == direct_access)
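The access bits are part of union kvm_mmu_page_role, which serves as the hash key for shadow pages; that is why a child whose role.access no longer matches direct_access cannot be reused and is dropped in favor of a new sp. A rough sketch of the relevant slice of the role (surrounding fields omitted; exact field order and widths in kvm_mmu_page_role differ across kernel versions):

    #include <stdint.h>

    #define ACC_EXEC_MASK  1u
    #define ACC_WRITE_MASK 2u
    #define ACC_USER_MASK  4u
    #define ACC_ALL        (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

    union sketch_page_role {
            uint32_t word;                  /* compared as a whole on lookup */
            struct {
                    uint32_t level:4;
                    uint32_t direct:1;
                    uint32_t access:3;      /* ACC_* bits of this sp */
                    /* ... many more fields in the real union ... */
            };
    };

The role.base.access = ACC_ALL assignments at lines 4475 and 4663 below fit this picture: root roles impose no access restriction of their own.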
2680 unsigned int access = sp->role.access;
2685 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
2694 mmu_set_spte(vcpu, start, access, false, sp->role.level, gfn,
2916 * into the spte, otherwise a read access on the readonly gfn can also
2917 * cause an mmio page fault and be treated as an mmio access.
2931 kvm_pfn_t pfn, unsigned int access,
2942 access & shadow_mmio_access_mask);
2964 * the fault is potentially caused by access tracking (if enabled).
2969 * However, if access tracking is disabled we know that a non-present
2971 * So, if access tracking is disabled, we return true only for write
3027 /* Fault was on Read access */
3060 * Check whether the memory access that caused the fault would
3066 * Need not check the access of upper level table entries since
3082 * write-protected for dirty-logging or access tracking.
3095 * normal spte to fix the access.
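Lines 2964-3095 come from the fast page fault path, which retries a fault locklessly when the SPTE was merely write-protected for dirty logging or stripped for access tracking. The gate is whether the faulting access would now succeed against the current SPTE; a simplified sketch in the shape of the kernel's is_access_allowed() (the SPTE bit names here are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    /* x86 page-fault error-code bits. */
    #define PFERR_WRITE_MASK   (1u << 1)
    #define PFERR_FETCH_MASK   (1u << 4)

    /* Illustrative SPTE bits for the sketch. */
    #define SPTE_PRESENT  (1ull << 0)
    #define SPTE_WRITABLE (1ull << 1)
    #define SPTE_NX       (1ull << 63)

    static bool sketch_is_access_allowed(uint32_t err, uint64_t spte)
    {
            if (err & PFERR_FETCH_MASK)     /* instruction fetch */
                    return (spte & SPTE_PRESENT) && !(spte & SPTE_NX);
            if (err & PFERR_WRITE_MASK)     /* write */
                    return spte & SPTE_WRITABLE;
            /* Fault was on Read access */
            return spte & SPTE_PRESENT;
    }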
3443 u32 access, struct x86_exception *exception)
3451 u32 access,
3456 return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
3582 unsigned int access = get_mmio_spte_access(spte);
3590 trace_handle_mmio_page_fault(addr, gfn, access);
3591 vcpu_cache_mmio_info(vcpu, addr, gfn, access);
3913 * The last MMIO access's GVA and GPA are cached in the VCPU. When
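The comment at 3913 refers to the per-vCPU MMIO cache filled by vcpu_cache_mmio_info() (line 3591 above): the last MMIO access's GVA/GPA, gfn and access are remembered so an immediately repeated access can skip the page-table walk. A sketch of the idea, with a hypothetical structure name:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical per-vCPU cache in the spirit of
     * vcpu_cache_mmio_info()/vcpu_match_mmio_gva(). */
    struct sketch_mmio_cache {
            uint64_t gva;           /* page-aligned guest virtual address */
            uint64_t gfn;
            unsigned int access;
    };

    static void cache_mmio_info(struct sketch_mmio_cache *c, uint64_t gva,
                                uint64_t gfn, unsigned int access)
    {
            c->gva = gva & ~0xfffull;
            c->gfn = gfn;
            c->access = access;
    }

    static bool match_mmio_gva(const struct sketch_mmio_cache *c,
                               uint64_t gva)
    {
            return c->gva == (gva & ~0xfffull);
    }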
3943 unsigned int access, int *nr_present)
3952 mark_mmio_spte(vcpu, sptep, gfn, access);
4231 #define BYTE_MASK(access) \
4232 ((1 & (access) ? 2 : 0) | \
4233 (2 & (access) ? 4 : 0) | \
4234 (3 & (access) ? 8 : 0) | \
4235 (4 & (access) ? 16 : 0) | \
4236 (5 & (access) ? 32 : 0) | \
4237 (6 & (access) ? 64 : 0) | \
4238 (7 & (access) ? 128 : 0))
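BYTE_MASK(access) builds an 8-bit truth table over the 3-bit ACC_* space: bit i of the result is set exactly when index i shares a bit with access. With KVM's ACC_EXEC_MASK=1, ACC_WRITE_MASK=2 and ACC_USER_MASK=4 it yields the classic bitwise truth-table constants, which update_permission_bitmask then combines:

    #include <stdio.h>

    #define BYTE_MASK(access) \
            ((1 & (access) ? 2 : 0) | \
             (2 & (access) ? 4 : 0) | \
             (3 & (access) ? 8 : 0) | \
             (4 & (access) ? 16 : 0) | \
             (5 & (access) ? 32 : 0) | \
             (6 & (access) ? 64 : 0) | \
             (7 & (access) ? 128 : 0))

    int main(void)
    {
            printf("x = 0x%02x\n", BYTE_MASK(1));  /* 0xaa: exec */
            printf("w = 0x%02x\n", BYTE_MASK(2));  /* 0xcc: write */
            printf("u = 0x%02x\n", BYTE_MASK(4));  /* 0xf0: user */
            return 0;
    }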
4296 * - The access is not a fetch
4302 * PFERR_RSVD_MASK bit will be set in PFEC if the access is
4314 * PKU is an additional mechanism by which the paging controls access to
4327 * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
4369 * Only need to check the access which is not an
4374 * write access is controlled by PKRU if it is a
4375 * user access or CR0.WP = 1.
4379 /* PKRU.AD stops both read and write access. */
4381 /* PKRU.WD stops write access. */
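PKRU packs two bits per protection key: AD (access disable) at bit 2*pkey and WD (write disable) at bit 2*pkey+1. A sketch of the policy the comments above spell out, for a data access to a user page (the function name is illustrative; the kernel folds this test into a precomputed pkru_mask rather than branching per fault):

    #include <stdbool.h>
    #include <stdint.h>

    /* Does PKRU forbid this data access to a user page tagged with pkey?
     * AD denies any data access; WD denies writes, except supervisor
     * writes when CR0.WP=0. */
    static bool sketch_pkru_blocks(uint32_t pkru, unsigned int pkey,
                                   bool write, bool user, bool cr0_wp)
    {
            uint32_t bits = (pkru >> (pkey * 2)) & 3;
            bool ad = bits & 1;  /* PKRU.AD stops both read and write */
            bool wd = bits & 2;  /* PKRU.WD stops write access        */

            if (ad)
                    return true;
            if (write && wd && (user || cr0_wp))
                    return true;
            return false;
    }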
4475 role.base.access = ACC_ALL;
4663 role.base.access = ACC_ALL;