Lines Matching defs:access

297 			   unsigned int access)
299 u64 spte = make_mmio_spte(vcpu, gfn, access);
632 * lost when the SPTE is marked for access tracking.
733 * For leaf SPTEs, fetch the *guest* access permissions being shadowed. Note
734 * that the SPTE itself may have more constrained access permissions than
745 * KVM is not shadowing any guest page tables, so the "guest access
749 * is shadowing a guest huge page with small pages, the guest access
750 * permissions being shadowed are the access permissions of the huge
753 * In both cases, sp->role.access contains the correct access bits.
755 return sp->role.access;
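
To connect the fragments at 733-755: a minimal sketch of the accessor they document, reconstructed from the listing (the sp_has_gptes() helper and the exact layout are assumptions based on the lines above, not a verbatim copy of the source):

	static unsigned int kvm_mmu_page_get_access(struct kvm_mmu_page *sp,
						    int index)
	{
		/*
		 * Pages that shadow guest PTEs record the shadowed gfn and
		 * access bits per index (see line 762 below); the low bits
		 * hold the guest access mask.
		 */
		if (sp_has_gptes(sp))
			return sp->shadowed_translation[index] & ACC_ALL;

		/*
		 * Direct and passthrough pages either shadow no guest page
		 * tables or shadow a guest huge page; in both cases
		 * sp->role.access holds the correct guest access bits.
		 */
		return sp->role.access;
	}
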
759 gfn_t gfn, unsigned int access)
762 sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access;
766 WARN_ONCE(access != kvm_mmu_page_get_access(sp, index),
767 "access mismatch under %s page %llx (expected %u, got %u)\n",
769 sp->gfn, kvm_mmu_page_get_access(sp, index), access);
778 unsigned int access)
782 kvm_mmu_page_set_translation(sp, index, gfn, access);
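
Line 762 packs the gfn and the access bits into a single u64: the access mask is at most ACC_ALL (0x7), which fits comfortably below PAGE_SHIFT. A sketch of the reverse decode under that assumption (function names here are illustrative, not from the source):

	static gfn_t shadowed_gfn(struct kvm_mmu_page *sp, int index)
	{
		return sp->shadowed_translation[index] >> PAGE_SHIFT;
	}

	static unsigned int shadowed_access(struct kvm_mmu_page *sp, int index)
	{
		return sp->shadowed_translation[index] & ACC_ALL;
	}
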
1638 u64 *spte, gfn_t gfn, unsigned int access)
1645 kvm_mmu_page_set_translation(sp, spte_index(spte), gfn, access);
1660 u64 *spte, gfn_t gfn, unsigned int access)
1664 __rmap_add(vcpu->kvm, cache, slot, spte, gfn, access);
1922 * - access: updated based on the new guest PTE
1927 .access = 0x7,
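
The 0x7 at line 1927 is the full access mask. For reference, the ACC_* encoding used throughout this file, as conventionally defined in KVM's MMU headers (values shown are the x86 ones):

	#define ACC_EXEC_MASK	1
	#define ACC_WRITE_MASK	PT_WRITABLE_MASK	/* bit 1 */
	#define ACC_USER_MASK	PT_USER_MASK		/* bit 2 */
	#define ACC_ALL		(ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
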
2302 unsigned int access)
2309 role.access = access;
2349 bool direct, unsigned int access)
2356 role = kvm_mmu_child_role(sptep, direct, access);
2473 * sp's access: allow writable in the read-only sp,
2475 * a new sp with the correct access.
2478 if (child->role.access == direct_access)
2977 unsigned int access = sp->role.access;
2982 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
2991 mmu_set_spte(vcpu, slot, start, access, gfn,
3277 * into the spte, otherwise a read access on a readonly gfn can also
3278 * cause an mmio page fault and be treated as an mmio access.
3293 unsigned int access)
3298 access & shadow_mmio_access_mask);
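
Note the masking at line 3298: only the bits covered by shadow_mmio_access_mask survive into an MMIO SPTE. A simplified sketch of the encoding, where shadow_mmio_value stands for the configured MMIO marker pattern (the real make_mmio_spte() also folds in a memslot generation number and reserved-bit masking, omitted here):

	u64 spte = shadow_mmio_value
		 | (access & shadow_mmio_access_mask)
		 | (gfn << PAGE_SHIFT);
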
3337 * caused by access tracking (if enabled). If A/D bits are enabled
3339 * bits for L2 and employ access tracking, but the fast page fault
3341 * 2. The shadow page table entry is present, the access is a write,
3394 /* Fault was on Read access */
3453 * Check whether the memory access that caused the fault would
3459 * Need not check the access of upper level table entries since
3473 * enabled, the SPTE can't be an access-tracked SPTE.
3481 * that were write-protected for dirty-logging or access
3486 * shadow-present, i.e. except for access tracking restoration
3500 * normal spte to fix the access.
3785 * On SVM, reading PDPTRs might access guest memory, which might fault
4048 gpa_t vaddr, u64 access,
4053 return kvm_translate_gpa(vcpu, mmu, vaddr, access, exception);
4160 unsigned int access = get_mmio_spte_access(spte);
4168 trace_handle_mmio_page_fault(addr, gfn, access);
4169 vcpu_cache_mmio_info(vcpu, addr, gfn, access);
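
The decode at line 4160 is the inverse of that encoding; the access half is a straight mask (a sketch matching the encode above; the gfn decode additionally undoes the reserved-bit handling and is omitted):

	static unsigned int get_mmio_spte_access(u64 spte)
	{
		return spte & shadow_mmio_access_mask;
	}
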
4276 * If the APIC access page exists but is disabled, go directly
4277 * to emulation without caching the MMIO access or creating a
4316 unsigned int access)
4331 return kvm_handle_noslot_fault(vcpu, fault, access);
4645 * The last MMIO access's GVA and GPA are cached in the VCPU. When
4666 unsigned int access)
4674 mark_mmio_spte(vcpu, sptep, gfn, access);
4935 #define BYTE_MASK(access) \
4936 ((1 & (access) ? 2 : 0) | \
4937 (2 & (access) ? 4 : 0) | \
4938 (3 & (access) ? 8 : 0) | \
4939 (4 & (access) ? 16 : 0) | \
4940 (5 & (access) ? 32 : 0) | \
4941 (6 & (access) ? 64 : 0) | \
4942 (7 & (access) ? 128 : 0))
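
To make the macro's intent explicit: bit b of the result (for b = 1..7) is set exactly when b shares a set bit with access, i.e. BYTE_MASK() expands a 3-bit access mask into the set of all 3-bit values that overlap it. Worked values, computed directly from the macro as listed:

	BYTE_MASK(1) == 0xaa	/* b = 1,3,5,7: every b with bit 0 set */
	BYTE_MASK(2) == 0xcc	/* b = 2,3,6,7: every b with bit 1 set */
	BYTE_MASK(4) == 0xf0	/* b = 4,5,6,7: every b with bit 2 set */
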
5000 * - The access is not a fetch
5001 * - The access is supervisor mode
5002 * - The access is an implicit supervisor access, or X86_EFLAGS_AC is clear
5006 * PFERR_RSVD_MASK bit will be set in PFEC if the access is
5018 * PKU is an additional mechanism by which the paging controls access to
5031 * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
5066 * Only need to check the access which is not an
5071 * write access is controlled by PKRU if it is a
5072 * user access or CR0.WP = 1.
5076 /* PKRU.AD stops both read and write access. */
5078 /* PKRU.WD stops write access. */
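
A minimal sketch of the per-key lookup behind the AD/WD comments above: each protection key owns two bits in PKRU, AD at bit 2*key and WD at bit 2*key + 1 (variable names here are illustrative):

	u32 pkru_bits = (pkru >> (pte_pkey * 2)) & 3;
	bool ad = pkru_bits & 1;	/* access-disable: blocks reads and writes */
	bool wd = pkru_bits & 2;	/* write-disable: blocks user writes, and
					 * supervisor writes when CR0.WP = 1    */
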
5115 role.base.access = ACC_ALL;
5184 role.access = ACC_ALL;
5315 role.base.access = ACC_ALL;
5704 * checks when emulating instructions that trigger an implicit access.
6346 unsigned int access;
6350 access = kvm_mmu_page_get_access(huge_sp, spte_index(huge_sptep));
6358 role = kvm_mmu_child_role(huge_sptep, /*direct=*/true, access);
6389 * page is aliased by multiple sptes with the same access
6408 __rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access);
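
Read together, the fragments at 6346-6408 describe one flow: when a huge SPTE is split, the child page inherits the huge page's guest access bits, so every small SPTE that replaces it shadows the same permissions. A connected sketch (simplified; allocation and the per-SPTE iteration are omitted):

	unsigned int access;
	union kvm_mmu_page_role role;

	access = kvm_mmu_page_get_access(huge_sp, spte_index(huge_sptep));
	role = kvm_mmu_child_role(huge_sptep, /*direct=*/true, access);
	/* each new small SPTE is then rmap-added with the same bits: */
	__rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access);
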