Lines matching refs: gpte

(The numbers below are line numbers within the matched source file; the FNAME()/PTTYPE idioms and helpers such as gpte_to_gfn_lvl() are consistent with KVM's shadow-paging template, arch/x86/kvm/mmu/paging_tmpl.h.)
96 static inline gfn_t pse36_gfn_delta(u32 gpte)
100 return (gpte & PT32_DIR_PSE36_MASK) << shift;
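The pse36_gfn_delta() hits above (file lines 96 and 100) compute the extra physical-address bits that a PSE-36 4 MiB page entry contributes. A minimal standalone sketch, with the shift and mask constants hard-coded to the architectural values as an assumption (the kernel derives them from its own macros):

#include <stdint.h>

#define PAGE_SHIFT		12
#define PT32_DIR_PSE36_SHIFT	13	/* PA bits 32+ are stored at PTE bit 13 */
#define PT32_DIR_PSE36_SIZE	4	/* four extra physical-address bits */
#define PT32_DIR_PSE36_MASK \
	((((uint32_t)1 << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

typedef uint64_t gfn_t;

static inline gfn_t pse36_gfn_delta(uint32_t gpte)
{
	/* Move PTE bits 13..16 up to gfn bits 20..23, i.e. physical
	 * address bits 32..35 of the 4 MiB page's base. */
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}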
104 static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
106 return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
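gpte_to_gfn_lvl() (file lines 104-106) just masks off the level-appropriate address bits and converts bytes to page frames. A sketch under the assumption of 64-bit paging with 9 bits per level and a 52-bit physical address space:

#include <stdint.h>

#define PAGE_SHIFT	12
#define PT_LEVEL_BITS	9	/* 512 entries per 64-bit page table */
/* Assumed 52-bit max physical address, as on x86-64. */
#define PT_BASE_ADDR_MASK \
	(((1ull << 52) - 1) & ~((1ull << PAGE_SHIFT) - 1))
#define PT_LVL_ADDR_MASK(lvl) \
	(PT_BASE_ADDR_MASK & \
	 ~((1ull << (PAGE_SHIFT + ((lvl) - 1) * PT_LEVEL_BITS)) - 1))

typedef uint64_t gfn_t;
typedef uint64_t pt_element_t;

/* gfn of the (possibly huge) page an entry at level 'lvl' maps. */
static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}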
110 unsigned gpte)
122 mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &
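The protect_clean_gpte() fragments (file lines 110 and 122) implement dirty-bit tracking without a branch: write permission is removed unless the guest dirty bit is already set, so the first write to a clean page faults and gives KVM the chance to set the guest's D bit. A self-contained sketch, assuming the usual x86 bit positions (W at bit 1, D at bit 6) and the ACC_WRITE_MASK == PT_WRITABLE_MASK invariant the kernel asserts:

#include <stdint.h>

#define PT_WRITABLE_SHIFT	1
#define PT_GUEST_DIRTY_SHIFT	6
#define PT_WRITABLE_MASK	(1u << PT_WRITABLE_SHIFT)
#define ACC_WRITE_MASK		PT_WRITABLE_MASK

/* Drop write permission unless the guest PTE is already dirty. */
static void protect_clean_gpte(unsigned *access, unsigned gpte)
{
	unsigned mask = (unsigned)~ACC_WRITE_MASK;

	/* Shift the dirty bit down into the writable-bit position:
	 * a dirty gpte re-enables ACC_WRITE_MASK in the mask. */
	mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &
		PT_WRITABLE_MASK;
	*access &= mask;
}

The shift-and-mask form is what makes the matched line at file line 122 branchless; a conditional would do the same job less cheaply on this hot path.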
136 static bool FNAME(is_bad_mt_xwr)(struct rsvd_bits_validate *rsvd_check, u64 gpte)
141 return __is_bad_mt_xwr(rsvd_check, gpte);
145 static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
147 return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level) ||
148 FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte);
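is_rsvd_bits_set() (file lines 145-148) combines two validity checks: architectural reserved bits per paging level, plus, for EPT only, the bad memtype/XWR combinations that is_bad_mt_xwr() delegates to __is_bad_mt_xwr(); for non-EPT page tables that helper compiles to a constant false (file lines 136-141). A simplified sketch of the reserved-bits half, assuming a toy one-mask-per-level table (the kernel additionally keys the mask on bit 7 of the pte):

#include <stdbool.h>
#include <stdint.h>

struct rsvd_bits_validate {
	uint64_t rsvd_bits_mask[5];	/* one mask per level, 1-based levels */
};

static bool is_rsvd_bits_set(const struct rsvd_bits_validate *rsvd_check,
			     uint64_t gpte, int level)
{
	/* A gpte is malformed iff it sets any bit that is reserved at
	 * its level of the guest page-table hierarchy. */
	return (gpte & rsvd_check->rsvd_bits_mask[level - 1]) != 0;
}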
153 u64 gpte)
155 if (!FNAME(is_present_gpte)(gpte))
160 !(gpte & PT_GUEST_ACCESSED_MASK))
163 if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PG_LEVEL_4K))
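prefetch_invalid_gpte() (file lines 153-163) gates speculative shadow-entry installation: a gpte that is non-present, never accessed (when A/D bits are in use), or carries reserved bits is rejected and the corresponding spte is dropped. A standalone sketch in which the stubs and the RSVD_MASK value are assumptions; only the control flow mirrors the listing:

#include <stdbool.h>
#include <stdint.h>

#define PT_PRESENT_MASK		(1ull << 0)
#define PT_GUEST_ACCESSED_MASK	(1ull << 5)	/* x86 Accessed bit */
#define RSVD_MASK		(0xffull << 52)	/* toy reserved-bit mask */

static bool have_accessed_dirty_bits = true;	/* false when A/D is disabled */

static void drop_spte(uint64_t *spte)
{
	*spte = 0;	/* stand-in for the kernel's drop_spte() */
}

/* Returns true (and zaps the spte) if the gpte must not be prefetched. */
static bool prefetch_invalid_gpte(uint64_t *spte, uint64_t gpte)
{
	if (!(gpte & PT_PRESENT_MASK))
		goto no_present;

	/* Only prefetch entries the guest has actually accessed,
	 * unless A/D bits are disabled. */
	if (have_accessed_dirty_bits && !(gpte & PT_GUEST_ACCESSED_MASK))
		goto no_present;

	if (gpte & RSVD_MASK)
		goto no_present;

	return false;

no_present:
	drop_spte(spte);
	return true;
}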
179 static inline unsigned FNAME(gpte_access)(u64 gpte)
183 access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
184 ((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
185 ((gpte & VMX_EPT_READABLE_MASK) ? ACC_USER_MASK : 0);
189 access = gpte & (PT_WRITABLE_MASK | PT_USER_MASK | PT_PRESENT_MASK);
191 access ^= (gpte >> PT64_NX_SHIFT);
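gpte_access() (file lines 179-191) translates guest PTE permission bits into KVM's internal ACC_* flags. The EPT branch maps the R/W/X bits directly; the non-EPT branch leans on ACC_EXEC_MASK aliasing bit 0 (present), so XOR-ing in the NX bit converts "present" into "executable". A sketch of the non-EPT trick, with the bit positions written out as assumptions:

#include <stdint.h>

#define PT_PRESENT_MASK		(1u << 0)
#define PT_WRITABLE_MASK	(1u << 1)
#define PT_USER_MASK		(1u << 2)
#define PT64_NX_SHIFT		63

/* ACC_EXEC_MASK deliberately aliases bit 0 (present). */
#define ACC_EXEC_MASK		1u
#define ACC_WRITE_MASK		PT_WRITABLE_MASK
#define ACC_USER_MASK		PT_USER_MASK

static unsigned gpte_access(uint64_t gpte)
{
	unsigned access;

	access = (unsigned)(gpte &
			    (PT_WRITABLE_MASK | PT_USER_MASK | PT_PRESENT_MASK));
	/* Fold NX into bit 0: present and !NX yields exec,
	 * present and NX yields no exec. */
	access ^= (unsigned)(gpte >> PT64_NX_SHIFT);

	return access;
}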
259 static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte)
263 pte_t pte = {.pte = gpte};
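gpte_pkeys() (file lines 259-263) pulls the protection-key field out of a 64-bit gpte; it is only meaningful for PTTYPE == 64, which is why the kernel routes it through a pte_t. A direct sketch, assuming the architectural pkey location in bits 62:59:

#include <stdint.h>

#define PTE_PKEY_SHIFT	59	/* x86: protection keys live in PTE bits 62:59 */
#define PTE_PKEY_MASK	0xfull

static unsigned gpte_pkeys(uint64_t gpte)
{
	return (unsigned)((gpte >> PTE_PKEY_SHIFT) & PTE_PKEY_MASK);
}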
271 unsigned int level, unsigned int gpte)
281 * CR4.PSE=0, not reserved. Clear bit 7 in the gpte if the level is
286 * so clear PT_PAGE_SIZE_MASK in gpte if that is the case.
288 gpte &= level - (PT32_ROOT_LEVEL + mmu->cpu_role.ext.cr4_pse);
293 * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then.
295 gpte |= level - PG_LEVEL_4K - 1;
297 return gpte & PT_PAGE_SIZE_MASK;
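is_last_gpte() (file lines 271-297) decides branchlessly whether the walk terminates at this entry, using the two arithmetic tricks the inline comments describe: unsigned underflow leaves bit 7 (the PAGE_SIZE bit) of the right-hand side in exactly the state needed. A self-contained sketch of the 32-bit-paging case, with cr4_pse passed explicitly rather than read from mmu->cpu_role:

#include <stdbool.h>

#define PG_LEVEL_4K		1
#define PT32_ROOT_LEVEL		2
#define PT_PAGE_SIZE_MASK	(1u << 7)

static bool is_last_gpte(unsigned int level, unsigned int gpte,
			 unsigned int cr4_pse)
{
	/* The RHS has bit 7 set iff level < PT32_ROOT_LEVEL + cr4_pse;
	 * otherwise bit 7 is stripped from the gpte, because at that
	 * level it is ignored (CR4.PSE=0) rather than a PS bit. */
	gpte &= level - (PT32_ROOT_LEVEL + cr4_pse);

	/* For level == PG_LEVEL_4K the RHS underflows to all-ones,
	 * forcing the PS bit so a 4K entry always terminates the walk. */
	gpte |= level - PG_LEVEL_4K - 1;

	return gpte & PT_PAGE_SIZE_MASK;
}

Clobbering the other gpte bits along the way is harmless: the only bit the return statement inspects is PT_PAGE_SIZE_MASK.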
534 u64 *spte, pt_element_t gpte)
541 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
544 gfn = gpte_to_gfn(gpte);
545 pte_access = sp->role.access & FNAME(gpte_access)(gpte);
546 FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
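prefetch_gpte() (file lines 534-546) strings the helpers together: validate the gpte, derive the gfn, intersect the shadow page's role access with the gpte's own permissions, then strip write permission from clean entries. A sketch composing the functions sketched above (assumed to be in scope); the pfn resolution and spte installation steps are elided, as they were not among the matches:

#define PG_LEVEL_4K	1

static bool prefetch_gpte(uint64_t *spte, uint64_t gpte, unsigned role_access)
{
	unsigned pte_access;
	uint64_t gfn;

	if (prefetch_invalid_gpte(spte, gpte))
		return false;			/* unusable gpte; spte dropped */

	gfn = gpte_to_gfn_lvl(gpte, PG_LEVEL_4K);	/* 4K leaf entry */
	pte_access = role_access & gpte_access(gpte);	/* intersect perms */
	protect_clean_gpte(&pte_access, gpte);		/* enforce D-bit tracking */

	/* ... resolve gfn to a host pfn and install the spte ... */
	(void)gfn;
	return true;
}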
643 * Verify that the top-level gpte is still there. Since the page is a root page, it is either write-protected (and cannot change from now on) or it is invalid (in which case we no longer care if it changes).
681 * the gpte is changed from non-present to present; the guest need not flush the TLB for that transition, so KVM must synchronize the page table before linking it.
700 * Verify that the gpte in the page we've just write-protected is still there.
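The three comment hits at file lines 643, 681 and 700 all describe the same coherence rule: because a guest need not flush the TLB when a gpte goes from non-present to present, KVM must re-read and re-validate gptes after taking mmu_lock (and after write-protecting the page they live in) before trusting its earlier walk. A minimal sketch of that recheck, with read_guest_gpte_atomic() as a hypothetical stand-in for kvm_vcpu_read_guest_atomic():

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in: atomically read the gpte at guest physical
 * address pte_gpa; returns nonzero on failure. */
extern int read_guest_gpte_atomic(uint64_t pte_gpa, uint64_t *gpte);

/* True if the gpte changed (or became unreadable) since the walker
 * recorded walked_gpte, in which case the walk must be retried. */
static bool gpte_changed(uint64_t pte_gpa, uint64_t walked_gpte)
{
	uint64_t curr;

	return read_guest_gpte_atomic(pte_gpa, &curr) ||
	       curr != walked_gpte;
}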
910 pt_element_t gpte;
920 if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
924 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte))
927 gfn = gpte_to_gfn(gpte);
929 pte_access &= FNAME(gpte_access)(gpte);
930 FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
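The final cluster (file lines 910-930) is the resync path: re-read the current gpte from guest memory, revalidate it, then recompute the gfn and permissions before reconciling them with the installed spte. A sketch of that sequence, reusing the helpers and the hypothetical read_guest_gpte_atomic() from the sketches above (the return-code convention here is an assumption):

static int sync_one_spte(uint64_t pte_gpa, uint64_t *spte,
			 unsigned role_access)
{
	uint64_t gpte, gfn;
	unsigned pte_access;

	if (read_guest_gpte_atomic(pte_gpa, &gpte))
		return -1;			/* guest memory unreadable */

	if (prefetch_invalid_gpte(spte, gpte))
		return 1;			/* gpte invalid; spte dropped */

	gfn = gpte_to_gfn_lvl(gpte, PG_LEVEL_4K);
	pte_access = role_access & gpte_access(gpte);
	protect_clean_gpte(&pte_access, gpte);

	/* ... compare gfn/pte_access against the installed spte and
	 * update or drop it accordingly ... */
	(void)gfn;
	(void)pte_access;
	return 0;
}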