Lines Matching refs:role
218 * The MMU itself (with a valid role) is the single source of truth for the
219 * MMU. Do not use the regs used to build the MMU/role, nor the vCPU. The
293 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
569 int level = sptep_to_sp(sptep)->role.level;
723 if (sp->role.passthrough)
726 if (!sp->role.direct)
729 return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS));
753 * In both cases, sp->role.access contains the correct access bits.
755 return sp->role.access;
768 sp->role.passthrough ? "passthrough" : "direct",
773 sp->role.passthrough ? "passthrough" : "direct",
829 slots = kvm_memslots_for_spte_role(kvm, sp->role);
833 if (sp->role.level > PG_LEVEL_4K)
877 slots = kvm_memslots_for_spte_role(kvm, sp->role);
879 if (sp->role.level > PG_LEVEL_4K)
1103 * information in sp->role.
1105 slots = kvm_memslots_for_spte_role(kvm, sp->role);
1108 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1202 WARN_ON_ONCE(sp->role.level == PG_LEVEL_4K);
1646 kvm_update_page_stats(kvm, sp->role.level, 1);
1648 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1655 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
1738 if (!sp->role.direct)
1893 if (sp->role.direct)
1896 if (sp->role.passthrough)
1920 * - level: not part of the overall MMU role and will never match as the MMU's
1923 * - quadrant: not part of the overall MMU role (similar to level)
1934 * sync a shadow page for a different MMU context, e.g. if the role
1938 if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_spte ||
1939 (sp->role.word ^ root_role.word) & ~sync_role_ign.word))
2007 if (sp->role.invalid)
2034 int level = sp->role.level;
2058 level = sp->role.level;
2143 * unsync, thus @vcpu can be NULL if @role.direct is true.
2149 union kvm_mmu_page_role role)
2162 if (sp->role.word != role.word) {
2172 if (role.level > PG_LEVEL_4K && sp->unsync)
2179 if (sp->role.direct)
2234 union kvm_mmu_page_role role)
2240 if (!role.direct)
2257 sp->role = role;
2265 /* Note, @vcpu may be NULL if @role.direct is true; see kvm_mmu_find_shadow_page. */
2270 union kvm_mmu_page_role role)
2278 sp = kvm_mmu_find_shadow_page(kvm, vcpu, gfn, sp_list, role);
2281 sp = kvm_mmu_alloc_shadow_page(kvm, caches, gfn, sp_list, role);
2290 union kvm_mmu_page_role role)
2298 return __kvm_mmu_get_shadow_page(vcpu->kvm, vcpu, &caches, gfn, role);
2305 union kvm_mmu_page_role role;
2307 role = parent_sp->role;
2308 role.level--;
2309 role.access = access;
2310 role.direct = direct;
2311 role.passthrough = 0;
2318 * requires extra bookkeeping in the role.
2322 * 1GiB of the address space. @role.quadrant encodes which quarter of
2327 * @role.quadrant encodes which half of the region they map.
2339 if (role.has_4_byte_gpte) {
2340 WARN_ON_ONCE(role.level != PG_LEVEL_4K);
2341 role.quadrant = spte_index(sptep) & 1;
2344 return role;
2351 union kvm_mmu_page_role role;
2356 role = kvm_mmu_child_role(sptep, direct, access);
2357 return kvm_mmu_get_shadow_page(vcpu, gfn, role);
2478 if (child->role.access == direct_access)
2495 if (is_last_spte(pte, sp->role.level)) {
2507 child->role.guest_mode && !child->parent_ptes.val)
2547 if (parent->role.level == PG_LEVEL_4K)
2580 if (!sp->role.invalid && sp_has_gptes(sp))
2594 if (sp->role.invalid)
2617 sp->role.invalid = 1;
2657 WARN_ON_ONCE(!sp->role.invalid || sp->root_count);
2808 * i.e. this guards the role.level == 4K assertion below!
2852 WARN_ON_ONCE(sp->role.level != PG_LEVEL_4K);
2905 int level = sp->role.level;
2977 unsigned int access = sp->role.access;
3005 WARN_ON_ONCE(!sp->role.direct);
3038 if (sp->role.level > PG_LEVEL_4K)
3449 if (!is_last_spte(spte, sp->role.level))
3502 if (sp->role.level > PG_LEVEL_4K &&
3552 else if (!--sp->root_count && sp->role.invalid)
3634 if (!sp || sp->role.guest_mode)
3645 union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
3648 role.level = level;
3649 role.quadrant = quadrant;
3651 WARN_ON_ONCE(quadrant && !role.has_4_byte_gpte);
3652 WARN_ON_ONCE(role.direct && role.has_4_byte_gpte);
3654 sp = kvm_mmu_get_shadow_page(vcpu, gfn, role);
4523 union kvm_mmu_page_role role)
4530 if (!role.direct && pgd != root->pgd)
4537 return role.word == sp->role.word;
4541 * Find out if a previously cached root matching the new pgd/role is available,
4576 * Find out if a previously cached root matching the new pgd/role is available.
5113 union kvm_cpu_role role = {0};
5115 role.base.access = ACC_ALL;
5116 role.base.smm = is_smm(vcpu);
5117 role.base.guest_mode = is_guest_mode(vcpu);
5118 role.ext.valid = 1;
5121 role.base.direct = 1;
5122 return role;
5125 role.base.efer_nx = ____is_efer_nx(regs);
5126 role.base.cr0_wp = ____is_cr0_wp(regs);
5127 role.base.smep_andnot_wp = ____is_cr4_smep(regs) && !____is_cr0_wp(regs);
5128 role.base.smap_andnot_wp = ____is_cr4_smap(regs) && !____is_cr0_wp(regs);
5129 role.base.has_4_byte_gpte = !____is_cr4_pae(regs);
5132 role.base.level = ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL
5135 role.base.level = PT32E_ROOT_LEVEL;
5137 role.base.level = PT32_ROOT_LEVEL;
5139 role.ext.cr4_smep = ____is_cr4_smep(regs);
5140 role.ext.cr4_smap = ____is_cr4_smap(regs);
5141 role.ext.cr4_pse = ____is_cr4_pse(regs);
5144 role.ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
5145 role.ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
5146 role.ext.efer_lma = ____is_efer_lma(regs);
5147 return role;
5182 union kvm_mmu_page_role role = {0};
5184 role.access = ACC_ALL;
5185 role.cr0_wp = true;
5186 role.efer_nx = true;
5187 role.smm = cpu_role.base.smm;
5188 role.guest_mode = cpu_role.base.guest_mode;
5189 role.ad_disabled = !kvm_ad_enabled();
5190 role.level = kvm_mmu_get_tdp_level(vcpu);
5191 role.direct = true;
5192 role.has_4_byte_gpte = false;
5194 return role;
5260 * KVM forces EFER.NX=1 when TDP is disabled, reflect it in the MMU role.
5303 union kvm_cpu_role role = {0};
5307 * support the "entry to SMM" control either. role.base.smm is always 0.
5310 role.base.level = level;
5311 role.base.has_4_byte_gpte = false;
5312 role.base.direct = false;
5313 role.base.ad_disabled = !accessed_dirty;
5314 role.base.guest_mode = true;
5315 role.base.access = ACC_ALL;
5317 role.ext.word = 0;
5318 role.ext.execonly = execonly;
5319 role.ext.valid = 1;
5321 return role;
5584 if (sp->role.level == PG_LEVEL_4K)
5601 pte_size = sp->role.has_4_byte_gpte ? 4 : 8;
5623 level = sp->role.level;
5625 if (sp->role.has_4_byte_gpte) {
5639 if (quadrant != sp->role.quadrant)
5685 if (gentry && sp->role.level != PG_LEVEL_4K)
6070 if (WARN_ON_ONCE(sp->role.invalid))
6345 union kvm_mmu_page_role role;
6358 role = kvm_mmu_child_role(huge_sptep, /*direct=*/true, access);
6365 return __kvm_mmu_get_shadow_page(kvm, NULL, &caches, gfn, role);
6402 flush |= !is_last_spte(*sptep, sp->role.level);
6406 spte = make_huge_page_split_spte(kvm, huge_spte, sp->role, index);
6408 __rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access);
6425 level = huge_sp->role.level;
6467 if (WARN_ON_ONCE(!sp->role.guest_mode))
6479 if (sp->role.invalid)
6588 if (sp->role.direct &&
6589 sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
6671 if (WARN_ON_ONCE(sp->role.invalid))
7048 WARN_ON_ONCE(!sp->role.direct);
7074 slots = kvm_memslots_for_spte_role(kvm, sp->role);
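
A pattern that recurs throughout the matches above is that the role is a bitfield packed into a single word, so lookup and sync checks reduce to one integer compare or XOR (e.g. "sp->role.word != role.word" at 2162, "role.word == sp->role.word" at 4537, and the masked "(sp->role.word ^ root_role.word) & ~sync_role_ign.word" at 1938-1939). Below is a minimal, self-contained C sketch of that pattern. The field names mirror the listing, but the widths, layout, and the demo_page_role/role_matches* names are illustrative assumptions for this sketch only, not the kernel's actual kvm_mmu_page_role definition.

/*
 * Sketch of the "role packed into a single word" idiom seen in the
 * matches above.  Layout and widths are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

union demo_page_role {
	uint32_t word;			/* whole role as one integer */
	struct {
		unsigned level:4;		/* paging level of the shadow page  */
		unsigned direct:1;		/* no guest page table backs it     */
		unsigned has_4_byte_gpte:1;	/* 32-bit non-PAE guest PTE format  */
		unsigned quadrant:2;		/* which slice of the gfn space     */
		unsigned access:3;		/* permission bits                  */
	};
};

/* Exact match: the cheap comparison used when looking up a shadow page. */
static int role_matches(union demo_page_role a, union demo_page_role b)
{
	return a.word == b.word;
}

/*
 * Masked comparison: ignore selected fields, analogous to the
 * sync_role_ign mask at 1938-1939.  Here we (hypothetically) ignore level.
 */
static int role_matches_ignoring_level(union demo_page_role a,
					union demo_page_role b)
{
	union demo_page_role ign = { .word = 0 };

	ign.level = 0xf;	/* set every bit of the field to ignore */
	return !((a.word ^ b.word) & ~ign.word);
}

int main(void)
{
	union demo_page_role parent = { .word = 0 };
	union demo_page_role child;

	parent.level = 4;
	parent.access = 7;

	/* Derive a child role the way kvm_mmu_child_role does at 2307-2310:
	 * copy the parent role, drop one level, override per-child bits. */
	child = parent;
	child.level--;

	printf("exact match: %d\n", role_matches(parent, child));		/* 0 */
	printf("ignoring level: %d\n", role_matches_ignoring_level(parent, child)); /* 1 */
	return 0;
}

The point of the single-word encoding is that every property that makes two shadow pages incompatible is folded into the same integer, so kvm_mmu_find_shadow_page (2162) and the cached-root check (4537) never need field-by-field comparisons, and the sync path can whitelist "don't care" fields with one mask.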