Searched refs:PG_LEVEL_4K (Results 1 - 25 of 47) sorted by relevance

/kernel/linux/linux-6.6/arch/x86/include/asm/
kfence.h
33 if (level != PG_LEVEL_4K) in arch_kfence_init_pool()
46 if (WARN_ON(!pte || level != PG_LEVEL_4K)) in kfence_protect_page()
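
Most of the non-KVM hits in this listing follow one pattern: walk the kernel page tables with lookup_address(), which reports the mapping level, then check whether the address is backed by a 4 KiB PTE. A minimal sketch of that pattern; the wrapper function and the set_memory_4k() fallback are modelled on the kfence.h excerpt above, not copied from it:

    #include <linux/types.h>
    #include <asm/pgtable_types.h>   /* enum pg_level, lookup_address() */
    #include <asm/set_memory.h>      /* set_memory_4k() */

    /* Hypothetical helper: make sure addr is covered by a 4 KiB mapping. */
    static bool ensure_4k_mapping(unsigned long addr)
    {
            unsigned int level;
            pte_t *pte = lookup_address(addr, &level);

            if (!pte)                       /* address is not mapped at all */
                    return false;

            if (level != PG_LEVEL_4K)       /* 2 MiB or 1 GiB mapping: split it */
                    return set_memory_4k(addr, 1) == 0;

            return true;
    }
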
/kernel/linux/linux-6.6/arch/x86/coco/tdx/
tdx-shared.c
24 case PG_LEVEL_4K: in try_accept_one()
64 accept_size = try_accept_one(start, len, PG_LEVEL_4K); in tdx_accept_memory()
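
For context on what these level values mean: try_accept_one() switches on the page-table level to pick how much memory to accept in one call. A standalone sketch, with an enum that mirrors the x86 enum pg_level (PG_LEVEL_NONE = 0, PG_LEVEL_4K = 1, and so on; higher levels such as PG_LEVEL_512G are omitted here) and a hypothetical level_to_size() helper:

    /* Mirrors the x86 enum pg_level ordering; values grow with page size. */
    enum pg_level {
            PG_LEVEL_NONE,
            PG_LEVEL_4K,    /* 4 KiB PTE */
            PG_LEVEL_2M,    /* 2 MiB PMD */
            PG_LEVEL_1G,    /* 1 GiB PUD */
    };

    /* Hypothetical helper: bytes covered by one mapping at this level. */
    static unsigned long level_to_size(enum pg_level level)
    {
            switch (level) {
            case PG_LEVEL_4K: return 4UL << 10;
            case PG_LEVEL_2M: return 2UL << 20;
            case PG_LEVEL_1G: return 1UL << 30;
            default:          return 0;     /* not a leaf mapping */
            }
    }
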
/kernel/linux/linux-6.6/arch/x86/kvm/mmu/
spte.c
176 if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) && in make_spte()
189 if (level > PG_LEVEL_4K) in make_spte()
243 WARN_ON_ONCE(level > PG_LEVEL_4K); in make_spte()
294 if (role.level == PG_LEVEL_4K) { in make_huge_page_split_spte()
page_track.c
67 index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); in update_gfn_write_track()
96 if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K)) in __kvm_write_track_add_gfn()
134 index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); in kvm_gfn_is_write_tracked()
paging_tmpl.h
74 #define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PG_LEVEL_4K)
163 if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PG_LEVEL_4K)) in prefetch_invalid_gpte()
291 * PG_LEVEL_4K always terminates. The RHS has bit 7 set in is_last_gpte()
292 * iff level <= PG_LEVEL_4K, which for our purpose means in is_last_gpte()
293 * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then. in is_last_gpte()
295 gpte |= level - PG_LEVEL_4K - 1; in is_last_gpte()
444 if (walker->level > PG_LEVEL_4K && is_cpuid_PSE36()) in walk_addr_generic()
569 if (level == PG_LEVEL_4K) { in gpte_changed()
594 if (sp->role.level > PG_LEVEL_4K) in pte_prefetch()
684 * For PG_LEVEL_4K, kvm_mmu_get_pag in fetch()
[all...]
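
The is_last_gpte() hits at lines 291-295 rely on a small bit trick that is easy to miss in excerpt form: PG_LEVEL_4K is 1, so level - PG_LEVEL_4K - 1 is -1 (all bits set) exactly when level == PG_LEVEL_4K, and 0..2 otherwise. OR-ing that into the guest PTE forces the page-size bit on at the 4 KiB level, while higher levels keep the guest's own PS bit. A self-contained sketch of the same arithmetic, with the constants defined locally (PT_PAGE_SIZE_MASK is bit 7, as the quoted comment says):

    #include <stdbool.h>

    #define PG_LEVEL_4K        1
    #define PT_PAGE_SIZE_MASK  (1ULL << 7)   /* PS bit in a guest PTE */

    static bool is_last_gpte_sketch(unsigned long long gpte, int level)
    {
            /* level == PG_LEVEL_4K: RHS is -1, every bit set, bit 7 forced on.
             * level  > PG_LEVEL_4K: RHS is 0..2, bit 7 untouched, so only the
             * guest's own page-size bit terminates the walk. */
            gpte |= level - PG_LEVEL_4K - 1;
            return gpte & PT_PAGE_SIZE_MASK;
    }
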
mmu.c
833 if (sp->role.level > PG_LEVEL_4K) in account_shadowed()
838 if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K)) in account_shadowed()
839 kvm_flush_remote_tlbs_gfn(kvm, gfn, PG_LEVEL_4K); in account_shadowed()
879 if (sp->role.level > PG_LEVEL_4K) in unaccount_shadowed()
1086 return &slot->arch.rmap[level - PG_LEVEL_4K][idx]; in gfn_to_rmap()
1202 WARN_ON_ONCE(sp->role.level == PG_LEVEL_4K); in drop_large_spte()
1316 PG_LEVEL_4K, slot); in kvm_mmu_write_protect_pt_masked()
1349 PG_LEVEL_4K, slot); in kvm_mmu_clear_dirty_pt_masked()
1385 kvm_mmu_try_split_huge_pages(kvm, slot, start, end, PG_LEVEL_4K); in kvm_arch_mmu_enable_log_dirty_pt_masked()
1435 return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K); in kvm_vcpu_write_protect_gfn()
[all...]
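
The gfn_to_rmap() hit above indexes slot->arch.rmap[level - PG_LEVEL_4K][idx]: each memslot keeps one reverse-map array per mapping level, with PG_LEVEL_4K as the base so the 4 KiB level lands at index 0. A sketch of the index arithmetic, assuming the usual 9 page-table bits per level; the macro and helper below are modelled on KVM_HPAGE_GFN_SHIFT()/gfn_to_index() rather than copied from them:

    #define PG_LEVEL_4K             1
    /* 9 more guest-frame-number bits are folded away per extra level. */
    #define HPAGE_GFN_SHIFT(level)  (((level) - PG_LEVEL_4K) * 9)

    /* Index of gfn within its memslot, in units of this level's pages. */
    static unsigned long gfn_to_index_sketch(unsigned long gfn,
                                             unsigned long base_gfn, int level)
    {
            return (gfn >> HPAGE_GFN_SHIFT(level)) -
                   (base_gfn >> HPAGE_GFN_SHIFT(level));
    }
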
mmu_internal.h
297 .req_level = PG_LEVEL_4K, in kvm_mmu_do_page_fault()
298 .goal_level = PG_LEVEL_4K, in kvm_mmu_do_page_fault()
tdp_iter.h
129 for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end)
/kernel/linux/linux-5.10/arch/x86/kvm/mmu/
mmu.c
768 if (sp->role.level > PG_LEVEL_4K) in account_shadowed()
796 if (sp->role.level > PG_LEVEL_4K) in unaccount_shadowed()
943 return &slot->arch.rmap[level - PG_LEVEL_4K][idx]; in __gfn_to_rmap()
1074 WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K); in __drop_large_spte()
1229 PG_LEVEL_4K, slot); in kvm_mmu_write_protect_pt_masked()
1258 PG_LEVEL_4K, slot); in kvm_mmu_clear_dirty_pt_masked()
1295 for (i = PG_LEVEL_4K; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) { in kvm_mmu_slot_gfn_write_protect()
1482 for_each_slot_rmap_range(memslot, PG_LEVEL_4K, in kvm_handle_hva_range()
1896 WARN_ON(s->role.level != PG_LEVEL_4K); in kvm_sync_pages()
1925 if (level == PG_LEVEL_4K) in mmu_pages_next()
[all...]
mmu_audit.c
103 if (level != PG_LEVEL_4K) { in audit_mappings()
179 if (sp->role.level != PG_LEVEL_4K) in check_mappings_rmap()
203 rmap_head = __gfn_to_rmap(sp->gfn, PG_LEVEL_4K, slot); in audit_write_protection()
paging_tmpl.h
79 #define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PG_LEVEL_4K)
198 if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PG_LEVEL_4K)) in prefetch_invalid_gpte()
435 if (PTTYPE == 32 && walker->level > PG_LEVEL_4K && is_cpuid_PSE36()) in walk_addr_generic()
552 mmu_set_spte(vcpu, spte, pte_access, false, PG_LEVEL_4K, gfn, pfn, in prefetch_gpte()
575 if (level == PG_LEVEL_4K) { in gpte_changed()
600 if (sp->role.level > PG_LEVEL_4K) in pte_prefetch()
836 max_level = PG_LEVEL_4K; in page_fault()
893 WARN_ON(sp->role.level != PG_LEVEL_4K); in get_level1_sp_gpa()
1082 pte_access, PG_LEVEL_4K, in sync_page()
tdp_iter.h
56 for_each_tdp_pte_min_level(iter, root, root_level, PG_LEVEL_4K, start, end)
spte.c
107 if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) && in make_spte()
120 if (level > PG_LEVEL_4K) in make_spte()
page_track.c
64 index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); in update_gfn_track()
154 index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); in kvm_page_track_is_active()
tdp_mmu.c
221 if (level > PG_LEVEL_4K) in handle_changed_spte_dirty_log()
259 WARN_ON(level < PG_LEVEL_4K); in __handle_changed_spte()
798 if (iter.level != PG_LEVEL_4K) in set_tdp_spte()
969 if (iter.level > PG_LEVEL_4K || in clear_dirty_pt_masked()
spte.h
194 if (level == PG_LEVEL_4K) in is_last_spte()
/kernel/linux/linux-5.10/arch/x86/xen/
p2m.c
250 BUG_ON(!ptep || level != PG_LEVEL_4K); in xen_build_mfn_list_list()
448 BUG_ON(!ptep || level != PG_LEVEL_4K); in get_phys_to_machine()
542 BUG_ON(!ptep || level != PG_LEVEL_4K); in xen_alloc_p2m_entry()
667 BUG_ON(!ptep || level != PG_LEVEL_4K); in __set_phys_to_machine()
/kernel/linux/linux-6.6/arch/x86/xen/
p2m.c
245 BUG_ON(!ptep || level != PG_LEVEL_4K); in xen_build_mfn_list_list()
443 BUG_ON(!ptep || level != PG_LEVEL_4K); in get_phys_to_machine()
537 BUG_ON(!ptep || level != PG_LEVEL_4K); in xen_alloc_p2m_entry()
662 BUG_ON(!ptep || level != PG_LEVEL_4K); in __set_phys_to_machine()
/kernel/linux/linux-6.6/tools/testing/selftests/kvm/lib/x86_64/
vmx.c
429 for (int level = PG_LEVEL_512G; level >= PG_LEVEL_4K; level--) { in __nested_pg_map()
453 __nested_pg_map(vmx, vm, nested_paddr, paddr, PG_LEVEL_4K); in nested_pg_map()
493 __nested_map(vmx, vm, nested_paddr, paddr, size, PG_LEVEL_4K); in nested_map()
/kernel/linux/linux-5.10/arch/x86/mm/
mem_encrypt.c
209 case PG_LEVEL_4K: in __set_clr_pte_enc()
275 if (level == PG_LEVEL_4K) { in early_set_memory_enc_dec()
/kernel/linux/linux-6.6/arch/x86/mm/
mem_encrypt_amd.c
259 case PG_LEVEL_4K: in pg_level_to_pfn()
425 if (level == PG_LEVEL_4K) { in early_set_memory_enc_dec()
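
The pg_level_to_pfn() hit shows a second common idiom: the level returned by lookup_address() tells the caller how to reinterpret the page-table entry it got back. A sketch of that idiom using the standard pte_pfn()/pmd_pfn()/pud_pfn() accessors; the wrapper itself is hypothetical, not the file's code:

    #include <linux/bug.h>     /* WARN_ONCE() */
    #include <asm/pgtable.h>   /* pte_pfn(), pmd_pfn(), pud_pfn() */

    /* Hypothetical wrapper: pfn of the entry, interpreted per its level. */
    static unsigned long level_to_pfn(int level, pte_t *kpte)
    {
            switch (level) {
            case PG_LEVEL_4K:
                    return pte_pfn(*kpte);
            case PG_LEVEL_2M:
                    return pmd_pfn(*(pmd_t *)kpte);
            case PG_LEVEL_1G:
                    return pud_pfn(*(pud_t *)kpte);
            default:
                    WARN_ONCE(1, "unsupported page level %d\n", level);
                    return 0;
            }
    }
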
/kernel/linux/linux-6.6/tools/testing/selftests/kvm/x86_64/
nx_huge_pages_test.c
148 virt_map_level(vm, HPAGE_GVA, HPAGE_GPA, nr_bytes, PG_LEVEL_4K); in run_test()
/kernel/linux/linux-5.10/arch/x86/mm/pat/
cpa-test.c
206 if (level != PG_LEVEL_4K) { in pageattr_test()
/kernel/linux/linux-6.6/arch/x86/kvm/vmx/
capabilities.h
330 return PG_LEVEL_4K; in ept_caps_to_lpage_level()
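
This hit is the fallback branch: ept_caps_to_lpage_level() reduces the EPT capability word to the largest page size the hardware can map, and PG_LEVEL_4K is what remains when neither huge-page bit is advertised. A sketch of that reduction; the bit positions follow the VMX EPT/VPID capability MSR layout (bit 16 for 2 MiB, bit 17 for 1 GiB) but are defined locally here as assumptions:

    #define PG_LEVEL_4K        1
    #define PG_LEVEL_2M        2
    #define PG_LEVEL_1G        3

    #define EPT_2MB_PAGE_BIT   (1u << 16)   /* assumed: 2 MiB EPT pages supported */
    #define EPT_1GB_PAGE_BIT   (1u << 17)   /* assumed: 1 GiB EPT pages supported */

    /* Largest mapping level the EPT capabilities allow; 4 KiB is the floor. */
    static int ept_caps_to_lpage_level_sketch(unsigned int ept_caps)
    {
            if (ept_caps & EPT_1GB_PAGE_BIT)
                    return PG_LEVEL_1G;
            if (ept_caps & EPT_2MB_PAGE_BIT)
                    return PG_LEVEL_2M;
            return PG_LEVEL_4K;
    }
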
/kernel/linux/linux-6.6/arch/x86/mm/pat/
cpa-test.c
206 if (level != PG_LEVEL_4K) { in pageattr_test()
