Searched refs:pgt (Results 1 - 25 of 39) sorted by relevance

/kernel/linux/linux-6.6/drivers/net/ethernet/mellanox/mlxsw/
spectrum_pgt.c
32 mutex_lock(&mlxsw_sp->pgt->lock); in mlxsw_sp_pgt_mid_alloc()
33 index = idr_alloc(&mlxsw_sp->pgt->pgt_idr, NULL, 0, in mlxsw_sp_pgt_mid_alloc()
34 mlxsw_sp->pgt->end_index, GFP_KERNEL); in mlxsw_sp_pgt_mid_alloc()
42 mutex_unlock(&mlxsw_sp->pgt->lock); in mlxsw_sp_pgt_mid_alloc()
46 mutex_unlock(&mlxsw_sp->pgt->lock); in mlxsw_sp_pgt_mid_alloc()
52 mutex_lock(&mlxsw_sp->pgt->lock); in mlxsw_sp_pgt_mid_free()
53 WARN_ON(idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base)); in mlxsw_sp_pgt_mid_free()
54 mutex_unlock(&mlxsw_sp->pgt->lock); in mlxsw_sp_pgt_mid_free()
63 mutex_lock(&mlxsw_sp->pgt->lock); in mlxsw_sp_pgt_mid_alloc_range()
70 idr_cursor = idr_get_cursor(&mlxsw_sp->pgt->pgt_idr); in mlxsw_sp_pgt_mid_alloc_range()
123 mlxsw_sp_pgt_entry_create(struct mlxsw_sp_pgt *pgt, u16 mid, u16 smpe) mlxsw_sp_pgt_entry_create() argument
149 mlxsw_sp_pgt_entry_destroy(struct mlxsw_sp_pgt *pgt, struct mlxsw_sp_pgt_entry *pgt_entry) mlxsw_sp_pgt_entry_destroy() argument
162 mlxsw_sp_pgt_entry_get(struct mlxsw_sp_pgt *pgt, u16 mid, u16 smpe) mlxsw_sp_pgt_entry_get() argument
173 mlxsw_sp_pgt_entry_put(struct mlxsw_sp_pgt *pgt, u16 mid) mlxsw_sp_pgt_entry_put() argument
323 struct mlxsw_sp_pgt *pgt; mlxsw_sp_pgt_init() local
[all...]
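
The spectrum_pgt.c hits trace a small allocator: PGT MID indices come from an IDR guarded by a mutex and bounded by end_index. A minimal sketch of that pattern, with hypothetical pgt_table/pgt_mid_* names standing in for the mlxsw internals:

/* Sketch of the alloc/free pattern visible above: an IDR handing out
 * MID indices under a mutex. Type and function names are illustrative. */
#include <linux/idr.h>
#include <linux/mutex.h>

struct pgt_table {
	struct mutex lock;	/* protects pgt_idr */
	struct idr pgt_idr;
	u16 end_index;		/* one past the highest usable MID */
};

static int pgt_mid_alloc(struct pgt_table *pgt, u16 *p_mid)
{
	int index;

	mutex_lock(&pgt->lock);
	index = idr_alloc(&pgt->pgt_idr, NULL, 0, pgt->end_index, GFP_KERNEL);
	mutex_unlock(&pgt->lock);
	if (index < 0)
		return index;

	*p_mid = index;
	return 0;
}

static void pgt_mid_free(struct pgt_table *pgt, u16 mid)
{
	mutex_lock(&pgt->lock);
	/* NULL was stored for the id, so a non-NULL return is a bug */
	WARN_ON(idr_remove(&pgt->pgt_idr, mid));
	mutex_unlock(&pgt->lock);
}
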
/kernel/linux/linux-6.6/arch/arm64/kvm/hyp/
pgtable.c
109 static u32 kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr) in kvm_pgd_page_idx() argument
111 u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */ in kvm_pgd_page_idx()
112 u64 mask = BIT(pgt->ia_bits) - 1; in kvm_pgd_page_idx()
119 struct kvm_pgtable pgt = { in kvm_pgd_pages() local
124 return kvm_pgd_page_idx(&pgt, -1ULL) + 1; in kvm_pgd_pages()
297 static int _kvm_pgtable_walk(struct kvm_pgtable *pgt, struct kvm_pgtable_walk_data *data) in _kvm_pgtable_walk() argument
301 u64 limit = BIT(pgt->ia_bits); in _kvm_pgtable_walk()
306 if (!pgt->pgd) in _kvm_pgtable_walk()
309 for (idx = kvm_pgd_page_idx(pgt, data->addr); data->addr < data->end; ++idx) { in _kvm_pgtable_walk()
310 kvm_pteref_t pteref = &pgt->pgd[idx * PTRS_PER_PTE]; in _kvm_pgtable_walk()
320 kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size, struct kvm_pgtable_walker *walker) kvm_pgtable_walk() argument
357 kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr, kvm_pte_t *ptep, u32 *level) kvm_pgtable_get_leaf() argument
484 kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, enum kvm_pgtable_prot prot) kvm_pgtable_hyp_map() argument
547 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size) kvm_pgtable_hyp_unmap() argument
563 kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits, struct kvm_pgtable_mm_ops *mm_ops) kvm_pgtable_hyp_init() argument
597 kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt) kvm_pgtable_hyp_destroy() argument
665 stage2_has_fwb(struct kvm_pgtable *pgt) stage2_has_fwb() argument
695 stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot, kvm_pte_t *ptep) stage2_set_prot_attr() argument
834 stage2_unmap_defer_tlb_flush(struct kvm_pgtable *pgt) stage2_unmap_defer_tlb_flush() argument
851 struct kvm_pgtable *pgt = ctx->arg; stage2_unmap_put_pte() local
869 stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte) stage2_pte_cacheable() argument
917 struct kvm_pgtable *pgt = data->mmu->pgt; stage2_map_walker_try_leaf() local
1034 kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, enum kvm_pgtable_prot prot, void *mc, enum kvm_pgtable_walk_flags flags) kvm_pgtable_stage2_map() argument
1065 kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size, void *mc, u8 owner_id) kvm_pgtable_stage2_set_owner() argument
1093 struct kvm_pgtable *pgt = ctx->arg; stage2_unmap_walker() local
1133 kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size) kvm_pgtable_stage2_unmap() argument
1194 stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr, u64 size, kvm_pte_t attr_set, kvm_pte_t attr_clr, kvm_pte_t *orig_pte, u32 *level, enum kvm_pgtable_walk_flags flags) stage2_update_leaf_attrs() argument
1223 kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size) kvm_pgtable_stage2_wrprotect() argument
1230 kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr) kvm_pgtable_stage2_mkyoung() argument
1279 kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64 size, bool mkold) kvm_pgtable_stage2_test_clear_young() argument
1295 kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, enum kvm_pgtable_prot prot) kvm_pgtable_stage2_relax_perms() argument
1325 struct kvm_pgtable *pgt = ctx->arg; stage2_flush_walker() local
1337 kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size) kvm_pgtable_stage2_flush() argument
1351 kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt, u64 phys, u32 level, enum kvm_pgtable_prot prot, void *mc, bool force_pte) kvm_pgtable_stage2_create_unlinked() argument
1496 kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size, struct kvm_mmu_memory_cache *mc) kvm_pgtable_stage2_split() argument
1508 __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu, struct kvm_pgtable_mm_ops *mm_ops, enum kvm_pgtable_stage2_flags flags, kvm_pgtable_force_pte_cb_t force_pte_cb) __kvm_pgtable_stage2_init() argument
1561 kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt) kvm_pgtable_stage2_destroy() argument
[all...]
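
kvm_pgtable_walk() (line 320) is the engine behind every other hit in this file: callers package a visitor callback into a kvm_pgtable_walker and let the core iterate the range. A hedged sketch of a caller, assuming the 6.6 visitor types (kvm_pgtable_visit_ctx, KVM_PGTABLE_WALK_LEAF); the leaf-counting callback itself is hypothetical:

/* Count leaf entries in [addr, addr + size) by walking the table. */
static int count_leaf_cb(const struct kvm_pgtable_visit_ctx *ctx,
			 enum kvm_pgtable_walk_flags visit)
{
	unsigned long *count = ctx->arg;

	(*count)++;
	return 0;	/* a non-zero return would abort the walk */
}

static unsigned long count_leaves(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	unsigned long count = 0;
	struct kvm_pgtable_walker walker = {
		.cb	= count_leaf_cb,
		.arg	= &count,
		.flags	= KVM_PGTABLE_WALK_LEAF,	/* visit leaves only */
	};

	kvm_pgtable_walk(pgt, addr, size, &walker);
	return count;
}
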
/kernel/linux/linux-5.10/arch/arm64/include/asm/
kvm_pgtable.h
87 * @pgt: Uninitialised page-table structure to initialise.
92 int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits);
96 * @pgt: Page-table structure initialised by kvm_pgtable_hyp_init().
101 void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);
105 * @pgt: Page-table structure initialised by kvm_pgtable_hyp_init().
122 int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
127 * @pgt: Uninitialised page-table structure to initialise.
132 int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm *kvm);
136 * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init().
141 void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
[all...]
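
Read together, these 5.10 prototypes describe a create/map/destroy lifecycle for the hyp table. A sketch of the intended call order, using only the prototypes above; the 48-bit VA width and the R|W protection flags are assumptions:

/* Hyp page-table lifecycle per the 5.10 API. Back-to-back map and
 * destroy is artificial; it is only meant to show the pairing. */
static int hyp_map_sketch(u64 va, u64 phys, u64 size)
{
	struct kvm_pgtable pgt;
	int ret;

	ret = kvm_pgtable_hyp_init(&pgt, 48);	/* va_bits assumed */
	if (ret)
		return ret;

	ret = kvm_pgtable_hyp_map(&pgt, va, size, phys,
				  KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W);

	kvm_pgtable_hyp_destroy(&pgt);
	return ret;
}
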
/kernel/linux/linux-5.10/arch/arm64/kvm/hyp/
pgtable.c
49 struct kvm_pgtable *pgt; member
92 static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr) in __kvm_pgd_page_idx() argument
94 u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */ in __kvm_pgd_page_idx()
95 u64 mask = BIT(pgt->ia_bits) - 1; in __kvm_pgd_page_idx()
102 return __kvm_pgd_page_idx(data->pgt, data->addr); in kvm_pgd_page_idx()
107 struct kvm_pgtable pgt = { in kvm_pgd_pages() local
112 return __kvm_pgd_page_idx(&pgt, -1ULL) + 1; in kvm_pgd_pages()
274 struct kvm_pgtable *pgt = data->pgt; in _kvm_pgtable_walk() local
275 u64 limit = BIT(pgt->ia_bits); in _kvm_pgtable_walk()
294 kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size, struct kvm_pgtable_walker *walker) kvm_pgtable_walk() argument
374 kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, enum kvm_pgtable_prot prot) kvm_pgtable_hyp_map() argument
397 kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits) kvm_pgtable_hyp_init() argument
418 kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt) kvm_pgtable_hyp_destroy() argument
617 kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, enum kvm_pgtable_prot prot, struct kvm_mmu_memory_cache *mc) kvm_pgtable_stage2_map() argument
698 kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size) kvm_pgtable_stage2_unmap() argument
742 stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr, u64 size, kvm_pte_t attr_set, kvm_pte_t attr_clr, kvm_pte_t *orig_pte, u32 *level) stage2_update_leaf_attrs() argument
771 kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size) kvm_pgtable_stage2_wrprotect() argument
778 kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr) kvm_pgtable_stage2_mkyoung() argument
787 kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr) kvm_pgtable_stage2_mkold() argument
801 kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr) kvm_pgtable_stage2_is_young() argument
808 kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, enum kvm_pgtable_prot prot) kvm_pgtable_stage2_relax_perms() argument
843 kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size) kvm_pgtable_stage2_flush() argument
856 kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm *kvm) kvm_pgtable_stage2_init() argument
895 kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt) kvm_pgtable_stage2_destroy() argument
[all...]
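
The stage-2 half of the same 5.10 file pairs kvm_pgtable_stage2_init() against a struct kvm with map/unmap/destroy, matching the signatures hit above. A hedged sketch of that sequence; the R|W protection and the caller-supplied memory cache are assumptions:

/* Stage-2 lifecycle per the 5.10 prototypes: init against a VM, map a
 * GPA range, unmap it again, tear the table down. Sketch only. */
static int stage2_map_sketch(struct kvm *kvm, struct kvm_pgtable *pgt,
			     u64 gpa, u64 phys, u64 size,
			     struct kvm_mmu_memory_cache *mc)
{
	int ret;

	ret = kvm_pgtable_stage2_init(pgt, kvm);
	if (ret)
		return ret;

	ret = kvm_pgtable_stage2_map(pgt, gpa, size, phys,
				     KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W,
				     mc);
	if (!ret)
		ret = kvm_pgtable_stage2_unmap(pgt, gpa, size);

	kvm_pgtable_stage2_destroy(pgt);
	return ret;
}
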
/kernel/linux/linux-6.6/arch/arm64/include/asm/
kvm_pgtable.h
356 * @pgt: Uninitialised page-table structure to initialise.
362 int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
367 * @pgt: Page-table structure initialised by kvm_pgtable_hyp_init().
372 void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);
376 * @pgt: Page-table structure initialised by kvm_pgtable_hyp_init().
393 int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
398 * @pgt: Page-table structure initialised by kvm_pgtable_hyp_init().
415 u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
443 * @pgt: Uninitialised page-table structure to initialise.
452 int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
[all...]
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmm.c
30 struct nvkm_vmm_pt *pgt = *ppgt; in nvkm_vmm_pt_del() local
31 if (pgt) { in nvkm_vmm_pt_del()
32 kvfree(pgt->pde); in nvkm_vmm_pt_del()
33 kfree(pgt); in nvkm_vmm_pt_del()
44 struct nvkm_vmm_pt *pgt; in nvkm_vmm_pt_new() local
56 if (!(pgt = kzalloc(sizeof(*pgt) + lpte, GFP_KERNEL))) in nvkm_vmm_pt_new()
58 pgt->page = page ? page->shift : 0; in nvkm_vmm_pt_new()
59 pgt->sparse = sparse; in nvkm_vmm_pt_new()
62 pgt->pde = kvcalloc(pten, sizeof(*pgt->pde), GFP_KERNEL); in nvkm_vmm_pt_new()
143 struct nvkm_vmm_pt *pgt = it->pt[it->lvl]; nvkm_vmm_unref_pdes() local
197 nvkm_vmm_unref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt, const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes) nvkm_vmm_unref_sptes() argument
262 struct nvkm_vmm_pt *pgt = it->pt[0]; nvkm_vmm_unref_ptes() local
296 nvkm_vmm_ref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt, const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes) nvkm_vmm_ref_sptes() argument
368 struct nvkm_vmm_pt *pgt = it->pt[0]; nvkm_vmm_ref_ptes() local
381 nvkm_vmm_sparse_ptes(const struct nvkm_vmm_desc *desc, struct nvkm_vmm_pt *pgt, u32 ptei, u32 ptes) nvkm_vmm_sparse_ptes() argument
417 struct nvkm_vmm_pt *pgt = pgd->pde[pdei]; nvkm_vmm_ref_hwpt() local
487 struct nvkm_vmm_pt *pgt = pgd->pde[pdei]; nvkm_vmm_ref_swpt() local
532 struct nvkm_vmm_pt *pgt = it.pt[it.lvl]; nvkm_vmm_iter() local
[all...]
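
nvkm_vmm_pt_new()/nvkm_vmm_pt_del() (lines 30-62) show the allocation idiom behind these hits: one kzalloc covering the struct plus a variable per-PTE tail, and a separately allocated pde array released with kvfree(). A generic sketch of that idiom; the struct layout and names here are illustrative, not nouveau's real nvkm_vmm_pt:

/* Allocate/free a page-table tracking struct with a flexible tail and
 * an optional kvcalloc'd child-pointer array, nouveau-style. */
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/mm.h>

struct vmm_pt {
	u8 page;		/* page shift, 0 if none */
	bool sparse;
	void **pde;		/* per-PDE child pointers, may be NULL */
	u8 pte[];		/* trailing per-PTE state */
};

static struct vmm_pt *vmm_pt_new(u32 nptes, u32 npdes, bool sparse, u8 shift)
{
	struct vmm_pt *pgt;

	pgt = kzalloc(struct_size(pgt, pte, nptes), GFP_KERNEL);
	if (!pgt)
		return NULL;
	pgt->page = shift;
	pgt->sparse = sparse;

	if (npdes) {
		pgt->pde = kvcalloc(npdes, sizeof(*pgt->pde), GFP_KERNEL);
		if (!pgt->pde) {
			kfree(pgt);
			return NULL;
		}
	}
	return pgt;
}

static void vmm_pt_del(struct vmm_pt **ppgt)
{
	struct vmm_pt *pgt = *ppgt;

	if (pgt) {
		kvfree(pgt->pde);	/* pairs with kvcalloc() above */
		kfree(pgt);
		*ppgt = NULL;
	}
}
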
vmmgp100.c
235 struct nvkm_vmm_pt *pgt = pgd->pde[pdei]; in gp100_vmm_pd0_pde() local
239 if (pgt->pt[0] && !gp100_vmm_pde(pgt->pt[0], &data[0])) in gp100_vmm_pd0_pde()
241 if (pgt->pt[1] && !gp100_vmm_pde(pgt->pt[1], &data[1])) in gp100_vmm_pd0_pde()
359 struct nvkm_vmm_pt *pgt = pgd->pde[pdei]; in gp100_vmm_pd1_pde() local
363 if (!gp100_vmm_pde(pgt->pt[0], &data)) in gp100_vmm_pd1_pde()
vmmnv50.c
106 nv50_vmm_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgt, u64 *pdata) in nv50_vmm_pde() argument
110 if (pgt && (pt = pgt->pt[0])) { in nv50_vmm_pde()
111 switch (pgt->page) { in nv50_vmm_pde()
/kernel/linux/linux-6.6/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmm.c
30 struct nvkm_vmm_pt *pgt = *ppgt; in nvkm_vmm_pt_del() local
31 if (pgt) { in nvkm_vmm_pt_del()
32 kvfree(pgt->pde); in nvkm_vmm_pt_del()
33 kfree(pgt); in nvkm_vmm_pt_del()
44 struct nvkm_vmm_pt *pgt; in nvkm_vmm_pt_new() local
56 if (!(pgt = kzalloc(sizeof(*pgt) + lpte, GFP_KERNEL))) in nvkm_vmm_pt_new()
58 pgt->page = page ? page->shift : 0; in nvkm_vmm_pt_new()
59 pgt->sparse = sparse; in nvkm_vmm_pt_new()
62 pgt->pde = kvcalloc(pten, sizeof(*pgt->pde), GFP_KERNEL); in nvkm_vmm_pt_new()
143 struct nvkm_vmm_pt *pgt = it->pt[it->lvl]; nvkm_vmm_unref_pdes() local
197 nvkm_vmm_unref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt, const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes) nvkm_vmm_unref_sptes() argument
262 struct nvkm_vmm_pt *pgt = it->pt[0]; nvkm_vmm_unref_ptes() local
296 nvkm_vmm_ref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt, const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes) nvkm_vmm_ref_sptes() argument
368 struct nvkm_vmm_pt *pgt = it->pt[0]; nvkm_vmm_ref_ptes() local
381 nvkm_vmm_sparse_ptes(const struct nvkm_vmm_desc *desc, struct nvkm_vmm_pt *pgt, u32 ptei, u32 ptes) nvkm_vmm_sparse_ptes() argument
417 struct nvkm_vmm_pt *pgt = pgd->pde[pdei]; nvkm_vmm_ref_hwpt() local
487 struct nvkm_vmm_pt *pgt = pgd->pde[pdei]; nvkm_vmm_ref_swpt() local
532 struct nvkm_vmm_pt *pgt = it.pt[it.lvl]; nvkm_vmm_iter() local
[all...]
vmmgp100.c
238 struct nvkm_vmm_pt *pgt = pgd->pde[pdei]; in gp100_vmm_pd0_pde() local
242 if (pgt->pt[0] && !gp100_vmm_pde(pgt->pt[0], &data[0])) in gp100_vmm_pd0_pde()
244 if (pgt->pt[1] && !gp100_vmm_pde(pgt->pt[1], &data[1])) in gp100_vmm_pd0_pde()
365 struct nvkm_vmm_pt *pgt = pgd->pde[pdei]; in gp100_vmm_pd1_pde() local
369 if (!gp100_vmm_pde(pgt->pt[0], &data)) in gp100_vmm_pd1_pde()
vmmnv50.c
106 nv50_vmm_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgt, u64 *pdata) in nv50_vmm_pde() argument
110 if (pgt && (pt = pgt->pt[0])) { in nv50_vmm_pde()
111 switch (pgt->page) { in nv50_vmm_pde()
/kernel/linux/linux-5.10/arch/arm64/kvm/
mmu.c
51 struct kvm_pgtable *pgt = kvm->arch.mmu.pgt; in stage2_apply_range() local
52 if (!pgt) in stage2_apply_range()
56 ret = fn(pgt, addr, next - addr); in stage2_apply_range()
366 struct kvm_pgtable *pgt; in kvm_init_stage2_mmu() local
368 if (mmu->pgt != NULL) { in kvm_init_stage2_mmu()
373 pgt = kzalloc(sizeof(*pgt), GFP_KERNEL); in kvm_init_stage2_mmu()
374 if (!pgt) in kvm_init_stage2_mmu()
377 err = kvm_pgtable_stage2_init(pgt, kvm); in kvm_init_stage2_mmu()
473 struct kvm_pgtable *pgt = NULL; kvm_free_stage2_pgd() local
505 struct kvm_pgtable *pgt = kvm->arch.mmu.pgt; kvm_phys_addr_ioremap() local
760 struct kvm_pgtable *pgt; user_mem_abort() local
[all...]
/kernel/linux/linux-6.6/arch/arm64/kvm/
mmu.c
66 struct kvm_pgtable *pgt = mmu->pgt; in stage2_apply_range() local
67 if (!pgt) in stage2_apply_range()
71 ret = fn(pgt, addr, next - addr); in stage2_apply_range()
118 struct kvm_pgtable *pgt; in kvm_mmu_split_huge_pages() local
145 pgt = kvm->arch.mmu.pgt; in kvm_mmu_split_huge_pages()
146 if (!pgt) in kvm_mmu_split_huge_pages()
150 ret = kvm_pgtable_stage2_split(pgt, addr, next - addr, cache); in kvm_mmu_split_huge_pages()
804 struct kvm_pgtable pgt = { in get_user_mapping_size() local
870 struct kvm_pgtable *pgt; kvm_init_stage2_mmu() local
1012 struct kvm_pgtable *pgt = NULL; kvm_free_stage2_pgd() local
1070 struct kvm_pgtable *pgt = kvm->arch.mmu.pgt; kvm_phys_addr_ioremap() local
1413 struct kvm_pgtable *pgt; user_mem_abort() local
[all...]
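
Both mmu.c versions open with the same stage2_apply_range() shape (lines 51-56 and 66-71): give up if the table has already been freed, then feed [addr, end) to a callback chunk by chunk. A simplified sketch; the fixed 2 MiB stride is a stand-in for the real chunking logic:

/* Apply fn() across a stage-2 range in chunks, as stage2_apply_range()
 * does. The stride here is a hypothetical simplification. */
static int stage2_apply_range_sketch(struct kvm_s2_mmu *mmu, u64 addr, u64 end,
				     int (*fn)(struct kvm_pgtable *, u64, u64))
{
	struct kvm_pgtable *pgt = mmu->pgt;
	u64 next;
	int ret = 0;

	if (!pgt)
		return -EINVAL;	/* table already freed */

	for (; addr < end && !ret; addr = next) {
		next = min(end, addr + SZ_2M);
		ret = fn(pgt, addr, next - addr);
	}
	return ret;
}
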
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/engine/dma/
usernv04.c
52 struct nvkm_memory *pgt = in nv04_dmaobj_bind() local
55 return nvkm_gpuobj_wrap(pgt, pgpuobj); in nv04_dmaobj_bind()
56 nvkm_kmap(pgt); in nv04_dmaobj_bind()
57 offset = nvkm_ro32(pgt, 8 + (offset >> 10)); in nv04_dmaobj_bind()
59 nvkm_done(pgt); in nv04_dmaobj_bind()
/kernel/linux/linux-6.6/drivers/gpu/drm/nouveau/nvkm/engine/dma/
usernv04.c
52 struct nvkm_memory *pgt = in nv04_dmaobj_bind() local
55 return nvkm_gpuobj_wrap(pgt, pgpuobj); in nv04_dmaobj_bind()
56 nvkm_kmap(pgt); in nv04_dmaobj_bind()
57 offset = nvkm_ro32(pgt, 8 + (offset >> 10)); in nv04_dmaobj_bind()
59 nvkm_done(pgt); in nv04_dmaobj_bind()
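
Both copies of usernv04.c read a PTE back out of the page-table object with the nvkm map/read/unmap triple. A sketch of that access pattern; the 8-byte table header and the offset >> 10 indexing (one 4-byte PTE per 4 KiB page, so (offset >> 12) * 4 == offset >> 10) are read off the hit above:

/* Peek one NV04 PTE out of an nvkm_memory-backed page table. */
static u32 nv04_pte_peek(struct nvkm_memory *pgt, u64 offset)
{
	u32 pte;

	nvkm_kmap(pgt);
	pte = nvkm_ro32(pgt, 8 + (offset >> 10));
	nvkm_done(pgt);
	return pte;
}
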
/kernel/linux/linux-6.6/drivers/firmware/efi/libstub/
x86-5lvl.c
68 u64 *pgt = (void *)la57_toggle + PAGE_SIZE; in efi_5level_switch() local
81 new_cr3 = memset(pgt, 0, PAGE_SIZE); in efi_5level_switch()
89 new_cr3 = memcpy(pgt, new_cr3, PAGE_SIZE); in efi_5level_switch()
/kernel/linux/linux-6.6/arch/arm64/kvm/hyp/nvhe/
mem_protect.c
151 ret = __kvm_pgtable_stage2_init(&host_mmu.pgt, mmu, in kvm_host_prepare_stage2()
157 mmu->pgd_phys = __hyp_pa(host_mmu.pgt.pgd); in kvm_host_prepare_stage2()
158 mmu->pgt = &host_mmu.pgt; in kvm_host_prepare_stage2()
258 ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0, in kvm_guest_prepare_stage2()
264 vm->kvm.arch.mmu.pgd_phys = __hyp_pa(vm->pgt.pgd); in kvm_guest_prepare_stage2()
275 kvm_pgtable_stage2_destroy(&vm->pgt); in reclaim_guest_pages()
328 struct kvm_pgtable *pgt = &host_mmu.pgt; in host_stage2_unmap_dev_all() local
336 ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr); in host_stage2_unmap_dev_all()
589 check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size, struct check_walk_data *data) check_page_state_range() argument
[all...]
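
kvm_host_prepare_stage2() (lines 151-158) wires a freshly initialised table into its MMU in three steps: initialise, publish the physical PGD, publish the pgt pointer. A condensed sketch using the fields visible in the hits; mm_ops, the stage-2 flags, and the force_pte callback are left to the caller (0/NULL here):

/* Initialise a stage-2 table and attach it to an MMU, as the host
 * setup path does. */
static int prepare_stage2_sketch(struct kvm_pgtable *pgt,
				 struct kvm_s2_mmu *mmu,
				 struct kvm_pgtable_mm_ops *mm_ops)
{
	int ret;

	ret = __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL);
	if (ret)
		return ret;

	mmu->pgd_phys = __hyp_pa(pgt->pgd);
	mmu->pgt = pgt;
	return 0;
}
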
/kernel/linux/linux-5.10/arch/s390/kvm/
gaccess.c
979 * @pgt: pointer to the beginning of the page table for the given address if
982 * @fake: pgt references contiguous guest memory block, not a pgtable
985 unsigned long *pgt, int *dat_protection, in kvm_s390_shadow_tables()
1039 *pgt = ptr + vaddr.rfx * 8; in kvm_s390_shadow_tables()
1066 *pgt = ptr + vaddr.rsx * 8; in kvm_s390_shadow_tables()
1094 *pgt = ptr + vaddr.rtx * 8; in kvm_s390_shadow_tables()
1131 *pgt = ptr + vaddr.sx * 8; in kvm_s390_shadow_tables()
1157 *pgt = ptr; in kvm_s390_shadow_tables()
1180 unsigned long pgt = 0; in kvm_s390_shadow_fault() local
1192 rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake); in kvm_s390_shadow_fault()
984 kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr, unsigned long *pgt, int *dat_protection, int *fake) kvm_s390_shadow_tables() argument
[all...]
/kernel/linux/linux-6.6/arch/s390/kvm/
gaccess.c
1375 * @pgt: pointer to the beginning of the page table for the given address if
1379 * @fake: pgt references contiguous guest memory block, not a pgtable
1382 unsigned long *pgt, int *dat_protection, in kvm_s390_shadow_tables()
1438 *pgt = ptr + vaddr.rfx * 8; in kvm_s390_shadow_tables()
1466 *pgt = ptr + vaddr.rsx * 8; in kvm_s390_shadow_tables()
1495 *pgt = ptr + vaddr.rtx * 8; in kvm_s390_shadow_tables()
1533 *pgt = ptr + vaddr.sx * 8; in kvm_s390_shadow_tables()
1560 *pgt = ptr; in kvm_s390_shadow_tables()
1583 unsigned long pgt = 0; in kvm_s390_shadow_fault() local
1595 rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake); in kvm_s390_shadow_fault()
1381 kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr, unsigned long *pgt, int *dat_protection, int *fake) kvm_s390_shadow_tables() argument
[all...]
/kernel/linux/linux-5.10/arch/s390/mm/
gmap.c
1303 * @pgt: pointer to the start of a shadow page table
1308 unsigned long *pgt) in __gmap_unshadow_pgt()
1314 pgt[i] = _PAGE_INVALID; in __gmap_unshadow_pgt()
1326 unsigned long sto, *ste, *pgt; in gmap_unshadow_pgt() local
1336 pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN); in gmap_unshadow_pgt()
1338 __gmap_unshadow_pgt(sg, raddr, pgt); in gmap_unshadow_pgt()
1340 page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT); in gmap_unshadow_pgt()
1356 unsigned long *pgt; in __gmap_unshadow_sgt() local
1364 pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN); in __gmap_unshadow_sgt()
1366 __gmap_unshadow_pgt(sg, raddr, pgt); in __gmap_unshadow_sgt()
1307 __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr, unsigned long *pgt) __gmap_unshadow_pgt() argument
1987 gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr, unsigned long *pgt, int *dat_protection, int *fake) gmap_shadow_pgt_lookup() argument
2027 gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt, int fake) gmap_shadow_pgt() argument
[all...]
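
__gmap_unshadow_pgt() (lines 1307-1314) simply poisons every slot of the shadow page table. A sketch of that loop; the 256-entry count (s390 page tables hold 256 eight-byte entries, one 2 KiB table) is an assumption spelled out in the comment:

/* Invalidate every entry of one s390 shadow page table. 256 entries
 * of 8 bytes each is assumed here. */
static void unshadow_pgt_sketch(unsigned long *pgt)
{
	int i;

	for (i = 0; i < 256; i++)
		pgt[i] = _PAGE_INVALID;
}
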
/kernel/linux/linux-5.10/arch/s390/include/asm/
gmap.h
135 int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
138 unsigned long *pgt, int *dat_protection, int *fake);
/kernel/linux/linux-6.6/arch/s390/include/asm/
gmap.h
135 int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
138 unsigned long *pgt, int *dat_protection, int *fake);
/kernel/linux/linux-6.6/arch/s390/mm/
gmap.c
1326 * @pgt: pointer to the start of a shadow page table
1331 unsigned long *pgt) in __gmap_unshadow_pgt()
1337 pgt[i] = _PAGE_INVALID; in __gmap_unshadow_pgt()
1350 phys_addr_t sto, pgt; in gmap_unshadow_pgt() local
1360 pgt = *ste & _SEGMENT_ENTRY_ORIGIN; in gmap_unshadow_pgt()
1362 __gmap_unshadow_pgt(sg, raddr, __va(pgt)); in gmap_unshadow_pgt()
1364 page = phys_to_page(pgt); in gmap_unshadow_pgt()
1381 phys_addr_t pgt; in __gmap_unshadow_sgt() local
1388 pgt = sgt[i] & _REGION_ENTRY_ORIGIN; in __gmap_unshadow_sgt()
1390 __gmap_unshadow_pgt(sg, raddr, __va(pgt)); in __gmap_unshadow_sgt()
1330 __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr, unsigned long *pgt) __gmap_unshadow_pgt() argument
2016 gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr, unsigned long *pgt, int *dat_protection, int *fake) gmap_shadow_pgt_lookup() argument
2056 gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt, int fake) gmap_shadow_pgt() argument
[all...]
/kernel/linux/linux-6.6/arch/arm64/kvm/hyp/include/nvhe/
pkvm.h
35 struct kvm_pgtable pgt; member
/kernel/linux/linux-5.10/arch/powerpc/kvm/
book3s_64_mmu_radix.c
1275 pgd_t *pgt; in debugfs_radix_read() local
1313 pgt = NULL; in debugfs_radix_read()
1317 pgt = NULL; in debugfs_radix_read()
1327 if (!pgt) { in debugfs_radix_read()
1329 pgt = kvm->arch.pgtable; in debugfs_radix_read()
1336 pgt = nested->shadow_pgtable; in debugfs_radix_read()
1345 "pgdir: %lx\n", (unsigned long)pgt); in debugfs_radix_read()
1350 pgdp = pgt + pgd_index(gpa); in debugfs_radix_read()
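
debugfs_radix_read() first decides which radix root to dump: the L1 guest's own partition-scoped table, or a nested guest's shadow table, and then indexes it with pgd_index(gpa) (line 1350). The selection, condensed into a hypothetical helper:

/* Pick the radix root to walk, as debugfs_radix_read() does. */
static pgd_t *radix_root_sketch(struct kvm *kvm,
				struct kvm_nested_guest *nested)
{
	return nested ? nested->shadow_pgtable : kvm->arch.pgtable;
}
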

Completed in 28 milliseconds
