Lines Matching refs:pgt
49 struct kvm_pgtable *pgt;
92 static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
94 u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */
95 u64 mask = BIT(pgt->ia_bits) - 1;
102 return __kvm_pgd_page_idx(data->pgt, data->addr);
107 struct kvm_pgtable pgt = {
112 return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;
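Read together, the hits at 92-112 are the two PGD-sizing helpers: __kvm_pgd_page_idx() turns an input address into an index into the (possibly concatenated) PGD pages, and kvm_pgd_pages() reuses it on an all-ones address to count how many PGD pages a given ia_bits/start_level configuration needs. A minimal reconstruction from these fragments (the return expression and the designated initializers are inferred here, so check them against the tree):

static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
{
	u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */
	u64 mask = BIT(pgt->ia_bits) - 1;

	/* Keep only the input-address bits, then index by the top-level granule. */
	return (addr & mask) >> shift;
}

static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
{
	struct kvm_pgtable pgt = {
		.ia_bits	= ia_bits,
		.start_level	= start_level,
	};

	/* Index of the highest mappable address, plus one, is the page count. */
	return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;
}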
274 struct kvm_pgtable *pgt = data->pgt;
275 u64 limit = BIT(pgt->ia_bits);
280 if (!pgt->pgd)
284 kvm_pte_t *ptep = &pgt->pgd[idx * PTRS_PER_PTE];
286 ret = __kvm_pgtable_walk(data, ptep, pgt->start_level);
294 int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
298 .pgt = pgt,
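The 274-298 hits are the generic walker: _kvm_pgtable_walk() iterates over the concatenated PGD pages (one block of PTRS_PER_PTE entries per page) and descends from start_level, while kvm_pgtable_walk() only packages the pgt, the address range and the caller's walker into the walk data. As a sketch of how the rest of the file drives it, a visitor that counts valid leaf entries might look like this; count_valid_leaves()/count_mapped_leaves() are hypothetical, and the visitor-callback signature, kvm_pte_valid() and the walker struct are assumed to match the v5.10-era layout of this file:

static int count_valid_leaves(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			      enum kvm_pgtable_walk_flags flag, void * const arg)
{
	u64 *count = arg;

	/* Leaf callbacks also see invalid entries, so filter on validity. */
	if (kvm_pte_valid(*ptep))
		(*count)++;

	return 0;
}

static u64 count_mapped_leaves(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	u64 count = 0;
	struct kvm_pgtable_walker walker = {
		.cb	= count_valid_leaves,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= &count,
	};

	WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
	return count;
}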
374 int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
391 ret = kvm_pgtable_walk(pgt, addr, size, &walker);
397 int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits)
401 pgt->pgd = (kvm_pte_t *)get_zeroed_page(GFP_KERNEL);
402 if (!pgt->pgd)
405 pgt->ia_bits = va_bits;
406 pgt->start_level = KVM_PGTABLE_MAX_LEVELS - levels;
407 pgt->mmu = NULL;
418 void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
425 WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
426 free_page((unsigned long)pgt->pgd);
427 pgt->pgd = NULL;
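The 374-427 hits cover the EL2 ("hyp") page-table API: kvm_pgtable_hyp_init() allocates a single zeroed PGD page and derives start_level from va_bits (leaving pgt->mmu NULL, since there is no stage-2 MMU to tag), kvm_pgtable_hyp_map() installs mappings via a walk, and kvm_pgtable_hyp_destroy() walks the table to free intermediate levels before releasing the PGD. A hedged usage sketch follows; example_hyp_map_one() is hypothetical, and hyp_va_bits, va and phys stand in for values the real callers in mmu.c compute:

static int example_hyp_map_one(struct kvm_pgtable *pgt, u32 hyp_va_bits,
			       u64 va, u64 phys)
{
	int ret;

	ret = kvm_pgtable_hyp_init(pgt, hyp_va_bits);
	if (ret)
		return ret;

	/* Map one read+write page of hypervisor data at its EL2 alias. */
	ret = kvm_pgtable_hyp_map(pgt, va, PAGE_SIZE, phys,
				  KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W);
	if (ret)
		kvm_pgtable_hyp_destroy(pgt);

	return ret;
}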
617 int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
624 .mmu = pgt->mmu,
639 ret = kvm_pgtable_walk(pgt, addr, size, &walker);
698 int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
702 .arg = pgt->mmu,
706 return kvm_pgtable_walk(pgt, addr, size, &walker);
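Hits 617-706 are the stage-2 mapping and unmapping entry points; both are again walks, with the map walker carrying pgt->mmu so break-before-make and TLB invalidation can target the right VMID. Roughly how a fault-handling path uses them, as a sketch only: example_stage2_map_one() is hypothetical, the last argument of kvm_pgtable_stage2_map() is assumed to be the v5.10-era struct kvm_mmu_memory_cache the caller has topped up, and the MMU lock is assumed held around the calls.

static int example_stage2_map_one(struct kvm_pgtable *pgt, u64 ipa, u64 phys,
				  struct kvm_mmu_memory_cache *mc)
{
	int ret;

	/* Install a single writable page at the guest IPA. */
	ret = kvm_pgtable_stage2_map(pgt, ipa, PAGE_SIZE, phys,
				     KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W,
				     mc);
	if (ret)
		return ret;

	/* Tear the mapping down again, e.g. when the memslot goes away. */
	return kvm_pgtable_stage2_unmap(pgt, ipa, PAGE_SIZE);
}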
742 static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr,
759 ret = kvm_pgtable_walk(pgt, addr, size, &walker);
771 int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
773 return stage2_update_leaf_attrs(pgt, addr, size, 0,
778 kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
781 stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0,
787 kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr)
790 stage2_update_leaf_attrs(pgt, addr, 1, 0, KVM_PTE_LEAF_ATTR_LO_S2_AF,
801 bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr)
804 stage2_update_leaf_attrs(pgt, addr, 1, 0, 0, &pte, NULL);
808 int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
824 ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level);
826 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, pgt->mmu, addr, level);
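Hits 742-826 all funnel through stage2_update_leaf_attrs(), which walks a range and applies a set/clear pair of attribute bits to each valid leaf, optionally reporting the old PTE and the level it was found at. The exported wrappers read off these fragments fairly directly: wrprotect clears the stage-2 write permission over a range, mkyoung/mkold set and clear the access flag (KVM_PTE_LEAF_ATTR_LO_S2_AF) for a single address, is_young only reads the PTE back, and relax_perms applies new permissions and then invalidates the TLB by IPA at the reported level. A sketch of the aging side; example_stage2_age() is hypothetical, and since KVM_PTE_LEAF_ATTR_LO_S2_AF is local to pgtable.c, code like this would have to live in that file:

static bool example_stage2_age(struct kvm_pgtable *pgt, u64 gpa)
{
	/* Clear the access flag; the returned (old) PTE says whether it was set. */
	kvm_pte_t pte = kvm_pgtable_stage2_mkold(pgt, gpa);

	return pte & KVM_PTE_LEAF_ATTR_LO_S2_AF;
}

A read-only probe of the same address would call kvm_pgtable_stage2_is_young() instead, and write-protecting a whole range for dirty logging is the same pattern via kvm_pgtable_stage2_wrprotect().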
843 int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
853 return kvm_pgtable_walk(pgt, addr, size, &walker);
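Hits 843-853: kvm_pgtable_stage2_flush() is another plain walk whose visitor performs data-cache maintenance on the memory backing each valid leaf in the range. A one-line usage fragment, with pgt, ipa and size as placeholders supplied by the caller:

	/* CPU data-cache maintenance for everything mapped in [ipa, ipa + size). */
	ret = kvm_pgtable_stage2_flush(pgt, ipa, size);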
856 int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm *kvm)
865 pgt->pgd = alloc_pages_exact(pgd_sz, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
866 if (!pgt->pgd)
869 pgt->ia_bits = ia_bits;
870 pgt->start_level = start_level;
871 pgt->mmu = &kvm->arch.mmu;
895 void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
904 WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
905 pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
906 free_pages_exact(pgt->pgd, pgd_sz);
907 pgt->pgd = NULL;
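Finally, hits 856-907 are the stage-2 lifecycle: kvm_pgtable_stage2_init() sizes the (possibly concatenated) PGD from the VM's IPA limit, allocates it zeroed, and records &kvm->arch.mmu in pgt->mmu; kvm_pgtable_stage2_destroy() walks the whole IPA space to free table pages, then frees the PGD itself using the size recomputed from ia_bits/start_level. A lifetime sketch, roughly as the mmu.c callers pair the two; example_stage2_lifetime() is hypothetical and the allocation flags are assumptions:

static int example_stage2_lifetime(struct kvm *kvm)
{
	struct kvm_pgtable *pgt;
	int ret;

	pgt = kzalloc(sizeof(*pgt), GFP_KERNEL);
	if (!pgt)
		return -ENOMEM;

	ret = kvm_pgtable_stage2_init(pgt, kvm);	/* sizes and allocates the PGD */
	if (ret) {
		kfree(pgt);
		return ret;
	}
	kvm->arch.mmu.pgt = pgt;

	/* ... VM runs ... */

	kvm_pgtable_stage2_destroy(pgt);		/* unmaps everything, frees the PGD */
	kvm->arch.mmu.pgt = NULL;
	kfree(pgt);

	return 0;
}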