Lines Matching refs:pgt

66 struct kvm_pgtable *pgt = mmu->pgt;
67 if (!pgt)
71 ret = fn(pgt, addr, next - addr);
118 struct kvm_pgtable *pgt;
145 pgt = kvm->arch.mmu.pgt;
146 if (!pgt)
150 ret = kvm_pgtable_stage2_split(pgt, addr, next - addr, cache);
804 struct kvm_pgtable pgt = {
822 ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level);
870 struct kvm_pgtable *pgt;
897 if (mmu->pgt != NULL) {
902 pgt = kzalloc(sizeof(*pgt), GFP_KERNEL_ACCOUNT);
903 if (!pgt)
907 err = kvm_pgtable_stage2_init(pgt, mmu, &kvm_s2_mm_ops);
924 mmu->pgt = pgt;
925 mmu->pgd_phys = __pa(pgt->pgd);
929 kvm_pgtable_stage2_destroy(pgt);
931 kfree(pgt);
1012 struct kvm_pgtable *pgt = NULL;
1015 pgt = mmu->pgt;
1016 if (pgt) {
1018 mmu->pgt = NULL;
1023 if (pgt) {
1024 kvm_pgtable_stage2_destroy(pgt);
1025 kfree(pgt);
1070 struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
1088 ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
1413 struct kvm_pgtable *pgt;
1540 pgt = vcpu->arch.hw_mmu->pgt;
1590 ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
1592 ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
1620 pte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa);
1661 if (fault_ipa >= BIT_ULL(vcpu->arch.hw_mmu->pgt->ia_bits)) {
1773 if (!kvm->arch.mmu.pgt)
1787 if (!kvm->arch.mmu.pgt)
1810 kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT,
1821 if (!kvm->arch.mmu.pgt)
1824 return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
1833 if (!kvm->arch.mmu.pgt)
1836 return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
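
Taken together, the matches trace the lifetime of the stage-2 page table: the block around source lines 897-931 allocates and initialises mmu->pgt, the fault, splitting and aging paths operate on it, and the block around source lines 1012-1027 unpublishes it before destroying and freeing it. Below is a minimal sketch of that allocate/initialise/tear-down pattern, reconstructed from these matches only; the wrapper names alloc_stage2_pgt()/free_stage2_pgt() are illustrative, the exact kvm_pgtable_stage2_init()/kvm_pgtable_stage2_destroy() signatures vary between kernel versions, and any locking mmu.c applies around these steps is omitted.

/*
 * Illustrative sketch only, reconstructed from the matches above.
 * Wrapper names and error handling are assumptions; the
 * kvm_pgtable_stage2_* calls mirror how mmu->pgt appears in the listing.
 */
static int alloc_stage2_pgt(struct kvm_s2_mmu *mmu)
{
	struct kvm_pgtable *pgt;
	int err;

	/* Refuse to re-initialise an MMU that already has a table. */
	if (mmu->pgt != NULL)
		return -EINVAL;

	pgt = kzalloc(sizeof(*pgt), GFP_KERNEL_ACCOUNT);
	if (!pgt)
		return -ENOMEM;

	/* Build the empty stage-2 table; kvm_s2_mm_ops supplies allocators. */
	err = kvm_pgtable_stage2_init(pgt, mmu, &kvm_s2_mm_ops);
	if (err) {
		kfree(pgt);
		return err;
	}

	/* Publish the table and its root for the fault paths to use. */
	mmu->pgt = pgt;
	mmu->pgd_phys = __pa(pgt->pgd);
	return 0;
}

static void free_stage2_pgt(struct kvm_s2_mmu *mmu)
{
	struct kvm_pgtable *pgt = mmu->pgt;

	if (!pgt)
		return;

	/* Unpublish first, so late callers see a NULL mmu->pgt ... */
	mmu->pgt = NULL;

	/* ... then tear down the table entries and free the descriptor. */
	kvm_pgtable_stage2_destroy(pgt);
	kfree(pgt);
}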
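
The last group of matches (around source lines 1773-1836) shows the consumer side of that lifecycle: each hook first checks kvm->arch.mmu.pgt and bails out if the table has already been torn down, then calls the relevant kvm_pgtable_stage2_* helper. A hedged reconstruction of one such aging-style hook follows; the function name and the range handling are assumptions based only on what the matches show.

/*
 * Sketch of the NULL-guard consumer pattern visible in the final matches.
 * Name and range handling are illustrative; only the guard and the
 * kvm_pgtable_stage2_test_clear_young() call come from the listing.
 */
static bool stage2_age_range(struct kvm *kvm, u64 start_gfn, u64 nr_pages,
			     bool mkold)
{
	u64 addr = start_gfn << PAGE_SHIFT;
	u64 size = nr_pages << PAGE_SHIFT;

	/* The table may already have been freed (see the teardown sketch). */
	if (!kvm->arch.mmu.pgt)
		return false;

	/* Test, and optionally clear, the young state for the range. */
	return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
						   addr, size, mkold);
}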