Lines matching refs:pgtable in drivers/accel/ivpu/ivpu_mmu_context.c (the number prefixed to each line is its line number in that file):

42 static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
46 pgtable->pgd_dma_ptr = dma_alloc_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pgd_dma,
48 if (!pgtable->pgd_dma_ptr)
51 pgtable->pgd_dma = pgd_dma;
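Lines 42-51 allocate the root table (PGD) with dma_alloc_coherent and record both the CPU pointer (pgd_dma_ptr) and the DMA address the device walks from (pgd_dma). Combined with the indexing patterns in the matches below, they imply a bookkeeping structure roughly like the following; the field types are inferred from how each array is dereferenced, and IVPU_MMU_PGTABLE_ENTRIES is an assumed name for IVPU_MMU_PGTABLE_SIZE / sizeof(u64):

struct ivpu_mmu_pgtable {
        u64        ***pte_ptrs[IVPU_MMU_PGTABLE_ENTRIES]; /* CPU pointers to PTE tables (lines 84, 171) */
        u64        **pmd_ptrs[IVPU_MMU_PGTABLE_ENTRIES];  /* CPU pointers to PMD tables (lines 77, 142) */
        u64        *pud_ptrs[IVPU_MMU_PGTABLE_ENTRIES];   /* CPU pointers to PUD tables (lines 70, 105) */
        u64        *pgd_dma_ptr;                           /* CPU pointer to the PGD itself (line 46) */
        dma_addr_t pgd_dma;                                /* DMA address of the PGD (line 51) */
};

Only the tables themselves live in DMA-coherent memory; the *_ptrs arrays are plain kzalloc'd mirrors that let the CPU find each table again at teardown. One IVPU_MMU_PGTABLE_SIZE kzalloc can hold IVPU_MMU_PGTABLE_ENTRIES pointers only because sizeof(u64) == sizeof(void *) on the 64-bit hosts this driver targets.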
63 static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
70 pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
71 pud_dma = pgtable->pgd_dma_ptr[pgd_idx];
77 pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
78 pmd_dma = pgtable->pud_ptrs[pgd_idx][pud_idx];
84 pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
85 pte_dma = pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx];
90 kfree(pgtable->pte_ptrs[pgd_idx][pud_idx]);
94 kfree(pgtable->pmd_ptrs[pgd_idx]);
95 kfree(pgtable->pte_ptrs[pgd_idx]);
99 ivpu_mmu_pgtable_free(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
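The teardown at lines 63-99 reads each (CPU pointer, DMA address) pair back from the two places it was published: the pointer from the mirror array, the DMA address from the parent table's entry (so pud_dma comes out of pgd_dma_ptr[pgd_idx] at line 71, pmd_dma out of the PUD table at line 78, and so on). The per-table free helper called at line 99 never references the pgtable symbol itself and so is absent from the matches; since valid entries carry IVPU_MMU_ENTRY_VALID or'ed into the address (lines 126, 158, 182), a plausible sketch is the following, where IVPU_MMU_ENTRY_FLAGS_MASK is an assumed name for the entry's flag bits:

static void ivpu_mmu_pgtable_free(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
{
        if (cpu_addr)
                /* Strip the flag bits before handing the address back (assumed). */
                dma_free_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, cpu_addr,
                                  dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK);
}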
103 ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx)
105 u64 *pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
115 drm_WARN_ON(&vdev->drm, pgtable->pmd_ptrs[pgd_idx]);
116 pgtable->pmd_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
117 if (!pgtable->pmd_ptrs[pgd_idx])
120 drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx]);
121 pgtable->pte_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
122 if (!pgtable->pte_ptrs[pgd_idx])
125 pgtable->pud_ptrs[pgd_idx] = pud_dma_ptr;
126 pgtable->pgd_dma_ptr[pgd_idx] = pud_dma | IVPU_MMU_ENTRY_VALID;
131 kfree(pgtable->pmd_ptrs[pgd_idx]);
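Lines 103-131 belong to a lazy-allocation helper: return the existing PUD if the slot is already populated, otherwise allocate one plus the mirror arrays for the two levels beneath it, and only then publish the entry. Filling the gaps between the matched lines (the dma_alloc_coherent call is assumed to mirror line 46, and the goto labels are invented), the function reconstructs to roughly:

static u64 *
ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx)
{
        u64 *pud_dma_ptr = pgtable->pud_ptrs[pgd_idx];
        dma_addr_t pud_dma;

        if (pud_dma_ptr)        /* level already populated: nothing to do */
                return pud_dma_ptr;

        /* Assumed to match the allocation at line 46. */
        pud_dma_ptr = dma_alloc_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pud_dma, GFP_KERNEL);
        if (!pud_dma_ptr)
                return NULL;

        /* Mirror arrays for the PMD and PTE levels below, so teardown can walk them. */
        drm_WARN_ON(&vdev->drm, pgtable->pmd_ptrs[pgd_idx]);
        pgtable->pmd_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pmd_ptrs[pgd_idx])
                goto err_free_pud;

        drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx]);
        pgtable->pte_ptrs[pgd_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
        if (!pgtable->pte_ptrs[pgd_idx])
                goto err_free_pmd_ptrs;

        /* Publish last: CPU mirror first, then the device-visible PGD entry. */
        pgtable->pud_ptrs[pgd_idx] = pud_dma_ptr;
        pgtable->pgd_dma_ptr[pgd_idx] = pud_dma | IVPU_MMU_ENTRY_VALID;

        return pud_dma_ptr;

err_free_pmd_ptrs:
        kfree(pgtable->pmd_ptrs[pgd_idx]);      /* matched at line 131 */
err_free_pud:
        ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma);
        return NULL;
}

ivpu_mmu_ensure_pmd (lines 139-158) repeats this pattern one level down, as sketched after the ensure_pte matches below.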
139 ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx,
142 u64 *pmd_dma_ptr = pgtable->pmd_ptrs[pgd_idx][pud_idx];
152 drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx][pud_idx]);
153 pgtable->pte_ptrs[pgd_idx][pud_idx] = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
154 if (!pgtable->pte_ptrs[pgd_idx][pud_idx])
157 pgtable->pmd_ptrs[pgd_idx][pud_idx] = pmd_dma_ptr;
158 pgtable->pud_ptrs[pgd_idx][pud_idx] = pmd_dma | IVPU_MMU_ENTRY_VALID;
168 ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
171 u64 *pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
181 pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma_ptr;
182 pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma | IVPU_MMU_ENTRY_VALID;
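As noted above, ivpu_mmu_ensure_pmd (lines 139-158) is the same shape with only the PTE mirror left to allocate, while ivpu_mmu_ensure_pte (lines 168-182) bottoms out: there is no level beneath it, so it just allocates the leaf table and publishes the pair. A minimal sketch of the leaf case, under the same assumptions as the reconstruction above:

static u64 *
ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
                    int pgd_idx, int pud_idx, int pmd_idx)
{
        u64 *pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
        dma_addr_t pte_dma;

        if (pte_dma_ptr)
                return pte_dma_ptr;

        pte_dma_ptr = dma_alloc_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pte_dma, GFP_KERNEL);
        if (!pte_dma_ptr)
                return NULL;

        /* No deeper mirrors to set up; publish the CPU pointer and the
         * device-visible PMD entry (lines 181-182). */
        pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma_ptr;
        pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma | IVPU_MMU_ENTRY_VALID;

        return pte_dma_ptr;
}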
198 if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
202 if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
206 pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
249 ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] = IVPU_MMU_ENTRY_INVALID;
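Lines 198-206 are the map path chaining the three ensure_* helpers top-down, and line 249 clears one leaf entry on unmap. The index variables themselves are computed outside the matched lines; assuming the conventional 4 KiB-page, 512-entries-per-table split (9 address bits per level, which is what IVPU_MMU_PGTABLE_SIZE suggests), the map path would look roughly like this. The mask names, bit positions, and the assumption that prot already carries IVPU_MMU_ENTRY_VALID are all mine:

#include <linux/bitfield.h>

/* Assumed 9-bit-per-level split of the VPU virtual address. */
#define IVPU_MMU_PGD_INDEX_MASK         GENMASK_ULL(47, 39)
#define IVPU_MMU_PUD_INDEX_MASK         GENMASK_ULL(38, 30)
#define IVPU_MMU_PMD_INDEX_MASK         GENMASK_ULL(29, 21)
#define IVPU_MMU_PTE_INDEX_MASK         GENMASK_ULL(20, 12)

static int ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                                     u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
{
        int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
        int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
        int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
        int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
        u64 *pte;

        /* Allocate any missing intermediate levels on the way down (lines 198-206). */
        if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
                return -ENOMEM;
        if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
                return -ENOMEM;
        pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
        if (!pte)
                return -ENOMEM;

        pte[pte_idx] = dma_addr | prot; /* prot assumed to include IVPU_MMU_ENTRY_VALID */
        return 0;
}

Unmap (line 249) is the mirror image: it only writes IVPU_MMU_ENTRY_INVALID over the leaf entry; the tables themselves stay allocated until the context is torn down.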
255 struct ivpu_mmu_pgtable *pgtable = &ctx->pgtable;
272 clflush_cache_range(pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx],
276 clflush_cache_range(pgtable->pmd_ptrs[pgd_idx][pud_idx],
279 clflush_cache_range(pgtable->pud_ptrs[pgd_idx], IVPU_MMU_PGTABLE_SIZE);
281 clflush_cache_range(pgtable->pgd_dma_ptr, IVPU_MMU_PGTABLE_SIZE);
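Because the CPU writes the tables through cached mappings, lines 272-281 push them out with clflush_cache_range (the x86 helper from asm/cacheflush.h; this driver is x86-only), presumably because the device's page-table walker does not snoop CPU caches. The order is leaf first, PGD last, so no parent entry becomes device-visible before the table it points at. For a single mapped page the walk reduces to the sketch below; the function name is hypothetical, and the loop over every index a (vpu_addr, size) range touches is elided:

static void ivpu_mmu_flush_pgtable_one(struct ivpu_mmu_pgtable *pgtable,
                                       int pgd_idx, int pud_idx, int pmd_idx)
{
        /* Leaf to root, per lines 272-281. */
        clflush_cache_range(pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx],
                            IVPU_MMU_PGTABLE_SIZE);
        clflush_cache_range(pgtable->pmd_ptrs[pgd_idx][pud_idx],
                            IVPU_MMU_PGTABLE_SIZE);
        clflush_cache_range(pgtable->pud_ptrs[pgd_idx], IVPU_MMU_PGTABLE_SIZE);
        clflush_cache_range(pgtable->pgd_dma_ptr, IVPU_MMU_PGTABLE_SIZE);
}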
426 ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
446 if (drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd_dma_ptr))
450 ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
453 ctx->pgtable.pgd_dma_ptr = NULL;
454 ctx->pgtable.pgd_dma = 0;
492 ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable);
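Lines 426-454 bracket the context lifecycle: init allocates the PGD (line 426), and fini frees the whole tree and clears the root fields so the drm_WARN_ON at line 446 catches a double fini or a fini without init. Reconstructed from the matches (the enclosing function name is inferred from context):

static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
        if (drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd_dma_ptr))
                return; /* never initialised, or already torn down */

        ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);

        ctx->pgtable.pgd_dma_ptr = NULL;
        ctx->pgtable.pgd_dma = 0;
}

Line 492 then hands the populated pgtable to ivpu_mmu_set_pgtable, presumably so the MMU code can program pgd_dma as the translation-table base for hardware context ctx_id.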