Lines matching refs: vdev
42 static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
46 pgtable->pgd_dma_ptr = dma_alloc_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pgd_dma,
56 static void ivpu_mmu_pgtable_free(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr)
59 dma_free_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, cpu_addr,
63 static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
87 ivpu_mmu_pgtable_free(vdev, pte_dma_ptr, pte_dma);
91 ivpu_mmu_pgtable_free(vdev, pmd_dma_ptr, pmd_dma);
96 ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma);
99 ivpu_mmu_pgtable_free(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
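
The matches above come from the page-table setup and teardown helpers: ivpu_mmu_pgtable_init() allocates the top-level directory with dma_alloc_coherent(), and ivpu_mmu_pgtables_free() releases tables leaf-first (PTE, then PMD, then PUD, then the PGD, per lines 87-99). A minimal sketch of the init side, assuming the pgd_dma_ptr/pgd_dma fields visible in the listing hold the CPU pointer and the DMA address of the directory:

    /* Sketch: allocate the top-level directory (PGD) for one MMU context.
     * dma_alloc_coherent() returns the CPU mapping and fills in the DMA
     * address that is later programmed into the VPU MMU.
     */
    static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
    {
        dma_addr_t pgd_dma;

        pgtable->pgd_dma_ptr = dma_alloc_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE,
                                                  &pgd_dma, GFP_KERNEL);
        if (!pgtable->pgd_dma_ptr)
            return -ENOMEM;

        pgtable->pgd_dma = pgd_dma;
        return 0;
    }

The teardown must free leaf tables before the directories that point at them, which is why the pte, pmd and pud tables are handed to ivpu_mmu_pgtable_free() before the final pgd free.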
103 ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx)
111 pud_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pud_dma, GFP_KERNEL);
115 drm_WARN_ON(&vdev->drm, pgtable->pmd_ptrs[pgd_idx]);
120 drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx]);
134 ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma);
139 ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx,
148 pmd_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pmd_dma, GFP_KERNEL);
152 drm_WARN_ON(&vdev->drm, pgtable->pte_ptrs[pgd_idx][pud_idx]);
163 ivpu_mmu_pgtable_free(vdev, pmd_dma_ptr, pmd_dma);
168 ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
177 pte_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pte_dma, GFP_KERNEL);
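
ivpu_mmu_ensure_pud(), _pmd() and _pte() share one lazy-allocation pattern: if the table for the given index already exists it is returned, otherwise a new one is allocated with dma_alloc_wc(), linked into the parent level, and freed again on any later failure (lines 134 and 163). A simplified sketch of the PTE-level variant; the pte_ptrs/pmd_ptrs bookkeeping layout and the valid-entry flag are assumptions, not taken from the listing:

    #define ENTRY_VALID_BIT BIT(0)      /* illustrative "valid" flag, not the driver's name */

    /* Sketch: return the CPU pointer of the PTE table reached through
     * pgd_idx/pud_idx/pmd_idx, allocating it on first use.
     */
    static u64 *
    ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
                        int pgd_idx, int pud_idx, int pmd_idx)
    {
        u64 *pmd = pgtable->pmd_ptrs[pgd_idx][pud_idx];     /* parent table (assumed layout) */
        u64 *pte_dma_ptr = pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx];
        dma_addr_t pte_dma;

        if (pte_dma_ptr)
            return pte_dma_ptr;

        pte_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pte_dma, GFP_KERNEL);
        if (!pte_dma_ptr)
            return NULL;

        /* Publish the new table: CPU pointer for the driver, DMA address
         * (plus the assumed valid bit) for the hardware walker.
         */
        pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx] = pte_dma_ptr;
        pmd[pmd_idx] = pte_dma | ENTRY_VALID_BIT;

        return pte_dma_ptr;
    }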
188 ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
198 if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
202 if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
206 pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
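
ivpu_mmu_context_map_page() strings the three helpers together: split the VPU virtual address into per-level indices, make sure each intermediate table exists, then write the leaf entry. A sketch assuming a four-level layout with 4 KB pages and 9 index bits per level (the shift values and index macro are illustrative, not from the listing):

    #define MMU_LVL_IDX(addr, shift)    (((addr) >> (shift)) & 0x1ff)  /* 9-bit index, illustrative */

    static int
    ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                              u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
    {
        int pgd_idx = MMU_LVL_IDX(vpu_addr, 39);
        int pud_idx = MMU_LVL_IDX(vpu_addr, 30);
        int pmd_idx = MMU_LVL_IDX(vpu_addr, 21);
        int pte_idx = MMU_LVL_IDX(vpu_addr, 12);
        u64 *pte;

        if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
            return -ENOMEM;

        if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
            return -ENOMEM;

        pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
        if (!pte)
            return -ENOMEM;

        /* Leaf entry: DMA address of the page plus protection/valid bits. */
        pte[pte_idx] = dma_addr | prot;
        return 0;
    }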
217 ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr,
222 drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size));
223 drm_WARN_ON(&vdev->drm, !IS_ALIGNED(dma_addr, size));
228 int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
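
The two drm_WARN_ON() checks at lines 222-223 guard the contiguous-mapping helper: both the VPU address and the DMA address must be aligned to the 64 KB block, which is then laid down as sixteen consecutive 4 KB entries via ivpu_mmu_context_map_page() (line 228). A sketch, with the 4 KB/64 KB size constants assumed:

    #define MMU_PAGE_SIZE       SZ_4K                    /* assumed */
    #define MMU_CONT_PAGES_SIZE (16 * MMU_PAGE_SIZE)     /* assumed: one 64 KB contiguous block */

    static int
    ivpu_mmu_context_map_cont_64k(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                                  u64 vpu_addr, dma_addr_t dma_addr, u64 prot)
    {
        size_t size = MMU_CONT_PAGES_SIZE;

        drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vpu_addr, size));
        drm_WARN_ON(&vdev->drm, !IS_ALIGNED(dma_addr, size));

        while (size) {
            int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);

            if (ret)
                return ret;

            size -= MMU_PAGE_SIZE;
            vpu_addr += MMU_PAGE_SIZE;
            dma_addr += MMU_PAGE_SIZE;
        }
        return 0;
    }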
285 ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
294 ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot);
297 ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
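
ivpu_mmu_context_map_pages() decides per iteration which of the two helpers to use: a 64 KB contiguous mapping whenever the remaining size and both addresses allow it, otherwise a single 4 KB page. A sketch of that loop, reusing the assumed size constants from the previous sketch:

    static int
    ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                               u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot)
    {
        int map_size;
        int ret;

        while (size) {
            if (size >= MMU_CONT_PAGES_SIZE &&
                IS_ALIGNED(vpu_addr | dma_addr, MMU_CONT_PAGES_SIZE)) {
                ret = ivpu_mmu_context_map_cont_64k(vdev, ctx, vpu_addr, dma_addr, prot);
                map_size = MMU_CONT_PAGES_SIZE;
            } else {
                ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
                map_size = MMU_PAGE_SIZE;
            }

            if (ret)
                return ret;

            vpu_addr += map_size;
            dma_addr += map_size;
            size -= map_size;
        }
        return 0;
    }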
322 ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
346 ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
348 ivpu_err(vdev, "Failed to map context pages\n");
358 ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
360 ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
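
ivpu_mmu_context_map_sgt() is the entry point used for buffer objects: it walks the scatter-gather table, maps each DMA chunk with ivpu_mmu_context_map_pages() (line 346), and finishes with a per-context TLB invalidation (line 358). A simplified sketch; the full parameter list, protection bits, locking and the page-table cache flush done by the driver are assumptions or omitted here:

    int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                                 u64 vpu_addr, struct sg_table *sgt, u64 prot)
    {
        struct scatterlist *sg;
        int ret;
        int i;

        for_each_sgtable_dma_sg(sgt, sg, i) {
            dma_addr_t dma_addr = sg_dma_address(sg);
            size_t size = sg_dma_len(sg);

            ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
            if (ret) {
                ivpu_err(vdev, "Failed to map context pages\n");
                return ret;
            }
            vpu_addr += size;
        }

        /* Make the new translations visible to the VPU. */
        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
        if (ret)
            ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);

        return ret;
    }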
365 ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
373 ivpu_warn(vdev, "Unaligned vpu_addr: 0x%llx\n", vpu_addr);
387 ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
389 ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
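
The unmap path mirrors this: ivpu_mmu_context_unmap_sgt() warns if the VPU address is not page aligned (line 373), clears the corresponding leaf entries, and again invalidates the TLB; a failure there is only warned about (line 389) because the mappings are already gone from the software tables. A compact sketch of clearing one page, reusing the index macro from the map_page sketch, with the invalid-entry value assumed:

    /* Sketch: clearing a single 4 KB mapping means rewriting its leaf entry
     * with a value the hardware walker treats as "not present".
     */
    static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
    {
        int pgd_idx = MMU_LVL_IDX(vpu_addr, 39);
        int pud_idx = MMU_LVL_IDX(vpu_addr, 30);
        int pmd_idx = MMU_LVL_IDX(vpu_addr, 21);
        int pte_idx = MMU_LVL_IDX(vpu_addr, 12);

        /* Assumed: the PTE table already exists for a mapped address. */
        ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] = 0; /* illustrative "invalid" */
    }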
418 ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
426 ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
431 start = vdev->hw->ranges.global.start;
432 end = vdev->hw->ranges.shave.end;
434 start = vdev->hw->ranges.user.start;
435 end = vdev->hw->ranges.dma.end;
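
ivpu_mmu_context_init() allocates the PGD (line 426) and then seeds the context's VPU address range: the global context spans from the 'global' range start to the 'shave' range end (lines 431-432), while user contexts get the 'user' start through the 'dma' end (lines 434-435). A sketch of that selection; the drm_mm allocator and the per-context lock are assumptions about what the rest of the function does with start/end:

    static int
    ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
    {
        u64 start, end;
        int ret;

        mutex_init(&ctx->lock);                      /* assumed per-context lock */

        ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
        if (ret)
            return ret;

        if (!context_id) {                           /* global (SSID 0) context */
            start = vdev->hw->ranges.global.start;
            end = vdev->hw->ranges.shave.end;
        } else {                                     /* per-process user context */
            start = vdev->hw->ranges.user.start;
            end = vdev->hw->ranges.dma.end;
        }

        drm_mm_init(&ctx->mm, start, end - start);   /* assumed drm_mm VA allocator */
        ctx->id = context_id;
        return 0;
    }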
444 static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
446 if (drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd_dma_ptr))
450 ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
457 int ivpu_mmu_global_context_init(struct ivpu_device *vdev)
459 return ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
462 void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
464 return ivpu_mmu_context_fini(vdev, &vdev->gctx);
467 void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
471 xa_lock(&vdev->context_xa);
473 file_priv = xa_load(&vdev->context_xa, ssid);
477 xa_unlock(&vdev->context_xa);
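
ivpu_mmu_user_context_mark_invalid() is called from the fault path: it looks up the file context that owns the faulting SSID in vdev->context_xa and flags it, doing the lookup under xa_lock() so a concurrently closing file cannot free the entry underneath it. A sketch; the ivpu_file_priv type and the flag name are assumptions:

    void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
    {
        struct ivpu_file_priv *file_priv;

        xa_lock(&vdev->context_xa);

        file_priv = xa_load(&vdev->context_xa, ssid);
        if (file_priv)
            file_priv->has_mmu_faults = true;        /* assumed flag name */

        xa_unlock(&vdev->context_xa);
    }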
480 int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id)
484 drm_WARN_ON(&vdev->drm, !ctx_id);
486 ret = ivpu_mmu_context_init(vdev, ctx, ctx_id);
488 ivpu_err(vdev, "Failed to initialize context: %d\n", ret);
492 ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable);
494 ivpu_err(vdev, "Failed to set page table: %d\n", ret);
501 ivpu_mmu_context_fini(vdev, ctx);
505 void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
507 drm_WARN_ON(&vdev->drm, !ctx->id);
509 ivpu_mmu_clear_pgtable(vdev, ctx->id);
510 ivpu_mmu_context_fini(vdev, ctx);
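
Finally, the user-context lifecycle pairs the software tables with the per-SSID hardware state: ivpu_mmu_user_context_init() builds the context and then programs it with ivpu_mmu_set_pgtable() (line 492), unwinding the context on failure (line 501); ivpu_mmu_user_context_fini() clears the SSID first (line 509) and only then frees the page tables. A sketch of that ordering, reconstructed from the listed lines:

    int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id)
    {
        int ret;

        drm_WARN_ON(&vdev->drm, !ctx_id);            /* SSID 0 is reserved for the global context */

        ret = ivpu_mmu_context_init(vdev, ctx, ctx_id);
        if (ret) {
            ivpu_err(vdev, "Failed to initialize context: %d\n", ret);
            return ret;
        }

        ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable);
        if (ret) {
            ivpu_err(vdev, "Failed to set page table: %d\n", ret);
            ivpu_mmu_context_fini(vdev, ctx);
        }

        return ret;
    }

    void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
    {
        drm_WARN_ON(&vdev->drm, !ctx->id);

        ivpu_mmu_clear_pgtable(vdev, ctx->id);
        ivpu_mmu_context_fini(vdev, ctx);
    }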