Lines matching refs:vdev (references to the struct ivpu_device *vdev pointer in the ivpu MMU code; the leading number on each line is the source-file line of the match)

243 static void ivpu_mmu_config_check(struct ivpu_device *vdev)
248 if (ivpu_is_simics(vdev))
255 ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref);
259 ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF);
263 ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF);
265 if (ivpu_is_simics(vdev))
267 else if (ivpu_is_fpga(vdev))
274 ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref);
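The ivpu_mmu_config_check() matches above (source lines 243-274) all follow one pattern: read an MMU capability register (IDR0/1/3/5), pick a reference value that depends on the platform (Simics and FPGA report reduced feature sets), and log a mismatch with ivpu_dbg() instead of failing probe. A minimal sketch of one such check, assuming the driver's REGV_RD32() accessor and the IDR0 register/reference macro names:

	u32 val, val_ref;

	/* Simics models a reduced IDR0 feature set, so compare against a
	 * platform-specific expected value (macro names assumed). */
	if (ivpu_is_simics(vdev))
		val_ref = IVPU_MMU_IDR0_REF_SIMICS;
	else
		val_ref = IVPU_MMU_IDR0_REF;

	val = REGV_RD32(VPU_37XX_HOST_MMU_IDR0);	/* accessor/register name assumed */
	if (val != val_ref)
		ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref);

The IDR1 and IDR3 checks compare directly against fixed IVPU_MMU_IDR1_REF / IVPU_MMU_IDR3_REF values, while the IDR5 check again selects between Simics, FPGA and silicon reference values (lines 265-274).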
277 static int ivpu_mmu_cdtab_alloc(struct ivpu_device *vdev)
279 struct ivpu_mmu_info *mmu = vdev->mmu;
283 cdtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &cdtab->dma, GFP_KERNEL);
287 ivpu_dbg(vdev, MMU, "CDTAB alloc: dma=%pad size=%zu\n", &cdtab->dma, size);
292 static int ivpu_mmu_strtab_alloc(struct ivpu_device *vdev)
294 struct ivpu_mmu_info *mmu = vdev->mmu;
298 strtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &strtab->dma, GFP_KERNEL);
306 ivpu_dbg(vdev, MMU, "STRTAB alloc: dma=%pad dma_q=%pad size=%zu\n",
312 static int ivpu_mmu_cmdq_alloc(struct ivpu_device *vdev)
314 struct ivpu_mmu_info *mmu = vdev->mmu;
317 q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_CMDQ_SIZE, &q->dma, GFP_KERNEL);
325 ivpu_dbg(vdev, MMU, "CMDQ alloc: dma=%pad dma_q=%pad size=%u\n",
331 static int ivpu_mmu_evtq_alloc(struct ivpu_device *vdev)
333 struct ivpu_mmu_info *mmu = vdev->mmu;
336 q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_EVTQ_SIZE, &q->dma, GFP_KERNEL);
344 ivpu_dbg(vdev, MMU, "EVTQ alloc: dma=%pad dma_q=%pad size=%u\n",
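Source lines 277-348 show four allocators (cdtab, strtab, cmdq, evtq) sharing one pattern: a managed coherent DMA buffer from dmam_alloc_coherent() on vdev->drm.dev, the handle stored in vdev->mmu, and a debug line with the resulting DMA address. A reconstructed sketch of the command-queue variant; the error handling and the dma_q derivation are assumptions, since only the allocation and debug lines appear in the matches:

	static int ivpu_mmu_cmdq_alloc(struct ivpu_device *vdev)
	{
		struct ivpu_mmu_info *mmu = vdev->mmu;
		struct ivpu_mmu_queue *q = &mmu->cmdq;

		q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_CMDQ_SIZE, &q->dma, GFP_KERNEL);
		if (!q->base)
			return -ENOMEM;

		/* The driver also derives q->dma_q here (visible in the debug
		 * print below): the queue base address packed together with the
		 * log2 queue size, as expected by the SMMU *_Q_BASE registers.
		 * The exact encoding is not visible in the matches above. */

		ivpu_dbg(vdev, MMU, "CMDQ alloc: dma=%pad dma_q=%pad size=%u\n",
			 &q->dma, &q->dma_q, IVPU_MMU_CMDQ_SIZE);

		return 0;
	}

Because the buffers are device-managed, the callers' error paths never need to free them explicitly.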
350 static int ivpu_mmu_structs_alloc(struct ivpu_device *vdev)
354 ret = ivpu_mmu_cdtab_alloc(vdev);
356 ivpu_err(vdev, "Failed to allocate cdtab: %d\n", ret);
360 ret = ivpu_mmu_strtab_alloc(vdev);
362 ivpu_err(vdev, "Failed to allocate strtab: %d\n", ret);
366 ret = ivpu_mmu_cmdq_alloc(vdev);
368 ivpu_err(vdev, "Failed to allocate cmdq: %d\n", ret);
372 ret = ivpu_mmu_evtq_alloc(vdev);
374 ivpu_err(vdev, "Failed to allocate evtq: %d\n", ret);
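ivpu_mmu_structs_alloc() (source lines 350-377) simply chains those four allocators and reports the first failure. A reconstruction of the visible pattern (the if/return structure is inferred from the error messages):

	static int ivpu_mmu_structs_alloc(struct ivpu_device *vdev)
	{
		int ret;

		ret = ivpu_mmu_cdtab_alloc(vdev);
		if (ret) {
			ivpu_err(vdev, "Failed to allocate cdtab: %d\n", ret);
			return ret;
		}

		ret = ivpu_mmu_strtab_alloc(vdev);
		if (ret) {
			ivpu_err(vdev, "Failed to allocate strtab: %d\n", ret);
			return ret;
		}

		ret = ivpu_mmu_cmdq_alloc(vdev);
		if (ret) {
			ivpu_err(vdev, "Failed to allocate cmdq: %d\n", ret);
			return ret;
		}

		ret = ivpu_mmu_evtq_alloc(vdev);
		if (ret)
			ivpu_err(vdev, "Failed to allocate evtq: %d\n", ret);

		return ret;
	}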
379 static int ivpu_mmu_reg_write(struct ivpu_device *vdev, u32 reg, u32 val)
389 ivpu_err(vdev, "Failed to write register 0x%x\n", reg);
394 static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev)
399 ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, 0);
403 return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, irq_ctrl);
406 static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
408 struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
414 static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, const char *name, u64 data0, u64 data1)
416 struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
421 ivpu_err(vdev, "Failed to write MMU CMD %s\n", name);
429 ivpu_dbg(vdev, MMU, "CMD write: %s data: 0x%llx 0x%llx\n", name, data0, data1);
434 static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
436 struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
445 ret = ivpu_mmu_cmdq_cmd_write(vdev, "SYNC", val, 0);
452 ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
454 ivpu_err(vdev, "Timed out waiting for consumer: %d\n", ret);
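Source lines 406-457 cover the command-queue helpers: ivpu_mmu_cmdq_cmd_write() places a two-word command at the producer index (the error at line 421 suggests it fails when the queue is full), and ivpu_mmu_cmdq_sync() appends a CMD_SYNC, rings the doorbell and waits for the hardware consumer index to catch up via ivpu_mmu_cmdq_wait_for_cons(). A sketch of the sync path; the CMD_SYNC field encoding, the cache flush, and the PROD register and q->prod field names are assumptions:

	static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
	{
		struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
		u64 val;
		int ret;

		/* Completion is detected by polling the consumer index, so the
		 * sync command carries no MSI payload (field names assumed). */
		val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_SYNC);

		ret = ivpu_mmu_cmdq_cmd_write(vdev, "SYNC", val, 0);
		if (ret)
			return ret;

		/* Make the queue contents visible to the device, then ring the
		 * doorbell by publishing the new producer index. */
		clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
		REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, q->prod);

		ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
		if (ret)
			ivpu_err(vdev, "Timed out waiting for consumer: %d\n", ret);

		return ret;
	}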
459 static int ivpu_mmu_cmdq_write_cfgi_all(struct ivpu_device *vdev)
464 return ivpu_mmu_cmdq_cmd_write(vdev, "CFGI_ALL", data0, data1);
467 static int ivpu_mmu_cmdq_write_tlbi_nh_asid(struct ivpu_device *vdev, u16 ssid)
472 return ivpu_mmu_cmdq_cmd_write(vdev, "TLBI_NH_ASID", val, 0);
475 static int ivpu_mmu_cmdq_write_tlbi_nsnh_all(struct ivpu_device *vdev)
479 return ivpu_mmu_cmdq_cmd_write(vdev, "TLBI_NSNH_ALL", val, 0);
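The three command writers at source lines 459-480 only differ in how they compose the 128-bit command: CFGI_ALL invalidates all cached configuration, TLBI_NH_ASID invalidates the TLB for one ASID (the SSID passed in is reused as the ASID), and TLBI_NSNH_ALL drops all non-secure TLB entries. A sketch of the ASID variant, with the opcode and field macro names assumed:

	static int ivpu_mmu_cmdq_write_tlbi_nh_asid(struct ivpu_device *vdev, u16 ssid)
	{
		/* Word 0: opcode plus the ASID being invalidated; word 1 unused. */
		u64 val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_TLBI_NH_ASID) |
			  FIELD_PREP(IVPU_MMU_CMD_TLBI_0_ASID, ssid);

		return ivpu_mmu_cmdq_cmd_write(vdev, "TLBI_NH_ASID", val, 0);
	}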
482 static int ivpu_mmu_reset(struct ivpu_device *vdev)
484 struct ivpu_mmu_info *mmu = vdev->mmu;
497 ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, 0);
517 ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
521 ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
525 ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev);
529 ret = ivpu_mmu_cmdq_sync(vdev);
538 ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
543 ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
547 ret = ivpu_mmu_irqs_setup(vdev);
552 return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
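ivpu_mmu_reset() (source lines 482-553) clears CR0 to disable the SMMU, presumably programs the stream-table and queue base registers in the stretch not matched by refs:vdev, and then re-enables features in stages: each CR0 write at lines 517, 538, 543 and 552 ORs one more enable bit into the previous value, with a CFGI_ALL + TLBI_NSNH_ALL + SYNC sequence issued once the command queue is live. A condensed sketch of that staging; the CR0 bit names are assumed from the generic SMMUv3 layout:

	u32 val = 0;
	int ret;

	ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, 0);	/* line 497: everything off */
	if (ret)
		return ret;

	val |= IVPU_MMU_CR0_CMDQEN;					/* line 517: command queue on */
	ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
	if (ret)
		return ret;

	/* lines 521-529: CFGI_ALL, TLBI_NSNH_ALL and SYNC flush stale state */

	val |= IVPU_MMU_CR0_EVTQEN;					/* line 538: event queue on */
	ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
	if (ret)
		return ret;

	/* line 543 ORs in a further enable bit not identifiable from the
	 * matches, and line 547 configures the MMU interrupts. */

	ret = ivpu_mmu_irqs_setup(vdev);				/* line 547 */
	if (ret)
		return ret;

	val |= IVPU_MMU_CR0_SMMUEN;					/* line 552: translation on */
	return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);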
555 static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
557 struct ivpu_mmu_info *mmu = vdev->mmu;
585 ivpu_dbg(vdev, MMU, "STRTAB write entry (SID=%u): 0x%llx, 0x%llx\n", sid, str[0], str[1]);
588 static int ivpu_mmu_strtab_init(struct ivpu_device *vdev)
590 ivpu_mmu_strtab_link_cd(vdev, IVPU_MMU_STREAM_ID0);
591 ivpu_mmu_strtab_link_cd(vdev, IVPU_MMU_STREAM_ID3);
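ivpu_mmu_strtab_link_cd() (source lines 555-586) builds a stream table entry for one stream ID so that stage-1 translation goes through the shared context-descriptor table, and ivpu_mmu_strtab_init() links the two stream IDs the VPU uses (ID0 and ID3). A rough sketch of the entry write; the STE field macros, entry stride and the ordering of the two 64-bit words are assumptions based on the generic SMMUv3 STE layout:

	struct ivpu_mmu_info *mmu = vdev->mmu;
	u64 *str = &mmu->strtab.base[sid * IVPU_MMU_STRTAB_ENT_DWORDS];	/* stride assumed */
	u64 str0;

	/* Valid entry, stage-1 translation, S1ContextPtr pointing at the CD
	 * table allocated in ivpu_mmu_cdtab_alloc(). */
	str0 = IVPU_MMU_STE_0_V |
	       FIELD_PREP(IVPU_MMU_STE_0_CFG, IVPU_MMU_STE_0_CFG_S1_TRANS) |
	       (mmu->cdtab.dma & IVPU_MMU_STE_0_S1CTXPTR_MASK);

	/* Publish the attribute word first, then the word holding the valid
	 * bit, so the SMMU never observes a half-initialized entry. */
	str[1] = 0;	/* attributes left at defaults in this sketch */
	wmb();
	str[0] = str0;
	wmb();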
596 int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid)
598 struct ivpu_mmu_info *mmu = vdev->mmu;
605 ret = ivpu_mmu_cmdq_write_tlbi_nh_asid(vdev, ssid);
609 ret = ivpu_mmu_cmdq_sync(vdev);
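ivpu_mmu_invalidate_tlb() (source lines 596-613) is the entry point used after page-table updates: under mmu->lock it queues a TLBI_NH_ASID for the context's SSID followed by a SYNC. A sketch, where the mmu->on "MMU is running" flag is an assumption:

	int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid)
	{
		struct ivpu_mmu_info *mmu = vdev->mmu;
		int ret = 0;

		mutex_lock(&mmu->lock);
		if (!mmu->on)		/* nothing to invalidate while disabled (flag assumed) */
			goto unlock;

		ret = ivpu_mmu_cmdq_write_tlbi_nh_asid(vdev, ssid);
		if (ret)
			goto unlock;

		ret = ivpu_mmu_cmdq_sync(vdev);
	unlock:
		mutex_unlock(&mmu->lock);
		return ret;
	}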
615 static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
617 struct ivpu_mmu_info *mmu = vdev->mmu;
659 ivpu_dbg(vdev, MMU, "CDTAB %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
666 ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
670 ret = ivpu_mmu_cmdq_sync(vdev);
676 static int ivpu_mmu_cd_add_gbl(struct ivpu_device *vdev)
680 ret = ivpu_mmu_cd_add(vdev, 0, vdev->gctx.pgtable.pgd_dma);
682 ivpu_err(vdev, "Failed to add global CD entry: %d\n", ret);
687 static int ivpu_mmu_cd_add_user(struct ivpu_device *vdev, u32 ssid, dma_addr_t cd_dma)
692 ivpu_err(vdev, "Invalid SSID: %u\n", ssid);
696 ret = ivpu_mmu_cd_add(vdev, ssid, cd_dma);
698 ivpu_err(vdev, "Failed to add CD entry SSID=%u: %d\n", ssid, ret);
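ivpu_mmu_cd_add() (source lines 615-674) writes a context descriptor at index SSID in the CD table, pointing at the per-context page-table root passed in cd_dma (or clearing the entry when cd_dma is 0), then issues CFGI_ALL + SYNC so the SMMU drops any cached copy. The two wrappers only differ in validation: the global context uses SSID 0 (line 680), while user contexts must use a non-zero SSID. A sketch of the user wrapper, with the upper-bound macro name assumed:

	static int ivpu_mmu_cd_add_user(struct ivpu_device *vdev, u32 ssid, dma_addr_t cd_dma)
	{
		int ret;

		/* SSID 0 is reserved for the global context. */
		if (ssid == 0 || ssid > IVPU_MMU_USER_SSID_MAX) {
			ivpu_err(vdev, "Invalid SSID: %u\n", ssid);
			return -EINVAL;
		}

		ret = ivpu_mmu_cd_add(vdev, ssid, cd_dma);
		if (ret)
			ivpu_err(vdev, "Failed to add CD entry SSID=%u: %d\n", ssid, ret);

		return ret;
	}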
703 int ivpu_mmu_init(struct ivpu_device *vdev)
705 struct ivpu_mmu_info *mmu = vdev->mmu;
708 ivpu_dbg(vdev, MMU, "Init..\n");
710 drmm_mutex_init(&vdev->drm, &mmu->lock);
711 ivpu_mmu_config_check(vdev);
713 ret = ivpu_mmu_structs_alloc(vdev);
717 ret = ivpu_mmu_strtab_init(vdev);
719 ivpu_err(vdev, "Failed to initialize strtab: %d\n", ret);
723 ret = ivpu_mmu_cd_add_gbl(vdev);
725 ivpu_err(vdev, "Failed to initialize strtab: %d\n", ret);
729 ret = ivpu_mmu_enable(vdev);
731 ivpu_err(vdev, "Failed to resume MMU: %d\n", ret);
735 ivpu_dbg(vdev, MMU, "Init done\n");
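ivpu_mmu_init() (source lines 703-737) ties the pieces together: sanity-check the hardware configuration, allocate the tables and queues, link the stream table to the CD table, install the global context descriptor and finally enable the MMU. A reconstruction of the flow, with the error paths around lines 713-733 inferred from the error messages:

	int ivpu_mmu_init(struct ivpu_device *vdev)
	{
		struct ivpu_mmu_info *mmu = vdev->mmu;
		int ret;

		ivpu_dbg(vdev, MMU, "Init..\n");

		drmm_mutex_init(&vdev->drm, &mmu->lock);
		ivpu_mmu_config_check(vdev);

		ret = ivpu_mmu_structs_alloc(vdev);
		if (ret)
			return ret;

		ret = ivpu_mmu_strtab_init(vdev);
		if (ret) {
			ivpu_err(vdev, "Failed to initialize strtab: %d\n", ret);
			return ret;
		}

		ret = ivpu_mmu_cd_add_gbl(vdev);
		if (ret) {
			ivpu_err(vdev, "Failed to initialize global CD: %d\n", ret);
			return ret;
		}

		ret = ivpu_mmu_enable(vdev);
		if (ret) {
			ivpu_err(vdev, "Failed to enable MMU: %d\n", ret);
			return ret;
		}

		ivpu_dbg(vdev, MMU, "Init done\n");

		return 0;
	}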
740 int ivpu_mmu_enable(struct ivpu_device *vdev)
742 struct ivpu_mmu_info *mmu = vdev->mmu;
749 ret = ivpu_mmu_reset(vdev);
751 ivpu_err(vdev, "Failed to reset MMU: %d\n", ret);
755 ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
759 ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev);
763 ret = ivpu_mmu_cmdq_sync(vdev);
776 void ivpu_mmu_disable(struct ivpu_device *vdev)
778 struct ivpu_mmu_info *mmu = vdev->mmu;
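ivpu_mmu_enable() (source lines 740-774) re-runs the reset sequence and then invalidates all cached configuration and TLB state before leaving the MMU marked as running; ivpu_mmu_disable() (lines 776-783) only clears that state under the lock. A condensed sketch, again assuming an mmu->on flag:

	mutex_lock(&mmu->lock);
	mmu->on = true;					/* flag name assumed */

	ret = ivpu_mmu_reset(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to reset MMU: %d\n", ret);
		goto err;
	}

	ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
	if (ret)
		goto err;

	ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev);
	if (ret)
		goto err;

	ret = ivpu_mmu_cmdq_sync(vdev);
	if (ret)
		goto err;

	mutex_unlock(&mmu->lock);
	return 0;
err:
	mmu->on = false;
	mutex_unlock(&mmu->lock);
	return ret;

ivpu_mmu_disable() would then just take mmu->lock, clear mmu->on and release the lock, so later invalidate calls become no-ops until the MMU is enabled again.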
785 static void ivpu_mmu_dump_event(struct ivpu_device *vdev, u32 *event)
793 ivpu_err(vdev, "MMU EVTQ: 0x%x (%s) SSID: %d SID: %d, e[2] %08x, e[3] %08x, in addr: 0x%llx, fetch addr: 0x%llx\n",
797 static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
799 struct ivpu_mmu_queue *evtq = &vdev->mmu->evtq;
813 void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
819 ivpu_dbg(vdev, IRQ, "MMU event queue\n");
821 while ((event = ivpu_mmu_get_event(vdev)) != NULL) {
822 ivpu_mmu_dump_event(vdev, event);
828 ivpu_mmu_user_context_mark_invalid(vdev, ssid);
832 ivpu_pm_schedule_recovery(vdev);
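The event-queue interrupt handler (source lines 813-833) drains the hardware event queue: each event record is dumped, the SSID of the faulting context is extracted so that context can be marked invalid, and device recovery is scheduled. A sketch of the loop; the event layout (SSID in dword 0), the field macro, and the exact placement of the recovery call are assumptions:

	void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
	{
		u32 *event;
		u32 ssid;

		ivpu_dbg(vdev, IRQ, "MMU event queue\n");

		while ((event = ivpu_mmu_get_event(vdev)) != NULL) {
			ivpu_mmu_dump_event(vdev, event);

			/* The faulting substream (context) ID is carried in the
			 * event record (field position assumed). */
			ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
			ivpu_mmu_user_context_mark_invalid(vdev, ssid);
		}

		/* Whether recovery is conditional or per-event is not visible in
		 * the matches above; line 832 shows it is scheduled from here. */
		ivpu_pm_schedule_recovery(vdev);
	}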
835 void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
839 ivpu_dbg(vdev, IRQ, "MMU error\n");
849 ivpu_warn_ratelimited(vdev, "MMU MSI ABT write aborted\n");
852 ivpu_warn_ratelimited(vdev, "MMU PRIQ MSI ABT write aborted\n");
855 ivpu_warn_ratelimited(vdev, "MMU EVTQ MSI ABT write aborted\n");
858 ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n");
861 ivpu_err_ratelimited(vdev, "MMU PRIQ write aborted\n");
864 ivpu_err_ratelimited(vdev, "MMU EVTQ write aborted\n");
867 ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n");
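The global-error handler (source lines 835-870) appears to follow the standard SMMUv3 GERROR/GERRORN handshake, although the register reads are not among the vdev matches: read GERROR, XOR it with the last acknowledged GERRORN value to find newly active error bits, log each one (ratelimited), and acknowledge by writing the GERROR value back to GERRORN. A condensed sketch with assumed register and bit names:

	u32 gerror = REGV_RD32(VPU_37XX_HOST_MMU_GERROR);	/* names assumed */
	u32 gerrorn = REGV_RD32(VPU_37XX_HOST_MMU_GERRORN);
	u32 active = gerror ^ gerrorn;				/* bits toggled since last ack */

	if (!active)
		return;

	if (active & IVPU_MMU_GERROR_CMDQ)
		ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n");
	/* ... the remaining abort bits (lines 849-864) are handled the same way ... */

	/* Acknowledge everything seen in this pass. */
	REGV_WR32(VPU_37XX_HOST_MMU_GERRORN, gerror);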
872 int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
874 return ivpu_mmu_cd_add_user(vdev, ssid, pgtable->pgd_dma);
877 void ivpu_mmu_clear_pgtable(struct ivpu_device *vdev, int ssid)
879 ivpu_mmu_cd_add_user(vdev, ssid, 0); /* 0 will clear CD entry */
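Finally, ivpu_mmu_set_pgtable() and ivpu_mmu_clear_pgtable() (source lines 872-880) are the thin interface used by the context-management code: bind a context's page-table root to its SSID through a CD entry, and unbind it by writing a zero CD. A hypothetical caller; ctx->id and ctx->pgtable are illustrative names, not taken from the matches:

	/* On context creation: map the context's page tables behind its SSID. */
	ret = ivpu_mmu_set_pgtable(vdev, ctx->id, &ctx->pgtable);
	if (ret)
		return ret;

	/* ... while the context is live, mapping changes are followed by
	 * ivpu_mmu_invalidate_tlb(vdev, ctx->id) ... */

	/* On context destruction: clear the CD entry again. */
	ivpu_mmu_clear_pgtable(vdev, ctx->id);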