Lines Matching defs:bases
136 void __iomem **bases;
396 writel(command, iommu->bases[i] + RK_MMU_COMMAND);
416 rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
427 active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) & RK_MMU_STATUS_STALL_ACTIVE);
439 enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) & RK_MMU_STATUS_PAGING_ENABLED);
451 done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;
488 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
525 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
562 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
600 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
635 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
642 dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
667 void __iomem *base = iommu->bases[index];
744 int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
750 iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);
755 status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
774 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
781 int_mask = rk_iommu_read(iommu->bases[i], RK_MMU_INT_MASK);
783 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
795 rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
1266 rk_iommu_write(iommu->bases[i], RK_MMU_COMMAND, RK_MMU_CMD_ZAP_CACHE);
1292 rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
1293 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
1341 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dt_v2);
1343 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, rk_domain->dt_dma);
1345 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
1346 rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
1349 auto_gate = rk_iommu_read(iommu->bases[i], RK_MMU_AUTO_GATING);
1351 rk_iommu_write(iommu->bases[i], RK_MMU_AUTO_GATING, auto_gate);
1660 rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
1676 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
1677 rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
1679 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
1766 iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases), GFP_KERNEL);
1767 if (!iommu->bases) {
1776 iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
1777 if (IS_ERR(iommu->bases[i])) {
1783 return PTR_ERR(iommu->bases[0]);
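
Every hit above follows the same pattern: `bases` holds one MMIO base pointer per MMU instance behind the IOMMU (the `rk_iommu`/`RK_MMU_*` identifiers suggest the Rockchip IOMMU driver), and register accesses loop over all instances. Below is a minimal sketch of that idiom, not the driver's exact code: the `struct rk_iommu` layout, the `num_mmu` field, and the register offset/bit values are assumptions made for illustration.

	#include <linux/bits.h>
	#include <linux/io.h>
	#include <linux/types.h>

	/* Register offsets/bits used by the sketch; values are assumed. */
	#define RK_MMU_DTE_ADDR			0x00
	#define RK_MMU_STATUS			0x04
	#define RK_MMU_COMMAND			0x08
	#define RK_MMU_STATUS_PAGING_ENABLED	BIT(0)

	struct rk_iommu {
		struct device *dev;
		void __iomem **bases;	/* one MMIO base per MMU instance */
		int num_mmu;		/* instances successfully ioremapped */
	};

	static u32 rk_iommu_read(void __iomem *base, u32 offset)
	{
		return readl(base + offset);
	}

	static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
	{
		writel(value, base + offset);
	}

	/* Broadcast a command write to every instance (cf. line 396). */
	static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
	{
		int i;

		for (i = 0; i < iommu->num_mmu; i++)
			writel(command, iommu->bases[i] + RK_MMU_COMMAND);
	}

	/*
	 * A condition is reported true only if it holds on every instance,
	 * matching the "&=" accumulation at lines 427, 439 and 451.
	 */
	static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
	{
		bool enable = true;
		int i;

		for (i = 0; i < iommu->num_mmu; i++)
			enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
				     RK_MMU_STATUS_PAGING_ENABLED);

		return enable;
	}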
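The hits around lines 744-795 come from the interrupt path: each instance's interrupt status is read, page faults are serviced, and the raised bits are cleared, all through `bases[i]`. The following handler is a simplified, hypothetical reconstruction built on the helpers and struct from the previous sketch; the `rk_iommu_irq` name, the extra offsets, the command encodings, and the fault-bit layout are assumptions, and real IOVA decoding and error reporting are omitted.

	#include <linux/device.h>
	#include <linux/interrupt.h>

	/* Additional offsets/commands for the fault path; values are assumed. */
	#define RK_MMU_PAGE_FAULT_ADDR		0x0C
	#define RK_MMU_INT_CLEAR		0x18
	#define RK_MMU_INT_STATUS		0x20
	#define RK_MMU_IRQ_PAGE_FAULT		0x01
	#define RK_MMU_CMD_ZAP_CACHE		4
	#define RK_MMU_CMD_PAGE_FAULT_DONE	5

	static void rk_iommu_base_command(void __iomem *base, u32 command)
	{
		writel(command, base + RK_MMU_COMMAND);
	}

	static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
	{
		struct rk_iommu *iommu = dev_id;
		irqreturn_t ret = IRQ_NONE;
		int i;

		for (i = 0; i < iommu->num_mmu; i++) {
			u32 int_status;

			int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
			if (!int_status)
				continue;

			ret = IRQ_HANDLED;

			if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
				/* Log the faulting IOVA, flush stale TLB entries,
				 * then tell the MMU the fault has been handled. */
				u32 iova = rk_iommu_read(iommu->bases[i],
							 RK_MMU_PAGE_FAULT_ADDR);

				dev_err_ratelimited(iommu->dev,
						    "page fault at iova %#x\n", iova);
				rk_iommu_base_command(iommu->bases[i],
						      RK_MMU_CMD_ZAP_CACHE);
				rk_iommu_base_command(iommu->bases[i],
						      RK_MMU_CMD_PAGE_FAULT_DONE);
			}

			/* Acknowledge whatever was raised on this instance. */
			rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
		}

		return ret;
	}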
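Finally, the `devm_kcalloc`/`devm_ioremap_resource` hits at lines 1766-1783 are the probe-time setup of the array. A plausible reconstruction follows, again as a sketch rather than the driver's exact code: the helper name and the `num_res` parameter are invented, and instances whose resource is missing or fails to map are simply skipped, which is why the `num_mmu` success count (not `num_res`) drives the loops shown earlier.

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>
	#include <linux/slab.h>

	/* Hypothetical helper; in the driver this logic lives in the probe routine. */
	static int rk_iommu_map_bases(struct platform_device *pdev,
				      struct rk_iommu *iommu, int num_res)
	{
		struct device *dev = &pdev->dev;
		int i;

		iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
					    GFP_KERNEL);
		if (!iommu->bases)
			return -ENOMEM;

		for (i = 0; i < num_res; i++) {
			struct resource *res;

			res = platform_get_resource(pdev, IORESOURCE_MEM, i);
			if (!res)
				continue;
			iommu->bases[i] = devm_ioremap_resource(dev, res);
			if (IS_ERR(iommu->bases[i]))
				continue;
			iommu->num_mmu++;
		}

		/* Nothing mapped: surface the first mapping error (cf. line 1783). */
		if (iommu->num_mmu == 0)
			return PTR_ERR(iommu->bases[0]);

		return 0;
	}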