Lines Matching refs:iova

244  * rk3288 iova (IOMMU Virtual Address) format
260 static u32 rk_iova_dte_index(dma_addr_t iova)
262 return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
265 static u32 rk_iova_pte_index(dma_addr_t iova)
267 return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
270 static u32 rk_iova_page_offset(dma_addr_t iova)
272 return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
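
The three helpers above (lines 260-272) split a 32-bit iova into a directory index, a table index and a page offset. A minimal sketch of the mask/shift constants they imply, assuming the rk3288 layout of a 10-bit DTE index, 10-bit PTE index and 12-bit page offset; the concrete values below are inferred from that split, not copied from the listing:

/* Assumed rk3288 iova layout: 31:22 DTE index | 21:12 PTE index | 11:0 offset */
#define RK_IOVA_DTE_MASK    0xffc00000  /* selects one of 1024 directory slots */
#define RK_IOVA_DTE_SHIFT   22
#define RK_IOVA_PTE_MASK    0x003ff000  /* selects one of 1024 page-table slots */
#define RK_IOVA_PTE_SHIFT   12
#define RK_IOVA_PAGE_MASK   0x00000fff  /* byte offset inside a 4 KiB page */
#define RK_IOVA_PAGE_SHIFT  0

/*
 * Worked example: iova 0x12345678 decodes to
 *   dte_index   = 0x048  (0x12345678 >> 22)
 *   pte_index   = 0x345  ((0x12345678 >> 12) & 0x3ff)
 *   page_offset = 0x678  (0x12345678 & 0xfff)
 */
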
307 dma_addr_t iova;
309 for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
310 rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
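
Lines 307-310 are the inner loop of rk_iommu_zap_lines(): one RK_MMU_ZAP_ONE_LINE write per 4 KiB page, repeated for every MMU instance behind this IOMMU. A sketch of the surrounding function, assuming iommu->num_mmu counts the register banks in iommu->bases[] (that field name does not appear in the listing):

static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
                               size_t size)
{
        dma_addr_t iova_end = iova_start + size;
        int i;

        /* Evict one TLB line per small page, on every MMU of this master */
        for (i = 0; i < iommu->num_mmu; i++) {
                dma_addr_t iova;

                for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
                        rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE,
                                       iova);
        }
}
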
473 static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
487 dte_index = rk_iova_dte_index(iova);
488 pte_index = rk_iova_pte_index(iova);
489 page_offset = rk_iova_page_offset(iova);
512 dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
513 &iova, dte_index, pte_index, page_offset);
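
log_iova() (lines 473-513) re-derives the three indices for the faulting address and, in the part the search does not match, follows the hardware's own directory pointer to find the dte and pte it hit. A compressed sketch of the middle of that function (declarations trimmed), assuming a RK_MMU_DTE_ADDR register plus rk_dte_is_pt_valid() and rk_dte_pt_address() helpers exist under those names:

        u32 mmu_dte_addr, dte, pte = 0;
        phys_addr_t pte_addr_phys;

        /* Start from the directory base the hardware is actually using */
        mmu_dte_addr = rk_iommu_read(iommu->bases[index], RK_MMU_DTE_ADDR);
        dte = *(u32 *)phys_to_virt((phys_addr_t)mmu_dte_addr +
                                   dte_index * sizeof(u32));

        if (rk_dte_is_pt_valid(dte)) {
                pte_addr_phys = rk_dte_pt_address(dte) +
                                pte_index * sizeof(u32);
                pte = *(u32 *)phys_to_virt(pte_addr_phys);
        }
        /* dte, pte and the three indices feed the dev_err at line 512 */
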
525 dma_addr_t iova;
542 iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);
552 &iova,
555 log_iova(iommu, i, iova);
563 report_iommu_fault(iommu->domain, iommu->dev, iova,
573 dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
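
Lines 525-573 come from the IRQ handler: the faulting address is read from RK_MMU_PAGE_FAULT_ADDR, decoded by log_iova(), and passed to report_iommu_fault(); a separate BUS_ERROR bit reuses the same address. A condensed sketch of that path, with the interrupt and status register names (RK_MMU_INT_STATUS, RK_MMU_INT_CLEAR, RK_MMU_STATUS, RK_MMU_IRQ_PAGE_FAULT, RK_MMU_IRQ_BUS_ERROR, RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) assumed rather than taken from the listing:

static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
        struct rk_iommu *iommu = dev_id;
        irqreturn_t ret = IRQ_NONE;
        dma_addr_t iova;
        u32 int_status;
        int i;

        for (i = 0; i < iommu->num_mmu; i++) {
                int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
                if (!int_status)
                        continue;       /* this MMU instance did not fault */

                ret = IRQ_HANDLED;
                iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);

                if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
                        u32 status = rk_iommu_read(iommu->bases[i],
                                                   RK_MMU_STATUS);
                        int flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
                                        IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

                        dev_err(iommu->dev, "Page fault at %pad\n", &iova);
                        log_iova(iommu, i, iova);

                        /* Give any registered handler a chance to see it */
                        if (iommu->domain)
                                report_iommu_fault(iommu->domain, iommu->dev,
                                                   iova, flags);
                }

                if (int_status & RK_MMU_IRQ_BUS_ERROR)
                        dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n",
                                &iova);

                rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
        }

        return ret;
}
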
590 dma_addr_t iova)
600 dte = rk_domain->dt[rk_iova_dte_index(iova)];
606 pte = page_table[rk_iova_pte_index(iova)];
610 phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
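
Lines 590-610 are the core of the iova-to-phys walk: directory entry, then page-table entry, then the page offset from line 272. A sketch of the full lookup, assuming rk_dte_is_pt_valid(), rk_dte_pt_address(), rk_pte_is_page_valid() and a dt_lock spinlock in the domain (names not present in the listing):

static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
                                         dma_addr_t iova)
{
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        phys_addr_t pt_phys, phys = 0;
        unsigned long flags;
        u32 dte, pte;
        u32 *page_table;

        spin_lock_irqsave(&rk_domain->dt_lock, flags);

        /* Level 1: directory entry selected by iova bits 31:22 */
        dte = rk_domain->dt[rk_iova_dte_index(iova)];
        if (!rk_dte_is_pt_valid(dte))
                goto out;

        /* Level 2: page-table entry selected by iova bits 21:12 */
        pt_phys = rk_dte_pt_address(dte);
        page_table = (u32 *)phys_to_virt(pt_phys);
        pte = page_table[rk_iova_pte_index(iova)];
        if (!rk_pte_is_page_valid(pte))
                goto out;

        /* Page base plus the low 12 bits of the iova */
        phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
out:
        spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

        return phys;
}
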
618 dma_addr_t iova, size_t size)
623 /* shootdown these iova from all iommus using this domain */
638 rk_iommu_zap_lines(iommu, iova, size);
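
rk_iommu_zap_iova() (lines 618-638) walks every IOMMU currently attached to the domain and calls rk_iommu_zap_lines() on each, so a mapping change is flushed everywhere it may be cached. A sketch, assuming the domain keeps its attached IOMMUs on an iommus list protected by iommus_lock and that struct rk_iommu has a node list head (all three names are assumptions):

static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
                              dma_addr_t iova, size_t size)
{
        struct rk_iommu *iommu;
        unsigned long flags;

        /* shootdown these iova from all iommus using this domain */
        spin_lock_irqsave(&rk_domain->iommus_lock, flags);
        list_for_each_entry(iommu, &rk_domain->iommus, node)
                rk_iommu_zap_lines(iommu, iova, size);
        spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}
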
647 dma_addr_t iova, size_t size)
649 rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
651 rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
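
rk_iommu_zap_iova_first_last() (lines 647-651) shoots down only the two endpoints of a newly mapped range: only the first and last small page can share a dte/pte cacheline with an existing mapping, so zapping everything in between would be wasted work. The line the search skips between 649 and 651 is presumably a size guard; a sketch:

static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
                                         dma_addr_t iova, size_t size)
{
        rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);

        /* A single-page range has no distinct "last" page to zap */
        if (size > SPAGE_SIZE)
                rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
                                  SPAGE_SIZE);
}
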
656 dma_addr_t iova)
665 dte_index = rk_iova_dte_index(iova);
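
rk_dte_get_page_table() (lines 656-665) returns the second-level table covering an iova, allocating and installing it on first use. A condensed sketch, assuming rk_dte_is_pt_valid(), rk_dte_pt_address(), rk_mk_dte(), a rk_table_flush() cache-maintenance helper, a dma_dev device pointer and a dt_dma field in the domain (none of these appear in the listing):

static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
                                  dma_addr_t iova)
{
        u32 dte_index = rk_iova_dte_index(iova);
        u32 dte = rk_domain->dt[dte_index];
        u32 *page_table;
        dma_addr_t pt_dma;

        if (rk_dte_is_pt_valid(dte))
                return (u32 *)phys_to_virt(rk_dte_pt_address(dte));

        /* First mapping under this dte: allocate a zeroed page table */
        page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
        if (!page_table)
                return ERR_PTR(-ENOMEM);

        pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE,
                                DMA_TO_DEVICE);
        if (dma_mapping_error(dma_dev, pt_dma)) {
                free_page((unsigned long)page_table);
                return ERR_PTR(-ENOMEM);
        }

        /* Publish the new table through its directory entry */
        rk_domain->dt[dte_index] = rk_mk_dte(pt_dma);
        rk_table_flush(rk_domain,
                       rk_domain->dt_dma + dte_index * sizeof(u32), 1);

        return page_table;
}
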
716 dma_addr_t pte_dma, dma_addr_t iova,
739 * Zap the first and last iova to evict from iotlb any previously
741 * We only zap the first and last iova, since only they could have
744 rk_iommu_zap_iova_first_last(rk_domain, iova, size);
752 iova += pte_count * SPAGE_SIZE;
754 pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
755 &iova, &page_phys, &paddr, prot);
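
rk_iommu_map_iova() (lines 716-755) fills consecutive ptes for the range, flushes them, and then zaps only the endpoints (the comment at lines 739-744); if a slot is already valid it unwinds and logs the collision printed at lines 754-755. A sketch, with rk_pte_is_page_valid(), rk_mk_pte(), rk_table_flush(), rk_iommu_unmap_iova() and the -EADDRINUSE return assumed:

static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
                             dma_addr_t pte_dma, dma_addr_t iova,
                             phys_addr_t paddr, size_t size, int prot)
{
        unsigned int pte_total = size / SPAGE_SIZE;
        unsigned int pte_count;
        phys_addr_t page_phys;

        for (pte_count = 0; pte_count < pte_total; pte_count++) {
                if (rk_pte_is_page_valid(pte_addr[pte_count]))
                        goto unwind;    /* refuse to silently remap */

                pte_addr[pte_count] = rk_mk_pte(paddr, prot);
                paddr += SPAGE_SIZE;
        }

        rk_table_flush(rk_domain, pte_dma, pte_total);

        /*
         * Zap the first and last iova to evict from iotlb any previously
         * mapped cachelines holding stale values for its dte and pte.
         * We only zap the first and last iova, since only they could have
         * dte or pte shared with an existing mapping.
         */
        rk_iommu_zap_iova_first_last(rk_domain, iova, size);

        return 0;

unwind:
        /* Undo the ptes written so far, then report the collision */
        rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
                            pte_count * SPAGE_SIZE);

        iova += pte_count * SPAGE_SIZE;
        page_phys = rk_pte_page_address(pte_addr[pte_count]);
        pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
               &iova, &page_phys, &paddr, prot);

        return -EADDRINUSE;
}
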
765 dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
773 * pgsize_bitmap specifies iova sizes that fit in one page table
776 * Since iommu_map() guarantees that both iova and size will be
779 page_table = rk_dte_get_page_table(rk_domain, iova);
785 dte_index = rk_domain->dt[rk_iova_dte_index(iova)];
786 pte_index = rk_iova_pte_index(iova);
789 ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
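
rk_iommu_map() (lines 765-789) takes the domain lock, materialises the page table for the iova, and hands the pte slot plus its DMA address to rk_iommu_map_iova(). Because pgsize_bitmap restricts any single map call to one page table (comment at lines 773-776), the whole range lives under a single dte. A sketch using the older .map prototype implied by the listing (the iommu_ops callback signature has changed across kernel versions), and keeping the listing's detail that dte_index at line 785 actually holds the dte value read from the directory:

static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
                        phys_addr_t paddr, size_t size, int prot)
{
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
        u32 *page_table, *pte_addr;
        u32 dte_index, pte_index;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&rk_domain->dt_lock, flags);

        /*
         * pgsize_bitmap limits iova and size to one page table
         * (1024 x 4 KiB = 4 MiB), so the range never straddles a dte.
         */
        page_table = rk_dte_get_page_table(rk_domain, iova);
        if (IS_ERR(page_table)) {
                spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
                return PTR_ERR(page_table);
        }

        /* Despite the name, dte_index holds the dte value itself */
        dte_index = rk_domain->dt[rk_iova_dte_index(iova)];
        pte_index = rk_iova_pte_index(iova);
        pte_addr = &page_table[pte_index];
        pte_dma = rk_dte_pt_address(dte_index) + pte_index * sizeof(u32);
        ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
                                paddr, size, prot);

        spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

        return ret;
}
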
802 dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
811 * pgsize_bitmap specifies iova sizes that fit in one page table
814 * Since iommu_unmap() guarantees that both iova and size will be
817 dte = rk_domain->dt[rk_iova_dte_index(iova)];
818 /* Just return 0 if iova is unmapped */
825 pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
826 pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
831 /* Shootdown iotlb entries for iova range that was just unmapped */
832 rk_iommu_zap_iova(rk_domain, iova, unmap_size);
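
rk_iommu_unmap() (lines 802-832) mirrors the map path: look up the dte, return 0 if nothing was ever mapped there (line 818), clear the ptes, and shoot down the iotlb for the range that was actually unmapped (lines 831-832). A sketch under the same assumptions as above (rk_dte_is_pt_valid(), rk_dte_pt_address(), rk_iommu_unmap_iova(), dt_lock):

static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
                             size_t size)
{
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
        unsigned long flags;
        phys_addr_t pt_phys;
        size_t unmap_size;
        u32 dte, *pte_addr;

        spin_lock_irqsave(&rk_domain->dt_lock, flags);

        dte = rk_domain->dt[rk_iova_dte_index(iova)];
        /* Just return 0 if iova is unmapped */
        if (!rk_dte_is_pt_valid(dte)) {
                spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
                return 0;
        }

        pt_phys = rk_dte_pt_address(dte);
        pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
        pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
        unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);

        spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

        /* Shootdown iotlb entries for iova range that was just unmapped */
        rk_iommu_zap_iova(rk_domain, iova, unmap_size);

        return unmap_size;
}
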