Lines matching refs:iova
350 * rk3288 iova (IOMMU Virtual Address) format
366 static u32 rk_iova_dte_index(dma_addr_t iova)
368 return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
371 static u32 rk_iova_pte_index(dma_addr_t iova)
373 return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
376 static u32 rk_iova_page_offset(dma_addr_t iova)
378 return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
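
The three helpers above split a 32-bit iova into a directory index (DTE), a page-table index (PTE), and an offset within the 4 KiB page. The sketch below models that split in plain, user-space C; the mask and shift values mirror the RK_IOVA_* constants of the mainline rk3288 driver (DTE in bits 31:22, PTE in bits 21:12, offset in bits 11:0) and are stated here as assumptions, since this listing does not show their definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed rk3288 layout: 10-bit DTE index, 10-bit PTE index,
     * 12-bit offset within a 4 KiB page. */
    #define RK_IOVA_DTE_MASK   0xffc00000u
    #define RK_IOVA_DTE_SHIFT  22
    #define RK_IOVA_PTE_MASK   0x003ff000u
    #define RK_IOVA_PTE_SHIFT  12
    #define RK_IOVA_PAGE_MASK  0x00000fffu

    int main(void)
    {
            uint32_t iova = 0x12345678;

            printf("dte_index:   %#05x\n", (iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT);
            printf("pte_index:   %#05x\n", (iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT);
            printf("page_offset: %#05x\n", iova & RK_IOVA_PAGE_MASK);
            return 0;
    }

For iova 0x12345678 this prints dte_index 0x048, pte_index 0x345, and page_offset 0x678.
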
413 dma_addr_t iova;
415 for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE) {
416 rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
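
RK_MMU_ZAP_ONE_LINE invalidates a single TLB line, so the loop at line 415 walks the range one 4 KiB page (SPAGE_SIZE) at a time and, via the enclosing loop over iommu->bases[i], repeats the writes for every MMU instance behind the device. A minimal restatement of that shape, with a plain volatile store standing in for rk_iommu_write():

    #include <stdint.h>

    /* Sketch only: one register write is assumed to invalidate the TLB
     * line covering one 4 KiB page, as the SPAGE_SIZE stride suggests. */
    static void zap_lines_model(volatile uint32_t *zap_reg,
                                uint32_t iova_start, uint32_t iova_end)
    {
            uint32_t iova;

            for (iova = iova_start; iova < iova_end; iova += 4096u)
                    *zap_reg = iova;    /* RK_MMU_ZAP_ONE_LINE */
    }
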
665 static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
679 dte_index = rk_iova_dte_index(iova);
680 pte_index = rk_iova_pte_index(iova);
681 page_offset = rk_iova_page_offset(iova);
717 dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n", &iova, dte_index,
730 dma_addr_t iova;
750 iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);
758 dev_err(iommu->dev, "Page fault at %pad of type %s\n", &iova,
761 log_iova(iommu, i, iova);
769 report_iommu_fault(iommu->domain, iommu->dev, iova, status);
788 dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
805 static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
815 dte = rk_domain->dt[rk_iova_dte_index(iova)];
822 pte = page_table[rk_iova_pte_index(iova)];
827 phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
834 static phys_addr_t rk_iommu_iova_to_phys_v2(struct iommu_domain *domain, dma_addr_t iova)
844 dte = rk_domain->dt[rk_iova_dte_index(iova)];
851 pte = page_table[rk_iova_pte_index(iova)];
856 phys = rk_pte_page_address_v2(pte) + rk_iova_page_offset(iova);
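
Both iova_to_phys variants above do the same two-level walk: fetch the DTE, follow it to a page table, fetch the PTE, then add the page offset. Below is a minimal user-space model of the v1 walk, assuming the usual v1 encodings (descriptor address in bits 31:12, valid flag in bit 0); a single static page table stands in for the phys_to_virt() lookup the driver performs, so the DTE's address bits are ignored in this toy.

    #include <stdint.h>
    #include <stddef.h>

    /* Toy tables: dt[] is the 1024-entry directory, pt[] one page table. */
    static uint32_t dt[1024];
    static uint32_t pt[1024];

    static uint64_t iova_to_phys_model(uint32_t iova)
    {
            uint32_t dte = dt[iova >> 22];
            uint32_t pte;

            if (!(dte & 1u))                 /* DTE not valid: unmapped */
                    return 0;
            /* The driver resolves pt from the DTE's physical address;
             * the toy keeps one static table instead. */
            pte = pt[(iova >> 12) & 0x3ffu];
            if (!(pte & 1u))                 /* PTE not valid: unmapped */
                    return 0;
            return (pte & 0xfffff000u) + (iova & 0xfffu);
    }

    int main(void)
    {
            dt[0x048] = 1u;                  /* mark the directory slot valid */
            pt[0x345] = 0x80000000u | 1u;    /* page at "phys" 0x80000000 */
            return iova_to_phys_model(0x12345678) != 0x80000678u;
    }
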
863 static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain, dma_addr_t iova, size_t size)
868 /* shootdown these iova from all iommus using this domain */
884 rk_iommu_zap_lines(iommu, iova, size);
892 static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain, dma_addr_t iova, size_t size)
894 rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
896 rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE, SPAGE_SIZE);
900 static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain, dma_addr_t iova)
909 dte_index = rk_iova_dte_index(iova);
938 static u32 *rk_dte_get_page_table_v2(struct rk_iommu_domain *rk_domain, dma_addr_t iova)
947 dte_index = rk_iova_dte_index(iova);
997 static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr, dma_addr_t pte_dma, dma_addr_t iova,
1021 * Zap the first and last iova to evict from iotlb any previously
1023 * We only zap the first and last iova, since only they could have
1028 rk_iommu_zap_iova_first_last(rk_domain, iova, size);
1036 iova += pte_count * SPAGE_SIZE;
1038 pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n", &iova, &page_phys, &paddr, prot);
1043 static int rk_iommu_map_iova_v2(struct rk_iommu_domain *rk_domain, u32 *pte_addr, dma_addr_t pte_dma, dma_addr_t iova,
1067 * Zap the first and last iova to evict from iotlb any previously
1069 * We only zap the first and last iova, since only they could have
1074 rk_iommu_zap_iova_first_last(rk_domain, iova, size);
1082 iova += pte_count * SPAGE_SIZE;
1084 pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n", &iova, &page_phys, &paddr, prot);
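
Both map_iova variants fill size / SPAGE_SIZE consecutive PTEs and fail without side effects if any slot is already valid, which is what the pr_err lines at 1038 and 1084 report. A sketch of that loop's contract under the v1 encoding assumptions used above (page address in bits 31:12, valid flag in bit 0); mk_pte_bits is a hypothetical stand-in for the protection flags rk_mk_pte() would fold in:

    #include <stdint.h>
    #include <stddef.h>

    #define SPAGE_SIZE 4096u

    /* Returns 0 on success, -1 if a slot was already mapped; on failure
     * every PTE written so far is rolled back, leaving the table unchanged. */
    static int map_iova_model(uint32_t *pte_addr, uint64_t paddr,
                              size_t size, uint32_t mk_pte_bits)
    {
            size_t pte_count, pte_total = size / SPAGE_SIZE;

            for (pte_count = 0; pte_count < pte_total; pte_count++) {
                    if (pte_addr[pte_count] & 1u)    /* already mapped */
                            goto unwind;
                    pte_addr[pte_count] = ((uint32_t)paddr & 0xfffff000u)
                                          | mk_pte_bits | 1u;
                    paddr += SPAGE_SIZE;
            }
            return 0;
    unwind:
            while (pte_count--)
                    pte_addr[pte_count] = 0;
            return -1;
    }

In the driver the success path additionally syncs the PTE range for the device and calls rk_iommu_zap_iova_first_last(): only the first and last page of a new mapping can share a cached TLB line with an existing mapping, which is why the comments at lines 1021-1028 and 1067-1074 zap just those two.
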
1094 dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
1102 * pgsize_bitmap specifies iova sizes that fit in one page table
1105 * Since iommu_map() guarantees that both iova and size will be
1108 page_table = rk_dte_get_page_table(rk_domain, iova);
1114 dte = rk_domain->dt[rk_iova_dte_index(iova)];
1115 pte_index = rk_iova_pte_index(iova);
1118 ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova, paddr, size, prot);
1130 dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
1138 * pgsize_bitmap specifies iova sizes that fit in one page table
1141 * Since iommu_map() guarantees that both iova and size will be
1144 page_table = rk_dte_get_page_table_v2(rk_domain, iova);
1150 dte = rk_domain->dt[rk_iova_dte_index(iova)];
1151 pte_index = rk_iova_pte_index(iova);
1154 ret = rk_iommu_map_iova_v2(rk_domain, pte_addr, pte_dma, iova, paddr, size, prot);
1166 dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
1175 * pgsize_bitmap specifies iova sizes that fit in one page table
1178 * Since iommu_unmap() guarantees that both iova and size will be
1181 dte = rk_domain->dt[rk_iova_dte_index(iova)];
1182 /* Just return 0 if iova is unmapped */
1189 pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
1190 pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
1195 /* Shootdown iotlb entries for iova range that was just unmapped */
1196 rk_iommu_zap_iova(rk_domain, iova, unmap_size);
1206 dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
1215 * pgsize_bitmap specifies iova sizes that fit in one page table
1218 * Since iommu_unmap() guarantees that both iova and size will be
1221 dte = rk_domain->dt[rk_iova_dte_index(iova)];
1222 /* Just return 0 if iova is unmapped */
1229 pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
1230 pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
1235 /* Shootdown iotlb entries for iova range that was just unmapped */
1238 rk_iommu_zap_iova(rk_domain, iova, unmap_size);
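
The unmap paths mirror the walk: look up the DTE, return 0 if the iova was never mapped, clear the covered PTEs, and shoot down the iotlb range. A compact model, reusing the toy tables and v1 encodings from the walk sketch; the memset over the PTE span stands in for the driver's PTE-clearing loop:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    #define SPAGE_SIZE 4096u

    static uint32_t dt_u[1024], pt_u[1024];  /* toy directory + one table */

    static size_t unmap_model(uint32_t iova, size_t size)
    {
            if (!(dt_u[iova >> 22] & 1u))
                    return 0;    /* iova unmapped: nothing to clear */
            memset(&pt_u[(iova >> 12) & 0x3ffu], 0,
                   (size / SPAGE_SIZE) * sizeof(uint32_t));
            /* The driver then calls rk_iommu_zap_iova(rk_domain, iova,
             * unmap_size) so no MMU keeps a stale translation cached. */
            return size;
    }
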