Lines matching refs: iommu
13 #include <linux/dma-iommu.h>
18 #include <linux/iommu.h>
107 struct iommu_device iommu;
109 struct iommu_domain *domain; /* domain to which iommu is attached */
115 struct rk_iommu *iommu;
134 * The Rockchip rk3288 iommu uses a 2-level page table.
143 * Each iommu device has a MMU_DTE_ADDR register that contains the physical
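
For orientation, the matched comments describe a two-level walk: a 32-bit iova splits into a 10-bit directory (DTE) index, a 10-bit page-table (PTE) index and a 12-bit page offset, and MMU_DTE_ADDR holds the physical address of the directory table. A minimal sketch of that decomposition follows; the mask/shift names and values are reconstructed for illustration, not taken from the matched lines.

/*
 * Sketch of the assumed rk3288 iova layout:
 *
 *  31        22 21        12 11           0
 * +------------+------------+--------------+
 * | DTE index  | PTE index  | Page offset  |
 * +------------+------------+--------------+
 */
#define RK_IOVA_DTE_MASK   0xffc00000	/* bits 31:22 select the DTE in the DT */
#define RK_IOVA_DTE_SHIFT  22
#define RK_IOVA_PTE_MASK   0x003ff000	/* bits 21:12 select the PTE in the PT */
#define RK_IOVA_PTE_SHIFT  12
#define RK_IOVA_PAGE_MASK  0x00000fff	/* bits 11:0 offset into the 4 KiB page */

static u32 rk_iova_dte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}

static u32 rk_iova_page_offset(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PAGE_MASK);
}
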
285 static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
289 for (i = 0; i < iommu->num_mmu; i++)
290 writel(command, iommu->bases[i] + RK_MMU_COMMAND);
297 static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
306 for (i = 0; i < iommu->num_mmu; i++) {
310 rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
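
The two rk_iommu_zap_lines() matches hint at the TLB shoot-down loop: for every MMU instance, each 4 KiB page in the range gets its own RK_MMU_ZAP_ONE_LINE write. A sketch of how they plausibly fit together; SPAGE_SIZE, the 4 KiB small-page size, is an assumption here.

/* Sketch of the per-line TLB invalidation loop; SPAGE_SIZE is assumed. */
static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
			       size_t size)
{
	dma_addr_t iova_end = iova_start + size;
	int i;

	for (i = 0; i < iommu->num_mmu; i++) {
		dma_addr_t iova;

		/* Invalidate one TLB line per 4 KiB page in the range */
		for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
	}
}
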
314 static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
319 for (i = 0; i < iommu->num_mmu; i++)
320 active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
326 static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
331 for (i = 0; i < iommu->num_mmu; i++)
332 enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
338 static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
343 for (i = 0; i < iommu->num_mmu; i++)
344 done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;
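
The three predicates matched above share one shape: the result is accumulated across every MMU instance behind the same IOMMU, so a state change only counts once all instances report it. A sketch of that pattern for the stall case; RK_MMU_STATUS_STALL_ACTIVE is an assumed status-bit name.

/* Sketch of the "AND the status across all MMU instances" pattern; the
 * status bit name is assumed from context.
 */
static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
	bool active = true;
	int i;

	/* One rk_iommu can front several identical MMU instances (bases[]);
	 * stall only counts as active once every instance reports it.
	 */
	for (i = 0; i < iommu->num_mmu; i++)
		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
			     RK_MMU_STATUS_STALL_ACTIVE);

	return active;
}
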
349 static int rk_iommu_enable_stall(struct rk_iommu *iommu)
354 if (rk_iommu_is_stall_active(iommu))
358 if (!rk_iommu_is_paging_enabled(iommu))
361 rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
363 ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
367 for (i = 0; i < iommu->num_mmu; i++)
368 dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
369 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
374 static int rk_iommu_disable_stall(struct rk_iommu *iommu)
379 if (!rk_iommu_is_stall_active(iommu))
382 rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
384 ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
388 for (i = 0; i < iommu->num_mmu; i++)
389 dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
390 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
395 static int rk_iommu_enable_paging(struct rk_iommu *iommu)
400 if (rk_iommu_is_paging_enabled(iommu))
403 rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
405 ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
409 for (i = 0; i < iommu->num_mmu; i++)
410 dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
411 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
416 static int rk_iommu_disable_paging(struct rk_iommu *iommu)
421 if (!rk_iommu_is_paging_enabled(iommu))
424 rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);
426 ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
430 for (i = 0; i < iommu->num_mmu; i++)
431 dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
432 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
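
All four stall/paging toggles above follow the same command-then-poll recipe: return early if already in the target state, write the command to every instance, then poll the matching predicate with readx_poll_timeout() and dump RK_MMU_STATUS on timeout. A sketch for the enable-stall case; the poll period/timeout constants are assumptions, not taken from the matches.

/* Sketch of the shared toggle pattern, shown for enable-stall; the poll
 * constants below are assumed for illustration.
 */
#define RK_MMU_POLL_PERIOD_US	100
#define RK_MMU_POLL_TIMEOUT_US	1000

static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_stall_active(iommu))
		return 0;

	/* Stalling only makes sense while paging is enabled */
	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev,
				"Enable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

The disable variants poll for !val instead, and the paging pair uses rk_iommu_is_paging_enabled() as the poll condition, as the matched lines show.
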
437 static int rk_iommu_force_reset(struct rk_iommu *iommu)
443 if (iommu->reset_disabled)
450 for (i = 0; i < iommu->num_mmu; i++) {
451 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
453 dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
455 dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
460 rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
462 ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
466 dev_err(iommu->dev, "FORCE_RESET command timed out\n");
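
The force-reset matches outline a two-step routine: first prove MMU_DTE_ADDR is usable by writing DTE_ADDR_DUMMY and reading it back, then issue RK_MMU_CMD_FORCE_RESET and poll rk_iommu_is_reset_done(). A compressed sketch under those assumptions; RK_DTE_PT_ADDRESS_MASK and the poll constants (as in the earlier sketch) are assumed.

/* Compressed sketch of the force-reset flow; RK_DTE_PT_ADDRESS_MASK and the
 * poll constants are assumptions for illustration.
 */
static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
	u32 dte_addr;
	bool val;
	int ret, i;

	if (iommu->reset_disabled)
		return 0;

	/* Verify MMU_DTE_ADDR reads back what was written before trusting it */
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);

		dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
		if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
			return -EFAULT;
		}
	}

	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);

	/* Reset is done once every instance reads back MMU_DTE_ADDR == 0 */
	ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		dev_err(iommu->dev, "FORCE_RESET command timed out\n");

	return ret;
}
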
473 static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
475 void __iomem *base = iommu->bases[index];
512 dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
514 dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
522 struct rk_iommu *iommu = dev_id;
529 err = pm_runtime_get_if_in_use(iommu->dev);
533 if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
536 for (i = 0; i < iommu->num_mmu; i++) {
537 int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
542 iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);
547 status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
551 dev_err(iommu->dev, "Page fault at %pad of type %s\n",
555 log_iova(iommu, i, iova);
562 if (iommu->domain)
563 report_iommu_fault(iommu->domain, iommu->dev, iova,
566 dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
568 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
569 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
573 dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
576 dev_err(iommu->dev, "unexpected int_status: %#08x\n",
579 rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
582 clk_bulk_disable(iommu->num_clocks, iommu->clocks);
585 pm_runtime_put(iommu->dev);
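
Taken together, the interrupt-handler matches describe this flow: take a runtime-PM reference and enable the clocks, then for each MMU instance read RK_MMU_INT_STATUS, log and report page faults, zap the TLB, acknowledge the fault, and finally clear the interrupt. A condensed sketch of the per-instance work, with the runtime-PM/clock bracketing from the matches elided; the helper name and the RK_MMU_IRQ_*/RK_MMU_STATUS_* flag names are assumptions.

/* Condensed sketch of the fault handling done per MMU instance inside the
 * IRQ handler; rk_iommu_handle_faults() is a hypothetical helper and the
 * flag names marked below are assumed.
 */
static void rk_iommu_handle_faults(struct rk_iommu *iommu)
{
	int i;

	for (i = 0; i < iommu->num_mmu; i++) {
		u32 int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);

		if (int_status == 0)
			continue;

		if (int_status & RK_MMU_IRQ_PAGE_FAULT) {	/* assumed flag */
			dma_addr_t iova = rk_iommu_read(iommu->bases[i],
							RK_MMU_PAGE_FAULT_ADDR);
			u32 status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
			int flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
				    IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

			log_iova(iommu, i, iova);

			/* Give the attached domain's fault handler a chance first */
			if (iommu->domain)
				report_iommu_fault(iommu->domain, iommu->dev,
						   iova, flags);

			/* Flush the TLB and let the master retry the access */
			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
			rk_iommu_base_command(iommu->bases[i],
					      RK_MMU_CMD_PAGE_FAULT_DONE);
		}

		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
	}
}
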
626 struct rk_iommu *iommu;
629 iommu = list_entry(pos, struct rk_iommu, node);
632 ret = pm_runtime_get_if_in_use(iommu->dev);
636 WARN_ON(clk_bulk_enable(iommu->num_clocks,
637 iommu->clocks));
638 rk_iommu_zap_lines(iommu, iova, size);
639 clk_bulk_disable(iommu->num_clocks, iommu->clocks);
640 pm_runtime_put(iommu->dev);
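
These matches come from the domain-wide TLB shoot-down: the domain keeps a list of attached IOMMUs, and only instances that are currently powered (pm_runtime_get_if_in_use() returning a positive value) get their lines zapped, with the clocks bracketing the register writes. A sketch, with the domain's list and lock field names assumed.

/* Sketch of the domain-wide shoot-down; rk_domain->iommus_lock and
 * rk_domain->iommus are assumed field names.
 */
static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
			      dma_addr_t iova, size_t size)
{
	struct list_head *pos;
	unsigned long flags;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_for_each(pos, &rk_domain->iommus) {
		struct rk_iommu *iommu = list_entry(pos, struct rk_iommu, node);
		int ret;

		/* Skip IOMMUs that are runtime-suspended; they get fully
		 * reprogrammed on resume anyway.
		 */
		ret = pm_runtime_get_if_in_use(iommu->dev);
		if (WARN_ON_ONCE(ret < 0))
			continue;
		if (ret) {
			WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
			rk_iommu_zap_lines(iommu, iova, size);
			clk_bulk_disable(iommu->num_clocks, iommu->clocks);
			pm_runtime_put(iommu->dev);
		}
	}
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}
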
841 return data ? data->iommu : NULL;
844 /* Must be called with iommu powered on and attached */
845 static void rk_iommu_disable(struct rk_iommu *iommu)
850 WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
851 rk_iommu_enable_stall(iommu);
852 rk_iommu_disable_paging(iommu);
853 for (i = 0; i < iommu->num_mmu; i++) {
854 rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
855 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
857 rk_iommu_disable_stall(iommu);
858 clk_bulk_disable(iommu->num_clocks, iommu->clocks);
861 /* Must be called with iommu powered on and attached */
862 static int rk_iommu_enable(struct rk_iommu *iommu)
864 struct iommu_domain *domain = iommu->domain;
868 ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
872 ret = rk_iommu_enable_stall(iommu);
876 ret = rk_iommu_force_reset(iommu);
880 for (i = 0; i < iommu->num_mmu; i++) {
881 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
883 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
884 rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
887 ret = rk_iommu_enable_paging(iommu);
890 rk_iommu_disable_stall(iommu);
892 clk_bulk_disable(iommu->num_clocks, iommu->clocks);
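
The rk_iommu_enable() matches omit the error unwinding between steps. A sketch of the assumed full sequence follows; to_rk_domain() and rk_domain->dt_dma (the DMA address of the directory table) are assumed names.

/* Sketch of the enable sequence with the error unwinding filled in;
 * to_rk_domain() and rk_domain->dt_dma are assumptions.
 */
static int rk_iommu_enable(struct rk_iommu *iommu)
{
	struct iommu_domain *domain = iommu->domain;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int ret, i;

	ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
	if (ret)
		return ret;

	ret = rk_iommu_enable_stall(iommu);
	if (ret)
		goto out_disable_clocks;

	ret = rk_iommu_force_reset(iommu);
	if (ret)
		goto out_disable_stall;

	for (i = 0; i < iommu->num_mmu; i++) {
		/* Point each instance at the directory table, flush its TLB
		 * and unmask its interrupts.
		 */
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, rk_domain->dt_dma);
		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
	}

	ret = rk_iommu_enable_paging(iommu);

out_disable_stall:
	rk_iommu_disable_stall(iommu);
out_disable_clocks:
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
	return ret;
}
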
899 struct rk_iommu *iommu;
905 iommu = rk_iommu_from_dev(dev);
906 if (!iommu)
909 dev_dbg(dev, "Detaching from iommu domain\n");
911 /* iommu already detached */
912 if (iommu->domain != domain)
915 iommu->domain = NULL;
918 list_del_init(&iommu->node);
921 ret = pm_runtime_get_if_in_use(iommu->dev);
924 rk_iommu_disable(iommu);
925 pm_runtime_put(iommu->dev);
932 struct rk_iommu *iommu;
939 * Such a device does not belong to an iommu group.
941 iommu = rk_iommu_from_dev(dev);
942 if (!iommu)
945 dev_dbg(dev, "Attaching to iommu domain\n");
947 /* iommu already attached */
948 if (iommu->domain == domain)
951 if (iommu->domain)
952 rk_iommu_detach_device(iommu->domain, dev);
954 iommu->domain = domain;
957 list_add_tail(&iommu->node, &rk_domain->iommus);
960 ret = pm_runtime_get_if_in_use(iommu->dev);
964 ret = rk_iommu_enable(iommu);
966 rk_iommu_detach_device(iommu->domain, dev);
968 pm_runtime_put(iommu->dev);
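
The attach matches read roughly as: bail out for devices without an IOMMU ("virtual" masters such as DRM), detach from any previous domain, record the new domain and add this IOMMU to the domain's list, then program the hardware only if it is currently powered; a runtime-suspended IOMMU is instead programmed later from the resume path. A sketch under those assumptions; the list and lock field names are assumed.

/* Sketch of the attach flow implied by the matches; rk_domain->iommus_lock,
 * rk_domain->iommus and to_rk_domain() are assumed names.
 */
static int rk_iommu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	struct rk_iommu *iommu;
	unsigned long flags;
	int ret;

	/* 'Virtual' masters (e.g. DRM) have no IOMMU of their own */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return 0;

	if (iommu->domain == domain)
		return 0;

	if (iommu->domain)
		rk_iommu_detach_device(iommu->domain, dev);

	iommu->domain = domain;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_add_tail(&iommu->node, &rk_domain->iommus);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	/* Only program the hardware if it is powered; otherwise the resume
	 * hook calls rk_iommu_enable() later.
	 */
	ret = pm_runtime_get_if_in_use(iommu->dev);
	if (!ret || WARN_ON_ONCE(ret < 0))
		return 0;

	ret = rk_iommu_enable(iommu);
	if (ret)
		rk_iommu_detach_device(iommu->domain, dev);

	pm_runtime_put(iommu->dev);

	return ret;
}
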
1060 struct rk_iommu *iommu;
1066 iommu = rk_iommu_from_dev(dev);
1068 data->link = device_link_add(dev, iommu->dev,
1071 return &iommu->iommu;
1083 struct rk_iommu *iommu;
1085 iommu = rk_iommu_from_dev(dev);
1087 return iommu_group_ref_get(iommu->group);
1102 data->iommu = platform_get_drvdata(iommu_dev);
1128 struct rk_iommu *iommu;
1133 iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
1134 if (!iommu)
1137 platform_set_drvdata(pdev, iommu);
1138 iommu->dev = dev;
1139 iommu->num_mmu = 0;
1141 iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
1143 if (!iommu->bases)
1150 iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
1151 if (IS_ERR(iommu->bases[i]))
1153 iommu->num_mmu++;
1155 if (iommu->num_mmu == 0)
1156 return PTR_ERR(iommu->bases[0]);
1158 iommu->num_irq = platform_irq_count(pdev);
1159 if (iommu->num_irq < 0)
1160 return iommu->num_irq;
1162 iommu->reset_disabled = device_property_read_bool(dev,
1165 iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks);
1166 iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks,
1167 sizeof(*iommu->clocks), GFP_KERNEL);
1168 if (!iommu->clocks)
1171 for (i = 0; i < iommu->num_clocks; ++i)
1172 iommu->clocks[i].id = rk_iommu_clocks[i];
1175 * iommu clocks should be present for all new devices and devicetrees
1179 err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
1181 iommu->num_clocks = 0;
1185 err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
1189 iommu->group = iommu_group_alloc();
1190 if (IS_ERR(iommu->group)) {
1191 err = PTR_ERR(iommu->group);
1195 err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
1199 iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
1200 iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);
1202 err = iommu_device_register(&iommu->iommu);
1218 for (i = 0; i < iommu->num_irq; i++) {
1226 err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
1227 IRQF_SHARED, dev_name(dev), iommu);
1236 iommu_device_sysfs_remove(&iommu->iommu);
1238 iommu_group_put(iommu->group);
1240 clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
1246 struct rk_iommu *iommu = platform_get_drvdata(pdev);
1249 for (i = 0; i < iommu->num_irq; i++) {
1252 devm_free_irq(iommu->dev, irq, iommu);
1260 struct rk_iommu *iommu = dev_get_drvdata(dev);
1262 if (!iommu->domain)
1265 rk_iommu_disable(iommu);
1271 struct rk_iommu *iommu = dev_get_drvdata(dev);
1273 if (!iommu->domain)
1276 return rk_iommu_enable(iommu);
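
The last matches show the runtime-PM hooks: suspend simply disables a currently attached IOMMU, and resume reprograms it via rk_iommu_enable(), which is why the attach and shoot-down paths above may skip powered-off instances. A short sketch:

/* Sketch of the runtime-PM hooks suggested by the matches. */
static int __maybe_unused rk_iommu_suspend(struct device *dev)
{
	struct rk_iommu *iommu = dev_get_drvdata(dev);

	if (!iommu->domain)
		return 0;

	rk_iommu_disable(iommu);
	return 0;
}

static int __maybe_unused rk_iommu_resume(struct device *dev)
{
	struct rk_iommu *iommu = dev_get_drvdata(dev);

	if (!iommu->domain)
		return 0;

	return rk_iommu_enable(iommu);
}
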
1286 { .compatible = "rockchip,iommu" },