Lines Matching refs:iommu
17 #include <linux/iommu.h>
113 struct iommu_device iommu;
115 struct iommu_domain *domain; /* domain to which iommu is attached */
121 struct rk_iommu *iommu;
142 * The Rockchip rk3288 iommu uses a 2-level page table.
151 * Each iommu device has a MMU_DTE_ADDR register that contains the physical
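Lines 142-151 describe the rk3288 2-level layout; below is a minimal sketch of how a 32-bit IOVA is split under that layout. The macro names are illustrative, not the driver's own, but the field widths follow the rk3288 format echoed by the dte_index/pte_index/page_offset logging further down.

/* Illustrative IOVA split for the rk3288-style 2-level page table:
 * bits [31:22] index the 1024-entry directory (DTE), bits [21:12] index
 * the 1024-entry page table (PTE), bits [11:0] are the 4 KiB page offset.
 */
#define RK_IOVA_DTE_INDEX(iova)   (((iova) & 0xffc00000) >> 22)
#define RK_IOVA_PTE_INDEX(iova)   (((iova) & 0x003ff000) >> 12)
#define RK_IOVA_PAGE_OFFSET(iova) ((iova) & 0x00000fff)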
345 static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
349 for (i = 0; i < iommu->num_mmu; i++)
350 writel(command, iommu->bases[i] + RK_MMU_COMMAND);
357 static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
366 for (i = 0; i < iommu->num_mmu; i++) {
370 rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
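rk_iommu_zap_lines() invalidates the TLB for a range by issuing ZAP_ONE_LINE once per 4 KiB page on every MMU instance behind the IOMMU. A hedged sketch of the loop that the match at line 370 sits in; SPAGE_SIZE (4096) is assumed from the page granularity above.

for (i = 0; i < iommu->num_mmu; i++) {
	dma_addr_t iova;

	/* One ZAP_ONE_LINE command per 4 KiB page in the range. */
	for (iova = iova_start; iova < iova_start + size; iova += SPAGE_SIZE)
		rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
}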
374 static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
379 for (i = 0; i < iommu->num_mmu; i++)
380 active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
386 static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
391 for (i = 0; i < iommu->num_mmu; i++)
392 enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
398 static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
403 for (i = 0; i < iommu->num_mmu; i++)
404 done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;
409 static int rk_iommu_enable_stall(struct rk_iommu *iommu)
414 if (rk_iommu_is_stall_active(iommu))
418 if (!rk_iommu_is_paging_enabled(iommu))
421 rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
423 ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
427 for (i = 0; i < iommu->num_mmu; i++)
428 dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
429 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
434 static int rk_iommu_disable_stall(struct rk_iommu *iommu)
439 if (!rk_iommu_is_stall_active(iommu))
442 rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
444 ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
448 for (i = 0; i < iommu->num_mmu; i++)
449 dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
450 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
455 static int rk_iommu_enable_paging(struct rk_iommu *iommu)
460 if (rk_iommu_is_paging_enabled(iommu))
463 rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
465 ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
469 for (i = 0; i < iommu->num_mmu; i++)
470 dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
471 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
476 static int rk_iommu_disable_paging(struct rk_iommu *iommu)
481 if (!rk_iommu_is_paging_enabled(iommu))
484 rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);
486 ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
490 for (i = 0; i < iommu->num_mmu; i++)
491 dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
492 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
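The four stall/paging helpers above share one pattern: broadcast a command to every MMU, then poll the aggregate status with readx_poll_timeout() from <linux/iopoll.h>. A hedged sketch of that pattern; the poll period and timeout values here are assumptions (the driver wraps them in its own constants).

/* readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) repeatedly
 * evaluates val = op(addr) until cond holds or timeout_us expires.
 */
rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
			 val, 100, 1000);	/* poll until val is true */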
497 static int rk_iommu_force_reset(struct rk_iommu *iommu)
503 if (iommu->reset_disabled)
510 for (i = 0; i < iommu->num_mmu; i++) {
512 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);
514 if (dte_addr != rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR)) {
515 dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
520 rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
522 ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
526 dev_err(iommu->dev, "FORCE_RESET command timed out\n");
533 static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
535 void __iomem *base = iommu->bases[index];
572 dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
574 dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
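log_iova() walks both levels by hand to dump the translation a fault hit. A hedged sketch of that walk for the rk3288 format; the valid bit (BIT(0)) and the 0xfffff000 address masks are assumptions about the v1 entry layout, and error handling is trimmed.

/* Hedged sketch: dump the DTE/PTE a faulting IOVA resolves to. */
static void sketch_walk(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
	void __iomem *base = iommu->bases[index];
	u32 dte_index = (iova & 0xffc00000) >> 22;
	u32 pte_index = (iova & 0x003ff000) >> 12;
	phys_addr_t dt = rk_iommu_read(base, RK_MMU_DTE_ADDR);
	u32 dte, pte = 0;

	dte = *(u32 *)phys_to_virt(dt + dte_index * sizeof(u32));
	if (dte & BIT(0))	/* assumed: bit 0 marks a valid entry */
		pte = *(u32 *)phys_to_virt((dte & 0xfffff000) +
					   pte_index * sizeof(u32));

	dev_err(iommu->dev, "iova %pad -> dte %#08x pte %#08x\n",
		&iova, dte, pte);
}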
582 struct rk_iommu *iommu = dev_id;
589 err = pm_runtime_get_if_in_use(iommu->dev);
593 if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
596 for (i = 0; i < iommu->num_mmu; i++) {
597 int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
602 iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);
607 status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
611 dev_err(iommu->dev, "Page fault at %pad of type %s\n",
615 log_iova(iommu, i, iova);
622 if (iommu->domain != &rk_identity_domain)
623 report_iommu_fault(iommu->domain, iommu->dev, iova,
626 dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
628 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
629 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
633 dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
636 dev_err(iommu->dev, "unexpected int_status: %#08x\n",
639 rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
642 clk_bulk_disable(iommu->num_clocks, iommu->clocks);
645 pm_runtime_put(iommu->dev);
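The interrupt handler (lines 582-645) only touches MMU registers while the IOMMU is runtime-active and clocked. A condensed, hedged sketch of that guard; the fault decoding in the middle is elided.

static irqreturn_t sketch_irq(int irq, void *dev_id)
{
	struct rk_iommu *iommu = dev_id;
	irqreturn_t ret = IRQ_NONE;
	int err;

	/* No runtime-PM reference taken (<= 0): the block may be powered
	 * off, so its registers must not be read. */
	err = pm_runtime_get_if_in_use(iommu->dev);
	if (err <= 0)
		return ret;

	if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
		goto out;

	/* ... read RK_MMU_INT_STATUS, log/report the fault, issue
	 * ZAP_CACHE + PAGE_FAULT_DONE, write RK_MMU_INT_CLEAR ... */
	ret = IRQ_HANDLED;

	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
out:
	pm_runtime_put(iommu->dev);
	return ret;
}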
686 struct rk_iommu *iommu;
689 iommu = list_entry(pos, struct rk_iommu, node);
692 ret = pm_runtime_get_if_in_use(iommu->dev);
696 WARN_ON(clk_bulk_enable(iommu->num_clocks,
697 iommu->clocks));
698 rk_iommu_zap_lines(iommu, iova, size);
699 clk_bulk_disable(iommu->num_clocks, iommu->clocks);
700 pm_runtime_put(iommu->dev);
901 return data ? data->iommu : NULL;
904 /* Must be called with iommu powered on and attached */
905 static void rk_iommu_disable(struct rk_iommu *iommu)
910 WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
911 rk_iommu_enable_stall(iommu);
912 rk_iommu_disable_paging(iommu);
913 for (i = 0; i < iommu->num_mmu; i++) {
914 rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
915 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
917 rk_iommu_disable_stall(iommu);
918 clk_bulk_disable(iommu->num_clocks, iommu->clocks);
921 /* Must be called with iommu powered on and attached */
922 static int rk_iommu_enable(struct rk_iommu *iommu)
924 struct iommu_domain *domain = iommu->domain;
928 ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
932 ret = rk_iommu_enable_stall(iommu);
936 ret = rk_iommu_force_reset(iommu);
940 for (i = 0; i < iommu->num_mmu; i++) {
941 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
943 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
944 rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
947 ret = rk_iommu_enable_paging(iommu);
950 rk_iommu_disable_stall(iommu);
952 clk_bulk_disable(iommu->num_clocks, iommu->clocks);
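rk_iommu_enable() brackets every programming step with a stall so the MMUs are quiescent while DTE_ADDR and the interrupt mask change. A condensed sketch of the ordering, with error unwinding dropped; dt_dma as the directory-table DMA address is an assumption about the domain structure.

clk_bulk_enable(iommu->num_clocks, iommu->clocks);
rk_iommu_enable_stall(iommu);			/* quiesce the MMUs      */
rk_iommu_force_reset(iommu);			/* known-clean state     */
for (i = 0; i < iommu->num_mmu; i++) {
	rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, rk_domain->dt_dma);
	rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
	rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
}
rk_iommu_enable_paging(iommu);			/* start translating     */
rk_iommu_disable_stall(iommu);			/* let masters run again */
clk_bulk_disable(iommu->num_clocks, iommu->clocks);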
959 struct rk_iommu *iommu;
965 iommu = rk_iommu_from_dev(dev);
966 if (!iommu)
969 rk_domain = to_rk_domain(iommu->domain);
971 dev_dbg(dev, "Detaching from iommu domain\n");
973 if (iommu->domain == identity_domain)
976 iommu->domain = identity_domain;
979 list_del_init(&iommu->node);
982 ret = pm_runtime_get_if_in_use(iommu->dev);
985 rk_iommu_disable(iommu);
986 pm_runtime_put(iommu->dev);
1016 struct rk_iommu *iommu;
1023 * Such a device does not belong to an iommu group.
1025 iommu = rk_iommu_from_dev(dev);
1026 if (!iommu)
1029 dev_dbg(dev, "Attaching to iommu domain\n");
1031 /* iommu already attached */
1032 if (iommu->domain == domain)
1039 iommu->domain = domain;
1042 list_add_tail(&iommu->node, &rk_domain->iommus);
1045 ret = pm_runtime_get_if_in_use(iommu->dev);
1049 ret = rk_iommu_enable(iommu);
1053 pm_runtime_put(iommu->dev);
1137 struct rk_iommu *iommu;
1143 iommu = rk_iommu_from_dev(dev);
1145 data->link = device_link_add(dev, iommu->dev,
1148 return &iommu->iommu;
1160 struct rk_iommu *iommu;
1162 iommu = rk_iommu_from_dev(dev);
1164 return iommu_group_ref_get(iommu->group);
1179 data->iommu = platform_get_drvdata(iommu_dev);
1180 data->iommu->domain = &rk_identity_domain;
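Lines 1179-1180 belong to the driver's .of_xlate callback: the master device resolves its iommus= phandle to the IOMMU's platform device and caches the rk_iommu pointer in its per-device data, starting out on the identity domain. A hedged sketch; dma_dev is assumed to be the driver's global DMA device pointer, the rk_iommudata struct name follows lines 901/1145, and error handling is trimmed.

static int sketch_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct platform_device *iommu_dev;
	struct rk_iommudata *data;

	data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Resolve the phandle to the IOMMU and stash its driver data so
	 * rk_iommu_from_dev() can find it later. */
	iommu_dev = of_find_device_by_node(args->np);

	data->iommu = platform_get_drvdata(iommu_dev);
	data->iommu->domain = &rk_identity_domain;
	dev_iommu_priv_set(dev, data);

	platform_device_put(iommu_dev);

	return 0;
}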
1210 struct rk_iommu *iommu;
1216 iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
1217 if (!iommu)
1220 platform_set_drvdata(pdev, iommu);
1221 iommu->dev = dev;
1222 iommu->num_mmu = 0;
1235 iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
1237 if (!iommu->bases)
1244 iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
1245 if (IS_ERR(iommu->bases[i]))
1247 iommu->num_mmu++;
1249 if (iommu->num_mmu == 0)
1250 return PTR_ERR(iommu->bases[0]);
1252 iommu->num_irq = platform_irq_count(pdev);
1253 if (iommu->num_irq < 0)
1254 return iommu->num_irq;
1256 iommu->reset_disabled = device_property_read_bool(dev,
1259 iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks);
1260 iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks,
1261 sizeof(*iommu->clocks), GFP_KERNEL);
1262 if (!iommu->clocks)
1265 for (i = 0; i < iommu->num_clocks; ++i)
1266 iommu->clocks[i].id = rk_iommu_clocks[i];
1269 * iommu clocks should be present for all new devices and devicetrees
1273 err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
1275 iommu->num_clocks = 0;
1279 err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
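Line 1275 (iommu->num_clocks = 0) is the fallback that lets old devicetrees without clock properties keep working. A hedged sketch of the surrounding check; the specific error tested is assumed to be -ENOENT.

err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
if (err == -ENOENT) {
	/* Assumed: old DTs may omit the clocks, so run without them. */
	iommu->num_clocks = 0;
} else if (err) {
	return err;
}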
1283 iommu->group = iommu_group_alloc();
1284 if (IS_ERR(iommu->group)) {
1285 err = PTR_ERR(iommu->group);
1289 err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
1293 err = iommu_device_register(&iommu->iommu, &rk_iommu_ops, dev);
1307 for (i = 0; i < iommu->num_irq; i++) {
1315 err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
1316 IRQF_SHARED, dev_name(dev), iommu);
1327 iommu_device_sysfs_remove(&iommu->iommu);
1329 iommu_group_put(iommu->group);
1331 clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
1337 struct rk_iommu *iommu = platform_get_drvdata(pdev);
1340 for (i = 0; i < iommu->num_irq; i++) {
1343 devm_free_irq(iommu->dev, irq, iommu);
1351 struct rk_iommu *iommu = dev_get_drvdata(dev);
1353 if (iommu->domain == &rk_identity_domain)
1356 rk_iommu_disable(iommu);
1362 struct rk_iommu *iommu = dev_get_drvdata(dev);
1364 if (iommu->domain == &rk_identity_domain)
1367 return rk_iommu_enable(iommu);
1393 { .compatible = "rockchip,iommu",
1396 { .compatible = "rockchip,rk3568-iommu",
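The two compatibles at lines 1393 and 1396 select per-variant page-table helpers through .data. A hedged sketch of the shape of that match table; the table and ops symbol names are assumptions, not verified against the source.

static const struct of_device_id rk_iommu_dt_ids[] = {
	{ .compatible = "rockchip,iommu",        .data = &iommu_data_ops_v1 },
	{ .compatible = "rockchip,rk3568-iommu", .data = &iommu_data_ops_v2 },
	{ /* sentinel */ }
};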