Lines Matching defs:iommu

15 #include <linux/dma-iommu.h>

20 #include <linux/iommu.h>
142 bool skip_read; /* rk3126/rk3128 can't read vop iommu registers */
143 bool dlr_disable; /* avoid accessing the iommu when runtime ops are called */
145 struct iommu_device iommu;
147 struct iommu_domain *domain; /* domain to which iommu is attached */
155 struct rk_iommu *iommu;
174 * The Rockchip rk3288 iommu uses a 2-level page table.
183 * Each iommu device has a MMU_DTE_ADDR register that contains the physical
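The fragments at lines 174 and 183 describe the translation scheme: a two-level page table whose Directory Table is located through the MMU_DTE_ADDR register. Below is a minimal sketch of how a 32-bit IOVA splits under the usual two-level, 4 KiB-page layout (10-bit DTE index, 10-bit PTE index, 12-bit page offset); the helper names are illustrative, not the driver's.

#include <linux/types.h>

/*
 * Illustrative only: IOVA decomposition for a two-level table over a
 * 32-bit address space with 4 KiB pages.
 */
static inline u32 example_dte_index(dma_addr_t iova)
{
        return (u32)(iova >> 22) & 0x3ff;   /* bits 31:22 select the DTE */
}

static inline u32 example_pte_index(dma_addr_t iova)
{
        return (u32)(iova >> 12) & 0x3ff;   /* bits 21:12 select the PTE */
}

static inline u32 example_page_offset(dma_addr_t iova)
{
        return (u32)iova & 0xfff;           /* bits 11:0 are the offset into the 4 KiB page */
}

These are the same three fields that log_iova() prints at line 717.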
391 static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
395 for (i = 0; i < iommu->num_mmu; i++) {
396 writel(command, iommu->bases[i] + RK_MMU_COMMAND);
404 static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start, size_t size)
412 for (i = 0; i < iommu->num_mmu; i++) {
416 rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
421 static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
426 for (i = 0; i < iommu->num_mmu; i++) {
427 active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) & RK_MMU_STATUS_STALL_ACTIVE);
433 static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
438 for (i = 0; i < iommu->num_mmu; i++) {
439 enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) & RK_MMU_STATUS_PAGING_ENABLED);
445 static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
450 for (i = 0; i < iommu->num_mmu; i++) {
451 done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;
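The three status helpers at lines 421, 433 and 445 AND-reduce the per-instance status, so a device backed by several MMUs only reports stalled, paging-enabled or reset-done when every instance agrees. The following is reconstructed from the fragments above; the accumulator initialization is an assumption.

static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
        bool active = true;   /* assumed initializer */
        int i;

        for (i = 0; i < iommu->num_mmu; i++)
                active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
                             RK_MMU_STATUS_STALL_ACTIVE);

        return active;
}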
457 static int rk_iommu_enable_stall(struct rk_iommu *iommu)
463 if (iommu->skip_read) {
467 if (rk_iommu_is_stall_active(iommu)) {
472 if (!rk_iommu_is_paging_enabled(iommu)) {
478 rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
479 if (iommu->skip_read) {
483 ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val, val, RK_MMU_POLL_PERIOD_US,
486 for (i = 0; i < iommu->num_mmu; i++) {
487 dev_err(iommu->dev, "Enable stall request timed out, retry_count = %d, status: %#08x\n", retry_count,
488 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
490 if (iommu->cmd_retry && (retry_count++ < CMD_RETRY_COUNT)) {
499 static int rk_iommu_disable_stall(struct rk_iommu *iommu)
505 if (iommu->skip_read) {
509 if (!rk_iommu_is_stall_active(iommu)) {
515 rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
516 if (iommu->skip_read) {
520 ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val, !val, RK_MMU_POLL_PERIOD_US,
523 for (i = 0; i < iommu->num_mmu; i++) {
524 dev_err(iommu->dev, "Disable stall request timed out, retry_count = %d, status: %#08x\n", retry_count,
525 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
527 if (iommu->cmd_retry && (retry_count++ < CMD_RETRY_COUNT)) {
536 static int rk_iommu_enable_paging(struct rk_iommu *iommu)
542 if (iommu->skip_read) {
546 if (rk_iommu_is_paging_enabled(iommu)) {
552 rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
553 if (iommu->skip_read) {
557 ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val, val, RK_MMU_POLL_PERIOD_US,
560 for (i = 0; i < iommu->num_mmu; i++) {
561 dev_err(iommu->dev, "Enable paging request timed out, retry_count = %d, status: %#08x\n", retry_count,
562 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
564 if (iommu->cmd_retry && (retry_count++ < CMD_RETRY_COUNT)) {
573 static int rk_iommu_disable_paging(struct rk_iommu *iommu)
579 if (iommu->skip_read) {
583 if (!rk_iommu_is_paging_enabled(iommu)) {
589 rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);
590 if (iommu->skip_read) {
595 ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val, !val, RK_MMU_POLL_PERIOD_US,
598 for (i = 0; i < iommu->num_mmu; i++) {
599 dev_err(iommu->dev, "Disable paging request timed out, retry_count = %d, status: %#08x\n", retry_count,
600 rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
602 if (iommu->cmd_retry && (retry_count++ < CMD_RETRY_COUNT)) {
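The four enable/disable helpers at lines 457, 499, 536 and 573 share one control pattern: issue the MMU command, skip the read-back entirely on skip_read hardware (rk3126/rk3128), otherwise poll the combined status and, when cmd_retry is set, reissue the command on timeout. Below is a condensed sketch of that shared pattern; the function name and the timeout constant are illustrative, not the driver's.

#include <linux/iopoll.h>

#define EXAMPLE_POLL_TIMEOUT_US 1000   /* illustrative; the driver's timeout constant is not shown above */

static int example_cmd_and_poll(struct rk_iommu *iommu, u32 cmd,
                                bool (*check)(struct rk_iommu *iommu),
                                bool wanted)
{
        int retry_count = 0;
        bool val;
        int ret;

        do {
                rk_iommu_command(iommu, cmd);

                /* rk3126/rk3128 cannot read the status back: assume success. */
                if (iommu->skip_read)
                        return 0;

                ret = readx_poll_timeout(check, iommu, val, val == wanted,
                                         RK_MMU_POLL_PERIOD_US,
                                         EXAMPLE_POLL_TIMEOUT_US);
        } while (ret && iommu->cmd_retry && retry_count++ < CMD_RETRY_COUNT);

        return ret;
}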
611 static int rk_iommu_force_reset(struct rk_iommu *iommu)
618 if (iommu->reset_disabled) {
622 if (iommu->skip_read) {
634 for (i = 0; i < iommu->num_mmu; i++) {
635 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
637 if (iommu->version >= 0x2) {
642 dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
644 dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
650 rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
651 if (iommu->skip_read) {
655 ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val, val, RK_MMU_FORCE_RESET_TIMEOUT_US,
658 dev_err(iommu->dev, "FORCE_RESET command timed out\n");
665 static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
667 void __iomem *base = iommu->bases[index];
685 if (iommu->version >= RK_IOMMU_VERSION_CMP) {
697 if (iommu->version >= RK_IOMMU_VERSION_CMP) {
709 if (iommu->version >= RK_IOMMU_VERSION_CMP) {
717 dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n", &iova, dte_index,
719 dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
726 struct rk_iommu *iommu = dev_id;
734 err = pm_runtime_get_if_in_use(iommu->dev);
739 if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks))) {
743 for (i = 0; i < iommu->num_mmu; i++) {
744 int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
750 iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);
755 status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
758 dev_err(iommu->dev, "Page fault at %pad of type %s\n", &iova,
761 log_iova(iommu, i, iova);
768 if (iommu->domain) {
769 report_iommu_fault(iommu->domain, iommu->dev, iova, status);
771 dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
774 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
777 * Master may clear the int_mask to prevent iommu
781 int_mask = rk_iommu_read(iommu->bases[i], RK_MMU_INT_MASK);
783 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
788 dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
792 dev_err(iommu->dev, "unexpected int_status: %#08x\n", int_status);
795 rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
798 clk_bulk_disable(iommu->num_clocks, iommu->clocks);
801 pm_runtime_put(iommu->dev);
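Within the interrupt handler (line 726 onwards), a page fault is reported upstream with report_iommu_fault() and only acknowledged if the interrupt is still unmasked, because (per the comment at line 777) the master's fault handler may clear the mask to keep the iommu stalled. The sketch below reconstructs that branch from the fragments; the RK_MMU_IRQ_PAGE_FAULT bit name is an assumption.

static void example_handle_page_fault(struct rk_iommu *iommu, int i,
                                      dma_addr_t iova, u32 status)
{
        u32 int_mask;

        if (iommu->domain)
                report_iommu_fault(iommu->domain, iommu->dev, iova, status);
        else
                dev_err(iommu->dev,
                        "Page fault while iommu not attached to domain?\n");

        rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);

        /*
         * Only ack the fault if the page-fault interrupt is still
         * unmasked; a master that cleared the mask wants the iommu to
         * stay stalled until it has fixed up the mapping.
         */
        int_mask = rk_iommu_read(iommu->bases[i], RK_MMU_INT_MASK);
        if (int_mask & RK_MMU_IRQ_PAGE_FAULT)   /* bit name assumed */
                rk_iommu_base_command(iommu->bases[i],
                                      RK_MMU_CMD_PAGE_FAULT_DONE);
}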
872 struct rk_iommu *iommu;
875 iommu = list_entry(pos, struct rk_iommu, node);
878 ret = pm_runtime_get_if_in_use(iommu->dev);
883 WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
884 rk_iommu_zap_lines(iommu, iova, size);
885 clk_bulk_disable(iommu->num_clocks, iommu->clocks);
886 pm_runtime_put(iommu->dev);
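The shootdown loop at lines 872-886 only touches hardware when pm_runtime_get_if_in_use() says the iommu is already powered; a powered-down instance has nothing left in its TLB to invalidate. A minimal sketch of that guard (the function name is illustrative):

#include <linux/pm_runtime.h>
#include <linux/clk.h>

static void example_zap_if_powered(struct rk_iommu *iommu,
                                   dma_addr_t iova, size_t size)
{
        int ret = pm_runtime_get_if_in_use(iommu->dev);

        if (WARN_ON_ONCE(ret < 0))
                return;         /* runtime PM error */
        if (ret == 0)
                return;         /* powered down: TLB is already empty */

        WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
        rk_iommu_zap_lines(iommu, iova, size);
        clk_bulk_disable(iommu->num_clocks, iommu->clocks);
        pm_runtime_put(iommu->dev);
}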
1254 struct rk_iommu *iommu;
1257 iommu = list_entry(pos, struct rk_iommu, node);
1259 ret = pm_runtime_get_if_in_use(iommu->dev);
1264 WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
1265 for (i = 0; i < iommu->num_mmu; i++) {
1266 rk_iommu_write(iommu->bases[i], RK_MMU_COMMAND, RK_MMU_CMD_ZAP_CACHE);
1268 clk_bulk_disable(iommu->num_clocks, iommu->clocks);
1269 pm_runtime_put(iommu->dev);
1279 return data ? data->iommu : NULL;
1282 /* Must be called with iommu powered on and attached */
1283 static void rk_iommu_disable(struct rk_iommu *iommu)
1288 WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
1289 rk_iommu_enable_stall(iommu);
1290 rk_iommu_disable_paging(iommu);
1291 for (i = 0; i < iommu->num_mmu; i++) {
1292 rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
1293 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
1295 rk_iommu_disable_stall(iommu);
1296 clk_bulk_disable(iommu->num_clocks, iommu->clocks);
1301 struct rk_iommu *iommu;
1303 iommu = rk_iommu_from_dev(dev);
1304 if (!iommu) {
1308 rk_iommu_disable(iommu);
1314 /* Must be called with iommu powered on and attached */
1315 static int rk_iommu_enable(struct rk_iommu *iommu)
1317 struct iommu_domain *domain = iommu->domain;
1323 ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
1328 ret = rk_iommu_enable_stall(iommu);
1333 ret = rk_iommu_force_reset(iommu);
1338 for (i = 0; i < iommu->num_mmu; i++) {
1339 if (iommu->version >= 0x2) {
1341 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dt_v2);
1343 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, rk_domain->dt_dma);
1345 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
1346 rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
1348 /* Workaround for the iommu getting blocked; BIT(31) defaults to 1 */
1349 auto_gate = rk_iommu_read(iommu->bases[i], RK_MMU_AUTO_GATING);
1351 rk_iommu_write(iommu->bases[i], RK_MMU_AUTO_GATING, auto_gate);
1354 ret = rk_iommu_enable_paging(iommu);
1357 rk_iommu_disable_stall(iommu);
1359 clk_bulk_disable(iommu->num_clocks, iommu->clocks);
1365 struct rk_iommu *iommu;
1367 iommu = rk_iommu_from_dev(dev);
1368 if (!iommu) {
1372 return rk_iommu_enable(iommu);
1378 struct rk_iommu *iommu;
1384 iommu = rk_iommu_from_dev(dev);
1385 if (!iommu) {
1389 dev_dbg(dev, "Detaching from iommu domain\n");
1391 if (!iommu->domain) {
1395 iommu->domain = NULL;
1398 list_del_init(&iommu->node);
1401 ret = pm_runtime_get_if_in_use(iommu->dev);
1404 rk_iommu_disable(iommu);
1405 pm_runtime_put(iommu->dev);
1411 struct rk_iommu *iommu;
1418 * Such a device does not belong to an iommu group.
1420 iommu = rk_iommu_from_dev(dev);
1421 if (!iommu) {
1425 dev_dbg(dev, "Attaching to iommu domain\n");
1427 if (iommu->domain) {
1428 rk_iommu_detach_device(iommu->domain, dev);
1431 iommu->domain = domain;
1433 /* Attaching a NULL domain disables the iommu */
1439 list_add_tail(&iommu->node, &rk_domain->iommus);
1442 rk_domain->shootdown_entire = iommu->shootdown_entire;
1443 ret = pm_runtime_get_if_in_use(iommu->dev);
1448 ret = rk_iommu_enable(iommu);
1450 rk_iommu_detach_device(iommu->domain, dev);
1453 pm_runtime_put(iommu->dev);
1576 struct rk_iommu *iommu;
1583 iommu = rk_iommu_from_dev(dev);
1585 data->link = device_link_add(dev, iommu->dev, DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
1599 return &iommu->iommu;
1611 struct rk_iommu *iommu;
1613 iommu = rk_iommu_from_dev(dev);
1615 return iommu_group_ref_get(iommu->group);
1637 data->iommu = platform_get_drvdata(iommu_dev);
1652 struct rk_iommu *iommu = rk_iommu_from_dev(dev);
1655 if (!iommu) {
1659 for (i = 0; i < iommu->num_mmu; i++) {
1660 rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
1667 struct rk_iommu *iommu = rk_iommu_from_dev(dev);
1670 if (!iommu) {
1674 for (i = 0; i < iommu->num_mmu; i++) {
1676 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
1677 rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
1678 /* Leave the iommu in page-fault state until the mapping is finished */
1679 rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
1728 .compatible = "rockchip,iommu",
1732 .compatible = "rockchip,iommu-v2",
1741 struct rk_iommu *iommu;
1748 iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
1749 if (!iommu) {
1759 iommu->version = data->version;
1760 dev_info(dev, "version = %x\n", iommu->version);
1762 platform_set_drvdata(pdev, iommu);
1763 iommu->dev = dev;
1764 iommu->num_mmu = 0;
1766 iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases), GFP_KERNEL);
1767 if (!iommu->bases) {
1776 iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
1777 if (IS_ERR(iommu->bases[i])) {
1780 iommu->num_mmu++;
1782 if (iommu->num_mmu == 0) {
1783 return PTR_ERR(iommu->bases[0]);
1786 iommu->num_irq = platform_irq_count(pdev);
1787 if (iommu->num_irq < 0) {
1788 return iommu->num_irq;
1791 iommu->reset_disabled = device_property_read_bool(dev, "rockchip,disable-mmu-reset");
1792 iommu->skip_read = device_property_read_bool(dev, "rockchip,skip-mmu-read");
1793 iommu->dlr_disable = device_property_read_bool(dev, "rockchip,disable-device-link-resume");
1794 iommu->shootdown_entire = device_property_read_bool(dev, "rockchip,shootdown-entire");
1797 iommu->cmd_retry = device_property_read_bool(dev, "rockchip,enable-cmd-retry");
1801 * iommu clocks should be present for all new devices and devicetrees
1805 err = devm_clk_bulk_get_all(dev, &iommu->clocks);
1807 iommu->num_clocks = 0;
1811 iommu->num_clocks = err;
1814 err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
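Per the comment at line 1801, iommu clocks are expected on new devicetrees but may be missing on legacy ones, which is why probe falls back to num_clocks = 0 instead of failing (lines 1805-1811). The snippet below is a plausible reconstruction of that fallback; the exact error treated as "no clocks" is an assumption, only the fallback itself is taken from the fragments above.

        err = devm_clk_bulk_get_all(dev, &iommu->clocks);
        if (err == -ENOENT) {
                /* Legacy devicetrees may not describe any iommu clocks. */
                iommu->num_clocks = 0;
        } else if (err < 0) {
                return err;
        } else {
                iommu->num_clocks = err;
        }

        err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
        if (err)
                return err;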
1819 iommu->group = iommu_group_alloc();
1820 if (IS_ERR(iommu->group)) {
1821 err = PTR_ERR(iommu->group);
1825 err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
1830 if (iommu->version >= 0x2) {
1831 iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops_v2);
1833 iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
1835 iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);
1837 err = iommu_device_register(&iommu->iommu);
1851 if (iommu->version >= 0x2) {
1859 if (iommu->skip_read) {
1863 for (i = 0; i < iommu->num_irq; i++) {
1869 err = devm_request_irq(iommu->dev, irq, rk_iommu_irq, IRQF_SHARED, dev_name(dev), iommu);
1879 iommu_device_sysfs_remove(&iommu->iommu);
1881 iommu_group_put(iommu->group);
1883 clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
1889 struct rk_iommu *iommu = platform_get_drvdata(pdev);
1892 for (i = 0; i < iommu->num_irq; i++) {
1895 devm_free_irq(iommu->dev, irq, iommu);
1903 struct rk_iommu *iommu = dev_get_drvdata(dev);
1905 if (!iommu->domain) {
1909 if (iommu->dlr_disable) {
1913 rk_iommu_disable(iommu);
1919 struct rk_iommu *iommu = dev_get_drvdata(dev);
1921 if (!iommu->domain) {
1925 if (iommu->dlr_disable) {
1929 return rk_iommu_enable(iommu);
1956 MODULE_ALIAS("platform:rockchip-iommu");