Lines matching refs:smmu: cross-reference hits for the C symbol smmu in the Arm SMMU v1/v2 driver (arm-smmu.c). Each entry carries its line number in the source file; lines that do not reference the symbol are omitted, so most functions appear only as fragments.
18 #define pr_fmt(fmt) "arm-smmu: " fmt
44 #include "arm-smmu.h"
74 static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
76 if (pm_runtime_enabled(smmu->dev))
77 return pm_runtime_resume_and_get(smmu->dev);
82 static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
84 if (pm_runtime_enabled(smmu->dev))
85 pm_runtime_put_autosuspend(smmu->dev);
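Only the lines referencing smmu survive above, so the two runtime-PM wrappers are incomplete. A plausible reconstruction with the elided lines filled in (the braces and the return 0 fall-through are inferences, not matched lines):

static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
        /* Resume the SMMU before touching its registers, if runtime PM is in use */
        if (pm_runtime_enabled(smmu->dev))
                return pm_runtime_resume_and_get(smmu->dev);

        return 0;
}

static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
        /* Drop the reference; autosuspend defers the actual power-down */
        if (pm_runtime_enabled(smmu->dev))
                pm_runtime_put_autosuspend(smmu->dev);
}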
135 struct arm_smmu_device **smmu)
178 *smmu = dev_get_drvdata(smmu_dev);
200 struct arm_smmu_device **smmu)
212 static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
218 if (smmu->impl && unlikely(smmu->impl->tlb_sync))
219 return smmu->impl->tlb_sync(smmu, page, sync, status);
221 arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
224 reg = arm_smmu_readl(smmu, page, status);
231 dev_err_ratelimited(smmu->dev,
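The matched lines give the skeleton of __arm_smmu_tlb_sync: an implementation hook, a write to the sync register, a poll of the status register, and a timeout message. A sketch of the full polling loop; the spin/backoff structure, the TLB_SPIN_COUNT and TLB_LOOP_TIMEOUT constants, and the GSACTIVE test are inferred from the visible accesses:

static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
                                int sync, int status)
{
        unsigned int spin_cnt, delay;
        u32 reg;

        /* Let an implementation-specific hook take over if one exists */
        if (smmu->impl && unlikely(smmu->impl->tlb_sync))
                return smmu->impl->tlb_sync(smmu, page, sync, status);

        /* Kick off the sync, then poll the status register with backoff */
        arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
        for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
                for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
                        reg = arm_smmu_readl(smmu, page, status);
                        if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
                                return;
                        cpu_relax();
                }
                udelay(delay);
        }
        dev_err_ratelimited(smmu->dev,
                            "TLB sync timed out -- SMMU may be deadlocked\n");
}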
235 static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
239 spin_lock_irqsave(&smmu->global_sync_lock, flags);
240 __arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
242 spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
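arm_smmu_tlb_sync_global is nearly intact; only the locals and the second sync argument are elided. Plausibly in full (ARM_SMMU_GR0_sTLBGSTATUS as the status register is an inference):

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
        unsigned long flags;

        /* Serialise global syncs against each other */
        spin_lock_irqsave(&smmu->global_sync_lock, flags);
        __arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
                            ARM_SMMU_GR0_sTLBGSTATUS);
        spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}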
247 struct arm_smmu_device *smmu = smmu_domain->smmu;
251 __arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
264 arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
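Lines 247-264 span two adjacent functions: the per-context sync and the stage-1 invalidate-by-ASID. A sketch of both (the cb_lock locking, the wmb(), and the TLBIASID register name are inferred from the visible calls):

static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
{
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        unsigned long flags;

        spin_lock_irqsave(&smmu_domain->cb_lock, flags);
        __arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
                            ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
        spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;

        /* Ensure PTEs cleared by this CPU are visible before the TLBI */
        wmb();
        arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
                          ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
        arm_smmu_tlb_sync_context(smmu_domain);
}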
272 struct arm_smmu_device *smmu = smmu_domain->smmu;
276 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
277 arm_smmu_tlb_sync_global(smmu);
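Lines 272-277 belong to the stage-2 invalidate-by-VMID path. A reconstruction (the function name arm_smmu_tlb_inv_context_s2 and the ordering barrier are inferred):

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_device *smmu = smmu_domain->smmu;

        /* Make PTE updates visible before the TLBI reaches the SMMU */
        wmb();
        arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
        arm_smmu_tlb_sync_global(smmu);
}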
284 struct arm_smmu_device *smmu = smmu_domain->smmu;
288 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
295 arm_smmu_cb_write(smmu, idx, reg, iova);
302 arm_smmu_cb_writeq(smmu, idx, reg, iova);
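Lines 284-302 are the stage-1 by-page invalidation loop: 32-bit contexts encode the ASID in the low bits of the VA and use 32-bit writes, while AArch64 contexts shift the ASID into bits [63:48] and use 64-bit writes. A sketch of the whole function; the signature, iova arithmetic, and loop bounds are inferred:

static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
                                      size_t granule, void *cookie, int reg)
{
        struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        int idx = cfg->cbndx;

        if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
                wmb();

        if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
                /* VA[31:12] with the ASID folded into the low bits */
                iova = (iova >> 12) << 12;
                iova |= cfg->asid;
                do {
                        arm_smmu_cb_write(smmu, idx, reg, iova);
                        iova += granule;
                } while (size -= granule);
        } else {
                /* 64-bit TLBI: page number plus ASID in [63:48] */
                iova >>= 12;
                iova |= (unsigned long)cfg->asid << 48;
                do {
                        arm_smmu_cb_writeq(smmu, idx, reg, iova);
                        iova += granule >> 12;
                } while (size -= granule);
        }
}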
312 struct arm_smmu_device *smmu = smmu_domain->smmu;
315 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
321 arm_smmu_cb_writeq(smmu, idx, reg, iova);
323 arm_smmu_cb_write(smmu, idx, reg, iova);
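The stage-2 counterpart on lines 312-323 has no ASID to encode and simply picks the write width from the context format. Reconstructed under the same caveats:

static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
                                      size_t granule, void *cookie, int reg)
{
        struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        int idx = smmu_domain->cfg.cbndx;

        if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
                wmb();

        iova >>= 12;
        do {
                if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
                        arm_smmu_cb_writeq(smmu, idx, reg, iova);
                else
                        arm_smmu_cb_write(smmu, idx, reg, iova);
                iova += granule >> 12;
        } while (size -= granule);
}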
393 struct arm_smmu_device *smmu = smmu_domain->smmu;
395 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
398 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
428 struct arm_smmu_device *smmu = smmu_domain->smmu;
431 fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
435 fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
436 iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
437 cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
439 dev_err_ratelimited(smmu->dev,
443 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
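Lines 428-443 are the per-context fault IRQ handler: read FSR, bail if no fault is latched, gather the syndrome registers, log, and write FSR back to clear it. Reconstructed (the IRQ_NONE early-out and the exact log format are inferred):

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
        u32 fsr, fsynr, cbfrsynra;
        unsigned long iova;
        struct iommu_domain *domain = dev;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        int idx = smmu_domain->cfg.cbndx;

        fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
        if (!(fsr & ARM_SMMU_FSR_FAULT))
                return IRQ_NONE;

        fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
        iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
        cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));

        dev_err_ratelimited(smmu->dev,
        "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
                            fsr, iova, fsynr, cbfrsynra, idx);

        /* Write-one-to-clear the latched fault status */
        arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
        return IRQ_HANDLED;
}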
450 struct arm_smmu_device *smmu = dev;
454 gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
455 gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
456 gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
457 gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);
465 dev_err(smmu->dev,
466 "Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
469 dev_err(smmu->dev,
471 dev_err(smmu->dev,
476 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
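Lines 450-476 form the global fault handler. A sketch; the ratelimit state, the IRQ_NONE early-out on a clear GFSR, and the branch on the unidentified-stream fault bit are inferred from the two visible error messages:

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
        u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
        struct arm_smmu_device *smmu = dev;
        static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);

        gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
        gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
        gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
        gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);

        if (!gfsr)
                return IRQ_NONE;

        if (__ratelimit(&rs)) {
                if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) &&
                    (gfsr & ARM_SMMU_sGFSR_USF))
                        dev_err(smmu->dev,
                                "Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
                                (u16)gfsynr1);
                else
                        dev_err(smmu->dev,
                                "Unexpected global fault, this could be serious\n");
                dev_err(smmu->dev,
                        "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
                        gfsr, gfsynr0, gfsynr1, gfsynr2);
        }

        /* Write-one-to-clear the latched global fault status */
        arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
        return IRQ_HANDLED;
}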
484 struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
537 void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
541 struct arm_smmu_cb *cb = &smmu->cbs[idx];
546 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
553 if (smmu->version > ARM_SMMU_V1) {
559 if (smmu->features & ARM_SMMU_FEAT_VMID16)
562 arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
567 if (smmu->version < ARM_SMMU_V2)
579 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
583 arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);
590 if (stage1 && smmu->version > ARM_SMMU_V1)
591 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
592 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);
596 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
597 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
598 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
600 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
602 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
608 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
609 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
620 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
624 struct arm_smmu_device *smmu,
627 if (smmu->impl && smmu->impl->alloc_context_bank)
628 return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start);
630 return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks);
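Context-bank allocation defers to an implementation hook when one is provided and otherwise takes the first free bank at or above start via the context_map bitmap. The function name is visible in the call on line 751; only the braces and the remaining parameter lines are filled in here:

static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
                                       struct arm_smmu_device *smmu,
                                       struct device *dev, unsigned int start)
{
        /* Implementations may override context-bank allocation entirely */
        if (smmu->impl && smmu->impl->alloc_context_bank)
                return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start);

        /* First-fit search over the context-bank bitmap */
        return __arm_smmu_alloc_bitmap(smmu->context_map, start,
                                       smmu->num_context_banks);
}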
634 struct arm_smmu_device *smmu,
647 if (smmu_domain->smmu)
652 smmu_domain->smmu = smmu;
674 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
676 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
687 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
691 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
695 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
708 start = smmu->num_s2_context_banks;
709 ias = smmu->va_size;
710 oas = smmu->ipa_size;
732 ias = smmu->ipa_size;
733 oas = smmu->pa_size;
741 if (smmu->version == ARM_SMMU_V2)
751 ret = arm_smmu_alloc_context_bank(smmu_domain, smmu, dev, start);
756 smmu_domain->smmu = smmu;
759 if (smmu->version < ARM_SMMU_V2) {
760 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
761 cfg->irptndx %= smmu->num_context_irqs;
772 .pgsize_bitmap = smmu->pgsize_bitmap,
775 .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
777 .iommu_dev = smmu->dev,
780 if (smmu->impl && smmu->impl->init_context) {
781 ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
809 arm_smmu_write_context_bank(smmu, cfg->cbndx);
815 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
817 if (smmu->impl && smmu->impl->context_fault)
818 context_fault = smmu->impl->context_fault;
822 ret = devm_request_irq(smmu->dev, irq, context_fault,
823 IRQF_SHARED, "arm-smmu-context-fault", domain);
825 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
837 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
838 smmu_domain->smmu = NULL;
847 struct arm_smmu_device *smmu = smmu_domain->smmu;
851 if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
854 ret = arm_smmu_rpm_get(smmu);
862 smmu->cbs[cfg->cbndx].cfg = NULL;
863 arm_smmu_write_context_bank(smmu, cfg->cbndx);
866 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
867 devm_free_irq(smmu->dev, irq, domain);
871 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
873 arm_smmu_rpm_put(smmu);
918 static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
920 struct arm_smmu_smr *smr = smmu->smrs + idx;
924 if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
926 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
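arm_smmu_write_smr packs the stream ID and mask into an SMR; with EXIDS the valid bit moves into the S2CR instead, hence the feature test. Reconstructed (the FIELD_PREP packing of smr->id and smr->mask is inferred):

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
        struct arm_smmu_smr *smr = smmu->smrs + idx;
        u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) |
                  FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask);

        /* Without EXIDS, the valid bit is part of the SMR itself */
        if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
                reg |= ARM_SMMU_SMR_VALID;
        arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
}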
929 static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
931 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
934 if (smmu->impl && smmu->impl->write_s2cr) {
935 smmu->impl->write_s2cr(smmu, idx);
943 if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
944 smmu->smrs[idx].valid)
946 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
949 static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
951 arm_smmu_write_s2cr(smmu, idx);
952 if (smmu->smrs)
953 arm_smmu_write_smr(smmu, idx);
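The S2CR path on lines 929-953 writes the stream-to-context mapping, again with an implementation override, and arm_smmu_write_sme pairs the S2CR write with the SMR write when stream matching is present. A sketch; the FIELD_PREP packing of type/cbndx/privcfg and the early return after the hook are inferred:

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
        struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
        u32 reg;

        /* Implementations may need a quirky S2CR layout */
        if (smmu->impl && smmu->impl->write_s2cr) {
                smmu->impl->write_s2cr(smmu, idx);
                return;
        }

        reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
              FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
              FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);

        /* With EXIDS, the SMR valid bit lives here rather than in the SMR */
        if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
            smmu->smrs[idx].valid)
                reg |= ARM_SMMU_S2CR_EXIDVALID;
        arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
        /* Write the S2CR before letting the SMR match a stream */
        arm_smmu_write_s2cr(smmu, idx);
        if (smmu->smrs)
                arm_smmu_write_smr(smmu, idx);
}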
960 static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
965 if (!smmu->smrs)
975 for (i = 0; i < smmu->num_mapping_groups; i++)
976 if (!smmu->smrs[i].valid)
985 smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask);
986 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
987 smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
988 smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr);
990 smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask);
991 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
992 smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
993 smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
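Lines 960-993 probe which SMR bits the hardware actually implements by writing all-ones patterns to a free register and reading them back, first for the ID field and then for the MASK field. A reconstruction; the free-entry scan target and the goto-style early exit are inferred from the loop on lines 975-976:

static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
        u32 smr;
        int i;

        if (!smmu->smrs)
                return;

        /*
         * Firmware may have left live SMRs behind; only a still-invalid
         * entry is safe to scribble on. If none is free, skip the probe.
         */
        for (i = 0; i < smmu->num_mapping_groups; i++)
                if (!smmu->smrs[i].valid)
                        goto smr_ok;
        return;
smr_ok:
        /* Write all-ones to the ID field and read back the implemented bits */
        smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask);
        arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
        smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
        smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr);

        /* Repeat for the MASK field */
        smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask);
        arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
        smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
        smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
}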
996 static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
998 struct arm_smmu_smr *smrs = smmu->smrs;
1006 for (i = 0; i < smmu->num_mapping_groups; ++i) {
1038 static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1040 if (--smmu->s2crs[idx].count)
1043 smmu->s2crs[idx] = s2cr_init_val;
1044 if (smmu->smrs)
1045 smmu->smrs[idx].valid = false;
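arm_smmu_free_sme drops a reference on a stream-map entry and, on the last put, restores the S2CR default and invalidates the SMR; the boolean result tells the caller whether the hardware entry needs rewriting. Plausibly in full:

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
        /* Entry still shared with another master: nothing to write back */
        if (--smmu->s2crs[idx].count)
                return false;

        smmu->s2crs[idx] = s2cr_init_val;
        if (smmu->smrs)
                smmu->smrs[idx].valid = false;

        return true;
}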
1054 struct arm_smmu_device *smmu = cfg->smmu;
1055 struct arm_smmu_smr *smrs = smmu->smrs;
1058 mutex_lock(&smmu->stream_map_mutex);
1069 ret = arm_smmu_find_sme(smmu, sid, mask);
1074 if (smrs && smmu->s2crs[idx].count == 0) {
1079 smmu->s2crs[idx].count++;
1085 arm_smmu_write_sme(smmu, idx);
1087 mutex_unlock(&smmu->stream_map_mutex);
1092 arm_smmu_free_sme(smmu, cfg->smendx[i]);
1095 mutex_unlock(&smmu->stream_map_mutex);
1102 struct arm_smmu_device *smmu = cfg->smmu;
1105 mutex_lock(&smmu->stream_map_mutex);
1107 if (arm_smmu_free_sme(smmu, idx))
1108 arm_smmu_write_sme(smmu, idx);
1111 mutex_unlock(&smmu->stream_map_mutex);
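Lines 1102-1111 release a master's stream-map entries under the stream-map mutex, rewriting only the entries whose refcount dropped to zero. Reconstructed; the for_each_cfg_sme iteration and the INVALID_SMENDX reset are inferred from the surrounding allocation path on lines 1054-1095:

static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg,
                                      struct iommu_fwspec *fwspec)
{
        struct arm_smmu_device *smmu = cfg->smmu;
        int i, idx;

        mutex_lock(&smmu->stream_map_mutex);
        for_each_cfg_sme(cfg, fwspec, i, idx) {
                if (arm_smmu_free_sme(smmu, idx))
                        arm_smmu_write_sme(smmu, idx);
                cfg->smendx[i] = INVALID_SMENDX;
        }
        mutex_unlock(&smmu->stream_map_mutex);
}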
1118 struct arm_smmu_device *smmu = smmu_domain->smmu;
1119 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
1136 arm_smmu_write_s2cr(smmu, idx);
1146 struct arm_smmu_device *smmu;
1165 smmu = cfg->smmu;
1167 ret = arm_smmu_rpm_get(smmu);
1172 ret = arm_smmu_init_domain_context(domain, smmu, dev);
1180 if (smmu_domain->smmu != smmu) {
1183 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
1202 pm_runtime_set_autosuspend_delay(smmu->dev, 20);
1203 pm_runtime_use_autosuspend(smmu->dev);
1206 arm_smmu_rpm_put(smmu);
1214 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1220 arm_smmu_rpm_get(smmu);
1222 arm_smmu_rpm_put(smmu);
1231 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1237 arm_smmu_rpm_get(smmu);
1239 arm_smmu_rpm_put(smmu);
1247 struct arm_smmu_device *smmu = smmu_domain->smmu;
1250 arm_smmu_rpm_get(smmu);
1252 arm_smmu_rpm_put(smmu);
1260 struct arm_smmu_device *smmu = smmu_domain->smmu;
1262 if (!smmu)
1265 arm_smmu_rpm_get(smmu);
1266 if (smmu->version == ARM_SMMU_V2 ||
1270 arm_smmu_tlb_sync_global(smmu);
1271 arm_smmu_rpm_put(smmu);
1278 struct arm_smmu_device *smmu = smmu_domain->smmu;
1281 struct device *dev = smmu->dev;
1289 ret = arm_smmu_rpm_get(smmu);
1296 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
1298 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
1300 reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
1307 arm_smmu_rpm_put(smmu);
1311 phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
1321 arm_smmu_rpm_put(smmu);
1338 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1372 struct arm_smmu_device *smmu = NULL;
1378 ret = arm_smmu_register_legacy_master(dev, &smmu);
1389 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
1399 if (sid & ~smmu->streamid_mask) {
1401 sid, smmu->streamid_mask);
1404 if (mask & ~smmu->smr_mask_mask) {
1406 mask, smmu->smr_mask_mask);
1417 cfg->smmu = smmu;
1422 ret = arm_smmu_rpm_get(smmu);
1427 arm_smmu_rpm_put(smmu);
1432 device_link_add(dev, smmu->dev,
1435 return &smmu->iommu;
1448 struct arm_smmu_device *smmu;
1455 smmu = cfg->smmu;
1457 ret = arm_smmu_rpm_get(smmu);
1463 arm_smmu_rpm_put(smmu);
1474 struct arm_smmu_device *smmu = cfg->smmu;
1479 if (group && smmu->s2crs[idx].group &&
1480 group != smmu->s2crs[idx].group)
1483 group = smmu->s2crs[idx].group;
1499 smmu->s2crs[idx].group = group;
1545 if (smmu_domain->smmu) {
1610 const struct arm_smmu_impl *impl = cfg->smmu->impl;
1640 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1646 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
1647 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);
1653 for (i = 0; i < smmu->num_mapping_groups; ++i)
1654 arm_smmu_write_sme(smmu, i);
1657 for (i = 0; i < smmu->num_context_banks; ++i) {
1658 arm_smmu_write_context_bank(smmu, i);
1659 arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);
1663 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
1664 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);
1666 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
1688 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1691 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1694 if (smmu->impl && smmu->impl->reset)
1695 smmu->impl->reset(smmu);
1698 arm_smmu_tlb_sync_global(smmu);
1699 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
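Lines 1640-1699 are the device reset sequence: clear the latched global fault status, park every stream-map entry and context bank in a known state, flush the TLBs, then build up and commit sCR0. A sketch of the whole function; the sCR0 flag choices between the matched lines are inferences from the feature tests that are visible:

static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
        int i;
        u32 reg;

        /* Clear the latched global fault status (write-one-to-clear) */
        reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
        arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);

        /* Reset every stream-map entry to its initial state */
        for (i = 0; i < smmu->num_mapping_groups; ++i)
                arm_smmu_write_sme(smmu, i);

        /* Disable all context banks and clear their fault status */
        for (i = 0; i < smmu->num_context_banks; ++i) {
                arm_smmu_write_context_bank(smmu, i);
                arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);
        }

        /* Invalidate hypervisor and non-secure non-hypervisor TLB entries */
        arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
        arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);

        reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);

        /* Enable fault reporting, disable TLB broadcast, allow client access */
        reg |= ARM_SMMU_sCR0_GFRE | ARM_SMMU_sCR0_GFIE |
               ARM_SMMU_sCR0_GCFGFRE | ARM_SMMU_sCR0_GCFGFIE;
        reg |= ARM_SMMU_sCR0_VMIDPNE | ARM_SMMU_sCR0_PTM;
        reg &= ~ARM_SMMU_sCR0_CLIENTPD;
        if (disable_bypass)                     /* driver module parameter */
                reg |= ARM_SMMU_sCR0_USFCFG;
        else
                reg &= ~ARM_SMMU_sCR0_USFCFG;
        reg &= ~ARM_SMMU_sCR0_FB;
        reg &= ~ARM_SMMU_sCR0_BSU;

        if (smmu->features & ARM_SMMU_FEAT_VMID16)
                reg |= ARM_SMMU_sCR0_VMID16EN;

        if (smmu->features & ARM_SMMU_FEAT_EXIDS)
                reg |= ARM_SMMU_sCR0_EXIDENABLE;

        if (smmu->impl && smmu->impl->reset)
                smmu->impl->reset(smmu);

        /* Publish everything, then turn the SMMU on */
        arm_smmu_tlb_sync_global(smmu);
        arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
}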
1721 static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1725 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
1728 dev_notice(smmu->dev, "probing hardware configuration...\n");
1729 dev_notice(smmu->dev, "SMMUv%d with:\n",
1730 smmu->version == ARM_SMMU_V2 ? 2 : 1);
1733 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);
1742 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1743 dev_notice(smmu->dev, "\tstage 1 translation\n");
1747 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1748 dev_notice(smmu->dev, "\tstage 2 translation\n");
1752 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1753 dev_notice(smmu->dev, "\tnested translation\n");
1756 if (!(smmu->features &
1758 dev_err(smmu->dev, "\tno translation support!\n");
1763 ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) {
1764 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1765 dev_notice(smmu->dev, "\taddress translation ops\n");
1776 dev_notice(smmu->dev, "\t%scoherent table walk\n",
1779 dev_notice(smmu->dev,
1783 if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) {
1784 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1789 smmu->streamid_mask = size - 1;
1791 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
1794 dev_err(smmu->dev,
1800 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1802 if (!smmu->smrs)
1805 dev_notice(smmu->dev,
1809 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1811 if (!smmu->s2crs)
1814 smmu->s2crs[i] = s2cr_init_val;
1816 smmu->num_mapping_groups = size;
1817 mutex_init(&smmu->stream_map_mutex);
1818 spin_lock_init(&smmu->global_sync_lock);
1820 if (smmu->version < ARM_SMMU_V2 ||
1822 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1824 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1828 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
1829 smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12;
1833 if (smmu->numpage != 2 * size << smmu->pgshift)
1834 dev_warn(smmu->dev,
1836 2 * size << smmu->pgshift, smmu->numpage);
1838 smmu->numpage = size;
1840 smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id);
1841 smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id);
1842 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1843 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1846 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1847 smmu->num_context_banks, smmu->num_s2_context_banks);
1848 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1849 sizeof(*smmu->cbs), GFP_KERNEL);
1850 if (!smmu->cbs)
1854 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
1856 smmu->ipa_size = size;
1860 smmu->pa_size = size;
1863 smmu->features |= ARM_SMMU_FEAT_VMID16;
1870 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1871 dev_warn(smmu->dev,
1874 if (smmu->version < ARM_SMMU_V2) {
1875 smmu->va_size = smmu->ipa_size;
1876 if (smmu->version == ARM_SMMU_V1_64K)
1877 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
1880 smmu->va_size = arm_smmu_id_size_to_bits(size);
1882 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
1884 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
1886 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
1889 if (smmu->impl && smmu->impl->cfg_probe) {
1890 ret = smmu->impl->cfg_probe(smmu);
1896 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
1897 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
1898 if (smmu->features &
1900 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
1901 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
1902 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
1903 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
1904 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
1907 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
1909 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
1910 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
1911 smmu->pgsize_bitmap);
1914 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1915 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
1916 smmu->va_size, smmu->ipa_size);
1918 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1919 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
1920 smmu->ipa_size, smmu->pa_size);
1941 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1942 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1946 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
1947 { .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
1948 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
1954 static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
1961 smmu->version = ARM_SMMU_V1;
1962 smmu->model = GENERIC_SMMU;
1965 smmu->version = ARM_SMMU_V1_64K;
1966 smmu->model = GENERIC_SMMU;
1969 smmu->version = ARM_SMMU_V2;
1970 smmu->model = GENERIC_SMMU;
1973 smmu->version = ARM_SMMU_V2;
1974 smmu->model = ARM_MMU500;
1977 smmu->version = ARM_SMMU_V2;
1978 smmu->model = CAVIUM_SMMUV2;
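Lines 1954-1978 map IORT model codes onto driver version/model pairs. The assignments suggest a switch of this shape; the ACPI_IORT_SMMU_* case labels are inferred from the values assigned:

static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
{
        int ret = 0;

        switch (model) {
        case ACPI_IORT_SMMU_V1:
        case ACPI_IORT_SMMU_CORELINK_MMU400:
                smmu->version = ARM_SMMU_V1;
                smmu->model = GENERIC_SMMU;
                break;
        case ACPI_IORT_SMMU_CORELINK_MMU401:
                smmu->version = ARM_SMMU_V1_64K;
                smmu->model = GENERIC_SMMU;
                break;
        case ACPI_IORT_SMMU_V2:
                smmu->version = ARM_SMMU_V2;
                smmu->model = GENERIC_SMMU;
                break;
        case ACPI_IORT_SMMU_CORELINK_MMU500:
                smmu->version = ARM_SMMU_V2;
                smmu->model = ARM_MMU500;
                break;
        case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
                smmu->version = ARM_SMMU_V2;
                smmu->model = CAVIUM_SMMUV2;
                break;
        default:
                ret = -ENODEV;
        }

        return ret;
}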
1988 struct arm_smmu_device *smmu)
1990 struct device *dev = smmu->dev;
1999 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
2004 smmu->num_global_irqs = 1;
2007 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2013 struct arm_smmu_device *smmu)
2020 struct arm_smmu_device *smmu)
2027 &smmu->num_global_irqs)) {
2033 smmu->version = data->version;
2034 smmu->model = data->model;
2051 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2106 struct arm_smmu_device *smmu;
2111 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2112 if (!smmu) {
2116 smmu->dev = dev;
2119 err = arm_smmu_device_dt_probe(pdev, smmu);
2121 err = arm_smmu_device_acpi_probe(pdev, smmu);
2126 smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
2127 if (IS_ERR(smmu->base))
2128 return PTR_ERR(smmu->base);
2134 smmu->numpage = resource_size(res);
2136 smmu = arm_smmu_impl_init(smmu);
2137 if (IS_ERR(smmu))
2138 return PTR_ERR(smmu);
2143 if (num_irqs > smmu->num_global_irqs)
2144 smmu->num_context_irqs++;
2147 if (!smmu->num_context_irqs) {
2149 num_irqs, smmu->num_global_irqs + 1);
2153 smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
2155 if (!smmu->irqs) {
2165 smmu->irqs[i] = irq;
2168 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2173 smmu->num_clks = err;
2175 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2179 err = arm_smmu_device_cfg_probe(smmu);
2183 if (smmu->version == ARM_SMMU_V2) {
2184 if (smmu->num_context_banks > smmu->num_context_irqs) {
2187 smmu->num_context_irqs, smmu->num_context_banks);
2192 smmu->num_context_irqs = smmu->num_context_banks;
2195 if (smmu->impl && smmu->impl->global_fault)
2196 global_fault = smmu->impl->global_fault;
2200 for (i = 0; i < smmu->num_global_irqs; ++i) {
2201 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2204 "arm-smmu global fault",
2205 smmu);
2208 i, smmu->irqs[i]);
2213 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2214 "smmu.%pa", &ioaddr);
2220 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2221 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2223 err = iommu_device_register(&smmu->iommu);
2229 platform_set_drvdata(pdev, smmu);
2230 arm_smmu_device_reset(smmu);
2231 arm_smmu_test_smr_masks(smmu);
2257 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
2259 if (!smmu)
2262 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
2266 iommu_device_unregister(&smmu->iommu);
2267 iommu_device_sysfs_remove(&smmu->iommu);
2269 arm_smmu_rpm_get(smmu);
2271 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD);
2272 arm_smmu_rpm_put(smmu);
2274 if (pm_runtime_enabled(smmu->dev))
2275 pm_runtime_force_suspend(smmu->dev);
2277 clk_bulk_disable(smmu->num_clks, smmu->clks);
2279 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
2290 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2293 ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2297 arm_smmu_device_reset(smmu);
2304 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2306 clk_bulk_disable(smmu->num_clks, smmu->clks);
2335 .name = "arm-smmu",
2348 MODULE_ALIAS("platform:arm-smmu");