Lines matching refs:smmu — cross-reference hits for the identifier smmu in the Arm SMMU v1/v2 driver (arm-smmu.c). Each entry below is prefixed with its line number in that file; non-matching lines are elided.
18 #define pr_fmt(fmt) "arm-smmu: " fmt
40 #include "arm-smmu.h"
71 static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
73 if (pm_runtime_enabled(smmu->dev))
74 return pm_runtime_resume_and_get(smmu->dev);
79 static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
81 if (pm_runtime_enabled(smmu->dev))
82 pm_runtime_put_autosuspend(smmu->dev);
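The two matches above elide the helpers' fall-through paths; reconstructed from mainline (the braces and the final return 0 are inferred from the matched lines), they read:

static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
	/* Resume only when runtime PM is actually enabled for this SMMU. */
	if (pm_runtime_enabled(smmu->dev))
		return pm_runtime_resume_and_get(smmu->dev);

	return 0;
}

static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
	/*
	 * Autosuspend defers the power-down so that bursts of register
	 * access do not bounce the power domain on every call.
	 */
	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_put_autosuspend(smmu->dev);
}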
130 struct arm_smmu_device **smmu)
173 *smmu = dev_get_drvdata(smmu_dev);
181 struct arm_smmu_device **smmu)
193 static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
199 if (smmu->impl && unlikely(smmu->impl->tlb_sync))
200 return smmu->impl->tlb_sync(smmu, page, sync, status);
202 arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
205 reg = arm_smmu_readl(smmu, page, status);
212 dev_err_ratelimited(smmu->dev,
216 static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
220 spin_lock_irqsave(&smmu->global_sync_lock, flags);
221 __arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
223 spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
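Between matched lines 202 and 212 sits the poll loop that waits for the sync to drain. A sketch following mainline (the TLB_SPIN_COUNT/TLB_LOOP_TIMEOUT constants and the exact loop shape are assumed from the driver's own definitions):

	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			reg = arm_smmu_readl(smmu, page, status);
			/* GSACTIVE clears once the sync has completed. */
			if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);	/* exponential back-off between spin bursts */
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");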
228 struct arm_smmu_device *smmu = smmu_domain->smmu;
232 __arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
245 arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
253 struct arm_smmu_device *smmu = smmu_domain->smmu;
257 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
258 arm_smmu_tlb_sync_global(smmu);
265 struct arm_smmu_device *smmu = smmu_domain->smmu;
269 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
276 arm_smmu_cb_write(smmu, idx, reg, iova);
283 arm_smmu_cb_writeq(smmu, idx, reg, iova);
293 struct arm_smmu_device *smmu = smmu_domain->smmu;
296 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
302 arm_smmu_cb_writeq(smmu, idx, reg, iova);
304 arm_smmu_cb_write(smmu, idx, reg, iova);
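Lines 265-304 are the per-granule invalidate loops: the ASID is packed into the low bits of the written value for AArch32-format contexts and into the high bits for AArch64. A sketch of the AArch64 branch around matched line 302, assuming the mainline layout:

		iova >>= 12;
		iova |= (u64)cfg->asid << 48;	/* TLBIVA: VA[48:12] + ASID */
		do {
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
			iova += granule >> 12;
		} while (size -= granule);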
365 struct arm_smmu_device *smmu = smmu_domain->smmu;
367 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
370 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
397 struct arm_smmu_device *smmu = smmu_domain->smmu;
401 fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
405 fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
406 iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
407 cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
413 dev_err_ratelimited(smmu->dev,
417 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
424 struct arm_smmu_device *smmu = dev;
428 gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
429 gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
430 gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
431 gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);
439 dev_err(smmu->dev,
440 "Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
443 dev_err(smmu->dev,
445 dev_err(smmu->dev,
450 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
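The context-fault path reduces to: read the syndrome registers, log them rate-limited, then write FSR back to itself to acknowledge (its bits are write-one-to-clear). A condensed sketch of lines 397-417, assuming the mainline flow:

	fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
	if (!(fsr & ARM_SMMU_FSR_FAULT))
		return IRQ_NONE;

	fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
	iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
	cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));

	dev_err_ratelimited(smmu->dev,
		"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
		fsr, iova, fsynr, cbfrsynra, idx);

	/* FSR is W1C: writing the value back clears the recorded fault. */
	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
	return IRQ_HANDLED;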
458 struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
511 void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
515 struct arm_smmu_cb *cb = &smmu->cbs[idx];
520 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
527 if (smmu->version > ARM_SMMU_V1) {
533 if (smmu->features & ARM_SMMU_FEAT_VMID16)
536 arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
541 if (smmu->version < ARM_SMMU_V2)
553 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
557 arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);
564 if (stage1 && smmu->version > ARM_SMMU_V1)
565 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
566 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);
570 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
571 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
572 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
574 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
576 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
582 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
583 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
594 if (smmu->impl && smmu->impl->write_sctlr)
595 smmu->impl->write_sctlr(smmu, idx, reg);
597 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
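The SCTLR value handed to the impl hook at line 594 is composed just above it; per mainline the enable set is (sketch):

	/*
	 * Context fault reporting (CFIE/CFRE), access flag, TEX remap and
	 * the translation enable bit; ASID-private TLBs apply to stage 1 only.
	 */
	reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE | ARM_SMMU_SCTLR_AFE |
	      ARM_SMMU_SCTLR_TRE | ARM_SMMU_SCTLR_M;
	if (stage1)
		reg |= ARM_SMMU_SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= ARM_SMMU_SCTLR_E;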
601 struct arm_smmu_device *smmu,
604 if (smmu->impl && smmu->impl->alloc_context_bank)
605 return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start);
607 return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks);
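The default allocator called at line 607 is a small lockless bitmap search defined in arm-smmu.h; reproduced here as a sketch:

static inline int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
		/* Retry if another CPU claimed the bit first. */
	} while (test_and_set_bit(idx, map));

	return idx;
}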
611 struct arm_smmu_device *smmu,
624 if (smmu_domain->smmu)
629 smmu_domain->smmu = smmu;
651 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
653 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
664 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
668 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
672 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
685 start = smmu->num_s2_context_banks;
686 ias = smmu->va_size;
687 oas = smmu->ipa_size;
709 ias = smmu->ipa_size;
710 oas = smmu->pa_size;
718 if (smmu->version == ARM_SMMU_V2)
728 ret = arm_smmu_alloc_context_bank(smmu_domain, smmu, dev, start);
733 smmu_domain->smmu = smmu;
736 if (smmu->version < ARM_SMMU_V2) {
737 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
738 cfg->irptndx %= smmu->num_context_irqs;
749 .pgsize_bitmap = smmu->pgsize_bitmap,
752 .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
754 .iommu_dev = smmu->dev,
757 if (smmu->impl && smmu->impl->init_context) {
758 ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
786 arm_smmu_write_context_bank(smmu, cfg->cbndx);
792 irq = smmu->irqs[cfg->irptndx];
794 if (smmu->impl && smmu->impl->context_fault)
795 context_fault = smmu->impl->context_fault;
799 ret = devm_request_irq(smmu->dev, irq, context_fault,
800 IRQF_SHARED, "arm-smmu-context-fault", domain);
802 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
814 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
815 smmu_domain->smmu = NULL;
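The io_pgtable configuration assembled around lines 749-754 hands the SMMU's probed limits to the generic page-table code; the matched fields slot together roughly as follows (sketch, field set per mainline):

	struct io_pgtable_cfg pgtbl_cfg = {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,	/* input address bits (VA or IPA) */
		.oas		= oas,	/* output address bits (IPA or PA) */
		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
		.tlb		= smmu_domain->flush_ops,
		.iommu_dev	= smmu->dev,
	};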
824 struct arm_smmu_device *smmu = smmu_domain->smmu;
828 if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
831 ret = arm_smmu_rpm_get(smmu);
839 smmu->cbs[cfg->cbndx].cfg = NULL;
840 arm_smmu_write_context_bank(smmu, cfg->cbndx);
843 irq = smmu->irqs[cfg->irptndx];
844 devm_free_irq(smmu->dev, irq, domain);
848 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
850 arm_smmu_rpm_put(smmu);
888 static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
890 struct arm_smmu_smr *smr = smmu->smrs + idx;
894 if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
896 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
899 static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
901 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
904 if (smmu->impl && smmu->impl->write_s2cr) {
905 smmu->impl->write_s2cr(smmu, idx);
913 if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
914 smmu->smrs[idx].valid)
916 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
919 static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
921 arm_smmu_write_s2cr(smmu, idx);
922 if (smmu->smrs)
923 arm_smmu_write_smr(smmu, idx);
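Both register writers are straight FIELD_PREP compositions. The SMR writer elided around lines 890-896 reads as below; the S2CR writer mirrors it with TYPE/CBNDX/PRIVCFG fields plus EXIDVALID, which is why the two feature tests above guard where the valid bit lives on EXIDS hardware:

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) |
		  FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask);

	/* Pre-EXIDS hardware keeps the valid bit in the SMR itself. */
	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= ARM_SMMU_SMR_VALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
}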
930 static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
935 if (!smmu->smrs)
945 for (i = 0; i < smmu->num_mapping_groups; i++)
946 if (!smmu->smrs[i].valid)
955 smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask);
956 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
957 smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
958 smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr);
960 smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask);
961 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
962 smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
963 smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
966 static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
968 struct arm_smmu_smr *smrs = smmu->smrs;
976 for (i = 0; i < smmu->num_mapping_groups; ++i) {
1008 static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1010 if (--smmu->s2crs[idx].count)
1013 smmu->s2crs[idx] = s2cr_init_val;
1014 if (smmu->smrs)
1015 smmu->smrs[idx].valid = false;
1024 struct arm_smmu_device *smmu = cfg->smmu;
1025 struct arm_smmu_smr *smrs = smmu->smrs;
1028 mutex_lock(&smmu->stream_map_mutex);
1039 ret = arm_smmu_find_sme(smmu, sid, mask);
1044 if (smrs && smmu->s2crs[idx].count == 0) {
1049 smmu->s2crs[idx].count++;
1055 arm_smmu_write_sme(smmu, idx);
1057 mutex_unlock(&smmu->stream_map_mutex);
1062 arm_smmu_free_sme(smmu, cfg->smendx[i]);
1065 mutex_unlock(&smmu->stream_map_mutex);
1072 struct arm_smmu_device *smmu = cfg->smmu;
1075 mutex_lock(&smmu->stream_map_mutex);
1077 if (arm_smmu_free_sme(smmu, idx))
1078 arm_smmu_write_sme(smmu, idx);
1081 mutex_unlock(&smmu->stream_map_mutex);
1088 struct arm_smmu_device *smmu = smmu_domain->smmu;
1089 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
1106 arm_smmu_write_s2cr(smmu, idx);
1116 struct arm_smmu_device *smmu;
1135 smmu = cfg->smmu;
1137 ret = arm_smmu_rpm_get(smmu);
1142 ret = arm_smmu_init_domain_context(domain, smmu, dev);
1150 if (smmu_domain->smmu != smmu) {
1169 pm_runtime_set_autosuspend_delay(smmu->dev, 20);
1170 pm_runtime_use_autosuspend(smmu->dev);
1173 arm_smmu_rpm_put(smmu);
1182 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1188 arm_smmu_rpm_get(smmu);
1190 arm_smmu_rpm_put(smmu);
1200 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1206 arm_smmu_rpm_get(smmu);
1208 arm_smmu_rpm_put(smmu);
1216 struct arm_smmu_device *smmu = smmu_domain->smmu;
1219 arm_smmu_rpm_get(smmu);
1221 arm_smmu_rpm_put(smmu);
1229 struct arm_smmu_device *smmu = smmu_domain->smmu;
1231 if (!smmu)
1234 arm_smmu_rpm_get(smmu);
1235 if (smmu->version == ARM_SMMU_V2 ||
1239 arm_smmu_tlb_sync_global(smmu);
1240 arm_smmu_rpm_put(smmu);
1247 struct arm_smmu_device *smmu = smmu_domain->smmu;
1250 struct device *dev = smmu->dev;
1258 ret = arm_smmu_rpm_get(smmu);
1265 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
1267 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
1269 reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
1276 arm_smmu_rpm_put(smmu);
1280 phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
1290 arm_smmu_rpm_put(smmu);
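Hardware-assisted iova_to_phys works by writing the VA into ATS1PR, polling ATSR until the translation completes, then reading the result from PAR. A sketch of the elided poll and fault check between matched lines 1269 and 1280, assuming mainline's readl_poll_timeout_atomic() usage:

	reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
	if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ARM_SMMU_ATSR_ACTIVE),
				      5, 50)) {
		/* Translation never completed: fall back to a software walk. */
		spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
		arm_smmu_rpm_put(smmu);
		return ops->iova_to_phys(ops, iova);
	}

	phys = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_PAR);
	if (phys & ARM_SMMU_CB_PAR_F)	/* fault bit set: no valid mapping */
		phys = 0;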
1304 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1323 return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK ||
1344 struct arm_smmu_device *smmu = NULL;
1350 ret = arm_smmu_register_legacy_master(dev, &smmu);
1361 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
1371 if (sid & ~smmu->streamid_mask) {
1373 sid, smmu->streamid_mask);
1376 if (mask & ~smmu->smr_mask_mask) {
1378 mask, smmu->smr_mask_mask);
1389 cfg->smmu = smmu;
1394 ret = arm_smmu_rpm_get(smmu);
1399 arm_smmu_rpm_put(smmu);
1404 device_link_add(dev, smmu->dev,
1407 return &smmu->iommu;
1422 ret = arm_smmu_rpm_get(cfg->smmu);
1428 arm_smmu_rpm_put(cfg->smmu);
1437 struct arm_smmu_device *smmu;
1440 smmu = cfg->smmu;
1442 if (smmu->impl && smmu->impl->probe_finalize)
1443 smmu->impl->probe_finalize(smmu, dev);
1450 struct arm_smmu_device *smmu = cfg->smmu;
1454 mutex_lock(&smmu->stream_map_mutex);
1456 if (group && smmu->s2crs[idx].group &&
1457 group != smmu->s2crs[idx].group) {
1458 mutex_unlock(&smmu->stream_map_mutex);
1462 group = smmu->s2crs[idx].group;
1466 mutex_unlock(&smmu->stream_map_mutex);
1480 smmu->s2crs[idx].group = group;
1482 mutex_unlock(&smmu->stream_map_mutex);
1492 if (smmu_domain->smmu)
1508 if (smmu_domain->smmu)
1551 const struct arm_smmu_impl *impl = cfg->smmu->impl;
1587 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1593 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
1594 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, reg);
1600 for (i = 0; i < smmu->num_mapping_groups; ++i)
1601 arm_smmu_write_sme(smmu, i);
1604 for (i = 0; i < smmu->num_context_banks; ++i) {
1605 arm_smmu_write_context_bank(smmu, i);
1606 arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);
1610 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
1611 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);
1613 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
1635 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1638 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1641 if (smmu->impl && smmu->impl->reset)
1642 smmu->impl->reset(smmu);
1645 arm_smmu_tlb_sync_global(smmu);
1646 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
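Between matched lines 1613 and 1635 the driver rewrites sCR0 wholesale; per mainline the intent is to enable global fault reporting, keep TLB maintenance private to the SMMU, and by default fault unmatched streams rather than let them bypass. A sketch:

	/* Enable global fault reporting. */
	reg |= ARM_SMMU_sCR0_GFRE | ARM_SMMU_sCR0_GFIE |
	       ARM_SMMU_sCR0_GCFGFRE | ARM_SMMU_sCR0_GCFGFIE;

	/* Disable TLB broadcasting. */
	reg |= ARM_SMMU_sCR0_VMIDPNE | ARM_SMMU_sCR0_PTM;

	/*
	 * Enable client access; unmatched streams fault unless the
	 * arm-smmu.disable_bypass=0 parameter overrides it.
	 */
	reg &= ~ARM_SMMU_sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= ARM_SMMU_sCR0_USFCFG;
	else
		reg &= ~ARM_SMMU_sCR0_USFCFG;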
1668 static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1672 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
1675 dev_notice(smmu->dev, "probing hardware configuration...\n");
1676 dev_notice(smmu->dev, "SMMUv%d with:\n",
1677 smmu->version == ARM_SMMU_V2 ? 2 : 1);
1680 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);
1689 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1690 dev_notice(smmu->dev, "\tstage 1 translation\n");
1694 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1695 dev_notice(smmu->dev, "\tstage 2 translation\n");
1699 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1700 dev_notice(smmu->dev, "\tnested translation\n");
1703 if (!(smmu->features &
1705 dev_err(smmu->dev, "\tno translation support!\n");
1710 ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) {
1711 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1712 dev_notice(smmu->dev, "\taddress translation ops\n");
1723 dev_notice(smmu->dev, "\t%scoherent table walk\n",
1726 dev_notice(smmu->dev,
1730 if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) {
1731 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1736 smmu->streamid_mask = size - 1;
1738 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
1741 dev_err(smmu->dev,
1747 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1749 if (!smmu->smrs)
1752 dev_notice(smmu->dev,
1756 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1758 if (!smmu->s2crs)
1761 smmu->s2crs[i] = s2cr_init_val;
1763 smmu->num_mapping_groups = size;
1764 mutex_init(&smmu->stream_map_mutex);
1765 spin_lock_init(&smmu->global_sync_lock);
1767 if (smmu->version < ARM_SMMU_V2 ||
1769 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1771 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1775 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID1);
1776 smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12;
1780 if (smmu->numpage != 2 * size << smmu->pgshift)
1781 dev_warn(smmu->dev,
1783 2 * size << smmu->pgshift, smmu->numpage);
1785 smmu->numpage = size;
1787 smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id);
1788 smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id);
1789 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1790 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1793 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1794 smmu->num_context_banks, smmu->num_s2_context_banks);
1795 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1796 sizeof(*smmu->cbs), GFP_KERNEL);
1797 if (!smmu->cbs)
1801 id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID2);
1803 smmu->ipa_size = size;
1807 smmu->pa_size = size;
1810 smmu->features |= ARM_SMMU_FEAT_VMID16;
1817 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1818 dev_warn(smmu->dev,
1821 if (smmu->version < ARM_SMMU_V2) {
1822 smmu->va_size = smmu->ipa_size;
1823 if (smmu->version == ARM_SMMU_V1_64K)
1824 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
1827 smmu->va_size = arm_smmu_id_size_to_bits(size);
1829 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
1831 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
1833 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
1836 if (smmu->impl && smmu->impl->cfg_probe) {
1837 ret = smmu->impl->cfg_probe(smmu);
1843 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
1844 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
1845 if (smmu->features &
1847 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
1848 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
1849 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
1850 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
1851 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
1854 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
1856 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
1857 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
1858 smmu->pgsize_bitmap);
1861 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1862 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
1863 smmu->va_size, smmu->ipa_size);
1865 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1866 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
1867 smmu->ipa_size, smmu->pa_size);
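The IAS/OAS/UBS size fields read from ID2 use a 3-bit encoding; the helper applied at lines 1801-1827 decodes it. Its mainline definition, for reference:

static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}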
1888 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1889 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1893 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
1894 { .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
1895 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
1901 static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
1908 smmu->version = ARM_SMMU_V1;
1909 smmu->model = GENERIC_SMMU;
1912 smmu->version = ARM_SMMU_V1_64K;
1913 smmu->model = GENERIC_SMMU;
1916 smmu->version = ARM_SMMU_V2;
1917 smmu->model = GENERIC_SMMU;
1920 smmu->version = ARM_SMMU_V2;
1921 smmu->model = ARM_MMU500;
1924 smmu->version = ARM_SMMU_V2;
1925 smmu->model = CAVIUM_SMMUV2;
1934 static int arm_smmu_device_acpi_probe(struct arm_smmu_device *smmu,
1937 struct device *dev = smmu->dev;
1946 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
1955 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
1960 static inline int arm_smmu_device_acpi_probe(struct arm_smmu_device *smmu,
1967 static int arm_smmu_device_dt_probe(struct arm_smmu_device *smmu,
1971 struct device *dev = smmu->dev;
1980 smmu->version = data->version;
1981 smmu->model = data->model;
1998 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2003 static void arm_smmu_rmr_install_bypass_smr(struct arm_smmu_device *smmu)
2011 iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
2019 reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
2021 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
2029 idx = arm_smmu_find_sme(smmu, rmr->sids[i], ~0);
2033 if (smmu->s2crs[idx].count == 0) {
2034 smmu->smrs[idx].id = rmr->sids[i];
2035 smmu->smrs[idx].mask = 0;
2036 smmu->smrs[idx].valid = true;
2038 smmu->s2crs[idx].count++;
2039 smmu->s2crs[idx].type = S2CR_TYPE_BYPASS;
2040 smmu->s2crs[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
2046 dev_notice(smmu->dev, "\tpreserved %d boot mapping%s\n", cnt,
2048 iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
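One detail the matches skip: around lines 2019-2021 the driver does not try to audit firmware-era mappings against the RMR list; it simply sets CLIENTPD to switch client translation off until arm_smmu_device_reset() re-enables it (sketch per mainline):

	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
	reg |= ARM_SMMU_sCR0_CLIENTPD;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);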
2054 struct arm_smmu_device *smmu;
2060 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2061 if (!smmu) {
2065 smmu->dev = dev;
2068 err = arm_smmu_device_dt_probe(smmu, &global_irqs, &pmu_irqs);
2070 err = arm_smmu_device_acpi_probe(smmu, &global_irqs, &pmu_irqs);
2074 smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
2075 if (IS_ERR(smmu->base))
2076 return PTR_ERR(smmu->base);
2077 smmu->ioaddr = res->start;
2083 smmu->numpage = resource_size(res);
2085 smmu = arm_smmu_impl_init(smmu);
2086 if (IS_ERR(smmu))
2087 return PTR_ERR(smmu);
2091 smmu->num_context_irqs = num_irqs - global_irqs - pmu_irqs;
2092 if (smmu->num_context_irqs <= 0)
2097 smmu->irqs = devm_kcalloc(dev, smmu->num_context_irqs,
2098 sizeof(*smmu->irqs), GFP_KERNEL);
2099 if (!smmu->irqs)
2101 smmu->num_context_irqs);
2103 for (i = 0; i < smmu->num_context_irqs; i++) {
2108 smmu->irqs[i] = irq;
2111 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2116 smmu->num_clks = err;
2118 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2122 err = arm_smmu_device_cfg_probe(smmu);
2126 if (smmu->version == ARM_SMMU_V2) {
2127 if (smmu->num_context_banks > smmu->num_context_irqs) {
2130 smmu->num_context_irqs, smmu->num_context_banks);
2135 smmu->num_context_irqs = smmu->num_context_banks;
2138 if (smmu->impl && smmu->impl->global_fault)
2139 global_fault = smmu->impl->global_fault;
2150 "arm-smmu global fault", smmu);
2157 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2158 "smmu.%pa", &smmu->ioaddr);
2164 err = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
2167 iommu_device_sysfs_remove(&smmu->iommu);
2171 platform_set_drvdata(pdev, smmu);
2174 arm_smmu_rmr_install_bypass_smr(smmu);
2176 arm_smmu_device_reset(smmu);
2177 arm_smmu_test_smr_masks(smmu);
2195 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
2197 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
2200 arm_smmu_rpm_get(smmu);
2202 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD);
2203 arm_smmu_rpm_put(smmu);
2205 if (pm_runtime_enabled(smmu->dev))
2206 pm_runtime_force_suspend(smmu->dev);
2208 clk_bulk_disable(smmu->num_clks, smmu->clks);
2210 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
2215 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
2217 iommu_device_unregister(&smmu->iommu);
2218 iommu_device_sysfs_remove(&smmu->iommu);
2225 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2228 ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2232 arm_smmu_device_reset(smmu);
2239 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2241 clk_bulk_disable(smmu->num_clks, smmu->clks);
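The two runtime-PM callbacks are thin clock wrappers; resume must also replay arm_smmu_device_reset() because register state is lost while the clocks (and possibly the power domain) are off. Reconstructed from the matches, per mainline (sketch):

static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
{
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	int ret;

	ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
	if (ret)
		return ret;

	/* Register state was lost while clocks were gated: reprogram it. */
	arm_smmu_device_reset(smmu);

	return 0;
}

static int __maybe_unused arm_smmu_runtime_suspend(struct device *dev)
{
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);

	clk_bulk_disable(smmu->num_clks, smmu->clks);

	return 0;
}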
2249 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2251 ret = clk_bulk_prepare(smmu->num_clks, smmu->clks);
2260 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
2268 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2278 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
2290 .name = "arm-smmu",
2303 MODULE_ALIAS("platform:arm-smmu");