Lines matching refs:smmu

30 #include "arm-smmu-v3.h"
89 static void parse_driver_options(struct arm_smmu_device *smmu)
94 if (of_property_read_bool(smmu->dev->of_node,
96 smmu->options |= arm_smmu_options[i].opt;
97 dev_notice(smmu->dev, "option %s\n",
192 static void queue_poll_init(struct arm_smmu_device *smmu,
197 qp->wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
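
The fragments at lines 192-197 show queue_poll_init() latching whether the SMMU supports SEV wake-ups into qp->wfe. Below is a minimal sketch of the matching poll step, consistent with those fragments; field names other than wfe (timeout as a ktime_t deadline) are assumptions, and the real driver may add spin/udelay backoff.

/*
 * Sketch only: one iteration of the wait loop configured by
 * queue_poll_init(). Prefer WFE when the SEV feature is present,
 * otherwise busy-wait until the deadline passes.
 */
static int queue_poll(struct arm_smmu_queue_poll *qp)
{
	if (ktime_compare(ktime_get(), qp->timeout) > 0)
		return -ETIMEDOUT;

	if (qp->wfe)
		wfe();		/* sleep until a global send-event */
	else
		cpu_relax();	/* plain spin otherwise */

	return 0;
}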
348 static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu)
350 return &smmu->cmdq;
353 static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
364 if (smmu->options & ARM_SMMU_OPT_MSIPOLL) {
372 static void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu,
390 dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
395 dev_err(smmu->dev, "retrying command fetch\n");
417 dev_err(smmu->dev, "skipping command in error state:\n");
419 dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);
427 static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
429 __arm_smmu_cmdq_skip_err(smmu, &smmu->cmdq.q);
593 static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
598 struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
612 queue_poll_init(smmu, &qp);
628 static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu,
633 struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
636 queue_poll_init(smmu, &qp);
652 static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu,
656 struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
660 queue_poll_init(smmu, &qp);
702 static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu,
705 if (smmu->options & ARM_SMMU_OPT_MSIPOLL)
706 return __arm_smmu_cmdq_poll_until_msi(smmu, llq);
708 return __arm_smmu_cmdq_poll_until_consumed(smmu, llq);
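
Lines 702-708 pick between two completion waits: with ARM_SMMU_OPT_MSIPOLL, the sync command built at line 364 is arranged to write back into its own queue slot, so the CPU polls ordinary memory instead of re-reading the CONS register. A hedged sketch of that wait, reusing the queue_poll() sketch above; the zero-on-completion convention is inferred, not quoted.

/*
 * Sketch: wait until the SMMU's MSI write zeroes *cmd, or the poll
 * budget expires; ret carries -ETIMEDOUT out of the condition
 * expression.
 */
static int poll_until_msi_sketch(struct arm_smmu_queue_poll *qp, u32 *cmd)
{
	int ret = 0;

	smp_cond_load_relaxed(cmd, !VAL || (ret = queue_poll(qp)));
	return ret;
}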
744 static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
751 struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
765 if (arm_smmu_cmdq_poll_until_not_full(smmu, &llq))
766 dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
791 arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, &cmdq->q, prod);
841 ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq);
843 dev_err_ratelimited(smmu->dev,
864 static int __arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
871 dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
876 return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, sync);
879 static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
882 return __arm_smmu_cmdq_issue_cmd(smmu, ent, false);
885 static int arm_smmu_cmdq_issue_cmd_with_sync(struct arm_smmu_device *smmu,
888 return __arm_smmu_cmdq_issue_cmd(smmu, ent, true);
891 static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
898 (smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC)) {
899 arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
904 arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false);
910 dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
918 static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
921 return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
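
Lines 891-921 outline command batching: commands accumulate in a fixed-size buffer, the buffer is flushed without a sync when it fills (or eagerly with a sync under ARM_SMMU_OPT_CMDQ_FORCE_SYNC, line 898), and batch_submit() issues the remainder with sync=true. A simplified sketch of the add path, assuming the layout visible in the fragments (cmds->cmds[], cmds->num); the force-sync special case is omitted for brevity.

static void cmdq_batch_add_sketch(struct arm_smmu_device *smmu,
				  struct arm_smmu_cmdq_batch *cmds,
				  struct arm_smmu_cmdq_ent *ent)
{
	/* Batch full: flush without waiting for a sync. */
	if (cmds->num == CMDQ_BATCH_ENTRIES) {
		arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false);
		cmds->num = 0;
	}

	if (arm_smmu_cmdq_build_cmd(&cmds->cmds[cmds->num * CMDQ_ENT_DWORDS],
				    ent))
		return;	/* unknown opcode: skipped (the driver warns, line 910) */

	cmds->num++;
}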
951 arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
963 void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
966 .opcode = smmu->features & ARM_SMMU_FEAT_E2H ?
971 arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
981 struct arm_smmu_device *smmu = smmu_domain->smmu;
996 arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
1001 arm_smmu_cmdq_batch_submit(smmu, &cmds);
1004 static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu,
1009 l1_desc->l2ptr = dmam_alloc_coherent(smmu->dev, size,
1012 dev_warn(smmu->dev,
1035 struct arm_smmu_device *smmu = smmu_domain->smmu;
1044 if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc))
1140 struct arm_smmu_device *smmu = smmu_domain->smmu;
1146 if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) ||
1157 cdcfg->l1_desc = devm_kcalloc(smmu->dev, cdcfg->num_l1_ents,
1166 cdcfg->cdtab = dmam_alloc_coherent(smmu->dev, l1size, &cdcfg->cdtab_dma,
1169 dev_warn(smmu->dev, "failed to allocate context descriptor\n");
1178 devm_kfree(smmu->dev, cdcfg->l1_desc);
1188 struct arm_smmu_device *smmu = smmu_domain->smmu;
1198 dmam_free_coherent(smmu->dev, size,
1202 devm_kfree(smmu->dev, cdcfg->l1_desc);
1210 dmam_free_coherent(smmu->dev, l1size, cdcfg->cdtab, cdcfg->cdtab_dma);
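
Lines 1004-1210 sketch a two-level context-descriptor table: an L1 descriptor array (devm_kcalloc, line 1157) whose entries point at DMA-coherent L2 leaves allocated on demand (line 1009). A plausible lazy-population step follows; the cdcfg path, the CTXDESC_* constants, and the helper names are either taken from the fragments or assumed.

static __le64 *get_cd_ptr_sketch(struct arm_smmu_domain *smmu_domain, u32 ssid)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
	struct arm_smmu_l1_ctx_desc *l1_desc =
		&cdcfg->l1_desc[ssid / CTXDESC_L2_ENTRIES];

	/* The first SSID to land in this leaf allocates it. */
	if (!l1_desc->l2ptr && arm_smmu_alloc_cd_leaf_table(smmu, l1_desc))
		return NULL;

	return l1_desc->l2ptr + (ssid % CTXDESC_L2_ENTRIES) * CTXDESC_CD_DWORDS;
}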
1244 static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
1254 arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
1278 struct arm_smmu_device *smmu = NULL;
1291 smmu = master->smmu;
1342 if (smmu)
1343 arm_smmu_sync_ste_for_sid(smmu, sid);
1348 u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ?
1359 if (smmu->features & ARM_SMMU_FEAT_STALLS &&
1389 arm_smmu_sync_ste_for_sid(smmu, sid);
1392 arm_smmu_sync_ste_for_sid(smmu, sid);
1395 if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
1396 arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
1418 static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
1422 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
1432 desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
1435 dev_err(smmu->dev,
1447 arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
1452 lockdep_assert_held(&smmu->streams_mutex);
1454 node = smmu->streams.rb_node;
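
Lines 1447-1454 show arm_smmu_find_master() asserting streams_mutex and starting from smmu->streams.rb_node. A plausible completion of that lookup, a standard rb-tree walk keyed on stream ID (the arm_smmu_stream layout is assumed):

static struct arm_smmu_master *find_master_sketch(struct arm_smmu_device *smmu,
						  u32 sid)
{
	struct rb_node *node = smmu->streams.rb_node;

	lockdep_assert_held(&smmu->streams_mutex);

	while (node) {
		struct arm_smmu_stream *stream =
			rb_entry(node, struct arm_smmu_stream, node);

		if (stream->id < sid)
			node = node->rb_right;
		else if (stream->id > sid)
			node = node->rb_left;
		else
			return stream->master;
	}

	return NULL;
}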
1469 static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
1540 mutex_lock(&smmu->streams_mutex);
1541 master = arm_smmu_find_master(smmu, sid);
1559 mutex_unlock(&smmu->streams_mutex);
1566 struct arm_smmu_device *smmu = dev;
1567 struct arm_smmu_queue *q = &smmu->evtq.q;
1577 ret = arm_smmu_handle_evt(smmu, evt);
1581 dev_info(smmu->dev, "event 0x%02x received:\n", id);
1583 dev_info(smmu->dev, "\t0x%016llx\n",
1594 dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
1602 static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
1614 dev_info(smmu->dev, "unexpected PRI request received:\n");
1615 dev_info(smmu->dev,
1636 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
1642 struct arm_smmu_device *smmu = dev;
1643 struct arm_smmu_queue *q = &smmu->priq.q;
1649 arm_smmu_handle_ppr(smmu, evt);
1652 dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
1660 static int arm_smmu_device_disable(struct arm_smmu_device *smmu);
1665 struct arm_smmu_device *smmu = dev;
1667 gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
1668 gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);
1674 dev_warn(smmu->dev,
1679 dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
1680 arm_smmu_device_disable(smmu);
1684 dev_warn(smmu->dev, "GERROR MSI write aborted\n");
1687 dev_warn(smmu->dev, "PRIQ MSI write aborted\n");
1690 dev_warn(smmu->dev, "EVTQ MSI write aborted\n");
1693 dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
1696 dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");
1699 dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");
1702 arm_smmu_cmdq_skip_err(smmu);
1704 writel(gerror, smmu->base + ARM_SMMU_GERRORN);
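
Lines 1667-1704 imply the GERROR acknowledge protocol: bits that differ between GERROR and GERRORN are active errors, and copying GERROR back into GERRORN acknowledges them. A sketch of that skeleton, with the per-error dispatch reduced to the CMDQ case from line 1702:

static irqreturn_t gerror_handler_sketch(int irq, void *dev)
{
	struct arm_smmu_device *smmu = dev;
	u32 gerror, gerrorn, active;

	gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
	gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);

	active = gerror ^ gerrorn;	/* raised but not yet acked */
	if (!(active & GERROR_ERR_MASK))
		return IRQ_NONE;

	if (active & GERROR_CMDQ_ERR)
		arm_smmu_cmdq_skip_err(smmu);

	writel(gerror, smmu->base + ARM_SMMU_GERRORN);	/* ack */
	return IRQ_HANDLED;
}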
1710 struct arm_smmu_device *smmu = dev;
1713 if (smmu->features & ARM_SMMU_FEAT_PRI)
1803 arm_smmu_cmdq_batch_add(master->smmu, &cmds, &cmd);
1806 return arm_smmu_cmdq_batch_submit(master->smmu, &cmds);
1818 if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
1849 arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd);
1854 return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds);
1861 struct arm_smmu_device *smmu = smmu_domain->smmu;
1872 arm_smmu_tlb_inv_asid(smmu, smmu_domain->s1_cfg.cd.asid);
1876 arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
1886 struct arm_smmu_device *smmu = smmu_domain->smmu;
1894 if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
1920 if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
1946 arm_smmu_cmdq_batch_add(smmu, &cmds, cmd);
1949 arm_smmu_cmdq_batch_submit(smmu, &cmds);
1963 cmd.opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
1984 .opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
2025 return master->smmu->features & ARM_SMMU_FEAT_COHERENCY;
2066 struct arm_smmu_device *smmu = smmu_domain->smmu;
2083 ida_free(&smmu->vmid_map, cfg->vmid);
2095 struct arm_smmu_device *smmu = smmu_domain->smmu;
2104 XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
2153 struct arm_smmu_device *smmu = smmu_domain->smmu;
2158 vmid = ida_alloc_range(&smmu->vmid_map, 1, (1 << smmu->vmid_bits) - 1,
2188 struct arm_smmu_device *smmu = smmu_domain->smmu;
2196 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
2198 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
2203 ias = (smmu->features & ARM_SMMU_FEAT_VAX) ? 52 : 48;
2205 oas = smmu->ias;
2211 ias = smmu->ias;
2212 oas = smmu->oas;
2221 .pgsize_bitmap = smmu->pgsize_bitmap,
2224 .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY,
2226 .iommu_dev = smmu->dev,
2247 static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
2250 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2252 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
2272 struct arm_smmu_device *smmu = master->smmu;
2276 __le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
2292 struct arm_smmu_device *smmu = master->smmu;
2295 if (!(smmu->features & ARM_SMMU_FEAT_ATS))
2308 struct arm_smmu_device *smmu = master->smmu;
2316 stu = __ffs(smmu->pgsize_bitmap);
2369 master->smmu->ssid_bits);
2413 struct arm_smmu_device *smmu;
2421 smmu = master->smmu;
2437 if (!smmu_domain->smmu) {
2438 smmu_domain->smmu = smmu;
2441 smmu_domain->smmu = NULL;
2444 } else if (smmu_domain->smmu != smmu) {
2511 if (smmu_domain->smmu)
2550 static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
2552 unsigned long limit = smmu->strtab_cfg.num_l1_ents;
2554 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
2560 static int arm_smmu_init_sid_strtab(struct arm_smmu_device *smmu, u32 sid)
2563 if (!arm_smmu_sid_in_range(smmu, sid))
2567 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
2568 return arm_smmu_init_l2_strtab(smmu, sid);
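
Lines 2550-2568 gate stream-table initialisation on a range check. A likely completion of arm_smmu_sid_in_range(): with a two-level table each L1 entry fans out to 2^STRTAB_SPLIT STEs, so the limit scales accordingly (the final comparison is inferred):

static bool sid_in_range_sketch(struct arm_smmu_device *smmu, u32 sid)
{
	unsigned long limit = smmu->strtab_cfg.num_l1_ents;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		limit *= 1UL << STRTAB_SPLIT;

	return sid < limit;
}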
2573 static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
2588 mutex_lock(&smmu->streams_mutex);
2596 ret = arm_smmu_init_sid_strtab(smmu, sid);
2601 new_node = &(smmu->streams.rb_node);
2622 rb_insert_color(&new_stream->node, &smmu->streams);
2627 rb_erase(&master->streams[i].node, &smmu->streams);
2630 mutex_unlock(&smmu->streams_mutex);
2638 struct arm_smmu_device *smmu = master->smmu;
2641 if (!smmu || !master->streams)
2644 mutex_lock(&smmu->streams_mutex);
2646 rb_erase(&master->streams[i].node, &smmu->streams);
2647 mutex_unlock(&smmu->streams_mutex);
2657 struct arm_smmu_device *smmu;
2667 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
2668 if (!smmu)
2676 master->smmu = smmu;
2680 ret = arm_smmu_insert_master(smmu, master);
2685 master->ssid_bits = min(smmu->ssid_bits, master->ssid_bits);
2697 if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB))
2701 if ((smmu->features & ARM_SMMU_FEAT_STALLS &&
2703 smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
2706 return &smmu->iommu;
2719 iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
2749 if (smmu_domain->smmu)
2890 static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
2901 q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma,
2910 dev_err(smmu->dev,
2917 dev_info(smmu->dev, "allocated %u entries for %s\n",
2933 static int arm_smmu_cmdq_init(struct arm_smmu_device *smmu)
2935 struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
2941 cmdq->valid_map = (atomic_long_t *)devm_bitmap_zalloc(smmu->dev, nents,
2949 static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
2954 ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, smmu->base,
2960 ret = arm_smmu_cmdq_init(smmu);
2965 ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, smmu->page1,
2971 if ((smmu->features & ARM_SMMU_FEAT_SVA) &&
2972 (smmu->features & ARM_SMMU_FEAT_STALLS)) {
2973 smmu->evtq.iopf = iopf_queue_alloc(dev_name(smmu->dev));
2974 if (!smmu->evtq.iopf)
2979 if (!(smmu->features & ARM_SMMU_FEAT_PRI))
2982 return arm_smmu_init_one_queue(smmu, &smmu->priq.q, smmu->page1,
2987 static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
2990 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2991 void *strtab = smmu->strtab_cfg.strtab;
2993 cfg->l1_desc = devm_kcalloc(smmu->dev, cfg->num_l1_ents,
3006 static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
3011 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
3015 size = min(size, smmu->sid_bits - STRTAB_SPLIT);
3019 if (size < smmu->sid_bits)
3020 dev_warn(smmu->dev,
3022 size, smmu->sid_bits);
3025 strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
3028 dev_err(smmu->dev,
3041 return arm_smmu_init_l1_strtab(smmu);
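
Lines 3006-3041 size the two-level stream table: the L1 entry count is capped so the table never exceeds its fixed span, and since each entry covers 2^STRTAB_SPLIT stream IDs, the driver warns (line 3020) when size + STRTAB_SPLIT still falls short of smmu->sid_bits. A reconstruction of that arithmetic, assumed from the fragments:

static void strtab_2lvl_size_sketch(struct arm_smmu_device *smmu)
{
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	u32 size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);

	size = min(size, smmu->sid_bits - STRTAB_SPLIT);
	cfg->num_l1_ents = 1 << size;

	size += STRTAB_SPLIT;	/* SID bits actually covered */
	if (size < smmu->sid_bits)
		dev_warn(smmu->dev,
			 "2-level strtab only covers %u/%u bits of SID\n",
			 size, smmu->sid_bits);
}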
3044 static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
3049 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
3051 size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
3052 strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
3055 dev_err(smmu->dev,
3061 cfg->num_l1_ents = 1 << smmu->sid_bits;
3065 reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
3072 static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
3077 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
3078 ret = arm_smmu_init_strtab_2lvl(smmu);
3080 ret = arm_smmu_init_strtab_linear(smmu);
3086 reg = smmu->strtab_cfg.strtab_dma & STRTAB_BASE_ADDR_MASK;
3088 smmu->strtab_cfg.strtab_base = reg;
3090 ida_init(&smmu->vmid_map);
3095 static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
3099 mutex_init(&smmu->streams_mutex);
3100 smmu->streams = RB_ROOT;
3102 ret = arm_smmu_init_queues(smmu);
3106 return arm_smmu_init_strtab(smmu);
3109 static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
3114 writel_relaxed(val, smmu->base + reg_off);
3115 return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
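
Lines 3109-3115 give nearly the whole write-and-wait helper; completing the declaration yields the standard pattern of writing a control register and polling its ACK register until the hardware echoes the value back (ARM_SMMU_POLL_TIMEOUT_US is assumed to be the driver's poll budget):

static int write_reg_sync_sketch(struct arm_smmu_device *smmu, u32 val,
				 unsigned int reg_off, unsigned int ack_off)
{
	u32 reg;

	writel_relaxed(val, smmu->base + reg_off);
	return readl_relaxed_poll_timeout(smmu->base + ack_off, reg,
					  reg == val, 1,
					  ARM_SMMU_POLL_TIMEOUT_US);
}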
3120 static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
3123 u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA;
3137 dev_err(smmu->dev, "GBPA not responding to update\n");
3151 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
3157 writeq_relaxed(doorbell, smmu->base + cfg[0]);
3158 writel_relaxed(msg->data, smmu->base + cfg[1]);
3159 writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
3162 static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
3165 struct device *dev = smmu->dev;
3168 writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
3169 writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);
3171 if (smmu->features & ARM_SMMU_FEAT_PRI)
3172 writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
3176 if (!(smmu->features & ARM_SMMU_FEAT_MSI))
3180 dev_info(smmu->dev, "msi_domain absent - falling back to wired irqs\n");
3191 smmu->evtq.q.irq = msi_get_virq(dev, EVTQ_MSI_INDEX);
3192 smmu->gerr_irq = msi_get_virq(dev, GERROR_MSI_INDEX);
3193 smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX);
3199 static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
3203 arm_smmu_setup_msis(smmu);
3206 irq = smmu->evtq.q.irq;
3208 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
3211 "arm-smmu-v3-evtq", smmu);
3213 dev_warn(smmu->dev, "failed to enable evtq irq\n");
3215 dev_warn(smmu->dev, "no evtq irq - events will not be reported!\n");
3218 irq = smmu->gerr_irq;
3220 ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
3221 0, "arm-smmu-v3-gerror", smmu);
3223 dev_warn(smmu->dev, "failed to enable gerror irq\n");
3225 dev_warn(smmu->dev, "no gerr irq - errors will not be reported!\n");
3228 if (smmu->features & ARM_SMMU_FEAT_PRI) {
3229 irq = smmu->priq.q.irq;
3231 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
3234 "arm-smmu-v3-priq",
3235 smmu);
3237 dev_warn(smmu->dev,
3240 dev_warn(smmu->dev, "no priq irq - PRI will be broken\n");
3245 static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
3251 ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
3254 dev_err(smmu->dev, "failed to disable irqs\n");
3258 irq = smmu->combined_irq;
3264 ret = devm_request_threaded_irq(smmu->dev, irq,
3268 "arm-smmu-v3-combined-irq", smmu);
3270 dev_warn(smmu->dev, "failed to enable combined irq\n");
3272 arm_smmu_setup_unique_irqs(smmu);
3274 if (smmu->features & ARM_SMMU_FEAT_PRI)
3278 ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
3281 dev_warn(smmu->dev, "failed to enable irqs\n");
3286 static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
3290 ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
3292 dev_err(smmu->dev, "failed to clear cr0\n");
3297 static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
3304 reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
3306 dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
3308 arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
3311 ret = arm_smmu_device_disable(smmu);
3322 writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);
3327 if (smmu->features & ARM_SMMU_FEAT_E2H)
3330 writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);
3333 writeq_relaxed(smmu->strtab_cfg.strtab_base,
3334 smmu->base + ARM_SMMU_STRTAB_BASE);
3335 writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
3336 smmu->base + ARM_SMMU_STRTAB_BASE_CFG);
3339 writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
3340 writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
3341 writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
3344 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
3347 dev_err(smmu->dev, "failed to enable command queue\n");
3353 arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
3356 if (smmu->features & ARM_SMMU_FEAT_HYP) {
3358 arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
3362 arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
3365 writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
3366 writel_relaxed(smmu->evtq.q.llq.prod, smmu->page1 + ARM_SMMU_EVTQ_PROD);
3367 writel_relaxed(smmu->evtq.q.llq.cons, smmu->page1 + ARM_SMMU_EVTQ_CONS);
3370 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
3373 dev_err(smmu->dev, "failed to enable event queue\n");
3378 if (smmu->features & ARM_SMMU_FEAT_PRI) {
3379 writeq_relaxed(smmu->priq.q.q_base,
3380 smmu->base + ARM_SMMU_PRIQ_BASE);
3381 writel_relaxed(smmu->priq.q.llq.prod,
3382 smmu->page1 + ARM_SMMU_PRIQ_PROD);
3383 writel_relaxed(smmu->priq.q.llq.cons,
3384 smmu->page1 + ARM_SMMU_PRIQ_CONS);
3387 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
3390 dev_err(smmu->dev, "failed to enable PRI queue\n");
3395 if (smmu->features & ARM_SMMU_FEAT_ATS) {
3397 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
3400 dev_err(smmu->dev, "failed to enable ATS check\n");
3405 ret = arm_smmu_setup_irqs(smmu);
3407 dev_err(smmu->dev, "failed to setup irqs\n");
3418 ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
3422 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
3425 dev_err(smmu->dev, "failed to enable SMMU interface\n");
3436 static void arm_smmu_device_iidr_probe(struct arm_smmu_device *smmu)
3441 reg = readl_relaxed(smmu->base + ARM_SMMU_IIDR);
3453 smmu->features &= ~ARM_SMMU_FEAT_SEV;
3456 smmu->features &= ~ARM_SMMU_FEAT_NESTING;
3460 smmu->features &= ~ARM_SMMU_FEAT_BTM;
3461 smmu->options |= ARM_SMMU_OPT_CMDQ_FORCE_SYNC;
3463 smmu->features &= ~ARM_SMMU_FEAT_NESTING;
3470 static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
3473 bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;
3476 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);
3480 smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;
3483 smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;
3492 smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
3496 smmu->features |= ARM_SMMU_FEAT_TT_BE;
3500 smmu->features |= ARM_SMMU_FEAT_TT_LE;
3504 dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
3510 smmu->features |= ARM_SMMU_FEAT_PRI;
3513 smmu->features |= ARM_SMMU_FEAT_ATS;
3516 smmu->features |= ARM_SMMU_FEAT_SEV;
3519 smmu->features |= ARM_SMMU_FEAT_MSI;
3521 smmu->options |= ARM_SMMU_OPT_MSIPOLL;
3525 smmu->features |= ARM_SMMU_FEAT_HYP;
3527 smmu->features |= ARM_SMMU_FEAT_E2H;
3535 dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n",
3540 smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
3543 smmu->features |= ARM_SMMU_FEAT_STALLS;
3547 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
3550 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
3553 dev_err(smmu->dev, "no translation support!\n");
3560 smmu->ias = 40;
3565 dev_err(smmu->dev, "AArch64 table format not supported!\n");
3570 smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
3571 smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;
3574 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
3576 dev_err(smmu->dev, "embedded implementation not supported\n");
3581 smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
3583 if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) {
3590 dev_err(smmu->dev, "command queue size <= %d entries not supported\n",
3595 smmu->evtq.q.llq.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT,
3597 smmu->priq.q.llq.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT,
3601 smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg);
3602 smmu->sid_bits = FIELD_GET(IDR1_SIDSIZE, reg);
3603 smmu->iommu.max_pasids = 1UL << smmu->ssid_bits;
3609 if (smmu->sid_bits <= STRTAB_SPLIT)
3610 smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;
3613 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3);
3615 smmu->features |= ARM_SMMU_FEAT_RANGE_INV;
3618 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
3621 smmu->evtq.max_stalls = FIELD_GET(IDR5_STALL_MAX, reg);
3625 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
3627 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
3629 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
3633 smmu->features |= ARM_SMMU_FEAT_VAX;
3638 smmu->oas = 32;
3641 smmu->oas = 36;
3644 smmu->oas = 40;
3647 smmu->oas = 42;
3650 smmu->oas = 44;
3653 smmu->oas = 52;
3654 smmu->pgsize_bitmap |= 1ULL << 42; /* 4TB */
3657 dev_info(smmu->dev,
3661 smmu->oas = 48;
3665 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
3667 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
3670 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
3671 dev_warn(smmu->dev,
3674 smmu->ias = max(smmu->ias, smmu->oas);
3676 if ((smmu->features & ARM_SMMU_FEAT_TRANS_S1) &&
3677 (smmu->features & ARM_SMMU_FEAT_TRANS_S2))
3678 smmu->features |= ARM_SMMU_FEAT_NESTING;
3680 arm_smmu_device_iidr_probe(smmu);
3682 if (arm_smmu_sva_supported(smmu))
3683 smmu->features |= ARM_SMMU_FEAT_SVA;
3685 dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
3686 smmu->ias, smmu->oas, smmu->features);
3691 static void acpi_smmu_get_options(u32 model, struct arm_smmu_device *smmu)
3695 smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY;
3698 smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH;
3702 dev_notice(smmu->dev, "option mask 0x%x\n", smmu->options);
3706 struct arm_smmu_device *smmu)
3709 struct device *dev = smmu->dev;
3717 acpi_smmu_get_options(iort_smmu->model, smmu);
3720 smmu->features |= ARM_SMMU_FEAT_COHERENCY;
3726 struct arm_smmu_device *smmu)
3733 struct arm_smmu_device *smmu)
3746 parse_driver_options(smmu);
3749 smmu->features |= ARM_SMMU_FEAT_COHERENCY;
3754 static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
3756 if (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY)
3770 static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
3776 iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
3785 ret = arm_smmu_init_sid_strtab(smmu, rmr->sids[i]);
3787 dev_err(smmu->dev, "RMR SID(0x%x) bypass failed\n",
3792 step = arm_smmu_get_step_for_sid(smmu, rmr->sids[i]);
3797 iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
3805 struct arm_smmu_device *smmu;
3809 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
3810 if (!smmu)
3812 smmu->dev = dev;
3815 ret = arm_smmu_device_dt_probe(pdev, smmu);
3817 ret = arm_smmu_device_acpi_probe(pdev, smmu);
3829 if (resource_size(res) < arm_smmu_resource_size(smmu)) {
3839 smmu->base = arm_smmu_ioremap(dev, ioaddr, ARM_SMMU_REG_SZ);
3840 if (IS_ERR(smmu->base))
3841 return PTR_ERR(smmu->base);
3843 if (arm_smmu_resource_size(smmu) > SZ_64K) {
3844 smmu->page1 = arm_smmu_ioremap(dev, ioaddr + SZ_64K,
3846 if (IS_ERR(smmu->page1))
3847 return PTR_ERR(smmu->page1);
3849 smmu->page1 = smmu->base;
3856 smmu->combined_irq = irq;
3860 smmu->evtq.q.irq = irq;
3864 smmu->priq.q.irq = irq;
3868 smmu->gerr_irq = irq;
3871 ret = arm_smmu_device_hw_probe(smmu);
3876 ret = arm_smmu_init_structures(smmu);
3881 platform_set_drvdata(pdev, smmu);
3884 arm_smmu_rmr_install_bypass_ste(smmu);
3887 ret = arm_smmu_device_reset(smmu, bypass);
3892 ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
3897 ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
3900 iommu_device_sysfs_remove(&smmu->iommu);
3909 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
3911 iommu_device_unregister(&smmu->iommu);
3912 iommu_device_sysfs_remove(&smmu->iommu);
3913 arm_smmu_device_disable(smmu);
3914 iopf_queue_free(smmu->evtq.iopf);
3915 ida_destroy(&smmu->vmid_map);
3920 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
3922 arm_smmu_device_disable(smmu);
3926 { .compatible = "arm,smmu-v3", },
3939 .name = "arm-smmu-v3",
3952 MODULE_ALIAS("platform:arm-smmu-v3");
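
The closing fragments (lines 3926-3952) belong to the usual platform-driver boilerplate. A minimal skeleton consistent with them; the probe callback name is assumed from the probe/remove/shutdown fragments earlier in the listing, and the real driver wires up more callbacks than shown here.

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v3", },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu-v3",
		.of_match_table	= arm_smmu_of_match,
	},
	.probe	= arm_smmu_device_probe,
};
module_platform_driver(arm_smmu_driver);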