Lines matching refs: smmu (all references to the identifier smmu in the arm-smmu-v3 driver; each match is prefixed with its line number in the source file)

34 #include "arm-smmu-v3.h"
86 struct arm_smmu_device *smmu)
89 return smmu->page1 + offset - SZ_64K;
91 return smmu->base + offset;
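The two returns at 89/91 capture the SMMUv3 register layout: offsets past the first 64K window belong to register Page 1, which is mapped separately. A minimal reconstruction from these fragments; the offset > SZ_64K guard and the __iomem return type are inferred, not part of the matches:

    static void __iomem *arm_smmu_page1_fixup(unsigned long offset,
                                              struct arm_smmu_device *smmu)
    {
            /* Offsets beyond the first 64K live in register Page 1. */
            if (offset > SZ_64K)
                    return smmu->page1 + offset - SZ_64K;

            return smmu->base + offset;
    }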
99 static void parse_driver_options(struct arm_smmu_device *smmu)
104 if (of_property_read_bool(smmu->dev->of_node,
106 smmu->options |= arm_smmu_options[i].opt;
107 dev_notice(smmu->dev, "option %s\n",
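parse_driver_options (99) is the usual table-driven scan of device-tree properties. A sketch assuming a sentinel-terminated arm_smmu_options[] array with .prop/.opt members; only the three matched lines are verbatim:

    static void parse_driver_options(struct arm_smmu_device *smmu)
    {
            int i = 0;

            /* Assumed: arm_smmu_options[] ends with a zero .opt sentinel. */
            do {
                    if (of_property_read_bool(smmu->dev->of_node,
                                              arm_smmu_options[i].prop)) {
                            smmu->options |= arm_smmu_options[i].opt;
                            dev_notice(smmu->dev, "option %s\n",
                                       arm_smmu_options[i].prop);
                    }
            } while (arm_smmu_options[++i].opt);
    }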
202 static void queue_poll_init(struct arm_smmu_device *smmu,
207 qp->wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
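queue_poll_init (202) decides up front whether a poll loop may wait with WFE: only when the SMMU can generate SEV wake-up events (ARM_SMMU_FEAT_SEV); otherwise callers must spin. A sketch; every field other than qp->wfe is an assumed part of the poll-state struct:

    static void queue_poll_init(struct arm_smmu_device *smmu,
                                struct arm_smmu_queue_poll *qp)
    {
            qp->delay = 1;          /* assumed: initial backoff, in us */
            qp->spin_cnt = 0;
            qp->wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
            qp->timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US);
    }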
350 static void arm_smmu_cmdq_build_sync_cmd(u64 *cmd, struct arm_smmu_device *smmu,
353 struct arm_smmu_queue *q = &smmu->cmdq.q;
362 if (smmu->options & ARM_SMMU_OPT_MSIPOLL) {
370 static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
381 struct arm_smmu_queue *q = &smmu->cmdq.q;
388 dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
393 dev_err(smmu->dev, "retrying command fetch\n");
414 dev_err(smmu->dev, "skipping command in error state:\n");
416 dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);
420 dev_err(smmu->dev, "failed to convert to CMD_SYNC\n");
588 static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
593 struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
607 queue_poll_init(smmu, &qp);
609 llq->val = READ_ONCE(smmu->cmdq.q.llq.val);
623 static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu,
628 struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
631 queue_poll_init(smmu, &qp);
647 static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu,
651 struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
655 queue_poll_init(smmu, &qp);
656 llq->val = READ_ONCE(smmu->cmdq.q.llq.val);
697 static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu,
700 if (smmu->options & ARM_SMMU_OPT_MSIPOLL)
701 return __arm_smmu_cmdq_poll_until_msi(smmu, llq);
703 return __arm_smmu_cmdq_poll_until_consumed(smmu, llq);
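Lines 697-703 are the complete sync-wait policy: when the SMMU can signal CMD_SYNC completion with an MSI write (ARM_SMMU_OPT_MSIPOLL, set at 3215 during feature probe), the driver polls the MSI payload in memory via the helper at 623; otherwise it polls the queue's cons pointer via the helper at 647. Reassembled, with only the llq parameter declaration inferred:

    static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu,
                                             struct arm_smmu_ll_queue *llq)
    {
            if (smmu->options & ARM_SMMU_OPT_MSIPOLL)
                    return __arm_smmu_cmdq_poll_until_msi(smmu, llq);

            return __arm_smmu_cmdq_poll_until_consumed(smmu, llq);
    }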
739 static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
746 struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
760 if (arm_smmu_cmdq_poll_until_not_full(smmu, &llq))
761 dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
786 arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, prod);
836 ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq);
838 dev_err_ratelimited(smmu->dev,
859 static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
865 dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
870 return arm_smmu_cmdq_issue_cmdlist(smmu, cmd, 1, false);
873 static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
875 return arm_smmu_cmdq_issue_cmdlist(smmu, NULL, 0, true);
878 static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
883 arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false);
890 static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
893 return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
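All four submission entry points, issue_cmd (859), issue_sync (873), batch_add (878) and batch_submit (890), funnel into arm_smmu_cmdq_issue_cmdlist(smmu, cmds, n, sync), which owns the lock-free command-queue insertion. The two thinnest wrappers, reassembled from the matches above (only the struct arm_smmu_cmdq_batch type name is inferred from the cmds->cmds/cmds->num accesses):

    static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
    {
            /* No commands of our own: just append a CMD_SYNC and wait. */
            return arm_smmu_cmdq_issue_cmdlist(smmu, NULL, 0, true);
    }

    static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
                                          struct arm_smmu_cmdq_batch *cmds)
    {
            /* Push the accumulated batch, then a CMD_SYNC, in one call. */
            return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
    }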
897 void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
904 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
905 arm_smmu_cmdq_issue_sync(smmu);
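arm_smmu_tlb_inv_asid (897) shows the issue-then-sync idiom used throughout this file: build one command descriptor, post it, then drain the queue with a sync so the invalidation has completed before returning. The signature is verbatim from the match; the body is a sketch with assumed field names for the TLBI command:

    void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
    {
            struct arm_smmu_cmdq_ent cmd = {
                    .opcode    = CMDQ_OP_TLBI_NH_ASID, /* assumed opcode name */
                    .tlbi.asid = asid,
            };

            arm_smmu_cmdq_issue_cmd(smmu, &cmd);
            arm_smmu_cmdq_issue_sync(smmu);
    }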
915 struct arm_smmu_device *smmu = smmu_domain->smmu;
928 arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
933 arm_smmu_cmdq_batch_submit(smmu, &cmds);
936 static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu,
941 l1_desc->l2ptr = dmam_alloc_coherent(smmu->dev, size,
944 dev_warn(smmu->dev,
967 struct arm_smmu_device *smmu = smmu_domain->smmu;
976 if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc))
1003 struct arm_smmu_device *smmu = smmu_domain->smmu;
1047 if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
1070 struct arm_smmu_device *smmu = smmu_domain->smmu;
1076 if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) ||
1087 cdcfg->l1_desc = devm_kcalloc(smmu->dev, cdcfg->num_l1_ents,
1096 cdcfg->cdtab = dmam_alloc_coherent(smmu->dev, l1size, &cdcfg->cdtab_dma,
1099 dev_warn(smmu->dev, "failed to allocate context descriptor\n");
1108 devm_kfree(smmu->dev, cdcfg->l1_desc);
1118 struct arm_smmu_device *smmu = smmu_domain->smmu;
1128 dmam_free_coherent(smmu->dev, size,
1132 devm_kfree(smmu->dev, cdcfg->l1_desc);
1140 dmam_free_coherent(smmu->dev, l1size, cdcfg->cdtab, cdcfg->cdtab_dma);
1174 static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
1184 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
1185 arm_smmu_cmdq_issue_sync(smmu);
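arm_smmu_sync_ste_for_sid (1174) applies the same idiom to configuration caches: after a stream table entry has been rewritten in memory, a CFGI_STE command makes the SMMU drop any cached copy for that StreamID, and the trailing sync guarantees completion. Sketch; the command-entry layout is assumed:

    static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
    {
            struct arm_smmu_cmdq_ent cmd = {
                    .opcode = CMDQ_OP_CFGI_STE,   /* assumed opcode name */
                    .cfgi   = {
                            .sid  = sid,
                            .leaf = true,         /* this STE only */
                    },
            };

            arm_smmu_cmdq_issue_cmd(smmu, &cmd);
            arm_smmu_cmdq_issue_sync(smmu);
    }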
1209 struct arm_smmu_device *smmu = NULL;
1222 smmu = master->smmu;
1273 if (smmu)
1274 arm_smmu_sync_ste_for_sid(smmu, sid);
1287 if (smmu->features & ARM_SMMU_FEAT_STALLS &&
1288 !(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
1317 arm_smmu_sync_ste_for_sid(smmu, sid);
1320 arm_smmu_sync_ste_for_sid(smmu, sid);
1323 if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
1324 arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
1337 static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
1341 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
1351 desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
1354 dev_err(smmu->dev,
1369 struct arm_smmu_device *smmu = dev;
1370 struct arm_smmu_queue *q = &smmu->evtq.q;
1378 dev_info(smmu->dev, "event 0x%02x received:\n", id);
1380 dev_info(smmu->dev, "\t0x%016llx\n",
1391 dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
1399 static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
1411 dev_info(smmu->dev, "unexpected PRI request received:\n");
1412 dev_info(smmu->dev,
1433 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
1439 struct arm_smmu_device *smmu = dev;
1440 struct arm_smmu_queue *q = &smmu->priq.q;
1446 arm_smmu_handle_ppr(smmu, evt);
1449 dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
1457 static int arm_smmu_device_disable(struct arm_smmu_device *smmu);
1462 struct arm_smmu_device *smmu = dev;
1464 gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
1465 gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);
1471 dev_warn(smmu->dev,
1476 dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
1477 arm_smmu_device_disable(smmu);
1481 dev_warn(smmu->dev, "GERROR MSI write aborted\n");
1484 dev_warn(smmu->dev, "PRIQ MSI write aborted\n");
1487 dev_warn(smmu->dev, "EVTQ MSI write aborted\n");
1490 dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
1493 dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");
1496 dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");
1499 arm_smmu_cmdq_skip_err(smmu);
1501 writel(gerror, smmu->base + ARM_SMMU_GERRORN);
1507 struct arm_smmu_device *smmu = dev;
1510 if (smmu->features & ARM_SMMU_FEAT_PRI)
1584 arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
1587 return arm_smmu_cmdq_issue_sync(master->smmu);
1599 if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
1628 arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd);
1633 return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds);
1640 struct arm_smmu_device *smmu = smmu_domain->smmu;
1651 arm_smmu_tlb_inv_asid(smmu, smmu_domain->s1_cfg.cd.asid);
1655 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
1656 arm_smmu_cmdq_issue_sync(smmu);
1665 struct arm_smmu_device *smmu = smmu_domain->smmu;
1686 if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
1700 if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
1726 arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
1729 arm_smmu_cmdq_batch_submit(smmu, &cmds);
1832 struct arm_smmu_device *smmu = smmu_domain->smmu;
1850 arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
1862 struct arm_smmu_device *smmu = smmu_domain->smmu;
1871 XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
1918 struct arm_smmu_device *smmu = smmu_domain->smmu;
1922 vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
1951 struct arm_smmu_device *smmu = smmu_domain->smmu;
1959 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
1961 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
1966 ias = (smmu->features & ARM_SMMU_FEAT_VAX) ? 52 : 48;
1968 oas = smmu->ias;
1974 ias = smmu->ias;
1975 oas = smmu->oas;
1984 .pgsize_bitmap = smmu->pgsize_bitmap,
1987 .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY,
1989 .iommu_dev = smmu->dev,
2013 static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
2016 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2018 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
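arm_smmu_get_step_for_sid (2013) maps a StreamID to its STE slot. With a two-level stream table the SID splits at STRTAB_SPLIT: the high bits select an L1 descriptor, the low bits select an entry in that descriptor's L2 page; a linear table is a direct index. A simplified sketch under those assumptions (STRTAB_SPLIT and STRTAB_STE_DWORDS also appear in the matches at 2737 and 2773):

    static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
    {
            struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

            if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
                    /* Two-level walk: L1 descriptor, then slot in its L2 page. */
                    struct arm_smmu_strtab_l1_desc *l1_desc =
                            &cfg->l1_desc[sid >> STRTAB_SPLIT];
                    unsigned int idx = sid & ((1 << STRTAB_SPLIT) - 1);

                    return &l1_desc->l2ptr[idx * STRTAB_STE_DWORDS];
            }

            /* Linear table: direct index by StreamID. */
            return &cfg->strtab[sid * STRTAB_STE_DWORDS];
    }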
2038 struct arm_smmu_device *smmu = master->smmu;
2042 __le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
2058 struct arm_smmu_device *smmu = master->smmu;
2061 if (!(smmu->features & ARM_SMMU_FEAT_ATS))
2074 struct arm_smmu_device *smmu = master->smmu;
2082 stu = __ffs(smmu->pgsize_bitmap);
2135 master->smmu->ssid_bits);
2179 struct arm_smmu_device *smmu;
2187 smmu = master->smmu;
2203 if (!smmu_domain->smmu) {
2204 smmu_domain->smmu = smmu;
2207 smmu_domain->smmu = NULL;
2210 } else if (smmu_domain->smmu != smmu) {
2213 dev_name(smmu_domain->smmu->dev),
2214 dev_name(smmu->dev));
2271 if (smmu_domain->smmu)
2309 static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
2311 unsigned long limit = smmu->strtab_cfg.num_l1_ents;
2313 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
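arm_smmu_sid_in_range (2309) bounds-checks firmware-supplied StreamIDs before any stream table access: a two-level table holds num_l1_ents L1 descriptors of 2^STRTAB_SPLIT STEs each. Reassembled; the scaling statement and the final comparison are inferred:

    static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
    {
            unsigned long limit = smmu->strtab_cfg.num_l1_ents;

            if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
                    limit *= 1UL << STRTAB_SPLIT; /* STEs per L1 descriptor */

            return sid < limit;
    }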
2324 struct arm_smmu_device *smmu;
2334 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
2335 if (!smmu)
2343 master->smmu = smmu;
2353 if (!arm_smmu_sid_in_range(smmu, sid)) {
2359 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
2360 ret = arm_smmu_init_l2_strtab(smmu, sid);
2366 master->ssid_bits = min(smmu->ssid_bits, fwspec->num_pasid_bits);
2378 if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB))
2382 return &smmu->iommu;
2464 if (smmu_domain->smmu) {
2606 static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
2616 q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma,
2625 dev_err(smmu->dev,
2632 dev_info(smmu->dev, "allocated %u entries for %s\n",
2636 q->prod_reg = arm_smmu_page1_fixup(prod_off, smmu);
2637 q->cons_reg = arm_smmu_page1_fixup(cons_off, smmu);
2654 static int arm_smmu_cmdq_init(struct arm_smmu_device *smmu)
2657 struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
2666 dev_err(smmu->dev, "failed to allocate cmdq bitmap\n");
2670 devm_add_action(smmu->dev, arm_smmu_cmdq_free_bitmap, bitmap);
2676 static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
2681 ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
2687 ret = arm_smmu_cmdq_init(smmu);
2692 ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
2699 if (!(smmu->features & ARM_SMMU_FEAT_PRI))
2702 return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
2707 static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
2710 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2712 void *strtab = smmu->strtab_cfg.strtab;
2714 cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
2716 dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
2728 static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
2733 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2737 size = min(size, smmu->sid_bits - STRTAB_SPLIT);
2741 if (size < smmu->sid_bits)
2742 dev_warn(smmu->dev,
2744 size, smmu->sid_bits);
2747 strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
2750 dev_err(smmu->dev,
2763 return arm_smmu_init_l1_strtab(smmu);
2766 static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
2771 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2773 size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
2774 strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
2777 dev_err(smmu->dev,
2783 cfg->num_l1_ents = 1 << smmu->sid_bits;
2787 reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
2794 static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
2799 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
2800 ret = arm_smmu_init_strtab_2lvl(smmu);
2802 ret = arm_smmu_init_strtab_linear(smmu);
2808 reg = smmu->strtab_cfg.strtab_dma & STRTAB_BASE_ADDR_MASK;
2810 smmu->strtab_cfg.strtab_base = reg;
2813 set_bit(0, smmu->vmid_map);
2817 static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
2821 ret = arm_smmu_init_queues(smmu);
2825 return arm_smmu_init_strtab(smmu);
2828 static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
2833 writel_relaxed(val, smmu->base + reg_off);
2834 return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
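arm_smmu_write_reg_sync (2828) is the write-and-acknowledge idiom for control registers that have a paired ACK register (CR0/CR0ACK, IRQ_CTRL/IRQ_CTRLACK): write the value, then poll the ACK register until the hardware reflects it. Reassembled; the second signature line and the timeout constant are inferred:

    static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
                                       unsigned int reg_off, unsigned int ack_off)
    {
            u32 reg;

            writel_relaxed(val, smmu->base + reg_off);
            return readl_relaxed_poll_timeout(smmu->base + ack_off, reg,
                                              reg == val, 1,
                                              ARM_SMMU_POLL_TIMEOUT_US);
    }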
2839 static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
2842 u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA;
2856 dev_err(smmu->dev, "GBPA not responding to update\n");
2870 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2876 writeq_relaxed(doorbell, smmu->base + cfg[0]);
2877 writel_relaxed(msg->data, smmu->base + cfg[1]);
2878 writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
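The three relaxed writes at 2876-2878 program one MSI target: the 64-bit doorbell address, the 32-bit payload, and Device-nGnRE memory attributes for the write. A sketch of the enclosing callback, assuming a per-vector table (here called arm_smmu_msi_cfg) holding the three register offsets:

    static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
    {
            struct device *dev = msi_desc_to_dev(desc);
            struct arm_smmu_device *smmu = dev_get_drvdata(dev);
            phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index]; /* assumed */
            phys_addr_t doorbell;

            doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
            doorbell &= MSI_CFG0_ADDR_MASK;

            writeq_relaxed(doorbell, smmu->base + cfg[0]);   /* ..._IRQ_CFG0 */
            writel_relaxed(msg->data, smmu->base + cfg[1]);  /* ..._IRQ_CFG1 */
            writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
    }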
2881 static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
2885 struct device *dev = smmu->dev;
2888 writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
2889 writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);
2891 if (smmu->features & ARM_SMMU_FEAT_PRI)
2892 writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
2896 if (!(smmu->features & ARM_SMMU_FEAT_MSI))
2900 dev_info(smmu->dev, "msi_domain absent - falling back to wired irqs\n");
2914 smmu->evtq.q.irq = desc->irq;
2917 smmu->gerr_irq = desc->irq;
2920 smmu->priq.q.irq = desc->irq;
2931 static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
2935 arm_smmu_setup_msis(smmu);
2938 irq = smmu->evtq.q.irq;
2940 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
2943 "arm-smmu-v3-evtq", smmu);
2945 dev_warn(smmu->dev, "failed to enable evtq irq\n");
2947 dev_warn(smmu->dev, "no evtq irq - events will not be reported!\n");
2950 irq = smmu->gerr_irq;
2952 ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
2953 0, "arm-smmu-v3-gerror", smmu);
2955 dev_warn(smmu->dev, "failed to enable gerror irq\n");
2957 dev_warn(smmu->dev, "no gerr irq - errors will not be reported!\n");
2960 if (smmu->features & ARM_SMMU_FEAT_PRI) {
2961 irq = smmu->priq.q.irq;
2963 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
2966 "arm-smmu-v3-priq",
2967 smmu);
2969 dev_warn(smmu->dev,
2972 dev_warn(smmu->dev, "no priq irq - PRI will be broken\n");
2977 static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
2983 ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
2986 dev_err(smmu->dev, "failed to disable irqs\n");
2990 irq = smmu->combined_irq;
2996 ret = devm_request_threaded_irq(smmu->dev, irq,
3000 "arm-smmu-v3-combined-irq", smmu);
3002 dev_warn(smmu->dev, "failed to enable combined irq\n");
3004 arm_smmu_setup_unique_irqs(smmu);
3006 if (smmu->features & ARM_SMMU_FEAT_PRI)
3010 ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
3013 dev_warn(smmu->dev, "failed to enable irqs\n");
3018 static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
3022 ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
3024 dev_err(smmu->dev, "failed to clear cr0\n");
3029 static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
3036 reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
3038 dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
3040 arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
3043 ret = arm_smmu_device_disable(smmu);
3054 writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);
3058 writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);
3061 writeq_relaxed(smmu->strtab_cfg.strtab_base,
3062 smmu->base + ARM_SMMU_STRTAB_BASE);
3063 writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
3064 smmu->base + ARM_SMMU_STRTAB_BASE_CFG);
3067 writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
3068 writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
3069 writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
3072 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
3075 dev_err(smmu->dev, "failed to enable command queue\n");
3081 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
3082 arm_smmu_cmdq_issue_sync(smmu);
3085 if (smmu->features & ARM_SMMU_FEAT_HYP) {
3087 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
3091 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
3092 arm_smmu_cmdq_issue_sync(smmu);
3095 writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
3096 writel_relaxed(smmu->evtq.q.llq.prod,
3097 arm_smmu_page1_fixup(ARM_SMMU_EVTQ_PROD, smmu));
3098 writel_relaxed(smmu->evtq.q.llq.cons,
3099 arm_smmu_page1_fixup(ARM_SMMU_EVTQ_CONS, smmu));
3102 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
3105 dev_err(smmu->dev, "failed to enable event queue\n");
3110 if (smmu->features & ARM_SMMU_FEAT_PRI) {
3111 writeq_relaxed(smmu->priq.q.q_base,
3112 smmu->base + ARM_SMMU_PRIQ_BASE);
3113 writel_relaxed(smmu->priq.q.llq.prod,
3114 arm_smmu_page1_fixup(ARM_SMMU_PRIQ_PROD, smmu));
3115 writel_relaxed(smmu->priq.q.llq.cons,
3116 arm_smmu_page1_fixup(ARM_SMMU_PRIQ_CONS, smmu));
3119 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
3122 dev_err(smmu->dev, "failed to enable PRI queue\n");
3127 if (smmu->features & ARM_SMMU_FEAT_ATS) {
3129 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
3132 dev_err(smmu->dev, "failed to enable ATS check\n");
3137 ret = arm_smmu_setup_irqs(smmu);
3139 dev_err(smmu->dev, "failed to setup irqs\n");
3150 ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
3154 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
3157 dev_err(smmu->dev, "failed to enable SMMU interface\n");
3164 static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
3167 bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;
3170 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);
3174 smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;
3177 smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;
3186 smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
3190 smmu->features |= ARM_SMMU_FEAT_TT_BE;
3194 smmu->features |= ARM_SMMU_FEAT_TT_LE;
3198 dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
3204 smmu->features |= ARM_SMMU_FEAT_PRI;
3207 smmu->features |= ARM_SMMU_FEAT_ATS;
3210 smmu->features |= ARM_SMMU_FEAT_SEV;
3213 smmu->features |= ARM_SMMU_FEAT_MSI;
3215 smmu->options |= ARM_SMMU_OPT_MSIPOLL;
3219 smmu->features |= ARM_SMMU_FEAT_HYP;
3226 dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n",
3231 smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
3234 smmu->features |= ARM_SMMU_FEAT_STALLS;
3238 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
3241 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
3244 dev_err(smmu->dev, "no translation support!\n");
3251 smmu->ias = 40;
3256 dev_err(smmu->dev, "AArch64 table format not supported!\n");
3261 smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
3262 smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;
3265 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
3267 dev_err(smmu->dev, "embedded implementation not supported\n");
3272 smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
3274 if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) {
3281 dev_err(smmu->dev, "command queue size <= %d entries not supported\n",
3286 smmu->evtq.q.llq.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT,
3288 smmu->priq.q.llq.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT,
3292 smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg);
3293 smmu->sid_bits = FIELD_GET(IDR1_SIDSIZE, reg);
3299 if (smmu->sid_bits <= STRTAB_SPLIT)
3300 smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;
3303 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3);
3305 smmu->features |= ARM_SMMU_FEAT_RANGE_INV;
3308 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
3311 smmu->evtq.max_stalls = FIELD_GET(IDR5_STALL_MAX, reg);
3315 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
3317 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
3319 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
3323 smmu->features |= ARM_SMMU_FEAT_VAX;
3328 smmu->oas = 32;
3331 smmu->oas = 36;
3334 smmu->oas = 40;
3337 smmu->oas = 42;
3340 smmu->oas = 44;
3343 smmu->oas = 52;
3344 smmu->pgsize_bitmap |= 1ULL << 42; /* 4TB */
3347 dev_info(smmu->dev,
3351 smmu->oas = 48;
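Lines 3328-3351 are the tail of a switch over the IDR5.OAS field decoding the output address size; the 52-bit case additionally enables the 4TB page size, and an unrecognised encoding falls back to 48 bits. Reassembled; the IDR5_OAS_* case labels and the message text are assumptions consistent with the matched fragments:

    switch (FIELD_GET(IDR5_OAS, reg)) {
    case IDR5_OAS_32_BIT:
            smmu->oas = 32;
            break;
    case IDR5_OAS_36_BIT:
            smmu->oas = 36;
            break;
    case IDR5_OAS_40_BIT:
            smmu->oas = 40;
            break;
    case IDR5_OAS_42_BIT:
            smmu->oas = 42;
            break;
    case IDR5_OAS_44_BIT:
            smmu->oas = 44;
            break;
    case IDR5_OAS_52_BIT:
            smmu->oas = 52;
            smmu->pgsize_bitmap |= 1ULL << 42; /* 4TB */
            break;
    default:
            dev_info(smmu->dev,
                     "unknown output address size. Truncating to 48-bit\n");
            fallthrough;
    case IDR5_OAS_48_BIT:
            smmu->oas = 48;
    }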
3355 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
3357 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
3360 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
3361 dev_warn(smmu->dev,
3364 smmu->ias = max(smmu->ias, smmu->oas);
3366 if (arm_smmu_sva_supported(smmu))
3367 smmu->features |= ARM_SMMU_FEAT_SVA;
3369 dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
3370 smmu->ias, smmu->oas, smmu->features);
3375 static void acpi_smmu_get_options(u32 model, struct arm_smmu_device *smmu)
3379 smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY;
3382 smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH;
3386 dev_notice(smmu->dev, "option mask 0x%x\n", smmu->options);
3390 struct arm_smmu_device *smmu)
3393 struct device *dev = smmu->dev;
3401 acpi_smmu_get_options(iort_smmu->model, smmu);
3404 smmu->features |= ARM_SMMU_FEAT_COHERENCY;
3410 struct arm_smmu_device *smmu)
3417 struct arm_smmu_device *smmu)
3430 parse_driver_options(smmu);
3433 smmu->features |= ARM_SMMU_FEAT_COHERENCY;
3438 static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
3440 if (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY)
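arm_smmu_resource_size (3438) sizes the MMIO region the driver expects: implementations flagged ARM_SMMU_OPT_PAGE0_REGS_ONLY (set from the ACPI model quirks at 3379) expose only the 64K Page 0, while everything else provides the full two-page layout. Sketch; the 128K fallback is inferred from the Page 0/Page 1 split:

    static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
    {
            if (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY)
                    return SZ_64K;

            return SZ_128K;
    }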
3500 struct arm_smmu_device *smmu;
3504 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
3505 if (!smmu) {
3509 smmu->dev = dev;
3512 ret = arm_smmu_device_dt_probe(pdev, smmu);
3514 ret = arm_smmu_device_acpi_probe(pdev, smmu);
3526 if (resource_size(res) < arm_smmu_resource_size(smmu)) {
3536 smmu->base = arm_smmu_ioremap(dev, ioaddr, ARM_SMMU_REG_SZ);
3537 if (IS_ERR(smmu->base))
3538 return PTR_ERR(smmu->base);
3540 if (arm_smmu_resource_size(smmu) > SZ_64K) {
3541 smmu->page1 = arm_smmu_ioremap(dev, ioaddr + SZ_64K,
3543 if (IS_ERR(smmu->page1))
3544 return PTR_ERR(smmu->page1);
3546 smmu->page1 = smmu->base;
3553 smmu->combined_irq = irq;
3557 smmu->evtq.q.irq = irq;
3561 smmu->priq.q.irq = irq;
3565 smmu->gerr_irq = irq;
3568 ret = arm_smmu_device_hw_probe(smmu);
3573 ret = arm_smmu_init_structures(smmu);
3578 platform_set_drvdata(pdev, smmu);
3581 ret = arm_smmu_device_reset(smmu, bypass);
3586 ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
3591 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
3592 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
3594 ret = iommu_device_register(&smmu->iommu);
3605 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
3608 iommu_device_unregister(&smmu->iommu);
3609 iommu_device_sysfs_remove(&smmu->iommu);
3610 arm_smmu_device_disable(smmu);
3621 { .compatible = "arm,smmu-v3", },
3628 .name = "arm-smmu-v3",
3640 MODULE_ALIAS("platform:arm-smmu-v3");