// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 */

#include <linux/of_device.h>
#include <linux/qcom_scm.h>

#include "arm-smmu.h"

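/*
 * Implementation-specific wrapper around the generic SMMU instance.
 * bypass_quirk and bypass_cbndx track the context bank reserved to
 * emulate bypass streams on firmware that mishandles S2CR writes.
 */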
struct qcom_smmu {
	struct arm_smmu_device smmu;
	bool bypass_quirk;
	u8 bypass_cbndx;
};

static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
{
	return container_of(smmu, struct qcom_smmu, smmu);
}

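/*
 * Client devices that should be attached to an identity (bypass) domain
 * by default; see qcom_smmu_def_domain_type().
 */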
static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-gmu" },
	{ .compatible = "qcom,mdp4" },
	{ .compatible = "qcom,mdss" },
	{ .compatible = "qcom,sc7180-mdss" },
	{ .compatible = "qcom,sc7180-mss-pil" },
	{ .compatible = "qcom,sdm845-mdss" },
	{ .compatible = "qcom,sdm845-mss-pil" },
	{ }
};

static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
{
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	unsigned int last_s2cr;
	u32 reg;
	u32 smr;
	int i;

	/*
	 * Some platforms support more than the Arm SMMU architected maximum of
	 * 128 stream matching groups. For unknown reasons, the additional
	 * groups don't exhibit the same behavior as the architected registers,
	 * so limit the groups to 128 until the behavior is fixed for the other
	 * groups.
	 */
	if (smmu->num_mapping_groups > 128) {
		dev_notice(smmu->dev, "\tLimiting the stream matching groups to 128\n");
		smmu->num_mapping_groups = 128;
	}

	last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);

	/*
	 * With some firmware versions writes to S2CR of type FAULT are
	 * ignored, and writing BYPASS will end up written as FAULT in the
	 * register. Perform a write to S2CR to detect if this is the case and
	 * if so reserve a context bank to emulate bypass streams.
	 */
	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, S2CR_TYPE_BYPASS) |
	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, 0xff) |
	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, S2CR_PRIVCFG_DEFAULT);
	arm_smmu_gr0_write(smmu, last_s2cr, reg);
	reg = arm_smmu_gr0_read(smmu, last_s2cr);
	if (FIELD_GET(ARM_SMMU_S2CR_TYPE, reg) != S2CR_TYPE_BYPASS) {
		qsmmu->bypass_quirk = true;
		qsmmu->bypass_cbndx = smmu->num_context_banks - 1;

		set_bit(qsmmu->bypass_cbndx, smmu->context_map);

		arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);

		reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
	}

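	/*
	 * Read back stream mappings that may already have been programmed by
	 * the bootloader and preserve them as valid bypass entries, so the
	 * corresponding streams are not disrupted before a domain is attached.
	 */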
	for (i = 0; i < smmu->num_mapping_groups; i++) {
		smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));

		if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
			/* Ignore valid bit for SMR mask extraction. */
			smr &= ~ARM_SMMU_SMR_VALID;
			smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
			smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
			smmu->smrs[i].valid = true;

			smmu->s2crs[i].type = S2CR_TYPE_BYPASS;
			smmu->s2crs[i].privcfg = S2CR_PRIVCFG_DEFAULT;
			smmu->s2crs[i].cbndx = 0xff;
		}
	}

	return 0;
}

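/*
 * S2CR write hook: on firmware with the quirky S2CR handling detected in
 * qcom_smmu_cfg_probe(), rewrite BYPASS entries to translate through the
 * reserved context bank and FAULT entries to BYPASS, so the hardware ends
 * up with the intended configuration.
 */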
static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
	u32 cbndx = s2cr->cbndx;
	u32 type = s2cr->type;
	u32 reg;

	if (qsmmu->bypass_quirk) {
		if (type == S2CR_TYPE_BYPASS) {
			/*
			 * Firmware with quirky S2CR handling will substitute
			 * BYPASS writes with FAULT, so point the stream to the
			 * reserved context bank and ask for translation on the
			 * stream.
			 */
			type = S2CR_TYPE_TRANS;
			cbndx = qsmmu->bypass_cbndx;
		} else if (type == S2CR_TYPE_FAULT) {
			/*
			 * Firmware with quirky S2CR handling will ignore FAULT
			 * writes, so trick it into writing FAULT by asking for
			 * BYPASS.
			 */
			type = S2CR_TYPE_BYPASS;
			cbndx = 0xff;
		}
	}

	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, type) |
	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, cbndx) |
	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

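/* Use an identity default domain for the client devices matched above. */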
static int qcom_smmu_def_domain_type(struct device *dev)
{
	const struct of_device_id *match =
		of_match_device(qcom_smmu_client_of_match, dev);

	return match ? IOMMU_DOMAIN_IDENTITY : 0;
}

static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
{
	int ret;

	/*
	 * To address performance degradation in non-real time clients,
	 * such as USB and UFS, turn off wait-for-safe on sdm845 based boards,
	 * such as MTP and db845, whose firmwares implement secure monitor
	 * call handlers to turn on/off the wait-for-safe logic.
	 */
	ret = qcom_scm_qsmmu500_wait_safe_toggle(0);
	if (ret)
		dev_warn(smmu->dev, "Failed to turn off SAFE logic\n");

	return ret;
}

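/*
 * Device reset hook: perform the generic MMU-500 reset and, on sdm845,
 * additionally disable the wait-for-safe logic via the secure monitor.
 */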
static int qcom_smmu500_reset(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;

	arm_mmu500_reset(smmu);

	if (of_device_is_compatible(np, "qcom,sdm845-smmu-500"))
		return qcom_sdm845_smmu500_reset(smmu);

	return 0;
}

static const struct arm_smmu_impl qcom_smmu_impl = {
	.cfg_probe = qcom_smmu_cfg_probe,
	.def_domain_type = qcom_smmu_def_domain_type,
	.reset = qcom_smmu500_reset,
	.write_s2cr = qcom_smmu_write_s2cr,
};

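/*
 * Wrap the core-probed arm_smmu_device in a qcom_smmu and install the
 * Qualcomm implementation hooks. Probing is deferred until the SCM driver
 * is available, since the reset hook may issue SCM calls.
 */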
struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
{
	struct qcom_smmu *qsmmu;

	/* Check to make sure qcom_scm has finished probing */
	if (!qcom_scm_is_available())
		return ERR_PTR(-EPROBE_DEFER);

	qsmmu = devm_kzalloc(smmu->dev, sizeof(*qsmmu), GFP_KERNEL);
	if (!qsmmu)
		return ERR_PTR(-ENOMEM);

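	/*
	 * Copy the state already set up by the core driver into the wrapper
	 * and release the original allocation; the caller continues with the
	 * embedded copy.
	 */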
	qsmmu->smmu = *smmu;

	qsmmu->smmu.impl = &qcom_smmu_impl;
	devm_kfree(smmu->dev, smmu);

	return &qsmmu->smmu;
}