// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the IOMMU SVA API for the ARM SMMUv3
 */

#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>

#include "arm-smmu-v3.h"
#include "../../io-pgtable-arm.h"

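/* Serializes changes to a master's sva_enabled flag and its bond list */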
static DEFINE_MUTEX(sva_lock);

/*
 * Check if the CPU ASID is available on the SMMU side. If a private context
 * descriptor is using it, try to replace it.
 */
static struct arm_smmu_ctx_desc *
arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
{
	int ret;
	u32 new_asid;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain;

	cd = xa_load(&arm_smmu_asid_xa, asid);
	if (!cd)
		return NULL;

	if (cd->mm) {
		if (WARN_ON(cd->mm != mm))
			return ERR_PTR(-EINVAL);
		/* All devices bound to this mm use the same cd struct. */
		refcount_inc(&cd->refs);
		return cd;
	}

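	/*
	 * The ASID is in use by a private context descriptor. Allocate a
	 * fresh ASID for that domain so this one can be handed to the mm.
	 */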
	smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
	smmu = smmu_domain->smmu;

	ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
	if (ret)
		return ERR_PTR(-ENOSPC);
	/*
	 * Race with unmap: TLB invalidations will start targeting the new ASID,
	 * which isn't assigned yet. We'll do an invalidate-all on the old ASID
	 * later, so it doesn't matter.
	 */
	cd->asid = new_asid;
	/*
	 * Update ASID and invalidate CD in all associated masters. There will
	 * be some overlap between use of both ASIDs, until we invalidate the
	 * TLB.
	 */
	arm_smmu_write_ctx_desc(smmu_domain, 0, cd);

	/* Invalidate TLB entries previously associated with that context */
	arm_smmu_tlb_inv_asid(smmu, asid);

	xa_erase(&arm_smmu_asid_xa, asid);
	return NULL;
}

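/*
 * Allocate a context descriptor that installs the mm's page tables in the
 * SMMU, reusing the mm's CPU ASID. Returns the new descriptor, the existing
 * one if this mm is already bound, or an ERR_PTR on failure.
 */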
__maybe_unused
static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
{
	u16 asid;
	int err = 0;
	u64 tcr, par, reg;
	struct arm_smmu_ctx_desc *cd;
	struct arm_smmu_ctx_desc *ret = NULL;

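	/* Pin the ASID so that the arch code won't reallocate it under us */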
	asid = arm64_mm_context_get(mm);
	if (!asid)
		return ERR_PTR(-ESRCH);

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd) {
		err = -ENOMEM;
		goto out_put_context;
	}

	refcount_set(&cd->refs, 1);

	mutex_lock(&arm_smmu_asid_lock);
	ret = arm_smmu_share_asid(mm, asid);
	if (ret) {
		mutex_unlock(&arm_smmu_asid_lock);
		goto out_free_cd;
	}

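	/* Claim the now-free ASID for this shared descriptor */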
	err = xa_insert(&arm_smmu_asid_xa, asid, cd, GFP_KERNEL);
	mutex_unlock(&arm_smmu_asid_lock);

	if (err)
		goto out_free_asid;

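	/*
	 * Build a TCR mirroring the CPU's configuration: TTBR0 walks only
	 * (EPD1 disables TTBR1), write-back write-allocate, inner shareable.
	 */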
	tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) |
	      FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS) |
	      CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;

	switch (PAGE_SIZE) {
	case SZ_4K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_4K);
		break;
	case SZ_16K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_16K);
		break;
	case SZ_64K:
		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_64K);
		break;
	default:
		WARN_ON(1);
		err = -EINVAL;
		goto out_free_asid;
	}

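	/* Take the output address size from the sanitized CPU PA range */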
	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
	tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);

	cd->ttbr = virt_to_phys(mm->pgd);
	cd->tcr = tcr;
	/*
	 * MAIR value is pretty much constant and global, so we can just get it
	 * from the current CPU register
	 */
	cd->mair = read_sysreg(mair_el1);
	cd->asid = asid;
	cd->mm = mm;

	return cd;

out_free_asid:
	arm_smmu_free_asid(cd);
out_free_cd:
	kfree(cd);
out_put_context:
	arm64_mm_context_put(mm);
	return err < 0 ? ERR_PTR(err) : ret;
}

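/*
 * Release a shared context descriptor. The ASID and the descriptor itself
 * are only freed once the last reference to the cd is dropped.
 */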
__maybe_unused
static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
{
	if (arm_smmu_free_asid(cd)) {
		/* Unpin ASID */
		arm64_mm_context_put(cd->mm);
		kfree(cd);
	}
}

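/*
 * Check whether this SMMU can share page tables with the CPU: it must
 * support broadcast TLB maintenance, coherency, and the CPU's VA size,
 * output address size, page granule and ASID width.
 */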
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
	unsigned long reg, fld;
	unsigned long oas;
	unsigned long asid_bits;
	u32 feat_mask = ARM_SMMU_FEAT_BTM | ARM_SMMU_FEAT_COHERENCY;

	if (vabits_actual == 52)
		feat_mask |= ARM_SMMU_FEAT_VAX;

	if ((smmu->features & feat_mask) != feat_mask)
		return false;

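	/* The SMMU must support the page granule used by the CPU */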
	if (!(smmu->pgsize_bitmap & PAGE_SIZE))
		return false;

	/*
	 * Get the smallest PA size of all CPUs (sanitized by cpufeature). We're
	 * not even pretending to support AArch32 here. Abort if the MMU outputs
	 * addresses larger than what we support.
	 */
	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
	oas = id_aa64mmfr0_parange_to_phys_shift(fld);
	if (smmu->oas < oas)
		return false;

	/* We can support bigger ASIDs than the CPU, but not smaller */
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_ASID_SHIFT);
	asid_bits = fld ? 16 : 8;
	if (smmu->asid_bits < asid_bits)
		return false;

	/*
	 * See max_pinned_asids in arch/arm64/mm/context.c. The following is
	 * generally the maximum number of bindable processes.
	 */
	if (arm64_kernel_unmapped_at_el0())
		asid_bits--;
	dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
		num_possible_cpus() - 2);

	return true;
}

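/*
 * I/O page faults are not yet supported by this driver, so SVA cannot
 * actually be enabled on any master for now.
 */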
static bool arm_smmu_iopf_supported(struct arm_smmu_master *master)
{
	return false;
}

bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
{
	if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))
		return false;

	/* SSID and IOPF support are mandatory for the moment */
	return master->ssid_bits && arm_smmu_iopf_supported(master);
}

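/* Report whether SVA is currently enabled for this master, under sva_lock */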
bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
{
	bool enabled;

	mutex_lock(&sva_lock);
	enabled = master->sva_enabled;
	mutex_unlock(&sva_lock);
	return enabled;
}

int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
{
	mutex_lock(&sva_lock);
	master->sva_enabled = true;
	mutex_unlock(&sva_lock);

	return 0;
}

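/* Disabling SVA fails while any bond to an mm still exists */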
int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
{
	mutex_lock(&sva_lock);
	if (!list_empty(&master->bonds)) {
		dev_err(master->dev, "cannot disable SVA, device is bound\n");
		mutex_unlock(&sva_lock);
		return -EBUSY;
	}
	master->sva_enabled = false;
	mutex_unlock(&sva_lock);

	return 0;
}