// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2019-2020 NVIDIA CORPORATION.  All rights reserved.

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "arm-smmu.h"

/*
 * Tegra194 has three ARM MMU-500 instances.
 * Two of them are used together, must be programmed identically, and
 * interleave IOVA accesses across them; they translate accesses from
 * non-isochronous HW devices.
 * The third instance translates accesses from isochronous HW devices.
 * This implementation supports programming of the two instances that must
 * be programmed identically.
 * The third instance is handled by the standard arm-smmu driver itself and
 * is outside the scope of this implementation.
 */
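/*
 * For orientation, the mirrored pair is expected to appear as a single
 * device with (at least) two reg entries: the first is mapped by the core
 * arm-smmu driver, the second by nvidia_smmu_impl_init() below. A rough
 * device tree sketch; the addresses and sizes are purely illustrative,
 * not the authoritative Tegra194 values:
 *
 *	smmu@12000000 {
 *		compatible = "nvidia,tegra194-smmu";
 *		reg = <0x12000000 0x800000>,	// instance 0
 *		      <0x11000000 0x800000>;	// instance 1
 *	};
 */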
#define NUM_SMMU_INSTANCES 2

struct nvidia_smmu {
	struct arm_smmu_device	smmu;
	void __iomem		*bases[NUM_SMMU_INSTANCES];
};

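/*
 * Translate an (instance, page) pair to a virtual address. Instance 0 uses
 * the mapping created by the core arm-smmu driver; instance 1 is mapped in
 * nvidia_smmu_impl_init() below.
 */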
static inline void __iomem *nvidia_smmu_page(struct arm_smmu_device *smmu,
					     unsigned int inst, int page)
{
	struct nvidia_smmu *nvidia_smmu;

	nvidia_smmu = container_of(smmu, struct nvidia_smmu, smmu);
	return nvidia_smmu->bases[inst] + (page << smmu->pgshift);
}

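/*
 * Both instances are programmed identically, so reading back registers from
 * instance 0 alone is sufficient.
 */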
static u32 nvidia_smmu_read_reg(struct arm_smmu_device *smmu,
				int page, int offset)
{
	void __iomem *reg = nvidia_smmu_page(smmu, 0, page) + offset;

	return readl_relaxed(reg);
}

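/* Mirror every register write to both instances. */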
static void nvidia_smmu_write_reg(struct arm_smmu_device *smmu,
				  int page, int offset, u32 val)
{
	unsigned int i;

	for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
		void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;

		writel_relaxed(val, reg);
	}
}

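/*
 * 64-bit counterparts of the accessors above: reads come from instance 0,
 * writes are mirrored to both instances.
 */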
static u64 nvidia_smmu_read_reg64(struct arm_smmu_device *smmu,
				  int page, int offset)
{
	void __iomem *reg = nvidia_smmu_page(smmu, 0, page) + offset;

	return readq_relaxed(reg);
}

static void nvidia_smmu_write_reg64(struct arm_smmu_device *smmu,
				    int page, int offset, u64 val)
{
	unsigned int i;

	for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
		void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;

		writeq_relaxed(val, reg);
	}
}

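/*
 * Issue a TLB sync to both instances (arm_smmu_writel() dispatches to the
 * mirrored nvidia_smmu_write_reg() hook) and then poll the status register
 * of each instance until neither reports an active sync, backing off
 * exponentially between bursts of spins.
 */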
static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
				 int sync, int status)
{
	unsigned int delay;

	arm_smmu_writel(smmu, page, sync, 0);

	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		unsigned int spin_cnt;

		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			u32 val = 0;
			unsigned int i;

			for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
				void __iomem *reg;

				reg = nvidia_smmu_page(smmu, i, page) + status;
				val |= readl_relaxed(reg);
			}

			if (!(val & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
				return;

			cpu_relax();
		}

		udelay(delay);
	}

	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}

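/*
 * The reset hook only needs to clear any stale global fault status on each
 * instance; the rest of the reset sequence is performed by the core driver
 * through the mirrored register hooks.
 */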
static int nvidia_smmu_reset(struct arm_smmu_device *smmu)
{
	unsigned int i;

	for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
		u32 val;
		void __iomem *reg = nvidia_smmu_page(smmu, i, ARM_SMMU_GR0) +
				    ARM_SMMU_GR0_sGFSR;

		/* clear global FSR */
		val = readl_relaxed(reg);
		writel_relaxed(val, reg);
	}

	return 0;
}

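/* Report and clear a global fault on a single instance. */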
static irqreturn_t nvidia_smmu_global_fault_inst(int irq,
						 struct arm_smmu_device *smmu,
						 int inst)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	void __iomem *gr0_base = nvidia_smmu_page(smmu, inst, ARM_SMMU_GR0);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	if (!gfsr)
		return IRQ_NONE;

	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	dev_err_ratelimited(smmu->dev,
			    "Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
			    "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
			    gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel_relaxed(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

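/*
 * The global fault interrupt line is shared by both instances, so a fault
 * may be pending on either one; service them both.
 */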
static irqreturn_t nvidia_smmu_global_fault(int irq, void *dev)
{
	unsigned int inst;
	irqreturn_t ret = IRQ_NONE;
	struct arm_smmu_device *smmu = dev;

	for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
		irqreturn_t irq_ret;

		irq_ret = nvidia_smmu_global_fault_inst(irq, smmu, inst);
		if (irq_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}

	return ret;
}

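/* Report and clear a context fault on one context bank of one instance. */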
static irqreturn_t nvidia_smmu_context_fault_bank(int irq,
						  struct arm_smmu_device *smmu,
						  int idx, int inst)
{
	u32 fsr, fsynr, cbfrsynra;
	unsigned long iova;
	void __iomem *gr1_base = nvidia_smmu_page(smmu, inst, ARM_SMMU_GR1);
	void __iomem *cb_base = nvidia_smmu_page(smmu, inst, smmu->numpage + idx);

	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
	if (!(fsr & ARM_SMMU_FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	cbfrsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(idx));

	dev_err_ratelimited(smmu->dev,
			    "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cbfrsynra, idx);

	writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

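/*
 * The context fault handler is registered with the iommu_domain as its
 * token, but the interrupt line is shared, so every context bank of every
 * instance has to be scanned for pending faults.
 */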
static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev)
{
	int idx;
	unsigned int inst;
	irqreturn_t ret = IRQ_NONE;
	struct arm_smmu_device *smmu;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain;

	smmu_domain = container_of(domain, struct arm_smmu_domain, domain);
	smmu = smmu_domain->smmu;

	for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
		irqreturn_t irq_ret;

		/*
		 * Interrupt line is shared between all contexts.
		 * Check for faults across all contexts.
		 */
		for (idx = 0; idx < smmu->num_context_banks; idx++) {
			irq_ret = nvidia_smmu_context_fault_bank(irq, smmu,
								 idx, inst);
			if (irq_ret == IRQ_HANDLED)
				ret = IRQ_HANDLED;
		}
	}

	return ret;
}

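/*
 * Hooks that override the default arm-smmu accessors and handlers so that
 * the two instances stay in lockstep.
 */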
static const struct arm_smmu_impl nvidia_smmu_impl = {
	.read_reg = nvidia_smmu_read_reg,
	.write_reg = nvidia_smmu_write_reg,
	.read_reg64 = nvidia_smmu_read_reg64,
	.write_reg64 = nvidia_smmu_write_reg64,
	.reset = nvidia_smmu_reset,
	.tlb_sync = nvidia_smmu_tlb_sync,
	.global_fault = nvidia_smmu_global_fault,
	.context_fault = nvidia_smmu_context_fault,
};

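/*
 * Wrap the arm_smmu_device allocated by the core driver in a nvidia_smmu
 * and map the second instance, so that all subsequent register accesses go
 * through the hooks above.
 */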
struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
{
	struct resource *res;
	struct device *dev = smmu->dev;
	struct nvidia_smmu *nvidia_smmu;
	struct platform_device *pdev = to_platform_device(dev);

	nvidia_smmu = devm_kzalloc(dev, sizeof(*nvidia_smmu), GFP_KERNEL);
	if (!nvidia_smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * Copy the data from the struct arm_smmu_device *smmu allocated in
	 * arm-smmu.c. The arm_smmu_device embedded in struct nvidia_smmu
	 * replaces the one used by arm-smmu.c once this function returns.
	 * This is necessary so that nvidia_smmu can later be derived from
	 * the smmu pointer passed to the arm_smmu_impl callbacks.
	 */
	nvidia_smmu->smmu = *smmu;
	/* Instance 0 is ioremapped by arm-smmu.c. */
	nvidia_smmu->bases[0] = smmu->base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return ERR_PTR(-ENODEV);

	nvidia_smmu->bases[1] = devm_ioremap_resource(dev, res);
	if (IS_ERR(nvidia_smmu->bases[1]))
		return ERR_CAST(nvidia_smmu->bases[1]);

	nvidia_smmu->smmu.impl = &nvidia_smmu_impl;

	/*
	 * Free the struct arm_smmu_device *smmu allocated in arm-smmu.c.
	 * Once this function returns, arm-smmu.c uses the arm_smmu_device
	 * embedded in struct nvidia_smmu instead.
	 */
	devm_kfree(dev, smmu);

	return &nvidia_smmu->smmu;
}