// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

/*
 * v1.x - v3.x SYSMMU supports 32-bit physical and 32-bit virtual address
 * spaces. v5.0 introduced support for a 36-bit physical address space by
 * shifting all page entry values by 4 bits.
 * All SYSMMU controllers in the system support address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized on the first SYSMMU probe to the
 * proper value (0 or 4).
 */
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4

static const sysmmu_pte_t *LV1_PROT;
static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
	((0 << 15) | (0 << 10)), /* no access */
	((1 << 15) | (1 << 10)), /* IOMMU_READ only */
	((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
	(0 << 4), /* no access */
	(1 << 4), /* IOMMU_READ only */
	(2 << 4), /* IOMMU_WRITE only */
	(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
};

static const sysmmu_pte_t *LV2_PROT;
static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
	((0 << 9) | (0 << 4)), /* no access */
	((1 << 9) | (1 << 4)), /* IOMMU_READ only */
	((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
	(0 << 2), /* no access */
	(1 << 2), /* IOMMU_READ only */
	(2 << 2), /* IOMMU_WRITE only */
	(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
};

#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)

#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)

#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

#define CFG_LRU		0x1
#define CFG_EAP		(1 << 2)
#define CFG_QOS(n)	((n & 0xF) << 7)
#define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE	(1 << 20) /* System MMU 3.2+ only */

#define CTRL_VM_ENABLE			BIT(0)
#define CTRL_VM_FAULT_MODE_STALL	BIT(3)
#define CAPA0_CAPA1_EXIST		BIT(11)
#define CAPA1_VCR_ENABLED		BIT(14)

/* common registers */
#define REG_MMU_CTRL		0x000
#define REG_MMU_CFG		0x004
#define REG_MMU_STATUS		0x008
#define REG_MMU_VERSION		0x034

#define MMU_MAJ_VER(val)	((val) >> 7)
#define MMU_MIN_VER(val)	((val) & 0x7F)
#define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))

/* v1.x - v3.x registers */
#define REG_PAGE_FAULT_ADDR	0x024
#define REG_AW_FAULT_ADDR	0x028
#define REG_AR_FAULT_ADDR	0x02C
#define REG_DEFAULT_SLAVE_ADDR	0x030

/* v5.x registers */
#define REG_V5_FAULT_AR_VA	0x070
#define REG_V5_FAULT_AW_VA	0x080

/* v7.x registers */
#define REG_V7_CAPA0		0x870
#define REG_V7_CAPA1		0x874
#define REG_V7_CTRL_VM		0x8000

#define has_sysmmu(dev)		(dev_iommu_priv_get(dev) != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

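/*
 * Helpers for walking the two-level page table: section_entry() returns a
 * pointer to the lv1 (section) entry for @iova, while page_entry() follows an
 * lv1 entry of page-table type down to the lv2 (page) entry for @iova.
 */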
static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

struct sysmmu_fault {
	sysmmu_iova_t addr;	/* IOVA address that caused fault */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault() */
};

struct sysmmu_v1_fault_info {
	unsigned short addr_reg; /* register to read IOVA fault address */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault */
};

static const struct sysmmu_v1_fault_info sysmmu_v1_faults[] = {
	{ REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
	{ REG_AR_FAULT_ADDR, "MULTI-HIT", IOMMU_FAULT_READ },
	{ REG_AW_FAULT_ADDR, "MULTI-HIT", IOMMU_FAULT_WRITE },
	{ REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
	{ REG_AR_FAULT_ADDR, "SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ REG_AR_FAULT_ADDR, "ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ REG_AW_FAULT_ADDR, "SECURITY PROTECTION", IOMMU_FAULT_WRITE },
	{ REG_AW_FAULT_ADDR, "ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

/* SysMMU v5 has the same faults for AR (0..4 bits) and AW (16..20 bits) */
static const char * const sysmmu_v5_fault_names[] = {
	"PTW",
	"PAGE",
	"MULTI-HIT",
	"ACCESS PROTECTION",
	"SECURITY PROTECTION"
};

static const char * const sysmmu_v7_fault_names[] = {
	"PTW",
	"PAGE",
	"ACCESS PROTECTION",
	"RESERVED"
};

/*
 * This structure is attached to dev->iommu->priv of the master device
 * on device add. It contains a list of the SYSMMU controllers defined by
 * the device tree which are bound to the given master device. It is usually
 * referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
	struct iommu_domain *domain;	/* domain this device is attached to */
	struct mutex rpm_lock;		/* for runtime pm of all sysmmus */
};

/*
 * This structure is the Exynos-specific generalization of struct iommu_domain.
 * It contains a list of SYSMMU controllers from all master devices which have
 * been attached to this domain, and the page tables of the IO address space
 * defined by it. It is usually referenced by the 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

struct sysmmu_drvdata;

/*
 * SysMMU version specific data. Contains offsets for the registers which can
 * be found in different SysMMU variants, but have different offset values.
 * Also contains version specific callbacks to abstract the hardware.
 */
struct sysmmu_variant {
	u32 pt_base;		/* page table base address (physical) */
	u32 flush_all;		/* invalidate all TLB entries */
	u32 flush_entry;	/* invalidate specific TLB entry */
	u32 flush_range;	/* invalidate TLB entries in specified range */
	u32 flush_start;	/* start address of range invalidation */
	u32 flush_end;		/* end address of range invalidation */
	u32 int_status;		/* interrupt status information */
	u32 int_clear;		/* clear the interrupt */
	u32 fault_va;		/* IOVA address that caused fault */
	u32 fault_info;		/* fault transaction info */

	int (*get_fault_info)(struct sysmmu_drvdata *data, unsigned int itype,
			      struct sysmmu_fault *fault);
};

/*
 * This structure holds all data of a single SYSMMU controller; this includes
 * hw resources like registers and clocks, pointers and list nodes to connect
 * it to all other structures, internal state and parameters read from device
 * tree. It is usually referenced by 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	struct device_link *link;	/* runtime PM link to master */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *aclk;		/* SYSMMU's aclk clock */
	struct clk *pclk;		/* SYSMMU's pclk clock */
	struct clk *clk_master;		/* master's device clock */
	spinlock_t lock;		/* lock for modifying state */
	bool active;			/* current status */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */

	struct iommu_device iommu;	/* IOMMU core handle */
	const struct sysmmu_variant *variant; /* version specific data */

	/* v7 fields */
	bool has_vcr;			/* virtual machine control register */
};

#define SYSMMU_REG(data, reg) ((data)->sfrbase + (data)->variant->reg)

static int exynos_sysmmu_v1_get_fault_info(struct sysmmu_drvdata *data,
					   unsigned int itype,
					   struct sysmmu_fault *fault)
{
	const struct sysmmu_v1_fault_info *finfo;

	if (itype >= ARRAY_SIZE(sysmmu_v1_faults))
		return -ENXIO;

	finfo = &sysmmu_v1_faults[itype];
	fault->addr = readl(data->sfrbase + finfo->addr_reg);
	fault->name = finfo->name;
	fault->type = finfo->type;

	return 0;
}

static int exynos_sysmmu_v5_get_fault_info(struct sysmmu_drvdata *data,
					   unsigned int itype,
					   struct sysmmu_fault *fault)
{
	unsigned int addr_reg;

	if (itype < ARRAY_SIZE(sysmmu_v5_fault_names)) {
		fault->type = IOMMU_FAULT_READ;
		addr_reg = REG_V5_FAULT_AR_VA;
	} else if (itype >= 16 && itype <= 20) {
		fault->type = IOMMU_FAULT_WRITE;
		addr_reg = REG_V5_FAULT_AW_VA;
		itype -= 16;
	} else {
		return -ENXIO;
	}

	fault->name = sysmmu_v5_fault_names[itype];
	fault->addr = readl(data->sfrbase + addr_reg);

	return 0;
}

static int exynos_sysmmu_v7_get_fault_info(struct sysmmu_drvdata *data,
					   unsigned int itype,
					   struct sysmmu_fault *fault)
{
	u32 info = readl(SYSMMU_REG(data, fault_info));

	fault->addr = readl(SYSMMU_REG(data, fault_va));
	fault->name = sysmmu_v7_fault_names[itype % 4];
	fault->type = (info & BIT(20)) ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	return 0;
}

/* SysMMU v1..v3 */
static const struct sysmmu_variant sysmmu_v1_variant = {
	.flush_all	= 0x0c,
	.flush_entry	= 0x10,
	.pt_base	= 0x14,
	.int_status	= 0x18,
	.int_clear	= 0x1c,

	.get_fault_info	= exynos_sysmmu_v1_get_fault_info,
};

/* SysMMU v5 */
static const struct sysmmu_variant sysmmu_v5_variant = {
	.pt_base	= 0x0c,
	.flush_all	= 0x10,
	.flush_entry	= 0x14,
	.flush_range	= 0x18,
	.flush_start	= 0x20,
	.flush_end	= 0x24,
	.int_status	= 0x60,
	.int_clear	= 0x64,

	.get_fault_info	= exynos_sysmmu_v5_get_fault_info,
};

/* SysMMU v7: non-VM capable register layout */
static const struct sysmmu_variant sysmmu_v7_variant = {
	.pt_base	= 0x0c,
	.flush_all	= 0x10,
	.flush_entry	= 0x14,
	.flush_range	= 0x18,
	.flush_start	= 0x20,
	.flush_end	= 0x24,
	.int_status	= 0x60,
	.int_clear	= 0x64,
	.fault_va	= 0x70,
	.fault_info	= 0x78,

	.get_fault_info	= exynos_sysmmu_v7_get_fault_info,
};

/* SysMMU v7: VM capable register layout */
static const struct sysmmu_variant sysmmu_v7_vm_variant = {
	.pt_base	= 0x800c,
	.flush_all	= 0x8010,
	.flush_entry	= 0x8014,
	.flush_range	= 0x8018,
	.flush_start	= 0x8020,
	.flush_end	= 0x8024,
	.int_status	= 0x60,
	.int_clear	= 0x64,
	.fault_va	= 0x1000,
	.fault_info	= 0x1004,

	.get_fault_info	= exynos_sysmmu_v7_get_fault_info,
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}

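/*
 * Request the SysMMU to stall translation (CTRL_BLOCK) and busy-wait (up to
 * 120 reads of REG_MMU_STATUS) until the hardware reports the blocked state.
 * Returns false and re-enables translation if blocking did not take effect.
 */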
static bool sysmmu_block(struct sysmmu_drvdata *data)
{
	int i = 120;

	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(data);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
	writel(0x1, SYSMMU_REG(data, flush_all));
}

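/*
 * Invalidate @num_inv page-sized TLB entries starting at @iova. SysMMU v1-v3
 * only has a single-entry flush register, so entries are flushed one by one;
 * v5+ uses the range flush registers when more than one entry is affected.
 */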
static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
				sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	if (MMU_MAJ_VER(data->version) < 5 || num_inv == 1) {
		for (i = 0; i < num_inv; i++) {
			writel((iova & SPAGE_MASK) | 1,
			       SYSMMU_REG(data, flush_entry));
			iova += SPAGE_SIZE;
		}
	} else {
		writel(iova & SPAGE_MASK, SYSMMU_REG(data, flush_start));
		writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
		       SYSMMU_REG(data, flush_end));
		writel(0x1, SYSMMU_REG(data, flush_range));
	}
}

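/*
 * Program the physical address of the lv1 page table. On v5+ the register
 * takes the address shifted right by SPAGE_ORDER (i.e. the page frame
 * number). The TLB is flushed afterwards so no stale walks survive.
 */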
static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
	u32 pt_base;

	if (MMU_MAJ_VER(data->version) < 5)
		pt_base = pgd;
	else
		pt_base = pgd >> SPAGE_ORDER;

	writel(pt_base, SYSMMU_REG(data, pt_base));
	__sysmmu_tlb_invalidate(data);
}

static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
{
	BUG_ON(clk_prepare_enable(data->clk_master));
	BUG_ON(clk_prepare_enable(data->clk));
	BUG_ON(clk_prepare_enable(data->pclk));
	BUG_ON(clk_prepare_enable(data->aclk));
}

static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
{
	clk_disable_unprepare(data->aclk);
	clk_disable_unprepare(data->pclk);
	clk_disable_unprepare(data->clk);
	clk_disable_unprepare(data->clk_master);
}

static bool __sysmmu_has_capa1(struct sysmmu_drvdata *data)
{
	u32 capa0 = readl(data->sfrbase + REG_V7_CAPA0);

	return capa0 & CAPA0_CAPA1_EXIST;
}

static void __sysmmu_get_vcr(struct sysmmu_drvdata *data)
{
	u32 capa1 = readl(data->sfrbase + REG_V7_CAPA1);

	data->has_vcr = capa1 & CAPA1_VCR_ENABLED;
}

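/*
 * Read the hardware version with the clocks temporarily enabled and pick the
 * matching register-layout variant: v1-v3, v5, or v7 (with the VM layout if
 * the CAPA registers advertise a virtual machine control register).
 */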
static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
	u32 ver;

	__sysmmu_enable_clocks(data);

	ver = readl(data->sfrbase + REG_MMU_VERSION);

	/* controllers on some SoCs don't report proper version */
	if (ver == 0x80000001u)
		data->version = MAKE_MMU_VER(1, 0);
	else
		data->version = MMU_RAW_VER(ver);

	dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
		MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

	if (MMU_MAJ_VER(data->version) < 5) {
		data->variant = &sysmmu_v1_variant;
	} else if (MMU_MAJ_VER(data->version) < 7) {
		data->variant = &sysmmu_v5_variant;
	} else {
		if (__sysmmu_has_capa1(data))
			__sysmmu_get_vcr(data);
		if (data->has_vcr)
			data->variant = &sysmmu_v7_vm_variant;
		else
			data->variant = &sysmmu_v7_variant;
	}

	__sysmmu_disable_clocks(data);
}

static void show_fault_information(struct sysmmu_drvdata *data,
				   const struct sysmmu_fault *fault)
{
	sysmmu_pte_t *ent;

	dev_err(data->sysmmu, "%s: [%s] %s FAULT occurred at %#x\n",
		dev_name(data->master),
		fault->type == IOMMU_FAULT_READ ? "READ" : "WRITE",
		fault->name, fault->addr);
	dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
	ent = section_entry(phys_to_virt(data->pgtable), fault->addr);
	dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault->addr);
		dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
	}
}

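/*
 * Fault interrupt handler: decode the lowest pending bit of the interrupt
 * status register via the variant's get_fault_info() callback, log it, and
 * give the domain's fault handler a chance via report_iommu_fault(). If no
 * handler recovers the fault, the system is considered unusable and panics.
 */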
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	struct sysmmu_drvdata *data = dev_id;
	unsigned int itype;
	struct sysmmu_fault fault;
	int ret = -ENOSYS;

	WARN_ON(!data->active);

	spin_lock(&data->lock);
	clk_enable(data->clk_master);

	itype = __ffs(readl(SYSMMU_REG(data, int_status)));
	ret = data->variant->get_fault_info(data, itype, &fault);
	if (ret) {
		dev_err(data->sysmmu, "Unhandled interrupt bit %u\n", itype);
		goto out;
	}
	show_fault_information(data, &fault);

	if (data->domain) {
		ret = report_iommu_fault(&data->domain->domain, data->master,
					 fault.addr, fault.type);
	}
	if (ret)
		panic("Unrecoverable System MMU Fault!");

out:
	writel(1 << itype, SYSMMU_REG(data, int_clear));

	/* the SysMMU is in a blocked state when an interrupt occurs */
	sysmmu_unblock(data);
	clk_disable(data->clk_master);
	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	writel(0, data->sfrbase + REG_MMU_CFG);
	data->active = false;
	spin_unlock_irqrestore(&data->lock, flags);

	__sysmmu_disable_clocks(data);
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg;

	if (data->version <= MAKE_MMU_VER(3, 1))
		cfg = CFG_LRU | CFG_QOS(15);
	else if (data->version <= MAKE_MMU_VER(3, 2))
		cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
	else
		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

	cfg |= CFG_EAP; /* enable access protection bits check */

	writel(cfg, data->sfrbase + REG_MMU_CFG);
}

static void __sysmmu_enable_vid(struct sysmmu_drvdata *data)
{
	u32 ctrl;

	if (MMU_MAJ_VER(data->version) < 7 || !data->has_vcr)
		return;

	ctrl = readl(data->sfrbase + REG_V7_CTRL_VM);
	ctrl |= CTRL_VM_ENABLE | CTRL_VM_FAULT_MODE_STALL;
	writel(ctrl, data->sfrbase + REG_V7_CTRL_VM);
}

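/*
 * Bring the SysMMU up: block translation, program CFG and the page table
 * base, enable the v7 VM control register when present, then switch the
 * controller to the enabled state and mark it active.
 */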
static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	__sysmmu_enable_clocks(data);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	__sysmmu_init_config(data);
	__sysmmu_set_ptbase(data, data->pgtable);
	__sysmmu_enable_vid(data);
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
	data->active = true;
	spin_unlock_irqrestore(&data->lock, flags);

	/*
	 * The SYSMMU driver keeps the master's clock enabled only for a short
	 * time while accessing the registers. For address translation during
	 * DMA transactions it relies on the client driver to keep the clock
	 * enabled.
	 */
	clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
		clk_enable(data->clk_master);
		if (sysmmu_block(data)) {
			if (data->version >= MAKE_MMU_VER(5, 0))
				__sysmmu_tlb_invalidate(data);
			else
				__sysmmu_tlb_invalidate_entry(data, iova, 1);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * L2TLB invalidations required:
		 *   4KB page: 1 invalidation
		 *   64KB page: 16 invalidations
		 *   1MB page: 64 invalidations
		 * because the TLB is 8-way set-associative with 64 sets.
		 * A 1MB page can be cached in any of the 64 sets and a
		 * 64KB page in one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / SPAGE_SIZE, 64);

		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static const struct iommu_ops exynos_iommu_ops;

static int exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get_optional(dev, "sysmmu");
	if (IS_ERR(data->clk))
		return PTR_ERR(data->clk);

	data->aclk = devm_clk_get_optional(dev, "aclk");
	if (IS_ERR(data->aclk))
		return PTR_ERR(data->aclk);

	data->pclk = devm_clk_get_optional(dev, "pclk");
	if (IS_ERR(data->pclk))
		return PTR_ERR(data->pclk);

	if (!data->clk && (!data->aclk || !data->pclk)) {
		dev_err(dev, "Failed to get device clock(s)!\n");
		return -ENOSYS;
	}

	data->clk_master = devm_clk_get_optional(dev, "master");
	if (IS_ERR(data->clk_master))
		return PTR_ERR(data->clk_master);

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	__sysmmu_get_version(data);

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(data->sysmmu));
	if (ret)
		return ret;

	platform_set_drvdata(pdev, data);

	if (PG_ENT_SHIFT < 0) {
		if (MMU_MAJ_VER(data->version) < 5) {
			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_LV1_PROT;
			LV2_PROT = SYSMMU_LV2_PROT;
		} else {
			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_V5_LV1_PROT;
			LV2_PROT = SYSMMU_V5_LV2_PROT;
		}
	}

	if (MMU_MAJ_VER(data->version) >= 5) {
		ret = dma_set_mask(dev, DMA_BIT_MASK(36));
		if (ret) {
			dev_err(dev, "Unable to set DMA mask: %d\n", ret);
			goto err_dma_set_mask;
		}
	}

	/*
	 * use the first registered sysmmu device for performing
	 * dma mapping operations on iommu page tables (cpu cache flush)
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	pm_runtime_enable(dev);

	ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
	if (ret)
		goto err_dma_set_mask;

	return 0;

err_dma_set_mask:
	iommu_device_sysfs_remove(&data->iommu);
	return ret;
}

static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "saving state\n");
			__sysmmu_disable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "restoring state\n");
			__sysmmu_enable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id sysmmu_of_match[] = {
	{ .compatible	= "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
		.pm		= &sysmmu_pm_ops,
		.suppress_bind_attrs = true,
	}
};

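/*
 * Update a single page table entry and keep the CPU cache and the device view
 * coherent via the streaming DMA API; the driver relies on dma address ==
 * physical address for the page tables (see the BUG_ON in
 * exynos_iommu_domain_alloc()).
 */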
static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = cpu_to_le32(val);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}

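/*
 * Allocate an exynos_iommu_domain: a 16KB lv1 page table (4096 entries), a
 * per-section counter of free lv2 entries and the generic iommu_domain. All
 * lv1 entries initially point at zero_lv2_table (ZERO_LV2LINK) as part of the
 * System MMU v3.3 FLPD-cache workaround (see the comments in alloc_lv2entry()).
 */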
static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	dma_addr_t handle;
	int i;

	/* Check if correct PTE offsets are initialized */
	BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

	if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_pgtable;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i++)
		domain->pgtable[i] = ZERO_LV2LINK;

	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
				DMA_TO_DEVICE);
	/* For mapping page table entries we rely on dma == phys */
	BUG_ON(handle != virt_to_phys(domain->pgtable));
	if (dma_mapping_error(dma_dev, handle))
		goto err_lv2ent;

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end   = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_lv2ent:
	free_pages((unsigned long)domain->lv2entcnt, 1);
err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_pgtable:
	kfree(domain);
	return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		__sysmmu_disable(data);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
			 DMA_TO_DEVICE);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i)) {
			phys_addr_t base = lv2table_base(domain->pgtable + i);

			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
					 DMA_TO_DEVICE);
			kmem_cache_free(lv2table_kmem_cache,
					phys_to_virt(base));
		}

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				    struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;

	if (!has_sysmmu(dev) || owner->domain != iommu_domain)
		return;

	mutex_lock(&owner->rpm_lock);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_disable(data);
		pm_runtime_put(data->sysmmu);
	}

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}
	owner->domain = NULL;
	spin_unlock_irqrestore(&domain->lock, flags);

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
		&pagetable);
}

static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				   struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;

	if (!has_sysmmu(dev))
		return -ENODEV;

	if (owner->domain)
		exynos_iommu_detach_device(owner->domain, dev);

	mutex_lock(&owner->rpm_lock);

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(data, &owner->controllers, owner_node) {
		spin_lock(&data->lock);
		data->pgtable = pagetable;
		data->domain = domain;
		list_add_tail(&data->domain_node, &domain->clients);
		spin_unlock(&data->lock);
	}
	owner->domain = iommu_domain;
	spin_unlock_irqrestore(&domain->lock, flags);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_enable(data);
		pm_runtime_put(data->sysmmu);
	}

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
		&pagetable);

	return 0;
}

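/*
 * Return the lv2 page table entry for @iova, allocating and linking a new lv2
 * table if the lv1 entry is still a fault/zero link. Replacing the shared
 * zero_lv2_table also requires an FLPD cache flush on System MMU v3.3, as the
 * comment inside explains.
 */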
static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		dma_addr_t handle;
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		exynos_iommu_set_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, handle)) {
			kmem_cache_free(lv2table_kmem_cache, pent);
			return ERR_PTR(-EADDRINUSE);
		}

		/*
		 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * FLPD cache may cache the address of zero_l2_table. This
		 * function replaces the zero_l2_table with new L2 page table
		 * to write valid mappings.
		 * Accessing the valid area may cause page fault since FLPD
		 * cache may still cache zero_l2_table for the valid area
		 * instead of new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with other valid L2
		 * page table must involve FLPD cache invalidation for System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate TLB without
		 * blocking because the target address of TLB invalidation is
		 * not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, int prot, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	exynos_iommu_set_pte(sent, mk_lv1ent_sect(paddr, prot));

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flush the FLPD cache of System MMU v3.3, which may have
		 * cached an FLPD entry by speculative prefetch of an SLPD
		 * that has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}

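/*
 * Write lv2 entries for a small (4KB) or large (64KB) page. A large page
 * occupies SPAGES_PER_LPAGE (16) consecutive lv2 slots, which are written
 * directly and synced to the device in one go; @pgcnt tracks the remaining
 * free lv2 entries of the section.
 */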
static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       int prot, short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		exynos_iommu_set_pte(pent, mk_lv2ent_spage(paddr, prot));
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		dma_addr_t pent_base = virt_to_phys(pent);

		dma_sync_single_for_cpu(dma_dev, pent_base,
					sizeof(*pent) * SPAGES_PER_LPAGE,
					DMA_TO_DEVICE);
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr, prot);
		}
		dma_sync_single_for_device(dma_dev, pent_base,
					   sizeof(*pent) * SPAGES_PER_LPAGE,
					   DMA_TO_DEVICE);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance by caching more page table entries from a page table walk.
 * However, the logic has a bug: while caching faulty page table entries,
 * System MMU reports a page fault if the cached fault entry is hit, even
 * though the fault entry was updated to a valid entry after it was cached.
 * To prevent caching faulty page table entries which may be updated to valid
 * entries later, the virtual memory manager must apply the workaround
 * described below.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, any start address of an I/O virtual region must be aligned with
 * the following sizes for System MMU v3.1 and v3.2:
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it needs
 * more workarounds:
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - The start address of an I/O virtual region must be aligned by 128KiB.
 */
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot, gfp_t gfp)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);
	prot &= SYSMMU_SUPPORTED_PROT_BITS;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr, prot,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size, prot,
				       &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size,
				 struct iommu_iotlb_gather *gather)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		exynos_iommu_set_pte(ent, ZERO_LV2LINK);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(sent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		exynos_iommu_set_pte(ent, 0);
		size = SPAGE_SIZE;
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv1ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
				sizeof(*ent) * SPAGES_PER_LPAGE,
				DMA_TO_DEVICE);
	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
				   sizeof(*ent) * SPAGES_PER_LPAGE,
				   DMA_TO_DEVICE);
	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					  dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}

static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;

	if (!has_sysmmu(dev))
		return ERR_PTR(-ENODEV);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		/*
		 * SYSMMU will be runtime activated via device link
		 * (dependency) to its master device, so there are no
		 * direct calls to pm_runtime_get/put in this driver.
		 */
		data->link = device_link_add(dev, data->sysmmu,
					     DL_FLAG_STATELESS |
					     DL_FLAG_PM_RUNTIME);
	}

	/* There is always at least one entry, see exynos_iommu_of_xlate() */
	data = list_first_entry(&owner->controllers,
				struct sysmmu_drvdata, owner_node);

	return &data->iommu;
}

static void exynos_iommu_set_platform_dma(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);

	if (owner->domain) {
		struct iommu_group *group = iommu_group_get(dev);

		if (group) {
			exynos_iommu_detach_device(owner->domain, dev);
			iommu_group_put(group);
		}
	}
}

static void exynos_iommu_release_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;

	exynos_iommu_set_platform_dma(dev);

	list_for_each_entry(data, &owner->controllers, owner_node)
		device_link_del(data->link);
}

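/*
 * Called by the OF/IOMMU core for each SYSMMU phandle referenced by a master
 * device in the device tree: allocate the exynos_iommu_owner on first use and
 * link the referenced SYSMMU controller into the owner's list, ignoring
 * duplicates.
 */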
static int exynos_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *spec)
{
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data, *entry;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data) {
		put_device(&sysmmu->dev);
		return -ENODEV;
	}

	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner) {
			put_device(&sysmmu->dev);
			return -ENOMEM;
		}

		INIT_LIST_HEAD(&owner->controllers);
		mutex_init(&owner->rpm_lock);
		dev_iommu_priv_set(dev, owner);
	}

	list_for_each_entry(entry, &owner->controllers, owner_node)
		if (entry == data)
			return 0;

	list_add_tail(&data->owner_node, &owner->controllers);
	data->master = dev;

	return 0;
}

static const struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.device_group = generic_device_group,
#ifdef CONFIG_ARM
	.set_platform_dma_ops = exynos_iommu_set_platform_dma,
#endif
	.probe_device = exynos_iommu_probe_device,
	.release_device = exynos_iommu_release_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
	.of_xlate = exynos_iommu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= exynos_iommu_attach_device,
		.map		= exynos_iommu_map,
		.unmap		= exynos_iommu_unmap,
		.iova_to_phys	= exynos_iommu_iova_to_phys,
		.free		= exynos_iommu_domain_free,
	}
};

static int __init exynos_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, sysmmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	return 0;
err_reg_driver:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
core_initcall(exynos_iommu_init);
