1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 *
4 * Copyright (C) 2013 Freescale Semiconductor, Inc.
5 * Author: Varun Sethi <varun.sethi@freescale.com>
6 */
7
8#define pr_fmt(fmt)    "fsl-pamu-domain: %s: " fmt, __func__
9
10#include "fsl_pamu_domain.h"
11
12#include <linux/platform_device.h>
13#include <sysdev/fsl_pci.h>
14
15/*
16 * Global spinlock that needs to be held while
17 * configuring PAMU.
18 */
19static DEFINE_SPINLOCK(iommu_lock);
20
/* Slab cache for struct fsl_dma_domain allocations */
static struct kmem_cache *fsl_pamu_domain_cache;
/* Slab cache for per-device struct device_domain_info allocations */
static struct kmem_cache *iommu_devinfo_cache;
/* Protects the per-device iommu priv pointer and devinfo list updates */
static DEFINE_SPINLOCK(device_domain_lock);

struct iommu_device pamu_iommu;	/* IOMMU core code handle */
26
/* Convert a generic iommu_domain pointer to the embedding PAMU domain. */
static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct fsl_dma_domain, iommu_domain);
}
31
32static int __init iommu_init_mempool(void)
33{
34	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
35						  sizeof(struct fsl_dma_domain),
36						  0,
37						  SLAB_HWCACHE_ALIGN,
38						  NULL);
39	if (!fsl_pamu_domain_cache) {
40		pr_debug("Couldn't create fsl iommu_domain cache\n");
41		return -ENOMEM;
42	}
43
44	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
45						sizeof(struct device_domain_info),
46						0,
47						SLAB_HWCACHE_ALIGN,
48						NULL);
49	if (!iommu_devinfo_cache) {
50		pr_debug("Couldn't create devinfo cache\n");
51		kmem_cache_destroy(fsl_pamu_domain_cache);
52		return -ENOMEM;
53	}
54
55	return 0;
56}
57
58static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
59			      u32 val)
60{
61	int ret = 0;
62	unsigned long flags;
63
64	spin_lock_irqsave(&iommu_lock, flags);
65	ret = pamu_update_paace_stash(liodn, val);
66	if (ret) {
67		pr_debug("Failed to update SPAACE for liodn %d\n ", liodn);
68		spin_unlock_irqrestore(&iommu_lock, flags);
69		return ret;
70	}
71
72	spin_unlock_irqrestore(&iommu_lock, flags);
73
74	return ret;
75}
76
77/* Set the geometry parameters for a LIODN */
78static int pamu_set_liodn(struct fsl_dma_domain *dma_domain, struct device *dev,
79			  int liodn)
80{
81	u32 omi_index = ~(u32)0;
82	unsigned long flags;
83	int ret;
84
85	/*
86	 * Configure the omi_index at the geometry setup time.
87	 * This is a static value which depends on the type of
88	 * device and would not change thereafter.
89	 */
90	get_ome_index(&omi_index, dev);
91
92	spin_lock_irqsave(&iommu_lock, flags);
93	ret = pamu_disable_liodn(liodn);
94	if (ret)
95		goto out_unlock;
96	ret = pamu_config_ppaace(liodn, omi_index, dma_domain->stash_id, 0);
97	if (ret)
98		goto out_unlock;
99	ret = pamu_config_ppaace(liodn, ~(u32)0, dma_domain->stash_id,
100				 PAACE_AP_PERMS_QUERY | PAACE_AP_PERMS_UPDATE);
101out_unlock:
102	spin_unlock_irqrestore(&iommu_lock, flags);
103	if (ret) {
104		pr_debug("PAACE configuration failed for liodn %d\n",
105			 liodn);
106	}
107	return ret;
108}
109
/*
 * Drop one device's attachment record: unlink it from the domain's
 * device list, disable its LIODN in the PAMU, clear the device's iommu
 * private pointer and free the info structure.  Called with the
 * domain's domain_lock held (see detach_device()), which protects the
 * list_del() here.
 */
static void remove_device_ref(struct device_domain_info *info)
{
	unsigned long flags;

	list_del(&info->link);
	/* PAMU hardware state changes are serialized by iommu_lock */
	spin_lock_irqsave(&iommu_lock, flags);
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);
	/* device_domain_lock guards the device's iommu priv pointer */
	spin_lock_irqsave(&device_domain_lock, flags);
	dev_iommu_priv_set(info->dev, NULL);
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}
123
124static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
125{
126	struct device_domain_info *info, *tmp;
127	unsigned long flags;
128
129	spin_lock_irqsave(&dma_domain->domain_lock, flags);
130	/* Remove the device from the domain device list */
131	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
132		if (!dev || (info->dev == dev))
133			remove_device_ref(info);
134	}
135	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
136}
137
138static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
139{
140	struct device_domain_info *info, *old_domain_info;
141	unsigned long flags;
142
143	spin_lock_irqsave(&device_domain_lock, flags);
144	/*
145	 * Check here if the device is already attached to domain or not.
146	 * If the device is already attached to a domain detach it.
147	 */
148	old_domain_info = dev_iommu_priv_get(dev);
149	if (old_domain_info && old_domain_info->domain != dma_domain) {
150		spin_unlock_irqrestore(&device_domain_lock, flags);
151		detach_device(dev, old_domain_info->domain);
152		spin_lock_irqsave(&device_domain_lock, flags);
153	}
154
155	info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
156
157	info->dev = dev;
158	info->liodn = liodn;
159	info->domain = dma_domain;
160
161	list_add(&info->link, &dma_domain->devices);
162	/*
163	 * In case of devices with multiple LIODNs just store
164	 * the info for the first LIODN as all
165	 * LIODNs share the same domain
166	 */
167	if (!dev_iommu_priv_get(dev))
168		dev_iommu_priv_set(dev, info);
169	spin_unlock_irqrestore(&device_domain_lock, flags);
170}
171
172static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
173					 dma_addr_t iova)
174{
175	if (iova < domain->geometry.aperture_start ||
176	    iova > domain->geometry.aperture_end)
177		return 0;
178	return iova;
179}
180
181static bool fsl_pamu_capable(struct device *dev, enum iommu_cap cap)
182{
183	return cap == IOMMU_CAP_CACHE_COHERENCY;
184}
185
186static void fsl_pamu_domain_free(struct iommu_domain *domain)
187{
188	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
189
190	/* remove all the devices from the device list */
191	detach_device(NULL, dma_domain);
192	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
193}
194
195static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
196{
197	struct fsl_dma_domain *dma_domain;
198
199	if (type != IOMMU_DOMAIN_UNMANAGED)
200		return NULL;
201
202	dma_domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
203	if (!dma_domain)
204		return NULL;
205
206	dma_domain->stash_id = ~(u32)0;
207	INIT_LIST_HEAD(&dma_domain->devices);
208	spin_lock_init(&dma_domain->domain_lock);
209
210	/* default geometry 64 GB i.e. maximum system address */
211	dma_domain->iommu_domain. geometry.aperture_start = 0;
212	dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
213	dma_domain->iommu_domain.geometry.force_aperture = true;
214
215	return &dma_domain->iommu_domain;
216}
217
218/* Update stash destination for all LIODNs associated with the domain */
219static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
220{
221	struct device_domain_info *info;
222	int ret = 0;
223
224	list_for_each_entry(info, &dma_domain->devices, link) {
225		ret = update_liodn_stash(info->liodn, dma_domain, val);
226		if (ret)
227			break;
228	}
229
230	return ret;
231}
232
233static int fsl_pamu_attach_device(struct iommu_domain *domain,
234				  struct device *dev)
235{
236	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
237	unsigned long flags;
238	int len, ret = 0, i;
239	const u32 *liodn;
240	struct pci_dev *pdev = NULL;
241	struct pci_controller *pci_ctl;
242
243	/*
244	 * Use LIODN of the PCI controller while attaching a
245	 * PCI device.
246	 */
247	if (dev_is_pci(dev)) {
248		pdev = to_pci_dev(dev);
249		pci_ctl = pci_bus_to_host(pdev->bus);
250		/*
251		 * make dev point to pci controller device
252		 * so we can get the LIODN programmed by
253		 * u-boot.
254		 */
255		dev = pci_ctl->parent;
256	}
257
258	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
259	if (!liodn) {
260		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
261		return -ENODEV;
262	}
263
264	spin_lock_irqsave(&dma_domain->domain_lock, flags);
265	for (i = 0; i < len / sizeof(u32); i++) {
266		/* Ensure that LIODN value is valid */
267		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
268			pr_debug("Invalid liodn %d, attach device failed for %pOF\n",
269				 liodn[i], dev->of_node);
270			ret = -ENODEV;
271			break;
272		}
273
274		attach_device(dma_domain, liodn[i], dev);
275		ret = pamu_set_liodn(dma_domain, dev, liodn[i]);
276		if (ret)
277			break;
278		ret = pamu_enable_liodn(liodn[i]);
279		if (ret)
280			break;
281	}
282	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
283	return ret;
284}
285
286static void fsl_pamu_set_platform_dma(struct device *dev)
287{
288	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
289	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
290	const u32 *prop;
291	int len;
292	struct pci_dev *pdev = NULL;
293	struct pci_controller *pci_ctl;
294
295	/*
296	 * Use LIODN of the PCI controller while detaching a
297	 * PCI device.
298	 */
299	if (dev_is_pci(dev)) {
300		pdev = to_pci_dev(dev);
301		pci_ctl = pci_bus_to_host(pdev->bus);
302		/*
303		 * make dev point to pci controller device
304		 * so we can get the LIODN programmed by
305		 * u-boot.
306		 */
307		dev = pci_ctl->parent;
308	}
309
310	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
311	if (prop)
312		detach_device(dev, dma_domain);
313	else
314		pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
315}
316
317/* Set the domain stash attribute */
318int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu)
319{
320	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
321	unsigned long flags;
322	int ret;
323
324	spin_lock_irqsave(&dma_domain->domain_lock, flags);
325	dma_domain->stash_id = get_stash_id(PAMU_ATTR_CACHE_L1, cpu);
326	if (dma_domain->stash_id == ~(u32)0) {
327		pr_debug("Invalid stash attributes\n");
328		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
329		return -EINVAL;
330	}
331	ret = update_domain_stash(dma_domain, dma_domain->stash_id);
332	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
333
334	return ret;
335}
336
337static  bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
338{
339	u32 version;
340
341	/* Check the PCI controller version number by readding BRR1 register */
342	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
343	version &= PCI_FSL_BRR1_VER;
344	/* If PCI controller version is >= 0x204 we can partition endpoints */
345	return version >= 0x204;
346}
347
348static struct iommu_group *fsl_pamu_device_group(struct device *dev)
349{
350	struct iommu_group *group;
351	struct pci_dev *pdev;
352
353	/*
354	 * For platform devices we allocate a separate group for each of the
355	 * devices.
356	 */
357	if (!dev_is_pci(dev))
358		return generic_device_group(dev);
359
360	/*
361	 * We can partition PCIe devices so assign device group to the device
362	 */
363	pdev = to_pci_dev(dev);
364	if (check_pci_ctl_endpt_part(pci_bus_to_host(pdev->bus)))
365		return pci_device_group(&pdev->dev);
366
367	/*
368	 * All devices connected to the controller will share the same device
369	 * group.
370	 *
371	 * Due to ordering between fsl_pamu_init() and fsl_pci_init() it is
372	 * guaranteed that the pci_ctl->parent platform_device will have the
373	 * iommu driver bound and will already have a group set. So we just
374	 * re-use this group as the group for every device in the hose.
375	 */
376	group = iommu_group_get(pci_bus_to_host(pdev->bus)->parent);
377	if (WARN_ON(!group))
378		return ERR_PTR(-EINVAL);
379	return group;
380}
381
382static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
383{
384	int len;
385
386	/*
387	 * uboot must fill the fsl,liodn for platform devices to be supported by
388	 * the iommu.
389	 */
390	if (!dev_is_pci(dev) &&
391	    !of_get_property(dev->of_node, "fsl,liodn", &len))
392		return ERR_PTR(-ENODEV);
393
394	return &pamu_iommu;
395}
396
/* Callbacks registered with the IOMMU core for the PAMU driver */
static const struct iommu_ops fsl_pamu_ops = {
	.capable	= fsl_pamu_capable,
	.domain_alloc	= fsl_pamu_domain_alloc,
	.probe_device	= fsl_pamu_probe_device,
	.device_group   = fsl_pamu_device_group,
	.set_platform_dma_ops = fsl_pamu_set_platform_dma,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= fsl_pamu_attach_device,
		.iova_to_phys	= fsl_pamu_iova_to_phys,
		.free		= fsl_pamu_domain_free,
	}
};
409
410int __init pamu_domain_init(void)
411{
412	int ret = 0;
413
414	ret = iommu_init_mempool();
415	if (ret)
416		return ret;
417
418	ret = iommu_device_sysfs_add(&pamu_iommu, NULL, NULL, "iommu0");
419	if (ret)
420		return ret;
421
422	ret = iommu_device_register(&pamu_iommu, &fsl_pamu_ops, NULL);
423	if (ret) {
424		iommu_device_sysfs_remove(&pamu_iommu);
425		pr_err("Can't register iommu device\n");
426	}
427
428	return ret;
429}
430