// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for s390 PCI devices
 *
 * Copyright IBM Corp. 2015
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iommu-helper.h>
#include <linux/sizes.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <asm/pci_dma.h>

static const struct iommu_ops s390_iommu_ops;

struct s390_domain {
	struct iommu_domain	domain;
	struct list_head	devices;
	unsigned long		*dma_table;
	spinlock_t		list_lock;
	struct rcu_head		rcu;
};

static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct s390_domain, domain);
}

static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	default:
		return false;
	}
}

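/*
 * Allocate an IOMMU API domain backed by a fresh DMA translation table.
 * Only unmanaged domains are supported; the aperture spans the full
 * range addressable through the root (region third) table.
 */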
static struct iommu_domain *s390_domain_alloc(unsigned domain_type)
{
	struct s390_domain *s390_domain;

	if (domain_type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
	if (!s390_domain)
		return NULL;

	s390_domain->dma_table = dma_alloc_cpu_table(GFP_KERNEL);
	if (!s390_domain->dma_table) {
		kfree(s390_domain);
		return NULL;
	}
	s390_domain->domain.geometry.force_aperture = true;
	s390_domain->domain.geometry.aperture_start = 0;
	s390_domain->domain.geometry.aperture_end = ZPCI_TABLE_SIZE_RT - 1;

	spin_lock_init(&s390_domain->list_lock);
	INIT_LIST_HEAD_RCU(&s390_domain->devices);

	return &s390_domain->domain;
}

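/* Deferred destruction, run once all RCU readers of the domain are done. */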
static void s390_iommu_rcu_free_domain(struct rcu_head *head)
{
	struct s390_domain *s390_domain = container_of(head, struct s390_domain,
						       rcu);

	dma_cleanup_tables(s390_domain->dma_table);
	kfree(s390_domain);
}

static void s390_domain_free(struct iommu_domain *domain)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);

	rcu_read_lock();
	WARN_ON(!list_empty(&s390_domain->devices));
	rcu_read_unlock();

	call_rcu(&s390_domain->rcu, s390_iommu_rcu_free_domain);
}

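/*
 * Detach @zdev from its current domain: drop it from the domain's device
 * list, unregister the I/O address translation (IOAT) tables and clear
 * the device's references to the domain and its DMA table.
 */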
static void __s390_iommu_detach_device(struct zpci_dev *zdev)
{
	struct s390_domain *s390_domain = zdev->s390_domain;
	unsigned long flags;

	if (!s390_domain)
		return;

	spin_lock_irqsave(&s390_domain->list_lock, flags);
	list_del_rcu(&zdev->iommu_list);
	spin_unlock_irqrestore(&s390_domain->list_lock, flags);

	zpci_unregister_ioat(zdev, 0);
	zdev->s390_domain = NULL;
	zdev->dma_table = NULL;
}

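/*
 * Attach @dev to @domain by registering the domain's DMA translation
 * table as the device's I/O address translation table. Any previously
 * attached domain or arch-internal DMA state is torn down first.
 */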
static int s390_iommu_attach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev = to_zpci_dev(dev);
	unsigned long flags;
	u8 status;
	int cc;

	if (!zdev)
		return -ENODEV;

	if (WARN_ON(domain->geometry.aperture_start > zdev->end_dma ||
		domain->geometry.aperture_end < zdev->start_dma))
		return -EINVAL;

	if (zdev->s390_domain)
		__s390_iommu_detach_device(zdev);
	else if (zdev->dma_table)
		zpci_dma_exit_device(zdev);

	cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
				virt_to_phys(s390_domain->dma_table), &status);
	/*
	 * If the device is undergoing error recovery the reset code
	 * will re-establish the new domain.
	 */
	if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
		return -EIO;

	zdev->dma_table = s390_domain->dma_table;
	zdev->s390_domain = s390_domain;

	spin_lock_irqsave(&s390_domain->list_lock, flags);
	list_add_rcu(&zdev->iommu_list, &s390_domain->devices);
	spin_unlock_irqrestore(&s390_domain->list_lock, flags);

	return 0;
}

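/*
 * Return the device to platform-managed DMA: detach it from any IOMMU
 * API domain and re-initialize the arch-specific DMA API state.
 */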
static void s390_iommu_set_platform_dma(struct device *dev)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);

	__s390_iommu_detach_device(zdev);
	zpci_dma_init_device(zdev);
}

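/*
 * The device can only generate DMA addresses within [start_dma, end_dma],
 * so report everything below start_dma and above end_dma (up to the end
 * of the aperture) as reserved.
 */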
static void s390_iommu_get_resv_regions(struct device *dev,
					struct list_head *list)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);
	struct iommu_resv_region *region;

	if (zdev->start_dma) {
		region = iommu_alloc_resv_region(0, zdev->start_dma, 0,
						 IOMMU_RESV_RESERVED, GFP_KERNEL);
		if (!region)
			return;
		list_add_tail(&region->list, list);
	}

	if (zdev->end_dma < ZPCI_TABLE_SIZE_RT - 1) {
		region = iommu_alloc_resv_region(zdev->end_dma + 1,
						 ZPCI_TABLE_SIZE_RT - zdev->end_dma - 1,
						 0, IOMMU_RESV_RESERVED, GFP_KERNEL);
		if (!region)
			return;
		list_add_tail(&region->list, list);
	}
}

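/*
 * Validate the device's usable DMA range against the aperture and clamp
 * zdev->end_dma to the highest address covered by the translation table.
 */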
static struct iommu_device *s390_iommu_probe_device(struct device *dev)
{
	struct zpci_dev *zdev;

	if (!dev_is_pci(dev))
		return ERR_PTR(-ENODEV);

	zdev = to_zpci_dev(dev);

	if (zdev->start_dma > zdev->end_dma ||
	    zdev->start_dma > ZPCI_TABLE_SIZE_RT - 1)
		return ERR_PTR(-EINVAL);

	if (zdev->end_dma > ZPCI_TABLE_SIZE_RT - 1)
		zdev->end_dma = ZPCI_TABLE_SIZE_RT - 1;

	return &zdev->iommu_dev;
}

static void s390_iommu_release_device(struct device *dev)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);

	/*
	 * release_device is expected to detach any domain currently attached
	 * to the device, but keep it attached to other devices in the group.
	 */
	if (zdev)
		__s390_iommu_detach_device(zdev);
}

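/*
 * Refresh the translations for the full DMA range of every device
 * attached to the domain, flushing all of its stale IOTLB entries.
 */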
static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev;

	rcu_read_lock();
	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
		zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
				   zdev->end_dma - zdev->start_dma + 1);
	}
	rcu_read_unlock();
}

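/* Flush the IOTLB entries for the range collected in @gather. */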
static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *gather)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	size_t size = gather->end - gather->start + 1;
	struct zpci_dev *zdev;

	/* If nothing was added to the gather there is nothing to flush */
	if (!gather->end)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
		zpci_refresh_trans((u64)zdev->fh << 32, gather->start,
				   size);
	}
	rcu_read_unlock();
}

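/*
 * Make newly established mappings visible after map. Only devices that
 * need an explicit TLB refresh for previously invalid entries
 * (zdev->tlb_refresh) are flushed here; all others are skipped.
 */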
static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
				      unsigned long iova, size_t size)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev;

	rcu_read_lock();
	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
		if (!zdev->tlb_refresh)
			continue;
		zpci_refresh_trans((u64)zdev->fh << 32,
				   iova, size);
	}
	rcu_read_unlock();
}

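/*
 * Walk the translation table and mark @nr_pages entries starting at
 * @dma_addr valid, pointing at consecutive pages of @pa. On allocation
 * failure, roll back any entries already updated.
 */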
static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
				     phys_addr_t pa, dma_addr_t dma_addr,
				     unsigned long nr_pages, int flags,
				     gfp_t gfp)
{
	phys_addr_t page_addr = pa & PAGE_MASK;
	unsigned long *entry;
	unsigned long i;
	int rc;

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr,
					   gfp);
		if (unlikely(!entry)) {
			rc = -ENOMEM;
			goto undo_cpu_trans;
		}
		dma_update_cpu_trans(entry, page_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	return 0;

undo_cpu_trans:
	while (i-- > 0) {
		dma_addr -= PAGE_SIZE;
		entry = dma_walk_cpu_trans(s390_domain->dma_table,
					   dma_addr, gfp);
		if (!entry)
			break;
		dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
	}

	return rc;
}

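/* Mark @nr_pages translation-table entries starting at @dma_addr invalid. */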
static int s390_iommu_invalidate_trans(struct s390_domain *s390_domain,
				       dma_addr_t dma_addr, unsigned long nr_pages)
{
	unsigned long *entry;
	unsigned long i;
	int rc = 0;

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr,
					   GFP_ATOMIC);
		if (unlikely(!entry)) {
			rc = -EINVAL;
			break;
		}
		dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
		dma_addr += PAGE_SIZE;
	}

	return rc;
}

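/*
 * Map @pgcount pages of size @pgsize at @iova. Only 4K pages are
 * supported, read access is mandatory, and mappings without write
 * permission are entered as protected.
 */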
static int s390_iommu_map_pages(struct iommu_domain *domain,
				unsigned long iova, phys_addr_t paddr,
				size_t pgsize, size_t pgcount,
				int prot, gfp_t gfp, size_t *mapped)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	size_t size = pgcount << __ffs(pgsize);
	int flags = ZPCI_PTE_VALID, rc = 0;

	if (pgsize != SZ_4K)
		return -EINVAL;

	if (iova < s390_domain->domain.geometry.aperture_start ||
	    (iova + size - 1) > s390_domain->domain.geometry.aperture_end)
		return -EINVAL;

	if (!IS_ALIGNED(iova | paddr, pgsize))
		return -EINVAL;

	if (!(prot & IOMMU_READ))
		return -EINVAL;

	if (!(prot & IOMMU_WRITE))
		flags |= ZPCI_TABLE_PROTECTED;

	rc = s390_iommu_validate_trans(s390_domain, paddr, iova,
				       pgcount, flags, gfp);
	if (!rc)
		*mapped = size;

	return rc;
}

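/*
 * Resolve @iova to a physical address by walking the three-level
 * translation structure (region third table -> segment table -> page
 * table). Returns 0 if @iova is outside the aperture or any level of
 * the walk is invalid.
 */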
static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	unsigned long *rto, *sto, *pto;
	unsigned long ste, pte, rte;
	unsigned int rtx, sx, px;
	phys_addr_t phys = 0;

	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;

	rtx = calc_rtx(iova);
	sx = calc_sx(iova);
	px = calc_px(iova);
	rto = s390_domain->dma_table;

	rte = READ_ONCE(rto[rtx]);
	if (reg_entry_isvalid(rte)) {
		sto = get_rt_sto(rte);
		ste = READ_ONCE(sto[sx]);
		if (reg_entry_isvalid(ste)) {
			pto = get_st_pto(ste);
			pte = READ_ONCE(pto[px]);
			if (pt_entry_isvalid(pte))
				phys = pte & ZPCI_PTE_ADDR_MASK;
		}
	}

	return phys;
}

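/*
 * Invalidate the translation entries covering the range and record it
 * in @gather for a later IOTLB flush in s390_iommu_iotlb_sync().
 */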
static size_t s390_iommu_unmap_pages(struct iommu_domain *domain,
				     unsigned long iova,
				     size_t pgsize, size_t pgcount,
				     struct iommu_iotlb_gather *gather)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	size_t size = pgcount << __ffs(pgsize);
	int rc;

	if (WARN_ON(iova < s390_domain->domain.geometry.aperture_start ||
	    (iova + size - 1) > s390_domain->domain.geometry.aperture_end))
		return 0;

	rc = s390_iommu_invalidate_trans(s390_domain, iova, pgcount);
	if (rc)
		return 0;

	iommu_iotlb_gather_add_range(gather, iova, size);

	return size;
}

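/* Register @zdev with the IOMMU core and expose it in sysfs. */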
int zpci_init_iommu(struct zpci_dev *zdev)
{
	int rc = 0;

	rc = iommu_device_sysfs_add(&zdev->iommu_dev, NULL, NULL,
				    "s390-iommu.%08x", zdev->fid);
	if (rc)
		goto out_err;

	rc = iommu_device_register(&zdev->iommu_dev, &s390_iommu_ops, NULL);
	if (rc)
		goto out_sysfs;

	return 0;

out_sysfs:
	iommu_device_sysfs_remove(&zdev->iommu_dev);

out_err:
	return rc;
}

void zpci_destroy_iommu(struct zpci_dev *zdev)
{
	iommu_device_unregister(&zdev->iommu_dev);
	iommu_device_sysfs_remove(&zdev->iommu_dev);
}

static const struct iommu_ops s390_iommu_ops = {
	.capable = s390_iommu_capable,
	.domain_alloc = s390_domain_alloc,
	.probe_device = s390_iommu_probe_device,
	.release_device = s390_iommu_release_device,
	.device_group = generic_device_group,
	.set_platform_dma_ops = s390_iommu_set_platform_dma,
	.pgsize_bitmap = SZ_4K,
	.get_resv_regions = s390_iommu_get_resv_regions,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= s390_iommu_attach_device,
		.map_pages	= s390_iommu_map_pages,
		.unmap_pages	= s390_iommu_unmap_pages,
		.flush_iotlb_all = s390_iommu_flush_iotlb_all,
		.iotlb_sync      = s390_iommu_iotlb_sync,
		.iotlb_sync_map  = s390_iommu_iotlb_sync_map,
		.iova_to_phys	= s390_iommu_iova_to_phys,
		.free		= s390_domain_free,
	}
};