1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Loongson IOMMU Driver
4 *
5 * Copyright (C) 2020-2021 Loongson Technology Ltd.
6 * Author: Lv Chen <lvchen@loongson.cn>
7 * Wang Yang <wangyang@loongson.cn>
8 */
9
10 #include <linux/kernel.h>
11 #include <linux/delay.h>
12 #include <linux/device.h>
13 #include <linux/err.h>
14 #include <linux/errno.h>
15 #include <linux/interrupt.h>
16 #include <linux/io.h>
17 #include <linux/iommu.h>
18 #include <linux/list.h>
19 #include <linux/module.h>
20 #include <linux/acpi.h>
21 #include <linux/pci.h>
22 #include <linux/pci_regs.h>
23 #include <linux/printk.h>
24 #include <linux/sizes.h>
25 #include <linux/slab.h>
26 #include <linux/spinlock.h>
27 #include "iommu.h"
28
29 #define LOOP_TIMEOUT 100000
30 #define IOVA_START (SZ_256M)
31 #define IOVA_END0 (SZ_2G + SZ_256M)
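/*
 * IOVA_START..IOVA_END0 (0x10000000 - 0x8fffffff) appears to be a fixed
 * passthrough/virtio window: iommu_map_page() and domain_unmap_page()
 * service it from the per-domain flat table dom_info::mmio_pgd instead of
 * the hardware page tables programmed through LA_IOMMU_PGD_HI/LO.
 */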
32
33 #define IVRS_HEADER_LENGTH 48
34 #define ACPI_IVHD_TYPE_MAX_SUPPORTED 0x40
35 #define IVHD_DEV_ALL 0x01
36 #define IVHD_DEV_SELECT 0x02
37 #define IVHD_DEV_SELECT_RANGE_START 0x03
38 #define IVHD_DEV_RANGE_END 0x04
39 #define IVHD_DEV_ALIAS 0x42
40 #define IVHD_DEV_EXT_SELECT 0x46
41 #define IVHD_DEV_ACPI_HID 0xf0
42
43 #define IVHD_HEAD_TYPE10 0x10
44 #define IVHD_HEAD_TYPE11 0x11
45 #define IVHD_HEAD_TYPE40 0x40
46
47 #define MAX_BDF_NUM 0xffff
48
49 #define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *))
50
51 /*
52 * structure describing one IOMMU in the ACPI table. Typically followed by one
53  * or more ivhd_entries.
54 */
55 struct ivhd_header {
56 u8 type;
57 u8 flags;
58 u16 length;
59 u16 devid;
60 u16 cap_ptr;
61 u64 mmio_phys;
62 u16 pci_seg;
63 u16 info;
64 u32 efr_attr;
65
66 /* Following only valid on IVHD type 11h and 40h */
67 u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
68 u64 res;
69 } __attribute__((packed));
70
71 /*
72 * A device entry describing which devices a specific IOMMU translates and
73 * which requestor ids they use.
74 */
75 struct ivhd_entry {
76 u8 type;
77 u16 devid;
78 u8 flags;
79 u32 ext;
80 u32 hidh;
81 u64 cid;
82 u8 uidf;
83 u8 uidl;
84 u8 uid;
85 } __attribute__((packed));
86
87 LIST_HEAD(loongson_iommu_list); /* list of all IOMMUs in the system */
88 LIST_HEAD(loongson_rlookup_iommu_list);
89
90 static u32 rlookup_table_size; /* size of the rlookup table */
91 static int loongson_iommu_target_ivhd_type;
92 u16 loongson_iommu_last_bdf; /* largest PCI device id we have to handle */
93
94 int loongson_iommu_disable;
95 static struct iommu_ops loongson_iommu_ops;
96
97 static void iommu_write_regl(loongson_iommu *iommu, unsigned long off, u32 val)
98 {
99 *(u32 *)(iommu->membase + off) = val;
100 iob();
101 }
102
103 static u32 iommu_read_regl(loongson_iommu *iommu, unsigned long off)
104 {
105 u32 val;
106
107 val = *(u32 *)(iommu->membase + off);
108 iob();
109 return val;
110 }
111
112 static void iommu_translate_disable(loongson_iommu *iommu)
113 {
114 u32 val;
115
116 if (iommu == NULL) {
117 pr_err("%s iommu is NULL", __func__);
118 return;
119 }
120
121 val = iommu_read_regl(iommu, LA_IOMMU_EIVDB);
122
123 /* Disable */
124 val &= ~(1 << 31);
125 iommu_write_regl(iommu, LA_IOMMU_EIVDB, val);
126
127 /* Write cmd */
128 val = iommu_read_regl(iommu, LA_IOMMU_CMD);
129 val &= 0xfffffffc;
130 iommu_write_regl(iommu, LA_IOMMU_CMD, val);
131 }
132
133 static void iommu_translate_enable(loongson_iommu *iommu)
134 {
135 u32 val = 0;
136
137 if (iommu == NULL) {
138 pr_err("%s iommu is NULL", __func__);
139 return;
140 }
141
142 val = iommu_read_regl(iommu, LA_IOMMU_EIVDB);
143
144 /* Enable */
145 val |= (1 << 31);
146 iommu_write_regl(iommu, LA_IOMMU_EIVDB, val);
147
148 /* Write cmd */
149 val = iommu_read_regl(iommu, LA_IOMMU_CMD);
150 val &= 0xfffffffc;
151 iommu_write_regl(iommu, LA_IOMMU_CMD, val);
152 }
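/*
 * Note: both helpers above follow the same sequence - program LA_IOMMU_EIVDB,
 * then rewrite LA_IOMMU_CMD with its two low bits cleared. Judging from the
 * rest of this file, that CMD write is what makes the hardware pick up the
 * previously staged operation; no register documentation is quoted here.
 */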
153
154 static bool loongson_iommu_capable(enum iommu_cap cap)
155 {
156 switch (cap) {
157 case IOMMU_CAP_CACHE_COHERENCY:
158 return true;
159 default:
160 return false;
161 }
162 }
163
164 static dom_info *to_dom_info(struct iommu_domain *dom)
165 {
166 return container_of(dom, dom_info, domain);
167 }
168
169 /*
170  * Check whether this IOMMU has any domain (priv) attached.
171  * Returns 1 if so, 0 otherwise.
172 */
173 static int has_dom(loongson_iommu *iommu)
174 {
175 spin_lock(&iommu->dom_info_lock);
176 if (!list_empty(&iommu->dom_list)) {
177 spin_unlock(&iommu->dom_info_lock);
178 return 1;
179 }
180 spin_unlock(&iommu->dom_info_lock);
181
182 return 0;
183 }
184
185 static int update_dev_table(struct loongson_iommu_dev_data *dev_data, int flag)
186 {
187 u32 val = 0;
188 int index;
189 unsigned short bdf;
190 loongson_iommu *iommu;
191 u16 domain_id;
192
193 if (dev_data == NULL) {
194 pr_err("%s dev_data is NULL", __func__);
195 return 0;
196 }
197
198 if (dev_data->iommu == NULL) {
199 pr_err("%s iommu is NULL", __func__);
200 return 0;
201 }
202
203 if (dev_data->iommu_entry == NULL) {
204 pr_err("%s iommu_entry is NULL", __func__);
205 return 0;
206 }
207
208 iommu = dev_data->iommu;
209 domain_id = dev_data->iommu_entry->id;
210 bdf = dev_data->bdf;
211
212 /* Set device table */
213 if (flag) {
214 index = find_first_zero_bit(iommu->devtable_bitmap, MAX_ATTACHED_DEV_ID);
215 if (index < MAX_ATTACHED_DEV_ID) {
216 __set_bit(index, iommu->devtable_bitmap);
217 dev_data->index = index;
218 } else {
219 pr_err("%s get id from dev table failed\n", __func__);
220 return 0;
221 }
222
223 pr_info("%s bdf %x domain_id %d index %x"
224 " iommu segment %d flag %x\n",
225 __func__, bdf, domain_id, index,
226 iommu->segment, flag);
227
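/*
 * LA_IOMMU_EIVDB entry layout as inferred from the shifts below (not from
 * a datasheet): bits [15:0] device BDF, [19:16] domain id, [20] valid,
 * [27:24] device-table index, [31] enable.
 */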
228 val = bdf & 0xffff;
229 val |= ((domain_id & 0xf) << 16); /* domain id */
230 val |= ((index & 0xf) << 24); /* index */
231 val |= (0x1 << 20); /* valid */
232 val |= (0x1 << 31); /* enable */
233 iommu_write_regl(iommu, LA_IOMMU_EIVDB, val);
234
235 val = iommu_read_regl(iommu, LA_IOMMU_CMD);
236 val &= 0xfffffffc;
237 iommu_write_regl(iommu, LA_IOMMU_CMD, val);
238 } else {
239 /* Flush device table */
240 index = dev_data->index;
241 pr_info("%s bdf %x domain_id %d index %x"
242 " iommu segment %d flag %x\n",
243 __func__, bdf, domain_id, index,
244 iommu->segment, flag);
245
246 val = iommu_read_regl(iommu, LA_IOMMU_EIVDB);
247 val &= ~(0x7fffffff);
248 val |= ((index & 0xf) << 24); /* index */
249 iommu_write_regl(iommu, LA_IOMMU_EIVDB, val);
250
251 val = iommu_read_regl(iommu, LA_IOMMU_CMD);
252 val &= 0xfffffffc;
253 iommu_write_regl(iommu, LA_IOMMU_CMD, val);
254
255 if (index < MAX_ATTACHED_DEV_ID)
256 __clear_bit(index, iommu->devtable_bitmap);
257 }
258
259 return 0;
260 }
261
262 static void flush_iotlb(loongson_iommu *iommu)
263 {
264 u32 val, cmd;
265
266 if (iommu == NULL) {
267 pr_err("%s iommu is NULL", __func__);
268 return;
269 }
270
271 val = iommu_read_regl(iommu, LA_IOMMU_VBTC);
272 val &= ~0x1f;
273
274 /* Flush all tlb */
275 val |= 0x5;
276 iommu_write_regl(iommu, LA_IOMMU_VBTC, val);
277
278 cmd = iommu_read_regl(iommu, LA_IOMMU_CMD);
279 cmd &= 0xfffffffc;
280 iommu_write_regl(iommu, LA_IOMMU_CMD, cmd);
281 }
282
283 static int flush_pgtable_is_busy(loongson_iommu *iommu)
284 {
285 u32 val;
286
287 if (iommu == NULL) {
288 pr_err("%s iommu is NULL", __func__);
289 return 0;
290 }
291
292 val = iommu_read_regl(iommu, LA_IOMMU_VBTC);
293
294 return val & IOMMU_PGTABLE_BUSY;
295 }
296
297 static int __iommu_flush_iotlb_all(loongson_iommu *iommu)
298 {
299 u32 retry = 0;
300
301 if (iommu == NULL) {
302 pr_err("%s iommu is NULL", __func__);
303 return 0;
304 }
305
306 flush_iotlb(iommu);
307 while (flush_pgtable_is_busy(iommu)) {
308 if (retry == LOOP_TIMEOUT) {
309 pr_err("Loongson-IOMMU: iotlb flush busy\n");
310 return -EIO;
311 }
312 retry++;
313 udelay(1);
314 }
315 iommu_translate_enable(iommu);
316
317 return 0;
318 }
319
320 static void priv_flush_iotlb_pde(loongson_iommu *iommu)
321 {
322 if (iommu == NULL) {
323 pr_err("%s iommu is NULL", __func__);
324 return;
325 }
326
327 __iommu_flush_iotlb_all(iommu);
328 }
329
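/*
 * dev_data->count acts as an attach reference count maintained by
 * loongson_iommu_attach_dev()/detach_dev(): do_attach() is a no-op unless
 * the caller has already raised the count, and do_detach() only proceeds
 * once it has dropped back to zero.
 */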
330 static void do_attach(iommu_info *info, struct loongson_iommu_dev_data *dev_data)
331 {
332 if (!dev_data->count)
333 return;
334
335 dev_data->iommu_entry = info;
336
337 spin_lock(&info->devlock);
338 list_add(&dev_data->list, &info->dev_list);
339 info->dev_cnt += 1;
340 spin_unlock(&info->devlock);
341
342 update_dev_table(dev_data, 1);
343 if (info->dev_cnt > 0)
344 priv_flush_iotlb_pde(dev_data->iommu);
345 }
346
347 static void do_detach(struct loongson_iommu_dev_data *dev_data)
348 {
349 iommu_info *iommu_entry = NULL;
350
351 if (dev_data == NULL) {
352 pr_err("%s dev_data is NULL", __func__);
353 return;
354 }
355
356 if (dev_data->count)
357 return;
358
359 iommu_entry = dev_data->iommu_entry;
360 if (iommu_entry == NULL) {
361 pr_err("%s iommu_entry is NULL", __func__);
362 return;
363 }
364
365 list_del(&dev_data->list);
366 iommu_entry->dev_cnt -= 1;
367
368 update_dev_table(dev_data, 0);
369 dev_data->iommu_entry = NULL;
370 }
371
372 static void cleanup_iommu_entry(iommu_info *iommu_entry)
373 {
374 struct loongson_iommu_dev_data *dev_data = NULL;
375
376 spin_lock(&iommu_entry->devlock);
377 while (!list_empty(&iommu_entry->dev_list)) {
378 dev_data = list_first_entry(&iommu_entry->dev_list,
379 struct loongson_iommu_dev_data, list);
380 do_detach(dev_data);
381 }
382
383 spin_unlock(&iommu_entry->devlock);
384 }
385
386 static int domain_id_alloc(loongson_iommu *iommu)
387 {
388 int id = -1;
389
390 if (iommu == NULL) {
391 pr_err("%s iommu is NULL", __func__);
392 return id;
393 }
394
395 spin_lock(&iommu->domain_bitmap_lock);
396 id = find_first_zero_bit(iommu->domain_bitmap, MAX_DOMAIN_ID);
397 if (id < MAX_DOMAIN_ID)
398 __set_bit(id, iommu->domain_bitmap);
399 else
400 pr_err("Loongson-IOMMU: Alloc domain id over max domain id\n");
401
402 spin_unlock(&iommu->domain_bitmap_lock);
403
404 return id;
405 }
406
407 static void domain_id_free(loongson_iommu *iommu, int id)
408 {
409 if (iommu == NULL) {
410 pr_err("%s iommu is NULL", __func__);
411 return;
412 }
413
414 spin_lock(&iommu->domain_bitmap_lock);
415 if ((id >= 0) && (id < MAX_DOMAIN_ID))
416 __clear_bit(id, iommu->domain_bitmap);
417
418 spin_unlock(&iommu->domain_bitmap_lock);
419 }
420
421 /*
422 * This function adds a private domain to the global domain list
423 */
424 static void add_domain_to_list(loongson_iommu *iommu, dom_info *priv)
425 {
426 spin_lock(&iommu->dom_info_lock);
427 list_add(&priv->list, &iommu->dom_list);
428 spin_unlock(&iommu->dom_info_lock);
429 }
430
431 static void del_domain_from_list(loongson_iommu *iommu, dom_info *priv)
432 {
433 spin_lock(&iommu->dom_info_lock);
434 list_del(&priv->list);
435 spin_unlock(&iommu->dom_info_lock);
436 }
437
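/*
 * Page-table pages come in pairs: a page carved out of the device-local
 * window (iommu->pgtbase, the memory the hardware walks) and a normal
 * kernel page (shadow_ptable) holding CPU-usable pointers for the same
 * entries. spt_entry ties the two together, which is why the map/unmap
 * paths below always update both a "phwentry" and a "psentry".
 */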
438 static spt_entry *iommu_zalloc_page(loongson_iommu *iommu)
439 {
440 int index;
441 void *addr;
442 spt_entry *shd_entry;
443
444 spin_lock(&iommu->pgtable_bitmap_lock);
445 index = find_first_zero_bit(iommu->pgtable_bitmap, iommu->maxpages);
446 if (index < iommu->maxpages)
447 __set_bit(index, iommu->pgtable_bitmap);
448 spin_unlock(&iommu->pgtable_bitmap_lock);
449
450 shd_entry = NULL;
451 if (index < iommu->maxpages) {
452 shd_entry = kmalloc(sizeof(*shd_entry), GFP_KERNEL);
453 if (!shd_entry) {
454 pr_err("%s alloc memory for shadow page entry failed\n", __func__);
455 goto fail;
456 }
457
458 shd_entry->shadow_ptable = (unsigned long *)get_zeroed_page(GFP_KERNEL);
459 if (!shd_entry->shadow_ptable) {
460 pr_err("Loongson-IOMMU: get zeroed page err\n");
461 kfree(shd_entry);
462 goto fail;
463 }
464
465 addr = iommu->pgtbase + index * IOMMU_PAGE_SIZE;
466 memset(addr, 0x0, IOMMU_PAGE_SIZE);
467 shd_entry->index = index;
468 shd_entry->gmem_ptable = addr;
469 }
470
471 return shd_entry;
472 fail:
473 spin_lock(&iommu->pgtable_bitmap_lock);
474 __clear_bit(index, iommu->pgtable_bitmap);
475 spin_unlock(&iommu->pgtable_bitmap_lock);
476 return NULL;
477 }
478
479 static void iommu_free_page(loongson_iommu *iommu, spt_entry *shadw_entry)
480 {
481 void *addr;
482
483 if (shadw_entry->index < iommu->maxpages) {
484 addr = shadw_entry->gmem_ptable;
485 memset(addr, 0x0, IOMMU_PAGE_SIZE);
486
487 spin_lock(&iommu->pgtable_bitmap_lock);
488 __clear_bit(shadw_entry->index, iommu->pgtable_bitmap);
489 spin_unlock(&iommu->pgtable_bitmap_lock);
490
491 shadw_entry->index = -1;
492 free_page((unsigned long)shadw_entry->shadow_ptable);
493 shadw_entry->shadow_ptable = NULL;
494 shadw_entry->gmem_ptable = NULL;
495 kfree(shadw_entry);
496 }
497 }
498
499 static void free_pagetable_one_level(iommu_info *iommu_entry, spt_entry *shd_entry, int level)
500 {
501 int i;
502 unsigned long *psentry;
503 spt_entry *shd_entry_tmp;
504 loongson_iommu *iommu = iommu_entry->iommu;
505
506 psentry = (unsigned long *)shd_entry;
507 if (level == IOMMU_PT_LEVEL1) {
508 if (iommu_pt_present(psentry) && (!iommu_pt_huge(psentry)))
509 iommu_free_page(iommu, shd_entry);
510 return;
511 }
512
513 for (i = 0; i < IOMMU_PTRS_PER_LEVEL; i++) {
514 psentry = shd_entry->shadow_ptable + i;
515 if (!iommu_pt_present(psentry))
516 continue;
517
518 shd_entry_tmp = (spt_entry *)(*psentry);
519 free_pagetable_one_level(iommu_entry, shd_entry_tmp, level - 1);
520 }
521
522 iommu_free_page(iommu, shd_entry);
523 }
524
525 static void free_pagetable(iommu_info *iommu_entry)
526 {
527 spt_entry *shd_entry;
528 loongson_iommu *iommu;
529
530 iommu = iommu_entry->iommu;
531 shd_entry = iommu_entry->shadow_pgd;
532 free_pagetable_one_level(iommu_entry, shd_entry, IOMMU_LEVEL_MAX);
533 iommu_entry->shadow_pgd = NULL;
534 }
535
536 static dom_info *dom_info_alloc(void)
537 {
538 dom_info *info;
539
540 info = kzalloc(sizeof(*info), GFP_KERNEL);
541 if (info == NULL) {
542 pr_err("%s alloc memory for info failed\n", __func__);
543 return NULL;
544 }
545
546 /* 0x10000000~0x8fffffff */
547 info->mmio_pgd = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 6);
548 if (info->mmio_pgd == NULL) {
549 pr_err("%s alloc memory for virtio pgtable failed\n", __func__);
550 kfree(info);
551 return NULL;
552 }
553
554 INIT_LIST_HEAD(&info->iommu_devlist);
555 spin_lock_init(&info->lock);
556 return info;
557 }
558
559 static void dom_info_free(dom_info *info)
560 {
561 /* 0x10000000~0x8fffffff */
562 if (info->mmio_pgd) {
563 free_pages((unsigned long)info->mmio_pgd, 6);
564 info->mmio_pgd = NULL;
565 }
566
567 kfree(info);
568 }
569
570 static struct iommu_domain *loongson_iommu_domain_alloc(unsigned int type)
571 {
572 dom_info *info;
573
574 switch (type) {
575 case IOMMU_DOMAIN_UNMANAGED:
576 info = dom_info_alloc();
577 if (info == NULL)
578 return NULL;
579
580 info->domain.geometry.aperture_start = 0;
581 info->domain.geometry.aperture_end = ~0ULL;
582 info->domain.geometry.force_aperture = true;
583 break;
584
585 default:
586 return NULL;
587 }
588
589 return &info->domain;
590 }
591
592 void domain_deattach_iommu(dom_info *priv, iommu_info *iommu_entry)
593 {
594 loongson_iommu *iommu = NULL;
595
596 if (priv == NULL) {
597 pr_err("%s priv is NULL", __func__);
598 return;
599 }
600
601 if (iommu_entry == NULL) {
602 pr_err("%s iommu_entry is NULL", __func__);
603 return;
604 }
605
606 if (iommu_entry->dev_cnt != 0)
607 return;
608
609 iommu = iommu_entry->iommu;
610 if (iommu == NULL) {
611 pr_err("%s iommu is NULL", __func__);
612 return;
613 }
614
615 domain_id_free(iommu_entry->iommu, iommu_entry->id);
616
617 mutex_lock(&iommu->loongson_iommu_pgtlock);
618 free_pagetable(iommu_entry);
619 mutex_unlock(&iommu->loongson_iommu_pgtlock);
620
621 spin_lock(&priv->lock);
622 list_del(&iommu_entry->list);
623 spin_unlock(&priv->lock);
624
625 kfree(iommu_entry);
626 del_domain_from_list(iommu, priv);
627 }
628
629 static void loongson_iommu_domain_free(struct iommu_domain *domain)
630 {
631
632 dom_info *priv;
633 loongson_iommu *iommu = NULL;
634 struct iommu_info *iommu_entry, *iommu_entry_temp;
635
636 priv = to_dom_info(domain);
637
638 spin_lock(&priv->lock);
639 list_for_each_entry_safe(iommu_entry, iommu_entry_temp, &priv->iommu_devlist, list) {
640 iommu = iommu_entry->iommu;
641
642 if (iommu_entry->dev_cnt > 0)
643 cleanup_iommu_entry(iommu_entry);
644
645 spin_unlock(&priv->lock);
646 domain_deattach_iommu(priv, iommu_entry);
647 spin_lock(&priv->lock);
648
649 __iommu_flush_iotlb_all(iommu);
650
651 if (!has_dom(iommu))
652 iommu_translate_disable(iommu);
653
654 }
655 spin_unlock(&priv->lock);
656
657 dom_info_free(priv);
658 }
659
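/*
 * The rlookup tables built at init time map a 16-bit device id to the
 * loongson_iommu instance that translates it, one table per PCI segment;
 * the helpers below only read them.
 */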
660 struct loongson_iommu_rlookup_entry *lookup_rlooptable(int pcisegment)
661 {
662 struct loongson_iommu_rlookup_entry *rlookupentry = NULL;
663
664 list_for_each_entry(rlookupentry, &loongson_rlookup_iommu_list, list) {
665 if (rlookupentry->pcisegment == pcisegment)
666 return rlookupentry;
667 }
668
669 return NULL;
670 }
671
672 loongson_iommu *find_iommu_by_dev(struct pci_dev *pdev)
673 {
674 int pcisegment;
675 unsigned short devid;
676 struct loongson_iommu_rlookup_entry *rlookupentry = NULL;
677 loongson_iommu *iommu = NULL;
678
679 devid = pdev->devfn & 0xff;
680
681 pcisegment = pci_domain_nr(pdev->bus);
682
683 rlookupentry = lookup_rlooptable(pcisegment);
684 if (rlookupentry == NULL) {
685 pr_info("%s find segment %d rlookupentry failed\n", __func__,
686 pcisegment);
687 return iommu;
688 }
689
690 iommu = rlookupentry->loongson_iommu_rlookup_table[devid];
691
692 return iommu;
693 }
694
695 static int iommu_init_device(struct device *dev)
696 {
697 unsigned char busnum;
698 unsigned short bdf, devid;
699 struct pci_dev *pdev = to_pci_dev(dev);
700 struct pci_bus *bus = pdev->bus;
701 struct loongson_iommu_dev_data *dev_data;
702 loongson_iommu *iommu = NULL;
703
704 bdf = pdev->devfn & 0xff;
705 busnum = bus->number;
706 if (busnum != 0) {
707 while (bus->parent->parent)
708 bus = bus->parent;
709 bdf = bus->self->devfn & 0xff;
710 }
711
712 if (dev_iommu_priv_get(dev)) {
713 pr_info("Loongson-IOMMU: bdf:0x%x has added\n", bdf);
714 return 0;
715 }
716
717 dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
718 if (!dev_data)
719 return -ENOMEM;
720
721 devid = PCI_DEVID(bus->number, bdf);
722 dev_data->bdf = devid;
723
724 pci_info(pdev, "%s devid %x bus %x\n", __func__, devid, busnum);
725 iommu = find_iommu_by_dev(pdev);
726 if (iommu == NULL)
727 pci_info(pdev, "%s find iommu failed by dev\n", __func__);
728
729 /* The initial count is 0; it is only incremented when a device is attached */
730 dev_data->count = 0;
731 dev_data->iommu = iommu;
732
733 dev_iommu_priv_set(dev, dev_data);
734
735 return 0;
736 }
737
738 static struct iommu_device *loongson_iommu_probe_device(struct device *dev)
739 {
740 int ret = 0;
741
742 ret = iommu_init_device(dev);
743 if (ret < 0)
744 pr_err("Loongson-IOMMU: unable to alloc memory for dev_data\n");
745
746 return NULL;
747 }
748
749 static struct iommu_group *loongson_iommu_device_group(struct device *dev)
750 {
751 struct iommu_group *group;
752
753 /*
754 * We don't support devices sharing stream IDs other than PCI RID
755 * aliases, since the necessary ID-to-device lookup becomes rather
756 * impractical given a potential sparse 32-bit stream ID space.
757 */
758 if (dev_is_pci(dev))
759 group = pci_device_group(dev);
760 else
761 group = generic_device_group(dev);
762
763 return group;
764 }
765
766 static void loongson_iommu_release_device(struct device *dev)
767 {
768 struct loongson_iommu_dev_data *dev_data;
769
770 dev_data = dev_iommu_priv_get(dev);
771 dev_iommu_priv_set(dev, NULL);
772 kfree(dev_data);
773 }
774
775 iommu_info *get_first_iommu_entry(dom_info *priv)
776 {
777 struct iommu_info *iommu_entry;
778
779 if (priv == NULL) {
780 pr_err("%s priv is NULL", __func__);
781 return NULL;
782 }
783
784 iommu_entry = list_first_entry_or_null(&priv->iommu_devlist,
785 struct iommu_info, list);
786
787 return iommu_entry;
788 }
789
790 iommu_info *get_iommu_entry(dom_info *priv, loongson_iommu *iommu)
791 {
792 struct iommu_info *iommu_entry;
793
794 spin_lock(&priv->lock);
795 list_for_each_entry(iommu_entry, &priv->iommu_devlist, list) {
796 if (iommu_entry->iommu == iommu) {
797 spin_unlock(&priv->lock);
798 return iommu_entry;
799 }
800 }
801 spin_unlock(&priv->lock);
802
803 return NULL;
804 }
805
806 iommu_info *domain_attach_iommu(dom_info *priv, loongson_iommu *iommu)
807 {
808 unsigned long pgd_pa;
809 u32 dir_ctrl, pgd_lo, pgd_hi;
810 struct iommu_info *iommu_entry = NULL;
811 spt_entry *shd_entry = NULL;
812
813 iommu_entry = get_iommu_entry(priv, iommu);
814 if (iommu_entry)
815 return iommu_entry;
816
817 iommu_entry = kzalloc(sizeof(struct iommu_info), GFP_KERNEL);
818 if (iommu_entry == NULL) {
819 pr_info("%s alloc memory for iommu_entry failed\n", __func__);
820 return NULL;
821 }
822
823 INIT_LIST_HEAD(&iommu_entry->dev_list);
824 iommu_entry->iommu = iommu;
825 iommu_entry->id = domain_id_alloc(iommu);
826 if (iommu_entry->id == -1) {
827 pr_info("%s alloc id for domain failed\n", __func__);
828 kfree(iommu_entry);
829 return NULL;
830 }
831
832 shd_entry = iommu_zalloc_page(iommu);
833 if (!shd_entry) {
834 pr_info("%s alloc shadow page entry err\n", __func__);
835 domain_id_free(iommu, iommu_entry->id);
836 kfree(iommu_entry);
837 return NULL;
838 }
839
840 iommu_entry->shadow_pgd = shd_entry;
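/*
 * LA_IOMMU_DIR_CTRL appears to pack the stride and address shift of each of
 * the three translation levels into one register (fields inferred from the
 * shifts below); PGD_HI/PGD_LO then point the hardware at the device-window
 * copy of the top-level table for this domain id.
 */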
841 dir_ctrl = (IOMMU_LEVEL_STRIDE << 26) | (IOMMU_LEVEL_SHIFT(2) << 20);
842 dir_ctrl |= (IOMMU_LEVEL_STRIDE << 16) | (IOMMU_LEVEL_SHIFT(1) << 10);
843 dir_ctrl |= (IOMMU_LEVEL_STRIDE << 6) | IOMMU_LEVEL_SHIFT(0);
844 pgd_pa = iommu_pgt_v2p(iommu, shd_entry->gmem_ptable);
845 pgd_hi = pgd_pa >> 32;
846 pgd_lo = pgd_pa & 0xffffffff;
847 iommu_write_regl(iommu, LA_IOMMU_DIR_CTRL(iommu_entry->id), dir_ctrl);
848 iommu_write_regl(iommu, LA_IOMMU_PGD_HI(iommu_entry->id), pgd_hi);
849 iommu_write_regl(iommu, LA_IOMMU_PGD_LO(iommu_entry->id), pgd_lo);
850
851 spin_lock(&priv->lock);
852 list_add(&iommu_entry->list, &priv->iommu_devlist);
853 spin_unlock(&priv->lock);
854
855 add_domain_to_list(iommu, priv);
856 pr_info("%s iommu_entry->iommu %lx id %x\n", __func__,
857 (unsigned long)iommu_entry->iommu, iommu_entry->id);
858
859 return iommu_entry;
860 }
861
862 static struct loongson_iommu_dev_data *iommu_get_devdata(dom_info *info,
863 loongson_iommu *iommu, unsigned long bdf)
864 {
865 struct iommu_info *entry;
866 struct loongson_iommu_dev_data *dev_data;
867
868 entry = get_iommu_entry(info, iommu);
869 if (!entry)
870 return NULL;
871
872 /* Find from priv list */
873 spin_lock(&entry->devlock);
874 list_for_each_entry(dev_data, &entry->dev_list, list) {
875 if (dev_data->bdf == bdf) {
876 spin_unlock(&entry->devlock);
877 return dev_data;
878 }
879 }
880
881 spin_unlock(&entry->devlock);
882 return NULL;
883 }
884
885 static int loongson_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
886 {
887 unsigned short bdf;
888 struct pci_dev *pdev = to_pci_dev(dev);
889 struct pci_bus *bus = pdev->bus;
890 unsigned char busnum = pdev->bus->number;
891 struct loongson_iommu_dev_data *dev_data;
892 dom_info *priv = to_dom_info(domain);
893 loongson_iommu *iommu;
894 iommu_info *iommu_entry = NULL;
895
896 bdf = pdev->devfn & 0xff;
897 if (busnum != 0) {
898 while (bus->parent->parent)
899 bus = bus->parent;
900 bdf = bus->self->devfn & 0xff;
901 }
902
903 dev_data = (struct loongson_iommu_dev_data *)dev_iommu_priv_get(dev);
904 if (dev_data == NULL) {
905 pci_info(pdev, "%s dev_data is Invalid\n", __func__);
906 return 0;
907 }
908
909 iommu = dev_data->iommu;
910 if (iommu == NULL) {
911 pci_info(pdev, "%s iommu is Invalid\n", __func__);
912 return 0;
913 }
914
915 pci_info(pdev, "%s busnum %x bdf %x priv %lx iommu %lx\n", __func__,
916 busnum, bdf, (unsigned long)priv, (unsigned long)iommu);
917 dev_data = iommu_get_devdata(priv, iommu, bdf);
918 if (dev_data) {
919 dev_data->count++;
920 pci_info(pdev, "Loongson-IOMMU: bdf 0x%x devfn %x has attached,"
921 " count:0x%x\n",
922 bdf, pdev->devfn, dev_data->count);
923 return 0;
924 } else {
925 dev_data = (struct loongson_iommu_dev_data *)dev_iommu_priv_get(dev);
926 }
927
928 iommu_entry = domain_attach_iommu(priv, iommu);
929 if (iommu_entry == NULL) {
930 pci_info(pdev, "domain attach iommu failed\n");
931 return 0;
932 }
933
934 dev_data->count++;
935 do_attach(iommu_entry, dev_data);
936
937 return 0;
938 }
939
940 static void loongson_iommu_detach_dev(struct iommu_domain *domain,
941 struct device *dev)
942 {
943 unsigned short bdf;
944 struct pci_dev *pdev = to_pci_dev(dev);
945 struct pci_bus *bus = pdev->bus;
946 unsigned char busnum = pdev->bus->number;
947 struct loongson_iommu_dev_data *dev_data;
948 dom_info *priv = to_dom_info(domain);
949 loongson_iommu *iommu;
950 iommu_info *iommu_entry = NULL;
951
952 bdf = pdev->devfn & 0xff;
953 if (busnum != 0) {
954 while (bus->parent->parent)
955 bus = bus->parent;
956 bdf = bus->self->devfn & 0xff;
957 }
958
959 dev_data = (struct loongson_iommu_dev_data *)dev_iommu_priv_get(dev);
960 if (dev_data == NULL) {
961 pci_info(pdev, "%s dev_data is Invalid\n", __func__);
962 return;
963 }
964
965 iommu = dev_data->iommu;
966 if (iommu == NULL) {
967 pci_info(pdev, "%s iommu is Invalid\n", __func__);
968 return;
969 }
970
971 dev_data = iommu_get_devdata(priv, iommu, bdf);
972 if (dev_data == NULL) {
973 pci_info(pdev, "Loongson-IOMMU: bdf 0x%x devfn %x dev_data is NULL\n",
974 bdf, pdev->devfn & 0xff);
975 return;
976 }
977
978 iommu = dev_data->iommu;
979 dev_data->count--;
980 iommu_entry = get_iommu_entry(priv, iommu);
981 if (iommu_entry == NULL) {
982 pci_info(pdev, "%s get iommu_entry failed\n", __func__);
983 return;
984 }
985
986 spin_lock(&iommu_entry->devlock);
987 do_detach(dev_data);
988 spin_unlock(&iommu_entry->devlock);
989
990 pci_info(pdev, "%s iommu devid %x sigment %x\n", __func__,
991 iommu->devid, iommu->segment);
992 }
993
994 static unsigned long *iommu_get_spte(spt_entry *entry, unsigned long iova, int level)
995 {
996 int i;
997 unsigned long *pte;
998
999 if (level > (IOMMU_LEVEL_MAX - 1))
1000 return NULL;
1001
1002 for (i = IOMMU_LEVEL_MAX - 1; i >= level; i--) {
1003 pte = iommu_shadow_offset(entry, iova, i);
1004 if (!iommu_pt_present(pte))
1005 break;
1006
1007 if (iommu_pt_huge(pte))
1008 break;
1009
1010 entry = (spt_entry *)(*pte);
1011 }
1012
1013 return pte;
1014 }
1015
1016 static int _iommu_alloc_ptable(loongson_iommu *iommu,
1017 unsigned long *psentry, unsigned long *phwentry)
1018 {
1019 unsigned long pte;
1020 iommu_pte *new_phwentry;
1021 spt_entry *new_shd_entry;
1022
1023 if (!iommu_pt_present(psentry)) {
1024 new_shd_entry = iommu_zalloc_page(iommu);
1025 if (!new_shd_entry) {
1026 pr_err("Loongson-IOMMU: new_shd_entry alloc err\n");
1027 return -ENOMEM;
1028 }
1029 /* fill shd_entry */
1030 *psentry = (unsigned long)new_shd_entry;
1031 /* fill gmem phwentry */
1032 new_phwentry = (iommu_pte *)new_shd_entry->gmem_ptable;
1033 pte = iommu_pgt_v2p(iommu, new_phwentry) & IOMMU_PAGE_MASK;
1034 pte |= IOMMU_PTE_RW;
1035 *phwentry = pte;
1036 }
1037
1038 return 0;
1039 }
1040
1041 static size_t iommu_ptw_map(loongson_iommu *iommu, spt_entry *shd_entry,
1042 unsigned long start, unsigned long end, phys_addr_t pa, int level)
1043 {
1044 int ret, huge;
1045 unsigned long pte;
1046 unsigned long next, old, step;
1047 unsigned long *psentry, *phwentry;
1048
1049 old = start;
1050 psentry = iommu_shadow_offset(shd_entry, start, level);
1051 phwentry = iommu_ptable_offset(shd_entry->gmem_ptable, start, level);
1052 if (level == IOMMU_PT_LEVEL0) {
1053 pa = pa & IOMMU_PAGE_MASK;
1054 do {
1055 pte = pa | IOMMU_PTE_RW;
1056 *phwentry = pte;
1057 *psentry = pte;
1058 psentry++;
1059 phwentry++;
1060 start += IOMMU_PAGE_SIZE;
1061 pa += IOMMU_PAGE_SIZE;
1062 } while (start < end);
1063
1064 return start - old;
1065 }
1066
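/*
 * Non-leaf levels: each iteration either installs a huge-page PTE (only at
 * IOMMU_PT_LEVEL1, when the remaining range covers exactly one
 * IOMMU_HPAGE_SIZE block and the slot is empty or already huge) or
 * allocates the next-level shadow/hardware table pair and recurses.
 */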
1067 do {
1068 next = iommu_ptable_end(start, end, level);
1069 step = next - start;
1070
1071 huge = 0;
1072 if ((level == IOMMU_PT_LEVEL1) && (step == IOMMU_HPAGE_SIZE))
1073 if (!iommu_pt_present(psentry) || iommu_pt_huge(psentry))
1074 huge = 1;
1075
1076 if (huge) {
1077 pte = (pa & IOMMU_HPAGE_MASK) | IOMMU_PTE_RW | IOMMU_PTE_HP;
1078 *phwentry = pte;
1079 *psentry = pte;
1080 } else {
1081 ret = _iommu_alloc_ptable(iommu, psentry, phwentry);
1082 if (ret != 0)
1083 break;
1084 iommu_ptw_map(iommu, (spt_entry *)*psentry, start, next, pa, level - 1);
1085 }
1086
1087 psentry++;
1088 phwentry++;
1089 pa += step;
1090 start = next;
1091 } while (start < end);
1092
1093 return start - old;
1094 }
1095
1096 static int dev_map_page(iommu_info *iommu_entry, unsigned long start,
1097 phys_addr_t pa, size_t size)
1098 {
1099 int ret = 0;
1100 spt_entry *entry;
1101 phys_addr_t end;
1102 size_t map_size;
1103 loongson_iommu *iommu;
1104
1105 end = start + size;
1106 iommu = iommu_entry->iommu;
1107
1108 mutex_lock(&iommu->loongson_iommu_pgtlock);
1109 entry = iommu_entry->shadow_pgd;
1110 map_size = iommu_ptw_map(iommu, entry, start, end, pa, IOMMU_LEVEL_MAX - 1);
1111 if (map_size != size)
1112 ret = -EFAULT;
1113
1114 if (has_dom(iommu))
1115 __iommu_flush_iotlb_all(iommu);
1116 mutex_unlock(&iommu->loongson_iommu_pgtlock);
1117
1118 return ret;
1119 }
1120
1121 static size_t iommu_ptw_unmap(loongson_iommu *iommu, spt_entry *shd_entry,
1122 unsigned long start, unsigned long end, int level)
1123 {
1124 unsigned long next, old;
1125 unsigned long *psentry, *phwentry;
1126
1127 old = start;
1128 psentry = iommu_shadow_offset(shd_entry, start, level);
1129 phwentry = iommu_ptable_offset(shd_entry->gmem_ptable, start, level);
1130 if (level == IOMMU_PT_LEVEL0) {
1131 do {
1132 *phwentry++ = 0;
1133 *psentry++ = 0;
1134 start += IOMMU_PAGE_SIZE;
1135 } while (start < end);
1136 } else {
1137 do {
1138 next = iommu_ptable_end(start, end, level);
1139 if (!iommu_pt_present(psentry))
1140 continue;
1141
1142 if (iommu_pt_huge(psentry)) {
1143 if ((next - start) != IOMMU_HPAGE_SIZE)
1144 pr_err("Map pte on hugepage not supported now\n");
1145 *phwentry = 0;
1146 *psentry = 0;
1147 } else
1148 iommu_ptw_unmap(iommu, (spt_entry *)*psentry, start, next, level - 1);
1149 } while (psentry++, phwentry++, start = next, start < end);
1150 }
1151
1152 return start - old;
1153 }
1154
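/*
 * Map entry point for a domain: IOVAs inside the fixed virtio/passthrough
 * window are written straight into mmio_pgd, everything else is pushed into
 * the shadow and hardware page tables of every IOMMU attached to the domain.
 */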
1155 static int iommu_map_page(dom_info *priv, unsigned long start,
1156 phys_addr_t pa, size_t size, int prot, gfp_t gfp)
1157 {
1158
1159 unsigned long *pte;
1160 int ret = 0;
1161 iommu_info *iommu_entry = NULL;
1162
1163 /* 0x10000000~0x8fffffff */
1164 if ((start >= IOVA_START) && (start < IOVA_END0)) {
1165 start -= IOVA_START;
1166 pte = (unsigned long *)priv->mmio_pgd;
1167 while (size > 0) {
1168 pte[start >> LA_VIRTIO_PAGE_SHIFT] =
1169 pa & LA_VIRTIO_PAGE_MASK;
1170 size -= IOMMU_PAGE_SIZE;
1171 start += IOMMU_PAGE_SIZE;
1172 pa += IOMMU_PAGE_SIZE;
1173 }
1174 return 0;
1175 }
1176
1177 spin_lock(&priv->lock);
1178 list_for_each_entry(iommu_entry, &priv->iommu_devlist, list) {
1179 ret |= dev_map_page(iommu_entry, start, pa, size);
1180 }
1181 spin_unlock(&priv->lock);
1182
1183 return ret;
1184 }
1185
1186 static size_t iommu_unmap_page(iommu_info *iommu_entry, unsigned long start, size_t size)
1187 {
1188 loongson_iommu *iommu;
1189 spt_entry *entry;
1190 size_t unmap_len;
1191 unsigned long end;
1192
1193 end = start + size;
1194 iommu = iommu_entry->iommu;
1195 mutex_lock(&iommu->loongson_iommu_pgtlock);
1196 entry = iommu_entry->shadow_pgd;
1197 unmap_len = iommu_ptw_unmap(iommu, entry, start, end, (IOMMU_LEVEL_MAX - 1));
1198
1199 if (has_dom(iommu))
1200 __iommu_flush_iotlb_all(iommu);
1201 mutex_unlock(&iommu->loongson_iommu_pgtlock);
1202 return unmap_len;
1203 }
1204
1205 static size_t domain_unmap_page(dom_info *info, unsigned long start, size_t size)
1206 {
1207 unsigned long *pte;
1208 size_t unmap_len = 0;
1209 iommu_info *entry;
1210
1211 /* 0x10000000~0x8fffffff */
1212 if ((start >= IOVA_START) && (start < IOVA_END0)) {
1213 start -= IOVA_START;
1214 pte = (unsigned long *)info->mmio_pgd;
1215 while (size > 0) {
1216 pte[start >> LA_VIRTIO_PAGE_SHIFT] = 0;
1217 size -= IOMMU_PAGE_SIZE;
1218 unmap_len += IOMMU_PAGE_SIZE;
1219 start += IOMMU_PAGE_SIZE;
1220 }
1221 unmap_len += size;
1222
1223 return unmap_len;
1224 }
1225
1226 spin_lock(&info->lock);
1227 list_for_each_entry(entry, &info->iommu_devlist, list)
1228 unmap_len = iommu_unmap_page(entry, start, size);
1229 spin_unlock(&info->lock);
1230
1231 return unmap_len;
1232 }
1233
1234 static int loongson_iommu_map(struct iommu_domain *domain, unsigned long iova,
1235 phys_addr_t pa, size_t len, int prot, gfp_t gfp)
1236 {
1237 dom_info *priv = to_dom_info(domain);
1238
1239 return iommu_map_page(priv, iova, pa, len, prot, GFP_KERNEL);
1240 }
1241
1242 static size_t loongson_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
1243 size_t size, struct iommu_iotlb_gather *gather)
1244 {
1245 dom_info *priv = to_dom_info(domain);
1246
1247 return domain_unmap_page(priv, iova, size);
1248 }
1249
1250 static phys_addr_t loongson_iommu_iova_to_pa(struct iommu_domain *domain,
1251 dma_addr_t iova)
1252 {
1253 unsigned long pa, offset, tmpva, page_size, page_mask;
1254 dom_info *priv = to_dom_info(domain);
1255 unsigned long *psentry, *pte;
1256 int ret = 0;
1257 spt_entry *entry;
1258 loongson_iommu *iommu;
1259 iommu_info *iommu_entry = NULL;
1260
1261 /* 0x10000000~0x8fffffff */
1262 if ((iova >= IOVA_START) && (iova < IOVA_END0)) {
1263 tmpva = iova & LA_VIRTIO_PAGE_MASK;
1264 pte = (unsigned long *)priv->mmio_pgd;
1265 offset = iova & ((1ULL << LA_VIRTIO_PAGE_SHIFT) - 1);
1266 pa = pte[(tmpva - IOVA_START) >> LA_VIRTIO_PAGE_SHIFT] + offset;
1267
1268 return pa;
1269 }
1270
1271 iommu_entry = get_first_iommu_entry(priv);
1272 if (iommu_entry == NULL) {
1273 pr_err("%s iova:0x%llx iommu_entry is invalid\n",
1274 __func__, iova);
1275 ret = -EFAULT;
1276 return ret;
1277 }
1278
1279 iommu = iommu_entry->iommu;
1280
1281 mutex_lock(&iommu->loongson_iommu_pgtlock);
1282 entry = iommu_entry->shadow_pgd;
1283 psentry = iommu_get_spte(entry, iova, IOMMU_PT_LEVEL0);
1284 mutex_unlock(&iommu->loongson_iommu_pgtlock);
1285
1286 if (!psentry || !iommu_pt_present(psentry)) {
1287 ret = -EFAULT;
1288 pr_warn_once("Loongson-IOMMU: shadow pte is null or not present with iova %llx \n", iova);
1289 return ret;
1290 }
1291
1292 if (iommu_pt_huge(psentry)) {
1293 page_size = IOMMU_HPAGE_SIZE;
1294 page_mask = IOMMU_HPAGE_MASK;
1295 } else {
1296 page_size = IOMMU_PAGE_SIZE;
1297 page_mask = IOMMU_PAGE_MASK;
1298 }
1299
1300 pa = *psentry & page_mask;
1301 pa |= (iova & (page_size - 1));
1302
1303 return (phys_addr_t)pa;
1304 }
1305
1306 static phys_addr_t loongson_iommu_iova_to_phys(struct iommu_domain *domain,
1307 dma_addr_t iova)
1308 {
1309 phys_addr_t pa;
1310
1311 pa = loongson_iommu_iova_to_pa(domain, iova);
1312
1313 return pa;
1314 }
1315
1316 static void loongson_iommu_flush_iotlb_all(struct iommu_domain *domain)
1317 {
1318 int ret;
1319 dom_info *priv = to_dom_info(domain);
1320 iommu_info *iommu_entry;
1321 loongson_iommu *iommu;
1322
1323 spin_lock(&priv->lock);
1324 list_for_each_entry(iommu_entry, &priv->iommu_devlist, list) {
1325 iommu = iommu_entry->iommu;
1326
1327 ret = __iommu_flush_iotlb_all(iommu);
1328 }
1329 spin_unlock(&priv->lock);
1330 }
1331
1332 static void loongson_iommu_iotlb_sync(struct iommu_domain *domain,
1333 struct iommu_iotlb_gather *gather)
1334 {
1335 loongson_iommu_flush_iotlb_all(domain);
1336 }
1337
1338 static struct iommu_ops loongson_iommu_ops = {
1339 .capable = loongson_iommu_capable,
1340 .domain_alloc = loongson_iommu_domain_alloc,
1341 .domain_free = loongson_iommu_domain_free,
1342 .attach_dev = loongson_iommu_attach_dev,
1343 .detach_dev = loongson_iommu_detach_dev,
1344 .map = loongson_iommu_map,
1345 .unmap = loongson_iommu_unmap,
1346 .iova_to_phys = loongson_iommu_iova_to_phys,
1347 .probe_device = loongson_iommu_probe_device,
1348 .release_device = loongson_iommu_release_device,
1349 .device_group = loongson_iommu_device_group,
1350 .pgsize_bitmap = LA_IOMMU_PGSIZE,
1351 .flush_iotlb_all = loongson_iommu_flush_iotlb_all,
1352 .iotlb_sync = loongson_iommu_iotlb_sync,
1353 };
1354
1355 loongson_iommu *loongarch_get_iommu(struct pci_dev *pdev)
1356 {
1357 int pcisegment;
1358 unsigned short devid;
1359 loongson_iommu *iommu = NULL;
1360
1361 devid = pdev->devfn & 0xff;
1362 pcisegment = pci_domain_nr(pdev->bus);
1363
1364 list_for_each_entry(iommu, &loongson_iommu_list, list) {
1365 if ((iommu->segment == pcisegment) &&
1366 (iommu->devid == devid)) {
1367 return iommu;
1368 }
1369 }
1370
1371 return NULL;
1372 }
1373
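/*
 * PCI probe: judging from the resource usage below, BAR 0 holds the IOMMU
 * control registers (membase) and BAR 2 exposes the on-device page-table
 * memory (pgtbase) that iommu_zalloc_page() hands out in IOMMU_PAGE_SIZE
 * chunks.
 */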
1374 static int loongson_iommu_probe(struct pci_dev *pdev,
1375 const struct pci_device_id *ent)
1376 {
1377 int ret = -ENOMEM;
1378 int bitmap_sz = 0;
1379 int tmp;
1380 struct loongson_iommu *iommu = NULL;
1381 resource_size_t base, size;
1382
1383 iommu = loongarch_get_iommu(pdev);
1384 if (iommu == NULL) {
1385 pci_info(pdev, "%s can't find iommu\n", __func__);
1386 return -ENODEV;
1387 }
1388
1389 base = pci_resource_start(pdev, 0);
1390 size = pci_resource_len(pdev, 0);
1391 if (!request_mem_region(base, size, "loongson_iommu")) {
1392 pci_err(pdev, "can't reserve mmio registers\n");
1393 return -ENOMEM;
1394 }
1395
1396 iommu->membase = ioremap(base, size);
1397 if (iommu->membase == NULL) {
1398 pci_info(pdev, "%s iommu pci dev bar0 is NULL\n", __func__);
1399 return ret;
1400 }
1401
1402 base = pci_resource_start(pdev, 2);
1403 size = pci_resource_len(pdev, 2);
1404 if (!request_mem_region(base, size, "loongson_iommu")) {
1405 pci_err(pdev, "can't reserve mmio registers\n");
1406 return -ENOMEM;
1407 }
1408 iommu->pgtbase = ioremap(base, size);
1409 if (iommu->pgtbase == NULL)
1410 return -ENOMEM;
1411
1412 iommu->maxpages = size / IOMMU_PAGE_SIZE;
1413 pr_info("iommu membase %p pgtbase %p pgtsize %llx maxpages %lx\n", iommu->membase, iommu->pgtbase, size, iommu->maxpages);
1414 tmp = MAX_DOMAIN_ID / 8;
1415 bitmap_sz = (MAX_DOMAIN_ID % 8) ? (tmp + 1) : tmp;
1416 iommu->domain_bitmap = bitmap_zalloc(MAX_DOMAIN_ID, GFP_KERNEL);
1417 if (iommu->domain_bitmap == NULL) {
1418 pr_err("Loongson-IOMMU: domain bitmap alloc err bitmap_sz:%d\n", bitmap_sz);
1419 goto out_err;
1420 }
1421
1422 tmp = MAX_ATTACHED_DEV_ID / 8;
1423 bitmap_sz = (MAX_ATTACHED_DEV_ID % 8) ? (tmp + 1) : tmp;
1424 iommu->devtable_bitmap = bitmap_zalloc(MAX_ATTACHED_DEV_ID, GFP_KERNEL);
1425 if (iommu->devtable_bitmap == NULL) {
1426 pr_err("Loongson-IOMMU: devtable bitmap alloc err bitmap_sz:%d\n", bitmap_sz);
1427 goto out_err_1;
1428 }
1429
1430 tmp = iommu->maxpages / 8;
1431 bitmap_sz = (iommu->maxpages % 8) ? (tmp + 1) : tmp;
1432 iommu->pgtable_bitmap = bitmap_zalloc(iommu->maxpages, GFP_KERNEL);
1433 if (iommu->pgtable_bitmap == NULL) {
1434 pr_err("Loongson-IOMMU: pgtable bitmap alloc err bitmap_sz:%d\n", bitmap_sz);
1435 goto out_err_2;
1436 }
1437
1438 bus_set_iommu(&pci_bus_type, &loongson_iommu_ops);
1439
1440 return 0;
1441
1442 out_err_2:
1443 kfree(iommu->devtable_bitmap);
1444 iommu->devtable_bitmap = NULL;
1445 out_err_1:
1446 kfree(iommu->domain_bitmap);
1447 iommu->domain_bitmap = NULL;
1448 out_err:
1449
1450 return ret;
1451 }
1452
1453 static void loongson_iommu_remove(struct pci_dev *pdev)
1454 {
1455 struct loongson_iommu *iommu = NULL;
1456
1457 iommu = loongarch_get_iommu(pdev);
1458 if (iommu == NULL)
1459 return;
1460
1461 if (iommu->domain_bitmap != NULL) {
1462 kfree(iommu->domain_bitmap);
1463 iommu->domain_bitmap = NULL;
1464 }
1465
1466 if (iommu->devtable_bitmap != NULL) {
1467 kfree(iommu->devtable_bitmap);
1468 iommu->devtable_bitmap = NULL;
1469 }
1470
1471 if (iommu->pgtable_bitmap != NULL) {
1472 kfree(iommu->pgtable_bitmap);
1473 iommu->pgtable_bitmap = NULL;
1474 }
1475
1476 iommu->membase = NULL;
1477 iommu->pgtbase = NULL;
1478 }
1479
1480 static int __init check_ivrs_checksum(struct acpi_table_header *table)
1481 {
1482 int i;
1483 u8 checksum = 0, *p = (u8 *)table;
1484
1485 for (i = 0; i < table->length; ++i)
1486 checksum += p[i];
1487 if (checksum != 0) {
1488 /* ACPI table corrupt */
1489 pr_err("IVRS invalid checksum\n");
1490 return -ENODEV;
1491 }
1492
1493 return 0;
1494 }
1495
1496 struct loongson_iommu_rlookup_entry *create_rlookup_entry(int pcisegment)
1497 {
1498 struct loongson_iommu_rlookup_entry *rlookupentry = NULL;
1499
1500 rlookupentry = kzalloc(sizeof(struct loongson_iommu_rlookup_entry), GFP_KERNEL);
1501 if (rlookupentry == NULL)
1502 return rlookupentry;
1503
1504 rlookupentry->pcisegment = pcisegment;
1505
1506 /* IOMMU rlookup table - find the IOMMU for a specific device */
1507 rlookupentry->loongson_iommu_rlookup_table = (void *)__get_free_pages(
1508 GFP_KERNEL | __GFP_ZERO, get_order(rlookup_table_size));
1509 if (rlookupentry->loongson_iommu_rlookup_table == NULL) {
1510 kfree(rlookupentry);
1511 rlookupentry = NULL;
1512 } else {
1513 list_add(&rlookupentry->list, &loongson_rlookup_iommu_list);
1514 }
1515
1516 return rlookupentry;
1517 }
1518
1519 /* Writes the specific IOMMU for a device into the rlookup table */
1520 static void __init set_iommu_for_device(loongson_iommu *iommu,
1521 u16 devid)
1522 {
1523 struct loongson_iommu_rlookup_entry *rlookupentry = NULL;
1524
1525 rlookupentry = lookup_rlooptable(iommu->segment);
1526 if (rlookupentry == NULL)
1527 rlookupentry = create_rlookup_entry(iommu->segment);
1528
1529 if (rlookupentry != NULL)
1530 rlookupentry->loongson_iommu_rlookup_table[devid] = iommu;
1531 }
1532
1533 static inline u32 get_ivhd_header_size(struct ivhd_header *h)
1534 {
1535 u32 size = 0;
1536
1537 switch (h->type) {
1538 case IVHD_HEAD_TYPE10:
1539 size = 24;
1540 break;
1541 case IVHD_HEAD_TYPE11:
1542 case IVHD_HEAD_TYPE40:
1543 size = 40;
1544 break;
1545 }
1546 return size;
1547 }
1548
1549 static inline void update_last_devid(u16 devid)
1550 {
1551 if (devid > loongson_iommu_last_bdf)
1552 loongson_iommu_last_bdf = devid;
1553 }
1554
1555 /*
1556 * This function calculates the length of a given IVHD entry
1557 */
1558 static inline int ivhd_entry_length(u8 *ivhd)
1559 {
1560 u32 type = ((struct ivhd_entry *)ivhd)->type;
1561
1562 if (type < 0x80) {
1563 return 0x04 << (*ivhd >> 6);
1564 } else if (type == IVHD_DEV_ACPI_HID) {
1565 /* For ACPI_HID, offset 21 is uid len */
1566 return *((u8 *)ivhd + 21) + 22;
1567 }
1568 return 0;
1569 }
1570
1571 /*
1572 * After reading the highest device id from the IOMMU PCI capability header
1573 * this function looks if there is a higher device id defined in the ACPI table
1574 */
1575 static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
1576 {
1577 u8 *p = (void *)h, *end = (void *)h;
1578 struct ivhd_entry *dev;
1579
1580 u32 ivhd_size = get_ivhd_header_size(h);
1581
1582 if (!ivhd_size) {
1583 pr_err("loongson-iommu: Unsupported IVHD type %#x\n", h->type);
1584 return -EINVAL;
1585 }
1586
1587 p += ivhd_size;
1588 end += h->length;
1589
1590 while (p < end) {
1591 dev = (struct ivhd_entry *)p;
1592 switch (dev->type) {
1593 case IVHD_DEV_ALL:
1594 /* Use maximum BDF value for DEV_ALL */
1595 update_last_devid(MAX_BDF_NUM);
1596 break;
1597 case IVHD_DEV_SELECT:
1598 case IVHD_DEV_RANGE_END:
1599 case IVHD_DEV_ALIAS:
1600 case IVHD_DEV_EXT_SELECT:
1601 /* all the above subfield types refer to device ids */
1602 update_last_devid(dev->devid);
1603 break;
1604 default:
1605 break;
1606 }
1607 p += ivhd_entry_length(p);
1608 }
1609
1610 WARN_ON(p != end);
1611
1612 return 0;
1613 }
1614
1615 /*
1616 * Iterate over all IVHD entries in the ACPI table and find the highest device
1617 * id which we need to handle. This is the first of three functions which parse
1618 * the ACPI table. So we check the checksum here.
1619 */
1620 static int __init find_last_devid_acpi(struct acpi_table_header *table)
1621 {
1622 u8 *p = (u8 *)table, *end = (u8 *)table;
1623 struct ivhd_header *h;
1624
1625 p += IVRS_HEADER_LENGTH;
1626
1627 end += table->length;
1628 while (p < end) {
1629 h = (struct ivhd_header *)p;
1630 if (h->type == loongson_iommu_target_ivhd_type) {
1631 int ret = find_last_devid_from_ivhd(h);
1632
1633 if (ret)
1634 return ret;
1635 }
1636
1637 if (h->length == 0)
1638 break;
1639
1640 p += h->length;
1641 }
1642
1643 if (p != end)
1644 return -EINVAL;
1645
1646
1647 return 0;
1648 }
1649
1650 /*
1651  * Takes a pointer to a LoongArch IOMMU entry in the ACPI table and
1652 * initializes the hardware and our data structures with it.
1653 */
1654 static int __init init_iommu_from_acpi(loongson_iommu *iommu,
1655 struct ivhd_header *h)
1656 {
1657 u8 *p = (u8 *)h;
1658 u8 *end = p;
1659 u16 devid = 0, devid_start = 0;
1660 u32 dev_i, ivhd_size;
1661 struct ivhd_entry *e;
1662
1663 /*
1664 * Done. Now parse the device entries
1665 */
1666 ivhd_size = get_ivhd_header_size(h);
1667 if (!ivhd_size) {
1668 pr_err("loongarch iommu: Unsupported IVHD type %#x\n", h->type);
1669 return -EINVAL;
1670 }
1671
1672 if (h->length == 0)
1673 return -EINVAL;
1674
1675 p += ivhd_size;
1676 end += h->length;
1677
1678 while (p < end) {
1679 e = (struct ivhd_entry *)p;
1680 switch (e->type) {
1681 case IVHD_DEV_ALL:
1682 for (dev_i = 0; dev_i <= loongson_iommu_last_bdf; ++dev_i)
1683 set_iommu_for_device(iommu, dev_i);
1684 break;
1685
1686 case IVHD_DEV_SELECT:
1687 pr_info(" DEV_SELECT\t\t\t devid: %02x:%02x.%x\n",
1688 PCI_BUS_NUM(e->devid), PCI_SLOT(e->devid), PCI_FUNC(e->devid));
1689
1690 devid = e->devid;
1691 set_iommu_for_device(iommu, devid);
1692 break;
1693
1694 case IVHD_DEV_SELECT_RANGE_START:
1695 pr_info(" DEV_SELECT_RANGE_START\t devid: %02x:%02x.%x\n",
1696 PCI_BUS_NUM(e->devid), PCI_SLOT(e->devid), PCI_FUNC(e->devid));
1697
1698 devid_start = e->devid;
1699 break;
1700
1701 case IVHD_DEV_RANGE_END:
1702 pr_info(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
1703 PCI_BUS_NUM(e->devid), PCI_SLOT(e->devid), PCI_FUNC(e->devid));
1704
1705 devid = e->devid;
1706 for (dev_i = devid_start; dev_i <= devid; ++dev_i)
1707 set_iommu_for_device(iommu, dev_i);
1708 break;
1709
1710 default:
1711 break;
1712 }
1713
1714 p += ivhd_entry_length(p);
1715 }
1716
1717 return 0;
1718 }
1719
1720 /*
1721 * This function clues the initialization function for one IOMMU
1722 * together and also allocates the command buffer and programs the
1723 * hardware. It does NOT enable the IOMMU. This is done afterwards.
1724 */
1725 static int __init init_iommu_one(loongson_iommu *iommu, struct ivhd_header *h)
1726 {
1727 int ret;
1728 struct loongson_iommu_rlookup_entry *rlookupentry = NULL;
1729
1730 spin_lock_init(&iommu->domain_bitmap_lock);
1731 spin_lock_init(&iommu->dom_info_lock);
1732 spin_lock_init(&iommu->pgtable_bitmap_lock);
1733 mutex_init(&iommu->loongson_iommu_pgtlock);
1734
1735 /* Add IOMMU to internal data structures */
1736 INIT_LIST_HEAD(&iommu->dom_list);
1737
1738 list_add_tail(&iommu->list, &loongson_iommu_list);
1739
1740 /*
1741 * Copy data from ACPI table entry to the iommu struct
1742 */
1743 iommu->devid = h->devid;
1744 iommu->segment = h->pci_seg;
1745
1746 ret = init_iommu_from_acpi(iommu, h);
1747 if (ret) {
1748 pr_err("%s init iommu from acpi failed\n", __func__);
1749 return ret;
1750 }
1751
1752 rlookupentry = lookup_rlooptable(iommu->segment);
1753 if (rlookupentry != NULL) {
1754 /*
1755 * Make sure IOMMU is not considered to translate itself.
1756 * The IVRS table tells us so, but this is a lie!
1757 */
1758 rlookupentry->loongson_iommu_rlookup_table[iommu->devid] = NULL;
1759 }
1760
1761 return 0;
1762 }
1763
1764 /*
1765 * Iterates over all IOMMU entries in the ACPI table, allocates the
1766 * IOMMU structure and initializes it with init_iommu_one()
1767 */
1768 static int __init init_iommu_all(struct acpi_table_header *table)
1769 {
1770 int ret;
1771 u8 *p = (u8 *)table, *end = (u8 *)table;
1772 struct ivhd_header *h;
1773 loongson_iommu *iommu;
1774
1775 end += table->length;
1776 p += IVRS_HEADER_LENGTH;
1777
1778 while (p < end) {
1779 h = (struct ivhd_header *)p;
1780
1781 if (h->length == 0)
1782 break;
1783
1784 if (*p == loongson_iommu_target_ivhd_type) {
1785 pr_info("device: %02x:%02x.%01x seg: %d\n",
1786 PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid), PCI_FUNC(h->devid), h->pci_seg);
1787
1788 iommu = kzalloc(sizeof(loongson_iommu), GFP_KERNEL);
1789 if (iommu == NULL) {
1790 pr_info("%s alloc memory for iommu failed\n", __func__);
1791 return -ENOMEM;
1792 }
1793
1794 ret = init_iommu_one(iommu, h);
1795 if (ret) {
1796 kfree(iommu);
1797 pr_info("%s init iommu failed\n", __func__);
1798 return ret;
1799 }
1800 }
1801 p += h->length;
1802 }
1803
1804 if (p != end)
1805 return -EINVAL;
1806
1807 return 0;
1808 }
1809
1810 /**
1811 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
1812  * @ivrs: Pointer to the IVRS header
1813  *
1814  * This function searches through all IVHDs and returns the highest supported IVHD type
1815 */
1816 static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
1817 {
1818 u8 *base = (u8 *)ivrs;
1819 struct ivhd_header *ivhd = (struct ivhd_header *)(base + IVRS_HEADER_LENGTH);
1820 u8 last_type = ivhd->type;
1821 u16 devid = ivhd->devid;
1822
1823 while (((u8 *)ivhd - base < ivrs->length) &&
1824 (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED) &&
1825 (ivhd->length > 0)) {
1826 u8 *p = (u8 *) ivhd;
1827
1828 if (ivhd->devid == devid)
1829 last_type = ivhd->type;
1830 ivhd = (struct ivhd_header *)(p + ivhd->length);
1831 }
1832
1833 return last_type;
1834 }
1835
1836 static inline unsigned long tbl_size(int entry_size)
1837 {
1838 unsigned int shift = PAGE_SHIFT +
1839 get_order(((int)loongson_iommu_last_bdf + 1) * entry_size);
1840
1841 return 1UL << shift;
1842 }
1843
1844 static int __init loongson_iommu_ivrs_init(void)
1845 {
1846 int ret = 0;
1847 acpi_status status;
1848 struct acpi_table_header *ivrs_base;
1849
1850 status = acpi_get_table("IVRS", 0, &ivrs_base);
1851 if (status == AE_NOT_FOUND) {
1852 pr_info("%s get ivrs table failed\n", __func__);
1853 return -ENODEV;
1854 }
1855
1856 /*
1857 * Validate checksum here so we don't need to do it when
1858 * we actually parse the table
1859 */
1860 ret = check_ivrs_checksum(ivrs_base);
1861 if (ret)
1862 goto out;
1863
1864 loongson_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
1865 pr_info("Using IVHD type %#x\n", loongson_iommu_target_ivhd_type);
1866
1867 /*
1868 * First parse ACPI tables to find the largest Bus/Dev/Func
1869 * we need to handle. Upon this information the shared data
1870 * structures for the IOMMUs in the system will be allocated
1871 */
1872 ret = find_last_devid_acpi(ivrs_base);
1873 if (ret) {
1874 pr_err("%s find last devid failed\n", __func__);
1875 goto out;
1876 }
1877
1878 rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
1879
1880 /*
1881 * now the data structures are allocated and basically initialized
1882 * start the real acpi table scan
1883 */
1884 ret = init_iommu_all(ivrs_base);
1885
1886 out:
1887 /* Don't leak any ACPI memory */
1888 acpi_put_table(ivrs_base);
1889 ivrs_base = NULL;
1890
1891 return ret;
1892 }
1893
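/*
 * Fallback used when IVRS parsing fails: register a single IOMMU with the
 * hard-coded device id 0xd0 on segment 0 and point every possible BDF at it,
 * mirroring what init_iommu_one() would have done from the ACPI table.
 */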
1894 static int __init loongson_iommu_ivrs_init_stub(void)
1895 {
1896 u32 dev_i;
1897 loongson_iommu *iommu;
1898 struct loongson_iommu_rlookup_entry *rlookupentry = NULL;
1899
1900 /* Use maximum BDF value for DEV_ALL */
1901 update_last_devid(MAX_BDF_NUM);
1902
1903 rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
1904
1905 iommu = kzalloc(sizeof(loongson_iommu), GFP_KERNEL);
1906 if (iommu == NULL) {
1907 pr_info("%s alloc memory for iommu failed\n", __func__);
1908 return -ENOMEM;
1909 }
1910
1911 spin_lock_init(&iommu->domain_bitmap_lock);
1912 spin_lock_init(&iommu->dom_info_lock);
1913 spin_lock_init(&iommu->pgtable_bitmap_lock);
1914 mutex_init(&iommu->loongson_iommu_pgtlock);
1915
1916 /* Add IOMMU to internal data structures */
1917 INIT_LIST_HEAD(&iommu->dom_list);
1918
1919 list_add_tail(&iommu->list, &loongson_iommu_list);
1920
1921 /*
1922 * Copy data from ACPI table entry to the iommu struct
1923 */
1924 iommu->devid = 0xd0;
1925 iommu->segment = 0;
1926
1927 for (dev_i = 0; dev_i <= loongson_iommu_last_bdf; ++dev_i)
1928 set_iommu_for_device(iommu, dev_i);
1929
1930 rlookupentry = lookup_rlooptable(iommu->segment);
1931 if (rlookupentry != NULL) {
1932 /*
1933 * Make sure IOMMU is not considered to translate itself.
1934 * The IVRS table tells us so, but this is a lie!
1935 */
1936 rlookupentry->loongson_iommu_rlookup_table[iommu->devid] = NULL;
1937 }
1938
1939 return 0;
1940 }
1941
1942 static void free_iommu_rlookup_entry(void)
1943 {
1944 loongson_iommu *iommu = NULL;
1945 struct loongson_iommu_rlookup_entry *rlookupentry = NULL;
1946
1947 while (!list_empty(&loongson_iommu_list)) {
1948 iommu = list_first_entry(&loongson_iommu_list, loongson_iommu, list);
1949 list_del(&iommu->list);
1950 kfree(iommu);
1951 }
1952
1953 while (!list_empty(&loongson_rlookup_iommu_list)) {
1954 rlookupentry = list_first_entry(&loongson_rlookup_iommu_list,
1955 struct loongson_iommu_rlookup_entry, list);
1956
1957 list_del(&rlookupentry->list);
1958 if (rlookupentry->loongson_iommu_rlookup_table != NULL) {
1959 free_pages(
1960 (unsigned long)rlookupentry->loongson_iommu_rlookup_table,
1961 get_order(rlookup_table_size));
1962
1963 rlookupentry->loongson_iommu_rlookup_table = NULL;
1964 }
1965
1966 kfree(rlookupentry);
1967 }
1968 }
1969
1970 static int __init loonson_iommu_setup(char *str)
1971 {
1972 if (!str)
1973 return -EINVAL;
1974
1975 while (*str) {
1976 if (!strncmp(str, "on", 2)) {
1977 loongson_iommu_disable = 0;
1978 pr_info("IOMMU enabled\n");
1979 } else if (!strncmp(str, "off", 3)) {
1980 loongson_iommu_disable = 1;
1981 pr_info("IOMMU disabled\n");
1982 }
1983 str += strcspn(str, ",");
1984 while (*str == ',')
1985 str++;
1986 }
1987 return 0;
1988 }
1989 __setup("loongson_iommu=", loonson_iommu_setup);
1990
1991 static const struct pci_device_id loongson_iommu_pci_tbl[] = {
1992 { PCI_DEVICE(0x14, 0x7a1f) },
1993 { 0, }
1994 };
1995
1996 static struct pci_driver loongson_iommu_driver = {
1997 .name = "loongson-iommu",
1998 .probe = loongson_iommu_probe,
1999 .remove = loongson_iommu_remove,
2000 .id_table = loongson_iommu_pci_tbl,
2001 };
2002
2003 static int __init loongson_iommu_driver_init(void)
2004 {
2005 int ret = 0;
2006
2007 if (!loongson_iommu_disable) {
2008 ret = loongson_iommu_ivrs_init();
2009 if (ret < 0) {
2010 free_iommu_rlookup_entry();
2011 pr_err("Failed to init iommu by ivrs\n");
2012 ret = loongson_iommu_ivrs_init_stub();
2013 if (ret < 0) {
2014 free_iommu_rlookup_entry();
2015 pr_err("Failed to init iommu by stub\n");
2016 return ret;
2017 }
2018 }
2019
2020 ret = pci_register_driver(&loongson_iommu_driver);
2021 if (ret < 0) {
2022 pr_err("Failed to register IOMMU driver\n");
2023 return ret;
2024 }
2025 }
2026
2027 return ret;
2028 }
2029
2030 static void __exit loongson_iommu_driver_exit(void)
2031 {
2032 if (!loongson_iommu_disable) {
2033 free_iommu_rlookup_entry();
2034 pci_unregister_driver(&loongson_iommu_driver);
2035 }
2036 }
2037
2038 module_init(loongson_iommu_driver_init);
2039 module_exit(loongson_iommu_driver_exit);
2040