// SPDX-License-Identifier: GPL-2.0-only
/*
 * Loongson IOMMU Driver
 *
 * Copyright (C) 2020-2021 Loongson Technology Ltd.
 * Author: Lv Chen <lvchen@loongson.cn>
 *         Wang Yang <wangyang@loongson.cn>
 */

#ifndef LOONGSON_IOMMU_H
#define LOONGSON_IOMMU_H

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/sizes.h>
#include <linux/spinlock.h>
#include <asm/addrspace.h>

#define IOVA_WIDTH 47

/* Bit value definition for I/O PTE fields */
#define IOMMU_PTE_PR (1ULL << 0) /* Present */
#define IOMMU_PTE_HP (1ULL << 1) /* HugePage */
#define IOMMU_PTE_IR (1ULL << 2) /* Readable */
#define IOMMU_PTE_IW (1ULL << 3) /* Writeable */
#define IOMMU_PTE_RW (IOMMU_PTE_PR | IOMMU_PTE_IR | IOMMU_PTE_IW)

#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_PR)
#define IOMMU_PTE_HUGEPAGE(pte) ((pte) & IOMMU_PTE_HP)

#define iommu_pt_present(shadow_entry) ((*(shadow_entry)) != 0)

/*
 * A shadow_entry is allocated with kmalloc(), so its address is always
 * ARCH_DMA_MINALIGN-aligned.  When a shadow_entry address is not
 * ARCH_DMA_MINALIGN-aligned, the entry is taken to describe a large
 * (huge) page.
 */
#define iommu_pt_huge(shd_entry_ptr) ((*(shd_entry_ptr)) & IOMMU_PTE_HP)
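
/*
 * Illustrative sketch only (not used by the driver): one way the alignment
 * convention described above could be tested, using the generic
 * IS_ALIGNED() helper.  The 'entry' pointer here is hypothetical.
 *
 *	bool looks_huge = !IS_ALIGNED((unsigned long)entry, ARCH_DMA_MINALIGN);
 */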

#define LA_IOMMU_PGSIZE (SZ_16K | SZ_32M)

#define IOMMU_PT_LEVEL0 0x00
#define IOMMU_PT_LEVEL1 0x01

/* IOMMU page table */
#define IOMMU_PAGE_SHIFT PAGE_SHIFT
#define IOMMU_PAGE_SIZE (_AC(1, UL) << IOMMU_PAGE_SHIFT)
#define IOMMU_LEVEL_STRIDE (IOMMU_PAGE_SHIFT - 3)
#define IOMMU_PTRS_PER_LEVEL (IOMMU_PAGE_SIZE >> 3)
#define IOMMU_LEVEL_SHIFT(n) (((n) * IOMMU_LEVEL_STRIDE) + IOMMU_PAGE_SHIFT)
#define IOMMU_LEVEL_SIZE(n) (_AC(1, UL) << (((n) * IOMMU_LEVEL_STRIDE) + IOMMU_PAGE_SHIFT))
#define IOMMU_LEVEL_MASK(n) (~(IOMMU_LEVEL_SIZE(n) - 1))
#define IOMMU_LEVEL_MAX DIV_ROUND_UP((IOVA_WIDTH - IOMMU_PAGE_SHIFT), IOMMU_LEVEL_STRIDE)
#define IOMMU_PAGE_MASK (~(IOMMU_PAGE_SIZE - 1))

#define IOMMU_HPAGE_SIZE (1UL << IOMMU_LEVEL_SHIFT(IOMMU_PT_LEVEL1))
#define IOMMU_HPAGE_MASK (~(IOMMU_HPAGE_SIZE - 1))
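
/*
 * For reference, assuming the common 16KB base page size on LoongArch
 * (PAGE_SHIFT == 14), the macros above work out to:
 *
 *	IOMMU_PAGE_SIZE      = 16KB
 *	IOMMU_LEVEL_STRIDE   = 11 (2048 eight-byte entries per level)
 *	IOMMU_LEVEL_SHIFT(0) = 14, IOMMU_LEVEL_SIZE(0) = 16KB
 *	IOMMU_LEVEL_SHIFT(1) = 25, IOMMU_LEVEL_SIZE(1) = 32MB
 *	IOMMU_LEVEL_MAX      = DIV_ROUND_UP(47 - 14, 11) = 3
 *	IOMMU_HPAGE_SIZE     = 32MB
 *
 * which matches LA_IOMMU_PGSIZE = SZ_16K | SZ_32M.  With a different
 * PAGE_SHIFT these values change accordingly.
 */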

/* Virtio pages use a size of 16KB */
#define LA_VIRTIO_PAGE_SHIFT 14
#define LA_VIRTIO_PAGE_SIZE (_AC(1, UL) << LA_VIRTIO_PAGE_SHIFT)
#define LA_VIRTIO_PAGE_MASK (~((1ULL << LA_VIRTIO_PAGE_SHIFT) - 1))

/* Bits of iommu map address space field */
#define LA_IOMMU_PFN_LO 0x0
#define PFN_LO_SHIFT 12
#define LA_IOMMU_PFN_HI 0x4
#define PFN_HI_MASK 0x3ffff
#define LA_IOMMU_VFN_LO 0x8
#define VFN_LO_SHIFT 12
#define LA_IOMMU_VFN_HI 0xC
#define VFN_HI_MASK 0x3ffff

/* wired | index | domain | shift */
#define LA_IOMMU_WIDS 0x10
/* valid | busy | tlbar/aw | cmd */
#define LA_IOMMU_VBTC 0x14
#define IOMMU_PGTABLE_BUSY (1 << 16)
/* enable | index | valid | domain | bdf */
#define LA_IOMMU_EIVDB 0x18
/* enable | valid | cmd */
#define LA_IOMMU_CMD 0x1C
#define LA_IOMMU_PGD0_LO 0x20
#define LA_IOMMU_PGD0_HI 0x24
#define STEP_PGD 0x8
#define STEP_PGD_SHIFT 3
#define LA_IOMMU_PGD_LO(domain_id) \
	(LA_IOMMU_PGD0_LO + ((domain_id) << STEP_PGD_SHIFT))
#define LA_IOMMU_PGD_HI(domain_id) \
	(LA_IOMMU_PGD0_HI + ((domain_id) << STEP_PGD_SHIFT))
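
/*
 * Worked example only: each domain's PGD register pair occupies STEP_PGD
 * (8) bytes, so domain 2 has its PGD_LO at 0x20 + (2 << 3) = 0x30 and its
 * PGD_HI at 0x34.
 */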

#define LA_IOMMU_DIR_CTRL0 0xA0
#define LA_IOMMU_DIR_CTRL1 0xA4
#define LA_IOMMU_DIR_CTRL(x) (LA_IOMMU_DIR_CTRL0 + ((x) << 2))

#define LA_IOMMU_SAFE_BASE_HI 0xE0
#define LA_IOMMU_SAFE_BASE_LO 0xE4
#define LA_IOMMU_EX_ADDR_LO 0xE8
#define LA_IOMMU_EX_ADDR_HI 0xEC

#define LA_IOMMU_PFM_CNT_EN 0x100

#define LA_IOMMU_RD_HIT_CNT_0 0x110
#define LA_IOMMU_RD_MISS_CNT_0 0x114
#define LA_IOMMU_WR_HIT_CNT_0 0x118
#define LA_IOMMU_WR_MISS_CNT_0 0x11C
#define LA_IOMMU_RD_HIT_CNT_1 0x120
#define LA_IOMMU_RD_MISS_CNT_1 0x124
#define LA_IOMMU_WR_HIT_CNT_1 0x128
#define LA_IOMMU_WR_MISS_CNT_1 0x12C
#define LA_IOMMU_RD_HIT_CNT_2 0x130
#define LA_IOMMU_RD_MISS_CNT_2 0x134
#define LA_IOMMU_WR_HIT_CNT_2 0x138
#define LA_IOMMU_WR_MISS_CNT_2 0x13C

#define MAX_DOMAIN_ID 16
#define MAX_ATTACHED_DEV_ID 16
#define MAX_PAGES_NUM (SZ_128M / IOMMU_PAGE_SIZE)

#define iommu_ptable_end(addr, end, level) \
({	unsigned long __boundary = ((addr) + IOMMU_LEVEL_SIZE(level)) & IOMMU_LEVEL_MASK(level); \
	(__boundary - 1 < (end) - 1) ? __boundary : (end); \
})

/* To find an entry in an iommu page table directory */
#define iommu_shadow_index(addr, level) \
	(((addr) >> (((level) * IOMMU_LEVEL_STRIDE) + IOMMU_PAGE_SHIFT)) & (IOMMU_PTRS_PER_LEVEL - 1))
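
/*
 * Usage sketch (illustrative only, not part of the driver): the two helpers
 * above are typically combined to walk a range one level-sized block at a
 * time.  The loop body below is hypothetical.
 *
 *	unsigned long next;
 *
 *	do {
 *		unsigned long idx = iommu_shadow_index(addr, level);
 *		next = iommu_ptable_end(addr, end, level);
 *		// ... operate on table slot 'idx' for [addr, next) ...
 *		addr = next;
 *	} while (addr != end);
 */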

/* IOMMU page table entry */
typedef struct { unsigned long iommu_pte; } iommu_pte;

typedef struct loongson_iommu {
	struct list_head list;
	spinlock_t domain_bitmap_lock;		/* Lock for domain allocating */
	spinlock_t dom_info_lock;		/* Lock for priv->list */
	spinlock_t pgtable_bitmap_lock;		/* Lock for bitmap of page table */
	struct mutex loongson_iommu_pgtlock;	/* Lock for iommu page table */
	void *domain_bitmap;			/* Bitmap of global domains */
	void *devtable_bitmap;			/* Bitmap of devtable */
	void *pgtable_bitmap;			/* Bitmap of pages used for page tables */
	struct list_head dom_list;		/* List of all domain privates */
	u16 devid;				/* PCI device id of the IOMMU device */
	int segment;				/* PCI segment# */
	void *membase;				/* IOMMU MMIO register base */
	void *pgtbase;				/* Base of the memory used for page tables */
	unsigned long maxpages;			/* Max number of pages for page tables */
} loongson_iommu;

struct loongson_iommu_rlookup_entry {
	struct list_head list;
	struct loongson_iommu **loongson_iommu_rlookup_table;
	int pcisegment;
};

/* shadow page table entry */
typedef struct spt_entry {
	unsigned long *gmem_ptable;	/* gmem entry address base */
	unsigned long *shadow_ptable;	/* virtual address base for shadow page */
	int index;			/* index within the 128M gmem */
	int dirty;
	int present;
} spt_entry;

typedef struct iommu_info {
	struct list_head list;		/* for loongson_iommu_pri->iommu_devlist */
	spt_entry *shadow_pgd;
	struct loongson_iommu *iommu;
	spinlock_t devlock;		/* priv dev list lock */
	struct list_head dev_list;	/* List of all devices in this domain iommu */
	unsigned int dev_cnt;		/* devices assigned to this domain iommu */
	short id;
} iommu_info;

/* One VM corresponds to one domain, and each domain has a priv (dom_info) */
typedef struct dom_info {
	struct list_head list;		/* For list of all domains */
	struct list_head iommu_devlist;
	struct iommu_domain domain;
	void *mmio_pgd;			/* 0x10000000~0x8fffffff */
	spinlock_t lock;		/* Lock for priv->iommu_devlist */
} dom_info;

/* A device for passthrough */
struct loongson_iommu_dev_data {
	struct list_head list;		/* for iommu_entry->dev_list */
	struct loongson_iommu *iommu;
	iommu_info *iommu_entry;
	unsigned short bdf;
	int count;
	int index;			/* index in device table */
};

/* Translate a page-table virtual address into its offset from pgtbase */
static inline unsigned long iommu_pgt_v2p(loongson_iommu *iommu, void *va)
{
	return (unsigned long)(va - iommu->pgtbase);
}

/* Return a pointer to the level-'level' entry for 'addr' in 'table_entry' */
static inline unsigned long *iommu_ptable_offset(unsigned long *table_entry,
						 unsigned long addr, int level)
{
	return table_entry + iommu_shadow_index(addr, level);
}

/* Return a pointer to the level-'level' entry for 'addr' in a shadow table */
static inline unsigned long *iommu_shadow_offset(spt_entry *shadow_entry,
						 unsigned long addr, int level)
{
	unsigned long *table_base;

	table_base = shadow_entry->shadow_ptable;

	return table_base + iommu_shadow_index(addr, level);
}

#endif /* LOONGSON_IOMMU_H */