xref: /kernel/linux/linux-5.10/arch/loongarch/mm/tlb.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Loongson Technology Corporation Limited
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/export.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

extern void *exception_table[];

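/*
 * Local (this-CPU-only) TLB flush helpers. Each issues a single INVTLB
 * operation: INVTLB_CURRENT_ALL clears every entry, while the GFALSE/GTRUE
 * variants clear only entries with the global bit cleared (user mappings)
 * or set (kernel mappings), respectively.
 */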
void local_flush_tlb_all(void)
{
	invtlb_all(INVTLB_CURRENT_ALL, 0, 0);
}
EXPORT_SYMBOL(local_flush_tlb_all);

void local_flush_tlb_user(void)
{
	invtlb_all(INVTLB_CURRENT_GFALSE, 0, 0);
}
EXPORT_SYMBOL(local_flush_tlb_user);

void local_flush_tlb_kernel(void)
{
	invtlb_all(INVTLB_CURRENT_GTRUE, 0, 0);
}
EXPORT_SYMBOL(local_flush_tlb_kernel);

/*
 * All entries common to a mm share an asid. To effectively flush
 * these entries, we just bump the asid.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (asid_valid(mm, cpu)) {
		drop_mmu_context(mm, cpu);
	} else {
		cpumask_clear_cpu(cpu, mm_cpumask(mm));
	}

	preempt_enable();
}

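/*
 * Flush a user address range on the local CPU. Each TLB entry maps an
 * even/odd pair of pages (EntryLo0/EntryLo1), so the range is rounded to a
 * double-page granule and stepped by PAGE_SIZE << 1. If the range covers
 * more entries than a heuristic fraction of the TLB, it is cheaper to drop
 * the whole mm context (take a fresh ASID) than to invalidate page by page.
 */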
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (asid_valid(mm, cpu)) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizestlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			int asid = cpu_asid(cpu, mm);

			while (start < end) {
				invtlb(INVTLB_ADDR_GFALSE_AND_ASID, asid, start);
				start += (PAGE_SIZE << 1);
			}
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	} else {
		cpumask_clear_cpu(cpu, mm_cpumask(mm));
	}
}

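/*
 * Flush a kernel address range on the local CPU. Kernel mappings are global
 * (not ASID-tagged), so INVTLB_ADDR_GTRUE_OR_ASID is used with ASID 0; large
 * ranges fall back to flushing all global entries instead.
 */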
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= (current_cpu_data.tlbsizestlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {
		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, 0, start);
			start += (PAGE_SIZE << 1);
		}
	} else {
		local_flush_tlb_kernel();
	}
	local_irq_restore(flags);
}

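/*
 * Flush a single user page on the local CPU, matching both the double-page
 * aligned address and the mm's current ASID.
 */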
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (asid_valid(vma->vm_mm, cpu)) {
		int newpid;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		invtlb(INVTLB_ADDR_GFALSE_AND_ASID, newpid, page);
	} else {
		cpumask_clear_cpu(cpu, mm_cpumask(vma->vm_mm));
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	page &= (PAGE_MASK << 1);
	invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, 0, page);
}

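/*
 * Preload a huge-page TLB entry. The entry is probed first: if it already
 * exists it is overwritten in place (indexed write), otherwise a random
 * entry is replaced. Both EntryLo registers are derived from the same PMD,
 * with EntryLo1 pointing at the second half of the huge page.
 */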
static void __update_hugetlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
#ifdef CONFIG_HUGETLB_PAGE
	int idx;
	unsigned long lo;
	unsigned long flags;

	local_irq_save(flags);

	address &= (PAGE_MASK << 1);
	write_csr_entryhi(address);
	tlb_probe();
	idx = read_csr_tlbidx();
	write_csr_pagesize(PS_HUGE_SIZE);
	lo = pmd_to_entrylo(pte_val(*ptep));
	write_csr_entrylo0(lo);
	write_csr_entrylo1(lo + (HPAGE_SIZE >> 1));

	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	write_csr_pagesize(PS_DEFAULT_SIZE);

	local_irq_restore(flags);
#endif
}

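/*
 * Refresh the TLB entry for a normal page after its PTE has been updated.
 * With a hardware page-table walker (cpu_has_ptw) this is unnecessary and
 * the function returns early. The PTE pointer is aligned down to the even
 * slot of its even/odd pair so that both EntryLo registers can be written.
 */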
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	int idx;
	unsigned long flags;

	if (cpu_has_ptw)
		return;

	/*
	 * Handle the debugger faulting in pages for the debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	if (pte_val(*ptep) & _PAGE_HUGE) {
		__update_hugetlb(vma, address, ptep);
		return;
	}

	local_irq_save(flags);

	if ((unsigned long)ptep & sizeof(pte_t))
		ptep--;

	address &= (PAGE_MASK << 1);
	write_csr_entryhi(address);
	tlb_probe();
	idx = read_csr_tlbidx();
	write_csr_pagesize(PS_DEFAULT_SIZE);
	write_csr_entrylo0(pte_val(*ptep++));
	write_csr_entrylo1(pte_val(*ptep));
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();

	local_irq_restore(flags);
}

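/*
 * Program the page-table walker CSRs. PWCTL0 and PWCTL1 encode, for each
 * page-table level, the bit index at which that level's table index starts
 * in the virtual address and the width of that index, packed at the shift
 * amounts used below. As an illustrative, configuration-dependent example:
 * with 16KB pages, 8-byte PTEs and three levels, pte_i = 14, pmd_i = 25,
 * pgd_i = 36, and every width field is PAGE_SHIFT - 3 = 11 bits. If the CPU
 * has a hardware page-table walker, it is enabled via CSR_PWCTL1_PTW.
 */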
static void setup_ptwalker(void)
{
	unsigned long pwctl0, pwctl1;
	unsigned long pgd_i = 0, pgd_w = 0;
	unsigned long pud_i = 0, pud_w = 0;
	unsigned long pmd_i = 0, pmd_w = 0;
	unsigned long pte_i = 0, pte_w = 0;

	pgd_i = PGDIR_SHIFT;
	pgd_w = PAGE_SHIFT - 3;
#if CONFIG_PGTABLE_LEVELS > 3
	pud_i = PUD_SHIFT;
	pud_w = PAGE_SHIFT - 3;
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	pmd_i = PMD_SHIFT;
	pmd_w = PAGE_SHIFT - 3;
#endif
	pte_i = PAGE_SHIFT;
	pte_w = PAGE_SHIFT - 3;

	pwctl0 = pte_i | pte_w << 5 | pmd_i << 10 | pmd_w << 15 | pud_i << 20 | pud_w << 25;
	pwctl1 = pgd_i | pgd_w << 6;

	if (cpu_has_ptw)
		pwctl1 |= CSR_PWCTL1_PTW;

	csr_write64(pwctl0, LOONGARCH_CSR_PWCTL0);
	csr_write64(pwctl1, LOONGARCH_CSR_PWCTL1);
	csr_write64((long)swapper_pg_dir, LOONGARCH_CSR_PGDH);
	csr_write64((long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
	csr_write64((long)smp_processor_id(), LOONGARCH_CSR_TMID);
}

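/*
 * Dump the page-table bit layout as "#define" lines via pr_debug(), a
 * debugging aid for checking assembler code against the C definitions.
 */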
static void output_pgtable_bits_defines(void)
{
#define pr_define(fmt, ...)					\
	pr_debug("#define " fmt, ##__VA_ARGS__)

	pr_debug("#include <asm/asm.h>\n");
	pr_debug("#include <asm/regdef.h>\n");
	pr_debug("\n");

	pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
	pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
	pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
	pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
	pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
	pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
	pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
	pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
	pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT);
	pr_debug("\n");
}

#ifdef CONFIG_NUMA
static unsigned long pcpu_handlers[NR_CPUS];
#endif
extern long exception_handlers[VECSIZE * 128 / sizeof(long)];

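/*
 * Install the TLB exception handlers for one CPU. The boot CPU copies the
 * refill handler and wires up the vectored exception table; on NUMA systems
 * each secondary CPU instead gets a node-local copy of the whole exception
 * vector area, so that handler fetches can be served from local memory.
 */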
static void setup_tlb_handler(int cpu)
{
	int i;

	setup_ptwalker();
	local_flush_tlb_all();

	if (cpu_has_ptw) {
		exception_table[EXCCODE_TLBI] = handle_tlb_load_ptw;
		exception_table[EXCCODE_TLBL] = handle_tlb_load_ptw;
		exception_table[EXCCODE_TLBS] = handle_tlb_store_ptw;
		exception_table[EXCCODE_TLBM] = handle_tlb_modify_ptw;
	}

	/* The TLB handlers are generated only once */
	if (cpu == 0) {
		memcpy((void *)tlbrentry, handle_tlb_refill, 0x80);
		local_flush_icache_range(tlbrentry, tlbrentry + 0x80);

		for (i = EXCCODE_TLBL; i <= EXCCODE_TLBPE; i++)
			set_handler(i * VECSIZE, exception_table[i], VECSIZE);
	} else {
		int vec_sz __maybe_unused;
		void *addr __maybe_unused;
		struct page *page __maybe_unused;

		/* Avoid lockdep warning */
		rcu_cpu_starting(cpu);

#ifdef CONFIG_NUMA
		vec_sz = sizeof(exception_handlers);

		if (pcpu_handlers[cpu])
			return;

		page = alloc_pages_node(cpu_to_node(cpu), GFP_ATOMIC, get_order(vec_sz));
		if (!page)
			return;

		addr = page_address(page);
		pcpu_handlers[cpu] = (unsigned long)addr;
		memcpy((void *)addr, (void *)eentry, vec_sz);
		local_flush_icache_range((unsigned long)addr, (unsigned long)addr + vec_sz);
		csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_EENTRY);
		csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_MERRENTRY);
		csr_write64(pcpu_handlers[cpu] + 80 * VECSIZE, LOONGARCH_CSR_TLBRENTRY);
#endif
	}
}

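/*
 * Per-CPU TLB initialization: set the default page size for the regular,
 * STLB and refill paths, then install the handlers and dump the page-table
 * bit defines.
 */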
void tlb_init(int cpu)
{
	write_csr_pagesize(PS_DEFAULT_SIZE);
	write_csr_stlbpgsize(PS_DEFAULT_SIZE);
	write_csr_tlbrefill_pagesize(PS_DEFAULT_SIZE);

	setup_tlb_handler(cpu);
	output_pgtable_bits_defines();
}