// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/export.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

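/*
 * invtlb operation selectors used below: INVTLB_CURRENT_ALL invalidates
 * every entry in this core's TLB, while the GTRUE/GFALSE variants limit
 * the flush to entries with the global bit set (kernel mappings) or
 * clear (user mappings).
 */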
void local_flush_tlb_all(void)
{
	invtlb_all(INVTLB_CURRENT_ALL, 0, 0);
}
EXPORT_SYMBOL(local_flush_tlb_all);

void local_flush_tlb_user(void)
{
	invtlb_all(INVTLB_CURRENT_GFALSE, 0, 0);
}
EXPORT_SYMBOL(local_flush_tlb_user);

void local_flush_tlb_kernel(void)
{
	invtlb_all(INVTLB_CURRENT_GTRUE, 0, 0);
}
EXPORT_SYMBOL(local_flush_tlb_kernel);

/*
 * All entries common to a mm share an ASID, so we can flush them all at
 * once by bumping the ASID (drop_mmu_context()). If the mm holds no live
 * ASID on this CPU, clearing the CPU from mm_cpumask() is enough: a fresh
 * ASID will be allocated when the mm is activated here again.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (asid_valid(mm, cpu))
		drop_mmu_context(mm, cpu);
	else
		cpumask_clear_cpu(cpu, mm_cpumask(mm));

	preempt_enable();
}

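/*
 * Flush a user address range. Each LoongArch TLB entry maps an even/odd
 * pair of adjacent pages via EntryLo0/EntryLo1, so the range is aligned
 * to a double-page boundary and walked in PAGE_SIZE << 1 steps. For
 * large ranges it is cheaper to drop the whole ASID than to invalidate
 * page by page; the tlbsize/8 vs tlbsize/2 cut-off below is a heuristic.
 */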
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (asid_valid(mm, cpu)) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizestlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			int asid = cpu_asid(cpu, mm);

			while (start < end) {
				invtlb(INVTLB_ADDR_GFALSE_AND_ASID, asid, start);
				start += (PAGE_SIZE << 1);
			}
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	} else {
		cpumask_clear_cpu(cpu, mm_cpumask(mm));
	}
}

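/*
 * Flush a kernel address range. Kernel mappings have the global bit set,
 * so INVTLB_ADDR_GTRUE_OR_ASID invalidates them regardless of the current
 * ASID; the same small-range vs full-flush heuristic as above applies.
 */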
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= (current_cpu_data.tlbsizestlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, 0, start);
			start += (PAGE_SIZE << 1);
		}
	} else {
		local_flush_tlb_kernel();
	}
	local_irq_restore(flags);
}

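/*
 * Flush a single user page: round the address down to the double-page
 * boundary of its even/odd entry pair, then invalidate the entry that
 * matches (ASID, VA) and has the global bit clear.
 */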
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (asid_valid(vma->vm_mm, cpu)) {
		int newpid;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		invtlb(INVTLB_ADDR_GFALSE_AND_ASID, newpid, page);
	} else {
		cpumask_clear_cpu(cpu, mm_cpumask(vma->vm_mm));
	}
}

/*
 * This one is only used for pages with the global bit set, so we don't
 * care much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	page &= (PAGE_MASK << 1);
	invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, 0, page);
}

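/*
 * Install or refresh the TLB entry for a huge page. The TLB is probed
 * first: if an entry for the address already exists it is overwritten in
 * place (indexed write), otherwise a random slot is used. EntryLo0 maps
 * the lower half of the huge page and EntryLo1 the upper half, hence the
 * HPAGE_SIZE >> 1 offset.
 */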
static void __update_hugetlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
#ifdef CONFIG_HUGETLB_PAGE
	int idx;
	unsigned long lo;
	unsigned long flags;

	local_irq_save(flags);

	address &= (PAGE_MASK << 1);
	write_csr_entryhi(address);
	tlb_probe();
	idx = read_csr_tlbidx();
	write_csr_pagesize(PS_HUGE_SIZE);
	lo = pmd_to_entrylo(pte_val(*ptep));
	write_csr_entrylo0(lo);
	write_csr_entrylo1(lo + (HPAGE_SIZE >> 1));

	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	write_csr_pagesize(PS_DEFAULT_SIZE);

	local_irq_restore(flags);
#endif
}

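/*
 * Refresh the TLB entry for @address after a page-table update. With
 * hardware page-table walking (PTW) the hardware refills the TLB itself,
 * so there is nothing to do. Each TLB entry maps a pair of consecutive
 * pages, so both EntryLo registers are (re)written from the PTE pair.
 */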
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	int idx;
	unsigned long flags;

	if (cpu_has_ptw)
		return;

	/*
	 * Handle the debugger faulting in pages for the debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	if (pte_val(*ptep) & _PAGE_HUGE) {
		__update_hugetlb(vma, address, ptep);
		return;
	}

	local_irq_save(flags);

	/* Align to the even PTE of the pair mapped by one TLB entry */
	if ((unsigned long)ptep & sizeof(pte_t))
		ptep--;

	address &= (PAGE_MASK << 1);
	write_csr_entryhi(address);
	tlb_probe();
	idx = read_csr_tlbidx();
	write_csr_pagesize(PS_DEFAULT_SIZE);
	write_csr_entrylo0(pte_val(*ptep++));
	write_csr_entrylo1(pte_val(*ptep));
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();

	local_irq_restore(flags);
}

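/*
 * Program the page-table walker CSRs. Each level is described by two
 * fields: the bit index where its VA slice begins and the slice width
 * (PAGE_SHIFT - 3 bits, since a page holds 2^(PAGE_SHIFT - 3) 8-byte
 * descriptors). E.g. with the default 16KB pages and three levels:
 * pte_i = 14, pmd_i = 25, pgd_i = 36, each 11 bits wide, covering a
 * 47-bit user virtual address. PGDH/PGDL hold the kernel- and user-half
 * page-table roots; the user root stays invalid_pg_dir until a user mm
 * is activated.
 */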
static void setup_ptwalker(void)
{
	unsigned long pwctl0, pwctl1;
	unsigned long pgd_i = 0, pgd_w = 0;
	unsigned long pud_i = 0, pud_w = 0;
	unsigned long pmd_i = 0, pmd_w = 0;
	unsigned long pte_i = 0, pte_w = 0;

	pgd_i = PGDIR_SHIFT;
	pgd_w = PAGE_SHIFT - 3;
#if CONFIG_PGTABLE_LEVELS > 3
	pud_i = PUD_SHIFT;
	pud_w = PAGE_SHIFT - 3;
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	pmd_i = PMD_SHIFT;
	pmd_w = PAGE_SHIFT - 3;
#endif
	pte_i = PAGE_SHIFT;
	pte_w = PAGE_SHIFT - 3;

	pwctl0 = pte_i | pte_w << 5 | pmd_i << 10 | pmd_w << 15 | pud_i << 20 | pud_w << 25;
	pwctl1 = pgd_i | pgd_w << 6;

	if (cpu_has_ptw)
		pwctl1 |= CSR_PWCTL1_PTW;

	csr_write64(pwctl0, LOONGARCH_CSR_PWCTL0);
	csr_write64(pwctl1, LOONGARCH_CSR_PWCTL1);
	csr_write64((long)swapper_pg_dir, LOONGARCH_CSR_PGDH);
	csr_write64((long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
	csr_write64((long)smp_processor_id(), LOONGARCH_CSR_TMID);
}

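/*
 * Dump the page-table bit layout as #defines via pr_debug(), in a form
 * that can be pasted into assembler sources when debugging the TLB
 * handlers.
 */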
static void output_pgtable_bits_defines(void)
{
#define pr_define(fmt, ...)					\
	pr_debug("#define " fmt, ##__VA_ARGS__)

	pr_debug("#include <asm/asm.h>\n");
	pr_debug("#include <asm/regdef.h>\n");
	pr_debug("\n");

	pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
	pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
	pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
	pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
	pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
	pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
	pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
	pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
	pr_define("PFN_PTE_SHIFT %d\n", PFN_PTE_SHIFT);
	pr_debug("\n");
}

#ifdef CONFIG_NUMA
unsigned long pcpu_handlers[NR_CPUS];
#endif
extern long exception_handlers[VECSIZE * 128 / sizeof(long)];

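/*
 * Install the TLB exception handlers. The boot CPU copies the refill
 * handler to the dedicated refill entry and registers the load/store/
 * modify handlers (PTW variants when the hardware walker is present)
 * plus the protection-fault handlers. On NUMA systems, secondary CPUs
 * point their exception base at a node-local copy of the vectors so
 * handler fetches stay on the local node.
 */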
static void setup_tlb_handler(int cpu)
{
	setup_ptwalker();
	local_flush_tlb_all();

	/* The TLB handlers are generated only once */
	if (cpu == 0) {
		memcpy((void *)tlbrentry, handle_tlb_refill, 0x80);
		local_flush_icache_range(tlbrentry, tlbrentry + 0x80);
		if (!cpu_has_ptw) {
			set_handler(EXCCODE_TLBI * VECSIZE, handle_tlb_load, VECSIZE);
			set_handler(EXCCODE_TLBL * VECSIZE, handle_tlb_load, VECSIZE);
			set_handler(EXCCODE_TLBS * VECSIZE, handle_tlb_store, VECSIZE);
			set_handler(EXCCODE_TLBM * VECSIZE, handle_tlb_modify, VECSIZE);
		} else {
			set_handler(EXCCODE_TLBI * VECSIZE, handle_tlb_load_ptw, VECSIZE);
			set_handler(EXCCODE_TLBL * VECSIZE, handle_tlb_load_ptw, VECSIZE);
			set_handler(EXCCODE_TLBS * VECSIZE, handle_tlb_store_ptw, VECSIZE);
			set_handler(EXCCODE_TLBM * VECSIZE, handle_tlb_modify_ptw, VECSIZE);
		}
		set_handler(EXCCODE_TLBNR * VECSIZE, handle_tlb_protect, VECSIZE);
		set_handler(EXCCODE_TLBNX * VECSIZE, handle_tlb_protect, VECSIZE);
		set_handler(EXCCODE_TLBPE * VECSIZE, handle_tlb_protect, VECSIZE);
	} else {
		int vec_sz __maybe_unused;
		void *addr __maybe_unused;
		struct page *page __maybe_unused;

		/* Avoid lockdep warning */
		rcu_cpu_starting(cpu);

#ifdef CONFIG_NUMA
		vec_sz = sizeof(exception_handlers);

		if (pcpu_handlers[cpu])
			return;

		page = alloc_pages_node(cpu_to_node(cpu), GFP_ATOMIC, get_order(vec_sz));
		if (!page)
			return;

		addr = page_address(page);
		pcpu_handlers[cpu] = (unsigned long)addr;
		memcpy((void *)addr, (void *)eentry, vec_sz);
		local_flush_icache_range((unsigned long)addr, (unsigned long)addr + vec_sz);
		csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_EENTRY);
		csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_MERRENTRY);
		csr_write64(pcpu_handlers[cpu] + 80*VECSIZE, LOONGARCH_CSR_TLBRENTRY);
#endif
	}
}

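/*
 * Per-CPU TLB initialization: set the default page size for regular,
 * STLB and refill translations, then install the handlers.
 */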
void tlb_init(int cpu)
{
	write_csr_pagesize(PS_DEFAULT_SIZE);
	write_csr_stlbpgsize(PS_DEFAULT_SIZE);
	write_csr_tlbrefill_pagesize(PS_DEFAULT_SIZE);

	setup_tlb_handler(cpu);
	output_pgtable_bits_defines();
}