xref: /kernel/linux/linux-6.6/arch/riscv/mm/tlbflush.c (revision 62306a36)
// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>

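/*
 * Flush all non-global TLB entries tagged with @asid on the local hart:
 * sfence.vma with rs1 = x0 and rs2 = @asid invalidates every
 * address-translation cache entry for that ASID.  Without an ASID
 * (FLUSH_TLB_NO_ASID), fall back to a full local flush.
 */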
static inline void local_flush_tlb_all_asid(unsigned long asid)
{
	if (asid != FLUSH_TLB_NO_ASID)
		__asm__ __volatile__ ("sfence.vma x0, %0"
				:
				: "r" (asid)
				: "memory");
	else
		local_flush_tlb_all();
}

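/*
 * Flush the local TLB entry covering the virtual address @addr for @asid
 * (sfence.vma with rs1 = @addr, rs2 = @asid).  Without an ASID, flush
 * @addr across all address spaces.
 */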
static inline void local_flush_tlb_page_asid(unsigned long addr,
		unsigned long asid)
{
	if (asid != FLUSH_TLB_NO_ASID)
		__asm__ __volatile__ ("sfence.vma %0, %1"
				:
				: "r" (addr), "r" (asid)
				: "memory");
	else
		local_flush_tlb_page(addr);
}

/*
 * Flush entire TLB if number of entries to be flushed is greater
 * than the threshold below.
 */
static unsigned long tlb_flush_all_threshold __read_mostly = 64;

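/*
 * Flush the range page by page, unless the number of @stride-sized
 * entries exceeds tlb_flush_all_threshold, in which case one full flush
 * of @asid is cheaper than a long run of sfence.vma instructions.
 */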
static void local_flush_tlb_range_threshold_asid(unsigned long start,
						 unsigned long size,
						 unsigned long stride,
						 unsigned long asid)
{
	unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride);
	int i;

	if (nr_ptes_in_range > tlb_flush_all_threshold) {
		local_flush_tlb_all_asid(asid);
		return;
	}

	for (i = 0; i < nr_ptes_in_range; ++i) {
		local_flush_tlb_page_asid(start, asid);
		start += stride;
	}
}

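/*
 * Pick the cheapest local flush for the range: a single-page flush when
 * the range fits in one stride, a full flush when the caller asks for the
 * whole address space (FLUSH_TLB_MAX_SIZE), and the threshold walk
 * otherwise.
 */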
static inline void local_flush_tlb_range_asid(unsigned long start,
		unsigned long size, unsigned long stride, unsigned long asid)
{
	if (size <= stride)
		local_flush_tlb_page_asid(start, asid);
	else if (size == FLUSH_TLB_MAX_SIZE)
		local_flush_tlb_all_asid(asid);
	else
		local_flush_tlb_range_threshold_asid(start, size, stride, asid);
}

/* Flush a range of kernel pages without broadcasting */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	local_flush_tlb_range_asid(start, end - start, PAGE_SIZE, FLUSH_TLB_NO_ASID);
}

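/* IPI callback: flush the whole TLB on the receiving CPU. */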
static void __ipi_flush_tlb_all(void *info)
{
	local_flush_tlb_all();
}

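/*
 * Flush the TLBs of all online CPUs, either by IPI when the platform uses
 * IPIs for remote fences, or with a single SBI rfence call otherwise.
 */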
void flush_tlb_all(void)
{
	if (riscv_use_ipi_for_rfence())
		on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
	else
		sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
}

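/*
 * Bundles the flush arguments so they fit through the single void *info
 * pointer that on_each_cpu_mask() hands to the IPI callback.
 */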
struct flush_tlb_range_data {
	unsigned long asid;
	unsigned long start;
	unsigned long size;
	unsigned long stride;
};

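/* IPI callback: unpack the arguments and flush the range locally. */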
static void __ipi_flush_tlb_range_asid(void *info)
{
	struct flush_tlb_range_data *d = info;

	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}

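/*
 * Common helper behind all the flush_tlb_*() variants.  A NULL @mm means a
 * kernel flush: no ASID, broadcast to every online CPU.  For a user @mm
 * the flush is confined to the CPUs in mm_cpumask(); get_cpu() disables
 * preemption so the local-vs-broadcast decision cannot be invalidated by
 * migration, and the ASID is taken from mm->context.id when the ASID
 * allocator is in use.
 */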
static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
			      unsigned long size, unsigned long stride)
{
	struct flush_tlb_range_data ftd;
	const struct cpumask *cmask;
	unsigned long asid = FLUSH_TLB_NO_ASID;
	bool broadcast;

	if (mm) {
		unsigned int cpuid;

		cmask = mm_cpumask(mm);
		if (cpumask_empty(cmask))
			return;

		cpuid = get_cpu();
		/* check if the tlbflush needs to be sent to other CPUs */
		broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;

		if (static_branch_unlikely(&use_asid_allocator))
			asid = atomic_long_read(&mm->context.id) & asid_mask;
	} else {
		cmask = cpu_online_mask;
		broadcast = true;
	}

	if (broadcast) {
		if (riscv_use_ipi_for_rfence()) {
			ftd.asid = asid;
			ftd.start = start;
			ftd.size = size;
			ftd.stride = stride;
			on_each_cpu_mask(cmask,
					 __ipi_flush_tlb_range_asid,
					 &ftd, 1);
		} else {
			sbi_remote_sfence_vma_asid(cmask,
						   start, size, asid);
		}
	} else {
		local_flush_tlb_range_asid(start, size, stride, asid);
	}

	if (mm)
		put_cpu();
}

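/* Flush the entire user address space of @mm. */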
void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_range(mm, 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
}

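/* Flush a user range of @mm, using @page_size as the flushing stride. */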
void flush_tlb_mm_range(struct mm_struct *mm,
			unsigned long start, unsigned long end,
			unsigned int page_size)
{
	__flush_tlb_range(mm, start, end - start, page_size);
}

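/* Flush the single page mapped at @addr in @vma's address space. */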
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	__flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
}

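/* Flush a user range at base-page granularity. */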
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	__flush_tlb_range(vma->vm_mm, start, end - start, PAGE_SIZE);
}

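/* Flush a kernel range on all online CPUs (NULL mm, so no ASID). */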
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	__flush_tlb_range(NULL, start, end - start, PAGE_SIZE);
}

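/*
 * For transparent hugepages, flush with a PMD_SIZE stride so that one
 * sfence.vma covers each PMD-mapped huge page in the range.
 */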
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	__flush_tlb_range(vma->vm_mm, start, end - start, PMD_SIZE);
}
#endif
