// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2010 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>

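/*
 * Per-CPU ASID generation counter used by the Xtensa mmu_context code
 * to hand out address-space IDs; ASID_USER_FIRST is the first ID valid
 * for user address spaces (see asm/mmu_context.h).
 */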
DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int);

/*
 * This routine handles page faults.  It determines the address and the
 * problem, then passes it off to the appropriate routine.
 *
 * Note: does not handle Miss and MultiHit.
 */
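
/*
 * TLB Miss and MultiHit exceptions are expected to be handled by the
 * low-level fast exception paths (fast_second_level_miss and friends
 * in arch/xtensa/kernel/entry.S) before they can reach this C handler.
 */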

void do_page_fault(struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int exccause = regs->exccause;
	unsigned int address = regs->excvaddr;
	int code = SEGV_MAPERR;
	int is_write, is_exec;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	/* We fault in kernel-space virtual memory on demand. The
	 * 'reference' page table is init_mm.pgd.
	 */
	if (address >= TASK_SIZE && !user_mode(regs))
		goto vmalloc_fault;

	/* If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm) {
		bad_page_fault(regs, address, SIGSEGV);
		return;
	}

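	/*
	 * Classify the fault from EXCCAUSE: a store to a page whose
	 * protection attributes forbid writing raises
	 * EXCCAUSE_STORE_CACHE_ATTRIBUTE, while instruction fetches
	 * raise one of the ITLB/fetch-attribute causes. Anything else
	 * is treated as a data read.
	 */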
	is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
	is_exec =  (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
		    exccause == EXCCAUSE_ITLB_MISS ||
		    exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;

	pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n",
		 current->comm, current->pid,
		 address, exccause, regs->pc,
		 is_write ? "w" : "", is_exec ? "x" : "");

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);

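	/*
	 * find_vma() returns the first VMA that ends above the address,
	 * so the hit is only valid if the address lies inside it. If it
	 * doesn't, the access may still be legal when it falls just
	 * below a stack VMA that is allowed to grow down.
	 */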
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/* Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */

good_area:
	code = SEGV_ACCERR;

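	/*
	 * The address lies inside a valid mapping, so any failure from
	 * here on is a permission problem: report SEGV_ACCERR rather
	 * than SEGV_MAPERR.
	 */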
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else if (is_exec) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else	/* Allow read even from write-only pages. */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;

	/* If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

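	/*
	 * handle_mm_fault() bails out early when a fatal signal arrives
	 * while the task is blocked on the fault. User faults get the
	 * signal delivered on the way out; kernel faults must be fixed
	 * up through the exception table.
	 */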
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto bad_page_fault;
		return;
	}

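	/*
	 * Hard errors from handle_mm_fault(): OOM, SIGSEGV and SIGBUS
	 * each have a dedicated exit path below; any other error bit
	 * would be a bug in the core mm.
	 */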
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
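	/*
	 * The core mm dropped mmap_lock and asked for a retry, e.g. to
	 * wait for page I/O. Loop back with FAULT_FLAG_TRIED set so the
	 * next attempt blocks instead of returning VM_FAULT_RETRY again.
	 */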
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/* No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;

	/* Something tried to access memory that isn't in our memory map.
	 * Fix it, but check whether it's a kernel or a user fault first.
	 */
bad_area:
	mmap_read_unlock(mm);
	if (user_mode(regs)) {
		current->thread.bad_vaddr = address;
		current->thread.error_code = is_write;
		force_sig_fault(SIGSEGV, code, (void *) address);
		return;
	}
	bad_page_fault(regs, address, SIGSEGV);
	return;

	/* We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGKILL);
	else
		pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);

	/* Send a SIGBUS, regardless of whether we were in kernel
	 * or user mode.
	 */
	current->thread.bad_vaddr = address;
	force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address);

	/* Kernel mode? Handle exceptions or die. */
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGBUS);
	return;

vmalloc_fault:
	{
		/* Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		struct mm_struct *act_mm = current->active_mm;
		int index = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		if (act_mm == NULL)
			goto bad_page_fault;

		pgd = act_mm->pgd + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto bad_page_fault;

		pgd_val(*pgd) = pgd_val(*pgd_k);

		p4d = p4d_offset(pgd, address);
		p4d_k = p4d_offset(pgd_k, address);
		if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
			goto bad_page_fault;

		pud = pud_offset(p4d, address);
		pud_k = pud_offset(p4d_k, address);
		if (!pud_present(*pud) || !pud_present(*pud_k))
			goto bad_page_fault;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_page_fault;

		pmd_val(*pmd) = pmd_val(*pmd_k);
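		/*
		 * Copying the pmd entry makes this mm share init_mm's
		 * pte table for the vmalloc area; the p4d and pud levels
		 * are folded on Xtensa, so syncing pgd and pmd suffices.
		 */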
		pte_k = pte_offset_kernel(pmd_k, address);

		if (!pte_present(*pte_k))
			goto bad_page_fault;
		return;
	}
bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
	return;
}

void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void die(const char*, struct pt_regs*, long);
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this kernel fault? */
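	/*
	 * Faults on user addresses from helpers such as copy_from_user()
	 * are expected: those helpers place fixup entries in the
	 * exception table, keyed by the faulting pc, and execution
	 * resumes at the fixup instead of oopsing.
	 */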
	entry = search_exception_tables(regs->pc);
	if (entry) {
		pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
			 current->comm, regs->pc, entry->fixup);
		current->thread.bad_uaddr = address;
		regs->pc = entry->fixup;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	pr_alert("Unable to handle kernel paging request at virtual "
		 "address %08lx\n pc = %08lx, ra = %08lx\n",
		 address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);
	do_exit(sig);
}