xref: /kernel/linux/linux-6.6/arch/ia64/mm/fault.c (revision 62306a36)
// SPDX-License-Identifier: GPL-2.0
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/processor.h>
#include <asm/exception.h>

extern int die(char *, struct pt_regs *, long);

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

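	/*
	 * Walk the kernel page table by hand, one level at a time
	 * (pgd -> p4d -> pud -> pmd -> pte), bailing out as soon as a
	 * level is absent or bad.
	 */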
	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d) || p4d_bad(*p4d))
		return 0;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}

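/*
 * These must match the bit positions of VM_READ, VM_WRITE and VM_EXEC in
 * <linux/mm.h>; the preprocessor check further down enforces that.
 */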
#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long mask;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

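	/*
	 * Build a vm_flags-style mask from the ISR: the X (execute) and W
	 * (write) fault bits are shifted into the VM_EXEC/VM_WRITE bit
	 * positions so the mask can be compared against vma->vm_flags
	 * directly below.
	 */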
	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	/* mmap_lock is performance critical.... */
	prefetchw(&mm->mmap_lock);

	/*
	 * If we're in an interrupt or have no user context, we must not
	 * take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	/*
	 * This handles kprobes on user-space access instructions.
	 */
	if (kprobe_page_fault(regs, TRAP_BRKPT))
		return;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (mask & VM_WRITE)
		flags |= FAULT_FLAG_WRITE;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma && !prev_vma)
		goto bad_area;

	/*
	 * find_vma_prev() returns vma such that address < vma->vm_end or NULL.
	 *
	 * We may find no vma at all: the last vm area could be the
	 * register backing store, which needs to expand upwards.  In
	 * that case vma will be NULL, but prev_vma will be non-NULL.
	 */
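	/*
	 * Note: in this kernel, expand_stack() drops the mmap read lock,
	 * retries the expansion under the write lock, and downgrades back
	 * to a read lock on success.  On failure it returns NULL with no
	 * lock held at all, which is why the error path below is
	 * bad_area_nosemaphore rather than bad_area.
	 */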
	if ((!vma && prev_vma) || (address < vma->vm_start)) {
		vma = expand_stack(mm, address);
		if (!vma)
			goto bad_area_nosemaphore;
	}

	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

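	/*
	 * Let a read fault through on a mapping that is only VM_WRITE:
	 * write access implies read access here, as ia64 cannot express
	 * write-only page protection.
	 */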
	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
		goto bad_area;

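	/* Every access type raised in the ISR must be enabled in the vma. */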
	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

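	/*
	 * A pending signal (a fatal one, for kernel faults) can abort the
	 * fault before it is handled; in that case handle_mm_fault() has
	 * already dropped the mmap lock.  User tasks simply return and
	 * re-fault after the signal is dealt with; kernel faults must
	 * take the exception fixup path.
	 */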
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			goto out_of_memory;
		} else if (fault & VM_FAULT_SIGSEGV) {
			goto bad_area;
		} else if (fault & VM_FAULT_SIGBUS) {
			signal = SIGBUS;
			goto bad_area;
		}
		BUG();
	}

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/* No need to mmap_read_unlock(mm) as we would
		 * have already released it in __folio_lock_or_retry()
		 * in mm/filemap.c.
		 */

		goto retry;
	}

	mmap_read_unlock(mm);
	return;

  bad_area:
	mmap_read_unlock(mm);
  bad_area_nosemaphore:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault;
		 * set the "ed" bit in the psr to ensure forward progress.
		 * (The target register will get a NaT for ld.s, lfetch will
		 * be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
	if (user_mode(regs)) {
		force_sig_fault(signal, code, (void __user *) address,
				0, __ISR_VALID, isr);
		return;
	}

  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault;
		 * set the "ed" bit in the psr to ensure forward progress.
		 * (The target register will get a NaT for ld.s, lfetch will
		 * be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vmas for region 5, we might get here even if the
	 * address is valid, due to the VHPT walker inserting a non-present
	 * translation that becomes stale.  If that happens, the non-present
	 * fault handler has already purged the stale translation, which
	 * fixed the problem.  So we check whether the translation is valid,
	 * and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

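	/*
	 * ia64_done_with_exception() looks the faulting IP up in the
	 * kernel exception table and, if a fixup entry exists, patches
	 * the registers so execution resumes at the fixup handler.  This
	 * is how uaccess routines survive faulting user pointers.
	 */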
	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	if (die("Oops", regs, isr))
		regs = NULL;
	bust_spinlocks(0);
	if (regs)
		make_task_dead(SIGKILL);
	return;

  out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
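	/*
	 * For user-mode faults, let the generic VM code decide how to
	 * handle the OOM condition instead of killing the task here.
	 */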
	pagefault_out_of_memory();
}