// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory fault handling for Hexagon
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

/*
 * Page fault handling for the Hexagon Virtual Machine.
 * Can also be called by a native port emulating the HVM
 * exceptions.
 */

#include <asm/traps.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>

/*
 * Decode of hardware exception sends us to one of several
 * entry points.  At each, we generate canonical arguments
 * for handling by the abstract memory management code.
 */
#define FLT_IFETCH     -1
#define FLT_LOAD        0
#define FLT_STORE       1


/*
 * Canonical page fault handler
 */
void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	int si_signo;
	int si_code = SEGV_MAPERR;
	vm_fault_t fault;
	const struct exception_table_entry *fixup;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	/*
	 * If we're in an interrupt or have no user context,
	 * then we must not take the fault.
	 */
	if (unlikely(in_interrupt() || !mm))
		goto no_context;

	local_irq_enable();

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (unlikely(!vma))
		goto bad_area_nosemaphore;

	/* Address space is OK.  Now check access rights. */
	si_code = SEGV_ACCERR;

	switch (cause) {
	case FLT_IFETCH:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case FLT_LOAD:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case FLT_STORE:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	}

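	/*
	 * Ask the core mm to resolve the fault.  handle_mm_fault() may
	 * drop the mmap lock on our behalf; if so, it reports that via
	 * VM_FAULT_RETRY or VM_FAULT_COMPLETED below.
	 */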
	fault = handle_mm_fault(vma, address, flags, regs);

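	/*
	 * If a fatal signal interrupted the fault, the mmap lock has
	 * already been dropped; just return so the signal can be
	 * handled, unless the fault happened in kernel mode and needs
	 * the exception-table fixup instead.
	 */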
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	/* The most common case -- we are done. */
	if (likely(!(fault & VM_FAULT_ERROR))) {
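		/*
		 * VM_FAULT_RETRY means the mmap lock was dropped while
		 * the fault slept (e.g. on I/O); go around again and
		 * re-take it in lock_mm_and_find_vma().
		 */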
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}

		mmap_read_unlock(mm);
		return;
	}

	mmap_read_unlock(mm);

	/* Handle copyin/out exception cases */
	if (!user_mode(regs))
		goto no_context;

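	/*
	 * Out of memory: defer to the common OOM handling; the faulting
	 * instruction is retried once we return to user space.
	 */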
	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	/* User-mode address is in the memory map, but we are
	 * unable to fix up the page fault.
	 */
	if (fault & VM_FAULT_SIGBUS) {
		si_signo = SIGBUS;
		si_code = BUS_ADRERR;
	}
	/* Address is not in the memory map */
	else {
		si_signo = SIGSEGV;
		si_code  = SEGV_ACCERR;
	}
	force_sig_fault(si_signo, si_code, (void __user *)address);
	return;

bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
		return;
	}
	/* Kernel-mode fault falls through */

no_context:
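	/*
	 * Kernel-mode fault: if the faulting instruction (pt_elr() is
	 * the exception return address) has an exception-table entry,
	 * as the uaccess helpers do, redirect execution to its fixup.
	 */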
	fixup = search_exception_tables(pt_elr(regs));
	if (fixup) {
		pt_set_elr(regs, fixup->fixup);
		return;
	}

	/* Things are looking very, very bad now */
	bust_spinlocks(1);
	printk(KERN_EMERG "Unable to handle kernel paging request at "
		"virtual address 0x%08lx, regs %p\n", address, regs);
	die("Bad Kernel VA", regs, SIGKILL);
}


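/*
 * Entry points from the hardware exception decode described above.
 * Each picks up the faulting virtual address recorded by the hardware
 * and hands it to the canonical handler with the matching cause code.
 */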
void read_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_LOAD, regs);
}

void write_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_STORE, regs);
}

void execute_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_IFETCH, regs);
}