xref: /kernel/linux/linux-5.10/arch/arc/mm/fault.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0-only
/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
#include <asm/mmu.h>

/*
 * Handle a fault on a kernel virtual address, i.e. the regions used to
 * implement vmalloc/pkmap/fixmap. Refer to asm/processor.h for the System
 * Memory Map.
 *
 * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
 * from the swapper pgdir to the task pgdir. The 2nd level table/page is thus
 * shared between all tasks.
 */
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

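	/*
	 * Walk the faulting task's page tables and the kernel master table
	 * (swapper_pg_dir, via pgd_offset_k()) in lockstep: a level missing
	 * from the master copy means the kernel address itself is not mapped.
	 */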
	pgd = pgd_offset_fast(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (!pgd_present(*pgd_k))
		goto bad_area;

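	/*
	 * ARC uses a two-level page table, so on a typical configuration the
	 * p4d and pud lookups below are folded no-ops that just pass the pgd
	 * entry through (assumption based on the generic nop4d/nopud helpers).
	 */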
	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d_k))
		goto bad_area;

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud_k))
		goto bad_area;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		goto bad_area;

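	/*
	 * Copy only the missing PMD slot from the kernel table; the 2nd level
	 * table it points to remains the kernel's own, so later PTE updates
	 * there are visible to every task without further synchronization.
	 */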
	set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
	return 0;

bad_area:
	return 1;
}

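/*
 * Top level page fault handler, reached from the low-level exception handling
 * code with the faulting virtual address and the saved register file (both
 * the TLB miss slow path and protection violations end up here).
 */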
void do_page_fault(unsigned long address, struct pt_regs *regs)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int sig, si_code = SEGV_MAPERR;
	unsigned int write = 0, exec = 0, mask;
	vm_fault_t fault = VM_FAULT_SIGSEGV;	/* handle_mm_fault() output */
	unsigned int flags;			/* handle_mm_fault() input */

	/*
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (address >= VMALLOC_START && !user_mode(regs)) {
		if (unlikely(handle_kernel_vaddr_fault(address)))
			goto no_context;
		else
			return;
	}
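	/*
	 * Note that user mode is deliberately not short-circuited above: a
	 * user access to a vmalloc address is an ordinary bad access and
	 * falls through to the regular find_vma()/SIGSEGV path below.
	 */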

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

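	/*
	 * Classify the access from the Exception Cause Register: a store/EX
	 * cause code counts as a write, a ProtV instruction fetch as an exec,
	 * anything else is treated as a read.
	 */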
	if (regs->ecr_cause & ECR_C_PROTV_STORE)	/* ST/EX */
		write = 1;
	else if ((regs->ecr_vec == ECR_V_PROTV) &&
	         (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
		exec = 1;

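	/*
	 * FAULT_FLAG_DEFAULT lets the core mm retry the fault and honour
	 * fatal signals while it waits; USER and WRITE refine the vma
	 * permission checks and copy-on-write handling.
	 */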
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
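	/*
	 * find_vma() returns the first vma ending above the address; if the
	 * address lies below its start it is only valid as stack growth,
	 * which expand_stack() attempts for VM_GROWSDOWN mappings.
	 */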
	if (unlikely(address < vma->vm_start)) {
		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
			goto bad_area;
	}

	/*
	 * vm_area is good, now check permissions for this memory access
	 */
	mask = VM_READ;
	if (write)
		mask = VM_WRITE;
	if (exec)
		mask = VM_EXEC;

	if (!(vma->vm_flags & mask)) {
		si_code = SEGV_ACCERR;
		goto bad_area;
	}

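	/*
	 * Hand the fault to the core mm. Passing regs here lets
	 * handle_mm_fault() do the major/minor fault accounting itself;
	 * only the overall PERF_COUNT_SW_PAGE_FAULTS event is counted above.
	 */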
	fault = handle_mm_fault(vma, address, flags, regs);

	/* Quick path to respond to signals (mmap_lock already dropped by core mm) */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	/*
	 * Retry handling: if the core mm asks for a retry it has already
	 * relinquished mmap_lock, so loop back and re-take it rather than
	 * falling through to the unlock below.
	 */
	if (unlikely((fault & VM_FAULT_RETRY) &&
		     (flags & FAULT_FLAG_ALLOW_RETRY))) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

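	/*
	 * Deliberate fall-through into bad_area: both the success and the
	 * failure paths need mmap_read_unlock(); the VM_FAULT_ERROR check
	 * just below tells them apart.
	 */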
bad_area:
	mmap_read_unlock(mm);

	/*
	 * The fault was either handled or failed with VM_FAULT_ERROR by now
	 * (major/minor accounting already happened inside handle_mm_fault);
	 * in case of a retry we only land here once.
	 */
	if (likely(!(fault & VM_FAULT_ERROR)))
		/* Normal return path: fault handled gracefully */
		return;

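	/*
	 * The fault could not be resolved. Kernel-mode faults go to the
	 * exception-fixup / die path; user-mode faults get a signal that
	 * matches the failure reported by the core mm.
	 */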
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	if (fault & VM_FAULT_SIGBUS) {
		sig = SIGBUS;
		si_code = BUS_ADRERR;
	} else {
		sig = SIGSEGV;
	}

	tsk->thread.fault_address = address;
	force_sig_fault(sig, si_code, (void __user *)address);
	return;

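	/*
	 * No way to return to user space: try the exception table first so
	 * that faulting uaccess helpers (e.g. copy_to_user()) resume at their
	 * fixup handler; otherwise this is a kernel bug and we die with an Oops.
	 */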
no_context:
	if (fixup_exception(regs))
		return;

	die("Oops", regs, address);
}