// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2007-2008 Paul Mackerras, IBM Corp.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/pagewalk.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>

#include <linux/pgtable.h>
#include <linux/uaccess.h>

/*
 * Free all pages allocated for subpage protection maps and pointers.
 * Also makes sure that the subpage_prot_table structure is
 * reinitialized for the next user.
 */
void subpage_prot_free(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context);
	unsigned long i, j, addr;
	u32 **p;

	if (!spt)
		return;

	for (i = 0; i < 4; ++i) {
		if (spt->low_prot[i]) {
			free_page((unsigned long)spt->low_prot[i]);
			spt->low_prot[i] = NULL;
		}
	}
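	/* Free the two-level map pages covering the rest of the address space */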
	addr = 0;
	for (i = 0; i < (TASK_SIZE_USER64 >> 43); ++i) {
		p = spt->protptrs[i];
		if (!p)
			continue;
		spt->protptrs[i] = NULL;
		for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
		     ++j, addr += PAGE_SIZE)
			if (p[j])
				free_page((unsigned long)p[j]);
		free_page((unsigned long)p);
	}
	spt->maxaddr = 0;
	kfree(spt);
}

static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
			     int npages)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return;
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
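	/*
	 * A no-change pte_update() causes any hash PTE cached for
	 * this page to be flushed.
	 */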
	for (; npages > 0; --npages) {
		pte_update(mm, addr, pte, 0, 0, 0);
		addr += PAGE_SIZE;
		++pte;
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}

/*
 * Clear the subpage protection map for an address range, allowing
 * all accesses that are allowed by the pte permissions.
 */
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;

	mmap_write_lock(mm);

	spt = mm_ctx_subpage_prot(&mm->context);
	if (!spt)
		goto err_out;

	limit = addr + len;
	if (limit > spt->maxaddr)
		limit = spt->maxaddr;
	for (; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
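		/*
		 * Addresses below 4GB use the single-level low_prot array;
		 * higher addresses go through the two-level protptrs tables.
		 */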
		if (addr < 0x100000000UL) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm)
				continue;
		}
		spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
		if (!spp)
			continue;
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		memset(spp, 0, nw * sizeof(u32));

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}

err_out:
	mmap_write_unlock(mm);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
				  unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
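
	/* Split any transparent huge PMD so the range uses ordinary PTEs */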
	split_huge_pmd(vma, pmd, addr);
	return 0;
}

static const struct mm_walk_ops subpage_walk_ops = {
	.pmd_entry	= subpage_walk_pmd_entry,
};

static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	struct vm_area_struct *vma;

	/*
	 * We don't try too hard; we just mark all the VMAs in that range
	 * VM_NOHUGEPAGE and split them.
	 */
	vma = find_vma(mm, addr);
	/*
	 * If the whole range lies in an unmapped hole before the first
	 * VMA that could overlap it, there is nothing to do.
	 */
	if (vma && ((addr + len) <= vma->vm_start))
		return;

	while (vma) {
		if (vma->vm_start >= (addr + len))
			break;
		vma->vm_flags |= VM_NOHUGEPAGE;
		walk_page_vma(vma, &subpage_walk_ops, NULL);
		vma = vma->vm_next;
	}
}
#else
static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	return;
}
#endif

/*
 * Copy in a subpage protection map for an address range.
 * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
 * Each 2-bit field is 0 to allow any access, 1 to prevent writes,
 * 2 or 3 to prevent all accesses.
 * Note that the normal page protections also apply; the subpage
 * protection mechanism is an additional constraint, so putting 0
 * in a 2-bit field won't allow writes to a page that is otherwise
 * write-protected.
 */
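/*
 * For illustration only (assuming the powerpc __NR_subpage_prot syscall
 * number is available to userspace): a caller could make one 64k page
 * entirely inaccessible with roughly
 *
 *	u32 map = 0xffffffff;	2-bit value 3 for each of the 16 subpages
 *	syscall(__NR_subpage_prot, addr, 0x10000UL, &map);
 *
 * and later remove the restriction by passing a NULL map:
 *
 *	syscall(__NR_subpage_prot, addr, 0x10000UL, NULL);
 */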
SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
		unsigned long, len, u32 __user *, map)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;
	int err;

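	/* Subpage protection is only implemented for the hash MMU */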
	if (radix_enabled())
		return -ENOENT;

	/* Check parameters */
	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
	    addr >= mm->task_size || len >= mm->task_size ||
	    addr + len > mm->task_size)
		return -EINVAL;

	if (is_hugepage_only_range(mm, addr, len))
		return -EINVAL;

	if (!map) {
		/* Clear out the protection map for the address range */
		subpage_prot_clear(addr, len);
		return 0;
	}

	if (!access_ok(map, (len >> PAGE_SHIFT) * sizeof(u32)))
		return -EFAULT;

	mmap_write_lock(mm);

	spt = mm_ctx_subpage_prot(&mm->context);
	if (!spt) {
		/*
		 * Allocate subpage prot table if not already done.
		 * Do this with mmap_lock held
		 */
		spt = kzalloc(sizeof(struct subpage_prot_table), GFP_KERNEL);
		if (!spt) {
			err = -ENOMEM;
			goto out;
		}
		mm->context.hash_context->spt = spt;
	}

	subpage_mark_vma_nohuge(mm, addr, len);
	for (limit = addr + len; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		err = -ENOMEM;
		if (addr < 0x100000000UL) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm) {
				spm = (u32 **)get_zeroed_page(GFP_KERNEL);
				if (!spm)
					goto out;
				spt->protptrs[addr >> SBP_L3_SHIFT] = spm;
			}
		}
		spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1);
		spp = *spm;
		if (!spp) {
			spp = (u32 *)get_zeroed_page(GFP_KERNEL);
			if (!spp)
				goto out;
			*spm = spp;
		}
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

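		/*
		 * Subpage protection is only enforced on 4k hash PTEs, so
		 * demote this segment to 4k base page size.
		 */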
		local_irq_disable();
		demote_segment_4k(mm, addr);
		local_irq_enable();

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

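		/*
		 * Drop mmap_lock across the copy: __copy_from_user() may
		 * fault, and the fault handler needs to take mmap_lock.
		 */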
		mmap_write_unlock(mm);
		if (__copy_from_user(spp, map, nw * sizeof(u32)))
			return -EFAULT;
		map += nw;
		mmap_write_lock(mm);

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	if (limit > spt->maxaddr)
		spt->maxaddr = limit;
	err = 0;
 out:
	mmap_write_unlock(mm);
	return err;
}