// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/lib/uaccess_with_memcpy.c
 *
 *  Written by: Lennert Buytenhek and Nicolas Pitre
 *  Copyright (C) 2009 Marvell Semiconductor
 */

#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <asm/current.h>
#include <asm/page.h>

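/*
 * Try to pin down the page backing the user address @_addr for a direct
 * write.  Returns 1 with the relevant page table lock held: *ptep points
 * to the mapped pte (to be released with pte_unmap_unlock()), or *ptep is
 * NULL when the destination is covered by a huge/THP pmd, in which case
 * *ptlp is the mm's page_table_lock (to be released with spin_unlock()).
 * Returns 0 if the page is not present or not yet writable, so the caller
 * must fault it in first.
 */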
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
	unsigned long addr = (unsigned long)_addr;
	pgd_t *pgd;
	p4d_t *p4d;
	pmd_t *pmd;
	pte_t *pte;
	pud_t *pud;
	spinlock_t *ptl;

	pgd = pgd_offset(current->mm, addr);
	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
		return 0;

	p4d = p4d_offset(pgd, addr);
	if (unlikely(p4d_none(*p4d) || p4d_bad(*p4d)))
		return 0;

	pud = pud_offset(p4d, addr);
	if (unlikely(pud_none(*pud) || pud_bad(*pud)))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (unlikely(pmd_none(*pmd)))
		return 0;

	/*
	 * A pmd can be bad if it refers to a HugeTLB or THP page.
	 *
	 * Both THP and HugeTLB pages have the same pmd layout
	 * and should not be manipulated by the pte functions.
	 *
	 * Lock the page table for the destination and check
	 * to see that it's still huge and whether or not we will
	 * need to fault on write.
	 */
	if (unlikely(pmd_thp_or_huge(*pmd))) {
		ptl = &current->mm->page_table_lock;
		spin_lock(ptl);
		if (unlikely(!pmd_thp_or_huge(*pmd)
			|| pmd_hugewillfault(*pmd))) {
			spin_unlock(ptl);
			return 0;
		}

		*ptep = NULL;
		*ptlp = ptl;
		return 1;
	}

	if (unlikely(pmd_bad(*pmd)))
		return 0;

	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
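	/*
	 * Only proceed if the pte is present, young, writable and dirty:
	 * then the direct memcpy()/memset() done under this lock cannot
	 * fault and needs no further page table updates.
	 */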
	if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
	    !pte_write(*pte) || !pte_dirty(*pte))) {
		pte_unmap_unlock(pte, ptl);
		return 0;
	}

	*ptep = pte;
	*ptlp = ptl;

	return 1;
}

static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
	unsigned long ua_flags;
	int atomic;

	if (uaccess_kernel()) {
		memcpy((void *)to, from, n);
		return 0;
	}

	/* the mmap lock is taken only if not in an atomic context */
	atomic = faulthandler_disabled();

	if (!atomic)
		mmap_read_lock(current->mm);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

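		/*
		 * If the destination page cannot be pinned, drop the mmap
		 * lock and write a zero byte through the regular uaccess
		 * path: this either faults the page in and makes it
		 * writable, or fails for a genuinely bad address.
		 */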
		while (!pin_page_for_write(to, &pte, &ptl)) {
			if (!atomic)
				mmap_read_unlock(current->mm);
			if (__put_user(0, (char __user *)to))
				goto out;
			if (!atomic)
				mmap_read_lock(current->mm);
		}

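		/* bytes remaining in the current page: PAGE_SIZE - offset_in_page(to) */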
		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

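		/*
		 * Temporarily open up kernel access to user space (e.g.
		 * when CONFIG_CPU_SW_DOMAIN_PAN is in effect) so that a
		 * plain memcpy() can write the user page directly.
		 */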
		ua_flags = uaccess_save_and_enable();
		memcpy((void *)to, from, tocopy);
		uaccess_restore(ua_flags);
		to += tocopy;
		from += tocopy;
		n -= tocopy;

		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	if (!atomic)
		mmap_read_unlock(current->mm);

out:
	return n;
}

unsigned long
arm_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/*
	 * This test is stubbed out of the main function above to keep
	 * the overhead for small copies low by avoiding a large
	 * register dump on the stack just to reload them right away.
	 * With the frame pointer disabled, tail call optimization kicks
	 * in as well, making this test almost invisible.
	 */
	if (n < 64) {
		unsigned long ua_flags = uaccess_save_and_enable();
		n = __copy_to_user_std(to, from, n);
		uaccess_restore(ua_flags);
	} else {
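		/*
		 * uaccess_mask_range_ptr() masks the user pointer against
		 * the access limit so it cannot be dereferenced out of
		 * range under speculation (Spectre-V1 style hardening).
		 */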
		n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
					  from, n);
	}
	return n;
}

static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
	unsigned long ua_flags;

	if (uaccess_kernel()) {
		memset((void *)addr, 0, n);
		return 0;
	}

	mmap_read_lock(current->mm);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

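		/*
		 * Same fault-in dance as in __copy_to_user_memcpy(): write
		 * a zero byte through the regular uaccess path if the page
		 * cannot be pinned for a direct write.
		 */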
		while (!pin_page_for_write(addr, &pte, &ptl)) {
			mmap_read_unlock(current->mm);
			if (__put_user(0, (char __user *)addr))
				goto out;
			mmap_read_lock(current->mm);
		}

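		/* clear no further than the end of the current page */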
		tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		ua_flags = uaccess_save_and_enable();
		memset((void *)addr, 0, tocopy);
		uaccess_restore(ua_flags);
		addr += tocopy;
		n -= tocopy;

		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	mmap_read_unlock(current->mm);

out:
	return n;
}

unsigned long arm_clear_user(void __user *addr, unsigned long n)
{
	/* See the rationale for this in arm_copy_to_user() above. */
	if (n < 64) {
		unsigned long ua_flags = uaccess_save_and_enable();
		n = __clear_user_std(addr, n);
		uaccess_restore(ua_flags);
	} else {
		n = __clear_user_memset(addr, n);
	}
	return n;
}

#if 0

/*
 * This code is disabled by default, but kept around in case the chosen
 * thresholds need to be revalidated.  A runtime-determined threshold
 * would imply some (small but non-zero) overhead, and so far the
 * measurements on the targets of interest didn't show a worthwhile
 * variation.
 *
 * Note that a fairly precise sched_clock() implementation is needed
 * for the results to make sense.
 */

#include <linux/vmalloc.h>

static int __init test_size_threshold(void)
{
	struct page *src_page, *dst_page;
	void *user_ptr, *kernel_ptr;
	unsigned long long t0, t1, t2;
	int size, ret;

	ret = -ENOMEM;
	src_page = alloc_page(GFP_KERNEL);
	if (!src_page)
		goto no_src;
	dst_page = alloc_page(GFP_KERNEL);
	if (!dst_page)
		goto no_dst;
	kernel_ptr = page_address(src_page);
	user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
	if (!user_ptr)
		goto no_vmap;

	/* warm up the src page dcache */
	ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
		t1 = sched_clock();
		ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
		t2 = sched_clock();
		printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __clear_user_memset(user_ptr, size);
		t1 = sched_clock();
		ret |= __clear_user_std(user_ptr, size);
		t2 = sched_clock();
		printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	if (ret)
		ret = -EFAULT;

	vunmap(user_ptr);
no_vmap:
	put_page(dst_page);
no_dst:
	put_page(src_page);
no_src:
	return ret;
}

subsys_initcall(test_size_threshold);

#endif