// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to copy_to_user() and copy_from_user()
 * hardening.
 */
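/*
 * Note: the "bad" copies below are only expected to Oops when the
 * kernel is built with CONFIG_HARDENED_USERCOPY; without it they
 * quietly succeed.
 */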
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched/task_stack.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

/*
 * Many of the tests here end up using const sizes, but those would
 * normally be ignored by hardened usercopy, so force the compiler
 * into choosing the non-const path to make sure we trigger the
 * hardened usercopy checks: add "unconst" to all the const copies,
 * and make sure "cache_size" isn't optimized into a const.
 */
static volatile size_t unconst;
static volatile size_t cache_size = 1024;
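/* Cache with a usercopy whitelist window; created in lkdtm_usercopy_init(). */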
static struct kmem_cache *whitelist_cache;

static const unsigned char test_text[] = "This is a test.\n";

/*
 * Instead of adding -Wno-return-local-addr, just pass the stack address
 * through a function to obfuscate it from the compiler.
 */
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
	return stack + unconst;
}

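/*
 * Returns a pointer into its own stack frame, which is dead once this
 * function returns, giving the caller a cross-frame address to hand to
 * the usercopy routines.
 */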
static noinline unsigned char *do_usercopy_stack_callee(int value)
{
	unsigned char buf[128];
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(buf); i++) {
		buf[i] = value & 0xff;
	}

	/*
	 * Put the target buffer in the middle of the stack allocation
	 * so that we don't step on future stack users regardless
	 * of stack growth direction.
	 */
	return trick_compiler(&buf[(128/2)-32]);
}

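/*
 * do_usercopy_stack - exercise hardened usercopy's stack frame checking
 * @to_user: test copy_to_user() when true, copy_from_user() when false
 * @bad_frame: when true use a pointer into a dead callee frame; when
 *             false use a pointer just inside the end of the thread stack
 */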
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
	unsigned long user_addr;
	unsigned char good_stack[32];
	unsigned char *bad_stack;
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(good_stack); i++)
		good_stack[i] = test_text[i % sizeof(test_text)];

	/* This is a pointer to outside our current stack frame. */
	if (bad_frame) {
		bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
	} else {
		/*
		 * Put the start address just inside the stack, so that
		 * copying sizeof(good_stack) bytes runs past its end.
		 */
		bad_stack = task_stack_page(current) + THREAD_SIZE;
		bad_stack -= sizeof(unsigned long);
	}

#ifdef ARCH_HAS_CURRENT_STACK_POINTER
	pr_info("stack     : %px\n", (void *)current_stack_pointer);
#endif
	pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack));
	pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack));

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	if (to_user) {
		pr_info("attempting good copy_to_user of local stack\n");
		if (copy_to_user((void __user *)user_addr, good_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of distant stack\n");
		if (copy_to_user((void __user *)user_addr, bad_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		/*
		 * There is no safe way to run this case: if hardened
		 * usercopy doesn't stop it, the write runs off the end of
		 * our stack and may land in another thread's stack.
		 */
		if (!bad_frame)
			goto free_user;

		pr_info("attempting good copy_from_user of local stack\n");
		if (copy_from_user(good_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of distant stack\n");
		if (copy_from_user(bad_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

/*
 * This checks for whole-object size validation with hardened usercopy,
 * with or without usercopy whitelisting.
 */
static void do_usercopy_heap_size(bool to_user)
{
	unsigned long user_addr;
	unsigned char *one, *two;
	void __user *test_user_addr;
	void *test_kern_addr;
	size_t size = unconst + 1024;

	one = kmalloc(size, GFP_KERNEL);
	two = kmalloc(size, GFP_KERNEL);
	if (!one || !two) {
		pr_warn("Failed to allocate kernel memory\n");
		goto free_kernel;
	}

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_kernel;
	}

	memset(one, 'A', size);
	memset(two, 'B', size);

	test_user_addr = (void __user *)(user_addr + 16);
	test_kern_addr = one + 16;

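	/*
	 * The "bad" copies below use the full object size starting 16
	 * bytes into the allocation, so they run 16 bytes past the end
	 * of the 1024-byte object.
	 */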
	if (to_user) {
		pr_info("attempting good copy_to_user of correct size\n");
		if (copy_to_user(test_user_addr, test_kern_addr, size / 2)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of too large size\n");
		if (copy_to_user(test_user_addr, test_kern_addr, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user of correct size\n");
		if (copy_from_user(test_kern_addr, test_user_addr, size / 2)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of too large size\n");
		if (copy_from_user(test_kern_addr, test_user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
	kfree(one);
	kfree(two);
}

/*
 * This checks for the specific whitelist window within an object. If this
 * test passes, then do_usercopy_heap_size() tests will pass too.
 */
static void do_usercopy_heap_whitelist(bool to_user)
{
	unsigned long user_alloc;
	unsigned char *buf = NULL;
	unsigned char __user *user_addr;
	size_t offset, size;

	/* Make sure cache was prepared. */
	if (!whitelist_cache) {
		pr_warn("Failed to allocate kernel cache\n");
		return;
	}

	/* Allocate a buffer that contains a whitelisted window. */
	buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL);
	if (!buf) {
		pr_warn("Failed to allocate buffer from whitelist cache\n");
		goto free_alloc;
	}

	/* Allocate user memory we'll poke at. */
	user_alloc = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_alloc >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_alloc;
	}
	user_addr = (void __user *)user_alloc;

	memset(buf, 'B', cache_size);

	/* Whitelisted window in buffer, from kmem_cache_create_usercopy. */
	offset = (cache_size / 4) + unconst;
	size = (cache_size / 16) + unconst;

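	/*
	 * With the default cache_size of 1024, the window covers bytes
	 * [256, 320) of the object, matching the useroffset/usersize
	 * given to kmem_cache_create_usercopy() at init time.
	 */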
	if (to_user) {
		pr_info("attempting good copy_to_user inside whitelist\n");
		if (copy_to_user(user_addr, buf + offset, size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user outside whitelist\n");
		if (copy_to_user(user_addr, buf + offset - 1, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user inside whitelist\n");
		if (copy_from_user(buf + offset, user_addr, size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user outside whitelist\n");
		if (copy_from_user(buf + offset - 1, user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_alloc, PAGE_SIZE);
free_alloc:
	if (buf)
		kmem_cache_free(whitelist_cache, buf);
}

/* Callable tests. */
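/*
 * Each entry point below is wired up as an lkdtm crash type; a test can
 * be triggered from userspace via debugfs, e.g.:
 *
 *   echo USERCOPY_HEAP_SIZE_TO > /sys/kernel/debug/provoke-crash/DIRECT
 */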
void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
{
	do_usercopy_heap_size(true);
}

void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
{
	do_usercopy_heap_size(false);
}

void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
{
	do_usercopy_heap_whitelist(true);
}

void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
{
	do_usercopy_heap_whitelist(false);
}

void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
	do_usercopy_stack(true, true);
}

void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
	do_usercopy_stack(false, true);
}

void lkdtm_USERCOPY_STACK_BEYOND(void)
{
	do_usercopy_stack(true, false);
}

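/*
 * Copying out of rodata (test_text) is within a correctly-sized object
 * and should succeed; copying out of kernel text should be rejected by
 * hardened usercopy.
 */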
void lkdtm_USERCOPY_KERNEL(void)
{
	unsigned long user_addr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	pr_info("attempting good copy_to_user from kernel rodata: %px\n",
		test_text);
	if (copy_to_user((void __user *)user_addr, test_text,
			 unconst + sizeof(test_text))) {
		pr_warn("copy_to_user failed unexpectedly?!\n");
		goto free_user;
	}

	pr_info("attempting bad copy_to_user from kernel text: %px\n",
		vm_mmap);
	if (copy_to_user((void __user *)user_addr, function_nocfi(vm_mmap),
			 unconst + PAGE_SIZE)) {
		pr_warn("copy_to_user failed, but lacked Oops\n");
		goto free_user;
	}
	pr_err("FAIL: survived bad copy_to_user()\n");

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

void __init lkdtm_usercopy_init(void)
{
	/* Prepare a cache that whitelists only a window of each object. */
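	/*
	 * kmem_cache_create_usercopy() args: name, size, align, flags,
	 * then the whitelist window (useroffset = cache_size / 4,
	 * usersize = cache_size / 16) and no constructor.
	 */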
	whitelist_cache =
		kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
					   0, 0,
					   cache_size / 4,
					   cache_size / 16,
					   NULL);
}

void __exit lkdtm_usercopy_exit(void)
{
	kmem_cache_destroy(whitelist_cache);
}