1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
4 *
5 * Test code for seccomp bpf.
6 */
7
8#define _GNU_SOURCE
9#include <sys/types.h>
10
11/*
12 * glibc 2.26 and later have SIGSYS in siginfo_t. Before that,
13 * we need to use the kernel's siginfo.h file and trick glibc
14 * into accepting it.
15 */
16#if !__GLIBC_PREREQ(2, 26)
17# include <asm/siginfo.h>
18# define __have_siginfo_t 1
19# define __have_sigval_t 1
20# define __have_sigevent_t 1
21#endif
22
23#include <errno.h>
24#include <linux/filter.h>
25#include <sys/prctl.h>
26#include <sys/ptrace.h>
27#include <sys/user.h>
28#include <linux/prctl.h>
29#include <linux/ptrace.h>
30#include <linux/seccomp.h>
31#include <pthread.h>
32#include <semaphore.h>
33#include <signal.h>
34#include <stddef.h>
35#include <stdbool.h>
36#include <string.h>
37#include <time.h>
38#include <limits.h>
39#include <linux/elf.h>
40#include <sys/uio.h>
41#include <sys/utsname.h>
42#include <sys/fcntl.h>
43#include <sys/mman.h>
44#include <sys/times.h>
45#include <sys/socket.h>
46#include <sys/ioctl.h>
47#include <linux/kcmp.h>
48#include <sys/resource.h>
49
50#include <unistd.h>
51#include <sys/syscall.h>
52#include <poll.h>
53
54#include "../kselftest_harness.h"
55#include "../clone3/clone3_selftests.h"
56
57/* Attempt to de-conflict with the selftests tree. */
58#ifndef SKIP
59#define SKIP(s, ...)	XFAIL(s, ##__VA_ARGS__)
60#endif
61
62#ifndef PR_SET_PTRACER
63# define PR_SET_PTRACER 0x59616d61
64#endif
65
66#ifndef PR_SET_NO_NEW_PRIVS
67#define PR_SET_NO_NEW_PRIVS 38
68#define PR_GET_NO_NEW_PRIVS 39
69#endif
70
71#ifndef PR_SECCOMP_EXT
72#define PR_SECCOMP_EXT 43
73#endif
74
75#ifndef SECCOMP_EXT_ACT
76#define SECCOMP_EXT_ACT 1
77#endif
78
79#ifndef SECCOMP_EXT_ACT_TSYNC
80#define SECCOMP_EXT_ACT_TSYNC 1
81#endif
82
83#ifndef SECCOMP_MODE_STRICT
84#define SECCOMP_MODE_STRICT 1
85#endif
86
87#ifndef SECCOMP_MODE_FILTER
88#define SECCOMP_MODE_FILTER 2
89#endif
90
91#ifndef SECCOMP_RET_ALLOW
92struct seccomp_data {
93	int nr;
94	__u32 arch;
95	__u64 instruction_pointer;
96	__u64 args[6];
97};
98#endif
99
100#ifndef SECCOMP_RET_KILL_PROCESS
101#define SECCOMP_RET_KILL_PROCESS 0x80000000U /* kill the process */
102#define SECCOMP_RET_KILL_THREAD	 0x00000000U /* kill the thread */
103#endif
104#ifndef SECCOMP_RET_KILL
105#define SECCOMP_RET_KILL	 SECCOMP_RET_KILL_THREAD
106#define SECCOMP_RET_TRAP	 0x00030000U /* disallow and force a SIGSYS */
107#define SECCOMP_RET_ERRNO	 0x00050000U /* returns an errno */
108#define SECCOMP_RET_TRACE	 0x7ff00000U /* pass to a tracer or disallow */
109#define SECCOMP_RET_ALLOW	 0x7fff0000U /* allow */
110#endif
111#ifndef SECCOMP_RET_LOG
112#define SECCOMP_RET_LOG		 0x7ffc0000U /* allow after logging */
113#endif
114
115#ifndef __NR_seccomp
116# if defined(__i386__)
117#  define __NR_seccomp 354
118# elif defined(__x86_64__)
119#  define __NR_seccomp 317
120# elif defined(__arm__)
121#  define __NR_seccomp 383
122# elif defined(__aarch64__)
123#  define __NR_seccomp 277
124# elif defined(__riscv)
125#  define __NR_seccomp 277
126# elif defined(__csky__)
127#  define __NR_seccomp 277
128# elif defined(__loongarch__)
129#  define __NR_seccomp 277
130# elif defined(__hppa__)
131#  define __NR_seccomp 338
132# elif defined(__powerpc__)
133#  define __NR_seccomp 358
134# elif defined(__s390__)
135#  define __NR_seccomp 348
136# elif defined(__xtensa__)
137#  define __NR_seccomp 337
138# elif defined(__sh__)
139#  define __NR_seccomp 372
140# else
141#  warning "seccomp syscall number unknown for this architecture"
142#  define __NR_seccomp 0xffff
143# endif
144#endif
145
146#ifndef SECCOMP_SET_MODE_STRICT
147#define SECCOMP_SET_MODE_STRICT 0
148#endif
149
150#ifndef SECCOMP_SET_MODE_FILTER
151#define SECCOMP_SET_MODE_FILTER 1
152#endif
153
154#ifndef SECCOMP_GET_ACTION_AVAIL
155#define SECCOMP_GET_ACTION_AVAIL 2
156#endif
157
158#ifndef SECCOMP_GET_NOTIF_SIZES
159#define SECCOMP_GET_NOTIF_SIZES 3
160#endif
161
162#ifndef SECCOMP_FILTER_FLAG_TSYNC
163#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
164#endif
165
166#ifndef SECCOMP_FILTER_FLAG_LOG
167#define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
168#endif
169
170#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
171#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
172#endif
173
174#ifndef PTRACE_SECCOMP_GET_METADATA
175#define PTRACE_SECCOMP_GET_METADATA	0x420d
176
177struct seccomp_metadata {
178	__u64 filter_off;	/* Input: which filter */
179	__u64 flags;		/* Output: filter's flags */
180};
181#endif
182
183#ifndef SECCOMP_FILTER_FLAG_NEW_LISTENER
184#define SECCOMP_FILTER_FLAG_NEW_LISTENER	(1UL << 3)
185#endif
186
187#ifndef SECCOMP_RET_USER_NOTIF
188#define SECCOMP_RET_USER_NOTIF 0x7fc00000U
189
190#define SECCOMP_IOC_MAGIC		'!'
191#define SECCOMP_IO(nr)			_IO(SECCOMP_IOC_MAGIC, nr)
192#define SECCOMP_IOR(nr, type)		_IOR(SECCOMP_IOC_MAGIC, nr, type)
193#define SECCOMP_IOW(nr, type)		_IOW(SECCOMP_IOC_MAGIC, nr, type)
194#define SECCOMP_IOWR(nr, type)		_IOWR(SECCOMP_IOC_MAGIC, nr, type)
195
196/* Flags for seccomp notification fd ioctl. */
197#define SECCOMP_IOCTL_NOTIF_RECV	SECCOMP_IOWR(0, struct seccomp_notif)
198#define SECCOMP_IOCTL_NOTIF_SEND	SECCOMP_IOWR(1,	\
199						struct seccomp_notif_resp)
200#define SECCOMP_IOCTL_NOTIF_ID_VALID	SECCOMP_IOW(2, __u64)
201
202struct seccomp_notif {
203	__u64 id;
204	__u32 pid;
205	__u32 flags;
206	struct seccomp_data data;
207};
208
209struct seccomp_notif_resp {
210	__u64 id;
211	__s64 val;
212	__s32 error;
213	__u32 flags;
214};
215
216struct seccomp_notif_sizes {
217	__u16 seccomp_notif;
218	__u16 seccomp_notif_resp;
219	__u16 seccomp_data;
220};
221#endif
222
223#ifndef SECCOMP_IOCTL_NOTIF_ADDFD
224/* On success, the return value is the remote process's added fd number */
225#define SECCOMP_IOCTL_NOTIF_ADDFD	SECCOMP_IOW(3,	\
226						struct seccomp_notif_addfd)
227
228/* valid flags for seccomp_notif_addfd */
229#define SECCOMP_ADDFD_FLAG_SETFD	(1UL << 0) /* Specify remote fd */
230
231struct seccomp_notif_addfd {
232	__u64 id;
233	__u32 flags;
234	__u32 srcfd;
235	__u32 newfd;
236	__u32 newfd_flags;
237};
238#endif
239
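/*
 * The "small" and "big" ADDFD variants below deliberately encode ioctl
 * sizes that differ from the kernel's struct seccomp_notif_addfd; they
 * appear to exist so that tests can probe how the kernel copes with
 * mismatched ioctl argument sizes.
 */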
240struct seccomp_notif_addfd_small {
241	__u64 id;
242	char weird[4];
243};
244#define SECCOMP_IOCTL_NOTIF_ADDFD_SMALL	\
245	SECCOMP_IOW(3, struct seccomp_notif_addfd_small)
246
247struct seccomp_notif_addfd_big {
248	union {
249		struct seccomp_notif_addfd addfd;
250		char buf[sizeof(struct seccomp_notif_addfd) + 8];
251	};
252};
253#define SECCOMP_IOCTL_NOTIF_ADDFD_BIG	\
254	SECCOMP_IOWR(3, struct seccomp_notif_addfd_big)
255
256#ifndef PTRACE_EVENTMSG_SYSCALL_ENTRY
257#define PTRACE_EVENTMSG_SYSCALL_ENTRY	1
258#define PTRACE_EVENTMSG_SYSCALL_EXIT	2
259#endif
260
261#ifndef SECCOMP_USER_NOTIF_FLAG_CONTINUE
262#define SECCOMP_USER_NOTIF_FLAG_CONTINUE 0x00000001
263#endif
264
265#ifndef SECCOMP_FILTER_FLAG_TSYNC_ESRCH
266#define SECCOMP_FILTER_FLAG_TSYNC_ESRCH (1UL << 4)
267#endif
268
269#ifndef seccomp
270int seccomp(unsigned int op, unsigned int flags, void *args)
271{
272	errno = 0;
273	return syscall(__NR_seccomp, op, flags, args);
274}
275#endif
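/*
 * This wrapper avoids depending on a libc-provided seccomp(): it forwards
 * straight to the raw syscall number and clears errno first so callers
 * see only errors raised by this particular call.
 */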
276
277#if __BYTE_ORDER == __LITTLE_ENDIAN
278#define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]))
279#elif __BYTE_ORDER == __BIG_ENDIAN
280#define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]) + sizeof(__u32))
281#else
282#error "wut? Unknown __BYTE_ORDER?!"
283#endif
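/*
 * A worked example of the macro above: seccomp_data's args[] entries are
 * 64-bit, but BPF_LD|BPF_W loads only 32 bits, so syscall_arg(1) yields
 * offset 24 on little-endian and 28 on big-endian, both of which address
 * the low 32 bits of the second syscall argument.
 */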
284
285#define SIBLING_EXIT_UNKILLED	0xbadbeef
286#define SIBLING_EXIT_FAILURE	0xbadface
287#define SIBLING_EXIT_NEWPRIVS	0xbadfeed
288
289static int __filecmp(pid_t pid1, pid_t pid2, int fd1, int fd2)
290{
291#ifdef __NR_kcmp
292	errno = 0;
293	return syscall(__NR_kcmp, pid1, pid2, KCMP_FILE, fd1, fd2);
294#else
295	errno = ENOSYS;
296	return -1;
297#endif
298}
299
300/* Have TH_LOG report the actual location where filecmp() is used. */
301#define filecmp(pid1, pid2, fd1, fd2)	({		\
302	int _ret;					\
303							\
304	_ret = __filecmp(pid1, pid2, fd1, fd2);		\
305	if (_ret != 0) {				\
306		if (_ret < 0 && errno == ENOSYS) {	\
307			TH_LOG("kcmp() syscall missing (test is less accurate)");\
308			_ret = 0;			\
309		}					\
310	}						\
311	_ret; })
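/*
 * kcmp(pid1, pid2, KCMP_FILE, fd1, fd2) returns 0 only when both fds
 * refer to the same open file description, which is the property that
 * filecmp() callers rely on.
 */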
312
313TEST(kcmp)
314{
315	int ret;
316
317	ret = __filecmp(getpid(), getpid(), 1, 1);
318	EXPECT_EQ(ret, 0);
319	if (ret != 0 && errno == ENOSYS)
320		SKIP(return, "Kernel does not support kcmp() (missing CONFIG_KCMP?)");
321}
322
323TEST(mode_strict_support)
324{
325	long ret;
326
327	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL);
328	ASSERT_EQ(0, ret) {
329		TH_LOG("Kernel does not support CONFIG_SECCOMP");
330	}
331	syscall(__NR_exit, 0);
332}
333
334TEST_SIGNAL(mode_strict_cannot_call_prctl, SIGKILL)
335{
336	long ret;
337
338	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL);
339	ASSERT_EQ(0, ret) {
340		TH_LOG("Kernel does not support CONFIG_SECCOMP");
341	}
342	syscall(__NR_prctl, PR_SET_SECCOMP, SECCOMP_MODE_FILTER,
343		NULL, NULL, NULL);
344	EXPECT_FALSE(true) {
345		TH_LOG("Unreachable!");
346	}
347}
348
349/* Note! This doesn't test the no_new_privs behavior itself. */
350TEST(no_new_privs_support)
351{
352	long ret;
353
354	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
355	EXPECT_EQ(0, ret) {
356		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
357	}
358}
359
360/* Tests kernel support by checking for a copy_from_user() fault on NULL. */
361TEST(mode_filter_support)
362{
363	long ret;
364
365	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
366	ASSERT_EQ(0, ret) {
367		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
368	}
369	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, NULL, NULL, NULL);
370	EXPECT_EQ(-1, ret);
371	EXPECT_EQ(EFAULT, errno) {
372		TH_LOG("Kernel does not support CONFIG_SECCOMP_FILTER!");
373	}
374}
375
376TEST(mode_filter_without_nnp)
377{
378	struct sock_filter filter[] = {
379		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
380	};
381	struct sock_fprog prog = {
382		.len = (unsigned short)ARRAY_SIZE(filter),
383		.filter = filter,
384	};
385	long ret;
386
387	ret = prctl(PR_GET_NO_NEW_PRIVS, 0, NULL, 0, 0);
388	ASSERT_LE(0, ret) {
389		TH_LOG("Expected 0 or unsupported for NO_NEW_PRIVS");
390	}
391	errno = 0;
392	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
393	/* Succeeds with CAP_SYS_ADMIN, fails without */
394	/* TODO(wad) check caps not euid */
395	if (geteuid()) {
396		EXPECT_EQ(-1, ret);
397		EXPECT_EQ(EACCES, errno);
398	} else {
399		EXPECT_EQ(0, ret);
400	}
401}
402
403#define MAX_INSNS_PER_PATH 32768
404
405TEST(filter_size_limits)
406{
407	int i;
408	int count = BPF_MAXINSNS + 1;
409	struct sock_filter allow[] = {
410		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
411	};
412	struct sock_filter *filter;
413	struct sock_fprog prog = { };
414	long ret;
415
416	filter = calloc(count, sizeof(*filter));
417	ASSERT_NE(NULL, filter);
418
419	for (i = 0; i < count; i++)
420		filter[i] = allow[0];
421
422	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
423	ASSERT_EQ(0, ret);
424
425	prog.filter = filter;
426	prog.len = count;
427
428	/* Too many filter instructions in a single filter. */
429	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
430	ASSERT_NE(0, ret) {
431		TH_LOG("Installing %d insn filter was allowed", prog.len);
432	}
433
434	/* One less is okay, though. */
435	prog.len -= 1;
436	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
437	ASSERT_EQ(0, ret) {
438		TH_LOG("Installing %d insn filter wasn't allowed", prog.len);
439	}
440}
441
442TEST(filter_chain_limits)
443{
444	int i;
445	int count = BPF_MAXINSNS;
446	struct sock_filter allow[] = {
447		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
448	};
449	struct sock_filter *filter;
450	struct sock_fprog prog = { };
451	long ret;
452
453	filter = calloc(count, sizeof(*filter));
454	ASSERT_NE(NULL, filter);
455
456	for (i = 0; i < count; i++)
457		filter[i] = allow[0];
458
459	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
460	ASSERT_EQ(0, ret);
461
462	prog.filter = filter;
463	prog.len = 1;
464
465	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
466	ASSERT_EQ(0, ret);
467
468	prog.len = count;
469
470	/* Too many total filter instructions. */
471	for (i = 0; i < MAX_INSNS_PER_PATH; i++) {
472		ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
473		if (ret != 0)
474			break;
475	}
476	ASSERT_NE(0, ret) {
477		TH_LOG("Allowed %d %d-insn filters (total with penalties:%d)",
478		       i, count, i * (count + 4));
479	}
480}
481
482TEST(mode_filter_cannot_move_to_strict)
483{
484	struct sock_filter filter[] = {
485		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
486	};
487	struct sock_fprog prog = {
488		.len = (unsigned short)ARRAY_SIZE(filter),
489		.filter = filter,
490	};
491	long ret;
492
493	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
494	ASSERT_EQ(0, ret);
495
496	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
497	ASSERT_EQ(0, ret);
498
499	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, 0, 0);
500	EXPECT_EQ(-1, ret);
501	EXPECT_EQ(EINVAL, errno);
502}
503
504
505TEST(mode_filter_get_seccomp)
506{
507	struct sock_filter filter[] = {
508		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
509	};
510	struct sock_fprog prog = {
511		.len = (unsigned short)ARRAY_SIZE(filter),
512		.filter = filter,
513	};
514	long ret;
515
516	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
517	ASSERT_EQ(0, ret);
518
519	ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
520	EXPECT_EQ(0, ret);
521
522	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
523	ASSERT_EQ(0, ret);
524
525	ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
526	EXPECT_EQ(2, ret);
527}
528
529
530TEST(ALLOW_all)
531{
532	struct sock_filter filter[] = {
533		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
534	};
535	struct sock_fprog prog = {
536		.len = (unsigned short)ARRAY_SIZE(filter),
537		.filter = filter,
538	};
539	long ret;
540
541	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
542	ASSERT_EQ(0, ret);
543
544	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
545	ASSERT_EQ(0, ret);
546}
547
548TEST(empty_prog)
549{
550	struct sock_filter filter[] = {
551	};
552	struct sock_fprog prog = {
553		.len = (unsigned short)ARRAY_SIZE(filter),
554		.filter = filter,
555	};
556	long ret;
557
558	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
559	ASSERT_EQ(0, ret);
560
561	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
562	EXPECT_EQ(-1, ret);
563	EXPECT_EQ(EINVAL, errno);
564}
565
566TEST(log_all)
567{
568	struct sock_filter filter[] = {
569		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_LOG),
570	};
571	struct sock_fprog prog = {
572		.len = (unsigned short)ARRAY_SIZE(filter),
573		.filter = filter,
574	};
575	long ret;
576	pid_t parent = getppid();
577
578	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
579	ASSERT_EQ(0, ret);
580
581	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
582	ASSERT_EQ(0, ret);
583
584	/* getppid() should succeed and be logged (no check for logging) */
585	EXPECT_EQ(parent, syscall(__NR_getppid));
586}
587
588TEST_SIGNAL(unknown_ret_is_kill_inside, SIGSYS)
589{
590	struct sock_filter filter[] = {
591		BPF_STMT(BPF_RET|BPF_K, 0x10000000U),
592	};
593	struct sock_fprog prog = {
594		.len = (unsigned short)ARRAY_SIZE(filter),
595		.filter = filter,
596	};
597	long ret;
598
599	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
600	ASSERT_EQ(0, ret);
601
602	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
603	ASSERT_EQ(0, ret);
604	EXPECT_EQ(0, syscall(__NR_getpid)) {
605		TH_LOG("getpid() shouldn't ever return");
606	}
607}
608
609/* Return codes >= 0x80000000 are unused. */
610TEST_SIGNAL(unknown_ret_is_kill_above_allow, SIGSYS)
611{
612	struct sock_filter filter[] = {
613		BPF_STMT(BPF_RET|BPF_K, 0x90000000U),
614	};
615	struct sock_fprog prog = {
616		.len = (unsigned short)ARRAY_SIZE(filter),
617		.filter = filter,
618	};
619	long ret;
620
621	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
622	ASSERT_EQ(0, ret);
623
624	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
625	ASSERT_EQ(0, ret);
626	EXPECT_EQ(0, syscall(__NR_getpid)) {
627		TH_LOG("getpid() shouldn't ever return");
628	}
629}
630
631TEST_SIGNAL(KILL_all, SIGSYS)
632{
633	struct sock_filter filter[] = {
634		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
635	};
636	struct sock_fprog prog = {
637		.len = (unsigned short)ARRAY_SIZE(filter),
638		.filter = filter,
639	};
640	long ret;
641
642	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
643	ASSERT_EQ(0, ret);
644
645	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
646	ASSERT_EQ(0, ret);
647}
648
649TEST_SIGNAL(KILL_one, SIGSYS)
650{
651	struct sock_filter filter[] = {
652		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
653			offsetof(struct seccomp_data, nr)),
654		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
655		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
656		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
657	};
658	struct sock_fprog prog = {
659		.len = (unsigned short)ARRAY_SIZE(filter),
660		.filter = filter,
661	};
662	long ret;
663	pid_t parent = getppid();
664
665	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
666	ASSERT_EQ(0, ret);
667
668	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
669	ASSERT_EQ(0, ret);
670
671	EXPECT_EQ(parent, syscall(__NR_getppid));
672	/* getpid() should never return. */
673	EXPECT_EQ(0, syscall(__NR_getpid));
674}
675
676TEST_SIGNAL(KILL_one_arg_one, SIGSYS)
677{
678	void *fatal_address;
679	struct sock_filter filter[] = {
680		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
681			offsetof(struct seccomp_data, nr)),
682		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_times, 1, 0),
683		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
684		/* Only bother with the lower 32 bits for now. */
685		BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(0)),
686		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K,
687			(unsigned long)&fatal_address, 0, 1),
688		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
689		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
690	};
691	struct sock_fprog prog = {
692		.len = (unsigned short)ARRAY_SIZE(filter),
693		.filter = filter,
694	};
695	long ret;
696	pid_t parent = getppid();
697	struct tms timebuf;
698	clock_t clock = times(&timebuf);
699
700	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
701	ASSERT_EQ(0, ret);
702
703	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
704	ASSERT_EQ(0, ret);
705
706	EXPECT_EQ(parent, syscall(__NR_getppid));
707	EXPECT_LE(clock, syscall(__NR_times, &timebuf));
708	/* times() should never return. */
709	EXPECT_EQ(0, syscall(__NR_times, &fatal_address));
710}
711
712TEST_SIGNAL(KILL_one_arg_six, SIGSYS)
713{
714#ifndef __NR_mmap2
715	int sysno = __NR_mmap;
716#else
717	int sysno = __NR_mmap2;
718#endif
719	struct sock_filter filter[] = {
720		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
721			offsetof(struct seccomp_data, nr)),
722		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, sysno, 1, 0),
723		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
724		/* Only bother with the lower 32 bits for now. */
725		BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(5)),
726		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0C0FFEE, 0, 1),
727		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
728		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
729	};
730	struct sock_fprog prog = {
731		.len = (unsigned short)ARRAY_SIZE(filter),
732		.filter = filter,
733	};
734	long ret;
735	pid_t parent = getppid();
736	int fd;
737	void *map1, *map2;
738	int page_size = sysconf(_SC_PAGESIZE);
739
740	ASSERT_LT(0, page_size);
741
742	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
743	ASSERT_EQ(0, ret);
744
745	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
746	ASSERT_EQ(0, ret);
747
748	fd = open("/dev/zero", O_RDONLY);
749	ASSERT_NE(-1, fd);
750
751	EXPECT_EQ(parent, syscall(__NR_getppid));
752	map1 = (void *)syscall(sysno,
753		NULL, page_size, PROT_READ, MAP_PRIVATE, fd, page_size);
754	EXPECT_NE(MAP_FAILED, map1);
755	/* mmap2() should never return. */
756	map2 = (void *)syscall(sysno,
757		 NULL, page_size, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE);
758	EXPECT_EQ(MAP_FAILED, map2);
759
760	/* The test failed, so clean up the resources. */
761	munmap(map1, page_size);
762	munmap(map2, page_size);
763	close(fd);
764}
765
766/* Thread function that can be asked to die via a seccomp filter violation. */
767void *kill_thread(void *data)
768{
769	bool die = (bool)data;
770
771	if (die) {
772		prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
773		return (void *)SIBLING_EXIT_FAILURE;
774	}
775
776	return (void *)SIBLING_EXIT_UNKILLED;
777}
778
779enum kill_t {
780	KILL_THREAD,
781	KILL_PROCESS,
782	RET_UNKNOWN
783};
784
785/* Prepare a thread that will kill itself or both of us. */
786void kill_thread_or_group(struct __test_metadata *_metadata,
787			  enum kill_t kill_how)
788{
789	pthread_t thread;
790	void *status;
791	/* Kill only when calling __NR_prctl. */
792	struct sock_filter filter_thread[] = {
793		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
794			offsetof(struct seccomp_data, nr)),
795		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
796		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL_THREAD),
797		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
798	};
799	struct sock_fprog prog_thread = {
800		.len = (unsigned short)ARRAY_SIZE(filter_thread),
801		.filter = filter_thread,
802	};
803	int kill = kill_how == KILL_PROCESS ? SECCOMP_RET_KILL_PROCESS : 0xAAAAAAAA;
804	struct sock_filter filter_process[] = {
805		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
806			offsetof(struct seccomp_data, nr)),
807		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
808		BPF_STMT(BPF_RET|BPF_K, kill),
809		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
810	};
811	struct sock_fprog prog_process = {
812		.len = (unsigned short)ARRAY_SIZE(filter_process),
813		.filter = filter_process,
814	};
815
816	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
817		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
818	}
819
820	ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0,
821			     kill_how == KILL_THREAD ? &prog_thread
822						     : &prog_process));
823
824	/*
825	 * Add the KILL_THREAD rule again to make sure that the KILL_PROCESS
826	 * flag cannot be downgraded by a new filter.
827	 */
828	if (kill_how == KILL_PROCESS)
829		ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog_thread));
830
831	/* Start a thread that will exit immediately. */
832	ASSERT_EQ(0, pthread_create(&thread, NULL, kill_thread, (void *)false));
833	ASSERT_EQ(0, pthread_join(thread, &status));
834	ASSERT_EQ(SIBLING_EXIT_UNKILLED, (unsigned long)status);
835
836	/* Start a thread that will die immediately. */
837	ASSERT_EQ(0, pthread_create(&thread, NULL, kill_thread, (void *)true));
838	ASSERT_EQ(0, pthread_join(thread, &status));
839	ASSERT_NE(SIBLING_EXIT_FAILURE, (unsigned long)status);
840
841	/*
842	 * If we get here, only the spawned thread died. Let the parent know
843	 * the whole process didn't die (i.e. this thread, the spawner,
844	 * stayed running).
845	 */
846	exit(42);
847}
848
849TEST(KILL_thread)
850{
851	int status;
852	pid_t child_pid;
853
854	child_pid = fork();
855	ASSERT_LE(0, child_pid);
856	if (child_pid == 0) {
857		kill_thread_or_group(_metadata, KILL_THREAD);
858		_exit(38);
859	}
860
861	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
862
863	/* If only the thread was killed, we'll see exit 42. */
864	ASSERT_TRUE(WIFEXITED(status));
865	ASSERT_EQ(42, WEXITSTATUS(status));
866}
867
868TEST(KILL_process)
869{
870	int status;
871	pid_t child_pid;
872
873	child_pid = fork();
874	ASSERT_LE(0, child_pid);
875	if (child_pid == 0) {
876		kill_thread_or_group(_metadata, KILL_PROCESS);
877		_exit(38);
878	}
879
880	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
881
882	/* If the entire process was killed, we'll see SIGSYS. */
883	ASSERT_TRUE(WIFSIGNALED(status));
884	ASSERT_EQ(SIGSYS, WTERMSIG(status));
885}
886
887TEST(KILL_unknown)
888{
889	int status;
890	pid_t child_pid;
891
892	child_pid = fork();
893	ASSERT_LE(0, child_pid);
894	if (child_pid == 0) {
895		kill_thread_or_group(_metadata, RET_UNKNOWN);
896		_exit(38);
897	}
898
899	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
900
901	/* If the entire process was killed, we'll see SIGSYS. */
902	EXPECT_TRUE(WIFSIGNALED(status)) {
903		TH_LOG("Unknown SECCOMP_RET is only killing the thread?");
904	}
905	ASSERT_EQ(SIGSYS, WTERMSIG(status));
906}
907
908/* TODO(wad) add 64-bit versus 32-bit arg tests. */
909TEST(arg_out_of_range)
910{
911	struct sock_filter filter[] = {
912		BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(6)),
913		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
914	};
915	struct sock_fprog prog = {
916		.len = (unsigned short)ARRAY_SIZE(filter),
917		.filter = filter,
918	};
919	long ret;
920
921	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
922	ASSERT_EQ(0, ret);
923
924	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
925	EXPECT_EQ(-1, ret);
926	EXPECT_EQ(EINVAL, errno);
927}
928
929#define ERRNO_FILTER(name, errno)					\
930	struct sock_filter _read_filter_##name[] = {			\
931		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,				\
932			offsetof(struct seccomp_data, nr)),		\
933		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),	\
934		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | errno),	\
935		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),		\
936	};								\
937	struct sock_fprog prog_##name = {				\
938		.len = (unsigned short)ARRAY_SIZE(_read_filter_##name),	\
939		.filter = _read_filter_##name,				\
940	}
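/*
 * Each ERRNO_FILTER(name, errno) expansion provides a filter that answers
 * read(2) with SECCOMP_RET_ERRNO | errno while allowing everything else,
 * together with a ready-to-install struct sock_fprog named prog_<name>.
 */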
941
942/* Make sure basic errno values are correctly passed through a filter. */
943TEST(ERRNO_valid)
944{
945	ERRNO_FILTER(valid, E2BIG);
946	long ret;
947	pid_t parent = getppid();
948
949	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
950	ASSERT_EQ(0, ret);
951
952	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_valid);
953	ASSERT_EQ(0, ret);
954
955	EXPECT_EQ(parent, syscall(__NR_getppid));
956	EXPECT_EQ(-1, read(-1, NULL, 0));
957	EXPECT_EQ(E2BIG, errno);
958}
959
960/* Make sure an errno of zero is correctly handled by the arch code. */
961TEST(ERRNO_zero)
962{
963	ERRNO_FILTER(zero, 0);
964	long ret;
965	pid_t parent = getppid();
966
967	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
968	ASSERT_EQ(0, ret);
969
970	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_zero);
971	ASSERT_EQ(0, ret);
972
973	EXPECT_EQ(parent, syscall(__NR_getppid));
974	/* "errno" of 0 is ok. */
975	EXPECT_EQ(0, read(-1, NULL, 0));
976}
977
978/*
979 * The SECCOMP_RET_DATA mask is 16 bits wide, but errno is smaller.
980 * This tests that the errno value gets capped correctly, fixed by
981 * 580c57f10768 ("seccomp: cap SECCOMP_RET_ERRNO data to MAX_ERRNO").
982 */
983TEST(ERRNO_capped)
984{
985	ERRNO_FILTER(capped, 4096);
986	long ret;
987	pid_t parent = getppid();
988
989	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
990	ASSERT_EQ(0, ret);
991
992	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_capped);
993	ASSERT_EQ(0, ret);
994
995	EXPECT_EQ(parent, syscall(__NR_getppid));
996	EXPECT_EQ(-1, read(-1, NULL, 0));
997	EXPECT_EQ(4095, errno);
998}
999
1000/*
1001 * Filters are processed in reverse order: last applied is executed first.
1002 * Since only the SECCOMP_RET_ACTION mask is tested for return values, the
1003 * SECCOMP_RET_DATA mask results will follow the most recently applied
1004 * matching filter return (and not the lowest or highest value).
1005 */
1006TEST(ERRNO_order)
1007{
1008	ERRNO_FILTER(first,  11);
1009	ERRNO_FILTER(second, 13);
1010	ERRNO_FILTER(third,  12);
1011	long ret;
1012	pid_t parent = getppid();
1013
1014	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1015	ASSERT_EQ(0, ret);
1016
1017	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_first);
1018	ASSERT_EQ(0, ret);
1019
1020	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_second);
1021	ASSERT_EQ(0, ret);
1022
1023	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_third);
1024	ASSERT_EQ(0, ret);
1025
1026	EXPECT_EQ(parent, syscall(__NR_getppid));
1027	EXPECT_EQ(-1, read(-1, NULL, 0));
1028	EXPECT_EQ(12, errno);
1029}
1030
1031FIXTURE(TRAP) {
1032	struct sock_fprog prog;
1033};
1034
1035FIXTURE_SETUP(TRAP)
1036{
1037	struct sock_filter filter[] = {
1038		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
1039			offsetof(struct seccomp_data, nr)),
1040		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
1041		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP),
1042		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1043	};
1044
1045	memset(&self->prog, 0, sizeof(self->prog));
1046	self->prog.filter = malloc(sizeof(filter));
1047	ASSERT_NE(NULL, self->prog.filter);
1048	memcpy(self->prog.filter, filter, sizeof(filter));
1049	self->prog.len = (unsigned short)ARRAY_SIZE(filter);
1050}
1051
1052FIXTURE_TEARDOWN(TRAP)
1053{
1054	if (self->prog.filter)
1055		free(self->prog.filter);
1056}
1057
1058TEST_F_SIGNAL(TRAP, dfl, SIGSYS)
1059{
1060	long ret;
1061
1062	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1063	ASSERT_EQ(0, ret);
1064
1065	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
1066	ASSERT_EQ(0, ret);
1067	syscall(__NR_getpid);
1068}
1069
1070/* Ensure that SIGSYS overrides SIG_IGN */
1071TEST_F_SIGNAL(TRAP, ign, SIGSYS)
1072{
1073	long ret;
1074
1075	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1076	ASSERT_EQ(0, ret);
1077
1078	signal(SIGSYS, SIG_IGN);
1079
1080	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
1081	ASSERT_EQ(0, ret);
1082	syscall(__NR_getpid);
1083}
1084
1085static siginfo_t TRAP_info;
1086static volatile int TRAP_nr;
1087static void TRAP_action(int nr, siginfo_t *info, void *void_context)
1088{
1089	memcpy(&TRAP_info, info, sizeof(TRAP_info));
1090	TRAP_nr = nr;
1091}
1092
1093TEST_F(TRAP, handler)
1094{
1095	int ret, test;
1096	struct sigaction act;
1097	sigset_t mask;
1098
1099	memset(&act, 0, sizeof(act));
1100	sigemptyset(&mask);
1101	sigaddset(&mask, SIGSYS);
1102
1103	act.sa_sigaction = &TRAP_action;
1104	act.sa_flags = SA_SIGINFO;
1105	ret = sigaction(SIGSYS, &act, NULL);
1106	ASSERT_EQ(0, ret) {
1107		TH_LOG("sigaction failed");
1108	}
1109	ret = sigprocmask(SIG_UNBLOCK, &mask, NULL);
1110	ASSERT_EQ(0, ret) {
1111		TH_LOG("sigprocmask failed");
1112	}
1113
1114	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1115	ASSERT_EQ(0, ret);
1116	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
1117	ASSERT_EQ(0, ret);
1118	TRAP_nr = 0;
1119	memset(&TRAP_info, 0, sizeof(TRAP_info));
1120	/* Expect the registers to be rolled back. (nr = error) may vary
1121	 * based on arch. */
1122	ret = syscall(__NR_getpid);
1123	/* Silence gcc warning about volatile. */
1124	test = TRAP_nr;
1125	EXPECT_EQ(SIGSYS, test);
1126	struct local_sigsys {
1127		void *_call_addr;	/* calling user insn */
1128		int _syscall;		/* triggering system call number */
1129		unsigned int _arch;	/* AUDIT_ARCH_* of syscall */
1130	} *sigsys = (struct local_sigsys *)
1131#ifdef si_syscall
1132		&(TRAP_info.si_call_addr);
1133#else
1134		&TRAP_info.si_pid;
1135#endif
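	/*
	 * When si_syscall is not available (pre-2.26 glibc), the _sigsys
	 * fields occupy the same siginfo union storage that begins at
	 * si_pid, which is why the cast above starts there.
	 */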
1136	EXPECT_EQ(__NR_getpid, sigsys->_syscall);
1137	/* Make sure arch is non-zero. */
1138	EXPECT_NE(0, sigsys->_arch);
1139	EXPECT_NE(0, (unsigned long)sigsys->_call_addr);
1140}
1141
1142FIXTURE(precedence) {
1143	struct sock_fprog allow;
1144	struct sock_fprog log;
1145	struct sock_fprog trace;
1146	struct sock_fprog error;
1147	struct sock_fprog trap;
1148	struct sock_fprog kill;
1149};
1150
1151FIXTURE_SETUP(precedence)
1152{
1153	struct sock_filter allow_insns[] = {
1154		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1155	};
1156	struct sock_filter log_insns[] = {
1157		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
1158			offsetof(struct seccomp_data, nr)),
1159		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
1160		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1161		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_LOG),
1162	};
1163	struct sock_filter trace_insns[] = {
1164		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
1165			offsetof(struct seccomp_data, nr)),
1166		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
1167		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1168		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE),
1169	};
1170	struct sock_filter error_insns[] = {
1171		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
1172			offsetof(struct seccomp_data, nr)),
1173		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
1174		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1175		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO),
1176	};
1177	struct sock_filter trap_insns[] = {
1178		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
1179			offsetof(struct seccomp_data, nr)),
1180		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
1181		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1182		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP),
1183	};
1184	struct sock_filter kill_insns[] = {
1185		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
1186			offsetof(struct seccomp_data, nr)),
1187		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
1188		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1189		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
1190	};
1191
1192	memset(self, 0, sizeof(*self));
1193#define FILTER_ALLOC(_x) \
1194	self->_x.filter = malloc(sizeof(_x##_insns)); \
1195	ASSERT_NE(NULL, self->_x.filter); \
1196	memcpy(self->_x.filter, &_x##_insns, sizeof(_x##_insns)); \
1197	self->_x.len = (unsigned short)ARRAY_SIZE(_x##_insns)
1198	FILTER_ALLOC(allow);
1199	FILTER_ALLOC(log);
1200	FILTER_ALLOC(trace);
1201	FILTER_ALLOC(error);
1202	FILTER_ALLOC(trap);
1203	FILTER_ALLOC(kill);
1204}
1205
1206FIXTURE_TEARDOWN(precedence)
1207{
1208#define FILTER_FREE(_x) if (self->_x.filter) free(self->_x.filter)
1209	FILTER_FREE(allow);
1210	FILTER_FREE(log);
1211	FILTER_FREE(trace);
1212	FILTER_FREE(error);
1213	FILTER_FREE(trap);
1214	FILTER_FREE(kill);
1215}
1216
1217TEST_F(precedence, allow_ok)
1218{
1219	pid_t parent, res = 0;
1220	long ret;
1221
1222	parent = getppid();
1223	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1224	ASSERT_EQ(0, ret);
1225
1226	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
1227	ASSERT_EQ(0, ret);
1228	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
1229	ASSERT_EQ(0, ret);
1230	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
1231	ASSERT_EQ(0, ret);
1232	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
1233	ASSERT_EQ(0, ret);
1234	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
1235	ASSERT_EQ(0, ret);
1236	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
1237	ASSERT_EQ(0, ret);
1238	/* Should work just fine. */
1239	res = syscall(__NR_getppid);
1240	EXPECT_EQ(parent, res);
1241}
1242
1243TEST_F_SIGNAL(precedence, kill_is_highest, SIGSYS)
1244{
1245	pid_t parent, res = 0;
1246	long ret;
1247
1248	parent = getppid();
1249	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1250	ASSERT_EQ(0, ret);
1251
1252	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
1253	ASSERT_EQ(0, ret);
1254	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
1255	ASSERT_EQ(0, ret);
1256	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
1257	ASSERT_EQ(0, ret);
1258	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
1259	ASSERT_EQ(0, ret);
1260	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
1261	ASSERT_EQ(0, ret);
1262	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
1263	ASSERT_EQ(0, ret);
1264	/* Should work just fine. */
1265	res = syscall(__NR_getppid);
1266	EXPECT_EQ(parent, res);
1267	/* getpid() should never return. */
1268	res = syscall(__NR_getpid);
1269	EXPECT_EQ(0, res);
1270}
1271
1272TEST_F_SIGNAL(precedence, kill_is_highest_in_any_order, SIGSYS)
1273{
1274	pid_t parent;
1275	long ret;
1276
1277	parent = getppid();
1278	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1279	ASSERT_EQ(0, ret);
1280
1281	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
1282	ASSERT_EQ(0, ret);
1283	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
1284	ASSERT_EQ(0, ret);
1285	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
1286	ASSERT_EQ(0, ret);
1287	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
1288	ASSERT_EQ(0, ret);
1289	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
1290	ASSERT_EQ(0, ret);
1291	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
1292	ASSERT_EQ(0, ret);
1293	/* Should work just fine. */
1294	EXPECT_EQ(parent, syscall(__NR_getppid));
1295	/* getpid() should never return. */
1296	EXPECT_EQ(0, syscall(__NR_getpid));
1297}
1298
1299TEST_F_SIGNAL(precedence, trap_is_second, SIGSYS)
1300{
1301	pid_t parent;
1302	long ret;
1303
1304	parent = getppid();
1305	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1306	ASSERT_EQ(0, ret);
1307
1308	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
1309	ASSERT_EQ(0, ret);
1310	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
1311	ASSERT_EQ(0, ret);
1312	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
1313	ASSERT_EQ(0, ret);
1314	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
1315	ASSERT_EQ(0, ret);
1316	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
1317	ASSERT_EQ(0, ret);
1318	/* Should work just fine. */
1319	EXPECT_EQ(parent, syscall(__NR_getppid));
1320	/* getpid() should never return. */
1321	EXPECT_EQ(0, syscall(__NR_getpid));
1322}
1323
1324TEST_F_SIGNAL(precedence, trap_is_second_in_any_order, SIGSYS)
1325{
1326	pid_t parent;
1327	long ret;
1328
1329	parent = getppid();
1330	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1331	ASSERT_EQ(0, ret);
1332
1333	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
1334	ASSERT_EQ(0, ret);
1335	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
1336	ASSERT_EQ(0, ret);
1337	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
1338	ASSERT_EQ(0, ret);
1339	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
1340	ASSERT_EQ(0, ret);
1341	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
1342	ASSERT_EQ(0, ret);
1343	/* Should work just fine. */
1344	EXPECT_EQ(parent, syscall(__NR_getppid));
1345	/* getpid() should never return. */
1346	EXPECT_EQ(0, syscall(__NR_getpid));
1347}
1348
1349TEST_F(precedence, errno_is_third)
1350{
1351	pid_t parent;
1352	long ret;
1353
1354	parent = getppid();
1355	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1356	ASSERT_EQ(0, ret);
1357
1358	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
1359	ASSERT_EQ(0, ret);
1360	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
1361	ASSERT_EQ(0, ret);
1362	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
1363	ASSERT_EQ(0, ret);
1364	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
1365	ASSERT_EQ(0, ret);
1366	/* Should work just fine. */
1367	EXPECT_EQ(parent, syscall(__NR_getppid));
1368	EXPECT_EQ(0, syscall(__NR_getpid));
1369}
1370
1371TEST_F(precedence, errno_is_third_in_any_order)
1372{
1373	pid_t parent;
1374	long ret;
1375
1376	parent = getppid();
1377	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1378	ASSERT_EQ(0, ret);
1379
1380	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
1381	ASSERT_EQ(0, ret);
1382	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
1383	ASSERT_EQ(0, ret);
1384	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
1385	ASSERT_EQ(0, ret);
1386	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
1387	ASSERT_EQ(0, ret);
1388	/* Should work just fine. */
1389	EXPECT_EQ(parent, syscall(__NR_getppid));
1390	EXPECT_EQ(0, syscall(__NR_getpid));
1391}
1392
1393TEST_F(precedence, trace_is_fourth)
1394{
1395	pid_t parent;
1396	long ret;
1397
1398	parent = getppid();
1399	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1400	ASSERT_EQ(0, ret);
1401
1402	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
1403	ASSERT_EQ(0, ret);
1404	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
1405	ASSERT_EQ(0, ret);
1406	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
1407	ASSERT_EQ(0, ret);
1408	/* Should work just fine. */
1409	EXPECT_EQ(parent, syscall(__NR_getppid));
1410	/* No ptracer */
1411	EXPECT_EQ(-1, syscall(__NR_getpid));
1412}
1413
1414TEST_F(precedence, trace_is_fourth_in_any_order)
1415{
1416	pid_t parent;
1417	long ret;
1418
1419	parent = getppid();
1420	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1421	ASSERT_EQ(0, ret);
1422
1423	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
1424	ASSERT_EQ(0, ret);
1425	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
1426	ASSERT_EQ(0, ret);
1427	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
1428	ASSERT_EQ(0, ret);
1429	/* Should work just fine. */
1430	EXPECT_EQ(parent, syscall(__NR_getppid));
1431	/* No ptracer */
1432	EXPECT_EQ(-1, syscall(__NR_getpid));
1433}
1434
1435TEST_F(precedence, log_is_fifth)
1436{
1437	pid_t mypid, parent;
1438	long ret;
1439
1440	mypid = getpid();
1441	parent = getppid();
1442	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1443	ASSERT_EQ(0, ret);
1444
1445	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
1446	ASSERT_EQ(0, ret);
1447	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
1448	ASSERT_EQ(0, ret);
1449	/* Should work just fine. */
1450	EXPECT_EQ(parent, syscall(__NR_getppid));
1451	/* Should also work just fine */
1452	EXPECT_EQ(mypid, syscall(__NR_getpid));
1453}
1454
1455TEST_F(precedence, log_is_fifth_in_any_order)
1456{
1457	pid_t mypid, parent;
1458	long ret;
1459
1460	mypid = getpid();
1461	parent = getppid();
1462	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1463	ASSERT_EQ(0, ret);
1464
1465	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
1466	ASSERT_EQ(0, ret);
1467	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
1468	ASSERT_EQ(0, ret);
1469	/* Should work just fine. */
1470	EXPECT_EQ(parent, syscall(__NR_getppid));
1471	/* Should also work just fine */
1472	EXPECT_EQ(mypid, syscall(__NR_getpid));
1473}
1474
1475#ifndef PTRACE_O_TRACESECCOMP
1476#define PTRACE_O_TRACESECCOMP	0x00000080
1477#endif
1478
1479/* Catch the Ubuntu 12.04 value error. */
1480#if PTRACE_EVENT_SECCOMP != 7
1481#undef PTRACE_EVENT_SECCOMP
1482#endif
1483
1484#ifndef PTRACE_EVENT_SECCOMP
1485#define PTRACE_EVENT_SECCOMP 7
1486#endif
1487
1488#define IS_SECCOMP_EVENT(status) ((status >> 16) == PTRACE_EVENT_SECCOMP)
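/*
 * For a ptrace event stop, the wait status encodes the event number in
 * bits 16 and up (status >> 8 == SIGTRAP | event << 8), which is what
 * the check above relies on.
 */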
1489bool tracer_running;
1490void tracer_stop(int sig)
1491{
1492	tracer_running = false;
1493}
1494
1495typedef void tracer_func_t(struct __test_metadata *_metadata,
1496			   pid_t tracee, int status, void *args);
1497
1498void start_tracer(struct __test_metadata *_metadata, int fd, pid_t tracee,
1499	    tracer_func_t tracer_func, void *args, bool ptrace_syscall)
1500{
1501	int ret = -1;
1502	struct sigaction action = {
1503		.sa_handler = tracer_stop,
1504	};
1505
1506	/* Allow external shutdown. */
1507	tracer_running = true;
1508	ASSERT_EQ(0, sigaction(SIGUSR1, &action, NULL));
1509
1510	errno = 0;
1511	while (ret == -1 && errno != EINVAL)
1512		ret = ptrace(PTRACE_ATTACH, tracee, NULL, 0);
1513	ASSERT_EQ(0, ret) {
1514		kill(tracee, SIGKILL);
1515	}
1516	/* Wait for attach stop */
1517	wait(NULL);
1518
1519	ret = ptrace(PTRACE_SETOPTIONS, tracee, NULL, ptrace_syscall ?
1520						      PTRACE_O_TRACESYSGOOD :
1521						      PTRACE_O_TRACESECCOMP);
1522	ASSERT_EQ(0, ret) {
1523		TH_LOG("Failed to set PTRACE_O_TRACESECCOMP");
1524		kill(tracee, SIGKILL);
1525	}
1526	ret = ptrace(ptrace_syscall ? PTRACE_SYSCALL : PTRACE_CONT,
1527		     tracee, NULL, 0);
1528	ASSERT_EQ(0, ret);
1529
1530	/* Unblock the tracee */
1531	ASSERT_EQ(1, write(fd, "A", 1));
1532	ASSERT_EQ(0, close(fd));
1533
1534	/* Run until we're shut down. Must assert to stop execution. */
1535	while (tracer_running) {
1536		int status;
1537
1538		if (wait(&status) != tracee)
1539			continue;
1540		if (WIFSIGNALED(status) || WIFEXITED(status))
1541			/* Child is dead. Time to go. */
1542			return;
1543
1544		/* Check if this is a seccomp event. */
1545		ASSERT_EQ(!ptrace_syscall, IS_SECCOMP_EVENT(status));
1546
1547		tracer_func(_metadata, tracee, status, args);
1548
1549		ret = ptrace(ptrace_syscall ? PTRACE_SYSCALL : PTRACE_CONT,
1550			     tracee, NULL, 0);
1551		ASSERT_EQ(0, ret);
1552	}
1553	/* Directly report the status of our test harness results. */
1554	syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
1555}
1556
1557/* Common tracer setup/teardown functions. */
1558void cont_handler(int num)
1559{ }
1560pid_t setup_trace_fixture(struct __test_metadata *_metadata,
1561			  tracer_func_t func, void *args, bool ptrace_syscall)
1562{
1563	char sync;
1564	int pipefd[2];
1565	pid_t tracer_pid;
1566	pid_t tracee = getpid();
1567
1568	/* Setup a pipe for clean synchronization. */
1569	ASSERT_EQ(0, pipe(pipefd));
1570
1571	/* Fork a child which we'll promote to tracer */
1572	tracer_pid = fork();
1573	ASSERT_LE(0, tracer_pid);
1574	signal(SIGALRM, cont_handler);
1575	if (tracer_pid == 0) {
1576		close(pipefd[0]);
1577		start_tracer(_metadata, pipefd[1], tracee, func, args,
1578			     ptrace_syscall);
1579		syscall(__NR_exit, 0);
1580	}
1581	close(pipefd[1]);
1582	prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
1583	read(pipefd[0], &sync, 1);
1584	close(pipefd[0]);
1585
1586	return tracer_pid;
1587}
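/*
 * The pipe above is the only synchronization: the tracee blocks in
 * read(pipefd[0]) until start_tracer() has attached and set its ptrace
 * options, then gets released by the single byte written there.
 */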
1588
1589void teardown_trace_fixture(struct __test_metadata *_metadata,
1590			    pid_t tracer)
1591{
1592	if (tracer) {
1593		int status;
1594		/*
1595		 * Extract the exit code from the other process and
1596		 * adopt it for ourselves in case its asserts failed.
1597		 */
1598		ASSERT_EQ(0, kill(tracer, SIGUSR1));
1599		ASSERT_EQ(tracer, waitpid(tracer, &status, 0));
1600		if (WEXITSTATUS(status))
1601			_metadata->passed = 0;
1602	}
1603}
1604
1605/* "poke" tracer arguments and function. */
1606struct tracer_args_poke_t {
1607	unsigned long poke_addr;
1608};
1609
1610void tracer_poke(struct __test_metadata *_metadata, pid_t tracee, int status,
1611		 void *args)
1612{
1613	int ret;
1614	unsigned long msg;
1615	struct tracer_args_poke_t *info = (struct tracer_args_poke_t *)args;
1616
1617	ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
1618	EXPECT_EQ(0, ret);
1619	/* If this fails, don't try to recover. */
1620	ASSERT_EQ(0x1001, msg) {
1621		kill(tracee, SIGKILL);
1622	}
1623	/*
1624	 * Poke in the message.
1625	 * Registers are not touched to try to keep this relatively arch
1626	 * agnostic.
1627	 */
1628	ret = ptrace(PTRACE_POKEDATA, tracee, info->poke_addr, 0x1001);
1629	EXPECT_EQ(0, ret);
1630}
1631
1632FIXTURE(TRACE_poke) {
1633	struct sock_fprog prog;
1634	pid_t tracer;
1635	long poked;
1636	struct tracer_args_poke_t tracer_args;
1637};
1638
1639FIXTURE_SETUP(TRACE_poke)
1640{
1641	struct sock_filter filter[] = {
1642		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
1643			offsetof(struct seccomp_data, nr)),
1644		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
1645		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1001),
1646		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1647	};
1648
1649	self->poked = 0;
1650	memset(&self->prog, 0, sizeof(self->prog));
1651	self->prog.filter = malloc(sizeof(filter));
1652	ASSERT_NE(NULL, self->prog.filter);
1653	memcpy(self->prog.filter, filter, sizeof(filter));
1654	self->prog.len = (unsigned short)ARRAY_SIZE(filter);
1655
1656	/* Set up tracer args. */
1657	self->tracer_args.poke_addr = (unsigned long)&self->poked;
1658
1659	/* Launch tracer. */
1660	self->tracer = setup_trace_fixture(_metadata, tracer_poke,
1661					   &self->tracer_args, false);
1662}
1663
1664FIXTURE_TEARDOWN(TRACE_poke)
1665{
1666	teardown_trace_fixture(_metadata, self->tracer);
1667	if (self->prog.filter)
1668		free(self->prog.filter);
1669}
1670
1671TEST_F(TRACE_poke, read_has_side_effects)
1672{
1673	ssize_t ret;
1674
1675	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1676	ASSERT_EQ(0, ret);
1677
1678	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
1679	ASSERT_EQ(0, ret);
1680
1681	EXPECT_EQ(0, self->poked);
1682	ret = read(-1, NULL, 0);
1683	EXPECT_EQ(-1, ret);
1684	EXPECT_EQ(0x1001, self->poked);
1685}
1686
1687TEST_F(TRACE_poke, getpid_runs_normally)
1688{
1689	long ret;
1690
1691	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1692	ASSERT_EQ(0, ret);
1693
1694	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
1695	ASSERT_EQ(0, ret);
1696
1697	EXPECT_EQ(0, self->poked);
1698	EXPECT_NE(0, syscall(__NR_getpid));
1699	EXPECT_EQ(0, self->poked);
1700}
1701
1702#if defined(__x86_64__)
1703# define ARCH_REGS		struct user_regs_struct
1704# define SYSCALL_NUM(_regs)	(_regs).orig_rax
1705# define SYSCALL_RET(_regs)	(_regs).rax
1706#elif defined(__i386__)
1707# define ARCH_REGS		struct user_regs_struct
1708# define SYSCALL_NUM(_regs)	(_regs).orig_eax
1709# define SYSCALL_RET(_regs)	(_regs).eax
1710#elif defined(__arm__)
1711# define ARCH_REGS		struct pt_regs
1712# define SYSCALL_NUM(_regs)	(_regs).ARM_r7
1713# ifndef PTRACE_SET_SYSCALL
1714#  define PTRACE_SET_SYSCALL   23
1715# endif
1716# define SYSCALL_NUM_SET(_regs, _nr)	\
1717		EXPECT_EQ(0, ptrace(PTRACE_SET_SYSCALL, tracee, NULL, _nr))
1718# define SYSCALL_RET(_regs)	(_regs).ARM_r0
1719#elif defined(__aarch64__)
1720# define ARCH_REGS		struct user_pt_regs
1721# define SYSCALL_NUM(_regs)	(_regs).regs[8]
1722# ifndef NT_ARM_SYSTEM_CALL
1723#  define NT_ARM_SYSTEM_CALL 0x404
1724# endif
1725# define SYSCALL_NUM_SET(_regs, _nr)				\
1726	do {							\
1727		struct iovec __v;				\
1728		typeof(_nr) __nr = (_nr);			\
1729		__v.iov_base = &__nr;				\
1730		__v.iov_len = sizeof(__nr);			\
1731		EXPECT_EQ(0, ptrace(PTRACE_SETREGSET, tracee,	\
1732				    NT_ARM_SYSTEM_CALL, &__v));	\
1733	} while (0)
1734# define SYSCALL_RET(_regs)	(_regs).regs[0]
1735#elif defined(__loongarch__)
1736# define ARCH_REGS		struct user_pt_regs
1737# define SYSCALL_NUM(_regs)	(_regs).regs[11]
1738# define SYSCALL_RET(_regs)	(_regs).regs[4]
1739#elif defined(__riscv) && __riscv_xlen == 64
1740# define ARCH_REGS		struct user_regs_struct
1741# define SYSCALL_NUM(_regs)	(_regs).a7
1742# define SYSCALL_RET(_regs)	(_regs).a0
1743#elif defined(__csky__)
1744# define ARCH_REGS		struct pt_regs
1745#  if defined(__CSKYABIV2__)
1746#   define SYSCALL_NUM(_regs)	(_regs).regs[3]
1747#  else
1748#   define SYSCALL_NUM(_regs)	(_regs).regs[9]
1749#  endif
1750# define SYSCALL_RET(_regs)	(_regs).a0
1751#elif defined(__hppa__)
1752# define ARCH_REGS		struct user_regs_struct
1753# define SYSCALL_NUM(_regs)	(_regs).gr[20]
1754# define SYSCALL_RET(_regs)	(_regs).gr[28]
1755#elif defined(__powerpc__)
1756# define ARCH_REGS		struct pt_regs
1757# define SYSCALL_NUM(_regs)	(_regs).gpr[0]
1758# define SYSCALL_RET(_regs)	(_regs).gpr[3]
1759# define SYSCALL_RET_SET(_regs, _val)				\
1760	do {							\
1761		typeof(_val) _result = (_val);			\
1762		if ((_regs.trap & 0xfff0) == 0x3000) {		\
1763			/*					\
1764			 * scv 0 system call uses -ve result	\
1765			 * for error, so no need to adjust.	\
1766			 */					\
1767			SYSCALL_RET(_regs) = _result;		\
1768		} else {					\
1769			/*					\
1770			 * A syscall error is signaled by the	\
1771			 * CR0 SO bit and the code is stored as	\
1772			 * a positive value.			\
1773			 */					\
1774			if (_result < 0) {			\
1775				SYSCALL_RET(_regs) = -_result;	\
1776				(_regs).ccr |= 0x10000000;	\
1777			} else {				\
1778				SYSCALL_RET(_regs) = _result;	\
1779				(_regs).ccr &= ~0x10000000;	\
1780			}					\
1781		}						\
1782	} while (0)
1783# define SYSCALL_RET_SET_ON_PTRACE_EXIT
1784#elif defined(__s390__)
1785# define ARCH_REGS		s390_regs
1786# define SYSCALL_NUM(_regs)	(_regs).gprs[2]
1787# define SYSCALL_RET_SET(_regs, _val)			\
1788		TH_LOG("Can't modify syscall return on this architecture")
1789#elif defined(__mips__)
1790# include <asm/unistd_nr_n32.h>
1791# include <asm/unistd_nr_n64.h>
1792# include <asm/unistd_nr_o32.h>
1793# define ARCH_REGS		struct pt_regs
1794# define SYSCALL_NUM(_regs)				\
1795	({						\
1796		typeof((_regs).regs[2]) _nr;		\
1797		if ((_regs).regs[2] == __NR_O32_Linux)	\
1798			_nr = (_regs).regs[4];		\
1799		else					\
1800			_nr = (_regs).regs[2];		\
1801		_nr;					\
1802	})
1803# define SYSCALL_NUM_SET(_regs, _nr)			\
1804	do {						\
1805		if ((_regs).regs[2] == __NR_O32_Linux)	\
1806			(_regs).regs[4] = _nr;		\
1807		else					\
1808			(_regs).regs[2] = _nr;		\
1809	} while (0)
1810# define SYSCALL_RET_SET(_regs, _val)			\
1811		TH_LOG("Can't modify syscall return on this architecture")
1812#elif defined(__xtensa__)
1813# define ARCH_REGS		struct user_pt_regs
1814# define SYSCALL_NUM(_regs)	(_regs).syscall
1815/*
1816 * On xtensa, the syscall return value is in register a2 of the
1817 * current register window, which is not at a fixed location.
1818 */
1819#define SYSCALL_RET(_regs)	(_regs).a[(_regs).windowbase * 4 + 2]
1820#elif defined(__sh__)
1821# define ARCH_REGS		struct pt_regs
1822# define SYSCALL_NUM(_regs)	(_regs).regs[3]
1823# define SYSCALL_RET(_regs)	(_regs).regs[0]
1824#else
1825# error "Do not know how to find your architecture's registers and syscalls"
1826#endif
1827
1828/*
1829 * Most architectures can change the syscall by just updating the
1830 * associated register. This is the default if not defined above.
1831 */
1832#ifndef SYSCALL_NUM_SET
1833# define SYSCALL_NUM_SET(_regs, _nr)		\
1834	do {					\
1835		SYSCALL_NUM(_regs) = (_nr);	\
1836	} while (0)
1837#endif
1838/*
1839 * Most architectures can change the syscall return value by just
1840 * writing to the SYSCALL_RET register. This is the default if not
1841 * defined above. If an architecture cannot set the return value
1842 * (for example when the syscall number and return value share a
1843 * register), report it with TH_LOG() in an arch-specific definition
1844 * of SYSCALL_RET_SET() above, and leave SYSCALL_RET undefined.
1845 */
1846#if !defined(SYSCALL_RET) && !defined(SYSCALL_RET_SET)
1847# error "One of SYSCALL_RET or SYSCALL_RET_SET is needed for this arch"
1848#endif
1849#ifndef SYSCALL_RET_SET
1850# define SYSCALL_RET_SET(_regs, _val)		\
1851	do {					\
1852		SYSCALL_RET(_regs) = (_val);	\
1853	} while (0)
1854#endif
1855
1856/* When the syscall return can't be changed, stub out the tests for it. */
1857#ifndef SYSCALL_RET
1858# define EXPECT_SYSCALL_RETURN(val, action)	EXPECT_EQ(-1, action)
1859#else
1860# define EXPECT_SYSCALL_RETURN(val, action)		\
1861	do {						\
1862		errno = 0;				\
1863		if (val < 0) {				\
1864			EXPECT_EQ(-1, action);		\
1865			EXPECT_EQ(-(val), errno);	\
1866		} else {				\
1867			EXPECT_EQ(val, action);		\
1868		}					\
1869	} while (0)
1870#endif
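/*
 * In the full form above, a negative val means the call is expected to
 * fail with -1 and errno == -val, while a non-negative val is expected
 * back verbatim; the reduced form only checks for the -1 failure when
 * the return register cannot be written.
 */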
1871
1872/*
1873 * Some architectures (e.g. powerpc) can only set syscall
1874 * return values on syscall exit during ptrace.
1875 */
1876const bool ptrace_entry_set_syscall_nr = true;
1877const bool ptrace_entry_set_syscall_ret =
1878#ifndef SYSCALL_RET_SET_ON_PTRACE_EXIT
1879	true;
1880#else
1881	false;
1882#endif
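/*
 * tracer_ptrace() below consults these two booleans to decide whether to
 * rewrite the syscall number and the return value at the syscall-entry
 * stop or at the syscall-exit stop.
 */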
1883
1884/*
1885 * Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for
1886 * architectures without HAVE_ARCH_TRACEHOOK (e.g. User-mode Linux).
1887 */
1888#if defined(__x86_64__) || defined(__i386__) || defined(__mips__)
1889# define ARCH_GETREGS(_regs)	ptrace(PTRACE_GETREGS, tracee, 0, &(_regs))
1890# define ARCH_SETREGS(_regs)	ptrace(PTRACE_SETREGS, tracee, 0, &(_regs))
1891#else
1892# define ARCH_GETREGS(_regs)	({					\
1893		struct iovec __v;					\
1894		__v.iov_base = &(_regs);				\
1895		__v.iov_len = sizeof(_regs);				\
1896		ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &__v);	\
1897	})
1898# define ARCH_SETREGS(_regs)	({					\
1899		struct iovec __v;					\
1900		__v.iov_base = &(_regs);				\
1901		__v.iov_len = sizeof(_regs);				\
1902		ptrace(PTRACE_SETREGSET, tracee, NT_PRSTATUS, &__v);	\
1903	})
1904#endif
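/*
 * The regset path transfers the whole register file through a struct
 * iovec tagged NT_PRSTATUS, which is the PTRACE_GETREGSET/SETREGSET
 * equivalent of the older PTRACE_GETREGS/SETREGS calls.
 */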
1905
1906/* Architecture-specific syscall fetching routine. */
1907int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
1908{
1909	ARCH_REGS regs;
1910
1911	EXPECT_EQ(0, ARCH_GETREGS(regs)) {
1912		return -1;
1913	}
1914
1915	return SYSCALL_NUM(regs);
1916}
1917
1918/* Architecture-specific syscall changing routine. */
1919void __change_syscall(struct __test_metadata *_metadata,
1920		    pid_t tracee, long *syscall, long *ret)
1921{
1922	ARCH_REGS orig, regs;
1923
1924	/* Do not get/set registers if we have nothing to do. */
1925	if (!syscall && !ret)
1926		return;
1927
1928	EXPECT_EQ(0, ARCH_GETREGS(regs)) {
1929		return;
1930	}
1931	orig = regs;
1932
1933	if (syscall)
1934		SYSCALL_NUM_SET(regs, *syscall);
1935
1936	if (ret)
1937		SYSCALL_RET_SET(regs, *ret);
1938
1939	/* Flush any register changes made. */
1940	if (memcmp(&orig, &regs, sizeof(orig)) != 0)
1941		EXPECT_EQ(0, ARCH_SETREGS(regs));
1942}
1943
1944/* Change only syscall number. */
1945void change_syscall_nr(struct __test_metadata *_metadata,
1946		       pid_t tracee, long syscall)
1947{
1948	__change_syscall(_metadata, tracee, &syscall, NULL);
1949}
1950
1951/* Change syscall return value (and set syscall number to -1). */
1952void change_syscall_ret(struct __test_metadata *_metadata,
1953			pid_t tracee, long ret)
1954{
1955	long syscall = -1;
1956
1957	__change_syscall(_metadata, tracee, &syscall, &ret);
1958}
1959
1960void tracer_seccomp(struct __test_metadata *_metadata, pid_t tracee,
1961		    int status, void *args)
1962{
1963	int ret;
1964	unsigned long msg;
1965
1966	/* Make sure we got the right message. */
1967	ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
1968	EXPECT_EQ(0, ret);
1969
1970	/* Validate and take action on expected syscalls. */
1971	switch (msg) {
1972	case 0x1002:
1973		/* change getpid to getppid. */
1974		EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee));
1975		change_syscall_nr(_metadata, tracee, __NR_getppid);
1976		break;
1977	case 0x1003:
1978		/* skip gettid with valid return code. */
1979		EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee));
1980		change_syscall_ret(_metadata, tracee, 45000);
1981		break;
1982	case 0x1004:
1983		/* skip openat with error. */
1984		EXPECT_EQ(__NR_openat, get_syscall(_metadata, tracee));
1985		change_syscall_ret(_metadata, tracee, -ESRCH);
1986		break;
1987	case 0x1005:
1988		/* do nothing (allow getppid) */
1989		EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee));
1990		break;
1991	default:
1992		EXPECT_EQ(0, msg) {
1993			TH_LOG("Unknown PTRACE_GETEVENTMSG: 0x%lx", msg);
1994			kill(tracee, SIGKILL);
1995		}
1996	}
1997
1998}
1999
2000FIXTURE(TRACE_syscall) {
2001	struct sock_fprog prog;
2002	pid_t tracer, mytid, mypid, parent;
2003	long syscall_nr;
2004};
2005
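/*
 * Tracer callback used with plain PTRACE_SYSCALL: stops arrive in
 * entry/exit pairs, the syscall number is captured on entry, and the
 * same rewrites as tracer_seccomp are applied in whichever phase the
 * architecture supports.
 */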
2006void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee,
2007		   int status, void *args)
2008{
2009	int ret;
2010	unsigned long msg;
2011	static bool entry;
2012	long syscall_nr_val, syscall_ret_val;
2013	long *syscall_nr = NULL, *syscall_ret = NULL;
2014	FIXTURE_DATA(TRACE_syscall) *self = args;
2015
2016	/*
2017	 * The traditional way to tell PTRACE_SYSCALL entry/exit
2018	 * is by counting.
2019	 */
2020	entry = !entry;
2021
2022	/* Make sure we got an appropriate message. */
2023	ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
2024	EXPECT_EQ(0, ret);
2025	EXPECT_EQ(entry ? PTRACE_EVENTMSG_SYSCALL_ENTRY
2026			: PTRACE_EVENTMSG_SYSCALL_EXIT, msg);
2027
2028	/*
2029	 * Some architectures only support setting return values during
2030	 * syscall exit under ptrace, and on exit the syscall number may
	 * no longer be available. Therefore, save the initial syscall
2032	 * number here, so it can be examined during both entry and exit
2033	 * phases.
2034	 */
2035	if (entry)
2036		self->syscall_nr = get_syscall(_metadata, tracee);
2037
2038	/*
2039	 * Depending on the architecture's syscall setting abilities, we
2040	 * pick which things to set during this phase (entry or exit).
2041	 */
2042	if (entry == ptrace_entry_set_syscall_nr)
2043		syscall_nr = &syscall_nr_val;
2044	if (entry == ptrace_entry_set_syscall_ret)
2045		syscall_ret = &syscall_ret_val;
2046
2047	/* Now handle the actual rewriting cases. */
2048	switch (self->syscall_nr) {
2049	case __NR_getpid:
2050		syscall_nr_val = __NR_getppid;
2051		/* Never change syscall return for this case. */
2052		syscall_ret = NULL;
2053		break;
2054	case __NR_gettid:
2055		syscall_nr_val = -1;
2056		syscall_ret_val = 45000;
2057		break;
2058	case __NR_openat:
2059		syscall_nr_val = -1;
2060		syscall_ret_val = -ESRCH;
2061		break;
2062	default:
2063		/* Unhandled, do nothing. */
2064		return;
2065	}
2066
2067	__change_syscall(_metadata, tracee, syscall_nr, syscall_ret);
2068}
2069
2070FIXTURE_VARIANT(TRACE_syscall) {
2071	/*
2072	 * All of the SECCOMP_RET_TRACE behaviors can be tested with either
2073	 * SECCOMP_RET_TRACE+PTRACE_CONT or plain ptrace()+PTRACE_SYSCALL.
2074	 * This indicates if we should use SECCOMP_RET_TRACE (false), or
2075	 * ptrace (true).
2076	 */
2077	bool use_ptrace;
2078};
2079
2080FIXTURE_VARIANT_ADD(TRACE_syscall, ptrace) {
2081	.use_ptrace = true,
2082};
2083
2084FIXTURE_VARIANT_ADD(TRACE_syscall, seccomp) {
2085	.use_ptrace = false,
2086};
2087
2088FIXTURE_SETUP(TRACE_syscall)
2089{
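	/*
	 * Return SECCOMP_RET_TRACE with a distinct 16-bit payload for each
	 * syscall the tracer inspects (getpid, gettid, openat, getppid);
	 * everything else is allowed.
	 */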
2090	struct sock_filter filter[] = {
2091		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
2092			offsetof(struct seccomp_data, nr)),
2093		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
2094		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002),
2095		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1),
2096		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003),
2097		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_openat, 0, 1),
2098		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004),
2099		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
2100		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1005),
2101		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
2102	};
2103	struct sock_fprog prog = {
2104		.len = (unsigned short)ARRAY_SIZE(filter),
2105		.filter = filter,
2106	};
2107	long ret;
2108
2109	/* Prepare some testable syscall results. */
2110	self->mytid = syscall(__NR_gettid);
2111	ASSERT_GT(self->mytid, 0);
2112	ASSERT_NE(self->mytid, 1) {
2113		TH_LOG("Running this test as init is not supported. :)");
2114	}
2115
2116	self->mypid = getpid();
2117	ASSERT_GT(self->mypid, 0);
2118	ASSERT_EQ(self->mytid, self->mypid);
2119
2120	self->parent = getppid();
2121	ASSERT_GT(self->parent, 0);
2122	ASSERT_NE(self->parent, self->mypid);
2123
2124	/* Launch tracer. */
2125	self->tracer = setup_trace_fixture(_metadata,
2126					   variant->use_ptrace ? tracer_ptrace
2127							       : tracer_seccomp,
2128					   self, variant->use_ptrace);
2129
2130	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
2131	ASSERT_EQ(0, ret);
2132
2133	if (variant->use_ptrace)
2134		return;
2135
2136	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
2137	ASSERT_EQ(0, ret);
2138}
2139
2140FIXTURE_TEARDOWN(TRACE_syscall)
2141{
2142	teardown_trace_fixture(_metadata, self->tracer);
2143}
2144
2145TEST(negative_ENOSYS)
2146{
2147	/*
2148	 * There should be no difference between an "internal" skip
2149	 * and userspace asking for syscall "-1".
2150	 */
2151	errno = 0;
2152	EXPECT_EQ(-1, syscall(-1));
2153	EXPECT_EQ(errno, ENOSYS);
2154	/* And no difference for "still not valid but not -1". */
2155	errno = 0;
2156	EXPECT_EQ(-1, syscall(-101));
2157	EXPECT_EQ(errno, ENOSYS);
2158}
2159
2160TEST_F(TRACE_syscall, negative_ENOSYS)
2161{
2162	negative_ENOSYS(_metadata);
2163}
2164
2165TEST_F(TRACE_syscall, syscall_allowed)
2166{
2167	/* getppid works as expected (no changes). */
2168	EXPECT_EQ(self->parent, syscall(__NR_getppid));
2169	EXPECT_NE(self->mypid, syscall(__NR_getppid));
2170}
2171
2172TEST_F(TRACE_syscall, syscall_redirected)
2173{
2174	/* getpid has been redirected to getppid as expected. */
2175	EXPECT_EQ(self->parent, syscall(__NR_getpid));
2176	EXPECT_NE(self->mypid, syscall(__NR_getpid));
2177}
2178
2179TEST_F(TRACE_syscall, syscall_errno)
2180{
	/* Tracer should skip the openat syscall, resulting in ESRCH. */
2182	EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
2183}
2184
2185TEST_F(TRACE_syscall, syscall_faked)
2186{
	/* Tracer skips the gettid syscall and stores an altered return value. */
2188	EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
2189}
2190
2191TEST_F(TRACE_syscall, skip_after)
2192{
2193	struct sock_filter filter[] = {
2194		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
2195			offsetof(struct seccomp_data, nr)),
2196		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
2197		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EPERM),
2198		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
2199	};
2200	struct sock_fprog prog = {
2201		.len = (unsigned short)ARRAY_SIZE(filter),
2202		.filter = filter,
2203	};
2204	long ret;
2205
2206	/* Install additional "errno on getppid" filter. */
2207	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
2208	ASSERT_EQ(0, ret);
2209
2210	/* Tracer will redirect getpid to getppid, and we should see EPERM. */
2211	errno = 0;
2212	EXPECT_EQ(-1, syscall(__NR_getpid));
2213	EXPECT_EQ(EPERM, errno);
2214}
2215
2216TEST_F_SIGNAL(TRACE_syscall, kill_after, SIGSYS)
2217{
2218	struct sock_filter filter[] = {
2219		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
2220			offsetof(struct seccomp_data, nr)),
2221		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
2222		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
2223		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
2224	};
2225	struct sock_fprog prog = {
2226		.len = (unsigned short)ARRAY_SIZE(filter),
2227		.filter = filter,
2228	};
2229	long ret;
2230
2231	/* Install additional "death on getppid" filter. */
2232	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
2233	ASSERT_EQ(0, ret);
2234
2235	/* Tracer will redirect getpid to getppid, and we should die. */
2236	EXPECT_NE(self->mypid, syscall(__NR_getpid));
2237}
2238
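/*
 * Exercise the seccomp(2) syscall's argument validation: bogus operations,
 * strict mode with flags or uargs, and filter mode with bad flags or a NULL
 * program must all be rejected before a valid filter is finally installed.
 */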
2239TEST(seccomp_syscall)
2240{
2241	struct sock_filter filter[] = {
2242		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
2243	};
2244	struct sock_fprog prog = {
2245		.len = (unsigned short)ARRAY_SIZE(filter),
2246		.filter = filter,
2247	};
2248	long ret;
2249
2250	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
2251	ASSERT_EQ(0, ret) {
2252		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
2253	}
2254
2255	/* Reject insane operation. */
2256	ret = seccomp(-1, 0, &prog);
2257	ASSERT_NE(ENOSYS, errno) {
2258		TH_LOG("Kernel does not support seccomp syscall!");
2259	}
2260	EXPECT_EQ(EINVAL, errno) {
2261		TH_LOG("Did not reject crazy op value!");
2262	}
2263
2264	/* Reject strict with flags or pointer. */
2265	ret = seccomp(SECCOMP_SET_MODE_STRICT, -1, NULL);
2266	EXPECT_EQ(EINVAL, errno) {
2267		TH_LOG("Did not reject mode strict with flags!");
2268	}
2269	ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, &prog);
2270	EXPECT_EQ(EINVAL, errno) {
2271		TH_LOG("Did not reject mode strict with uargs!");
2272	}
2273
2274	/* Reject insane args for filter. */
2275	ret = seccomp(SECCOMP_SET_MODE_FILTER, -1, &prog);
2276	EXPECT_EQ(EINVAL, errno) {
2277		TH_LOG("Did not reject crazy filter flags!");
2278	}
2279	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, NULL);
2280	EXPECT_EQ(EFAULT, errno) {
2281		TH_LOG("Did not reject NULL filter!");
2282	}
2283
2284	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
	EXPECT_EQ(0, ret) {
2286		TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER: %s",
2287			strerror(errno));
2288	}
2289}
2290
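/*
 * Once a filter is installed via seccomp(2), neither prctl() nor seccomp()
 * may switch the task to strict mode.
 */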
2291TEST(seccomp_syscall_mode_lock)
2292{
2293	struct sock_filter filter[] = {
2294		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
2295	};
2296	struct sock_fprog prog = {
2297		.len = (unsigned short)ARRAY_SIZE(filter),
2298		.filter = filter,
2299	};
2300	long ret;
2301
2302	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
2303	ASSERT_EQ(0, ret) {
2304		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
2305	}
2306
2307	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
2308	ASSERT_NE(ENOSYS, errno) {
2309		TH_LOG("Kernel does not support seccomp syscall!");
2310	}
2311	EXPECT_EQ(0, ret) {
2312		TH_LOG("Could not install filter!");
2313	}
2314
2315	/* Make sure neither entry point will switch to strict. */
2316	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0);
2317	EXPECT_EQ(EINVAL, errno) {
2318		TH_LOG("Switched to mode strict!");
2319	}
2320
2321	ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, NULL);
2322	EXPECT_EQ(EINVAL, errno) {
2323		TH_LOG("Switched to mode strict!");
2324	}
2325}
2326
2327/*
2328 * Test detection of known and unknown filter flags. Userspace needs to be able
2329 * to check if a filter flag is supported by the current kernel and a good way
2330 * of doing that is by attempting to enter filter mode, with the flag bit in
2331 * question set, and a NULL pointer for the _args_ parameter. EFAULT indicates
2332 * that the flag is valid and EINVAL indicates that the flag is invalid.
2333 */
2334TEST(detect_seccomp_filter_flags)
2335{
2336	unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC,
2337				 SECCOMP_FILTER_FLAG_LOG,
2338				 SECCOMP_FILTER_FLAG_SPEC_ALLOW,
2339				 SECCOMP_FILTER_FLAG_NEW_LISTENER,
2340				 SECCOMP_FILTER_FLAG_TSYNC_ESRCH };
2341	unsigned int exclusive[] = {
2342				SECCOMP_FILTER_FLAG_TSYNC,
2343				SECCOMP_FILTER_FLAG_NEW_LISTENER };
2344	unsigned int flag, all_flags, exclusive_mask;
2345	int i;
2346	long ret;
2347
2348	/* Test detection of individual known-good filter flags */
2349	for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) {
2350		int bits = 0;
2351
2352		flag = flags[i];
2353		/* Make sure the flag is a single bit! */
2354		while (flag) {
2355			if (flag & 0x1)
				bits++;
2357			flag >>= 1;
2358		}
2359		ASSERT_EQ(1, bits);
2360		flag = flags[i];
2361
2362		ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
2363		ASSERT_NE(ENOSYS, errno) {
2364			TH_LOG("Kernel does not support seccomp syscall!");
2365		}
2366		EXPECT_EQ(-1, ret);
2367		EXPECT_EQ(EFAULT, errno) {
2368			TH_LOG("Failed to detect that a known-good filter flag (0x%X) is supported!",
2369			       flag);
2370		}
2371
2372		all_flags |= flag;
2373	}
2374
2375	/*
2376	 * Test detection of all known-good filter flags combined. But
2377	 * for the exclusive flags we need to mask them out and try them
2378	 * individually for the "all flags" testing.
2379	 */
2380	exclusive_mask = 0;
2381	for (i = 0; i < ARRAY_SIZE(exclusive); i++)
2382		exclusive_mask |= exclusive[i];
2383	for (i = 0; i < ARRAY_SIZE(exclusive); i++) {
2384		flag = all_flags & ~exclusive_mask;
2385		flag |= exclusive[i];
2386
2387		ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
2388		EXPECT_EQ(-1, ret);
2389		EXPECT_EQ(EFAULT, errno) {
2390			TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!",
2391			       flag);
2392		}
2393	}
2394
	/* Test detection of an unknown filter flag, without exclusives. */
2396	flag = -1;
2397	flag &= ~exclusive_mask;
2398	ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
2399	EXPECT_EQ(-1, ret);
2400	EXPECT_EQ(EINVAL, errno) {
2401		TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported!",
2402		       flag);
2403	}
2404
2405	/*
2406	 * Test detection of an unknown filter flag that may simply need to be
2407	 * added to this test
2408	 */
2409	flag = flags[ARRAY_SIZE(flags) - 1] << 1;
2410	ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
2411	EXPECT_EQ(-1, ret);
2412	EXPECT_EQ(EINVAL, errno) {
2413		TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported! Does a new flag need to be added to this test?",
2414		       flag);
2415	}
2416}
2417
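/* TSYNC on the very first filter (no existing filter tree) must succeed. */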
2418TEST(TSYNC_first)
2419{
2420	struct sock_filter filter[] = {
2421		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
2422	};
2423	struct sock_fprog prog = {
2424		.len = (unsigned short)ARRAY_SIZE(filter),
2425		.filter = filter,
2426	};
2427	long ret;
2428
2429	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
2430	ASSERT_EQ(0, ret) {
2431		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
2432	}
2433
2434	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
2435		      &prog);
2436	ASSERT_NE(ENOSYS, errno) {
2437		TH_LOG("Kernel does not support seccomp syscall!");
2438	}
2439	EXPECT_EQ(0, ret) {
2440		TH_LOG("Could not install initial filter with TSYNC!");
2441	}
2442}
2443
2444#define TSYNC_SIBLINGS 2
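/*
 * Per-thread state for the TSYNC tests: synchronization primitives shared
 * with the parent, the kernel tid (for matching seccomp()'s TSYNC failure
 * return), and which filter (if any) the sibling applies on its own.
 */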
2445struct tsync_sibling {
2446	pthread_t tid;
2447	pid_t system_tid;
2448	sem_t *started;
2449	pthread_cond_t *cond;
2450	pthread_mutex_t *mutex;
2451	int diverge;
2452	int num_waits;
2453	struct sock_fprog *prog;
2454	struct __test_metadata *metadata;
2455};
2456
2457/*
2458 * To avoid joining joined threads (which is not allowed by Bionic),
2459 * make sure we both successfully join and clear the tid to skip a
2460 * later join attempt during fixture teardown. Any remaining threads
2461 * will be directly killed during teardown.
2462 */
2463#define PTHREAD_JOIN(tid, status)					\
2464	do {								\
2465		int _rc = pthread_join(tid, status);			\
2466		if (_rc) {						\
2467			TH_LOG("pthread_join of tid %u failed: %d\n",	\
2468				(unsigned int)tid, _rc);		\
2469		} else {						\
2470			tid = 0;					\
2471		}							\
2472	} while (0)
2473
2474FIXTURE(TSYNC) {
2475	struct sock_fprog root_prog, apply_prog;
2476	struct tsync_sibling sibling[TSYNC_SIBLINGS];
2477	sem_t started;
2478	pthread_cond_t cond;
2479	pthread_mutex_t mutex;
2480	int sibling_count;
2481};
2482
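/*
 * root_prog allows everything; apply_prog kills on read(). A sibling that
 * ends up under apply_prog therefore dies in its final read(-1, NULL, 0)
 * instead of returning SIBLING_EXIT_UNKILLED.
 */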
2483FIXTURE_SETUP(TSYNC)
2484{
2485	struct sock_filter root_filter[] = {
2486		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
2487	};
2488	struct sock_filter apply_filter[] = {
2489		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
2490			offsetof(struct seccomp_data, nr)),
2491		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
2492		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
2493		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
2494	};
2495
2496	memset(&self->root_prog, 0, sizeof(self->root_prog));
2497	memset(&self->apply_prog, 0, sizeof(self->apply_prog));
2498	memset(&self->sibling, 0, sizeof(self->sibling));
2499	self->root_prog.filter = malloc(sizeof(root_filter));
2500	ASSERT_NE(NULL, self->root_prog.filter);
2501	memcpy(self->root_prog.filter, &root_filter, sizeof(root_filter));
2502	self->root_prog.len = (unsigned short)ARRAY_SIZE(root_filter);
2503
2504	self->apply_prog.filter = malloc(sizeof(apply_filter));
2505	ASSERT_NE(NULL, self->apply_prog.filter);
2506	memcpy(self->apply_prog.filter, &apply_filter, sizeof(apply_filter));
2507	self->apply_prog.len = (unsigned short)ARRAY_SIZE(apply_filter);
2508
2509	self->sibling_count = 0;
2510	pthread_mutex_init(&self->mutex, NULL);
2511	pthread_cond_init(&self->cond, NULL);
2512	sem_init(&self->started, 0, 0);
2513	self->sibling[0].tid = 0;
2514	self->sibling[0].cond = &self->cond;
2515	self->sibling[0].started = &self->started;
2516	self->sibling[0].mutex = &self->mutex;
2517	self->sibling[0].diverge = 0;
2518	self->sibling[0].num_waits = 1;
2519	self->sibling[0].prog = &self->root_prog;
2520	self->sibling[0].metadata = _metadata;
2521	self->sibling[1].tid = 0;
2522	self->sibling[1].cond = &self->cond;
2523	self->sibling[1].started = &self->started;
2524	self->sibling[1].mutex = &self->mutex;
2525	self->sibling[1].diverge = 0;
2526	self->sibling[1].prog = &self->root_prog;
2527	self->sibling[1].num_waits = 1;
2528	self->sibling[1].metadata = _metadata;
2529}
2530
2531FIXTURE_TEARDOWN(TSYNC)
2532{
2533	int sib = 0;
2534
2535	if (self->root_prog.filter)
2536		free(self->root_prog.filter);
2537	if (self->apply_prog.filter)
2538		free(self->apply_prog.filter);
2539
2540	for ( ; sib < self->sibling_count; ++sib) {
2541		struct tsync_sibling *s = &self->sibling[sib];
2542
2543		if (!s->tid)
2544			continue;
2545		/*
2546		 * If a thread is still running, it may be stuck, so hit
2547		 * it over the head really hard.
2548		 */
2549		pthread_kill(s->tid, 9);
2550	}
2551	pthread_mutex_destroy(&self->mutex);
2552	pthread_cond_destroy(&self->cond);
2553	sem_destroy(&self->started);
2554}
2555
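/*
 * Body of each sibling thread: record the kernel tid, optionally diverge by
 * installing its own copy of the filter, signal the parent, then block on
 * the condvar until woken num_waits times. After waking, it checks
 * no_new_privs and issues a read() that is fatal only under apply_prog.
 */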
2556void *tsync_sibling(void *data)
2557{
2558	long ret = 0;
2559	struct tsync_sibling *me = data;
2560
2561	me->system_tid = syscall(__NR_gettid);
2562
2563	pthread_mutex_lock(me->mutex);
2564	if (me->diverge) {
2565		/* Just re-apply the root prog to fork the tree */
2566		ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER,
2567				me->prog, 0, 0);
2568	}
2569	sem_post(me->started);
2570	/* Return outside of started so parent notices failures. */
2571	if (ret) {
2572		pthread_mutex_unlock(me->mutex);
2573		return (void *)SIBLING_EXIT_FAILURE;
2574	}
2575	do {
2576		pthread_cond_wait(me->cond, me->mutex);
2577		me->num_waits = me->num_waits - 1;
2578	} while (me->num_waits);
2579	pthread_mutex_unlock(me->mutex);
2580
2581	ret = prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0);
2582	if (!ret)
2583		return (void *)SIBLING_EXIT_NEWPRIVS;
2584	read(-1, NULL, 0);
2585	return (void *)SIBLING_EXIT_UNKILLED;
2586}
2587
2588void tsync_start_sibling(struct tsync_sibling *sibling)
2589{
2590	pthread_create(&sibling->tid, NULL, tsync_sibling, (void *)sibling);
2591}
2592
2593TEST_F(TSYNC, siblings_fail_prctl)
2594{
2595	long ret;
2596	void *status;
2597	struct sock_filter filter[] = {
2598		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
2599			offsetof(struct seccomp_data, nr)),
2600		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
2601		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EINVAL),
2602		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
2603	};
2604	struct sock_fprog prog = {
2605		.len = (unsigned short)ARRAY_SIZE(filter),
2606		.filter = filter,
2607	};
2608
2609	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
2610		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
2611	}
2612
2613	/* Check prctl failure detection by requesting sib 0 diverge. */
2614	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
2615	ASSERT_NE(ENOSYS, errno) {
2616		TH_LOG("Kernel does not support seccomp syscall!");
2617	}
2618	ASSERT_EQ(0, ret) {
2619		TH_LOG("setting filter failed");
2620	}
2621
2622	self->sibling[0].diverge = 1;
2623	tsync_start_sibling(&self->sibling[0]);
2624	tsync_start_sibling(&self->sibling[1]);
2625
2626	while (self->sibling_count < TSYNC_SIBLINGS) {
2627		sem_wait(&self->started);
2628		self->sibling_count++;
2629	}
2630
	/* Signal the threads to clean up. */
2632	pthread_mutex_lock(&self->mutex);
2633	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
2634		TH_LOG("cond broadcast non-zero");
2635	}
2636	pthread_mutex_unlock(&self->mutex);
2637
2638	/* Ensure diverging sibling failed to call prctl. */
2639	PTHREAD_JOIN(self->sibling[0].tid, &status);
2640	EXPECT_EQ(SIBLING_EXIT_FAILURE, (long)status);
2641	PTHREAD_JOIN(self->sibling[1].tid, &status);
2642	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
2643}
2644
2645TEST_F(TSYNC, two_siblings_with_ancestor)
2646{
2647	long ret;
2648	void *status;
2649
2650	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
2651		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
2652	}
2653
2654	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
2655	ASSERT_NE(ENOSYS, errno) {
2656		TH_LOG("Kernel does not support seccomp syscall!");
2657	}
2658	ASSERT_EQ(0, ret) {
2659		TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
2660	}
2661	tsync_start_sibling(&self->sibling[0]);
2662	tsync_start_sibling(&self->sibling[1]);
2663
2664	while (self->sibling_count < TSYNC_SIBLINGS) {
2665		sem_wait(&self->started);
2666		self->sibling_count++;
2667	}
2668
2669	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
2670		      &self->apply_prog);
2671	ASSERT_EQ(0, ret) {
		TH_LOG("Could not install filter on all threads!");
2673	}
2674	/* Tell the siblings to test the policy */
2675	pthread_mutex_lock(&self->mutex);
2676	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
2677		TH_LOG("cond broadcast non-zero");
2678	}
2679	pthread_mutex_unlock(&self->mutex);
2680	/* Ensure they are both killed and don't exit cleanly. */
2681	PTHREAD_JOIN(self->sibling[0].tid, &status);
2682	EXPECT_EQ(0x0, (long)status);
2683	PTHREAD_JOIN(self->sibling[1].tid, &status);
2684	EXPECT_EQ(0x0, (long)status);
2685}
2686
2687TEST_F(TSYNC, two_sibling_want_nnp)
2688{
2689	void *status;
2690
2691	/* start siblings before any prctl() operations */
2692	tsync_start_sibling(&self->sibling[0]);
2693	tsync_start_sibling(&self->sibling[1]);
2694	while (self->sibling_count < TSYNC_SIBLINGS) {
2695		sem_wait(&self->started);
2696		self->sibling_count++;
2697	}
2698
2699	/* Tell the siblings to test no policy */
2700	pthread_mutex_lock(&self->mutex);
2701	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
2702		TH_LOG("cond broadcast non-zero");
2703	}
2704	pthread_mutex_unlock(&self->mutex);
2705
2706	/* Ensure they are both upset about lacking nnp. */
2707	PTHREAD_JOIN(self->sibling[0].tid, &status);
2708	EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status);
2709	PTHREAD_JOIN(self->sibling[1].tid, &status);
2710	EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status);
2711}
2712
2713TEST_F(TSYNC, two_siblings_with_no_filter)
2714{
2715	long ret;
2716	void *status;
2717
2718	/* start siblings before any prctl() operations */
2719	tsync_start_sibling(&self->sibling[0]);
2720	tsync_start_sibling(&self->sibling[1]);
2721	while (self->sibling_count < TSYNC_SIBLINGS) {
2722		sem_wait(&self->started);
2723		self->sibling_count++;
2724	}
2725
2726	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
2727		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
2728	}
2729
2730	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
2731		      &self->apply_prog);
2732	ASSERT_NE(ENOSYS, errno) {
2733		TH_LOG("Kernel does not support seccomp syscall!");
2734	}
2735	ASSERT_EQ(0, ret) {
		TH_LOG("Could not install filter on all threads!");
2737	}
2738
2739	/* Tell the siblings to test the policy */
2740	pthread_mutex_lock(&self->mutex);
2741	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
2742		TH_LOG("cond broadcast non-zero");
2743	}
2744	pthread_mutex_unlock(&self->mutex);
2745
2746	/* Ensure they are both killed and don't exit cleanly. */
2747	PTHREAD_JOIN(self->sibling[0].tid, &status);
2748	EXPECT_EQ(0x0, (long)status);
2749	PTHREAD_JOIN(self->sibling[1].tid, &status);
2750	EXPECT_EQ(0x0, (long)status);
2751}
2752
2753TEST_F(TSYNC, two_siblings_with_one_divergence)
2754{
2755	long ret;
2756	void *status;
2757
2758	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
2759		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
2760	}
2761
2762	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
2763	ASSERT_NE(ENOSYS, errno) {
2764		TH_LOG("Kernel does not support seccomp syscall!");
2765	}
2766	ASSERT_EQ(0, ret) {
2767		TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
2768	}
2769	self->sibling[0].diverge = 1;
2770	tsync_start_sibling(&self->sibling[0]);
2771	tsync_start_sibling(&self->sibling[1]);
2772
2773	while (self->sibling_count < TSYNC_SIBLINGS) {
2774		sem_wait(&self->started);
2775		self->sibling_count++;
2776	}
2777
2778	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
2779		      &self->apply_prog);
2780	ASSERT_EQ(self->sibling[0].system_tid, ret) {
2781		TH_LOG("Did not fail on diverged sibling.");
2782	}
2783
2784	/* Wake the threads */
2785	pthread_mutex_lock(&self->mutex);
2786	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
2787		TH_LOG("cond broadcast non-zero");
2788	}
2789	pthread_mutex_unlock(&self->mutex);
2790
2791	/* Ensure they are both unkilled. */
2792	PTHREAD_JOIN(self->sibling[0].tid, &status);
2793	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
2794	PTHREAD_JOIN(self->sibling[1].tid, &status);
2795	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
2796}
2797
2798TEST_F(TSYNC, two_siblings_with_one_divergence_no_tid_in_err)
2799{
2800	long ret, flags;
2801	void *status;
2802
2803	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
2804		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
2805	}
2806
2807	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
2808	ASSERT_NE(ENOSYS, errno) {
2809		TH_LOG("Kernel does not support seccomp syscall!");
2810	}
2811	ASSERT_EQ(0, ret) {
2812		TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
2813	}
2814	self->sibling[0].diverge = 1;
2815	tsync_start_sibling(&self->sibling[0]);
2816	tsync_start_sibling(&self->sibling[1]);
2817
2818	while (self->sibling_count < TSYNC_SIBLINGS) {
2819		sem_wait(&self->started);
2820		self->sibling_count++;
2821	}
2822
2823	flags = SECCOMP_FILTER_FLAG_TSYNC | \
2824		SECCOMP_FILTER_FLAG_TSYNC_ESRCH;
2825	ret = seccomp(SECCOMP_SET_MODE_FILTER, flags, &self->apply_prog);
2826	ASSERT_EQ(ESRCH, errno) {
2827		TH_LOG("Did not return ESRCH for diverged sibling.");
2828	}
2829	ASSERT_EQ(-1, ret) {
2830		TH_LOG("Did not fail on diverged sibling.");
2831	}
2832
2833	/* Wake the threads */
2834	pthread_mutex_lock(&self->mutex);
2835	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
2836		TH_LOG("cond broadcast non-zero");
2837	}
2838	pthread_mutex_unlock(&self->mutex);
2839
2840	/* Ensure they are both unkilled. */
2841	PTHREAD_JOIN(self->sibling[0].tid, &status);
2842	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
2843	PTHREAD_JOIN(self->sibling[1].tid, &status);
2844	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
2845}
2846
2847TEST_F(TSYNC, two_siblings_not_under_filter)
2848{
2849	long ret, sib;
2850	void *status;
2851	struct timespec delay = { .tv_nsec = 100000000 };
2852
2853	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
2854		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
2855	}
2856
2857	/*
2858	 * Sibling 0 will have its own seccomp policy
2859	 * and Sibling 1 will not be under seccomp at
2860	 * all. Sibling 1 will enter seccomp and 0
2861	 * will cause failure.
2862	 */
2863	self->sibling[0].diverge = 1;
2864	tsync_start_sibling(&self->sibling[0]);
2865	tsync_start_sibling(&self->sibling[1]);
2866
2867	while (self->sibling_count < TSYNC_SIBLINGS) {
2868		sem_wait(&self->started);
2869		self->sibling_count++;
2870	}
2871
2872	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
2873	ASSERT_NE(ENOSYS, errno) {
2874		TH_LOG("Kernel does not support seccomp syscall!");
2875	}
2876	ASSERT_EQ(0, ret) {
2877		TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
2878	}
2879
2880	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
2881		      &self->apply_prog);
2882	ASSERT_EQ(ret, self->sibling[0].system_tid) {
2883		TH_LOG("Did not fail on diverged sibling.");
2884	}
2885	sib = 1;
2886	if (ret == self->sibling[0].system_tid)
2887		sib = 0;
2888
2889	pthread_mutex_lock(&self->mutex);
2890
	/*
	 * Increment the other sibling's num_waits so we can clean up
	 * the one we just saw.
	 */
2894	self->sibling[!sib].num_waits += 1;
2895
	/* Signal the thread to clean up. */
2897	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
2898		TH_LOG("cond broadcast non-zero");
2899	}
2900	pthread_mutex_unlock(&self->mutex);
2901	PTHREAD_JOIN(self->sibling[sib].tid, &status);
2902	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
2903	/* Poll for actual task death. pthread_join doesn't guarantee it. */
2904	while (!kill(self->sibling[sib].system_tid, 0))
2905		nanosleep(&delay, NULL);
2906	/* Switch to the remaining sibling */
2907	sib = !sib;
2908
2909	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
2910		      &self->apply_prog);
2911	ASSERT_EQ(0, ret) {
2912		TH_LOG("Expected the remaining sibling to sync");
2913	};
2914
2915	pthread_mutex_lock(&self->mutex);
2916
	/*
	 * If the remaining sibling didn't have a chance to wake up during
	 * the first broadcast, manually reduce its num_waits now.
	 */
2920	if (self->sibling[sib].num_waits > 1)
2921		self->sibling[sib].num_waits = 1;
2922	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
2923		TH_LOG("cond broadcast non-zero");
2924	}
2925	pthread_mutex_unlock(&self->mutex);
2926	PTHREAD_JOIN(self->sibling[sib].tid, &status);
2927	EXPECT_EQ(0, (long)status);
2928	/* Poll for actual task death. pthread_join doesn't guarantee it. */
2929	while (!kill(self->sibling[sib].system_tid, 0))
2930		nanosleep(&delay, NULL);
2931
2932	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
2933		      &self->apply_prog);
2934	ASSERT_EQ(0, ret);  /* just us chickens */
2935}
2936
2937/* Make sure restarted syscalls are seen directly as "restart_syscall". */
2938TEST(syscall_restart)
2939{
2940	long ret;
2941	unsigned long msg;
2942	pid_t child_pid;
2943	int pipefd[2];
2944	int status;
2945	siginfo_t info = { };
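	/*
	 * Jump targets below: nanosleep/clock_nanosleep trap to the tracer
	 * with data 0x100, restart_syscall with 0x200, a small allowlist
	 * (read/exit/(rt_)sigreturn/write) passes, and anything else is
	 * killed.
	 */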
2946	struct sock_filter filter[] = {
2947		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
2948			 offsetof(struct seccomp_data, nr)),
2949
2950#ifdef __NR_sigreturn
2951		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_sigreturn, 7, 0),
2952#endif
2953		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 6, 0),
2954		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_exit, 5, 0),
2955		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_rt_sigreturn, 4, 0),
2956		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_nanosleep, 5, 0),
2957		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_clock_nanosleep, 4, 0),
2958		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_restart_syscall, 4, 0),
2959
2960		/* Allow __NR_write for easy logging. */
2961		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_write, 0, 1),
2962		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
2963		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
2964		/* The nanosleep jump target. */
2965		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x100),
2966		/* The restart_syscall jump target. */
2967		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x200),
2968	};
2969	struct sock_fprog prog = {
2970		.len = (unsigned short)ARRAY_SIZE(filter),
2971		.filter = filter,
2972	};
2973#if defined(__arm__)
2974	struct utsname utsbuf;
2975#endif
2976
2977	ASSERT_EQ(0, pipe(pipefd));
2978
2979	child_pid = fork();
2980	ASSERT_LE(0, child_pid);
2981	if (child_pid == 0) {
2982		/* Child uses EXPECT not ASSERT to deliver status correctly. */
2983		char buf = ' ';
2984		struct timespec timeout = { };
2985
2986		/* Attach parent as tracer and stop. */
2987		EXPECT_EQ(0, ptrace(PTRACE_TRACEME));
2988		EXPECT_EQ(0, raise(SIGSTOP));
2989
2990		EXPECT_EQ(0, close(pipefd[1]));
2991
2992		EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
2993			TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
2994		}
2995
2996		ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
2997		EXPECT_EQ(0, ret) {
2998			TH_LOG("Failed to install filter!");
2999		}
3000
3001		EXPECT_EQ(1, read(pipefd[0], &buf, 1)) {
3002			TH_LOG("Failed to read() sync from parent");
3003		}
3004		EXPECT_EQ('.', buf) {
3005			TH_LOG("Failed to get sync data from read()");
3006		}
3007
3008		/* Start nanosleep to be interrupted. */
3009		timeout.tv_sec = 1;
3010		errno = 0;
3011		EXPECT_EQ(0, nanosleep(&timeout, NULL)) {
3012			TH_LOG("Call to nanosleep() failed (errno %d)", errno);
3013		}
3014
3015		/* Read final sync from parent. */
3016		EXPECT_EQ(1, read(pipefd[0], &buf, 1)) {
3017			TH_LOG("Failed final read() from parent");
3018		}
3019		EXPECT_EQ('!', buf) {
3020			TH_LOG("Failed to get final data from read()");
3021		}
3022
3023		/* Directly report the status of our test harness results. */
3024		syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS
3025						     : EXIT_FAILURE);
3026	}
3027	EXPECT_EQ(0, close(pipefd[0]));
3028
3029	/* Attach to child, setup options, and release. */
3030	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
3031	ASSERT_EQ(true, WIFSTOPPED(status));
3032	ASSERT_EQ(0, ptrace(PTRACE_SETOPTIONS, child_pid, NULL,
3033			    PTRACE_O_TRACESECCOMP));
3034	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
3035	ASSERT_EQ(1, write(pipefd[1], ".", 1));
3036
3037	/* Wait for nanosleep() to start. */
3038	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
3039	ASSERT_EQ(true, WIFSTOPPED(status));
3040	ASSERT_EQ(SIGTRAP, WSTOPSIG(status));
3041	ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16));
3042	ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg));
3043	ASSERT_EQ(0x100, msg);
3044	ret = get_syscall(_metadata, child_pid);
3045	EXPECT_TRUE(ret == __NR_nanosleep || ret == __NR_clock_nanosleep);
3046
3047	/* Might as well check siginfo for sanity while we're here. */
3048	ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info));
3049	ASSERT_EQ(SIGTRAP, info.si_signo);
3050	ASSERT_EQ(SIGTRAP | (PTRACE_EVENT_SECCOMP << 8), info.si_code);
3051	EXPECT_EQ(0, info.si_errno);
3052	EXPECT_EQ(getuid(), info.si_uid);
3053	/* Verify signal delivery came from child (seccomp-triggered). */
3054	EXPECT_EQ(child_pid, info.si_pid);
3055
3056	/* Interrupt nanosleep with SIGSTOP (which we'll need to handle). */
3057	ASSERT_EQ(0, kill(child_pid, SIGSTOP));
3058	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
3059	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
3060	ASSERT_EQ(true, WIFSTOPPED(status));
3061	ASSERT_EQ(SIGSTOP, WSTOPSIG(status));
3062	ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info));
3063	/*
3064	 * There is no siginfo on SIGSTOP any more, so we can't verify
3065	 * signal delivery came from parent now (getpid() == info.si_pid).
3066	 * https://lkml.kernel.org/r/CAGXu5jJaZAOzP1qFz66tYrtbuywqb+UN2SOA1VLHpCCOiYvYeg@mail.gmail.com
3067	 * At least verify the SIGSTOP via PTRACE_GETSIGINFO.
3068	 */
3069	EXPECT_EQ(SIGSTOP, info.si_signo);
3070
3071	/* Restart nanosleep with SIGCONT, which triggers restart_syscall. */
3072	ASSERT_EQ(0, kill(child_pid, SIGCONT));
3073	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
3074	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
3075	ASSERT_EQ(true, WIFSTOPPED(status));
3076	ASSERT_EQ(SIGCONT, WSTOPSIG(status));
3077	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
3078
3079	/* Wait for restart_syscall() to start. */
3080	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
3081	ASSERT_EQ(true, WIFSTOPPED(status));
3082	ASSERT_EQ(SIGTRAP, WSTOPSIG(status));
3083	ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16));
3084	ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg));
3085
3086	ASSERT_EQ(0x200, msg);
3087	ret = get_syscall(_metadata, child_pid);
3088#if defined(__arm__)
3089	/*
3090	 * FIXME:
3091	 * - native ARM registers do NOT expose true syscall.
3092	 * - compat ARM registers on ARM64 DO expose true syscall.
3093	 */
3094	ASSERT_EQ(0, uname(&utsbuf));
3095	if (strncmp(utsbuf.machine, "arm", 3) == 0) {
3096		EXPECT_EQ(__NR_nanosleep, ret);
3097	} else
3098#endif
3099	{
3100		EXPECT_EQ(__NR_restart_syscall, ret);
3101	}
3102
3103	/* Write again to end test. */
3104	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
3105	ASSERT_EQ(1, write(pipefd[1], "!", 1));
3106	EXPECT_EQ(0, close(pipefd[1]));
3107
3108	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
3109	if (WIFSIGNALED(status) || WEXITSTATUS(status))
3110		_metadata->passed = 0;
3111}
3112
3113TEST_SIGNAL(filter_flag_log, SIGSYS)
3114{
3115	struct sock_filter allow_filter[] = {
3116		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
3117	};
3118	struct sock_filter kill_filter[] = {
3119		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
3120			offsetof(struct seccomp_data, nr)),
3121		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
3122		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
3123		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
3124	};
3125	struct sock_fprog allow_prog = {
3126		.len = (unsigned short)ARRAY_SIZE(allow_filter),
3127		.filter = allow_filter,
3128	};
3129	struct sock_fprog kill_prog = {
3130		.len = (unsigned short)ARRAY_SIZE(kill_filter),
3131		.filter = kill_filter,
3132	};
3133	long ret;
3134	pid_t parent = getppid();
3135
3136	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
3137	ASSERT_EQ(0, ret);
3138
3139	/* Verify that the FILTER_FLAG_LOG flag isn't accepted in strict mode */
3140	ret = seccomp(SECCOMP_SET_MODE_STRICT, SECCOMP_FILTER_FLAG_LOG,
3141		      &allow_prog);
3142	ASSERT_NE(ENOSYS, errno) {
3143		TH_LOG("Kernel does not support seccomp syscall!");
3144	}
3145	EXPECT_NE(0, ret) {
3146		TH_LOG("Kernel accepted FILTER_FLAG_LOG flag in strict mode!");
3147	}
3148	EXPECT_EQ(EINVAL, errno) {
3149		TH_LOG("Kernel returned unexpected errno for FILTER_FLAG_LOG flag in strict mode!");
3150	}
3151
3152	/* Verify that a simple, permissive filter can be added with no flags */
3153	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &allow_prog);
3154	EXPECT_EQ(0, ret);
3155
3156	/* See if the same filter can be added with the FILTER_FLAG_LOG flag */
3157	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_LOG,
3158		      &allow_prog);
3159	ASSERT_NE(EINVAL, errno) {
3160		TH_LOG("Kernel does not support the FILTER_FLAG_LOG flag!");
3161	}
3162	EXPECT_EQ(0, ret);
3163
3164	/* Ensure that the kill filter works with the FILTER_FLAG_LOG flag */
3165	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_LOG,
3166		      &kill_prog);
3167	EXPECT_EQ(0, ret);
3168
3169	EXPECT_EQ(parent, syscall(__NR_getppid));
3170	/* getpid() should never return. */
3171	EXPECT_EQ(0, syscall(__NR_getpid));
3172}
3173
3174TEST(get_action_avail)
3175{
3176	__u32 actions[] = { SECCOMP_RET_KILL_THREAD, SECCOMP_RET_TRAP,
3177			    SECCOMP_RET_ERRNO, SECCOMP_RET_TRACE,
3178			    SECCOMP_RET_LOG,   SECCOMP_RET_ALLOW };
3179	__u32 unknown_action = 0x10000000U;
3180	int i;
3181	long ret;
3182
3183	ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &actions[0]);
3184	ASSERT_NE(ENOSYS, errno) {
3185		TH_LOG("Kernel does not support seccomp syscall!");
3186	}
3187	ASSERT_NE(EINVAL, errno) {
3188		TH_LOG("Kernel does not support SECCOMP_GET_ACTION_AVAIL operation!");
3189	}
3190	EXPECT_EQ(ret, 0);
3191
3192	for (i = 0; i < ARRAY_SIZE(actions); i++) {
3193		ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &actions[i]);
3194		EXPECT_EQ(ret, 0) {
3195			TH_LOG("Expected action (0x%X) not available!",
3196			       actions[i]);
3197		}
3198	}
3199
3200	/* Check that an unknown action is handled properly (EOPNOTSUPP) */
3201	ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &unknown_action);
3202	EXPECT_EQ(ret, -1);
3203	EXPECT_EQ(errno, EOPNOTSUPP);
3204}
3205
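/*
 * PTRACE_SECCOMP_GET_METADATA reports the flags a filter was installed
 * with. The child stacks two filters (first with SECCOMP_FILTER_FLAG_LOG,
 * then without), and the test expects filter_off 0 to name the LOG filter
 * and filter_off 1 the plain one.
 */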
3206TEST(get_metadata)
3207{
3208	pid_t pid;
3209	int pipefd[2];
3210	char buf;
3211	struct seccomp_metadata md;
3212	long ret;
3213
3214	/* Only real root can get metadata. */
3215	if (geteuid()) {
3216		SKIP(return, "get_metadata requires real root");
3217		return;
3218	}
3219
3220	ASSERT_EQ(0, pipe(pipefd));
3221
3222	pid = fork();
3223	ASSERT_GE(pid, 0);
3224	if (pid == 0) {
3225		struct sock_filter filter[] = {
3226			BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
3227		};
3228		struct sock_fprog prog = {
3229			.len = (unsigned short)ARRAY_SIZE(filter),
3230			.filter = filter,
3231		};
3232
3233		/* one with log, one without */
3234		EXPECT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER,
3235				     SECCOMP_FILTER_FLAG_LOG, &prog));
3236		EXPECT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog));
3237
3238		EXPECT_EQ(0, close(pipefd[0]));
3239		ASSERT_EQ(1, write(pipefd[1], "1", 1));
3240		ASSERT_EQ(0, close(pipefd[1]));
3241
3242		while (1)
3243			sleep(100);
3244	}
3245
3246	ASSERT_EQ(0, close(pipefd[1]));
3247	ASSERT_EQ(1, read(pipefd[0], &buf, 1));
3248
3249	ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid));
3250	ASSERT_EQ(pid, waitpid(pid, NULL, 0));
3251
3252	/* Past here must not use ASSERT or child process is never killed. */
3253
3254	md.filter_off = 0;
3255	errno = 0;
3256	ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md);
3257	EXPECT_EQ(sizeof(md), ret) {
3258		if (errno == EINVAL)
3259			SKIP(goto skip, "Kernel does not support PTRACE_SECCOMP_GET_METADATA (missing CONFIG_CHECKPOINT_RESTORE?)");
3260	}
3261
3262	EXPECT_EQ(md.flags, SECCOMP_FILTER_FLAG_LOG);
3263	EXPECT_EQ(md.filter_off, 0);
3264
3265	md.filter_off = 1;
3266	ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md);
3267	EXPECT_EQ(sizeof(md), ret);
3268	EXPECT_EQ(md.flags, 0);
3269	EXPECT_EQ(md.filter_off, 1);
3270
3271skip:
3272	ASSERT_EQ(0, kill(pid, SIGKILL));
3273}
3274
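/*
 * Install a filter that returns SECCOMP_RET_USER_NOTIF for the given
 * syscall and allows everything else. With SECCOMP_FILTER_FLAG_NEW_LISTENER
 * in @flags, the return value of seccomp() is the listener fd.
 */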
3275static int user_notif_syscall(int nr, unsigned int flags)
3276{
3277	struct sock_filter filter[] = {
3278		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
3279			offsetof(struct seccomp_data, nr)),
3280		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, nr, 0, 1),
3281		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_USER_NOTIF),
3282		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
3283	};
3284
3285	struct sock_fprog prog = {
3286		.len = (unsigned short)ARRAY_SIZE(filter),
3287		.filter = filter,
3288	};
3289
3290	return seccomp(SECCOMP_SET_MODE_FILTER, flags, &prog);
3291}
3292
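/*
 * Sentinel value the supervisor injects via resp.val so children can verify
 * that their intercepted syscall was answered through the listener.
 */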
3293#define USER_NOTIF_MAGIC INT_MAX
3294TEST(user_notification_basic)
3295{
3296	pid_t pid;
3297	long ret;
3298	int status, listener;
3299	struct seccomp_notif req = {};
3300	struct seccomp_notif_resp resp = {};
3301	struct pollfd pollfd;
3302
3303	struct sock_filter filter[] = {
3304		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
3305	};
3306	struct sock_fprog prog = {
3307		.len = (unsigned short)ARRAY_SIZE(filter),
3308		.filter = filter,
3309	};
3310
3311	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
3312	ASSERT_EQ(0, ret) {
3313		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
3314	}
3315
3316	pid = fork();
3317	ASSERT_GE(pid, 0);
3318
3319	/* Check that we get -ENOSYS with no listener attached */
3320	if (pid == 0) {
3321		if (user_notif_syscall(__NR_getppid, 0) < 0)
3322			exit(1);
3323		ret = syscall(__NR_getppid);
3324		exit(ret >= 0 || errno != ENOSYS);
3325	}
3326
3327	EXPECT_EQ(waitpid(pid, &status, 0), pid);
3328	EXPECT_EQ(true, WIFEXITED(status));
3329	EXPECT_EQ(0, WEXITSTATUS(status));
3330
3331	/* Add some no-op filters for grins. */
3332	EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0);
3333	EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0);
3334	EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0);
3335	EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0);
3336
3337	/* Check that the basic notification machinery works */
3338	listener = user_notif_syscall(__NR_getppid,
3339				      SECCOMP_FILTER_FLAG_NEW_LISTENER);
3340	ASSERT_GE(listener, 0);
3341
3342	/* Installing a second listener in the chain should EBUSY */
3343	EXPECT_EQ(user_notif_syscall(__NR_getppid,
3344				     SECCOMP_FILTER_FLAG_NEW_LISTENER),
3345		  -1);
3346	EXPECT_EQ(errno, EBUSY);
3347
3348	pid = fork();
3349	ASSERT_GE(pid, 0);
3350
3351	if (pid == 0) {
3352		ret = syscall(__NR_getppid);
3353		exit(ret != USER_NOTIF_MAGIC);
3354	}
3355
3356	pollfd.fd = listener;
3357	pollfd.events = POLLIN | POLLOUT;
3358
3359	EXPECT_GT(poll(&pollfd, 1, -1), 0);
3360	EXPECT_EQ(pollfd.revents, POLLIN);
3361
3362	/* Test that we can't pass garbage to the kernel. */
3363	memset(&req, 0, sizeof(req));
3364	req.pid = -1;
3365	errno = 0;
3366	ret = ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req);
3367	EXPECT_EQ(-1, ret);
3368	EXPECT_EQ(EINVAL, errno);
3369
3370	if (ret) {
3371		req.pid = 0;
3372		EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
3373	}
3374
3375	pollfd.fd = listener;
3376	pollfd.events = POLLIN | POLLOUT;
3377
3378	EXPECT_GT(poll(&pollfd, 1, -1), 0);
3379	EXPECT_EQ(pollfd.revents, POLLOUT);
3380
3381	EXPECT_EQ(req.data.nr,  __NR_getppid);
3382
3383	resp.id = req.id;
3384	resp.error = 0;
3385	resp.val = USER_NOTIF_MAGIC;
3386
3387	/* check that we make sure flags == 0 */
3388	resp.flags = 1;
3389	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1);
3390	EXPECT_EQ(errno, EINVAL);
3391
3392	resp.flags = 0;
3393	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
3394
3395	EXPECT_EQ(waitpid(pid, &status, 0), pid);
3396	EXPECT_EQ(true, WIFEXITED(status));
3397	EXPECT_EQ(0, WEXITSTATUS(status));
3398}
3399
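/*
 * SECCOMP_FILTER_FLAG_TSYNC and SECCOMP_FILTER_FLAG_NEW_LISTENER used to be
 * mutually exclusive; adding SECCOMP_FILTER_FLAG_TSYNC_ESRCH makes the
 * combination acceptable.
 */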
3400TEST(user_notification_with_tsync)
3401{
3402	int ret;
3403	unsigned int flags;
3404
3405	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
3406	ASSERT_EQ(0, ret) {
3407		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
3408	}
3409
3410	/* these were exclusive */
3411	flags = SECCOMP_FILTER_FLAG_NEW_LISTENER |
3412		SECCOMP_FILTER_FLAG_TSYNC;
3413	ASSERT_EQ(-1, user_notif_syscall(__NR_getppid, flags));
3414	ASSERT_EQ(EINVAL, errno);
3415
3416	/* but now they're not */
3417	flags |= SECCOMP_FILTER_FLAG_TSYNC_ESRCH;
3418	ret = user_notif_syscall(__NR_getppid, flags);
3419	close(ret);
3420	ASSERT_LE(0, ret);
3421}
3422
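/*
 * SECCOMP_IOCTL_NOTIF_ID_VALID succeeds only while the notifying task is
 * still alive; once the child is killed and reaped, both the ID check and a
 * late SECCOMP_IOCTL_NOTIF_SEND should fail (the latter with ENOENT).
 */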
3423TEST(user_notification_kill_in_middle)
3424{
3425	pid_t pid;
3426	long ret;
3427	int listener;
3428	struct seccomp_notif req = {};
3429	struct seccomp_notif_resp resp = {};
3430
3431	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
3432	ASSERT_EQ(0, ret) {
3433		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
3434	}
3435
3436	listener = user_notif_syscall(__NR_getppid,
3437				      SECCOMP_FILTER_FLAG_NEW_LISTENER);
3438	ASSERT_GE(listener, 0);
3439
3440	/*
3441	 * Check that nothing bad happens when we kill the task in the middle
3442	 * of a syscall.
3443	 */
3444	pid = fork();
3445	ASSERT_GE(pid, 0);
3446
3447	if (pid == 0) {
3448		ret = syscall(__NR_getppid);
3449		exit(ret != USER_NOTIF_MAGIC);
3450	}
3451
3452	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
3453	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &req.id), 0);
3454
3455	EXPECT_EQ(kill(pid, SIGKILL), 0);
3456	EXPECT_EQ(waitpid(pid, NULL, 0), pid);
3457
3458	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &req.id), -1);
3459
3460	resp.id = req.id;
3461	ret = ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp);
3462	EXPECT_EQ(ret, -1);
3463	EXPECT_EQ(errno, ENOENT);
3464}
3465
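/*
 * Socket fd written to from the SIGUSR1 handler so the parent can tell that
 * the signal was actually delivered while the child was blocked waiting for
 * a user-notification response.
 */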
3466static int handled = -1;
3467
3468static void signal_handler(int signal)
3469{
3470	if (write(handled, "c", 1) != 1)
3471		perror("write from signal");
3472}
3473
3474TEST(user_notification_signal)
3475{
3476	pid_t pid;
3477	long ret;
3478	int status, listener, sk_pair[2];
3479	struct seccomp_notif req = {};
3480	struct seccomp_notif_resp resp = {};
3481	char c;
3482
3483	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
3484	ASSERT_EQ(0, ret) {
3485		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
3486	}
3487
3488	ASSERT_EQ(socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sk_pair), 0);
3489
3490	listener = user_notif_syscall(__NR_gettid,
3491				      SECCOMP_FILTER_FLAG_NEW_LISTENER);
3492	ASSERT_GE(listener, 0);
3493
3494	pid = fork();
3495	ASSERT_GE(pid, 0);
3496
3497	if (pid == 0) {
3498		close(sk_pair[0]);
3499		handled = sk_pair[1];
3500		if (signal(SIGUSR1, signal_handler) == SIG_ERR) {
3501			perror("signal");
3502			exit(1);
3503		}
3504		/*
3505		 * ERESTARTSYS behavior is a bit hard to test, because we need
3506		 * to rely on a signal that has not yet been handled. Let's at
3507		 * least check that the error code gets propagated through, and
3508		 * hope that it doesn't break when there is actually a signal :)
3509		 */
3510		ret = syscall(__NR_gettid);
3511		exit(!(ret == -1 && errno == 512));
3512	}
3513
3514	close(sk_pair[1]);
3515
3516	memset(&req, 0, sizeof(req));
3517	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
3518
3519	EXPECT_EQ(kill(pid, SIGUSR1), 0);
3520
3521	/*
3522	 * Make sure the signal really is delivered, which means we're not
3523	 * stuck in the user notification code any more and the notification
3524	 * should be dead.
3525	 */
3526	EXPECT_EQ(read(sk_pair[0], &c, 1), 1);
3527
3528	resp.id = req.id;
3529	resp.error = -EPERM;
3530	resp.val = 0;
3531
3532	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1);
3533	EXPECT_EQ(errno, ENOENT);
3534
3535	memset(&req, 0, sizeof(req));
3536	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
3537
3538	resp.id = req.id;
3539	resp.error = -512; /* -ERESTARTSYS */
3540	resp.val = 0;
3541
3542	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
3543
3544	EXPECT_EQ(waitpid(pid, &status, 0), pid);
3545	EXPECT_EQ(true, WIFEXITED(status));
3546	EXPECT_EQ(0, WEXITSTATUS(status));
3547}
3548
3549TEST(user_notification_closed_listener)
3550{
3551	pid_t pid;
3552	long ret;
3553	int status, listener;
3554
3555	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
3556	ASSERT_EQ(0, ret) {
3557		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
3558	}
3559
3560	listener = user_notif_syscall(__NR_getppid,
3561				      SECCOMP_FILTER_FLAG_NEW_LISTENER);
3562	ASSERT_GE(listener, 0);
3563
3564	/*
3565	 * Check that we get an ENOSYS when the listener is closed.
3566	 */
3567	pid = fork();
3568	ASSERT_GE(pid, 0);
3569	if (pid == 0) {
3570		close(listener);
3571		ret = syscall(__NR_getppid);
		exit(ret != -1 || errno != ENOSYS);
3573	}
3574
3575	close(listener);
3576
3577	EXPECT_EQ(waitpid(pid, &status, 0), pid);
3578	EXPECT_EQ(true, WIFEXITED(status));
3579	EXPECT_EQ(0, WEXITSTATUS(status));
3580}
3581
3582/*
3583 * Check that a pid in a child namespace still shows up as valid in ours.
3584 */
3585TEST(user_notification_child_pid_ns)
3586{
3587	pid_t pid;
3588	int status, listener;
3589	struct seccomp_notif req = {};
3590	struct seccomp_notif_resp resp = {};
3591
3592	ASSERT_EQ(unshare(CLONE_NEWUSER | CLONE_NEWPID), 0) {
3593		if (errno == EINVAL)
3594			SKIP(return, "kernel missing CLONE_NEWUSER support");
3595	};
3596
3597	listener = user_notif_syscall(__NR_getppid,
3598				      SECCOMP_FILTER_FLAG_NEW_LISTENER);
3599	ASSERT_GE(listener, 0);
3600
3601	pid = fork();
3602	ASSERT_GE(pid, 0);
3603
3604	if (pid == 0)
3605		exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC);
3606
3607	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
3608	EXPECT_EQ(req.pid, pid);
3609
3610	resp.id = req.id;
3611	resp.error = 0;
3612	resp.val = USER_NOTIF_MAGIC;
3613
3614	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
3615
3616	EXPECT_EQ(waitpid(pid, &status, 0), pid);
3617	EXPECT_EQ(true, WIFEXITED(status));
3618	EXPECT_EQ(0, WEXITSTATUS(status));
3619	close(listener);
3620}
3621
3622/*
3623 * Check that a pid in a sibling (i.e. unrelated) namespace shows up as 0, i.e.
3624 * invalid.
3625 */
3626TEST(user_notification_sibling_pid_ns)
3627{
3628	pid_t pid, pid2;
3629	int status, listener;
3630	struct seccomp_notif req = {};
3631	struct seccomp_notif_resp resp = {};
3632
3633	ASSERT_EQ(prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0), 0) {
3634		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
3635	}
3636
3637	listener = user_notif_syscall(__NR_getppid,
3638				      SECCOMP_FILTER_FLAG_NEW_LISTENER);
3639	ASSERT_GE(listener, 0);
3640
3641	pid = fork();
3642	ASSERT_GE(pid, 0);
3643
3644	if (pid == 0) {
3645		ASSERT_EQ(unshare(CLONE_NEWPID), 0);
3646
3647		pid2 = fork();
3648		ASSERT_GE(pid2, 0);
3649
3650		if (pid2 == 0)
3651			exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC);
3652
3653		EXPECT_EQ(waitpid(pid2, &status, 0), pid2);
3654		EXPECT_EQ(true, WIFEXITED(status));
3655		EXPECT_EQ(0, WEXITSTATUS(status));
3656		exit(WEXITSTATUS(status));
3657	}
3658
3659	/* Create the sibling ns, and sibling in it. */
3660	ASSERT_EQ(unshare(CLONE_NEWPID), 0) {
3661		if (errno == EPERM)
3662			SKIP(return, "CLONE_NEWPID requires CAP_SYS_ADMIN");
3663	}
3664	ASSERT_EQ(errno, 0);
3665
3666	pid2 = fork();
3667	ASSERT_GE(pid2, 0);
3668
3669	if (pid2 == 0) {
3670		ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
3671		/*
3672		 * The pid should be 0, i.e. the task is in some namespace that
3673		 * we can't "see".
3674		 */
3675		EXPECT_EQ(req.pid, 0);
3676
3677		resp.id = req.id;
3678		resp.error = 0;
3679		resp.val = USER_NOTIF_MAGIC;
3680
3681		ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
3682		exit(0);
3683	}
3684
3685	close(listener);
3686
3687	EXPECT_EQ(waitpid(pid, &status, 0), pid);
3688	EXPECT_EQ(true, WIFEXITED(status));
3689	EXPECT_EQ(0, WEXITSTATUS(status));
3690
3691	EXPECT_EQ(waitpid(pid2, &status, 0), pid2);
3692	EXPECT_EQ(true, WIFEXITED(status));
3693	EXPECT_EQ(0, WEXITSTATUS(status));
3694}
3695
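/*
 * A faulting (NULL) buffer passed to SECCOMP_IOCTL_NOTIF_RECV should fail
 * with EFAULT without consuming the pending notification.
 */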
3696TEST(user_notification_fault_recv)
3697{
3698	pid_t pid;
3699	int status, listener;
3700	struct seccomp_notif req = {};
3701	struct seccomp_notif_resp resp = {};
3702
3703	ASSERT_EQ(unshare(CLONE_NEWUSER), 0);
3704
3705	listener = user_notif_syscall(__NR_getppid,
3706				      SECCOMP_FILTER_FLAG_NEW_LISTENER);
3707	ASSERT_GE(listener, 0);
3708
3709	pid = fork();
3710	ASSERT_GE(pid, 0);
3711
3712	if (pid == 0)
3713		exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC);
3714
3715	/* Do a bad recv() */
3716	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, NULL), -1);
3717	EXPECT_EQ(errno, EFAULT);
3718
3719	/* We should still be able to receive this notification, though. */
3720	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
3721	EXPECT_EQ(req.pid, pid);
3722
3723	resp.id = req.id;
3724	resp.error = 0;
3725	resp.val = USER_NOTIF_MAGIC;
3726
3727	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
3728
3729	EXPECT_EQ(waitpid(pid, &status, 0), pid);
3730	EXPECT_EQ(true, WIFEXITED(status));
3731	EXPECT_EQ(0, WEXITSTATUS(status));
3732}
3733
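/*
 * SECCOMP_GET_NOTIF_SIZES reports the kernel's structure sizes; they should
 * match the UAPI definitions this test was built against.
 */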
3734TEST(seccomp_get_notif_sizes)
3735{
3736	struct seccomp_notif_sizes sizes;
3737
3738	ASSERT_EQ(seccomp(SECCOMP_GET_NOTIF_SIZES, 0, &sizes), 0);
3739	EXPECT_EQ(sizes.seccomp_notif, sizeof(struct seccomp_notif));
3740	EXPECT_EQ(sizes.seccomp_notif_resp, sizeof(struct seccomp_notif_resp));
3741}
3742
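/*
 * SECCOMP_USER_NOTIF_FLAG_CONTINUE tells the kernel to let the intercepted
 * syscall run normally instead of injecting a return value; the child then
 * uses the kcmp()-based filecmp() helper (defined earlier in this file) to
 * confirm that dup() really executed.
 */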
3743TEST(user_notification_continue)
3744{
3745	pid_t pid;
3746	long ret;
3747	int status, listener;
3748	struct seccomp_notif req = {};
3749	struct seccomp_notif_resp resp = {};
3750	struct pollfd pollfd;
3751
3752	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
3753	ASSERT_EQ(0, ret) {
3754		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
3755	}
3756
3757	listener = user_notif_syscall(__NR_dup, SECCOMP_FILTER_FLAG_NEW_LISTENER);
3758	ASSERT_GE(listener, 0);
3759
3760	pid = fork();
3761	ASSERT_GE(pid, 0);
3762
3763	if (pid == 0) {
3764		int dup_fd, pipe_fds[2];
3765		pid_t self;
3766
3767		ASSERT_GE(pipe(pipe_fds), 0);
3768
3769		dup_fd = dup(pipe_fds[0]);
3770		ASSERT_GE(dup_fd, 0);
3771		EXPECT_NE(pipe_fds[0], dup_fd);
3772
3773		self = getpid();
3774		ASSERT_EQ(filecmp(self, self, pipe_fds[0], dup_fd), 0);
3775		exit(0);
3776	}
3777
3778	pollfd.fd = listener;
3779	pollfd.events = POLLIN | POLLOUT;
3780
3781	EXPECT_GT(poll(&pollfd, 1, -1), 0);
3782	EXPECT_EQ(pollfd.revents, POLLIN);
3783
3784	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
3785
3786	pollfd.fd = listener;
3787	pollfd.events = POLLIN | POLLOUT;
3788
3789	EXPECT_GT(poll(&pollfd, 1, -1), 0);
3790	EXPECT_EQ(pollfd.revents, POLLOUT);
3791
3792	EXPECT_EQ(req.data.nr, __NR_dup);
3793
3794	resp.id = req.id;
3795	resp.flags = SECCOMP_USER_NOTIF_FLAG_CONTINUE;
3796
	/*
	 * Verify that setting SECCOMP_USER_NOTIF_FLAG_CONTINUE requires the
	 * other response fields (error and val) to be zero.
	 */
3801	resp.error = 0;
3802	resp.val = USER_NOTIF_MAGIC;
3803	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1);
3804	EXPECT_EQ(errno, EINVAL);
3805
3806	resp.error = USER_NOTIF_MAGIC;
3807	resp.val = 0;
3808	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1);
3809	EXPECT_EQ(errno, EINVAL);
3810
3811	resp.error = 0;
3812	resp.val = 0;
3813	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0) {
3814		if (errno == EINVAL)
3815			SKIP(goto skip, "Kernel does not support SECCOMP_USER_NOTIF_FLAG_CONTINUE");
3816	}
3817
3818skip:
3819	EXPECT_EQ(waitpid(pid, &status, 0), pid);
3820	EXPECT_EQ(true, WIFEXITED(status));
3821	EXPECT_EQ(0, WEXITSTATUS(status)) {
3822		if (WEXITSTATUS(status) == 2) {
3823			SKIP(return, "Kernel does not support kcmp() syscall");
3824			return;
3825		}
3826	}
3827}
3828
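/*
 * When the last task using a filter exits, its listener reports POLLHUP so
 * the supervisor can tell the filter is orphaned. The child shares our fd
 * table (CLONE_FILES), so the listener it dup2()s to fd 200 outlives it.
 */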
3829TEST(user_notification_filter_empty)
3830{
3831	pid_t pid;
3832	long ret;
3833	int status;
3834	struct pollfd pollfd;
3835	struct __clone_args args = {
3836		.flags = CLONE_FILES,
3837		.exit_signal = SIGCHLD,
3838	};
3839
3840	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
3841	ASSERT_EQ(0, ret) {
3842		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
3843	}
3844
3845	pid = sys_clone3(&args, sizeof(args));
3846	ASSERT_GE(pid, 0);
3847
3848	if (pid == 0) {
3849		int listener;
3850
3851		listener = user_notif_syscall(__NR_mknodat, SECCOMP_FILTER_FLAG_NEW_LISTENER);
3852		if (listener < 0)
3853			_exit(EXIT_FAILURE);
3854
3855		if (dup2(listener, 200) != 200)
3856			_exit(EXIT_FAILURE);
3857
3858		close(listener);
3859
3860		_exit(EXIT_SUCCESS);
3861	}
3862
3863	EXPECT_EQ(waitpid(pid, &status, 0), pid);
3864	EXPECT_EQ(true, WIFEXITED(status));
3865	EXPECT_EQ(0, WEXITSTATUS(status));
3866
	/*
	 * The seccomp filter has become unused, so the listener should
	 * report POLLHUP once the kernel gets around to cleaning up the
	 * task struct.
	 */
3871	pollfd.fd = 200;
3872	pollfd.events = POLLHUP;
3873
3874	EXPECT_GT(poll(&pollfd, 1, 2000), 0);
3875	EXPECT_GT((pollfd.revents & POLLHUP) ?: 0, 0);
3876}
3877
3878static void *do_thread(void *data)
3879{
3880	return NULL;
3881}
3882
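/*
 * Same as user_notification_filter_empty, but make sure children and
 * threads spawned while the filter was alive don't keep the listener from
 * reporting POLLHUP once they have all exited.
 */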
3883TEST(user_notification_filter_empty_threaded)
3884{
3885	pid_t pid;
3886	long ret;
3887	int status;
3888	struct pollfd pollfd;
3889	struct __clone_args args = {
3890		.flags = CLONE_FILES,
3891		.exit_signal = SIGCHLD,
3892	};
3893
3894	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
3895	ASSERT_EQ(0, ret) {
3896		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
3897	}
3898
3899	pid = sys_clone3(&args, sizeof(args));
3900	ASSERT_GE(pid, 0);
3901
3902	if (pid == 0) {
3903		pid_t pid1, pid2;
3904		int listener, status;
3905		pthread_t thread;
3906
3907		listener = user_notif_syscall(__NR_dup, SECCOMP_FILTER_FLAG_NEW_LISTENER);
3908		if (listener < 0)
3909			_exit(EXIT_FAILURE);
3910
3911		if (dup2(listener, 200) != 200)
3912			_exit(EXIT_FAILURE);
3913
3914		close(listener);
3915
3916		pid1 = fork();
3917		if (pid1 < 0)
3918			_exit(EXIT_FAILURE);
3919
3920		if (pid1 == 0)
3921			_exit(EXIT_SUCCESS);
3922
3923		pid2 = fork();
3924		if (pid2 < 0)
3925			_exit(EXIT_FAILURE);
3926
3927		if (pid2 == 0)
3928			_exit(EXIT_SUCCESS);
3929
3930		if (pthread_create(&thread, NULL, do_thread, NULL) ||
3931		    pthread_join(thread, NULL))
3932			_exit(EXIT_FAILURE);
3933
3934		if (pthread_create(&thread, NULL, do_thread, NULL) ||
3935		    pthread_join(thread, NULL))
3936			_exit(EXIT_FAILURE);
3937
3938		if (waitpid(pid1, &status, 0) != pid1 || !WIFEXITED(status) ||
3939		    WEXITSTATUS(status))
3940			_exit(EXIT_FAILURE);
3941
3942		if (waitpid(pid2, &status, 0) != pid2 || !WIFEXITED(status) ||
3943		    WEXITSTATUS(status))
3944			_exit(EXIT_FAILURE);
3945
3946		exit(EXIT_SUCCESS);
3947	}
3948
3949	EXPECT_EQ(waitpid(pid, &status, 0), pid);
3950	EXPECT_EQ(true, WIFEXITED(status));
3951	EXPECT_EQ(0, WEXITSTATUS(status));
3952
	/*
	 * The seccomp filter has become unused, so the listener should
	 * report POLLHUP once the kernel gets around to cleaning up the
	 * task struct.
	 */
3957	pollfd.fd = 200;
3958	pollfd.events = POLLHUP;
3959
3960	EXPECT_GT(poll(&pollfd, 1, 2000), 0);
3961	EXPECT_GT((pollfd.revents & POLLHUP) ?: 0, 0);
3962}
3963
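/*
 * SECCOMP_IOCTL_NOTIF_ADDFD installs a duplicate of a supervisor fd into
 * the target's fd table while the target is blocked in the intercepted
 * syscall, either at the lowest free slot or at a caller-chosen number
 * with SECCOMP_ADDFD_FLAG_SETFD.
 */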
3964TEST(user_notification_addfd)
3965{
3966	pid_t pid;
3967	long ret;
3968	int status, listener, memfd, fd;
3969	struct seccomp_notif_addfd addfd = {};
3970	struct seccomp_notif_addfd_small small = {};
3971	struct seccomp_notif_addfd_big big = {};
3972	struct seccomp_notif req = {};
3973	struct seccomp_notif_resp resp = {};
3974	/* 100 ms */
3975	struct timespec delay = { .tv_nsec = 100000000 };
3976
3977	memfd = memfd_create("test", 0);
3978	ASSERT_GE(memfd, 0);
3979
3980	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
3981	ASSERT_EQ(0, ret) {
3982		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
3983	}
3984
3985	/* Check that the basic notification machinery works */
3986	listener = user_notif_syscall(__NR_getppid,
3987				      SECCOMP_FILTER_FLAG_NEW_LISTENER);
3988	ASSERT_GE(listener, 0);
3989
3990	pid = fork();
3991	ASSERT_GE(pid, 0);
3992
3993	if (pid == 0) {
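		/*
		 * The first getppid() is the notification the fds get
		 * injected into; the second one is used to exercise ADDFD
		 * against a notification that has not been received yet.
		 */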
3994		if (syscall(__NR_getppid) != USER_NOTIF_MAGIC)
3995			exit(1);
3996		exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC);
3997	}
3998
3999	ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
4000
4001	addfd.srcfd = memfd;
4002	addfd.newfd = 0;
4003	addfd.id = req.id;
4004	addfd.flags = 0x0;
4005
4006	/* Verify bad newfd_flags cannot be set */
4007	addfd.newfd_flags = ~O_CLOEXEC;
4008	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
4009	EXPECT_EQ(errno, EINVAL);
4010	addfd.newfd_flags = O_CLOEXEC;
4011
4012	/* Verify bad flags cannot be set */
4013	addfd.flags = 0xff;
4014	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
4015	EXPECT_EQ(errno, EINVAL);
4016	addfd.flags = 0;
4017
	/* Verify that newfd cannot be set without SECCOMP_ADDFD_FLAG_SETFD */
4019	addfd.newfd = 1;
4020	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
4021	EXPECT_EQ(errno, EINVAL);
4022	addfd.newfd = 0;
4023
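	/*
	 * The _SMALL/_BIG ioctl variants encode an undersized/oversized
	 * struct size in the ioctl number: anything below the v0 struct is
	 * rejected with EINVAL, and a larger struct is rejected with E2BIG
	 * whenever the unknown trailing bytes are non-zero.
	 */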
4024	/* Verify small size cannot be set */
4025	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD_SMALL, &small), -1);
4026	EXPECT_EQ(errno, EINVAL);
4027
	/* Verify that non-zero bits in the unknown trailing area are rejected */
4029	memset(&big, 0xAA, sizeof(big));
4030	big.addfd = addfd;
4031	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD_BIG, &big), -1);
4032	EXPECT_EQ(errno, E2BIG);
4033
4035	/* Verify we can set an arbitrary remote fd */
4036	fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
4037	/*
4038	 * The child has fds 0(stdin), 1(stdout), 2(stderr), 3(memfd),
4039	 * 4(listener), so the newly allocated fd should be 5.
4040	 */
4041	EXPECT_EQ(fd, 5);
4042	EXPECT_EQ(filecmp(getpid(), pid, memfd, fd), 0);
4043
	/* Verify we can set an arbitrary remote fd with the larger struct size */
4045	memset(&big, 0x0, sizeof(big));
4046	big.addfd = addfd;
4047	fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD_BIG, &big);
4048	EXPECT_EQ(fd, 6);
4049
4050	/* Verify we can set a specific remote fd */
4051	addfd.newfd = 42;
4052	addfd.flags = SECCOMP_ADDFD_FLAG_SETFD;
4053	fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
4054	EXPECT_EQ(fd, 42);
4055	EXPECT_EQ(filecmp(getpid(), pid, memfd, fd), 0);
4056
4057	/* Resume syscall */
4058	resp.id = req.id;
4059	resp.error = 0;
4060	resp.val = USER_NOTIF_MAGIC;
4061	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
4062
	/*
	 * Set the ADDFD request's id to the last request plus 1; notification
	 * IDs increment by 1 per notification.
	 */
	addfd.id = req.id + 1;

	/*
	 * This spins until the underlying notification is generated: ADDFD
	 * fails with ENOENT until the child's second getppid() creates the
	 * notification, and with EINPROGRESS once it exists but has not yet
	 * been received.
	 */
	while (ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd) == -1 &&
	       errno == ENOENT)
		nanosleep(&delay, NULL);
4073
4074	memset(&req, 0, sizeof(req));
4075	ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
4076	ASSERT_EQ(addfd.id, req.id);
4077
4078	resp.id = req.id;
4079	resp.error = 0;
4080	resp.val = USER_NOTIF_MAGIC;
4081	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
4082
4083	/* Wait for child to finish. */
4084	EXPECT_EQ(waitpid(pid, &status, 0), pid);
4085	EXPECT_EQ(true, WIFEXITED(status));
4086	EXPECT_EQ(0, WEXITSTATUS(status));
4087
4088	close(memfd);
4089}
4090
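/*
 * Verify that fd injection honors the target's RLIMIT_NOFILE: with the
 * limit dropped to zero, allocating a new fd fails with EMFILE and forcing
 * a specific fd at or above the limit fails with EBADF.
 */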
4091TEST(user_notification_addfd_rlimit)
4092{
4093	pid_t pid;
4094	long ret;
4095	int status, listener, memfd;
4096	struct seccomp_notif_addfd addfd = {};
4097	struct seccomp_notif req = {};
4098	struct seccomp_notif_resp resp = {};
4099	const struct rlimit lim = {
4100		.rlim_cur	= 0,
4101		.rlim_max	= 0,
4102	};
4103
4104	memfd = memfd_create("test", 0);
4105	ASSERT_GE(memfd, 0);
4106
4107	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
4108	ASSERT_EQ(0, ret) {
4109		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
4110	}
4111
4112	/* Check that the basic notification machinery works */
4113	listener = user_notif_syscall(__NR_getppid,
4114				      SECCOMP_FILTER_FLAG_NEW_LISTENER);
4115	ASSERT_GE(listener, 0);
4116
4117	pid = fork();
4118	ASSERT_GE(pid, 0);
4119
4120	if (pid == 0)
4121		exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC);
4122
4124	ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
4125
4126	ASSERT_EQ(prlimit(pid, RLIMIT_NOFILE, &lim, NULL), 0);
4127
4128	addfd.srcfd = memfd;
4129	addfd.newfd_flags = O_CLOEXEC;
4130	addfd.newfd = 0;
4131	addfd.id = req.id;
4132	addfd.flags = 0;
4133
4134	/* Should probably spot check /proc/sys/fs/file-nr */
4135	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
4136	EXPECT_EQ(errno, EMFILE);
4137
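	/* A specific fd at or above the target's rlimit is rejected outright */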
4138	addfd.newfd = 100;
4139	addfd.flags = SECCOMP_ADDFD_FLAG_SETFD;
4140	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
4141	EXPECT_EQ(errno, EBADF);
4142
4143	resp.id = req.id;
4144	resp.error = 0;
4145	resp.val = USER_NOTIF_MAGIC;
4146
4147	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
4148
4149	/* Wait for child to finish. */
4150	EXPECT_EQ(waitpid(pid, &status, 0), pid);
4151	EXPECT_EQ(true, WIFEXITED(status));
4152	EXPECT_EQ(0, WEXITSTATUS(status));
4153
4154	close(memfd);
4155}
4156
4157/*
4158 * TODO:
4159 * - expand NNP testing
4160 * - better arch-specific TRACE and TRAP handlers.
4161 * - endianness checking when appropriate
4162 * - 64-bit arg prodding
4163 * - arch value testing (x86 modes especially)
4164 * - verify that FILTER_FLAG_LOG filters generate log messages
4165 * - verify that RET_LOG generates log messages
4166 */
4167
4168TEST_HARNESS_MAIN
4169