// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>

#ifdef CONFIG_QOS_CTRL
#include <linux/sched/qos_ctrl.h>
#endif

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/random.h>
#include <linux/rcuwait.h>
#include <linux/compat.h>
#include <linux/io_uring.h>
#include <linux/sysfs.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/mmu_context.h>

#include <linux/hck/lite_hck_ced.h>
#include <linux/hck/lite_hck_jit_memory.h>

/*
 * The default value should be high enough to not crash a system that randomly
 * crashes its kernel from time to time, but low enough to at least not permit
 * overflowing 32-bit refcounts or the ldsem writer count.
 */
static unsigned int oops_limit = 10000;

#ifdef CONFIG_SYSCTL
static struct ctl_table kern_exit_table[] = {
	{
		.procname       = "oops_limit",
		.data           = &oops_limit,
		.maxlen         = sizeof(oops_limit),
		.mode           = 0644,
		.proc_handler   = proc_douintvec,
	},
	{ }
};

static __init int kernel_exit_sysctls_init(void)
{
	register_sysctl_init("kernel", kern_exit_table);
	return 0;
}
late_initcall(kernel_exit_sysctls_init);
#endif

static atomic_t oops_count = ATOMIC_INIT(0);

#ifdef CONFIG_SYSFS
static ssize_t oops_count_show(struct kobject *kobj, struct kobj_attribute *attr,
			       char *page)
{
	return sysfs_emit(page, "%d\n", atomic_read(&oops_count));
}

static struct kobj_attribute oops_count_attr = __ATTR_RO(oops_count);

static __init int kernel_exit_sysfs_init(void)
{
	sysfs_add_file_to_group(kernel_kobj, &oops_count_attr.attr, NULL);
	return 0;
}
late_initcall(kernel_exit_sysfs_init);
#endif

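/*
 * Illustrative note on the two registrations above (assuming the usual
 * proc/sysfs mount points): the limit is expected to appear as the writable
 * sysctl /proc/sys/kernel/oops_limit and the counter as the read-only
 * attribute /sys/kernel/oops_count, e.g. roughly:
 *
 *	# cat /sys/kernel/oops_count
 *	# echo 0 > /proc/sys/kernel/oops_limit
 *
 * Writing 0 disables the panic in make_task_dead() below, since the check
 * there is "count >= limit && limit".
 */
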
static void __unhash_process(struct task_struct *p, bool group_dead)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (group_dead) {
		detach_pid(p, PIDTYPE_TGID);
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		list_del_init(&p->sibling);
		__this_cpu_dec(process_counts);
	}
	list_del_rcu(&p->thread_group);
	list_del_rcu(&p->thread_node);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	bool group_dead = thread_group_leader(tsk);
	struct sighand_struct *sighand;
	struct tty_struct *tty;
	u64 utime, stime;

	sighand = rcu_dereference_check(tsk->sighand,
					lockdep_tasklist_lock_is_held());
	spin_lock(&sighand->siglock);

#ifdef CONFIG_POSIX_TIMERS
	posix_cpu_timers_exit(tsk);
	if (group_dead)
		posix_cpu_timers_exit_group(tsk);
#endif

	if (group_dead) {
		tty = sig->tty;
		sig->tty = NULL;
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->notify_count > 0 && !--sig->notify_count)
			wake_up_process(sig->group_exit_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
	}

	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
			      sizeof(unsigned long long));

	/*
	 * Accumulate here the counters for all threads as they die. We could
	 * skip the group leader because it is the last user of signal_struct,
	 * but we want to avoid the race with thread_group_cputime() which can
	 * see the empty ->thread_head list.
	 */
	task_cputime(tsk, &utime, &stime);
	write_seqlock(&sig->stats_lock);
	sig->utime += utime;
	sig->stime += stime;
	sig->gtime += task_gtime(tsk);
	sig->min_flt += tsk->min_flt;
	sig->maj_flt += tsk->maj_flt;
	sig->nvcsw += tsk->nvcsw;
	sig->nivcsw += tsk->nivcsw;
	sig->inblock += task_io_get_inblock(tsk);
	sig->oublock += task_io_get_oublock(tsk);
	task_io_accounting_add(&sig->ioac, &tsk->ioac);
	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
	sig->nr_threads--;
	__unhash_process(tsk, group_dead);
	write_sequnlock(&sig->stats_lock);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	flush_sigqueue(&tsk->pending);
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	if (group_dead) {
		flush_sigqueue(&sig->shared_pending);
		tty_kref_put(tty);
	}
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	perf_event_delayed_put(tsk);
	trace_sched_process_free(tsk);
	put_task_struct(tsk);
}

void put_task_struct_rcu_user(struct task_struct *task)
{
	if (refcount_dec_and_test(&task->rcu_users))
		call_rcu(&task->rcu, delayed_put_task_struct);
}

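/*
 * Illustrative sketch (not taken verbatim from any particular caller): the
 * extra RCU grace period provided by put_task_struct_rcu_user() is what makes
 * the common lockless lookup pattern safe, e.g. something like:
 *
 *	rcu_read_lock();
 *	p = find_task_by_vpid(nr);
 *	if (p)
 *		get_task_struct(p);
 *	rcu_read_unlock();
 *
 * A task_struct reached under rcu_read_lock() cannot be freed before the
 * grace period that follows the final rcu_users drop, so taking a regular
 * reference in that window is safe.
 */
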
void release_task(struct task_struct *p)
{
	struct task_struct *leader;
	struct pid *thread_pid;
	int zap_leader;
repeat:
	/* don't need to get the RCU readlock here - the process is dead and
	 * can't be modifying its own credentials. But shut RCU-lockdep up */
	rcu_read_lock();
	atomic_dec(&__task_cred(p)->user->processes);
	rcu_read_unlock();

	cgroup_release(p);

	write_lock_irq(&tasklist_lock);
	ptrace_release_task(p);
	thread_pid = get_pid(p->thread_pid);
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader)
			&& leader->exit_state == EXIT_ZOMBIE) {
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 */
		zap_leader = do_notify_parent(leader, leader->exit_signal);
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;
	}

	write_unlock_irq(&tasklist_lock);
	seccomp_filter_release(p);
	proc_flush_pid(thread_pid);
	put_pid(thread_pid);
	release_thread(p);
	put_task_struct_rcu_user(p);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

int rcuwait_wake_up(struct rcuwait *w)
{
	int ret = 0;
	struct task_struct *task;

	rcu_read_lock();

	/*
	 * Order condition vs @task, such that everything prior to the load
	 * of @task is visible. This is the condition as to why the user called
	 * rcuwait_wake() in the first place. Pairs with set_current_state()
	 * barrier (A) in rcuwait_wait_event().
	 *
	 *        WAIT                WAKE
	 *        [S] tsk = current	  [S] cond = true
	 *        MB (A)	      MB (B)
	 *        [L] cond		  [L] tsk
	 */
	smp_mb(); /* (B) */

	task = rcu_dereference(w->task);
	if (task)
		ret = wake_up_process(task);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rcuwait_wake_up);

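/*
 * For context, the waiter side referenced in the barrier comment above is
 * typically something like (see rcuwait_wait_event() in <linux/rcuwait.h>):
 *
 *	rcuwait_wait_event(&w, cond, TASK_UNINTERRUPTIBLE);
 *
 * which publishes current in w->task, issues barrier (A) through
 * set_current_state() and only then re-checks the condition, so a wake-up
 * racing with the sleep cannot be lost.
 */
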
/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp,
					struct task_struct *ignored_task)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if ((p == ignored_task) ||
		    (p->exit_state && thread_group_empty(p)) ||
		    is_global_init(p->real_parent))
			continue;

		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p))
			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return 1;
}

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static bool has_stopped_jobs(struct pid *pgrp)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return true;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return false;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
	struct pid *pgrp = task_pgrp(tsk);
	struct task_struct *ignored_task = tsk;

	if (!parent)
		/* exit: our father is in a different pgrp than
		 * we are and we were the only connection outside.
		 */
		parent = tsk->real_parent;
	else
		/* reparent: our child is in a different pgrp than
		 * we are, and it was the only connection outside.
		 */
		ignored_task = NULL;

	if (task_pgrp(parent) != pgrp &&
	    task_session(parent) == task_session(tsk) &&
	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}
}

#ifdef CONFIG_MEMCG
/*
 * A task is exiting.   If it owned this mm, find a new owner for the mm.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *c, *g, *p = current;

retry:
	/*
	 * If the exiting or execing task is not the owner, it's
	 * someone else's problem.
	 */
	if (mm->owner != p)
		return;
	/*
	 * The current owner is exiting/execing and there are no other
	 * candidates.  Do not leave the mm pointing to a possibly
	 * freed task structure.
	 */
	if (atomic_read(&mm->mm_users) <= 1) {
		WRITE_ONCE(mm->owner, NULL);
		return;
	}

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search in the siblings
	 */
	list_for_each_entry(c, &p->real_parent->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search through everything else, we should not get here often.
	 */
	for_each_process(g) {
		if (g->flags & PF_KTHREAD)
			continue;
		for_each_thread(g, c) {
			if (c->mm == mm)
				goto assign_new_owner;
			if (c->mm)
				break;
		}
	}
	read_unlock(&tasklist_lock);
	/*
	 * We found no owner yet mm_users > 1: this implies that we are
	 * most likely racing with swapoff (try_to_unuse()) or /proc or
	 * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
	 */
	WRITE_ONCE(mm->owner, NULL);
	return;

assign_new_owner:
	BUG_ON(c == p);
	get_task_struct(c);
	/*
	 * The task_lock protects c->mm from changing.
	 * We always want mm->owner->mm == mm
	 */
	task_lock(c);
	/*
	 * Delay read_unlock() till we have the task_lock()
	 * to ensure that c does not slip away underneath us
	 */
	read_unlock(&tasklist_lock);
	if (c->mm != mm) {
		task_unlock(c);
		put_task_struct(c);
		goto retry;
	}
	WRITE_ONCE(mm->owner, c);
	task_unlock(c);
	put_task_struct(c);
}
#endif /* CONFIG_MEMCG */

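/*
 * Descriptive note: the mm->owner link maintained above is consulted by the
 * memory controller when it needs a task (and hence a cgroup) to attribute
 * an mm to after its original owner has exited; the walk order used here
 * (children, then siblings, then every thread in the system) is only a
 * heuristic to keep the new owner close to the old one.
 */
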
/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(void)
{
	struct mm_struct *mm = current->mm;
	struct core_state *core_state;

	exit_mm_release(current, mm);
	if (!mm)
		return;
	sync_mm_rss(mm);
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_lock around checking core_state
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment ->nr_threads for each thread in the
	 * group with ->mm != NULL.
	 */
	mmap_read_lock(mm);
	core_state = mm->core_state;
	if (core_state) {
		struct core_thread self;

		mmap_read_unlock(mm);

		self.task = current;
		if (self.task->flags & PF_SIGNALED)
			self.next = xchg(&core_state->dumper.next, &self);
		else
			self.task = NULL;
		/*
		 * Implies mb(), the result of xchg() must be visible
		 * to core_state->dumper.
		 */
		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!self.task) /* see coredump_finish() */
				break;
			freezable_schedule();
		}
		__set_current_state(TASK_RUNNING);
		mmap_read_lock(mm);
	}
	mmgrab(mm);
	BUG_ON(mm != current->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(current);
	current->mm = NULL;
	mmap_read_unlock(mm);
	enter_lazy_tlb(mm, current);
	task_unlock(current);
	mm_update_next_owner(mm);
	mmput(mm);
	if (test_thread_flag(TIF_MEMDIE))
		exit_oom_victim();
}

static struct task_struct *find_alive_thread(struct task_struct *p)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		if (!(t->flags & PF_EXITING))
			return t;
	}
	return NULL;
}

static struct task_struct *find_child_reaper(struct task_struct *father,
						struct list_head *dead)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *reaper = pid_ns->child_reaper;
	struct task_struct *p, *n;

	if (likely(reaper != father))
		return reaper;

	reaper = find_alive_thread(father);
	if (reaper) {
		pid_ns->child_reaper = reaper;
		return reaper;
	}

	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	zap_pid_ns_processes(pid_ns);
	write_lock_irq(&tasklist_lock);

	return father;
}

/*
 * When we die, we re-parent all our children, and try to:
 * 1. give them to another thread in our thread group, if such a member exists
 * 2. give it to the first ancestor process which prctl'd itself as a
 *    child_subreaper for its children (like a service manager)
 * 3. give it to the init process (PID 1) in our pid namespace
 */
static struct task_struct *find_new_reaper(struct task_struct *father,
					   struct task_struct *child_reaper)
{
	struct task_struct *thread, *reaper;

	thread = find_alive_thread(father);
	if (thread)
		return thread;

	if (father->signal->has_child_subreaper) {
		unsigned int ns_level = task_pid(father)->level;
		/*
		 * Find the first ->is_child_subreaper ancestor in our pid_ns.
		 * We can't check reaper != child_reaper to ensure we do not
		 * cross the namespaces, the exiting parent could be injected
		 * by setns() + fork().
		 * We check pid->level, this is slightly more efficient than
		 * task_active_pid_ns(reaper) != task_active_pid_ns(father).
		 */
		for (reaper = father->real_parent;
		     task_pid(reaper)->level == ns_level;
		     reaper = reaper->real_parent) {
			if (reaper == &init_task)
				break;
			if (!reaper->signal->is_child_subreaper)
				continue;
			thread = find_alive_thread(reaper);
			if (thread)
				return thread;
		}
	}

	return child_reaper;
}

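/*
 * Illustrative note: the is_child_subreaper / has_child_subreaper bits
 * consulted above are set from userspace, typically by a service manager
 * doing something like
 *
 *	prctl(PR_SET_CHILD_SUBREAPER, 1);
 *
 * after which orphaned descendants are reparented to it (rule 2 above)
 * instead of to the pid namespace's init.
 */
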
/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
				struct list_head *dead)
{
	if (unlikely(p->exit_state == EXIT_DEAD))
		return;

	/* We don't want people slaying init. */
	p->exit_signal = SIGCHLD;

	/* If it has exited notify the new parent about this child's death. */
	if (!p->ptrace &&
	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
		if (do_notify_parent(p, p->exit_signal)) {
			p->exit_state = EXIT_DEAD;
			list_add(&p->ptrace_entry, dead);
		}
	}

	kill_orphaned_pgrp(p, father);
}

/*
 * This does two things:
 *
 * A.  Make init inherit all the child processes
 * B.  Check to see if any process groups have become orphaned
 *	as a result of our exiting, and if they have any stopped
 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void forget_original_parent(struct task_struct *father,
					struct list_head *dead)
{
	struct task_struct *p, *t, *reaper;

	if (unlikely(!list_empty(&father->ptraced)))
		exit_ptrace(father, dead);

	/* Can drop and reacquire tasklist_lock */
	reaper = find_child_reaper(father, dead);
	if (list_empty(&father->children))
		return;

	reaper = find_new_reaper(father, reaper);
	list_for_each_entry(p, &father->children, sibling) {
		for_each_thread(p, t) {
			RCU_INIT_POINTER(t->real_parent, reaper);
			BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father));
			if (likely(!t->ptrace))
				t->parent = t->real_parent;
			if (t->pdeath_signal)
				group_send_sig_info(t->pdeath_signal,
						    SEND_SIG_NOINFO, t,
						    PIDTYPE_TGID);
		}
		/*
		 * If this is a threaded reparent there is no need to
		 * notify anyone anything has happened.
		 */
		if (!same_thread_group(reaper, father))
			reparent_leader(father, p, dead);
	}
	list_splice_tail_init(&father->children, &reaper->children);
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	bool autoreap;
	struct task_struct *p, *n;
	LIST_HEAD(dead);

	write_lock_irq(&tasklist_lock);
	forget_original_parent(tsk, &dead);

	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	tsk->exit_state = EXIT_ZOMBIE;
	if (unlikely(tsk->ptrace)) {
		int sig = thread_group_leader(tsk) &&
				thread_group_empty(tsk) &&
				!ptrace_reparented(tsk) ?
			tsk->exit_signal : SIGCHLD;
		autoreap = do_notify_parent(tsk, sig);
	} else if (thread_group_leader(tsk)) {
		autoreap = thread_group_empty(tsk) &&
			do_notify_parent(tsk, tsk->exit_signal);
	} else {
		autoreap = true;
	}

	if (autoreap) {
		tsk->exit_state = EXIT_DEAD;
		list_add(&tsk->ptrace_entry, &dead);
	}

	/* mt-exec, de_thread() is waiting for group leader */
	if (unlikely(tsk->signal->notify_count < 0))
		wake_up_process(tsk->signal->group_exit_task);
	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}
}

#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long free;

	free = stack_not_used(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
			current->comm, task_pid_nr(current), free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

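/*
 * Descriptive overview of what follows: do_exit() is the single point
 * through which every task leaves the kernel, whether via exit(2) /
 * exit_group(2), a fatal signal, or an oops through make_task_dead().
 * The rough order of teardown implemented below is: sanity checks and
 * uaccess/preempt fixups, the PTRACE_EVENT_EXIT stop, signal state
 * (PF_EXITING), accounting, exit_mm(), IPC/file/fs/namespace teardown,
 * perf and cgroup detach, exit_notify() to reparent children and signal
 * the parent, and finally do_task_dead().
 */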
void __noreturn do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	CALL_HCK_LITE_HOOK(exit_jit_memory_lhck, current);

	/*
	 * We can get here from a kernel oops, sometimes with preemption off.
	 * Start by checking for critical errors.
	 * Then fix up important state like USER_DS and preemption.
	 * Then do everything else.
	 */

	WARN_ON(blk_needs_flush_plug(tsk));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	/*
	 * If do_exit is called because this process oopsed, it's possible
	 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
	 * continuing. Amongst other possible reasons, this is to prevent
	 * mm_release()->clear_child_tid() from writing to a user-controlled
	 * kernel address.
	 */
	force_uaccess_begin();

	if (unlikely(in_atomic())) {
		pr_info("note: %s[%d] exited with preempt_count %d\n",
			current->comm, task_pid_nr(current),
			preempt_count());
		preempt_count_set(PREEMPT_ENABLED);
	}

	profile_task_exit(tsk);
	kcov_task_exit(tsk);

	ptrace_event(PTRACE_EVENT_EXIT, code);

	validate_creds_for_do_exit(tsk);

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		pr_alert("Fixing recursive fault but reboot is needed!\n");
		futex_exit_recursive(tsk);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	io_uring_files_cancel();
	exit_signals(tsk);  /* sets PF_EXITING */
	sched_exit(tsk);

#ifdef CONFIG_QOS_CTRL
	sched_exit_qos_list(tsk);
#endif

	/* sync mm's RSS info before statistics gathering */
	if (tsk->mm)
		sync_mm_rss(tsk->mm);
	acct_update_integrals(tsk);
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		/*
		 * If the last thread of global init has exited, panic
		 * immediately to get a useable coredump.
		 */
		if (unlikely(is_global_init(tsk)))
			panic("Attempted to kill init! exitcode=0x%08x\n",
				tsk->signal->group_exit_code ?: (int)code);

#ifdef CONFIG_POSIX_TIMERS
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk);
#endif
		if (tsk->mm)
			setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
	}
	acct_collect(code, group_dead);
	if (group_dead)
		tty_audit_exit();
	audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm();

	if (group_dead)
		acct_process();
	trace_sched_process_exit(tsk);

	exit_sem(tsk);
	exit_shm(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	if (group_dead)
		disassociate_ctty(1);
	exit_task_namespaces(tsk);
	exit_task_work(tsk);
	exit_thread(tsk);

	/*
	 * Flush inherited counters to the parent - before the parent
	 * gets woken up by child-exit notifications.
	 *
	 * because of cgroup mode, must be called before cgroup_exit()
	 */
	perf_event_exit_task(tsk);

	sched_autogroup_exit_task(tsk);
	cgroup_exit(tsk);

	/*
	 * FIXME: do that only when needed, using sched_exit tracepoint
	 */
	flush_ptrace_hw_breakpoint(tsk);

	exit_tasks_rcu_start();
	exit_notify(tsk, group_dead);
	CALL_HCK_LITE_HOOK(ced_exit_lhck, tsk);
	proc_exit_connector(tsk);
	mpol_put_task_policy(tsk);
#ifdef CONFIG_FUTEX
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held();

	if (tsk->io_context)
		exit_io_context(tsk);

	if (tsk->splice_pipe)
		free_pipe_info(tsk->splice_pipe);

	if (tsk->task_frag.page)
		put_page(tsk->task_frag.page);

	validate_creds_for_do_exit(tsk);

	check_stack_usage();
	preempt_disable();
	if (tsk->nr_dirtied)
		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
	exit_rcu();
	exit_tasks_rcu_finish();

	lockdep_free_task(tsk);
	do_task_dead();
}
EXPORT_SYMBOL_GPL(do_exit);

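/*
 * Descriptive note: make_task_dead() below is the exit path used when the
 * task cannot be trusted to unwind normally - typically reached from
 * oops/BUG handling rather than from a regular exit - and it is what feeds
 * the oops_count / oops_limit accounting configured at the top of this file.
 */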
void __noreturn make_task_dead(int signr)
{
	/*
	 * Take the task off the cpu after something catastrophic has
	 * happened.
	 */
	unsigned int limit;

	/*
	 * Every time the system oopses, if the oops happens while a reference
	 * to an object was held, the reference leaks.
	 * If the oops doesn't also leak memory, repeated oopsing can cause
	 * reference counters to wrap around (if they're not using refcount_t).
	 * This means that repeated oopsing can make unexploitable-looking bugs
	 * exploitable through repeated oopsing.
	 * To make sure this can't happen, place an upper bound on how often the
	 * kernel may oops without panic().
	 */
	limit = READ_ONCE(oops_limit);
	if (atomic_inc_return(&oops_count) >= limit && limit)
		panic("Oopsed too often (kernel.oops_limit is %d)", limit);

	do_exit(signr);
}

void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}
EXPORT_SYMBOL(complete_and_exit);

SYSCALL_DEFINE1(exit, int, error_code)
{
	do_exit((error_code&0xff)<<8);
}

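/*
 * Worked example of the encoding above: (error_code & 0xff) << 8 packs the
 * low byte of the exit code into the wait status format, so exit(1) becomes
 * 0x0100 and a waiting parent sees WIFEXITED(status) true with
 * WEXITSTATUS(status) == 1; the low bits stay reserved for the
 * terminating-signal and core-dump cases handled in the wait code below.
 */
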
/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
void
do_group_exit(int exit_code)
{
	struct signal_struct *sig = current->signal;

	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (signal_group_exit(sig))
		exit_code = sig->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct sighand_struct *const sighand = current->sighand;

		spin_lock_irq(&sighand->siglock);
		if (signal_group_exit(sig))
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			sig->flags = SIGNAL_GROUP_EXIT;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
	do_group_exit((error_code & 0xff) << 8);
	/* NOTREACHED */
	return 0;
}

struct waitid_info {
	pid_t pid;
	uid_t uid;
	int status;
	int cause;
};

struct wait_opts {
	enum pid_type		wo_type;
	int			wo_flags;
	struct pid		*wo_pid;

	struct waitid_info	*wo_info;
	int			wo_stat;
	struct rusage		*wo_rusage;

	wait_queue_entry_t	child_wait;
	int			notask_error;
};

static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
	return	wo->wo_type == PIDTYPE_MAX ||
		task_pid_type(p, wo->wo_type) == wo->wo_pid;
}

static int
eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
{
	if (!eligible_pid(wo, p))
		return 0;

	/*
	 * Wait for all children (clone and not) if __WALL is set or
	 * if it is traced by us.
	 */
	if (ptrace || (wo->wo_flags & __WALL))
		return 1;

	/*
	 * Otherwise, wait for clone children *only* if __WCLONE is set;
	 * otherwise, wait for non-clone children *only*.
	 *
	 * Note: a "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD, or a non-leader thread which
	 * we can only see if it is traced by us.
	 */
	if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
		return 0;

	return 1;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
	int state, status;
	pid_t pid = task_pid_vnr(p);
	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
	struct waitid_info *infop;

	if (!likely(wo->wo_flags & WEXITED))
		return 0;

	if (unlikely(wo->wo_flags & WNOWAIT)) {
		status = p->exit_code;
		get_task_struct(p);
		read_unlock(&tasklist_lock);
		sched_annotate_sleep();
		if (wo->wo_rusage)
			getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
		put_task_struct(p);
		goto out_info;
	}
	/*
	 * Move the task's state to DEAD/TRACE, only one thread can do this.
	 */
	state = (ptrace_reparented(p) && thread_group_leader(p)) ?
		EXIT_TRACE : EXIT_DEAD;
	if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
		return 0;
	/*
	 * We own this thread, nobody else can reap it.
	 */
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();

	/*
	 * Check thread_group_leader() to exclude the traced sub-threads.
	 */
	if (state == EXIT_DEAD && thread_group_leader(p)) {
		struct signal_struct *sig = p->signal;
		struct signal_struct *psig = current->signal;
		unsigned long maxrss;
		u64 tgutime, tgstime;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields because the whole thread group is dead
		 * and nobody can change them.
		 *
		 * psig->stats_lock also protects us from our sub-threads
		 * which can reap other children at the same time. Until
		 * we change k_getrusage()-like users to rely on this lock
		 * we have to take ->siglock as well.
		 *
		 * We use thread_group_cputime_adjusted() to get times for
		 * the thread group, which consolidates times for all threads
		 * in the group including the group leader.
		 */
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		spin_lock_irq(&current->sighand->siglock);
		write_seqlock(&psig->stats_lock);
		psig->cutime += tgutime + sig->cutime;
		psig->cstime += tgstime + sig->cstime;
		psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		maxrss = max(sig->maxrss, sig->cmaxrss);
		if (psig->cmaxrss < maxrss)
			psig->cmaxrss = maxrss;
		task_io_accounting_add(&psig->ioac, &p->ioac);
		task_io_accounting_add(&psig->ioac, &sig->ioac);
		write_sequnlock(&psig->stats_lock);
		spin_unlock_irq(&current->sighand->siglock);
	}

	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	wo->wo_stat = status;

	if (state == EXIT_TRACE) {
		write_lock_irq(&tasklist_lock);
		/* We dropped tasklist, ptracer could die and untrace */
		ptrace_unlink(p);

		/* If parent wants a zombie, don't release it now */
		state = EXIT_ZOMBIE;
		if (do_notify_parent(p, p->exit_signal))
			state = EXIT_DEAD;
		p->exit_state = state;
		write_unlock_irq(&tasklist_lock);
	}
	if (state == EXIT_DEAD)
		release_task(p);

out_info:
	infop = wo->wo_info;
	if (infop) {
		if ((status & 0x7f) == 0) {
			infop->cause = CLD_EXITED;
			infop->status = status >> 8;
		} else {
			infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			infop->status = status & 0x7f;
		}
		infop->pid = pid;
		infop->uid = uid;
	}

	return pid;
}

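/*
 * Worked example of the encoding decoded in out_info above: a child that
 * calls exit(3) leaves exit_code 3 << 8 == 0x300, so the low seven bits
 * are zero and the waiter reports CLD_EXITED with status 3; a child
 * killed by SIGKILL leaves status 9, reported as CLD_KILLED (or
 * CLD_DUMPED if the core-dump bit 0x80 is also set) with status 9.
 */
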
static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
	if (ptrace) {
		if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING))
			return &p->exit_code;
	} else {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return &p->signal->group_exit_code;
	}
	return NULL;
}

/**
 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
 * @wo: wait options
 * @ptrace: is the wait for ptrace
 * @p: task to wait for
 *
 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
 *
 * CONTEXT:
 * read_lock(&tasklist_lock), which is released if return value is
 * non-zero.  Also, grabs and releases @p->sighand->siglock.
 *
 * RETURNS:
 * 0 if wait condition didn't exist and search for other wait conditions
 * should continue.  Non-zero return, -errno on failure and @p's pid on
 * success, implies that tasklist_lock is released and wait condition
 * search should terminate.
 */
static int wait_task_stopped(struct wait_opts *wo,
				int ptrace, struct task_struct *p)
{
	struct waitid_info *infop;
	int exit_code, *p_code, why;
	uid_t uid = 0; /* unneeded, required by compiler */
	pid_t pid;

	/*
	 * Traditionally we see ptrace'd stopped tasks regardless of options.
	 */
	if (!ptrace && !(wo->wo_flags & WUNTRACED))
		return 0;

	if (!task_stopped_code(p, ptrace))
		return 0;

	exit_code = 0;
	spin_lock_irq(&p->sighand->siglock);

	p_code = task_stopped_code(p, ptrace);
	if (unlikely(!p_code))
		goto unlock_sig;

	exit_code = *p_code;
	if (!exit_code)
		goto unlock_sig;

	if (!unlikely(wo->wo_flags & WNOWAIT))
		*p_code = 0;

	uid = from_kuid_munged(current_user_ns(), task_uid(p));
unlock_sig:
	spin_unlock_irq(&p->sighand->siglock);
	if (!exit_code)
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	pid = task_pid_vnr(p);
	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();
	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	put_task_struct(p);

	if (likely(!(wo->wo_flags & WNOWAIT)))
		wo->wo_stat = (exit_code << 8) | 0x7f;

	infop = wo->wo_info;
	if (infop) {
		infop->cause = why;
		infop->status = exit_code;
		infop->pid = pid;
		infop->uid = uid;
	}
	return pid;
}

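/*
 * Note on the encoding above: (exit_code << 8) | 0x7f is the classic
 * wait status for a stopped child, i.e. the form userspace decodes with
 * WIFSTOPPED() (low seven bits all set) and WSTOPSIG() (the stop signal
 * in bits 8-15).
 */
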
/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
	struct waitid_info *infop;
	pid_t pid;
	uid_t uid;

	if (!unlikely(wo->wo_flags & WCONTINUED))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held. */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!unlikely(wo->wo_flags & WNOWAIT))
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	uid = from_kuid_munged(current_user_ns(), task_uid(p));
	spin_unlock_irq(&p->sighand->siglock);

	pid = task_pid_vnr(p);
	get_task_struct(p);
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();
	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	put_task_struct(p);

	infop = wo->wo_info;
	if (!infop) {
		wo->wo_stat = 0xffff;
	} else {
		infop->cause = CLD_CONTINUED;
		infop->pid = pid;
		infop->uid = uid;
		infop->status = SIGCONT;
	}
	return pid;
}

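/*
 * Note on the encoding above: wo_stat == 0xffff is the wait status that
 * userspace recognizes via WIFCONTINUED(); waitid() callers instead get
 * the richer CLD_CONTINUED/SIGCONT report through *infop.
 */
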
/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
				struct task_struct *p)
{
	/*
	 * We can race with wait_task_zombie() from another thread.
	 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
	 * can't confuse the checks below.
	 */
	int exit_state = READ_ONCE(p->exit_state);
	int ret;

	if (unlikely(exit_state == EXIT_DEAD))
		return 0;

	ret = eligible_child(wo, ptrace, p);
	if (!ret)
		return ret;

	if (unlikely(exit_state == EXIT_TRACE)) {
		/*
		 * ptrace == 0 means we are the natural parent. In this case
		 * we should clear notask_error, debugger will notify us.
		 */
		if (likely(!ptrace))
			wo->notask_error = 0;
		return 0;
	}

	if (likely(!ptrace) && unlikely(p->ptrace)) {
		/*
		 * If it is traced by its real parent's group, just pretend
		 * the caller is ptrace_do_wait() and reap this child if it
		 * is zombie.
		 *
		 * This also hides group stop state from real parent; otherwise
		 * a single stop can be reported twice as group and ptrace stop.
		 * If a ptracer wants to distinguish these two events for its
		 * own children it should create a separate process which takes
		 * the role of real parent.
		 */
		if (!ptrace_reparented(p))
			ptrace = 1;
	}

	/* slay zombie? */
	if (exit_state == EXIT_ZOMBIE) {
		/* we don't reap group leaders with subthreads */
		if (!delay_group_leader(p)) {
			/*
			 * A zombie ptracee is only visible to its ptracer.
			 * Notification and reaping will be cascaded to the
			 * real parent when the ptracer detaches.
			 */
			if (unlikely(ptrace) || likely(!p->ptrace))
				return wait_task_zombie(wo, p);
		}

		/*
		 * Allow access to stopped/continued state via zombie by
		 * falling through.  Clearing of notask_error is complex.
		 *
		 * When !@ptrace:
		 *
		 * If WEXITED is set, notask_error should naturally be
		 * cleared.  If not, a subset of WSTOPPED|WCONTINUED is set,
		 * so, if there are live subthreads, there are events to
		 * wait for.  If all subthreads are dead, it's still safe
		 * to clear - this function will be called again in a finite
		 * amount of time once all the subthreads are released and
		 * will then return without clearing.
		 *
		 * When @ptrace:
		 *
		 * Stopped state is per-task and thus can't change once the
		 * target task dies.  Only continued and exited can happen.
		 * Clear notask_error if WCONTINUED | WEXITED.
		 */
		if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
			wo->notask_error = 0;
	} else {
		/*
		 * @p is alive and it's gonna stop, continue or exit, so
		 * there always is something to wait for.
		 */
		wo->notask_error = 0;
	}

	/*
	 * Wait for stopped.  Depending on @ptrace, different stopped state
	 * is used and the two don't interact with each other.
	 */
	ret = wait_task_stopped(wo, ptrace, p);
	if (ret)
		return ret;

	/*
	 * Wait for continued.  There's only one continued state and the
	 * ptracer can consume it which can confuse the real parent.  Don't
	 * use WCONTINUED from ptracer.  You don't need or want it.
	 */
	return wait_task_continued(wo, p);
}

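/*
 * To summarize the two call sites below: do_wait_thread() walks the
 * ->children list (ptrace == 0, the natural-parent view) while
 * ptrace_do_wait() walks the ->ptraced list (ptrace == 1, the tracer's
 * view), so a task traced by someone other than its real parent is
 * considered from both sides with the semantics described above.
 */
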
/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * ->notask_error is 0 if there were any eligible children,
 * or still -ECHILD.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->children, sibling) {
		int ret = wait_consider_task(wo, 0, p);

		if (ret)
			return ret;
	}

	return 0;
}

static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
		int ret = wait_consider_task(wo, 1, p);

		if (ret)
			return ret;
	}

	return 0;
}

static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode,
				int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts,
						child_wait);
	struct task_struct *p = key;

	if (!eligible_pid(wo, p))
		return 0;

	if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
		return 0;

	return default_wake_function(wait, mode, sync, key);
}

void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
	__wake_up_sync_key(&parent->signal->wait_chldexit,
				TASK_INTERRUPTIBLE, p);
}

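/*
 * __wake_up_parent() is intended to be called by the child-side
 * notification code when a child exits, stops or continues;
 * child_wait_callback() then filters the wakeup so that a sleeper in
 * do_wait() below is only woken for a child matching its wait criteria.
 */
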
static long do_wait(struct wait_opts *wo)
{
	struct task_struct *tsk;
	int retval;

	trace_sched_process_wait(wo->wo_pid);

	init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
	wo->child_wait.private = current;
	add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
repeat:
	/*
	 * If there is nothing that can match our criteria, just get out.
	 * We will clear ->notask_error to zero if we see any child that
	 * might later match our criteria, even if we are not able to reap
	 * it yet.
	 */
	wo->notask_error = -ECHILD;
	if ((wo->wo_type < PIDTYPE_MAX) &&
	    (!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type)))
		goto notask;

	set_current_state(TASK_INTERRUPTIBLE);
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		retval = do_wait_thread(wo, tsk);
		if (retval)
			goto end;

		retval = ptrace_do_wait(wo, tsk);
		if (retval)
			goto end;

		if (wo->wo_flags & __WNOTHREAD)
			break;
	} while_each_thread(current, tsk);
	read_unlock(&tasklist_lock);

notask:
	retval = wo->notask_error;
	if (!retval && !(wo->wo_flags & WNOHANG)) {
		retval = -ERESTARTSYS;
		if (!signal_pending(current)) {
			schedule();
			goto repeat;
		}
	}
end:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
	return retval;
}

static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop,
			  int options, struct rusage *ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;
	unsigned int f_flags = 0;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		type = PIDTYPE_MAX;
		break;
	case P_PID:
		type = PIDTYPE_PID;
		if (upid <= 0)
			return -EINVAL;

		pid = find_get_pid(upid);
		break;
	case P_PGID:
		type = PIDTYPE_PGID;
		if (upid < 0)
			return -EINVAL;

		if (upid)
			pid = find_get_pid(upid);
		else
			pid = get_task_pid(current, PIDTYPE_PGID);
		break;
	case P_PIDFD:
		type = PIDTYPE_PID;
		if (upid < 0)
			return -EINVAL;

		pid = pidfd_get_pid(upid, &f_flags);
		if (IS_ERR(pid))
			return PTR_ERR(pid);

		break;
	default:
		return -EINVAL;
	}

	wo.wo_type = type;
	wo.wo_pid = pid;
	wo.wo_flags = options;
	wo.wo_info = infop;
	wo.wo_rusage = ru;
	if (f_flags & O_NONBLOCK)
		wo.wo_flags |= WNOHANG;

	ret = do_wait(&wo);
	if (!ret && !(options & WNOHANG) && (f_flags & O_NONBLOCK))
		ret = -EAGAIN;

	put_pid(pid);
	return ret;
}

SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
		infop, int, options, struct rusage __user *, ru)
{
	struct rusage r;
	struct waitid_info info = {.status = 0};
	long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
	int signo = 0;

	if (err > 0) {
		signo = SIGCHLD;
		err = 0;
		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
			return -EFAULT;
	}
	if (!infop)
		return err;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return -EFAULT;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(info.cause, &infop->si_code, Efault);
	unsafe_put_user(info.pid, &infop->si_pid, Efault);
	unsafe_put_user(info.uid, &infop->si_uid, Efault);
	unsafe_put_user(info.status, &infop->si_status, Efault);
	user_write_access_end();
	return err;
Efault:
	user_write_access_end();
	return -EFAULT;
}

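/*
 * Usage note (illustrative): from userspace this is reached as, e.g.,
 * waitid(P_PIDFD, pidfd, &info, WEXITED); if the pidfd was opened with
 * O_NONBLOCK, kernel_waitid() above turns that into WNOHANG and a
 * would-block wait reports -EAGAIN rather than sleeping.
 */
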
long kernel_wait4(pid_t upid, int __user *stat_addr, int options,
		  struct rusage *ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	/* -INT_MIN is not defined */
	if (upid == INT_MIN)
		return -ESRCH;

	if (upid == -1)
		type = PIDTYPE_MAX;
	else if (upid < 0) {
		type = PIDTYPE_PGID;
		pid = find_get_pid(-upid);
	} else if (upid == 0) {
		type = PIDTYPE_PGID;
		pid = get_task_pid(current, PIDTYPE_PGID);
	} else /* upid > 0 */ {
		type = PIDTYPE_PID;
		pid = find_get_pid(upid);
	}

	wo.wo_type = type;
	wo.wo_pid = pid;
	wo.wo_flags = options | WEXITED;
	wo.wo_info = NULL;
	wo.wo_stat = 0;
	wo.wo_rusage = ru;
	ret = do_wait(&wo);
	put_pid(pid);
	if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr))
		ret = -EFAULT;

	return ret;
}

int kernel_wait(pid_t pid, int *stat)
{
	struct wait_opts wo = {
		.wo_type	= PIDTYPE_PID,
		.wo_pid		= find_get_pid(pid),
		.wo_flags	= WEXITED,
	};
	int ret;

	ret = do_wait(&wo);
	if (ret > 0 && wo.wo_stat)
		*stat = wo.wo_stat;
	put_pid(wo.wo_pid);
	return ret;
}

SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
		int, options, struct rusage __user *, ru)
{
	struct rusage r;
	long err = kernel_wait4(upid, stat_addr, options, ru ? &r : NULL);

	if (err > 0) {
		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
			return -EFAULT;
	}
	return err;
}

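/*
 * The wrappers that follow reuse kernel_wait4() unchanged: waitpid(pid,
 * status, options) is just wait4(pid, status, options, NULL), and the
 * compat entry points below only differ in how struct rusage is copied
 * back to userspace.
 */
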
#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
	return kernel_wait4(pid, stat_addr, options, NULL);
}

#endif

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(wait4,
	compat_pid_t, pid,
	compat_uint_t __user *, stat_addr,
	int, options,
	struct compat_rusage __user *, ru)
{
	struct rusage r;
	long err = kernel_wait4(pid, stat_addr, options, ru ? &r : NULL);
	if (err > 0) {
		if (ru && put_compat_rusage(&r, ru))
			return -EFAULT;
	}
	return err;
}

COMPAT_SYSCALL_DEFINE5(waitid,
		int, which, compat_pid_t, pid,
		struct compat_siginfo __user *, infop, int, options,
		struct compat_rusage __user *, uru)
{
	struct rusage ru;
	struct waitid_info info = {.status = 0};
	long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL);
	int signo = 0;
	if (err > 0) {
		signo = SIGCHLD;
		err = 0;
		if (uru) {
			/* kernel_waitid() overwrites everything in ru */
			if (COMPAT_USE_64BIT_TIME)
				err = copy_to_user(uru, &ru, sizeof(ru));
			else
				err = put_compat_rusage(&ru, uru);
			if (err)
				return -EFAULT;
		}
	}

	if (!infop)
		return err;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return -EFAULT;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(info.cause, &infop->si_code, Efault);
	unsafe_put_user(info.pid, &infop->si_pid, Efault);
	unsafe_put_user(info.uid, &infop->si_uid, Efault);
	unsafe_put_user(info.status, &infop->si_status, Efault);
	user_write_access_end();
	return err;
Efault:
	user_write_access_end();
	return -EFAULT;
}
#endif

/**
 * thread_group_exited - check that a thread group has exited
 * @pid: tgid of thread group to be checked.
 *
 * Test if the thread group represented by tgid has exited (all
 * threads are zombies, dead or completely gone).
 *
 * Return: true if the thread group has exited. false otherwise.
 */
bool thread_group_exited(struct pid *pid)
{
	struct task_struct *task;
	bool exited;

	rcu_read_lock();
	task = pid_task(pid, PIDTYPE_PID);
	exited = !task ||
		(READ_ONCE(task->exit_state) && thread_group_empty(task));
	rcu_read_unlock();

	return exited;
}
EXPORT_SYMBOL(thread_group_exited);

__weak void abort(void)
{
	BUG();

	/* if that doesn't kill us, halt */
	panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);