// SPDX-License-Identifier: GPL-2.0-only
/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *   Copyright (C) 2009 Red Hat, Inc.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/numa.h>
#include <linux/sched/isolation.h>
#include <trace/events/sched.h>


static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
        /* Information passed to kthread() from kthreadd. */
        int (*threadfn)(void *data);
        void *data;
        int node;

        /* Result passed back to kthread_create() from kthreadd. */
        struct task_struct *result;
        struct completion *done;

        struct list_head list;
};

struct kthread {
        unsigned long flags;
        unsigned int cpu;
        int (*threadfn)(void *);
        void *data;
        mm_segment_t oldfs;
        struct completion parked;
        struct completion exited;
#ifdef CONFIG_BLK_CGROUP
        struct cgroup_subsys_state *blkcg_css;
#endif
};

enum KTHREAD_BITS {
        KTHREAD_IS_PER_CPU = 0,
        KTHREAD_SHOULD_STOP,
        KTHREAD_SHOULD_PARK,
};

static inline void set_kthread_struct(void *kthread)
{
        /*
         * We abuse ->set_child_tid to avoid the new member and because it
         * can't be wrongly copied by copy_process(). We also rely on the
         * fact that the caller can't exec, so PF_KTHREAD can't be cleared.
         */
        current->set_child_tid = (__force void __user *)kthread;
}

static inline struct kthread *to_kthread(struct task_struct *k)
{
        WARN_ON(!(k->flags & PF_KTHREAD));
        return (__force void *)k->set_child_tid;
}

/*
 * Variant of to_kthread() that doesn't assume @p is a kthread.
 *
 * Per construction; when:
 *
 *   (p->flags & PF_KTHREAD) && p->set_child_tid
 *
 * the task is both a kthread and struct kthread is persistent. However
 * PF_KTHREAD on its own is not, kernel_thread() can exec() (See umh.c and
 * begin_new_exec()).
 */
static inline struct kthread *__to_kthread(struct task_struct *p)
{
        void *kthread = (__force void *)p->set_child_tid;
        if (kthread && !(p->flags & PF_KTHREAD))
                kthread = NULL;
        return kthread;
}

void free_kthread_struct(struct task_struct *k)
{
        struct kthread *kthread;

        /*
         * Can be NULL if this kthread was created by kernel_thread()
         * or if kmalloc() in kthread() failed.
         */
        kthread = to_kthread(k);
#ifdef CONFIG_BLK_CGROUP
        WARN_ON_ONCE(kthread && kthread->blkcg_css);
#endif
        kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
        return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

bool __kthread_should_park(struct task_struct *k)
{
        return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
}
EXPORT_SYMBOL_GPL(__kthread_should_park);

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position.  kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
        return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
        bool frozen = false;

        might_sleep();

        if (unlikely(freezing(current)))
                frozen = __refrigerator(true);

        if (was_frozen)
                *was_frozen = frozen;

        return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

/**
 * kthread_func - return the function specified on kthread creation
 * @task: kthread task in question
 *
 * Returns NULL if the task is not a kthread.
 */
void *kthread_func(struct task_struct *task)
{
        struct kthread *kthread = __to_kthread(task);
        if (kthread)
                return kthread->threadfn;
        return NULL;
}
EXPORT_SYMBOL_GPL(kthread_func);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
        return to_kthread(task)->data;
}
EXPORT_SYMBOL_GPL(kthread_data);

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
        struct kthread *kthread = __to_kthread(task);
        void *data = NULL;

        if (kthread)
                copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
        return data;
}

static void __kthread_parkme(struct kthread *self)
{
        for (;;) {
                /*
                 * TASK_PARKED is a special state; we must serialize against
                 * possible pending wakeups to avoid store-store collisions on
                 * task->state.
                 *
                 * Such a collision might possibly result in the task state
                 * changing from TASK_PARKED and us failing the
                 * wait_task_inactive() in kthread_park().
                 */
                set_special_state(TASK_PARKED);
                if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
                        break;

                /*
                 * Thread is going to call schedule(), do not preempt it,
                 * or the caller of kthread_park() may spend more time in
                 * wait_task_inactive().
                 */
                preempt_disable();
                complete(&self->parked);
                schedule_preempt_disabled();
                preempt_enable();
        }
        __set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
        __kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

static int kthread(void *_create)
{
        /* Copy data: it's on kthread's stack */
        struct kthread_create_info *create = _create;
        int (*threadfn)(void *data) = create->threadfn;
        void *data = create->data;
        struct completion *done;
        struct kthread *self;
        int ret;

        self = kzalloc(sizeof(*self), GFP_KERNEL);
        set_kthread_struct(self);

        /* If user was SIGKILLed, I release the structure. */
        done = xchg(&create->done, NULL);
        if (!done) {
                kfree(create);
                do_exit(-EINTR);
        }

        if (!self) {
                create->result = ERR_PTR(-ENOMEM);
                complete(done);
                do_exit(-ENOMEM);
        }

        self->threadfn = threadfn;
        self->data = data;
        init_completion(&self->exited);
        init_completion(&self->parked);
        current->vfork_done = &self->exited;

        /* OK, tell user we're spawned, wait for stop or wakeup */
        __set_current_state(TASK_UNINTERRUPTIBLE);
        create->result = current;
        /*
         * Thread is going to call schedule(), do not preempt it,
         * or the creator may spend more time in wait_task_inactive().
         */
        preempt_disable();
        complete(done);
        schedule_preempt_disabled();
        preempt_enable();

        ret = -EINTR;
        if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
                cgroup_kthread_ready();
                __kthread_parkme(self);
                ret = threadfn(data);
        }
        do_exit(ret);
}

/* called from do_fork() to get node information for the task about to be created */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
        if (tsk == kthreadd_task)
                return tsk->pref_node_fork;
#endif
        return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
        int pid;

#ifdef CONFIG_NUMA
        current->pref_node_fork = create->node;
#endif
        /* We want our own signal handler (we take no signals by default). */
        pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
        if (pid < 0) {
                /* If user was SIGKILLed, I release the structure. */
                struct completion *done = xchg(&create->done, NULL);

                if (!done) {
                        kfree(create);
                        return;
                }
                create->result = ERR_PTR(pid);
                complete(done);
        }
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
                                             void *data, int node,
                                             const char namefmt[],
                                             va_list args)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct task_struct *task;
        struct kthread_create_info *create = kmalloc(sizeof(*create),
                                                     GFP_KERNEL);

        if (!create)
                return ERR_PTR(-ENOMEM);
        create->threadfn = threadfn;
        create->data = data;
        create->node = node;
        create->done = &done;

        spin_lock(&kthread_create_lock);
        list_add_tail(&create->list, &kthread_create_list);
        spin_unlock(&kthread_create_lock);

        wake_up_process(kthreadd_task);
        /*
         * Wait for completion in killable state, for I might be chosen by
         * the OOM killer while kthreadd is trying to allocate memory for
         * new kernel thread.
         */
        if (unlikely(wait_for_completion_killable(&done))) {
                /*
                 * If I was SIGKILLed before kthreadd (or new kernel thread)
                 * calls complete(), leave the cleanup of this structure to
                 * that thread.
                 */
                if (xchg(&create->done, NULL))
                        return ERR_PTR(-EINTR);
                /*
                 * kthreadd (or new kernel thread) will call complete()
                 * shortly.
                 */
                wait_for_completion(&done);
        }
        task = create->result;
        if (!IS_ERR(task)) {
                static const struct sched_param param = { .sched_priority = 0 };
                char name[TASK_COMM_LEN];

                /*
                 * task is already visible to other tasks, so updating
                 * COMM must be protected.
                 */
                vsnprintf(name, sizeof(name), namefmt, args);
                set_task_comm(task, name);
                /*
                 * root may have changed our (kthreadd's) priority or CPU mask.
                 * The kernel thread should not inherit these properties.
                 */
                sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
                set_cpus_allowed_ptr(task,
                                     housekeeping_cpumask(HK_FLAG_KTHREAD));
        }
        kfree(create);
        return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
                                           void *data, int node,
                                           const char namefmt[],
                                           ...)
{
        struct task_struct *task;
        va_list args;

        va_start(args, namefmt);
        task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
        va_end(args);

        return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
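
/*
 * Editorial usage sketch (not part of the original file): the typical
 * create/wake/stop pattern built on kthread_create_on_node()/kthread_run(),
 * kthread_should_stop() and kthread_stop(). All "example_*" names below are
 * hypothetical.
 */
#if 0   /* illustrative only, kept out of the build */
static struct task_struct *example_task;

static int example_fn(void *data)
{
        /* Loop until kthread_stop() sets KTHREAD_SHOULD_STOP. */
        while (!kthread_should_stop()) {
                /* do one unit of work, then sleep for a while */
                schedule_timeout_interruptible(HZ);
        }
        /* This value is handed back to kthread_stop(). */
        return 0;
}

static int example_start(void)
{
        /* kthread_run() is kthread_create() followed by wake_up_process(). */
        example_task = kthread_run(example_fn, NULL, "example/%d", 0);
        return PTR_ERR_OR_ZERO(example_task);
}

static void example_stop(void)
{
        /* Wakes the thread and waits for it to exit. */
        kthread_stop(example_task);
}
#endif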

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
        unsigned long flags;

        if (!wait_task_inactive(p, state)) {
                WARN_ON(1);
                return;
        }

        /* It's safe because the task is inactive. */
        raw_spin_lock_irqsave(&p->pi_lock, flags);
        do_set_cpus_allowed(p, mask);
        p->flags |= PF_NO_SETAFFINITY;
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
        __kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
        __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
        __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *           to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
                                          void *data, unsigned int cpu,
                                          const char *namefmt)
{
        struct task_struct *p;

        p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
                                   cpu);
        if (IS_ERR(p))
                return p;
        kthread_bind(p, cpu);
        /* CPU hotplug needs to bind once again when unparking the thread. */
        to_kthread(p)->cpu = cpu;
        return p;
}

void kthread_set_per_cpu(struct task_struct *k, int cpu)
{
        struct kthread *kthread = to_kthread(k);
        if (!kthread)
                return;

        WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));

        if (cpu < 0) {
                clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
                return;
        }

        kthread->cpu = cpu;
        set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

bool kthread_is_per_cpu(struct task_struct *p)
{
        struct kthread *kthread = __to_kthread(p);
        if (!kthread)
                return false;

        return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then it's
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
        struct kthread *kthread = to_kthread(k);

        /*
         * Newly created kthread was parked when the CPU was offline.
         * The binding was lost and we need to set it again.
         */
        if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
                __kthread_bind(k, kthread->cpu, TASK_PARKED);

        clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
        /*
         * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
         */
        wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
        struct kthread *kthread = to_kthread(k);

        if (WARN_ON(k->flags & PF_EXITING))
                return -ENOSYS;

        if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
                return -EBUSY;

        set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
        if (k != current) {
                wake_up_process(k);
                /*
                 * Wait for __kthread_parkme() to complete(), this means we
                 * _will_ have TASK_PARKED and are about to call schedule().
                 */
                wait_for_completion(&kthread->parked);
                /*
                 * Now wait for that schedule() to complete and the task to
                 * get scheduled out.
                 */
                WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
        }

        return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);
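
/*
 * Editorial usage sketch (not part of the original file): how a thread
 * cooperates with kthread_park()/kthread_unpark(). The thread function polls
 * kthread_should_park() and parks itself with kthread_parkme(); a controller
 * (e.g. CPU hotplug code) parks it before taking resources away and unparks
 * it afterwards. "example_*" names are hypothetical.
 */
#if 0   /* illustrative only, kept out of the build */
static int example_percpu_fn(void *data)
{
        while (!kthread_should_stop()) {
                if (kthread_should_park())
                        kthread_parkme();       /* sleeps in TASK_PARKED */
                /* per-CPU work would go here */
                schedule_timeout_interruptible(HZ);
        }
        return 0;
}

static void example_before_cpu_down(struct task_struct *example_task)
{
        kthread_park(example_task);     /* returns once the thread is parked */
}

static void example_after_cpu_up(struct task_struct *example_task)
{
        kthread_unpark(example_task);   /* rebinds a per-CPU thread and wakes it */
}
#endif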

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
        struct kthread *kthread;
        int ret;

        trace_sched_kthread_stop(k);

        get_task_struct(k);
        kthread = to_kthread(k);
        set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
        kthread_unpark(k);
        wake_up_process(k);
        wait_for_completion(&kthread->exited);
        ret = k->exit_code;
        put_task_struct(k);

        trace_sched_kthread_stop_ret(ret);
        return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
        struct task_struct *tsk = current;

        /* Setup a clean context for our children to inherit. */
        set_task_comm(tsk, "kthreadd");
        ignore_signals(tsk);
        set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD));
        set_mems_allowed(node_states[N_MEMORY]);

        current->flags |= PF_NOFREEZE;
        cgroup_init_kthreadd();

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (list_empty(&kthread_create_list))
                        schedule();
                __set_current_state(TASK_RUNNING);

                spin_lock(&kthread_create_lock);
                while (!list_empty(&kthread_create_list)) {
                        struct kthread_create_info *create;

                        create = list_entry(kthread_create_list.next,
                                            struct kthread_create_info, list);
                        list_del_init(&create->list);
                        spin_unlock(&kthread_create_lock);

                        create_kthread(create);

                        spin_lock(&kthread_create_lock);
                }
                spin_unlock(&kthread_create_lock);
        }

        return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
                           const char *name,
                           struct lock_class_key *key)
{
        memset(worker, 0, sizeof(struct kthread_worker));
        raw_spin_lock_init(&worker->lock);
        lockdep_set_class_and_name(&worker->lock, key, name);
        INIT_LIST_HEAD(&worker->work_list);
        INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * Works are not allowed to hold any locks or leave preemption or interrupts
 * disabled when they finish. A safe point for freezing is provided after one
 * work finishes and before a new one is started.
 *
 * Also, a work must not be handled by more than one worker at the same time;
 * see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
        struct kthread_worker *worker = worker_ptr;
        struct kthread_work *work;

        /*
         * FIXME: Update the check and remove the assignment when all kthread
         * worker users are created using kthread_create_worker*() functions.
         */
        WARN_ON(worker->task && worker->task != current);
        worker->task = current;

        if (worker->flags & KTW_FREEZABLE)
                set_freezable();

repeat:
        set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */

        if (kthread_should_stop()) {
                __set_current_state(TASK_RUNNING);
                raw_spin_lock_irq(&worker->lock);
                worker->task = NULL;
                raw_spin_unlock_irq(&worker->lock);
                return 0;
        }

        work = NULL;
        raw_spin_lock_irq(&worker->lock);
        if (!list_empty(&worker->work_list)) {
                work = list_first_entry(&worker->work_list,
                                        struct kthread_work, node);
                list_del_init(&work->node);
        }
        worker->current_work = work;
        raw_spin_unlock_irq(&worker->lock);

        if (work) {
                __set_current_state(TASK_RUNNING);
                work->func(work);
        } else if (!freezing(current))
                schedule();

        try_to_freeze();
        cond_resched();
        goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
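
/*
 * Editorial sketch (not part of the original file): the shape of a work
 * handler invoked by kthread_worker_fn() above. The handler gets the
 * struct kthread_work pointer and typically uses container_of() to reach
 * the object embedding it. "example_*" names are hypothetical.
 */
#if 0   /* illustrative only, kept out of the build */
struct example_device {
        struct kthread_work work;
        int pending;
};

static void example_work_fn(struct kthread_work *work)
{
        struct example_device *dev =
                container_of(work, struct example_device, work);

        /* Runs in the worker thread; no locks are held on entry. */
        dev->pending = 0;
}
#endif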

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
                        const char namefmt[], va_list args)
{
        struct kthread_worker *worker;
        struct task_struct *task;
        int node = NUMA_NO_NODE;

        worker = kzalloc(sizeof(*worker), GFP_KERNEL);
        if (!worker)
                return ERR_PTR(-ENOMEM);

        kthread_init_worker(worker);

        if (cpu >= 0)
                node = cpu_to_node(cpu);

        task = __kthread_create_on_node(kthread_worker_fn, worker,
                                        node, namefmt, args);
        if (IS_ERR(task))
                goto fail_task;

        if (cpu >= 0)
                kthread_bind(task, cpu);

        worker->flags = flags;
        worker->task = task;
        wake_up_process(task);
        return worker;

fail_task:
        kfree(worker);
        return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
        struct kthread_worker *worker;
        va_list args;

        va_start(args, namefmt);
        worker = __kthread_create_worker(-1, flags, namefmt, args);
        va_end(args);

        return worker;
}
EXPORT_SYMBOL(kthread_create_worker);

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *      to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
                             const char namefmt[], ...)
{
        struct kthread_worker *worker;
        va_list args;

        va_start(args, namefmt);
        worker = __kthread_create_worker(cpu, flags, namefmt, args);
        va_end(args);

        return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
                                   struct kthread_work *work)
{
        lockdep_assert_held(&worker->lock);

        return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
                                             struct kthread_work *work)
{
        lockdep_assert_held(&worker->lock);
        WARN_ON_ONCE(!list_empty(&work->node));
        /* Do not use a work with >1 worker, see kthread_queue_work() */
        WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
                                struct kthread_work *work,
                                struct list_head *pos)
{
        kthread_insert_work_sanity_check(worker, work);

        list_add_tail(&work->node, pos);
        work->worker = worker;
        if (!worker->current_work && likely(worker->task))
                wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work for async execution on @worker. The worker must have been
 * created with kthread_create_worker(). Returns %true if @work was
 * successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
                        struct kthread_work *work)
{
        bool ret = false;
        unsigned long flags;

        raw_spin_lock_irqsave(&worker->lock, flags);
        if (!queuing_blocked(worker, work)) {
                kthread_insert_work(worker, work, &worker->work_list);
                ret = true;
        }
        raw_spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);
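
/*
 * Editorial usage sketch (not part of the original file): creating a worker
 * with kthread_create_worker(), queueing a work item with
 * kthread_queue_work() and waiting for it with kthread_flush_work().
 * kthread_init_work() and kthread_destroy_worker() come from
 * <linux/kthread.h>; "example_*" names are hypothetical.
 */
#if 0   /* illustrative only, kept out of the build */
static void example_handler(struct kthread_work *work)
{
        /* runs in the worker thread */
}

static void example_worker_use(void)
{
        struct kthread_worker *worker;
        struct kthread_work work;

        worker = kthread_create_worker(0, "example_worker");
        if (IS_ERR(worker))
                return;

        kthread_init_work(&work, example_handler);
        kthread_queue_work(worker, &work);      /* %false if already pending */
        kthread_flush_work(&work);              /* wait for the handler to finish */
        kthread_destroy_worker(worker);         /* flush everything and stop the thread */
}
#endif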

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *      delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the function is defined by struct timer_list.
 * It should be called from an irq-safe timer with interrupts already
 * disabled.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
        struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
        struct kthread_work *work = &dwork->work;
        struct kthread_worker *worker = work->worker;
        unsigned long flags;

        /*
         * This might happen when a pending work is reinitialized.
         * It means that it is being used in a wrong way.
         */
        if (WARN_ON_ONCE(!worker))
                return;

        raw_spin_lock_irqsave(&worker->lock, flags);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);

        /* Move the work from worker->delayed_work_list. */
        WARN_ON_ONCE(list_empty(&work->node));
        list_del_init(&work->node);
        if (!work->canceling)
                kthread_insert_work(worker, work, &worker->work_list);

        raw_spin_unlock_irqrestore(&worker->lock, flags);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

static void __kthread_queue_delayed_work(struct kthread_worker *worker,
                                         struct kthread_delayed_work *dwork,
                                         unsigned long delay)
{
        struct timer_list *timer = &dwork->timer;
        struct kthread_work *work = &dwork->work;

        WARN_ON_FUNCTION_MISMATCH(timer->function,
                                  kthread_delayed_work_timer_fn);

        /*
         * If @delay is 0, queue @dwork->work immediately.  This is for
         * both optimization and correctness.  The earliest @timer can
         * expire is on the closest next tick and delayed_work users depend
         * on that there's no such delay when @delay is 0.
         */
        if (!delay) {
                kthread_insert_work(worker, work, &worker->work_list);
                return;
        }

        /* Be paranoid and try to detect possible races already now. */
        kthread_insert_work_sanity_check(worker, work);

        list_add(&work->node, &worker->delayed_work_list);
        work->worker = worker;
        timer->expires = jiffies + delay;
        add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *      after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending. It means that
 * either the timer was running or the work was queued. It returns %true
 * otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
                                struct kthread_delayed_work *dwork,
                                unsigned long delay)
{
        struct kthread_work *work = &dwork->work;
        unsigned long flags;
        bool ret = false;

        raw_spin_lock_irqsave(&worker->lock, flags);

        if (!queuing_blocked(worker, work)) {
                __kthread_queue_delayed_work(worker, dwork, delay);
                ret = true;
        }

        raw_spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
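
/*
 * Editorial usage sketch (not part of the original file): delayed kthread
 * work. kthread_init_delayed_work() (from <linux/kthread.h>) wires the timer
 * to kthread_delayed_work_timer_fn() above; the work can then be queued with
 * a delay, rescheduled with kthread_mod_delayed_work(), and cancelled with
 * kthread_cancel_delayed_work_sync(). "example_*" names are hypothetical.
 */
#if 0   /* illustrative only, kept out of the build */
static struct kthread_delayed_work example_dwork;

static void example_timeout_fn(struct kthread_work *work)
{
        /* e.g. handle a timeout in the worker thread */
}

static void example_delayed_use(struct kthread_worker *worker)
{
        kthread_init_delayed_work(&example_dwork, example_timeout_fn);

        /* Run example_timeout_fn() on the worker about one second from now. */
        kthread_queue_delayed_work(worker, &example_dwork, HZ);

        /* Push the expiry back to roughly two seconds (or queue if idle). */
        kthread_mod_delayed_work(worker, &example_dwork, 2 * HZ);

        /* Cancel and wait; safe even if the handler is currently running. */
        kthread_cancel_delayed_work_sync(&example_dwork);
}
#endif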

struct kthread_flush_work {
        struct kthread_work work;
        struct completion done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
        struct kthread_flush_work *fwork =
                container_of(work, struct kthread_flush_work, work);
        complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };
        struct kthread_worker *worker;
        bool noop = false;

        worker = work->worker;
        if (!worker)
                return;

        raw_spin_lock_irq(&worker->lock);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);

        if (!list_empty(&work->node))
                kthread_insert_work(worker, &fwork.work, work->node.next);
        else if (worker->current_work == work)
                kthread_insert_work(worker, &fwork.work,
                                    worker->work_list.next);
        else
                noop = true;

        raw_spin_unlock_irq(&worker->lock);

        if (!noop)
                wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * Make sure that the timer is neither set nor running and could
 * not manipulate the work list_head any longer.
 *
 * The function is called under worker->lock. The lock is temporarily
 * released but the timer can't be set again in the meantime.
 */
static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
                                              unsigned long *flags)
{
        struct kthread_delayed_work *dwork =
                container_of(work, struct kthread_delayed_work, work);
        struct kthread_worker *worker = work->worker;

        /*
         * del_timer_sync() must be called to make sure that the timer
         * callback is not running. The lock must be temporarily released
         * to avoid a deadlock with the callback. In the meantime,
         * any queuing is blocked by setting the canceling counter.
         */
        work->canceling++;
        raw_spin_unlock_irqrestore(&worker->lock, *flags);
        del_timer_sync(&dwork->timer);
        raw_spin_lock_irqsave(&worker->lock, *flags);
        work->canceling--;
}

/*
 * This function removes the work from the worker queue.
 *
 * It is called under worker->lock. The caller must make sure that
 * the timer used by delayed work is not running, e.g. by calling
 * kthread_cancel_delayed_work_timer().
 *
 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *      %false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work)
{
/*
 * Make sure that the timer is neither set nor running and can no longer
 * manipulate the work list_head.
 *
 * The function is called under worker->lock. The lock is temporarily
 * released but the timer can't be set again in the meantime.
 */
static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
					      unsigned long *flags)
{
	struct kthread_delayed_work *dwork =
		container_of(work, struct kthread_delayed_work, work);
	struct kthread_worker *worker = work->worker;

	/*
	 * del_timer_sync() must be called to make sure that the timer
	 * callback is not running. The lock must be temporarily released
	 * to avoid a deadlock with the callback. In the meantime,
	 * any queuing is blocked by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, *flags);
	del_timer_sync(&dwork->timer);
	raw_spin_lock_irqsave(&worker->lock, *flags);
	work->canceling--;
}

/*
 * This function removes the work from the worker queue.
 *
 * It is called under worker->lock. The caller must make sure that
 * the timer used by delayed work is not running, e.g. by calling
 * kthread_cancel_delayed_work_timer().
 *
 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work)
{
	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %false if @dwork was idle and queued, %true otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %true here. The return value can be used for reference
 * counting and the number of queued works stays the same. Anyway, the caller
 * is supposed to synchronize these operations in a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker) {
		ret = false;
		goto fast_queue;
	}

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/*
	 * Temporarily cancel the work but do not fight with another command
	 * that is canceling the work as well.
	 *
	 * It is a bit tricky because of possible races with another
	 * mod_delayed_work() and cancel_delayed_work() callers.
	 *
	 * The timer must be canceled first because worker->lock is released
	 * when doing so. But the work can be removed from the queue (list)
	 * only when it can be queued again so that the return value can
	 * be used for reference counting.
	 */
	kthread_cancel_delayed_work_timer(work, &flags);
	if (work->canceling) {
		/* The number of works in the queue does not change. */
		ret = true;
		goto out;
	}
	ret = __kthread_cancel_work(work);

fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
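/*
 * Illustrative usage sketch, not part of this file: use
 * kthread_mod_delayed_work() to (re)arm a deadline, pushing a timeout
 * further into the future each time new activity is seen.
 * example_timeout_dwork and example_note_activity() are placeholders; the
 * delayed work and the worker are assumed to be set up elsewhere.
 */
static struct kthread_delayed_work example_timeout_dwork;

static void __maybe_unused example_note_activity(struct kthread_worker *worker)
{
	/*
	 * Queues the work if it was idle, or re-arms its timer if it was
	 * already pending. Safe to call from IRQ context.
	 */
	kthread_mod_delayed_work(worker, &example_timeout_dwork,
				 msecs_to_jiffies(1000));
}
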
static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (is_dwork)
		kthread_cancel_delayed_work_timer(work, &flags);

	ret = __kthread_cancel_work(work);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	raw_spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish. This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed works. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
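/*
 * Illustrative usage sketch, not part of this file: cancel a work item
 * synchronously during teardown. struct example_ctx and
 * example_ctx_teardown() are placeholders for per-device state that would
 * be set up elsewhere.
 */
struct example_ctx {
	struct kthread_work work;
	void *priv;
};

static void __maybe_unused example_ctx_teardown(struct example_ctx *ctx)
{
	/*
	 * After this returns, ctx->work is neither pending nor running on
	 * any CPU, so ctx and ctx->priv may be freed. The worker the work
	 * was queued on must still exist at this point.
	 */
	kthread_cancel_work_sync(&ctx->work);
	kfree(ctx->priv);
	kfree(ctx);
}
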
/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker. The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios. There are no multi-step state
 * machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);
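/*
 * Illustrative usage sketch, not part of this file: orderly shutdown of a
 * worker that services a self-requeueing delayed work. example_shutdown()
 * and its arguments stand in for driver state created elsewhere with
 * kthread_create_worker() and kthread_init_delayed_work().
 */
static void __maybe_unused example_shutdown(struct kthread_worker *worker,
					    struct kthread_delayed_work *dwork)
{
	/* Stop the re-queueing cycle and wait for the last run to finish. */
	kthread_cancel_delayed_work_sync(dwork);
	/* Flushes any remaining work, stops the kthread and frees @worker. */
	kthread_destroy_worker(worker);
}
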
/**
 * kthread_use_mm - make the calling kthread operate on an address space
 * @mm: address space to operate on
 */
void kthread_use_mm(struct mm_struct *mm)
{
	struct mm_struct *active_mm;
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(tsk->mm);

	task_lock(tsk);
	/* Hold off tlb flush IPIs while switching mm's */
	local_irq_disable();
	active_mm = tsk->active_mm;
	if (active_mm != mm) {
		mmgrab(mm);
		tsk->active_mm = mm;
	}
	tsk->mm = mm;
	switch_mm_irqs_off(active_mm, mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
	finish_arch_post_lock_switch();
#endif

	if (active_mm != mm)
		mmdrop(active_mm);

	to_kthread(tsk)->oldfs = force_uaccess_begin();
}
EXPORT_SYMBOL_GPL(kthread_use_mm);

/**
 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
 * @mm: address space to operate on
 */
void kthread_unuse_mm(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
	WARN_ON_ONCE(!tsk->mm);

	force_uaccess_end(to_kthread(tsk)->oldfs);

	task_lock(tsk);
	sync_mm_rss(mm);
	local_irq_disable();
	tsk->mm = NULL;
	/* active_mm is still 'mm' */
	enter_lazy_tlb(mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
}
EXPORT_SYMBOL_GPL(kthread_unuse_mm);
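/*
 * Illustrative usage sketch, not part of this file: a kthread temporarily
 * adopting a user address space to copy data into user memory, in the style
 * of vhost-like workers. example_copy_to_user_mm() and its parameters are
 * placeholders; it must be called from a kthread, and @mm is assumed to be
 * pinned by the caller (e.g. via get_task_mm() or mmget()) for the duration
 * of the call.
 */
static int __maybe_unused example_copy_to_user_mm(struct mm_struct *mm,
						  void __user *dst,
						  const void *src, size_t len)
{
	int ret = 0;

	kthread_use_mm(mm);
	if (copy_to_user(dst, src, len))
		ret = -EFAULT;
	kthread_unuse_mm(mm);

	return ret;
}
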
#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * The current thread must be a kthread. The thread runs jobs on behalf of
 * other threads. In some cases, we expect the jobs to attach the cgroup info
 * of the original threads instead of that of the current thread. This
 * function stores the original thread's cgroup info in the current kthread
 * context for later retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);

/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * The current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
EXPORT_SYMBOL(kthread_blkcg);
#endif
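#ifdef CONFIG_BLK_CGROUP
/*
 * Illustrative usage sketch, not part of this file: a kthread doing I/O on
 * behalf of another task temporarily takes over that task's blkcg, in the
 * style of the loop driver. example_do_io_as() and its argument are
 * placeholders; @css is assumed to be a blkcg css pinned by the caller.
 */
static void __maybe_unused example_do_io_as(struct cgroup_subsys_state *css)
{
	/* Charge subsequent block I/O issued by this kthread to @css. */
	kthread_associate_blkcg(css);

	/* ... submit bios here; the block layer consults kthread_blkcg() ... */

	/* Drop the association and the reference taken above. */
	kthread_associate_blkcg(NULL);
}
#endif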