Lines matching refs:worker in kernel/workqueue.c

3  * kernel/workqueue.c - generic async execution with shared worker pool
19 * executed in process context. The worker pool is shared and
20 * automatically managed. There are two worker pools for each CPU (one for
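
The file header above summarizes the model: work items execute in process context on shared, automatically managed pools, two per CPU (normal and high priority). A minimal usage sketch follows; the module and the hello_* names are hypothetical, only DECLARE_WORK(), schedule_work() and flush_work() are the real API.

#include <linux/module.h>
#include <linux/workqueue.h>

static void hello_fn(struct work_struct *work)
{
	pr_info("running in process context on a shared kworker\n");
}
static DECLARE_WORK(hello_work, hello_fn);

static int __init hello_init(void)
{
	schedule_work(&hello_work);	/* system_wq: normal-priority per-CPU pool */
	return 0;
}

static void __exit hello_exit(void)
{
	flush_work(&hello_work);	/* make sure it finished before unloading */
}
module_init(hello_init);
module_exit(hello_exit);
MODULE_LICENSE("GPL");
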
77 /* worker flags */
82 WORKER_UNBOUND = 1 << 7, /* worker is unbound */
83 WORKER_REBOUND = 1 << 8, /* worker was rebound */
146 /* struct worker is defined in workqueue_internal.h */
163 struct timer_list idle_timer; /* L: worker idle timeout */
170 struct worker *manager; /* L: purely informational */
174 struct ida worker_ida; /* worker IDs for task name */
176 struct workqueue_attrs *attrs; /* I: worker attributes */
252 struct worker *rescuer; /* MD: rescue worker */
303 static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
329 /* the per-cpu worker pools */
400 * @worker: iteration cursor
408 #define for_each_pool_worker(worker, pool) \
409 list_for_each_entry((worker), &(pool)->workers, node) \
730 * get_work_pool_id - return the worker pool ID a given work is associated with
762 * Policy functions. These define the policies on how the global worker
773 * Need to wake up a worker? Called from anything but currently
798 /* Do we need a new worker? Called from manager. */
818 /* Return the first idle worker. Safe with preemption disabled */
819 static struct worker *first_idle_worker(struct worker_pool *pool)
824 return list_first_entry(&pool->idle_list, struct worker, entry);
828 * wake_up_worker - wake up an idle worker
829 * @pool: worker pool to wake worker from
831 * Wake up the first idle worker of @pool.
838 struct worker *worker = first_idle_worker(pool);
840 if (likely(worker))
841 wake_up_process(worker->task);
845 * wq_worker_running - a worker is running again
848 * This function is called when a worker returns from schedule()
852 struct worker *worker = kthread_data(task);
854 if (!worker->sleeping)
864 if (!(worker->flags & WORKER_NOT_RUNNING))
865 atomic_inc(&worker->pool->nr_running);
867 worker->sleeping = 0;
871 * wq_worker_sleeping - a worker is going to sleep
874 * This function is called from schedule() when a busy worker is
880 struct worker *next, *worker = kthread_data(task);
888 if (worker->flags & WORKER_NOT_RUNNING)
891 pool = worker->pool;
894 if (worker->sleeping)
897 worker->sleeping = 1;
921 * wq_worker_last_func - retrieve worker's last work function
924 * Determine the last function a worker executed. This is called from
925 * the scheduler to get a worker's last known identity.
933 * worker is the last task in the system or cgroup to go to sleep.
941 * The last work function %current executed as a worker, NULL if it
946 struct worker *worker = kthread_data(task);
948 return worker->last_func;
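
wq_worker_sleeping() and wq_worker_running() above are the scheduler hooks behind concurrency management: when a running worker blocks, the pool's nr_running drops and an idle worker is woken if runnable work remains. The practical upshot, sketched below with hypothetical slow_fn/fast_fn work items, is that sleeping inside a work item does not stall other work queued on the same per-CPU pool.

#include <linux/delay.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

static void slow_fn(struct work_struct *work)
{
	/*
	 * Blocking here is fine: wq_worker_sleeping() sees this worker go
	 * to sleep and, with the pool's worklist non-empty, wakes an idle
	 * worker so the CPU keeps one task running.
	 */
	msleep(100);
}

static void fast_fn(struct work_struct *work)
{
	pr_info("executed by another kworker while slow_fn sleeps\n");
}

static DECLARE_WORK(slow_work, slow_fn);
static DECLARE_WORK(fast_work, fast_fn);

static void demo(void)
{
	schedule_work(&slow_work);
	schedule_work(&fast_work);
}
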
952 * worker_set_flags - set worker flags and adjust nr_running accordingly
953 * @worker: self
956 * Set @flags in @worker->flags and adjust nr_running accordingly.
961 static inline void worker_set_flags(struct worker *worker, unsigned int flags)
963 struct worker_pool *pool = worker->pool;
965 WARN_ON_ONCE(worker->task != current);
969 !(worker->flags & WORKER_NOT_RUNNING)) {
973 worker->flags |= flags;
977 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
978 * @worker: self
981 * Clear @flags in @worker->flags and adjust nr_running accordingly.
986 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
988 struct worker_pool *pool = worker->pool;
989 unsigned int oflags = worker->flags;
991 WARN_ON_ONCE(worker->task != current);
993 worker->flags &= ~flags;
1001 if (!(worker->flags & WORKER_NOT_RUNNING))
1006 * find_worker_executing_work - find worker which is executing a work
1008 * @work: work to find worker for
1010 * Find a worker which is executing @work on @pool by searching
1011 * @pool->busy_hash which is keyed by the address of @work. For a worker
1035 * Pointer to worker which is executing @work if found, %NULL
1038 static struct worker *find_worker_executing_work(struct worker_pool *pool,
1041 struct worker *worker;
1043 hash_for_each_possible(pool->busy_hash, worker, hentry,
1045 if (worker->current_work == work &&
1046 worker->current_func == work->func)
1047 return worker;
1365 struct worker *worker;
1367 worker = current_wq_worker();
1369 * Return %true iff I'm a worker executing a work item on @wq. If
1370 * I'm @worker, it's safe to dereference it without locking.
1372 return worker && worker->current_pwq->wq == wq;
1450 struct worker *worker;
1454 worker = find_worker_executing_work(last_pool, work);
1456 if (worker && worker->current_pwq->wq == wq) {
1457 pwq = worker->current_pwq;
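
find_worker_executing_work() plus the __queue_work() check just above give work items their non-reentrancy guarantee: if a work item is requeued while a worker is still executing it, it is queued back onto that worker's pool rather than started on a second worker. A hypothetical self-requeueing poll loop relies on exactly this:

#include <linux/workqueue.h>

static void poll_fn(struct work_struct *work);
static DECLARE_WORK(poll_work, poll_fn);

static void poll_fn(struct work_struct *work)
{
	/* ... one polling pass; a real driver would have a stop condition ... */

	/*
	 * Requeueing while still running is safe: __queue_work() uses
	 * find_worker_executing_work() to spot the worker already running
	 * poll_work and queues onto the same pool, so two instances of
	 * poll_fn never run concurrently.
	 */
	queue_work(system_wq, &poll_work);
}
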
1773 * @worker: worker which is entering idle state
1775 * @worker is entering idle state. Update stats and idle timer if
1781 static void worker_enter_idle(struct worker *worker)
1783 struct worker_pool *pool = worker->pool;
1785 if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
1786 WARN_ON_ONCE(!list_empty(&worker->entry) &&
1787 (worker->hentry.next || worker->hentry.pprev)))
1791 worker->flags |= WORKER_IDLE;
1793 worker->last_active = jiffies;
1796 list_add(&worker->entry, &pool->idle_list);
1814 * @worker: worker which is leaving idle state
1816 * @worker is leaving idle state. Update stats.
1821 static void worker_leave_idle(struct worker *worker)
1823 struct worker_pool *pool = worker->pool;
1825 if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
1827 worker_clr_flags(worker, WORKER_IDLE);
1829 list_del_init(&worker->entry);
1832 static struct worker *alloc_worker(int node)
1834 struct worker *worker;
1836 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
1837 if (worker) {
1838 INIT_LIST_HEAD(&worker->entry);
1839 INIT_LIST_HEAD(&worker->scheduled);
1840 INIT_LIST_HEAD(&worker->node);
1841 /* on creation a worker is in !idle && prep state */
1842 worker->flags = WORKER_PREP;
1844 return worker;
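
alloc_worker() above is the usual node-local allocation idiom: a zeroed allocation on the pool's NUMA node with the list heads initialized before first use. The same idiom in isolation (struct item and alloc_item are hypothetical):

#include <linux/slab.h>
#include <linux/list.h>

struct item {
	struct list_head entry;
	int id;
};

static struct item *alloc_item(int node)
{
	/* zeroed, allocated on the given NUMA node, list head ready for list_add() */
	struct item *it = kzalloc_node(sizeof(*it), GFP_KERNEL, node);

	if (it)
		INIT_LIST_HEAD(&it->entry);
	return it;
}
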
1848 * worker_attach_to_pool() - attach a worker to a pool
1849 * @worker: worker to be attached
1852 * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and
1853 * cpu-binding of @worker are kept coordinated with the pool across
1856 static void worker_attach_to_pool(struct worker *worker,
1867 worker->flags |= WORKER_UNBOUND;
1869 if (worker->rescue_wq)
1870 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1872 list_add_tail(&worker->node, &pool->workers);
1873 worker->pool = pool;
1879 * worker_detach_from_pool() - detach a worker from its pool
1880 * @worker: worker which is attached to its pool
1883 * caller worker shouldn't access the pool after detaching unless it has
1886 static void worker_detach_from_pool(struct worker *worker)
1888 struct worker_pool *pool = worker->pool;
1893 list_del(&worker->node);
1894 worker->pool = NULL;
1901 worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
1908 * create_worker - create a new workqueue worker
1909 * @pool: pool the new worker will belong to
1911 * Create and start a new worker which is attached to @pool.
1917 * Pointer to the newly created worker.
1919 static struct worker *create_worker(struct worker_pool *pool)
1921 struct worker *worker = NULL;
1930 worker = alloc_worker(pool->node);
1931 if (!worker)
1934 worker->id = id;
1942 worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
1944 if (IS_ERR(worker->task))
1947 set_user_nice(worker->task, pool->attrs->nice);
1948 kthread_bind_mask(worker->task, pool->attrs->cpumask);
1950 /* successful, attach the worker to the pool */
1951 worker_attach_to_pool(worker, pool);
1953 /* start the newly created worker */
1955 worker->pool->nr_workers++;
1956 worker_enter_idle(worker);
1957 wake_up_process(worker->task);
1960 return worker;
1965 kfree(worker);
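
create_worker() boils down to the ordinary kthread bring-up sequence: create the thread on the pool's node, apply the pool's nice level and cpumask before it first runs, then wake it. The sketch below mirrors those calls with a hypothetical thread function; kthread_bind_mask() must run before the task is ever scheduled, which is why create_worker() binds before wake_up_process().

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/err.h>

static int bound_thread_fn(void *data)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static struct task_struct *start_bound_thread(int node, long nice,
					       const struct cpumask *mask)
{
	struct task_struct *task;

	task = kthread_create_on_node(bound_thread_fn, NULL, node,
				      "my_worker/%d", node);
	if (IS_ERR(task))
		return task;

	set_user_nice(task, nice);	/* pool->attrs->nice in create_worker() */
	kthread_bind_mask(task, mask);	/* pool->attrs->cpumask, before first run */
	wake_up_process(task);		/* start it */
	return task;
}
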
1970 * destroy_worker - destroy a workqueue worker
1971 * @worker: worker to be destroyed
1973 * Destroy @worker and adjust @pool stats accordingly. The worker should
1979 static void destroy_worker(struct worker *worker)
1981 struct worker_pool *pool = worker->pool;
1986 if (WARN_ON(worker->current_work) ||
1987 WARN_ON(!list_empty(&worker->scheduled)) ||
1988 WARN_ON(!(worker->flags & WORKER_IDLE)))
1994 list_del_init(&worker->entry);
1995 worker->flags |= WORKER_DIE;
1996 wake_up_process(worker->task);
2006 struct worker *worker;
2010 worker = list_entry(pool->idle_list.prev, struct worker, entry);
2011 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2018 destroy_worker(worker);
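
The lines above are the idle-reaping path: the pool's idle_timer fires, the oldest idle worker's last_active is checked against the timeout, and destroy_worker() tears it down so an idle pool shrinks back. The arm-on-idle timer pattern in isolation (names hypothetical; 300 * HZ mirrors what mainline uses for IDLE_WORKER_TIMEOUT, check your tree):

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list idle_timer;

static void idle_timeout_fn(struct timer_list *t)
{
	/* reap whatever has been idle too long, then re-arm for the next oldest */
}

static void init_idle_timer(void)
{
	timer_setup(&idle_timer, idle_timeout_fn, 0);
}

static void enter_idle(void)
{
	/* arm when something goes idle, as worker_enter_idle() does for the pool */
	mod_timer(&idle_timer, jiffies + 300 * HZ);
}
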
2057 * We've been trying to create a new worker but
2073 * maybe_create_worker - create a new worker if necessary
2074 * @pool: pool to create a new worker for
2076 * Create a new worker for @pool if necessary. @pool is guaranteed to
2077 * have at least one idle worker on return from this function. If
2078 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
2113 * This is necessary even after a new worker was just successfully
2114 * created as @pool->lock was dropped and the new worker might have
2122 * manage_workers - manage worker pool
2123 * @worker: self
2125 * Assume the manager role and manage the worker pool @worker belongs
2143 static bool manage_workers(struct worker *worker)
2145 struct worker_pool *pool = worker->pool;
2151 pool->manager = worker;
2163 * @worker: self
2169 * flushing. As long as context requirement is met, any worker can
2175 static void process_one_work(struct worker *worker, struct work_struct *work)
2180 struct worker_pool *pool = worker->pool;
2183 struct worker *collision;
2214 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2215 worker->current_work = work;
2216 worker->current_func = work->func;
2217 worker->current_pwq = pwq;
2224 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
2230 * They're the scheduler's responsibility. This takes @worker out
2235 worker_set_flags(worker, WORKER_CPU_INTENSIVE);
2238 * Wake up another worker if necessary. The condition is always
2282 worker->current_func(work);
2287 trace_workqueue_execute_end(work, worker->current_func);
2295 worker->current_func);
2314 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2316 /* tag the worker for identification in schedule() */
2317 worker->last_func = worker->current_func;
2320 hash_del(&worker->hentry);
2321 worker->current_work = NULL;
2322 worker->current_func = NULL;
2323 worker->current_pwq = NULL;
2329 * @worker: self
2339 static void process_scheduled_works(struct worker *worker)
2341 while (!list_empty(&worker->scheduled)) {
2342 struct work_struct *work = list_first_entry(&worker->scheduled,
2344 process_one_work(worker, work);
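
process_one_work() ultimately calls worker->current_func(work), i.e. the callback receives only the work_struct pointer, so per-object state is recovered with container_of() from the embedded member. A sketch with a hypothetical my_device:

#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

struct my_device {
	int unit;
	struct work_struct refresh_work;
};

static void refresh_fn(struct work_struct *work)
{
	/* process_one_work() passed us only the work_struct pointer */
	struct my_device *dev = container_of(work, struct my_device, refresh_work);

	pr_info("refreshing unit %d\n", dev->unit);
}

static void start_refresh(struct my_device *dev)
{
	INIT_WORK(&dev->refresh_work, refresh_fn);
	queue_work(system_wq, &dev->refresh_work);
}
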
2359 * worker_thread - the worker thread function
2362 * The worker thread function. All workers belong to a worker_pool -
2372 struct worker *worker = __worker;
2373 struct worker_pool *pool = worker->pool;
2375 /* tell the scheduler that this is a workqueue worker */
2381 if (unlikely(worker->flags & WORKER_DIE)) {
2383 WARN_ON_ONCE(!list_empty(&worker->entry));
2386 set_task_comm(worker->task, "kworker/dying");
2387 ida_simple_remove(&pool->worker_ida, worker->id);
2388 worker_detach_from_pool(worker);
2389 kfree(worker);
2393 worker_leave_idle(worker);
2395 /* no more worker necessary? */
2400 if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2404 * ->scheduled list can only be filled while a worker is
2408 WARN_ON_ONCE(!list_empty(&worker->scheduled));
2412 * worker or that someone else has already assumed the manager
2413 * role. This is where @worker starts participating in concurrency
2417 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2428 process_one_work(worker, work);
2429 if (unlikely(!list_empty(&worker->scheduled)))
2430 process_scheduled_works(worker);
2432 move_linked_works(work, &worker->scheduled, NULL);
2433 process_scheduled_works(worker);
2437 worker_set_flags(worker, WORKER_PREP);
2446 worker_enter_idle(worker);
2461 * worker which uses GFP_KERNEL allocation which has slight chance of
2476 struct worker *rescuer = __rescuer;
2484 * Mark rescuer as worker too. As WORKER_PREP is never cleared, it
2568 * regular worker; otherwise, we end up with 0 concurrency
2610 struct worker *worker;
2615 worker = current_wq_worker();
2620 WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
2623 worker->current_pwq->wq->name, worker->current_func,
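
The rescuer lines above are the mayday path: a workqueue created with WQ_MEM_RECLAIM gets a dedicated rescuer so its work items keep making progress when new workers cannot be created under memory pressure, and the WARN_ONCE just above (check_flush_dependency()) flags the case where such a workqueue waits on one without that guarantee. Hypothetical setup:

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *io_wq;

static int setup_io_wq(void)
{
	/*
	 * WQ_MEM_RECLAIM attaches a rescuer thread; work items used on the
	 * memory-reclaim path should run on such a workqueue and must not
	 * flush work on a !WQ_MEM_RECLAIM workqueue (check_flush_dependency()
	 * warns about that).
	 */
	io_wq = alloc_workqueue("demo_io", WQ_MEM_RECLAIM, 0);
	return io_wq ? 0 : -ENOMEM;
}
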
2644 * @worker: worker currently executing @target, NULL if @target is not executing
2657 * Note that when @worker is non-NULL, @target may be modified
2665 struct work_struct *target, struct worker *worker)
2685 * barrier to the worker; otherwise, put it after @target.
2687 if (worker)
2688 head = worker->scheduled.next;
2990 struct worker *worker = NULL;
3010 worker = find_worker_executing_work(pool, work);
3011 if (!worker)
3013 pwq = worker->current_pwq;
3018 insert_wq_barrier(pwq, barr, work, worker);
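
insert_wq_barrier()/start_flush_work() above are what flush_work() is built on: a barrier work is inserted right behind the instance the worker is executing (worker->scheduled.next) and the flusher waits for that barrier to complete. From the caller's side, continuing the hypothetical my_device example from the earlier sketch:

static void stop_refresh(struct my_device *dev)
{
	/*
	 * flush_work() waits for a running refresh_fn() by queueing a
	 * barrier right after it on the executing worker; cancel_work_sync()
	 * additionally removes a pending, not-yet-started instance.
	 */
	cancel_work_sync(&dev->refresh_work);
}
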
3553 struct worker *worker;
3581 while ((worker = first_idle_worker(pool)))
3582 destroy_worker(worker);
3662 /* create and start the initial worker */
3765 * Need to kick a worker after thawed or an unbound wq's
3767 * worker will cause interference on the isolated cpu cores, so
4245 struct worker *rescuer;
4406 struct worker *rescuer = wq->rescuer;
4512 * Determine if %current task is a workqueue worker and what it's working on.
4515 * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
4519 struct worker *worker = current_wq_worker();
4521 return worker ? worker->current_work : NULL;
4535 struct worker *worker = current_wq_worker();
4537 return worker && worker->rescue_wq;
4622 * the work item is about. If the worker task gets dumped, this
4628 struct worker *worker = current_wq_worker();
4631 if (worker) {
4633 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
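
current_work(), current_is_workqueue_rescuer() and the description written via set_worker_desc() let a work item identify itself; the description is what print_worker_info() below prints when the task is dumped. A hypothetical flush callback:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_bdev {
	char name[16];
	struct work_struct flush_work;
};

static void bdev_flush_fn(struct work_struct *work)
{
	struct my_bdev *b = container_of(work, struct my_bdev, flush_work);

	/* recorded in worker->desc and shown if this kworker gets dumped */
	set_worker_desc("flush-%s", b->name);

	if (current_is_workqueue_rescuer()) {
		/* running off the rescuer: memory is tight, stay lean */
	}

	/* ... write b back ... */
}
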
4640 * print_worker_info - print out worker information and description
4644 * If @task is a worker and currently executing a work item, print out the
4645 * name of the workqueue being serviced and worker description set with
4659 struct worker *worker;
4668 worker = kthread_probe_data(task);
4674 copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn));
4675 copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq));
4678 copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1);
4714 struct worker *worker;
4725 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4726 if (worker->current_pwq == pwq) {
4735 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4736 if (worker->current_pwq != pwq)
4740 task_pid_nr(worker->task),
4741 worker->rescue_wq ? "(RESCUER)" : "",
4742 worker->current_func);
4743 list_for_each_entry(work, &worker->scheduled, entry)
4797 pr_info("Showing busy workqueues and worker pools:\n");
4829 struct worker *worker;
4837 /* How long the first pending work is waiting for a worker. */
4847 list_for_each_entry(worker, &pool->idle_list, entry) {
4849 task_pid_nr(worker->task));
4866 /* used to show worker information through /proc/PID/{comm,stat,status} */
4876 /* stabilize PF_WQ_WORKER and worker pool association */
4880 struct worker *worker = kthread_data(task);
4881 struct worker_pool *pool = worker->pool;
4890 if (worker->desc[0] != '\0') {
4891 if (worker->current_work)
4893 worker->desc);
4896 worker->desc);
4914 * worker pools serve mix of short, long and very long running works making
4925 struct worker *worker;
4938 for_each_pool_worker(worker, pool)
4939 worker->flags |= WORKER_UNBOUND;
4966 * worker blocking could lead to lengthy stalls. Kick off
4983 struct worker *worker;
4994 for_each_pool_worker(worker, pool)
4995 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
5002 for_each_pool_worker(worker, pool) {
5003 unsigned int worker_flags = worker->flags;
5006 * A bound idle worker should actually be on the runqueue
5010 * replacing UNBOUND with REBOUND is safe as no worker will
5014 wake_up_process(worker->task);
5020 * @worker will clear REBOUND using worker_clr_flags() when
5023 * @worker clears REBOUND doesn't affect correctness.
5025 * WRITE_ONCE() is necessary because @worker->flags may be
5034 WRITE_ONCE(worker->flags, worker_flags);
5046 * CPUs. When a worker of such pool get scheduled, the scheduler resets
5053 struct worker *worker;
5064 for_each_pool_worker(worker, pool)
5065 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
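
The unbind/rebind lines above handle CPU hotplug: when a CPU goes down, its per-CPU pool's workers are flagged WORKER_UNBOUND and allowed to run anywhere; when it comes back, their cpumask is restored and UNBOUND is swapped for REBOUND. For users this means per-CPU placement is best effort, as in this hypothetical helper:

#include <linux/workqueue.h>

static void stats_fn(struct work_struct *work) { /* ... */ }
static DECLARE_WORK(stats_work, stats_fn);

static void kick_stats_on(int cpu)
{
	/*
	 * Prefer @cpu, but if it goes offline unbind_workers() detaches the
	 * pool's workers from it and the item still executes, just not on
	 * @cpu. Callers that truly need CPU locality must pin the CPU
	 * themselves (e.g. under cpus_read_lock()).
	 */
	queue_work_on(cpu, system_wq, &stats_work);
}
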
5772 * Workqueue watchdog monitors all worker pools periodically and dumps
6045 * are no kworkers executing the work items yet. Populate the worker pools