Lines Matching defs:pool
3 * kernel/workqueue.c - generic async execution with shared worker pool
19 * executed in process context. The worker pool is shared and
61 * A bound pool is either associated or disassociated with its CPU.
68 * be executing on any CPU. The pool behaves as an unbound one.
90 UNBOUND_POOL_HASH_ORDER = 6, /* hashed by pool->attrs */
121 * L: pool->lock protected. Access with pool->lock held.
123 * X: During normal operation, modification requires pool->lock and should
125 * cpu or grabbing pool->lock is enough for read access. If
149 raw_spinlock_t lock; /* the pool lock */
152 int id; /* I: pool ID */
188 * Destruction of pool is RCU protected to allow dereferences
195 * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS
201 struct worker_pool *pool; /* I: the associated pool */
334 /* PL: hash of all unbound pools keyed by pool->attrs */
376 #define for_each_cpu_worker_pool(pool, cpu) \
377 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
378 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
379 (pool)++)
383 * @pool: iteration cursor
387 * locked. If the pool needs to be used beyond the locking in effect, the
388 * caller is responsible for guaranteeing that the pool stays online.
393 #define for_each_pool(pool, pi) \
394 idr_for_each_entry(&worker_pool_idr, pool, pi) \
401 * @pool: worker_pool to iterate workers of
408 #define for_each_pool_worker(worker, pool) \
409 list_for_each_entry((worker), &(pool)->workers, node) \
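These iterators are thin wrappers over the underlying data: for_each_cpu_worker_pool() walks the fixed-size per-CPU array of standard pools by pointer, for_each_pool() walks the pool IDR, and for_each_pool_worker() walks pool->workers. A minimal userspace sketch of the array-walking pattern behind the first macro (names and the two-pool count here are illustrative, not the kernel's definitions):

#include <stdio.h>

#define NR_STD_POOLS 2                  /* normal + highpri, like the kernel's std pools */

struct pool { int id; int nice; };

/* advance a cursor pointer through a fixed-size pool array, the same
 * shape as for_each_cpu_worker_pool() over per_cpu(cpu_worker_pools) */
#define for_each_pool_in(pool, pools) \
        for ((pool) = &(pools)[0]; (pool) < &(pools)[NR_STD_POOLS]; (pool)++)

int main(void)
{
        struct pool cpu0_pools[NR_STD_POOLS] = { { 0, 0 }, { 1, -20 } };
        struct pool *p;

        for_each_pool_in(p, cpu0_pools)
                printf("pool %d nice %d\n", p->id, p->nice);
        return 0;
}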
527 * worker_pool_assign_id - allocate ID and assign it to @pool
528 * @pool: the pool pointer of interest
533 static int worker_pool_assign_id(struct worker_pool *pool)
539 ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
542 pool->id = ret;
596 * is cleared and the high bits contain OFFQ flags and pool ID.
599 * and clear_work_data() can be used to set the pwq, pool or clear
603 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
705 * All fields of the returned pool are accessible as long as the above
706 * mentioned locking is in effect. If the returned pool needs to be used
708 * returned pool is and stays online.
720 return work_struct_pwq(data)->pool;
730 * get_work_pool_id - return the worker pool ID a given work is associated with
741 return work_struct_pwq(data)->pool->id;
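get_work_pwq(), get_work_pool() and get_work_pool_id() all decode the same tagged word: while a work item is queued, work->data carries a pool_workqueue pointer plus flag bits; once it is dequeued, the high bits carry OFFQ flags and the last pool ID instead. A rough standalone illustration of that pointer-or-ID tagging, using a made-up bit layout rather than the kernel's WORK_STRUCT_*/WORK_OFFQ_* constants:

#include <assert.h>
#include <stdio.h>

/* illustrative layout: bit 0 set means "this word holds a pointer",
 * otherwise an ID is stored above the low flag bits */
#define DATA_IS_PTR   0x1UL
#define DATA_ID_SHIFT 5

static unsigned long encode_ptr(void *ptr)        { return (unsigned long)ptr | DATA_IS_PTR; }
static unsigned long encode_id(unsigned long id)  { return id << DATA_ID_SHIFT; }
static void *decode_ptr(unsigned long d)          { return (void *)(d & ~DATA_IS_PTR); }
static unsigned long decode_id(unsigned long d)   { return d >> DATA_ID_SHIFT; }

int main(void)
{
        int pwq;                                  /* stand-in for a pool_workqueue */
        unsigned long data = encode_ptr(&pwq);    /* "queued": remember the pwq */

        assert((data & DATA_IS_PTR) && decode_ptr(data) == &pwq);

        data = encode_id(42);                     /* "dequeued": remember only the pool ID */
        assert(!(data & DATA_IS_PTR));
        printf("last pool id: %lu\n", decode_id(data));
        return 0;
}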
764 * they're being called with pool->lock held.
767 static bool __need_more_worker(struct worker_pool *pool)
769 return !atomic_read(&pool->nr_running);
780 static bool need_more_worker(struct worker_pool *pool)
782 return !list_empty(&pool->worklist) && __need_more_worker(pool);
786 static bool may_start_working(struct worker_pool *pool)
788 return pool->nr_idle;
792 static bool keep_working(struct worker_pool *pool)
794 return !list_empty(&pool->worklist) &&
795 atomic_read(&pool->nr_running) <= 1;
799 static bool need_to_create_worker(struct worker_pool *pool)
801 return need_more_worker(pool) && !may_start_working(pool);
805 static bool too_many_workers(struct worker_pool *pool)
807 bool managing = pool->flags & POOL_MANAGER_ACTIVE;
808 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
809 int nr_busy = pool->nr_workers - nr_idle;
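Together these predicates implement the pool's concurrency management from a few counters: nr_running (workers actually executing), nr_idle, nr_workers and the worklist. The following is a condensed userspace restatement of the same checks, assuming a simplified pool structure; the idle cushion of 2 and the ratio of 4 in too_many_workers() are stated as an assumption mirroring the kernel's MAX_IDLE_WORKERS_RATIO logic:

#include <stdbool.h>
#include <stdio.h>

struct xpool {                          /* hypothetical, pared-down worker_pool */
        int  nr_running;                /* workers currently executing work */
        int  nr_idle;                   /* workers on the idle list */
        int  nr_workers;                /* all workers attached to the pool */
        bool worklist_empty;            /* no pending work items */
        bool manager_active;            /* a worker is playing manager */
};

static bool need_more_worker(struct xpool *p)
{
        /* work is pending but nobody is running: someone must pick it up */
        return !p->worklist_empty && p->nr_running == 0;
}

static bool may_start_working(struct xpool *p)
{
        return p->nr_idle > 0;          /* an idle worker can be woken right away */
}

static bool need_to_create_worker(struct xpool *p)
{
        return need_more_worker(p) && !may_start_working(p);
}

static bool keep_working(struct xpool *p)
{
        /* keep this worker going while work remains and at most one
         * worker is running, i.e. preserve minimal concurrency */
        return !p->worklist_empty && p->nr_running <= 1;
}

static bool too_many_workers(struct xpool *p)
{
        int nr_idle = p->nr_idle + (p->manager_active ? 1 : 0);
        int nr_busy = p->nr_workers - nr_idle;

        /* assumed cushion/ratio: more than 2 idle workers and at least
         * one idle worker per 4 busy ones beyond that cushion */
        return nr_idle > 2 && (nr_idle - 2) * 4 >= nr_busy;
}

int main(void)
{
        struct xpool p = { .nr_workers = 3, .worklist_empty = false };

        printf("need_more=%d create=%d keep=%d too_many=%d\n",
               need_more_worker(&p), need_to_create_worker(&p),
               keep_working(&p), too_many_workers(&p));
        return 0;
}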
819 static struct worker *first_idle_worker(struct worker_pool *pool)
821 if (unlikely(list_empty(&pool->idle_list)))
824 return list_first_entry(&pool->idle_list, struct worker, entry);
829 * @pool: worker pool to wake worker from
831 * Wake up the first idle worker of @pool.
834 * raw_spin_lock_irq(pool->lock).
836 static void wake_up_worker(struct worker_pool *pool)
838 struct worker *worker = first_idle_worker(pool);
860 * and leave with an unexpected pool->nr_running == 1 on the newly unbound
861 * pool. Protect against such race.
865 atomic_inc(&worker->pool->nr_running);
881 struct worker_pool *pool;
891 pool = worker->pool;
898 raw_spin_lock_irq(&pool->lock);
908 * manipulating idle_list, so dereferencing idle_list without pool
911 if (atomic_dec_and_test(&pool->nr_running) &&
912 !list_empty(&pool->worklist)) {
913 next = first_idle_worker(pool);
917 raw_spin_unlock_irq(&pool->lock);
959 * raw_spin_lock_irq(pool->lock)
963 struct worker_pool *pool = worker->pool;
970 atomic_dec(&pool->nr_running);
984 * raw_spin_lock_irq(pool->lock)
988 struct worker_pool *pool = worker->pool;
1002 atomic_inc(&pool->nr_running);
1007 * @pool: pool of interest
1010 * Find a worker which is executing @work on @pool by searching
1011 * @pool->busy_hash which is keyed by the address of @work. For a worker
1032 * raw_spin_lock_irq(pool->lock).
1038 static struct worker *find_worker_executing_work(struct worker_pool *pool,
1043 hash_for_each_possible(pool->busy_hash, worker, hentry,
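find_worker_executing_work() answers "is this work item already running on this pool, and on which worker?" by hashing the work pointer into pool->busy_hash and walking only that bucket. A self-contained sketch of the same pointer-keyed lookup with a tiny chained hash table (bucket count, hash function and field names are invented for illustration):

#include <stdint.h>
#include <stddef.h>

#define BUSY_HASH_SIZE 64               /* illustrative bucket count */

struct work { int payload; };

struct worker {
        struct worker *hnext;           /* bucket chain link */
        struct work   *current_work;    /* what this worker is executing */
};

struct busy_hash { struct worker *bucket[BUSY_HASH_SIZE]; };

static size_t hash_ptr(const void *p)
{
        return ((uintptr_t)p >> 4) % BUSY_HASH_SIZE;    /* crude pointer hash */
}

void busy_hash_add(struct busy_hash *h, struct worker *w, struct work *work)
{
        size_t b = hash_ptr(work);

        w->current_work = work;
        w->hnext = h->bucket[b];
        h->bucket[b] = w;
}

/* walk only the bucket the work pointer hashes to and compare addresses,
 * the way find_worker_executing_work() scans pool->busy_hash */
struct worker *find_worker_executing(struct busy_hash *h, struct work *work)
{
        struct worker *w;

        for (w = h->bucket[hash_ptr(work)]; w; w = w->hnext)
                if (w->current_work == work)
                        return w;
        return NULL;
}

int main(void)
{
        static struct busy_hash h;
        struct worker w = { 0 };
        struct work work = { 42 };

        busy_hash_add(&h, &w, &work);
        return find_worker_executing(&h, &work) == &w ? 0 : 1;
}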
1067 * raw_spin_lock_irq(pool->lock).
1098 * @pwq has positive refcnt and be holding the matching pool->lock.
1102 lockdep_assert_held(&pwq->pool->lock);
1112 * destruction. The caller should be holding the matching pool->lock.
1116 lockdep_assert_held(&pwq->pool->lock);
1122 * @pwq can't be released under pool->lock, bounce to
1124 * pool->lock as this path is taken only for unbound workqueues and
1126 * avoid lockdep warning, unbound pool->locks are given lockdep
1133 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1145 raw_spin_lock_irq(&pwq->pool->lock);
1147 raw_spin_unlock_irq(&pwq->pool->lock);
1156 if (list_empty(&pwq->pool->worklist))
1157 pwq->pool->watchdog_ts = jiffies;
1158 move_linked_works(work, &pwq->pool->worklist, NULL);
1180 * raw_spin_lock_irq(pool->lock).
1251 struct worker_pool *pool;
1278 pool = get_work_pool(work);
1279 if (!pool)
1282 raw_spin_lock(&pool->lock);
1286 * to pwq on queueing and to pool on dequeueing are done under
1287 * pwq->pool->lock. This in turn guarantees that, if work->data
1288 * points to pwq which is associated with a locked pool, the work
1289 * item is currently queued on that pool.
1292 if (pwq && pwq->pool == pool) {
1308 /* work->data points to pwq iff queued, point to pool */
1309 set_work_pool_and_keep_pending(work, pool->id);
1311 raw_spin_unlock(&pool->lock);
1315 raw_spin_unlock(&pool->lock);
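The grab path above only trusts work->data after re-validating it under the lock: it locks the pool recorded for the work, then checks that the pwq the work points at still belongs to that locked pool before dequeueing and rewriting work->data to keep just the pool ID. A thumbnail of that check-under-lock pattern, with a pthread mutex standing in for pool->lock and invented structure names:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct gpool { pthread_mutex_t lock; };           /* stands in for worker_pool */
struct gpwq  { struct gpool *pool; };             /* stands in for pool_workqueue */
struct gwork { struct gpwq *pwq; bool queued; };  /* stands in for work_struct */

/* steal @work off its queue only if, with the pool locked, it still
 * points at a pwq that belongs to this very pool */
bool try_to_grab(struct gwork *work, struct gpool *pool)
{
        bool grabbed = false;

        pthread_mutex_lock(&pool->lock);
        if (work->queued && work->pwq && work->pwq->pool == pool) {
                work->queued = false;             /* dequeue under the pool lock */
                work->pwq = NULL;                 /* keep only "which pool" from here on */
                grabbed = true;
        }
        pthread_mutex_unlock(&pool->lock);
        return grabbed;
}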
1326 * insert_work - insert a work into a pool
1336 * raw_spin_lock_irq(pool->lock).
1341 struct worker_pool *pool = pwq->pool;
1355 if (__need_more_worker(pool))
1356 wake_up_worker(pool);
1444 * If @work was previously on a different pool, it might still be
1446 * pool to guarantee non-reentrancy.
1449 if (last_pool && last_pool != pwq->pool) {
1461 raw_spin_lock(&pwq->pool->lock);
1464 raw_spin_lock(&pwq->pool->lock);
1477 raw_spin_unlock(&pwq->pool->lock);
1498 worklist = &pwq->pool->worklist;
1500 pwq->pool->watchdog_ts = jiffies;
1510 raw_spin_unlock(&pwq->pool->lock);
1779 * raw_spin_lock_irq(pool->lock).
1783 struct worker_pool *pool = worker->pool;
1792 pool->nr_idle++;
1796 list_add(&worker->entry, &pool->idle_list);
1798 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1799 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1803 * pool->lock between setting %WORKER_UNBOUND and zapping
1807 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
1808 pool->nr_workers == pool->nr_idle &&
1809 atomic_read(&pool->nr_running));
1819 * raw_spin_lock_irq(pool->lock).
1823 struct worker_pool *pool = worker->pool;
1828 pool->nr_idle--;
1848 * worker_attach_to_pool() - attach a worker to a pool
1850 * @pool: the target pool
1852 * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and
1853 * cpu-binding of @worker are kept coordinated with the pool across
1857 struct worker_pool *pool)
1866 if (pool->flags & POOL_DISASSOCIATED)
1870 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1872 list_add_tail(&worker->node, &pool->workers);
1873 worker->pool = pool;
1879 * worker_detach_from_pool() - detach a worker from its pool
1880 * @worker: worker which is attached to its pool
1883 * caller worker shouldn't access the pool after detaching unless it has
1884 * another reference to the pool.
1888 struct worker_pool *pool = worker->pool;
1894 worker->pool = NULL;
1896 if (list_empty(&pool->workers))
1897 detach_completion = pool->detach_completion;
1900 /* clear leftover flags without pool->lock after it is detached */
1909 * @pool: pool the new worker will belong to
1911 * Create and start a new worker which is attached to @pool.
1919 static struct worker *create_worker(struct worker_pool *pool)
1926 id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL);
1930 worker = alloc_worker(pool->node);
1936 if (pool->cpu >= 0)
1937 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
1938 pool->attrs->nice < 0 ? "H" : "");
1940 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
1942 worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
1947 set_user_nice(worker->task, pool->attrs->nice);
1948 kthread_bind_mask(worker->task, pool->attrs->cpumask);
1950 /* successful, attach the worker to the pool */
1951 worker_attach_to_pool(worker, pool);
1954 raw_spin_lock_irq(&pool->lock);
1955 worker->pool->nr_workers++;
1958 raw_spin_unlock_irq(&pool->lock);
1964 ida_simple_remove(&pool->worker_ida, id);
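create_worker() allocates an ID from the pool's IDA, builds the name that later shows up as kworker/<...>, and binds the kthread to the pool's cpumask and nice level. The naming rule is visible in the format strings above: per-CPU pools yield "<cpu>:<id>" with an "H" suffix for the nice < 0 highpri pool, unbound pools yield "u<poolid>:<id>". That formatting in isolation, as a small runnable sketch (the helper name is hypothetical):

#include <stddef.h>
#include <stdio.h>

/* mirrors the id_buf formatting in create_worker(): per-cpu pools name
 * workers "<cpu>:<id>" ("H" appended for the nice < 0 highpri pool),
 * unbound pools name them "u<poolid>:<id>" */
void format_worker_name(char *buf, size_t len, int cpu, int pool_id,
                        int worker_id, int nice)
{
        if (cpu >= 0)
                snprintf(buf, len, "%d:%d%s", cpu, worker_id,
                         nice < 0 ? "H" : "");
        else
                snprintf(buf, len, "u%d:%d", pool_id, worker_id);
}

int main(void)
{
        char name[32];

        format_worker_name(name, sizeof(name), 3, -1, 7, -20);
        printf("kworker/%s\n", name);             /* kworker/3:7H */

        format_worker_name(name, sizeof(name), -1, 5, 2, 0);
        printf("kworker/%s\n", name);             /* kworker/u5:2 */
        return 0;
}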
1973 * Destroy @worker and adjust @pool stats accordingly. The worker should
1977 * raw_spin_lock_irq(pool->lock).
1981 struct worker_pool *pool = worker->pool;
1983 lockdep_assert_held(&pool->lock);
1991 pool->nr_workers--;
1992 pool->nr_idle--;
2001 struct worker_pool *pool = from_timer(pool, t, idle_timer);
2003 raw_spin_lock_irq(&pool->lock);
2005 while (too_many_workers(pool)) {
2010 worker = list_entry(pool->idle_list.prev, struct worker, entry);
2014 mod_timer(&pool->idle_timer, expires);
2021 raw_spin_unlock_irq(&pool->lock);
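idle_worker_timeout() reaps from the tail of pool->idle_list, i.e. the worker that has been idle the longest, but only once it has been idle for the full timeout; otherwise it re-arms the timer for the moment that worker will expire and stops. A toy, lock-free rendition of that loop, with an array standing in for the idle list and plain integers for jiffies (all names hypothetical):

#include <stdio.h>

struct iworker { long last_active; };             /* when the worker went idle */

struct ipool {
        struct iworker idle[8];                   /* toy idle_list, longest idle last */
        int nr_idle;
        long timer_expires;                       /* 0 == idle timer not armed */
};

static int too_many_idle(struct ipool *p) { return p->nr_idle > 2; }

/* skeleton of idle_worker_timeout(): while the pool has too many idle
 * workers, look at the one idle the longest; if it has not yet aged past
 * the timeout, re-arm the timer for when it will and stop, otherwise
 * reap it (destroy_worker() in the kernel) and try the next one */
void idle_timeout_sketch(struct ipool *p, long now, long timeout)
{
        while (too_many_idle(p)) {
                struct iworker *w = &p->idle[p->nr_idle - 1];
                long expires = w->last_active + timeout;

                if (now < expires) {
                        p->timer_expires = expires;
                        break;
                }
                p->nr_idle--;                     /* reap the expired worker */
        }
}

int main(void)
{
        struct ipool p = { .nr_idle = 4 };

        p.idle[3].last_active = 100;              /* idle longest, already expired */
        p.idle[2].last_active = 400;              /* still within the timeout */
        idle_timeout_sketch(&p, /*now=*/500, /*timeout=*/300);
        printf("idle left: %d, timer re-armed for %ld\n",
               p.nr_idle, p.timer_expires);       /* idle left: 3, ... for 700 */
        return 0;
}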
2049 struct worker_pool *pool = from_timer(pool, t, mayday_timer);
2052 raw_spin_lock_irq(&pool->lock);
2055 if (need_to_create_worker(pool)) {
2062 list_for_each_entry(work, &pool->worklist, entry)
2067 raw_spin_unlock_irq(&pool->lock);
2069 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
2074 * @pool: pool to create a new worker for
2076 * Create a new worker for @pool if necessary. @pool is guaranteed to
2079 * sent to all rescuers with works scheduled on @pool to resolve
2086 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2090 static void maybe_create_worker(struct worker_pool *pool)
2091 __releases(&pool->lock)
2092 __acquires(&pool->lock)
2095 raw_spin_unlock_irq(&pool->lock);
2098 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
2101 if (create_worker(pool) || !need_to_create_worker(pool))
2106 if (!need_to_create_worker(pool))
2110 del_timer_sync(&pool->mayday_timer);
2111 raw_spin_lock_irq(&pool->lock);
2114 * created as @pool->lock was dropped and the new worker might have
2117 if (need_to_create_worker(pool))
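maybe_create_worker() drops pool->lock, arms the mayday timer so rescuers can step in if creation stalls, and keeps calling create_worker() until either a worker exists or none is needed; it then disarms the timer, retakes the lock and re-checks once more. The control flow, reduced to a userspace skeleton with the locking and sleeping elided (structure and helper names are invented):

#include <stdbool.h>

struct cpool {
        int nr_idle;                              /* workers ready to run work */
        int pending_work;                         /* queued work items */
        bool mayday_armed;                        /* stands in for pool->mayday_timer */
};

static bool need_new_worker(struct cpool *p)
{
        return p->pending_work > 0 && p->nr_idle == 0;
}

static bool try_create_worker(struct cpool *p)
{
        /* real create_worker() can fail under memory pressure; this toy
         * always succeeds and the new worker enters the pool idle */
        p->nr_idle++;
        return true;
}

/* shape of maybe_create_worker(): arm the mayday timer, retry creation
 * until a worker exists or none is needed any more, disarm the timer and
 * re-check once; the kernel drops pool->lock around all of this and
 * sleeps CREATE_COOLDOWN between attempts */
void maybe_create_sketch(struct cpool *p)
{
restart:
        p->mayday_armed = true;                   /* mod_timer(&pool->mayday_timer, ...) */

        while (!try_create_worker(p) && need_new_worker(p))
                ;                                 /* creation failed, still needed: retry */

        p->mayday_armed = false;                  /* del_timer_sync(&pool->mayday_timer) */
        if (need_new_worker(p))
                goto restart;
}

int main(void)
{
        struct cpool p = { .nr_idle = 0, .pending_work = 3 };

        maybe_create_sketch(&p);
        return p.nr_idle > 0 ? 0 : 1;             /* a worker now exists for the work */
}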
2122 * manage_workers - manage worker pool
2125 * Assume the manager role and manage the worker pool @worker belongs
2127 * pool. The exclusion is handled automatically by this function.
2134 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2138 * %false if the pool doesn't need management and the caller can safely
2145 struct worker_pool *pool = worker->pool;
2147 if (pool->flags & POOL_MANAGER_ACTIVE)
2150 pool->flags |= POOL_MANAGER_ACTIVE;
2151 pool->manager = worker;
2153 maybe_create_worker(pool);
2155 pool->manager = NULL;
2156 pool->flags &= ~POOL_MANAGER_ACTIVE;
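manage_workers() arbitrates a single manager per pool with the POOL_MANAGER_ACTIVE flag: if somebody else already holds the role, the caller returns false and goes back to processing work; otherwise it takes the role, runs maybe_create_worker() and drops the role again. A loose pthread-based analog of that exclusion (the kernel does this under pool->lock and wakes waiters via manager_wait, both simplified away here):

#include <pthread.h>
#include <stdbool.h>

struct mpool {
        pthread_mutex_t lock;                     /* stands in for pool->lock */
        bool manager_active;                      /* stands in for POOL_MANAGER_ACTIVE */
};

/* only one worker at a time may act as manager; everybody else bails out
 * with false and simply goes back to processing work */
bool manage_sketch(struct mpool *p, void (*create_workers)(struct mpool *))
{
        pthread_mutex_lock(&p->lock);
        if (p->manager_active) {
                pthread_mutex_unlock(&p->lock);
                return false;
        }
        p->manager_active = true;
        pthread_mutex_unlock(&p->lock);

        create_workers(p);                        /* maybe_create_worker(); may block */

        pthread_mutex_lock(&p->lock);
        p->manager_active = false;                /* release the manager role */
        pthread_mutex_unlock(&p->lock);
        return true;
}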
2173 * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
2176 __releases(&pool->lock)
2177 __acquires(&pool->lock)
2180 struct worker_pool *pool = worker->pool;
2197 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
2198 raw_smp_processor_id() != pool->cpu);
2206 collision = find_worker_executing_work(pool, work);
2214 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2244 if (need_more_worker(pool))
2245 wake_up_worker(pool);
2248 * Record the last pool and clear PENDING which should be the last
2249 * update to @work. Also, do this inside @pool->lock so that
2253 set_work_pool_and_clear_pending(work, pool->id);
2255 raw_spin_unlock_irq(&pool->lock);
2310 raw_spin_lock_irq(&pool->lock);
2336 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2373 struct worker_pool *pool = worker->pool;
2378 raw_spin_lock_irq(&pool->lock);
2382 raw_spin_unlock_irq(&pool->lock);
2387 ida_simple_remove(&pool->worker_ida, worker->id);
2396 if (!need_more_worker(pool))
2400 if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2421 list_first_entry(&pool->worklist,
2424 pool->watchdog_ts = jiffies;
2435 } while (keep_working(pool));
2440 * pool->lock is held and there's no work to process and no need to
2442 * pool->lock or from local cpu, so setting the current state
2443 * before releasing pool->lock is enough to prevent losing any
2448 raw_spin_unlock_irq(&pool->lock);
2460 * Regular work processing on a pool may block trying to create a new
2466 * When such condition is possible, the pool summons rescuers of all
2467 * workqueues which have works queued on the pool and let them process
2507 struct worker_pool *pool = pwq->pool;
2516 worker_attach_to_pool(rescuer, pool);
2518 raw_spin_lock_irq(&pool->lock);
2525 list_for_each_entry_safe(work, n, &pool->worklist, entry) {
2528 pool->watchdog_ts = jiffies;
2546 if (pwq->nr_active && need_to_create_worker(pool)) {
2561 * Put the reference grabbed by send_mayday(). @pool won't
2567 * Leave this pool. If need_more_worker() is %true, notify a
2571 if (need_more_worker(pool))
2572 wake_up_worker(pool);
2574 raw_spin_unlock_irq(&pool->lock);
2661 * raw_spin_lock_irq(pool->lock).
2671 * debugobject calls are safe here even with pool->lock locked
2746 struct worker_pool *pool = pwq->pool;
2748 raw_spin_lock_irq(&pool->lock);
2765 raw_spin_unlock_irq(&pool->lock);
2965 raw_spin_lock_irq(&pwq->pool->lock);
2967 raw_spin_unlock_irq(&pwq->pool->lock);
2991 struct worker_pool *pool;
2997 pool = get_work_pool(work);
2998 if (!pool) {
3003 raw_spin_lock_irq(&pool->lock);
3007 if (unlikely(pwq->pool != pool))
3010 worker = find_worker_executing_work(pool, work);
3019 raw_spin_unlock_irq(&pool->lock);
3038 raw_spin_unlock_irq(&pool->lock);
3398 * ->no_numa as it is used for both pool and wq attrs. Instead,
3428 * @pool: worker_pool to initialize
3430 * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs.
3433 * inside @pool proper are initialized and put_unbound_pool() can be called
3434 * on @pool safely to release it.
3436 static int init_worker_pool(struct worker_pool *pool)
3438 raw_spin_lock_init(&pool->lock);
3439 pool->id = -1;
3440 pool->cpu = -1;
3441 pool->node = NUMA_NO_NODE;
3442 pool->flags |= POOL_DISASSOCIATED;
3443 pool->watchdog_ts = jiffies;
3444 INIT_LIST_HEAD(&pool->worklist);
3445 INIT_LIST_HEAD(&pool->idle_list);
3446 hash_init(pool->busy_hash);
3448 timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
3450 timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
3452 INIT_LIST_HEAD(&pool->workers);
3454 ida_init(&pool->worker_ida);
3455 INIT_HLIST_NODE(&pool->hash_node);
3456 pool->refcnt = 1;
3459 pool->attrs = alloc_workqueue_attrs();
3460 if (!pool->attrs)
3520 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
3522 ida_destroy(&pool->worker_ida);
3523 free_workqueue_attrs(pool->attrs);
3524 kfree(pool);
3527 /* This returns with the lock held on success (pool manager is inactive). */
3528 static bool wq_manager_inactive(struct worker_pool *pool)
3530 raw_spin_lock_irq(&pool->lock);
3532 if (pool->flags & POOL_MANAGER_ACTIVE) {
3533 raw_spin_unlock_irq(&pool->lock);
3541 * @pool: worker_pool to put
3543 * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU
3550 static void put_unbound_pool(struct worker_pool *pool)
3557 if (--pool->refcnt)
3561 if (WARN_ON(!(pool->cpu < 0)) ||
3562 WARN_ON(!list_empty(&pool->worklist)))
3566 if (pool->id >= 0)
3567 idr_remove(&worker_pool_idr, pool->id);
3568 hash_del(&pool->hash_node);
3572 * @pool's workers from blocking on attach_mutex. We're the last
3573 * manager and @pool gets freed with the flag set.
3577 rcuwait_wait_event(&manager_wait, wq_manager_inactive(pool),
3579 pool->flags |= POOL_MANAGER_ACTIVE;
3581 while ((worker = first_idle_worker(pool)))
3583 WARN_ON(pool->nr_workers || pool->nr_idle);
3584 raw_spin_unlock_irq(&pool->lock);
3587 if (!list_empty(&pool->workers))
3588 pool->detach_completion = &detach_completion;
3591 if (pool->detach_completion)
3592 wait_for_completion(pool->detach_completion);
3595 del_timer_sync(&pool->idle_timer);
3596 del_timer_sync(&pool->mayday_timer);
3599 call_rcu(&pool->rcu, rcu_free_pool);
3619 struct worker_pool *pool;
3625 /* do we already have a matching pool? */
3626 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
3627 if (wqattrs_equal(pool->attrs, attrs)) {
3628 pool->refcnt++;
3629 return pool;
3645 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
3646 if (!pool || init_worker_pool(pool) < 0)
3649 lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */
3650 copy_workqueue_attrs(pool->attrs, attrs);
3651 pool->node = target_node;
3657 pool->attrs->no_numa = false;
3659 if (worker_pool_assign_id(pool) < 0)
3663 if (wq_online && !create_worker(pool))
3667 hash_add(unbound_pool_hash, &pool->hash_node, hash);
3669 return pool;
3671 if (pool)
3672 put_unbound_pool(pool);
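get_unbound_pool() treats unbound pools as a shared, reference-counted cache keyed by workqueue attributes: a matching pool in unbound_pool_hash is reused with refcnt++, otherwise a new one is initialized, given an ID, published in the hash and returned; put_unbound_pool() undoes this when the last reference drops. A self-contained userspace sketch of that pattern, minus worker teardown, locking and RCU (the attrs layout, hash and names are all invented):

#include <stdbool.h>
#include <stdlib.h>

struct xattrs { int nice; unsigned long cpumask; };      /* toy workqueue_attrs */

struct upool {
        struct upool *hash_next;                  /* chain in the attrs hash */
        struct xattrs attrs;
        int refcnt;
};

#define UHASH_SIZE 64
static struct upool *uhash[UHASH_SIZE];           /* stands in for unbound_pool_hash */

static unsigned hash_attrs(const struct xattrs *a)
{
        return ((unsigned)a->nice * 31u + (unsigned)a->cpumask) % UHASH_SIZE;
}

static bool attrs_equal(const struct xattrs *a, const struct xattrs *b)
{
        return a->nice == b->nice && a->cpumask == b->cpumask;
}

/* same shape as get_unbound_pool(): reuse a pool whose attributes match,
 * bumping its refcount, otherwise create one and publish it in the hash */
struct upool *get_pool(const struct xattrs *attrs)
{
        unsigned b = hash_attrs(attrs);
        struct upool *p;

        for (p = uhash[b]; p; p = p->hash_next) {
                if (attrs_equal(&p->attrs, attrs)) {
                        p->refcnt++;
                        return p;
                }
        }

        p = calloc(1, sizeof(*p));
        if (!p)
                return NULL;
        p->attrs = *attrs;
        p->refcnt = 1;
        p->hash_next = uhash[b];
        uhash[b] = p;
        return p;
}

/* counterpart of put_unbound_pool(), minus worker teardown and RCU:
 * drop a reference and free the pool once the last one is gone */
void put_pool(struct upool *p)
{
        struct upool **pp;

        if (--p->refcnt)
                return;
        for (pp = &uhash[hash_attrs(&p->attrs)]; *pp != p; pp = &(*pp)->hash_next)
                ;                                 /* find the link pointing at @p */
        *pp = p->hash_next;
        free(p);
}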
3691 struct worker_pool *pool = pwq->pool;
3709 put_unbound_pool(pool);
3746 raw_spin_lock_irqsave(&pwq->pool->lock, flags);
3771 wake_up_worker(pwq->pool);
3776 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
3779 /* initialize newly alloced @pwq which is associated with @wq and @pool */
3781 struct worker_pool *pool)
3787 pwq->pool = pool;
3818 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */
3822 struct worker_pool *pool;
3827 pool = get_unbound_pool(attrs);
3828 if (!pool)
3831 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
3833 put_unbound_pool(pool);
3837 init_pwq(pwq, wq, pool);
4156 if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
4157 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
4178 raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
4180 raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
4425 raw_spin_lock_irq(&pwq->pool->lock);
4430 raw_spin_unlock_irq(&pwq->pool->lock);
4436 raw_spin_unlock_irq(&pwq->pool->lock);
4595 struct worker_pool *pool;
4603 pool = get_work_pool(work);
4604 if (pool) {
4605 raw_spin_lock_irqsave(&pool->lock, flags);
4606 if (find_worker_executing_work(pool, work))
4608 raw_spin_unlock_irqrestore(&pool->lock, flags);
4688 static void pr_cont_pool_info(struct worker_pool *pool)
4690 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
4691 if (pool->node != NUMA_NO_NODE)
4692 pr_cont(" node=%d", pool->node);
4693 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
4712 struct worker_pool *pool = pwq->pool;
4718 pr_info(" pwq %d:", pool->id);
4719 pr_cont_pool_info(pool);
4725 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4735 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4750 list_for_each_entry(work, &pool->worklist, entry) {
4760 list_for_each_entry(work, &pool->worklist, entry) {
4791 struct worker_pool *pool;
4815 raw_spin_lock_irqsave(&pwq->pool->lock, flags);
4818 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
4828 for_each_pool(pool, pi) {
4833 raw_spin_lock_irqsave(&pool->lock, flags);
4834 if (pool->nr_workers == pool->nr_idle)
4838 if (!list_empty(&pool->worklist))
4839 hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000;
4841 pr_info("pool %d:", pool->id);
4842 pr_cont_pool_info(pool);
4843 pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers);
4844 if (pool->manager)
4846 task_pid_nr(pool->manager->task));
4847 list_for_each_entry(worker, &pool->idle_list, entry) {
4854 raw_spin_unlock_irqrestore(&pool->lock, flags);
4876 /* stabilize PF_WQ_WORKER and worker pool association */
4881 struct worker_pool *pool = worker->pool;
4883 if (pool) {
4884 raw_spin_lock_irq(&pool->lock);
4898 raw_spin_unlock_irq(&pool->lock);
4912 * pool which make migrating pending and scheduled works very
4924 struct worker_pool *pool;
4927 for_each_cpu_worker_pool(pool, cpu) {
4929 raw_spin_lock_irq(&pool->lock);
4938 for_each_pool_worker(worker, pool)
4941 pool->flags |= POOL_DISASSOCIATED;
4943 raw_spin_unlock_irq(&pool->lock);
4958 * worklist is not empty. This pool now behaves as an
4959 * unbound (in terms of concurrency management) pool which
4960 * is served by workers tied to the pool.
4962 atomic_set(&pool->nr_running, 0);
4969 raw_spin_lock_irq(&pool->lock);
4970 wake_up_worker(pool);
4971 raw_spin_unlock_irq(&pool->lock);
4976 * rebind_workers - rebind all workers of a pool to the associated CPU
4977 * @pool: pool of interest
4979 * @pool->cpu is coming online. Rebind all workers to the CPU.
4981 static void rebind_workers(struct worker_pool *pool)
4994 for_each_pool_worker(worker, pool)
4996 pool->attrs->cpumask) < 0);
4998 raw_spin_lock_irq(&pool->lock);
5000 pool->flags &= ~POOL_DISASSOCIATED;
5002 for_each_pool_worker(worker, pool) {
5011 * be bound before @pool->lock is released.
5037 raw_spin_unlock_irq(&pool->lock);
5042 * @pool: unbound pool of interest
5045 * An unbound pool may end up with a cpumask which doesn't have any online
5046 * CPUs. When a worker of such a pool gets scheduled, the scheduler resets
5047 * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any
5050 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
5057 /* is @cpu allowed for @pool? */
5058 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
5061 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
5064 for_each_pool_worker(worker, pool)
5070 struct worker_pool *pool;
5072 for_each_cpu_worker_pool(pool, cpu) {
5073 if (pool->nr_workers)
5075 if (!create_worker(pool))
5083 struct worker_pool *pool;
5089 for_each_pool(pool, pi) {
5092 if (pool->cpu == cpu)
5093 rebind_workers(pool);
5094 else if (pool->cpu < 0)
5095 restore_unbound_workers_cpumask(pool, cpu);
5195 * pool->worklist.
5198 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5270 * frozen works are transferred to their respective pool worklists.
5273 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5402 * pool_ids RO int : the associated pool IDs for each node
5470 unbound_pwq_by_node(wq, node)->pool->id);
5802 struct worker_pool *pool;
5810 for_each_pool(pool, pi) {
5813 if (list_empty(&pool->worklist))
5822 /* get the latest of pool and touched timestamps */
5823 pool_ts = READ_ONCE(pool->watchdog_ts);
5831 if (pool->cpu >= 0) {
5834 pool->cpu));
5842 pr_emerg("BUG: workqueue lockup - pool");
5843 pr_cont_pool_info(pool);
5985 struct worker_pool *pool;
5988 for_each_cpu_worker_pool(pool, cpu) {
5989 BUG_ON(init_worker_pool(pool));
5990 pool->cpu = cpu;
5991 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
5992 pool->attrs->nice = std_nice[i++];
5993 pool->node = cpu_to_node(cpu);
5995 /* alloc pool ID */
5997 BUG_ON(worker_pool_assign_id(pool));
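At early boot every possible CPU gets NR_STD_WORKER_POOLS bound pools, each taking the CPU's cpumask, its NUMA node, a nice level from std_nice[] and a freshly assigned pool ID. A compact illustration of that setup loop; the topology, the pool count of two and the -20 highpri nice value are assumptions standing in for cpumask_of(), cpu_to_node() and HIGHPRI_NICE_LEVEL:

#include <stdio.h>

#define NR_CPUS      4
#define NR_STD_POOLS 2

struct bpool { int cpu, id, nice, node; };

static struct bpool cpu_pools[NR_CPUS][NR_STD_POOLS];

int main(void)
{
        const int std_nice[NR_STD_POOLS] = { 0, -20 };   /* normal, assumed highpri level */
        int next_id = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                for (int i = 0; i < NR_STD_POOLS; i++) {
                        struct bpool *p = &cpu_pools[cpu][i];

                        p->cpu  = cpu;                   /* bound pool: one CPU only */
                        p->nice = std_nice[i];
                        p->node = cpu / 2;               /* fake two-node topology */
                        p->id   = next_id++;             /* worker_pool_assign_id() stand-in */
                }
        }

        printf("pool %d: cpu=%d nice=%d node=%d\n",
               cpu_pools[1][1].id, cpu_pools[1][1].cpu,
               cpu_pools[1][1].nice, cpu_pools[1][1].node);
        return 0;
}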
6051 struct worker_pool *pool;
6068 for_each_cpu_worker_pool(pool, cpu) {
6069 pool->node = cpu_to_node(cpu);
6084 for_each_cpu_worker_pool(pool, cpu) {
6085 pool->flags &= ~POOL_DISASSOCIATED;
6086 BUG_ON(!create_worker(pool));
6090 hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
6091 BUG_ON(!create_worker(pool));