Lines matching refs: pwq
197 * point to the pwq; thus, pwqs need to be aligned at two's power of the
215 * Release of unbound pwq is punted to system_wq. See put_pwq()
217 * itself is also RCU protected so that the first pwq can be
255 int saved_max_active; /* WQ: saved pwq max_active */
360 static void show_pwq(struct pool_workqueue *pwq);
415 * @pwq: iteration cursor
419 * If the pwq needs to be used beyond the locking in effect, the caller is
420 * responsible for guaranteeing that the pwq stays online.
425 #define for_each_pwq(pwq, wq) \
426 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \
555 * If the pwq needs to be used beyond the locking in effect, the caller is
556 * responsible for guaranteeing that the pwq stays online.
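
Reading the for_each_pwq() matches together with its users further down (the flush, drain, destroy and show paths), the iteration is always wrapped in wq->mutex or an RCU read-side section, with pwq->pool->lock taken around any access to an individual pwq. A schematic of that usage pattern, illustrative only since for_each_pwq() is local to kernel/workqueue.c:

    /* Schematic only -- mirrors the drain and show paths matched below. */
    struct pool_workqueue *pwq;

    rcu_read_lock();
    for_each_pwq(pwq, wq) {
            raw_spin_lock_irq(&pwq->pool->lock);
            /* pwq->nr_active, pwq->inactive_works etc. are stable here */
            raw_spin_unlock_irq(&pwq->pool->lock);
    }
    rcu_read_unlock();
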
595 * contain the pointer to the queued pwq. Once execution starts, the flag
599 * and clear_work_data() can be used to set the pwq, pool or clear
603 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
605 * queued anywhere after initialization until it is sync canceled. pwq is
620 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
623 set_work_data(work, (unsigned long)pwq,
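
The work->data scheme matched above packs a pwq pointer and a few status flag bits into a single word; this only works because every pwq is aligned to a power of two larger than the flag mask (the alignment comment matched at the top of this listing, and the BUG_ON in init_pwq() further down). A minimal user-space C model of that encoding; FLAG_BITS, pack(), unpack() and struct fake_pwq are invented for the illustration:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define FLAG_BITS 4                                /* low bits reserved for flags */
    #define FLAG_MASK ((1UL << FLAG_BITS) - 1)

    struct fake_pwq { int id; };

    static uintptr_t pack(struct fake_pwq *pwq, unsigned long flags)
    {
            assert(((uintptr_t)pwq & FLAG_MASK) == 0); /* alignment leaves room for flags */
            assert((flags & ~FLAG_MASK) == 0);
            return (uintptr_t)pwq | flags;
    }

    static struct fake_pwq *unpack(uintptr_t data, unsigned long *flags)
    {
            *flags = data & FLAG_MASK;
            return (struct fake_pwq *)(data & ~FLAG_MASK);
    }

    int main(void)
    {
            /* size rounded up to the alignment to keep aligned_alloc() happy */
            struct fake_pwq *pwq = aligned_alloc(1UL << FLAG_BITS, 1UL << FLAG_BITS);
            unsigned long flags;

            if (!pwq)
                    return 1;
            pwq->id = 42;

            uintptr_t data = pack(pwq, 0x3);           /* pointer plus two flag bits */
            struct fake_pwq *back = unpack(data, &flags);
            printf("id=%d flags=%#lx\n", back->id, flags);
            free(pwq);
            return 0;
    }
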
1095 * @pwq: pool_workqueue to get
1097 * Obtain an extra reference on @pwq. The caller should guarantee that
1098 * @pwq has positive refcnt and be holding the matching pool->lock.
1100 static void get_pwq(struct pool_workqueue *pwq)
1102 lockdep_assert_held(&pwq->pool->lock);
1103 WARN_ON_ONCE(pwq->refcnt <= 0);
1104 pwq->refcnt++;
1109 * @pwq: pool_workqueue to put
1111 * Drop a reference of @pwq. If its refcnt reaches zero, schedule its
1114 static void put_pwq(struct pool_workqueue *pwq)
1116 lockdep_assert_held(&pwq->pool->lock);
1117 if (likely(--pwq->refcnt))
1119 if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
1122 * @pwq can't be released under pool->lock, bounce to
1129 schedule_work(&pwq->unbound_release_work);
1134 * @pwq: pool_workqueue to put (can be %NULL)
1136 * put_pwq() with locking. This function also allows %NULL @pwq.
1138 static void put_pwq_unlocked(struct pool_workqueue *pwq)
1140 if (pwq) {
1145 raw_spin_lock_irq(&pwq->pool->lock);
1146 put_pwq(pwq);
1147 raw_spin_unlock_irq(&pwq->pool->lock);
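
get_pwq(), put_pwq() and put_pwq_unlocked() matched above form a refcount protected by the owning pool's lock rather than an atomic: both operations assert the lock, and the unlocked variant exists for callers that do not already hold it (it also tolerates NULL). A small user-space sketch of the same discipline with a pthread mutex standing in for pool->lock; struct fake_pool, get_ref() and friends are invented names. The kernel never frees the pwq under the lock (it defers the release to a work item and RCU), which is why the model grabs the pool pointer before the final put:

    #include <assert.h>
    #include <pthread.h>
    #include <stdlib.h>

    struct fake_pool { pthread_mutex_t lock; };

    struct fake_pwq {
            struct fake_pool *pool;
            int refcnt;                     /* protected by pool->lock, like pwq->refcnt */
    };

    /* Caller holds pwq->pool->lock; the count must already be positive. */
    static void get_ref(struct fake_pwq *pwq)
    {
            assert(pwq->refcnt > 0);
            pwq->refcnt++;
    }

    /* Caller holds pwq->pool->lock; last put frees (the kernel defers this). */
    static void put_ref(struct fake_pwq *pwq)
    {
            if (--pwq->refcnt)
                    return;
            free(pwq);
    }

    /* For callers not holding the pool lock; @pwq may be NULL. */
    static void put_ref_unlocked(struct fake_pwq *pwq)
    {
            if (pwq) {
                    struct fake_pool *pool = pwq->pool; /* pool outlives the pwq here */

                    pthread_mutex_lock(&pool->lock);
                    put_ref(pwq);
                    pthread_mutex_unlock(&pool->lock);
            }
    }

    int main(void)
    {
            struct fake_pool pool = { .lock = PTHREAD_MUTEX_INITIALIZER };
            struct fake_pwq *pwq = malloc(sizeof(*pwq));

            if (!pwq)
                    return 1;
            pwq->pool = &pool;
            pwq->refcnt = 1;                /* base reference, as set up by init_pwq() */

            pthread_mutex_lock(&pool.lock);
            get_ref(pwq);                   /* e.g. pinning the pwq for a mayday entry */
            pthread_mutex_unlock(&pool.lock);

            put_ref_unlocked(pwq);          /* drop the pin */
            put_ref_unlocked(pwq);          /* drop the base reference and free */
            return 0;
    }
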
1153 struct pool_workqueue *pwq = get_work_pwq(work);
1156 if (list_empty(&pwq->pool->worklist))
1157 pwq->pool->watchdog_ts = jiffies;
1158 move_linked_works(work, &pwq->pool->worklist, NULL);
1160 pwq->nr_active++;
1163 static void pwq_activate_first_inactive(struct pool_workqueue *pwq)
1165 struct work_struct *work = list_first_entry(&pwq->inactive_works,
1172 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1173 * @pwq: pwq of interest
1177 * decrement nr_in_flight of its pwq and handle workqueue flushing.
1182 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
1188 pwq->nr_in_flight[color]--;
1190 pwq->nr_active--;
1191 if (!list_empty(&pwq->inactive_works)) {
1193 if (pwq->nr_active < pwq->max_active)
1194 pwq_activate_first_inactive(pwq);
1198 if (likely(pwq->flush_color != color))
1202 if (pwq->nr_in_flight[color])
1205 /* this pwq is done, clear flush_color */
1206 pwq->flush_color = -1;
1209 * If this was the last pwq, wake up the first flusher. It
1212 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
1213 complete(&pwq->wq->first_flusher->done);
1215 put_pwq(pwq);
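
pwq_dec_nr_in_flight() above does two jobs: it retires one in-flight item of a given flush color (the flush bookkeeping is sketched separately after the flush_workqueue_prep_pwqs() matches below), and it re-checks the max_active gate so a deferred item can be promoted off inactive_works. Together with the admission branch in __queue_work() further down, that is the whole max_active mechanism; a user-space model with invented names follows:

    #include <stddef.h>

    /* Invented model: at most max_active items run, the rest are parked. */
    struct item { struct item *next; };

    struct model_pwq {
            int nr_active;
            int max_active;
            struct item *inactive_head, *inactive_tail;
    };

    /* Queueing side (__queue_work()): returns 1 if the item may run now,
     * 0 if it was parked on the inactive list instead. */
    static int queue_item(struct model_pwq *p, struct item *it)
    {
            if (p->nr_active < p->max_active) {
                    p->nr_active++;         /* would go on the pool worklist */
                    return 1;
            }
            it->next = NULL;                /* park, as on pwq->inactive_works */
            if (p->inactive_tail)
                    p->inactive_tail->next = it;
            else
                    p->inactive_head = it;
            p->inactive_tail = it;
            return 0;
    }

    /* Completion side (pwq_dec_nr_in_flight()): retire one running item and,
     * if the limit now allows it, activate the oldest parked one. */
    static struct item *complete_item(struct model_pwq *p)
    {
            p->nr_active--;
            if (p->inactive_head && p->nr_active < p->max_active) {
                    struct item *it = p->inactive_head;

                    p->inactive_head = it->next;
                    if (!p->inactive_head)
                            p->inactive_tail = NULL;
                    p->nr_active++;         /* pwq_activate_first_inactive() */
                    return it;              /* caller hands it to a worker */
            }
            return NULL;
    }
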
1252 struct pool_workqueue *pwq;
1284 * work->data is guaranteed to point to pwq only while the work
1285 * item is queued on pwq->wq, and both updating work->data to point
1286 * to pwq on queueing and to pool on dequeueing are done under
1287 * pwq->pool->lock. This in turn guarantees that, if work->data
1288 * points to pwq which is associated with a locked pool, the work
1291 pwq = get_work_pwq(work);
1292 if (pwq && pwq->pool == pool) {
1298 * on the inactive_works list, will confuse pwq->nr_active
1306 pwq_dec_nr_in_flight(pwq, get_work_color(work));
1308 /* work->data points to pwq iff queued, point to pool */
1327 * @pwq: pwq @work belongs to
1332 * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to
1338 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1341 struct worker_pool *pool = pwq->pool;
1344 set_work_pwq(work, pwq, extra_flags);
1346 get_pwq(pwq);
1411 struct pool_workqueue *pwq;
1432 /* pwq which will be used unless @work is executing elsewhere */
1436 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
1440 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
1449 if (last_pool && last_pool != pwq->pool) {
1457 pwq = worker->current_pwq;
1461 raw_spin_lock(&pwq->pool->lock);
1464 raw_spin_lock(&pwq->pool->lock);
1468 * pwq is determined and locked. For unbound pools, we could have
1469 * raced with pwq release and it could already be dead. If its
1470 * refcnt is zero, repeat pwq selection. Note that pwqs never die
1471 * without another pwq replacing it in the numa_pwq_tbl or while
1475 if (unlikely(!pwq->refcnt)) {
1477 raw_spin_unlock(&pwq->pool->lock);
1482 WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
1486 /* pwq determined, queue */
1487 trace_workqueue_queue_work(req_cpu, pwq, work);
1492 pwq->nr_in_flight[pwq->work_color]++;
1493 work_flags = work_color_to_flags(pwq->work_color);
1495 if (likely(pwq->nr_active < pwq->max_active)) {
1497 pwq->nr_active++;
1498 worklist = &pwq->pool->worklist;
1500 pwq->pool->watchdog_ts = jiffies;
1503 worklist = &pwq->inactive_works;
1507 insert_work(pwq, work, worklist, work_flags);
1510 raw_spin_unlock(&pwq->pool->lock);
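
One detail of __queue_work() worth stitching together from the scattered matches above: the pwq is chosen under RCU (per-cpu table or numa_pwq_tbl), so by the time pool->lock is taken an unbound pwq may already have dropped to zero references, and the selection is simply repeated. A rough schematic of that revalidation loop, condensed from the lines above and not compilable on its own:

    retry:
            /* pick pwq: per_cpu_ptr(wq->cpu_pwqs, cpu) or unbound_pwq_by_node() */
            raw_spin_lock(&pwq->pool->lock);
            if (unlikely(!pwq->refcnt)) {
                    if (wq->flags & WQ_UNBOUND) {
                            raw_spin_unlock(&pwq->pool->lock);
                            cpu_relax();
                            goto retry;     /* a replacement pwq is installed first */
                    }
                    /* per-cpu pwqs are never released; this would be a WARN_ONCE */
            }
            /* pwq determined and pinned by pool->lock: account color, queue work */
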
2026 struct pool_workqueue *pwq = get_work_pwq(work);
2027 struct workqueue_struct *wq = pwq->wq;
2035 if (list_empty(&pwq->mayday_node)) {
2037 * If @pwq is for an unbound wq, its base ref may be put at
2038 * any time due to an attribute change. Pin @pwq until the
2041 get_pwq(pwq);
2042 list_add_tail(&pwq->mayday_node, &wq->maydays);
2179 struct pool_workqueue *pwq = get_work_pwq(work);
2181 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
2217 worker->current_pwq = pwq;
2224 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
2257 lock_map_acquire(&pwq->wq->lockdep_map);
2289 lock_map_release(&pwq->wq->lockdep_map);
2324 pwq_dec_nr_in_flight(pwq, work_color);
2494 * pwq(s) queued. This can happen by non-rescuer workers consuming
2501 /* see whether any pwq is asking for help */
2505 struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2507 struct worker_pool *pool = pwq->pool;
2512 list_del_init(&pwq->mayday_node);
2526 if (get_work_pwq(work) == pwq) {
2541 * queueing. Let's put @pwq back on mayday list so
2546 if (pwq->nr_active && need_to_create_worker(pool)) {
2552 if (wq->rescuer && list_empty(&pwq->mayday_node)) {
2553 get_pwq(pwq);
2554 list_add_tail(&pwq->mayday_node, &wq->maydays);
2564 put_pwq(pwq);
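
send_mayday() and the rescuer loop above use the same pinning idiom in two places: take an extra reference with get_pwq() before adding the pwq to wq->maydays, and put_pwq() only after the mayday entry has been handled, so an unbound pwq cannot be released while it sits on the list. A tiny user-space rendition of "keep a reference while on an auxiliary list"; every name here is invented:

    #include <stddef.h>
    #include <stdlib.h>

    struct pinned {
            int refcnt;
            int on_list;                    /* like !list_empty(&pwq->mayday_node) */
            struct pinned *mayday_next;
    };

    static void put_pin(struct pinned *p)
    {
            if (--p->refcnt == 0)
                    free(p);
    }

    /* Queue @p for rescue; the extra reference keeps it alive on the list. */
    static void send_mayday(struct pinned *p, struct pinned **list)
    {
            if (p->on_list)
                    return;                 /* already queued */
            p->refcnt++;                    /* get_pwq() */
            p->on_list = 1;
            p->mayday_next = *list;
            *list = p;
    }

    /* Rescuer side: unlink, service, then drop the pin taken above. */
    static void service_one(struct pinned **list)
    {
            struct pinned *p = *list;

            if (!p)
                    return;
            *list = p->mayday_next;
            p->on_list = 0;
            /* ... rescue processing would happen here ... */
            put_pin(p);                     /* put_pwq() */
    }
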
2641 * @pwq: pwq to insert barrier into
2658 * underneath us, so we can't reliably determine pwq from @target.
2663 static void insert_wq_barrier(struct pool_workqueue *pwq,
2699 insert_work(pwq, &barr->work, head,
2712 * -1. If no pwq has in-flight commands at the specified color, all
2713 * pwq->flush_color's stay at -1 and %false is returned. If any pwq
2714 * has in-flight commands, its pwq->flush_color is set to
2715 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
2738 struct pool_workqueue *pwq;
2745 for_each_pwq(pwq, wq) {
2746 struct worker_pool *pool = pwq->pool;
2751 WARN_ON_ONCE(pwq->flush_color != -1);
2753 if (pwq->nr_in_flight[flush_color]) {
2754 pwq->flush_color = flush_color;
2761 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
2762 pwq->work_color = work_color;
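
flush_workqueue_prep_pwqs() above and the flush half of pwq_dec_nr_in_flight() cooperate: the flusher stamps flush_color on every pwq that still has in-flight work of that color and counts them in wq->nr_pwqs_to_flush; each stamped pwq clears its stamp when nr_in_flight[color] reaches zero, and the last one completes the first flusher. A condensed user-space model of that counting (the kernel additionally biases the counter by one while stamping to avoid a premature wakeup, omitted here); NR_COLORS, prep_flush() and the rest are invented names:

    #include <stdbool.h>

    #define NR_COLORS 16

    struct flush_state {
            int nr_pwqs_to_flush;           /* pwqs still holding work of flush_color */
            bool flush_done;                /* stands in for complete(&first_flusher->done) */
    };

    struct color_pwq {
            int nr_in_flight[NR_COLORS];
            int flush_color;                /* -1 unless this pwq owes the current flush */
    };

    /* Flusher side: stamp every pwq that still has work of @color; returns
     * true if at least one pwq must be waited for. */
    static bool prep_flush(struct color_pwq **pwqs, int n, int color,
                           struct flush_state *fs)
    {
            bool wait = false;

            for (int i = 0; i < n; i++) {
                    if (pwqs[i]->nr_in_flight[color]) {
                            pwqs[i]->flush_color = color;
                            fs->nr_pwqs_to_flush++;
                            wait = true;
                    }
            }
            return wait;
    }

    /* Completion side: the color half of pwq_dec_nr_in_flight(). */
    static void retire_work(struct color_pwq *pwq, int color, struct flush_state *fs)
    {
            pwq->nr_in_flight[color]--;
            if (pwq->flush_color != color || pwq->nr_in_flight[color])
                    return;
            pwq->flush_color = -1;          /* this pwq is done with the flush */
            if (--fs->nr_pwqs_to_flush == 0)
                    fs->flush_done = true;  /* wake the first flusher */
    }
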
2946 struct pool_workqueue *pwq;
2962 for_each_pwq(pwq, wq) {
2965 raw_spin_lock_irq(&pwq->pool->lock);
2966 drained = !pwq->nr_active && list_empty(&pwq->inactive_works);
2967 raw_spin_unlock_irq(&pwq->pool->lock);
2992 struct pool_workqueue *pwq;
3005 pwq = get_work_pwq(work);
3006 if (pwq) {
3007 if (unlikely(pwq->pool != pool))
3013 pwq = worker->current_pwq;
3016 check_flush_dependency(pwq->wq, work);
3018 insert_wq_barrier(pwq, barr, work, worker);
3031 (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
3032 lock_map_acquire(&pwq->wq->lockdep_map);
3033 lock_map_release(&pwq->wq->lockdep_map);
3683 * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
3688 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
3690 struct workqueue_struct *wq = pwq->wq;
3691 struct worker_pool *pool = pwq->pool;
3695 * when @pwq is not linked, it doesn't hold any reference to the
3698 if (!list_empty(&pwq->pwqs_node)) {
3703 list_del_rcu(&pwq->pwqs_node);
3712 call_rcu(&pwq->rcu, rcu_free_pwq);
3715 * If we're the last pwq going away, @wq is already dead and no one
3725 * pwq_adjust_max_active - update a pwq's max_active to the current setting
3726 * @pwq: target pool_workqueue
3728 * If @pwq isn't freezing, set @pwq->max_active to the associated
3730 * accordingly. If @pwq is freezing, clear @pwq->max_active to zero.
3732 static void pwq_adjust_max_active(struct pool_workqueue *pwq)
3734 struct workqueue_struct *wq = pwq->wq;
3742 if (!freezable && pwq->max_active == wq->saved_max_active)
3746 raw_spin_lock_irqsave(&pwq->pool->lock, flags);
3756 pwq->max_active = wq->saved_max_active;
3758 while (!list_empty(&pwq->inactive_works) &&
3759 pwq->nr_active < pwq->max_active) {
3760 pwq_activate_first_inactive(pwq);
3771 wake_up_worker(pwq->pool);
3773 pwq->max_active = 0;
3776 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
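
pwq_adjust_max_active() above is the one place a pwq's max_active actually changes: while a freezable workqueue is freezing it is forced to zero so nothing new becomes active, otherwise it is restored to wq->saved_max_active and as many parked items as now fit are activated (this is also how thawing and workqueue_set_max_active() take effect, per the matches further down). A compact user-space model using counts only; the names are invented:

    #include <stdbool.h>

    struct adj_pwq {
            int nr_active;                  /* currently running */
            int nr_inactive;                /* parked on the inactive list */
            int max_active;
    };

    /* Returns how many parked items were activated (each would need a
     * worker wakeup in the kernel). */
    static int adjust_max_active(struct adj_pwq *p, int saved_max_active,
                                 bool freezable, bool freezing)
    {
            int activated = 0;

            if (freezable && freezing) {
                    p->max_active = 0;      /* freezing: admit nothing new */
                    return 0;
            }
            p->max_active = saved_max_active;
            while (p->nr_inactive && p->nr_active < p->max_active) {
                    p->nr_inactive--;
                    p->nr_active++;
                    activated++;
            }
            return activated;
    }
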
3779 /* initialize newly alloced @pwq which is associated with @wq and @pool */
3780 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
3783 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
3785 memset(pwq, 0, sizeof(*pwq));
3787 pwq->pool = pool;
3788 pwq->wq = wq;
3789 pwq->flush_color = -1;
3790 pwq->refcnt = 1;
3791 INIT_LIST_HEAD(&pwq->inactive_works);
3792 INIT_LIST_HEAD(&pwq->pwqs_node);
3793 INIT_LIST_HEAD(&pwq->mayday_node);
3794 INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
3797 /* sync @pwq with the current state of its associated wq and link it */
3798 static void link_pwq(struct pool_workqueue *pwq)
3800 struct workqueue_struct *wq = pwq->wq;
3805 if (!list_empty(&pwq->pwqs_node))
3809 pwq->work_color = wq->work_color;
3812 pwq_adjust_max_active(pwq);
3814 /* link in @pwq */
3815 list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
3818 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */
3823 struct pool_workqueue *pwq;
3831 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
3832 if (!pwq) {
3837 init_pwq(pwq, wq, pool);
3838 return pwq;
3843 * @attrs: the wq_attrs of the default pwq of the target workqueue
3893 /* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
3896 struct pool_workqueue *pwq)
3904 link_pwq(pwq);
3907 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
3955 * Calculate the attrs of the default pwq.
3973 * the default pwq covering whole @attrs->cpumask. Always create
4017 /* save the previous pwq and install the new one */
4031 /* CPUs should stay stable across pwq creations and installations */
4076 * machines, this function maps a separate pwq to each NUMA node with
4080 * back-to-back will stay on its current pwq.
4129 struct pool_workqueue *old_pwq = NULL, *pwq;
4148 pwq = unbound_pwq_by_node(wq, node);
4152 * different from the default pwq's, we need to compare it to @pwq's
4154 * equals the default pwq's, the default pwq should be used.
4157 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
4163 /* create a new pwq */
4164 pwq = alloc_unbound_pwq(wq, target_attrs);
4165 if (!pwq) {
4171 /* Install the new pwq. */
4173 old_pwq = numa_pwq_tbl_install(wq, node, pwq);
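
numa_pwq_tbl_install() above publishes the new pwq with rcu_assign_pointer() and hands the previous table entry back to the caller; that returned pwq is what the caller (and, at teardown, destroy_workqueue() further down) releases with put_pwq_unlocked(). A schematic of the caller side as it appears around the matched line, illustrative only:

    struct pool_workqueue *old_pwq;

    mutex_lock(&wq->mutex);
    old_pwq = numa_pwq_tbl_install(wq, node, pwq);  /* link_pwq() + rcu_assign_pointer() */
    mutex_unlock(&wq->mutex);

    put_pwq_unlocked(old_pwq);  /* drop the table's reference; the old pwq is
                                 * actually freed via its unbound release work
                                 * and RCU once the last reference is gone */
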
4198 struct pool_workqueue *pwq =
4203 init_pwq(pwq, wq, &cpu_pools[highpri]);
4206 link_pwq(pwq);
4215 /* there should only be single pwq for ordering guarantee */
4278 struct pool_workqueue *pwq;
4345 for_each_pwq(pwq, wq)
4346 pwq_adjust_max_active(pwq);
4368 static bool pwq_busy(struct pool_workqueue *pwq)
4373 if (pwq->nr_in_flight[i])
4376 if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1))
4378 if (pwq->nr_active || !list_empty(&pwq->inactive_works))
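
pwq_busy() above is the destroy_workqueue() sanity check: a workqueue may only be torn down once no flush color has work in flight, nothing is active or parked, and nobody but the workqueue itself holds a reference (the default pwq is exempt from the refcount check since per-node table entries may still point at it). A user-space restatement of the predicate over an invented struct:

    #include <stdbool.h>

    #define NR_COLORS 16

    struct busy_pwq {
            int nr_in_flight[NR_COLORS];    /* outstanding work per flush color */
            int refcnt;                     /* 1 == only the base reference left */
            int nr_active;
            bool inactive_empty;
            bool is_default;                /* stands in for pwq == wq->dfl_pwq */
    };

    static bool pwq_is_busy(const struct busy_pwq *p)
    {
            for (int i = 0; i < NR_COLORS; i++)
                    if (p->nr_in_flight[i])
                            return true;
            if (!p->is_default && p->refcnt > 1)
                    return true;
            return p->nr_active || !p->inactive_empty;
    }
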
4392 struct pool_workqueue *pwq;
4424 for_each_pwq(pwq, wq) {
4425 raw_spin_lock_irq(&pwq->pool->lock);
4426 if (WARN_ON(pwq_busy(pwq))) {
4427 pr_warn("%s: %s has the following busy pwq\n",
4429 show_pwq(pwq);
4430 raw_spin_unlock_irq(&pwq->pool->lock);
4436 raw_spin_unlock_irq(&pwq->pool->lock);
4458 * @wq will be freed when the last pwq is released.
4461 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
4463 put_pwq_unlocked(pwq);
4470 pwq = wq->dfl_pwq;
4472 put_pwq_unlocked(pwq);
4489 struct pool_workqueue *pwq;
4502 for_each_pwq(pwq, wq)
4503 pwq_adjust_max_active(pwq);
4560 struct pool_workqueue *pwq;
4570 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
4572 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
4574 ret = !list_empty(&pwq->inactive_works);
4657 struct pool_workqueue *pwq = NULL;
4675 copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq));
4676 copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq));
4710 static void show_pwq(struct pool_workqueue *pwq)
4712 struct worker_pool *pool = pwq->pool;
4718 pr_info(" pwq %d:", pool->id);
4722 pwq->nr_active, pwq->max_active, pwq->refcnt,
4723 !list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
4726 if (worker->current_pwq == pwq) {
4736 if (worker->current_pwq != pwq)
4751 if (get_work_pwq(work) == pwq) {
4761 if (get_work_pwq(work) != pwq)
4770 if (!list_empty(&pwq->inactive_works)) {
4774 list_for_each_entry(work, &pwq->inactive_works, entry) {
4800 struct pool_workqueue *pwq;
4803 for_each_pwq(pwq, wq) {
4804 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
4814 for_each_pwq(pwq, wq) {
4815 raw_spin_lock_irqsave(&pwq->pool->lock, flags);
4816 if (pwq->nr_active || !list_empty(&pwq->inactive_works))
4817 show_pwq(pwq);
4818 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
4911 * are a lot of assumptions on strong associations among work, pwq and
5203 struct pool_workqueue *pwq;
5212 for_each_pwq(pwq, wq)
5213 pwq_adjust_max_active(pwq);
5237 struct pool_workqueue *pwq;
5251 for_each_pwq(pwq, wq) {
5252 WARN_ON_ONCE(pwq->nr_active < 0);
5253 if (pwq->nr_active) {
5278 struct pool_workqueue *pwq;
5290 for_each_pwq(pwq, wq)
5291 pwq_adjust_max_active(pwq);
6011 * An ordered wq should have only one pwq as ordering is