Lines matching refs:wq (btrfs workqueue implementation)

56 struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
58 return wq->fs_info;
63 return work->wq->fs_info;
66 bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
69 * We could compare wq->normal->pending with num_online_cpus()
74 if (wq->normal->thresh == NO_THRESHOLD)
77 return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
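
The btrfs_workqueue_normal_congested() fragments above show that congestion is only meaningful for thresholded queues: a queue created with NO_THRESHOLD never reports congestion, while a thresholded one counts as congested once its pending counter exceeds twice its threshold. A minimal sketch of how a caller might use this to back off rather than queue more work; the wrapper function below is hypothetical, while btrfs_workqueue_normal_congested() and the delayed_workers member of btrfs_fs_info are taken from the kernel:

    /* Hypothetical helper; only the congestion check itself is from the listing. */
    static bool example_should_defer_more_work(struct btrfs_fs_info *fs_info)
    {
            /*
             * Once pending work exceeds twice the queue's threshold, queueing
             * more only adds latency, so ask the caller to retry later.
             */
            return btrfs_workqueue_normal_congested(fs_info->delayed_workers);
    }
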
100 * For threshold-able wq, let its concurrency grow on demand.
127 __btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);
165 static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
167 if (wq->thresh == NO_THRESHOLD)
169 atomic_inc(&wq->pending);
177 static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
183 if (wq->thresh == NO_THRESHOLD)
186 atomic_dec(&wq->pending);
187 spin_lock(&wq->thres_lock);
189 * Use wq->count to limit the calling frequency of
192 wq->count++;
193 wq->count %= (wq->thresh / 4);
194 if (!wq->count)
196 new_current_active = wq->current_active;
202 pending = atomic_read(&wq->pending);
203 if (pending > wq->thresh)
205 if (pending < wq->thresh / 2)
207 new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
208 if (new_current_active != wq->current_active) {
210 wq->current_active = new_current_active;
213 spin_unlock(&wq->thres_lock);
216 workqueue_set_max_active(wq->normal_wq, wq->current_active);
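
thresh_queue_hook() and thresh_exec_hook() form a small feedback loop: the pending counter goes up when work is queued and down when it executes, and every wq->thresh / 4 executions the queue's effective concurrency (current_active) is nudged up if pending has climbed above thresh, nudged down if it has fallen below thresh / 2, clamped to [1, limit_active], and finally pushed to the underlying kernel workqueue with workqueue_set_max_active(). The following is a standalone userspace model of just that adjustment step, not kernel code; the function name, sample numbers, and thresh/limit values are made up for illustration:

    #include <stdio.h>

    /* Userspace model of the adjustment made in thresh_exec_hook(). */
    static int adjust_active(int current_active, int pending, int thresh,
                             int limit_active)
    {
            int new_active = current_active;

            if (pending > thresh)           /* backlog growing: add a worker */
                    new_active++;
            if (pending < thresh / 2)       /* backlog draining: drop a worker */
                    new_active--;

            /* equivalent of clamp_val(new_active, 1, limit_active) */
            if (new_active < 1)
                    new_active = 1;
            if (new_active > limit_active)
                    new_active = limit_active;
            return new_active;
    }

    int main(void)
    {
            int active = 1;
            int samples[] = { 10, 40, 70, 70, 20, 8, 4 };

            /* Simulate a burst of pending work against thresh = 32, limit = 8. */
            for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                    active = adjust_active(active, samples[i], 32, 8);
                    printf("pending=%3d -> current_active=%d\n", samples[i], active);
            }
            return 0;
    }
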
220 static void run_ordered_work(struct __btrfs_workqueue *wq,
223 struct list_head *list = &wq->ordered_list;
225 spinlock_t *lock = &wq->list_lock;
292 trace_btrfs_all_work_done(wq->fs_info, work);
300 trace_btrfs_all_work_done(wq->fs_info, self);
308 struct __btrfs_workqueue *wq;
321 wq = work->wq;
324 thresh_exec_hook(wq);
335 run_ordered_work(wq, work);
338 trace_btrfs_all_work_done(wq->fs_info, work);
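
The execution-side fragments show the worker reading back work->wq (stored at queue time, see __btrfs_queue_work() below), updating the threshold accounting with thresh_exec_hook(), and then calling run_ordered_work(), which walks wq->ordered_list under wq->list_lock so that each item's ordered function runs strictly in queueing order even though the normal functions may have run concurrently. A sketch of the two callbacks such a work item would carry; both functions and the split of duties are hypothetical, chosen only to illustrate the parallel-then-ordered contract:

    /* Hypothetical callbacks illustrating the two-phase contract. */
    static void example_csum_work(struct btrfs_work *work)
    {
            /* Heavy, parallelizable part: may run concurrently on any CPU. */
    }

    static void example_csum_ordered(struct btrfs_work *work)
    {
            /*
             * Serialized part: run_ordered_work() invokes this strictly in the
             * order the items were added to wq->ordered_list.
             */
    }
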
353 static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
358 work->wq = wq;
359 thresh_queue_hook(wq);
361 spin_lock_irqsave(&wq->list_lock, flags);
362 list_add_tail(&work->ordered_list, &wq->ordered_list);
363 spin_unlock_irqrestore(&wq->list_lock, flags);
366 queue_work(wq->normal_wq, &work->normal_work);
369 void btrfs_queue_work(struct btrfs_workqueue *wq,
374 if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
375 dest_wq = wq->high;
377 dest_wq = wq->normal;
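
__btrfs_queue_work() records the owning queue in the work item, bumps the threshold accounting, links the item onto the ordered list, and hands it to the underlying kernel workqueue; btrfs_queue_work() merely picks wq->high when the work has been marked high priority and a high-priority sub-queue exists, falling back to wq->normal otherwise. A hedged sketch of the caller side, reusing the hypothetical callbacks above; the four-argument btrfs_init_work() shown here follows the 5.x API as best I recall (it has changed across kernel versions), and fs_info->workers is just one convenient existing queue to target:

    /* Hypothetical submission path; signatures vary between kernel versions. */
    static void example_submit(struct btrfs_fs_info *fs_info,
                               struct btrfs_work *work, bool urgent)
    {
            btrfs_init_work(work, example_csum_work, example_csum_ordered, NULL);
            if (urgent)
                    btrfs_set_work_high_priority(work);  /* routes to wq->high if present */
            btrfs_queue_work(fs_info->workers, work);
    }
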
382 __btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
384 destroy_workqueue(wq->normal_wq);
385 trace_btrfs_workqueue_destroy(wq);
386 kfree(wq);
389 void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
391 if (!wq)
393 if (wq->high)
394 __btrfs_destroy_workqueue(wq->high);
395 __btrfs_destroy_workqueue(wq->normal);
396 kfree(wq);
399 void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
401 if (!wq)
403 wq->normal->limit_active = limit_active;
404 if (wq->high)
405 wq->high->limit_active = limit_active;
413 void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
415 if (wq->high)
416 flush_workqueue(wq->high->normal_wq);
418 flush_workqueue(wq->normal->normal_wq);
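
The remaining fragments cover the queue lifecycle: destruction is NULL-safe, tears down the high-priority sub-queue first (when one exists) and then the normal one, btrfs_workqueue_set_max() propagates a new limit_active to both sub-queues, and btrfs_flush_workqueue() flushes high before normal. A sketch of that lifecycle from a caller's point of view; btrfs_alloc_workqueue() and its argument order are quoted from memory of the 5.x API and should be treated as an assumption, and the queue name, limits, and surrounding function are illustrative:

    /* Hypothetical setup/teardown around the btrfs workqueue API. */
    static int example_wq_lifecycle(struct btrfs_fs_info *fs_info)
    {
            struct btrfs_workqueue *wq;

            /* Assumed signature: fs_info, name, WQ_* flags, limit_active, thresh. */
            wq = btrfs_alloc_workqueue(fs_info, "example-wq", WQ_HIGHPRI, 8, 0);
            if (!wq)
                    return -ENOMEM;

            /* ... btrfs_queue_work() calls would go here ... */

            btrfs_workqueue_set_max(wq, 16);   /* raises limit_active on normal and high */
            btrfs_flush_workqueue(wq);         /* waits for everything queued so far */
            btrfs_destroy_workqueue(wq);       /* safe even if wq were NULL */
            return 0;
    }
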