// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 * Copyright (C) 2014 Fujitsu.  All rights reserved.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#include "ctree.h"

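/*
 * Bits in btrfs_work::flags:
 *
 * WORK_DONE_BIT	the normal work function has finished
 *			(set in btrfs_work_helper())
 * WORK_ORDER_DONE_BIT	the ordered function has been called
 *			(set in run_ordered_work())
 * WORK_HIGH_PRIO_BIT	queue this work to the high priority workqueue
 *			(set via btrfs_set_work_high_priority())
 */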
enum {
	WORK_DONE_BIT,
	WORK_ORDER_DONE_BIT,
	WORK_HIGH_PRIO_BIT,
};

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

struct __btrfs_workqueue {
	struct workqueue_struct *normal_wq;

	/* File system this workqueue services */
	struct btrfs_fs_info *fs_info;

	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Thresholding related variables */
	atomic_t pending;

	/* Upper limit of concurrent workers */
	int limit_active;

	/* Current number of concurrent workers */
	int current_active;

	/* Threshold to change current_active */
	int thresh;
	unsigned int count;
	spinlock_t thres_lock;
};

struct btrfs_workqueue {
	struct __btrfs_workqueue *normal;
	struct __btrfs_workqueue *high;
};

struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
{
	return wq->fs_info;
}

struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
{
	return work->wq->fs_info;
}

bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
{
	/*
	 * We could compare wq->normal->pending with num_online_cpus()
	 * to support the "thresh == NO_THRESHOLD" case, but it requires
	 * moving up atomic_inc/dec in thresh_queue/exec_hook. Let's
	 * postpone it until someone needs support for that case.
	 */
	if (wq->normal->thresh == NO_THRESHOLD)
		return false;

	return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
}
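
/*
 * A hypothetical caller could use the check above to throttle itself, for
 * example (all names below are illustrative only):
 *
 *	if (btrfs_workqueue_normal_congested(fs_info->foo_workers))
 *		handle_item_inline(item);
 *	else
 *		btrfs_queue_work(fs_info->foo_workers, &item->work);
 *
 * The queue only reports congestion once more than 2 * thresh items are
 * pending, and never when thresholding is disabled.
 */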

static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
			unsigned int flags, int limit_active, int thresh)
{
	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->fs_info = fs_info;
	ret->limit_active = limit_active;
	atomic_set(&ret->pending, 0);
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* For low threshold, disabling threshold is a better choice */
	if (thresh < DFT_THRESHOLD) {
		ret->current_active = limit_active;
		ret->thresh = NO_THRESHOLD;
	} else {
		/*
		 * For threshold-able wq, let its concurrency grow on demand.
		 * Use minimal max_active at alloc time to reduce resource
		 * usage.
		 */
		ret->current_active = 1;
		ret->thresh = thresh;
	}

	if (flags & WQ_HIGHPRI)
		ret->normal_wq = alloc_workqueue("btrfs-%s-high", flags,
						 ret->current_active, name);
	else
		ret->normal_wq = alloc_workqueue("btrfs-%s", flags,
						 ret->current_active, name);
	if (!ret->normal_wq) {
		kfree(ret);
		return NULL;
	}

	INIT_LIST_HEAD(&ret->ordered_list);
	spin_lock_init(&ret->list_lock);
	spin_lock_init(&ret->thres_lock);
	trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
	return ret;
}
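
/*
 * Example of the thresholding setup above: a (hypothetical) call like
 *
 *	__btrfs_alloc_workqueue(fs_info, "worker", WQ_UNBOUND, 8, 0);
 *
 * ends up with thresh = DFT_THRESHOLD (32) and current_active = 1, so the
 * underlying workqueue starts with a max_active of 1 and only grows (up to
 * limit_active = 8) once enough work is pending; see thresh_exec_hook()
 * below.  Passing a non-zero thresh below 32 disables the mechanism and
 * runs with max_active = limit_active from the start.
 */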

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
					      const char *name,
					      unsigned int flags,
					      int limit_active,
					      int thresh)
{
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->normal = __btrfs_alloc_workqueue(fs_info, name,
					      flags & ~WQ_HIGHPRI,
					      limit_active, thresh);
	if (!ret->normal) {
		kfree(ret);
		return NULL;
	}

	if (flags & WQ_HIGHPRI) {
		ret->high = __btrfs_alloc_workqueue(fs_info, name, flags,
						    limit_active, thresh);
		if (!ret->high) {
			__btrfs_destroy_workqueue(ret->normal);
			kfree(ret);
			return NULL;
		}
	}
	return ret;
}
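
/*
 * An illustrative allocation by a caller might look like the snippet
 * below; "foo_workers", the "foo" name and the max_active/thresh values
 * are placeholders only, and error handling is up to the caller:
 *
 *	fs_info->foo_workers =
 *		btrfs_alloc_workqueue(fs_info, "foo", WQ_HIGHPRI,
 *				      max_active, 0);
 *	if (!fs_info->foo_workers)
 *		return -ENOMEM;
 *
 * Passing WQ_HIGHPRI creates both the normal and the high priority
 * internal workqueues; without it only ->normal is created.
 */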

/*
 * Hook for the threshold mechanism, called from btrfs_queue_work().
 * This hook WILL be called in IRQ handler context,
 * so workqueue_set_max_active() MUST NOT be called here.
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_inc(&wq->pending);
}

/*
 * Hook for the threshold mechanism, called before executing the work.
 * This hook is called in kthread context, so workqueue_set_max_active()
 * may be called here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
	int new_current_active;
	long pending;
	int need_change = 0;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit the calling frequency of
	 * workqueue_set_max_active().
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (!wq->count)
		goto out;
	new_current_active = wq->current_active;

	/*
	 * pending may change under us, but that is fine: the value does
	 * not need to be that accurate to calculate the new current_active.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
		new_current_active++;
	if (pending < wq->thresh / 2)
		new_current_active--;
	new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
	if (new_current_active != wq->current_active) {
		need_change = 1;
		wq->current_active = new_current_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	if (need_change)
		workqueue_set_max_active(wq->normal_wq, wq->current_active);
}
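
/*
 * Worked example of the adjustment above, assuming thresh = 32 and
 * limit_active = 8: when the adjustment runs, pending > 32 proposes
 * current_active + 1 and pending < 16 proposes current_active - 1; the
 * result is clamped to [1, 8] and, only if it actually changed,
 * workqueue_set_max_active() is called after dropping thres_lock.
 */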

static void run_ordered_work(struct __btrfs_workqueue *wq,
			     struct btrfs_work *self)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;
	bool free_self = false;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;
		/*
		 * Orders all subsequent loads after reading WORK_DONE_BIT.
		 * Paired with the smp_mb__before_atomic() in
		 * btrfs_work_helper(), this guarantees that the ordered
		 * function will see all updates from the ordinary work
		 * function.
		 */
		smp_rmb();

		/*
		 * we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		trace_btrfs_ordered_sched(work);
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		if (work == self) {
			/*
			 * This is the work item that the worker is currently
			 * executing.
			 *
			 * The kernel workqueue code guarantees non-reentrancy
			 * of work items. I.e., if a work item with the same
			 * address and work function is queued twice, the second
			 * execution is blocked until the first one finishes. A
			 * work item may be freed and recycled with the same
			 * work function; the workqueue code assumes that the
			 * original work item cannot depend on the recycled work
			 * item in that case (see find_worker_executing_work()).
			 *
			 * Note that different types of Btrfs work can depend on
			 * each other, and one type of work on one Btrfs
			 * filesystem may even depend on the same type of work
			 * on another Btrfs filesystem via, e.g., a loop device.
			 * Therefore, we must not allow the current work item to
			 * be recycled until we are really done, otherwise we
			 * break the above assumption and can deadlock.
			 */
			free_self = true;
		} else {
			/*
			 * We don't want to call the ordered free functions with
			 * the lock held.
			 */
			work->ordered_free(work);
			/* NB: work must not be dereferenced past this point. */
			trace_btrfs_all_work_done(wq->fs_info, work);
		}
	}
	spin_unlock_irqrestore(lock, flags);

	if (free_self) {
		self->ordered_free(self);
		/* NB: self must not be dereferenced past this point. */
		trace_btrfs_all_work_done(wq->fs_info, self);
	}
}
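
/*
 * The "leave the work item on the list" barrier above gives strict
 * ordering.  For example, if items A, B and C were queued in that order
 * and B's normal work function happens to finish first, B's ordered
 * function still cannot run: A is the head of ordered_list and does not
 * have WORK_DONE_BIT set yet, so the loop stops there.  Once A's normal
 * function completes, whichever worker enters run_ordered_work() next
 * processes A and then B (and C, if it is done) in queue order.
 */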

static void btrfs_work_helper(struct work_struct *normal_work)
{
	struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
					       normal_work);
	struct __btrfs_workqueue *wq;
	int need_order = 0;

	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func() if it has no ordered_free, since the
	 *    struct is freed in work->func();
	 * 2) after setting WORK_DONE_BIT, as the work may be freed in
	 *    other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		/*
		 * Ensures all memory accesses done in the work function are
		 * ordered before setting the WORK_DONE_BIT, so that the
		 * thread which is going to execute the ordered work sees
		 * them.  Pairs with the smp_rmb() in run_ordered_work().
		 */
		smp_mb__before_atomic();
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq, work);
	} else {
		/* NB: work must not be dereferenced past this point. */
		trace_btrfs_all_work_done(wq->fs_info, work);
	}
}

void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
		     btrfs_func_t ordered_func, btrfs_func_t ordered_free)
{
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_WORK(&work->normal_work, btrfs_work_helper);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}

static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
				      struct btrfs_work *work)
{
	unsigned long flags;

	work->wq = wq;
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	trace_btrfs_work_queued(work);
	queue_work(wq->normal_wq, &work->normal_work);
}

void btrfs_queue_work(struct btrfs_workqueue *wq,
		      struct btrfs_work *work)
{
	struct __btrfs_workqueue *dest_wq;

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
		dest_wq = wq->high;
	else
		dest_wq = wq->normal;
	__btrfs_queue_work(dest_wq, work);
}
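
/*
 * Putting the pieces together, a purely illustrative user of this API
 * embeds a struct btrfs_work in its own context structure, initializes
 * it and queues it; "my_ctx", "my_func", "my_ordered", "my_free" and
 * "foo_workers" are placeholder names, error handling omitted:
 *
 *	struct my_ctx {
 *		struct btrfs_work work;
 *		...
 *	};
 *
 *	struct my_ctx *ctx = kmalloc(sizeof(*ctx), GFP_NOFS);
 *
 *	btrfs_init_work(&ctx->work, my_func, my_ordered, my_free);
 *	btrfs_queue_work(fs_info->foo_workers, &ctx->work);
 *
 * my_free() is typically where the containing structure is released once
 * both the normal and the ordered functions have run; passing NULL for
 * ordered_func/ordered_free skips the ordering machinery entirely.
 */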

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
	destroy_workqueue(wq->normal_wq);
	trace_btrfs_workqueue_destroy(wq);
	kfree(wq);
}

void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
	if (wq->high)
		__btrfs_destroy_workqueue(wq->high);
	__btrfs_destroy_workqueue(wq->normal);
	kfree(wq);
}

void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
	if (!wq)
		return;
	wq->normal->limit_active = limit_active;
	if (wq->high)
		wq->high->limit_active = limit_active;
}

void btrfs_set_work_high_priority(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}

void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
{
	if (wq->high)
		flush_workqueue(wq->high->normal_wq);

	flush_workqueue(wq->normal->normal_wq);
}