xref: /kernel/linux/linux-6.6/fs/btrfs/locking.c (revision 62306a36)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "misc.h"
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"
#include "accessors.h"

/*
 * Lockdep class keys for extent_buffer->lock, per root.  For a given eb,
 * the lockdep key is determined by the btrfs_root it belongs to and the
 * level the eb occupies in the tree.
 *
 * Different roots are used for different purposes, may nest inside each
 * other, and therefore require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->root_key.objectid.  This ensures that all special purpose
 * roots have separate keysets.
 *
 * Lock nesting across peer nodes is always done with the immediate parent
 * node locked, thus preventing deadlock.  As lockdep doesn't know this, use
 * a subclass to avoid triggering lockdep warnings in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs updating as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#if BTRFS_MAX_LEVEL != 8
#error "BTRFS_MAX_LEVEL changed, update the lockdep keyset definitions below"
#endif

#define DEFINE_LEVEL(stem, level)					\
	.names[level] = "btrfs-" stem "-0" #level,

#define DEFINE_NAME(stem)						\
	DEFINE_LEVEL(stem, 0)						\
	DEFINE_LEVEL(stem, 1)						\
	DEFINE_LEVEL(stem, 2)						\
	DEFINE_LEVEL(stem, 3)						\
	DEFINE_LEVEL(stem, 4)						\
	DEFINE_LEVEL(stem, 5)						\
	DEFINE_LEVEL(stem, 6)						\
	DEFINE_LEVEL(stem, 7)

static struct btrfs_lockdep_keyset {
	u64			id;		/* root objectid */
	/* Longest entry: btrfs-block-group-00 */
	char			names[BTRFS_MAX_LEVEL][24];
	struct lock_class_key	keys[BTRFS_MAX_LEVEL];
} btrfs_lockdep_keysets[] = {
	{ .id = BTRFS_ROOT_TREE_OBJECTID,	DEFINE_NAME("root")	},
	{ .id = BTRFS_EXTENT_TREE_OBJECTID,	DEFINE_NAME("extent")	},
	{ .id = BTRFS_CHUNK_TREE_OBJECTID,	DEFINE_NAME("chunk")	},
	{ .id = BTRFS_DEV_TREE_OBJECTID,	DEFINE_NAME("dev")	},
	{ .id = BTRFS_CSUM_TREE_OBJECTID,	DEFINE_NAME("csum")	},
	{ .id = BTRFS_QUOTA_TREE_OBJECTID,	DEFINE_NAME("quota")	},
	{ .id = BTRFS_TREE_LOG_OBJECTID,	DEFINE_NAME("log")	},
	{ .id = BTRFS_TREE_RELOC_OBJECTID,	DEFINE_NAME("treloc")	},
	{ .id = BTRFS_DATA_RELOC_TREE_OBJECTID,	DEFINE_NAME("dreloc")	},
	{ .id = BTRFS_UUID_TREE_OBJECTID,	DEFINE_NAME("uuid")	},
	{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID,	DEFINE_NAME("free-space") },
	{ .id = BTRFS_BLOCK_GROUP_TREE_OBJECTID, DEFINE_NAME("block-group") },
	{ .id = 0,				DEFINE_NAME("tree")	},
};

#undef DEFINE_LEVEL
#undef DEFINE_NAME

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level)
{
	struct btrfs_lockdep_keyset *ks;

	BUG_ON(level >= ARRAY_SIZE(ks->keys));

	/* Find the matching keyset, id 0 is the default entry */
	for (ks = btrfs_lockdep_keysets; ks->id; ks++)
		if (ks->id == objectid)
			break;

	lockdep_set_class_and_name(&eb->lock, &ks->keys[level], ks->names[level]);
}

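/*
 * Illustrative sketch, not part of the original file: a caller that sets up
 * a freshly allocated tree block would assign the lockdep class before the
 * lock is first taken, similar to what btrfs_init_new_buffer() does.  The
 * variables root, eb and level below are hypothetical.
 *
 *	btrfs_set_buffer_lockdep_class(root->root_key.objectid, eb, level);
 *	btrfs_tree_lock(eb);
 */
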
void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb)
{
	if (test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state))
		btrfs_set_buffer_lockdep_class(root->root_key.objectid,
					       eb, btrfs_header_level(eb));
}

#endif

/*
 * Extent buffer locking
 * =====================
 *
 * We use a rw_semaphore for tree locking, and the semantics are exactly the
 * same:
 *
 * - reader/writer exclusion
 * - writer/writer exclusion
 * - reader/reader sharing
 * - try-lock semantics for readers and writers
 *
 * The rwsem implementation does opportunistic spinning, which reduces the
 * number of times the locking task needs to sleep.
 */

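/*
 * A minimal usage sketch (not part of the original file): the calls below
 * pair up exactly like rw_semaphore operations.  "eb" is assumed to be a
 * referenced extent buffer.
 *
 *	btrfs_tree_read_lock(eb);
 *	(read-only access to the node, other readers may run concurrently)
 *	btrfs_tree_read_unlock(eb);
 *
 *	btrfs_tree_lock(eb);
 *	(the node may be modified, all other readers and writers are excluded)
 *	btrfs_tree_unlock(eb);
 */
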
/*
 * __btrfs_tree_read_lock - lock extent buffer for read
 * @eb:		the eb to be locked
 * @nest:	the nesting level to be used for lockdep
 *
 * This takes the read lock on the extent buffer, using the specified nesting
 * level for lockdep purposes.
 */
void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_read_lock_enabled())
		start_ns = ktime_get_ns();

	down_read_nested(&eb->lock, nest);
	trace_btrfs_tree_read_lock(eb, start_ns);
}

void btrfs_tree_read_lock(struct extent_buffer *eb)
{
	__btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL);
}

/*
 * Try-lock for read.
 *
 * Return 1 if the lock has been taken, 0 otherwise.
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (down_read_trylock(&eb->lock)) {
		trace_btrfs_try_tree_read_lock(eb);
		return 1;
	}
	return 0;
}

/*
 * Try-lock for write.
 *
 * Return 1 if the lock has been taken, 0 otherwise.
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (down_write_trylock(&eb->lock)) {
		eb->lock_owner = current->pid;
		trace_btrfs_try_tree_write_lock(eb);
		return 1;
	}
	return 0;
}

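/*
 * Illustrative pattern (assumed, not taken from this file): callers that must
 * not block typically try the lock and back off on contention, e.g.
 *
 *	if (!btrfs_try_tree_write_lock(eb)) {
 *		free_extent_buffer(eb);
 *		return -EAGAIN;
 *	}
 */
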
/*
 * Release read lock.
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock(eb);
	up_read(&eb->lock);
}

/*
 * __btrfs_tree_lock - lock eb for write
 * @eb:		the eb to lock
 * @nest:	the nesting to use for the lock
 *
 * Returns with the eb->lock write locked.
 */
void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
	__acquires(&eb->lock)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_lock_enabled())
		start_ns = ktime_get_ns();

	down_write_nested(&eb->lock, nest);
	eb->lock_owner = current->pid;
	trace_btrfs_tree_lock(eb, start_ns);
}

void btrfs_tree_lock(struct extent_buffer *eb)
{
	__btrfs_tree_lock(eb, BTRFS_NESTING_NORMAL);
}

/*
 * Release the write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	trace_btrfs_tree_unlock(eb);
	eb->lock_owner = 0;
	up_write(&eb->lock);
}

/*
 * This releases any locks held in the path starting at level and going all the
 * way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few corner
 * cases, such as COW of the block at slot zero in the node.  This ignores
 * those rules, and it should only be called when there are no more updates to
 * be done higher up in the tree.
 */
void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
		path->locks[i] = 0;
	}
}

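/*
 * Illustrative sketch of a hypothetical caller (not part of the original
 * file): once btrfs_search_slot() has found the leaf and no further changes
 * are needed in the upper nodes, everything at level 1 and above can be
 * unlocked while the leaf lock is kept.  The trans, root, key and path
 * variables are assumed to be set up by the caller.
 *
 *	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
 *	if (ret < 0)
 *		return ret;
 *	btrfs_unlock_up_safe(path, 1);
 *	(modify the leaf at path->nodes[0] while holding only its lock)
 */
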
/*
 * Loop around taking references on and locking the root node of the tree until
 * we end up with a lock on the root node.
 *
 * Return: root extent buffer with write lock held
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);

		btrfs_maybe_reset_lockdep_class(root, eb);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/*
 * Loop around taking references on and locking the root node of the tree until
 * we end up with a lock on the root node.
 *
 * Return: root extent buffer with read lock held
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);

		btrfs_maybe_reset_lockdep_class(root, eb);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/*
 * Loop around taking references on and locking the root node of the tree in
 * nowait mode, until we either end up with a lock on the root node or give up
 * to avoid blocking.
 *
 * Return: root extent buffer with read lock held, or ERR_PTR(-EAGAIN) if the
 * lock could not be taken without blocking.
 */
struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		if (!btrfs_try_tree_read_lock(eb)) {
			free_extent_buffer(eb);
			return ERR_PTR(-EAGAIN);
		}
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

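/*
 * Illustrative nowait-mode caller (assumed, not taken from this file):
 *
 *	eb = btrfs_try_read_lock_root_node(root);
 *	if (IS_ERR(eb))
 *		return PTR_ERR(eb);	(-EAGAIN, the caller retries without nowait)
 */
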
/*
 * DREW locks
 * ==========
 *
 * DREW stands for double-reader-writer-exclusion lock. It's used in situations
 * where you want to provide A-B exclusion but not AA or BB.
 *
 * The current implementation gives more priority to readers. If a reader and a
 * writer both race to acquire their respective sides of the lock, the writer
 * yields its lock as soon as it detects a concurrent reader. Additionally, if
 * there are pending readers, no new writers are allowed to come in and acquire
 * the lock.
 */

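/*
 * A minimal usage sketch (not part of the original file), assuming two
 * operation types A and B that must exclude each other but not themselves:
 *
 *	Operation A (the "writer" side):
 *		btrfs_drew_write_lock(lock);
 *		(no B may run while this is held, other As may)
 *		btrfs_drew_write_unlock(lock);
 *
 *	Operation B (the "reader" side):
 *		btrfs_drew_read_lock(lock);
 *		(no A may run while this is held, other Bs may)
 *		btrfs_drew_read_unlock(lock);
 */
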
void btrfs_drew_lock_init(struct btrfs_drew_lock *lock)
{
	atomic_set(&lock->readers, 0);
	atomic_set(&lock->writers, 0);
	init_waitqueue_head(&lock->pending_readers);
	init_waitqueue_head(&lock->pending_writers);
}

/* Return true if acquisition is successful, false otherwise */
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock)
{
	if (atomic_read(&lock->readers))
		return false;

	atomic_inc(&lock->writers);

	/* Ensure writers count is updated before we check for pending readers */
	smp_mb__after_atomic();
	if (atomic_read(&lock->readers)) {
		btrfs_drew_write_unlock(lock);
		return false;
	}

	return true;
}

void btrfs_drew_write_lock(struct btrfs_drew_lock *lock)
{
	while (true) {
		if (btrfs_drew_try_write_lock(lock))
			return;
		wait_event(lock->pending_writers, !atomic_read(&lock->readers));
	}
}

void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock)
{
	atomic_dec(&lock->writers);
	cond_wake_up(&lock->pending_readers);
}

void btrfs_drew_read_lock(struct btrfs_drew_lock *lock)
{
	atomic_inc(&lock->readers);

	/*
	 * Ensure the pending reader count is perceived BEFORE this reader
	 * goes to sleep in case of active writers. This guarantees new writers
	 * won't be allowed and that the current reader will be woken up when
	 * the last active writer finishes its job.
	 */
	smp_mb__after_atomic();

	wait_event(lock->pending_readers, atomic_read(&lock->writers) == 0);
}

void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock)
{
	/*
	 * atomic_dec_and_test implies a full barrier, so woken up writers
	 * are guaranteed to see the decrement
	 */
	if (atomic_dec_and_test(&lock->readers))
		wake_up(&lock->pending_writers);
}