/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#ifndef BTRFS_TRANSACTION_H
#define BTRFS_TRANSACTION_H

#include <linux/refcount.h>
#include "btrfs_inode.h"
#include "delayed-ref.h"
#include "ctree.h"
#include "misc.h"

/* Radix-tree tag for roots that are part of the transaction. */
#define BTRFS_ROOT_TRANS_TAG			0

enum btrfs_trans_state {
	TRANS_STATE_RUNNING,
	TRANS_STATE_COMMIT_PREP,
	TRANS_STATE_COMMIT_START,
	TRANS_STATE_COMMIT_DOING,
	TRANS_STATE_UNBLOCKED,
	TRANS_STATE_SUPER_COMMITTED,
	TRANS_STATE_COMPLETED,
	TRANS_STATE_MAX,
};

#define BTRFS_TRANS_HAVE_FREE_BGS	0
#define BTRFS_TRANS_DIRTY_BG_RUN	1
#define BTRFS_TRANS_CACHE_ENOSPC	2

struct btrfs_transaction {
	u64 transid;
	/*
	 * Total number of external writers (USERSPACE/START/ATTACH) in this
	 * transaction. It must be zero before the transaction can be
	 * committed.
	 */
	atomic_t num_extwriters;
	/*
	 * Total number of writers in this transaction. It must be zero before
	 * the transaction can end.
	 */
	atomic_t num_writers;
	refcount_t use_count;

	unsigned long flags;

	/* Protected by fs_info->trans_lock; take the lock when changing it. */
	enum btrfs_trans_state state;
	int aborted;
	struct list_head list;
	struct extent_io_tree dirty_pages;
	time64_t start_time;
	wait_queue_head_t writer_wait;
	wait_queue_head_t commit_wait;
	struct list_head pending_snapshots;
	struct list_head dev_update_list;
	struct list_head switch_commits;
	struct list_head dirty_bgs;

	/*
	 * There is no explicit lock which protects io_bgs, rather its
	 * consistency is implied by the fact that all the sites which modify
	 * it do so under some form of transaction critical section, namely:
	 *
	 * - btrfs_start_dirty_block_groups - This function can only ever be
	 *   run by one of the transaction committers. Refer to
	 *   BTRFS_TRANS_DIRTY_BG_RUN usage in btrfs_commit_transaction.
	 *
	 * - btrfs_write_dirty_block_groups - this is called by
	 *   commit_cowonly_roots from the transaction critical section
	 *   (TRANS_STATE_COMMIT_DOING).
	 *
	 * - btrfs_cleanup_dirty_bgs - called on transaction abort.
	 */
	struct list_head io_bgs;
	struct list_head dropped_roots;
	struct extent_io_tree pinned_extents;

	/*
	 * We need to make sure block group deletion doesn't race with free
	 * space cache writeout. This mutex keeps them from stomping on each
	 * other.
	 */
	struct mutex cache_write_mutex;
	spinlock_t dirty_bgs_lock;
	/* Protected by spin lock fs_info->unused_bgs_lock. */
	struct list_head deleted_bgs;
	spinlock_t dropped_roots_lock;
	struct btrfs_delayed_ref_root delayed_refs;
	struct btrfs_fs_info *fs_info;

	/*
	 * Number of ordered extents the transaction must wait for before
	 * committing. These are ordered extents started by a fast fsync.
	 */
	atomic_t pending_ordered;
	wait_queue_head_t pending_wait;
};

enum {
	ENUM_BIT(__TRANS_FREEZABLE),
	ENUM_BIT(__TRANS_START),
	ENUM_BIT(__TRANS_ATTACH),
	ENUM_BIT(__TRANS_JOIN),
	ENUM_BIT(__TRANS_JOIN_NOLOCK),
	ENUM_BIT(__TRANS_DUMMY),
	ENUM_BIT(__TRANS_JOIN_NOSTART),
};

#define TRANS_START		(__TRANS_START | __TRANS_FREEZABLE)
#define TRANS_ATTACH		(__TRANS_ATTACH)
#define TRANS_JOIN		(__TRANS_JOIN | __TRANS_FREEZABLE)
#define TRANS_JOIN_NOLOCK	(__TRANS_JOIN_NOLOCK)
#define TRANS_JOIN_NOSTART	(__TRANS_JOIN_NOSTART)

#define TRANS_EXTWRITERS	(__TRANS_START | __TRANS_ATTACH)
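
/*
 * Example (illustrative sketch only, not built): TRANS_EXTWRITERS is a mask
 * over the __TRANS_* type bits above.  A handle whose type matches the mask
 * counts as an external writer and is accounted in
 * btrfs_transaction::num_extwriters.  The helper name below is hypothetical;
 * the real accounting lives in transaction.c.
 */
#if 0
static inline void example_extwriter_counter_inc(struct btrfs_transaction *trans,
						 unsigned int type)
{
	/* Only TRANS_START and TRANS_ATTACH handles are external writers. */
	if (type & TRANS_EXTWRITERS)
		atomic_inc(&trans->num_extwriters);
}
#endif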

struct btrfs_trans_handle {
	u64 transid;
	u64 bytes_reserved;
	u64 chunk_bytes_reserved;
	unsigned long delayed_ref_updates;
	struct btrfs_transaction *transaction;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *orig_rsv;
	/* Set by a task that wants to create a snapshot. */
	struct btrfs_pending_snapshot *pending_snapshot;
	refcount_t use_count;
	unsigned int type;
	/*
	 * Error code of a transaction abort, set outside of locks and must be
	 * accessed with READ_ONCE/WRITE_ONCE.
	 */
	short aborted;
	bool adding_csums;
	bool allocating_chunk;
	bool removing_chunk;
	bool reloc_reserved;
	bool in_fsync;
	struct btrfs_fs_info *fs_info;
	struct list_head new_bgs;
};

/*
 * The abort status can change between calls and is not protected by locks.
 * This macro accepts both btrfs_transaction and btrfs_trans_handle as types.
 * Once the status is set to a non-zero value it does not change, so the macro
 * should be used in checks but is not necessary for further reads of the
 * value.
 */
#define TRANS_ABORTED(trans)		(unlikely(READ_ONCE((trans)->aborted)))
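
/*
 * Example (illustrative sketch only, not built): TRANS_ABORTED() works on
 * both a handle and a transaction because each has an 'aborted' member.
 * Typical use is a quick bail-out before doing more work; the function below
 * is hypothetical.
 */
#if 0
static int example_do_more_work(struct btrfs_trans_handle *trans)
{
	/* The transaction was aborted elsewhere, propagate the error. */
	if (TRANS_ABORTED(trans))
		return trans->aborted;

	/* ... continue operating on the transaction ... */
	return 0;
}
#endif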

struct btrfs_pending_snapshot {
	struct dentry *dentry;
	struct inode *dir;
	struct btrfs_root *root;
	struct btrfs_root_item *root_item;
	struct btrfs_root *snap;
	struct btrfs_qgroup_inherit *inherit;
	struct btrfs_path *path;
	/* block reservation for the operation */
	struct btrfs_block_rsv block_rsv;
	/* extra metadata reservation for relocation */
	int error;
	/* Preallocated anonymous block device number */
	dev_t anon_dev;
	bool readonly;
	struct list_head list;
};

static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
					      struct btrfs_inode *inode)
{
	spin_lock(&inode->lock);
	inode->last_trans = trans->transaction->transid;
	inode->last_sub_trans = inode->root->log_transid;
	inode->last_log_commit = inode->last_sub_trans - 1;
	spin_unlock(&inode->lock);
}

/*
 * Make the qgroup code skip the given qgroupid, which means the old/new_roots
 * for the qgroup won't contain that qgroupid.
 */
static inline void btrfs_set_skip_qgroup(struct btrfs_trans_handle *trans,
					 u64 qgroupid)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	WARN_ON(delayed_refs->qgroup_to_skip);
	delayed_refs->qgroup_to_skip = qgroupid;
}

static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	WARN_ON(!delayed_refs->qgroup_to_skip);
	delayed_refs->qgroup_to_skip = 0;
}
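
/*
 * Example (illustrative sketch only, not built): the two helpers above are
 * meant to be used as a pair around an operation whose delayed refs must not
 * be accounted to the given qgroup.  The wrapper below is hypothetical.
 */
#if 0
static void example_skip_qgroup_accounting(struct btrfs_trans_handle *trans,
					   u64 qgroupid)
{
	btrfs_set_skip_qgroup(trans, qgroupid);
	/* ... queue delayed refs that must not be charged to this qgroup ... */
	btrfs_clear_skip_qgroup(trans);
}
#endif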

bool __cold abort_should_print_stack(int errno);

/*
 * Call btrfs_abort_transaction as early as possible when an error condition
 * is detected, so that the exact stack trace is reported for some errors.
 */
#define btrfs_abort_transaction(trans, errno)		\
do {								\
	bool first = false;					\
	/* Report first abort since mount */			\
	if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED,	\
			&((trans)->fs_info->fs_state))) {	\
		first = true;					\
		if (WARN(abort_should_print_stack(errno),	\
			KERN_ERR				\
			"BTRFS: Transaction aborted (error %d)\n",	\
			(errno))) {					\
			/* Stack trace printed. */			\
		} else {						\
			btrfs_err((trans)->fs_info,			\
				  "Transaction aborted (error %d)",	\
				  (errno));			\
		}						\
	}							\
	__btrfs_abort_transaction((trans), __func__,		\
				  __LINE__, (errno), first);	\
} while (0)
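
/*
 * Example (illustrative sketch only, not built): abort at the point where the
 * error is detected so the recorded stack trace points at the real culprit,
 * then let the caller unwind.  Both functions below are hypothetical.
 */
#if 0
static int example_update_item(struct btrfs_trans_handle *trans)
{
	int ret;

	ret = example_do_tree_update(trans);	/* hypothetical helper */
	if (ret) {
		/* Abort here, not in the caller, to keep the trace precise. */
		btrfs_abort_transaction(trans, ret);
		return ret;
	}
	return 0;
}
#endif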

int btrfs_end_transaction(struct btrfs_trans_handle *trans);
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   unsigned int num_items);
struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
					struct btrfs_root *root,
					unsigned int num_items);
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
					struct btrfs_root *root);
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid);
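
/*
 * Example (illustrative sketch only, not built): the common lifecycle of a
 * handle obtained from btrfs_start_transaction().  On error the handle is
 * still ended so the writer counts drop back to zero.  The callback
 * example_modify_tree() is hypothetical.
 */
#if 0
static int example_transaction_user(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	int ret;

	/* Reserve space for one item's worth of tree modifications. */
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = example_modify_tree(trans, root);	/* hypothetical helper */
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		return ret;
	}

	return btrfs_commit_transaction(trans);
}
#endif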

void btrfs_add_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root);
void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info);
int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans);
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
void btrfs_throttle(struct btrfs_fs_info *fs_info);
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
				struct btrfs_root *root);
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
				struct extent_io_tree *dirty_pages, int mark);
int btrfs_wait_tree_log_extents(struct btrfs_root *root, int mark);
int btrfs_transaction_blocked(struct btrfs_fs_info *info);
int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
void btrfs_put_transaction(struct btrfs_transaction *transaction);
void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root);
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
void __cold __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
				      const char *function,
				      unsigned int line, int errno, bool first_hit);

int __init btrfs_transaction_init(void);
void __cold btrfs_transaction_exit(void);

#endif