/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#ifndef BTRFS_BACKREF_H
#define BTRFS_BACKREF_H

#include <linux/btrfs.h>
#include "ulist.h"
#include "disk-io.h"
#include "extent_io.h"

struct inode_fs_paths {
	struct btrfs_path		*btrfs_path;
	struct btrfs_root		*fs_root;
	struct btrfs_data_container	*fspath;
};

typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 root,
		void *ctx);

int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags);

int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_key *key, struct btrfs_extent_item *ei,
			    u32 item_size, u64 *out_root, u8 *out_level);

int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
				u64 extent_item_objectid,
				u64 extent_offset, int search_commit_root,
				iterate_extent_inodes_t *iterate, void *ctx,
				bool ignore_offset);
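
/*
 * Illustrative sketch (not part of the kernel API): a callback matching
 * iterate_extent_inodes_t and how it might be passed to
 * iterate_extent_inodes().  The names record_inode_ref and my_ctx are
 * hypothetical, and a non-zero return from the callback is assumed to
 * stop the iteration.
 *
 *	static int record_inode_ref(u64 inum, u64 offset, u64 root, void *ctx)
 *	{
 *		(record or check the (inum, offset, root) triple here)
 *		return 0;
 *	}
 *
 *	ret = iterate_extent_inodes(fs_info, extent_item_objectid, extent_offset,
 *				    1, record_inode_ref, my_ctx, false);
 */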

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path, void *ctx,
				bool ignore_offset);

int paths_from_inode(u64 inum, struct inode_fs_paths *ipath);

int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **leafs,
			 const u64 *extent_item_pos, bool ignore_offset);
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots, bool ignore_offset);
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size);

struct btrfs_data_container *init_data_container(u32 total_bytes);
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
					struct btrfs_path *path);
void free_ipath(struct inode_fs_paths *ipath);

int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off);
int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
		struct ulist *roots, struct ulist *tmp_ulist);

int __init btrfs_prelim_ref_init(void);
void __cold btrfs_prelim_ref_exit(void);

struct prelim_ref {
	struct rb_node rbnode;
	u64 root_id;
	struct btrfs_key key_for_search;
	int level;
	int count;
	struct extent_inode_elem *inode_list;
	u64 parent;
	u64 wanted_disk_byte;
};

/*
 * Iterate backrefs of one extent.
 *
 * Currently it only supports iterating tree blocks in the commit root.
 */
struct btrfs_backref_iter {
	u64 bytenr;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info;
	struct btrfs_key cur_key;
	u32 item_ptr;
	u32 cur_ptr;
	u32 end_ptr;
};

struct btrfs_backref_iter *btrfs_backref_iter_alloc(
		struct btrfs_fs_info *fs_info, gfp_t gfp_flag);

static inline void btrfs_backref_iter_free(struct btrfs_backref_iter *iter)
{
	if (!iter)
		return;
	btrfs_free_path(iter->path);
	kfree(iter);
}

static inline struct extent_buffer *btrfs_backref_get_eb(
		struct btrfs_backref_iter *iter)
{
	if (!iter)
		return NULL;
	return iter->path->nodes[0];
}

/*
 * For metadata with an EXTENT_ITEM key (the non-skinny case), the first inline
 * data is a btrfs_tree_block_info, without a btrfs_extent_inline_ref header.
 *
 * This helper determines if that's the case.
 */
static inline bool btrfs_backref_has_tree_block_info(
		struct btrfs_backref_iter *iter)
{
	if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    iter->cur_ptr - iter->item_ptr == sizeof(struct btrfs_extent_item))
		return true;
	return false;
}

int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr);

int btrfs_backref_iter_next(struct btrfs_backref_iter *iter);

static inline bool btrfs_backref_iter_is_inline_ref(
		struct btrfs_backref_iter *iter)
{
	if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY ||
	    iter->cur_key.type == BTRFS_METADATA_ITEM_KEY)
		return true;
	return false;
}

static inline void btrfs_backref_iter_release(struct btrfs_backref_iter *iter)
{
	iter->bytenr = 0;
	iter->item_ptr = 0;
	iter->cur_ptr = 0;
	iter->end_ptr = 0;
	btrfs_release_path(iter->path);
	memset(&iter->cur_key, 0, sizeof(iter->cur_key));
}
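
/*
 * Illustrative usage sketch (not part of the kernel API), assuming the caller
 * already has @fs_info and the extent @bytenr; error handling is abbreviated
 * and the return value conventions noted below are assumptions:
 *
 *	struct btrfs_backref_iter *iter;
 *	int ret;
 *
 *	iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
 *	if (!iter)
 *		return -ENOMEM;
 *	ret = btrfs_backref_iter_start(iter, bytenr);
 *	while (ret == 0) {
 *		(examine iter->cur_key and the backref item at iter->cur_ptr)
 *		ret = btrfs_backref_iter_next(iter);
 *	}
 *	(ret < 0 means an error; ret > 0 is assumed to mean no more backrefs)
 *	btrfs_backref_iter_release(iter);
 *	btrfs_backref_iter_free(iter);
 */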

/*
 * Backref cache related structures
 *
 * The whole objective of backref_cache is to build a bi-directional map
 * of tree blocks (represented by backref_node) and all their parents.
 */

/*
 * Represent a tree block in the backref cache
 */
struct btrfs_backref_node {
	struct {
		struct rb_node rb_node;
		u64 bytenr;
	}; /* Use rb_simple_node for search/insert */

	u64 new_bytenr;
	/* Objectid of the tree block owner; may not be up to date */
	u64 owner;
	/* Link to pending, changed or detached list */
	struct list_head list;

	/* List of upper level edges, which link this node to its parents */
	struct list_head upper;
	/* List of lower level edges, which link this node to its children */
	struct list_head lower;

	/* NULL if this node is not a tree root */
	struct btrfs_root *root;
	/* Extent buffer got by COWing the block */
	struct extent_buffer *eb;
	/* Level of the tree block */
	unsigned int level:8;
	/* Is the block in a non-shareable tree */
	unsigned int cowonly:1;
	/* 1 if no child node is in the cache */
	unsigned int lowest:1;
	/* Is the extent buffer locked */
	unsigned int locked:1;
	/* Has the block been processed */
	unsigned int processed:1;
	/* Have backrefs of this block been checked */
	unsigned int checked:1;
	/*
	 * 1 if corresponding block has been COWed but some upper level block
	 * pointers may not point to the new location
	 */
	unsigned int pending:1;
	/* 1 if the backref node isn't connected to any other backref node */
	unsigned int detached:1;

	/*
	 * For a generic purpose backref cache, we only care whether it's a
	 * reloc root, not which source subvolid it came from.
	 */
	unsigned int is_reloc_root:1;
};

#define LOWER	0
#define UPPER	1

/*
 * Represent an edge connecting upper and lower backref nodes.
 */
struct btrfs_backref_edge {
	/*
	 * list[LOWER] is linked to btrfs_backref_node::upper of lower level
	 * node, and list[UPPER] is linked to btrfs_backref_node::lower of
	 * upper level node.
	 *
	 * Also, build_backref_tree() uses list[UPPER] for pending edges, before
	 * linking list[UPPER] to its upper level nodes.
	 */
	struct list_head list[2];

	/* Two related nodes */
	struct btrfs_backref_node *node[2];
};

struct btrfs_backref_cache {
	/* Red black tree of all backref nodes in the cache */
	struct rb_root rb_root;
	/* For passing backref nodes to btrfs_reloc_cow_block */
	struct btrfs_backref_node *path[BTRFS_MAX_LEVEL];
	/*
	 * List of blocks that have been COWed but some block pointers in upper
	 * level blocks may not reflect the new location
	 */
	struct list_head pending[BTRFS_MAX_LEVEL];
	/* List of backref nodes with no child node */
	struct list_head leaves;
	/* List of blocks that have been COWed in current transaction */
	struct list_head changed;
	/* List of detached backref nodes */
	struct list_head detached;

	u64 last_trans;

	int nr_nodes;
	int nr_edges;

	/* List of unchecked backref edges during backref cache build */
	struct list_head pending_edge;

	/* List of useless backref nodes during backref cache build */
	struct list_head useless_node;

	struct btrfs_fs_info *fs_info;

	/*
	 * Whether this cache is for relocation
	 *
	 * The relocation backref cache requires more info for reloc roots
	 * compared to the generic backref cache.
	 */
	unsigned int is_reloc;
};

void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
			      struct btrfs_backref_cache *cache, int is_reloc);
struct btrfs_backref_node *btrfs_backref_alloc_node(
		struct btrfs_backref_cache *cache, u64 bytenr, int level);
struct btrfs_backref_edge *btrfs_backref_alloc_edge(
		struct btrfs_backref_cache *cache);

#define		LINK_LOWER	(1 << 0)
#define		LINK_UPPER	(1 << 1)
static inline void btrfs_backref_link_edge(struct btrfs_backref_edge *edge,
					   struct btrfs_backref_node *lower,
					   struct btrfs_backref_node *upper,
					   int link_which)
{
	ASSERT(upper && lower && upper->level == lower->level + 1);
	edge->node[LOWER] = lower;
	edge->node[UPPER] = upper;
	if (link_which & LINK_LOWER)
		list_add_tail(&edge->list[LOWER], &lower->upper);
	if (link_which & LINK_UPPER)
		list_add_tail(&edge->list[UPPER], &upper->lower);
}
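
/*
 * Illustrative construction sketch (not part of the kernel API; error handling
 * omitted).  It shows how the allocation and linking helpers above are meant
 * to fit together; parent_bytenr and child_bytenr are hypothetical values, the
 * levels are chosen only to satisfy the level assertion above, and
 * btrfs_backref_release_cache() (declared later in this header) tears the
 * whole cache down:
 *
 *	struct btrfs_backref_cache cache;
 *	struct btrfs_backref_node *lower, *upper;
 *	struct btrfs_backref_edge *edge;
 *
 *	btrfs_backref_init_cache(fs_info, &cache, 0);
 *	lower = btrfs_backref_alloc_node(&cache, child_bytenr, 0);
 *	upper = btrfs_backref_alloc_node(&cache, parent_bytenr, 1);
 *	edge = btrfs_backref_alloc_edge(&cache);
 *	btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER | LINK_UPPER);
 *	...
 *	btrfs_backref_release_cache(&cache);
 */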

static inline void btrfs_backref_free_node(struct btrfs_backref_cache *cache,
					   struct btrfs_backref_node *node)
{
	if (node) {
		ASSERT(list_empty(&node->list));
		ASSERT(list_empty(&node->lower));
		ASSERT(node->eb == NULL);
		cache->nr_nodes--;
		btrfs_put_root(node->root);
		kfree(node);
	}
}

static inline void btrfs_backref_free_edge(struct btrfs_backref_cache *cache,
					   struct btrfs_backref_edge *edge)
{
	if (edge) {
		cache->nr_edges--;
		kfree(edge);
	}
}

static inline void btrfs_backref_unlock_node_buffer(
		struct btrfs_backref_node *node)
{
	if (node->locked) {
		btrfs_tree_unlock(node->eb);
		node->locked = 0;
	}
}

static inline void btrfs_backref_drop_node_buffer(
		struct btrfs_backref_node *node)
{
	if (node->eb) {
		btrfs_backref_unlock_node_buffer(node);
		free_extent_buffer(node->eb);
		node->eb = NULL;
	}
}

/*
 * Drop the backref node from the cache without cleaning up its child
 * edges.
 *
 * This can only be called on a node without parent edges.
 * Its child edges are kept as is.
 */
static inline void btrfs_backref_drop_node(struct btrfs_backref_cache *tree,
					   struct btrfs_backref_node *node)
{
	ASSERT(list_empty(&node->upper));

	btrfs_backref_drop_node_buffer(node);
	list_del_init(&node->list);
	list_del_init(&node->lower);
	if (!RB_EMPTY_NODE(&node->rb_node))
		rb_erase(&node->rb_node, &tree->rb_root);
	btrfs_backref_free_node(tree, node);
}

void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
				struct btrfs_backref_node *node);

void btrfs_backref_release_cache(struct btrfs_backref_cache *cache);

static inline void btrfs_backref_panic(struct btrfs_fs_info *fs_info,
				       u64 bytenr, int errno)
{
	btrfs_panic(fs_info, errno,
		    "Inconsistency in backref cache found at offset %llu",
		    bytenr);
}

int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
				struct btrfs_path *path,
				struct btrfs_backref_iter *iter,
				struct btrfs_key *node_key,
				struct btrfs_backref_node *cur);

int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
				     struct btrfs_backref_node *start);

void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
				 struct btrfs_backref_node *node);

#endif