1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2009 Oracle.  All rights reserved.
4 */
5
6#include <linux/sched.h>
7#include <linux/pagemap.h>
8#include <linux/writeback.h>
9#include <linux/blkdev.h>
10#include <linux/rbtree.h>
11#include <linux/slab.h>
12#include <linux/error-injection.h>
13#include "ctree.h"
14#include "disk-io.h"
15#include "transaction.h"
16#include "volumes.h"
17#include "locking.h"
18#include "btrfs_inode.h"
19#include "async-thread.h"
20#include "free-space-cache.h"
21#include "inode-map.h"
22#include "qgroup.h"
23#include "print-tree.h"
24#include "delalloc-space.h"
25#include "block-group.h"
26#include "backref.h"
27#include "misc.h"
28
29/*
30 * Relocation overview
31 *
32 * [What does relocation do]
33 *
34 * The objective of relocation is to relocate all extents of the target block
35 * group to other block groups.
36 * This is used by resize (shrink only), profile conversion, space
37 * compaction, and the balance routine to spread chunks over devices.
38 *
39 * 		Before		|		After
40 * ------------------------------------------------------------------
41 *  BG A: 10 data extents	| BG A: deleted
42 *  BG B:  2 data extents	| BG B: 10 data extents (2 old + 8 relocated)
43 *  BG C:  1 data extent	| BG C:  3 data extents (1 old + 2 relocated)
44 *
45 * [How does relocation work]
46 *
47 * 1.   Mark the target block group read-only
48 *      New extents won't be allocated from the target block group.
49 *
50 * 2.1  Record each extent in the target block group
51 *      To build a proper map of extents to be relocated.
52 *
53 * 2.2  Build data reloc tree and reloc trees
54 *      Data reloc tree will contain an inode, recording all newly relocated
55 *      data extents.
56 *      There will be only one data reloc tree for one data block group.
57 *
58 *      Reloc tree will be a special snapshot of its source tree, containing
59 *      relocated tree blocks.
60 *      Each tree referring to a tree block in target block group will get its
61 *      reloc tree built.
62 *
63 * 2.3  Swap source tree with its corresponding reloc tree
64 *      Each involved tree only refers to new extents after swap.
65 *
66 * 3.   Cleanup reloc trees and data reloc tree.
67 *      As old extents in the target block group are still referenced by reloc
68 *      trees, we need to clean them up before really freeing the target block
69 *      group.
70 *
71 * The main complexity is in steps 2.2 and 2.3.
72 *
73 * The entry point of relocation is the relocate_block_group() function.
74 */
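
/*
 * As a hedged, illustrative sketch only (transactions, error handling and
 * progress tracking omitted; the real control flow lives in
 * relocate_block_group() and its helpers further down this file), the
 * steps above map roughly onto:
 *
 *	btrfs_inc_block_group_ro(rc->block_group, true);	// step 1
 *	while (find_next_extent(rc, path, &key) == 0) {		// step 2.1
 *		if (the extent is a tree block)
 *			add_tree_block(rc, &key, path, &blocks);
 *		else
 *			relocate_data_extent(rc->data_inode, &key,
 *					     &rc->cluster);
 *	}
 *	merge_reloc_roots(rc);		// step 2.3, swap and merge
 *	clean_dirty_subvols(rc);	// step 3
 *
 * The reloc trees themselves (step 2.2) are created on demand via
 * btrfs_init_reloc_root() while the trees are being modified.
 */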
75
76#define RELOCATION_RESERVED_NODES	256
77/*
78 * map address of tree root to tree
79 */
80struct mapping_node {
81	struct {
82		struct rb_node rb_node;
83		u64 bytenr;
84	}; /* Use rb_simple_node for search/insert */
85	void *data;
86};
87
88struct mapping_tree {
89	struct rb_root rb_root;
90	spinlock_t lock;
91};
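
/*
 * The anonymous { rb_node, bytenr } pair in struct mapping_node is laid
 * out to match struct rb_simple_node from misc.h, so the rb_simple_*()
 * helpers can operate on it directly.  A minimal lookup sketch (this is
 * essentially what find_reloc_root() below does):
 *
 *	struct mapping_node *mn = NULL;
 *	struct rb_node *rb_node;
 *
 *	spin_lock(&tree->lock);
 *	rb_node = rb_simple_search(&tree->rb_root, bytenr);
 *	if (rb_node)
 *		mn = rb_entry(rb_node, struct mapping_node, rb_node);
 *	spin_unlock(&tree->lock);
 */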
92
93/*
94 * represents a tree block to process
95 */
96struct tree_block {
97	struct {
98		struct rb_node rb_node;
99		u64 bytenr;
100	}; /* Use rb_simple_node for search/insert */
101	struct btrfs_key key;
102	unsigned int level:8;
103	unsigned int key_ready:1;
104};
105
106#define MAX_EXTENTS 128
107
108struct file_extent_cluster {
109	u64 start;
110	u64 end;
111	u64 boundary[MAX_EXTENTS];
112	unsigned int nr;
113};
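
/*
 * Rough example: after relocating three adjacent data extents into the
 * data reloc inode, the cluster has nr == 3, [start, end] covering the
 * new extents, and boundary[0..2] holding the start offset of each
 * extent; writeback uses the boundaries (EXTENT_BOUNDARY bits) to keep
 * the relocated extents from being merged into one.
 */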
114
115struct reloc_control {
116	/* block group to relocate */
117	struct btrfs_block_group *block_group;
118	/* extent tree */
119	struct btrfs_root *extent_root;
120	/* inode for moving data */
121	struct inode *data_inode;
122
123	struct btrfs_block_rsv *block_rsv;
124
125	struct btrfs_backref_cache backref_cache;
126
127	struct file_extent_cluster cluster;
128	/* tree blocks have been processed */
129	struct extent_io_tree processed_blocks;
130	/* map start of tree root to corresponding reloc tree */
131	struct mapping_tree reloc_root_tree;
132	/* list of reloc trees */
133	struct list_head reloc_roots;
134	/* list of subvolume trees that get relocated */
135	struct list_head dirty_subvol_roots;
136	/* size of metadata reservation for merging reloc trees */
137	u64 merging_rsv_size;
138	/* size of relocated tree nodes */
139	u64 nodes_relocated;
140	/* reserved size for block group relocation */
141	u64 reserved_bytes;
142
143	u64 search_start;
144	u64 extents_found;
145
146	unsigned int stage:8;
147	unsigned int create_reloc_tree:1;
148	unsigned int merge_reloc_tree:1;
149	unsigned int found_file_extent:1;
150};
151
152/* stages of data relocation */
153#define MOVE_DATA_EXTENTS	0
154#define UPDATE_DATA_PTRS	1
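
/*
 * The stages run in order: in MOVE_DATA_EXTENTS the contents of every
 * data extent in the target block group are copied into the data reloc
 * inode; in UPDATE_DATA_PTRS the file extent items in the affected trees
 * are rewritten to point at the new copies (see replace_file_extents()).
 */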
155
156static void mark_block_processed(struct reloc_control *rc,
157				 struct btrfs_backref_node *node)
158{
159	u32 blocksize;
160
161	if (node->level == 0 ||
162	    in_range(node->bytenr, rc->block_group->start,
163		     rc->block_group->length)) {
164		blocksize = rc->extent_root->fs_info->nodesize;
165		set_extent_bits(&rc->processed_blocks, node->bytenr,
166				node->bytenr + blocksize - 1, EXTENT_DIRTY);
167	}
168	node->processed = 1;
169}
170
172static void mapping_tree_init(struct mapping_tree *tree)
173{
174	tree->rb_root = RB_ROOT;
175	spin_lock_init(&tree->lock);
176}
177
178/*
179 * walk up backref nodes until reaching the node that represents the tree root
180 */
181static struct btrfs_backref_node *walk_up_backref(
182		struct btrfs_backref_node *node,
183		struct btrfs_backref_edge *edges[], int *index)
184{
185	struct btrfs_backref_edge *edge;
186	int idx = *index;
187
188	while (!list_empty(&node->upper)) {
189		edge = list_entry(node->upper.next,
190				  struct btrfs_backref_edge, list[LOWER]);
191		edges[idx++] = edge;
192		node = edge->node[UPPER];
193	}
194	BUG_ON(node->detached);
195	*index = idx;
196	return node;
197}
198
199/*
200 * walk down backref nodes to find start of next reference path
201 */
202static struct btrfs_backref_node *walk_down_backref(
203		struct btrfs_backref_edge *edges[], int *index)
204{
205	struct btrfs_backref_edge *edge;
206	struct btrfs_backref_node *lower;
207	int idx = *index;
208
209	while (idx > 0) {
210		edge = edges[idx - 1];
211		lower = edge->node[LOWER];
212		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
213			idx--;
214			continue;
215		}
216		edge = list_entry(edge->list[LOWER].next,
217				  struct btrfs_backref_edge, list[LOWER]);
218		edges[idx - 1] = edge;
219		*index = idx;
220		return edge->node[UPPER];
221	}
222	*index = 0;
223	return NULL;
224}
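
/*
 * Together, the two walkers above enumerate every path from a node to
 * the tree roots that reference it.  A hedged sketch of the usual loop
 * (select_reloc_root() below follows this pattern):
 *
 *	int index = 0;
 *	struct btrfs_backref_node *next = node;
 *
 *	while (next) {
 *		next = walk_up_backref(next, edges, &index);
 *		// next is now a root node; inspect next->root here
 *		next = walk_down_backref(edges, &index);
 *	}
 */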
225
226static void update_backref_node(struct btrfs_backref_cache *cache,
227				struct btrfs_backref_node *node, u64 bytenr)
228{
229	struct rb_node *rb_node;
230	rb_erase(&node->rb_node, &cache->rb_root);
231	node->bytenr = bytenr;
232	rb_node = rb_simple_insert(&cache->rb_root, node->bytenr, &node->rb_node);
233	if (rb_node)
234		btrfs_backref_panic(cache->fs_info, bytenr, -EEXIST);
235}
236
237/*
238 * update backref cache after a transaction commit
239 */
240static int update_backref_cache(struct btrfs_trans_handle *trans,
241				struct btrfs_backref_cache *cache)
242{
243	struct btrfs_backref_node *node;
244	int level = 0;
245
246	if (cache->last_trans == 0) {
247		cache->last_trans = trans->transid;
248		return 0;
249	}
250
251	if (cache->last_trans == trans->transid)
252		return 0;
253
254	/*
255	 * Detached nodes are used to avoid unnecessary backref
256	 * lookups.  A transaction commit changes the extent tree,
257	 * so the detached nodes are no longer useful.
258	 */
259	while (!list_empty(&cache->detached)) {
260		node = list_entry(cache->detached.next,
261				  struct btrfs_backref_node, list);
262		btrfs_backref_cleanup_node(cache, node);
263	}
264
265	while (!list_empty(&cache->changed)) {
266		node = list_entry(cache->changed.next,
267				  struct btrfs_backref_node, list);
268		list_del_init(&node->list);
269		BUG_ON(node->pending);
270		update_backref_node(cache, node, node->new_bytenr);
271	}
272
273	/*
274	 * Some nodes can be left in the pending list if there were
275	 * errors while processing the pending nodes.
276	 */
277	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
278		list_for_each_entry(node, &cache->pending[level], list) {
279			BUG_ON(!node->pending);
280			if (node->bytenr == node->new_bytenr)
281				continue;
282			update_backref_node(cache, node, node->new_bytenr);
283		}
284	}
285
286	cache->last_trans = 0;
287	return 1;
288}
289
290static bool reloc_root_is_dead(struct btrfs_root *root)
291{
292	/*
293	 * Pairs with set_bit/clear_bit in clean_dirty_subvols and
294	 * btrfs_update_reloc_root. We need to see the updated bit before
295	 * trying to access reloc_root.
296	 */
297	smp_rmb();
298	if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
299		return true;
300	return false;
301}
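
/*
 * The barrier pairing, roughly: btrfs_update_reloc_root() does
 * set_bit(BTRFS_ROOT_DEAD_RELOC_TREE) -> smp_wmb() -> __del_reloc_root(),
 * and clean_dirty_subvols() does root->reloc_root = NULL -> smp_wmb() ->
 * clear_bit().  Readers issue smp_rmb() before test_bit(), so they never
 * act on a stale reloc_root pointer together with a stale bit.
 */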
302
303/*
304 * Check if this subvolume tree has a valid reloc tree.
305 *
306 * A reloc tree after swap is considered dead, thus not valid.
307 * This is enough for most callers, as they don't distinguish dead reloc root
308 * from no reloc root.  But btrfs_should_ignore_reloc_root() below is a
309 * special case.
310 */
311static bool have_reloc_root(struct btrfs_root *root)
312{
313	if (reloc_root_is_dead(root))
314		return false;
315	if (!root->reloc_root)
316		return false;
317	return true;
318}
319
320int btrfs_should_ignore_reloc_root(struct btrfs_root *root)
321{
322	struct btrfs_root *reloc_root;
323
324	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
325		return 0;
326
327	/* This root has been merged with its reloc tree, we can ignore it */
328	if (reloc_root_is_dead(root))
329		return 1;
330
331	reloc_root = root->reloc_root;
332	if (!reloc_root)
333		return 0;
334
335	if (btrfs_header_generation(reloc_root->commit_root) ==
336	    root->fs_info->running_transaction->transid)
337		return 0;
338	/*
339	 * If there is a reloc tree and it was created in a previous
340	 * transaction, backref lookup can find the reloc tree, so
341	 * the backref node for the fs tree root is useless for
342	 * relocation.
343	 */
344	return 1;
345}
346
347/*
348 * find reloc tree by address of tree root
349 */
350struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr)
351{
352	struct reloc_control *rc = fs_info->reloc_ctl;
353	struct rb_node *rb_node;
354	struct mapping_node *node;
355	struct btrfs_root *root = NULL;
356
357	ASSERT(rc);
358	spin_lock(&rc->reloc_root_tree.lock);
359	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr);
360	if (rb_node) {
361		node = rb_entry(rb_node, struct mapping_node, rb_node);
362		root = (struct btrfs_root *)node->data;
363	}
364	spin_unlock(&rc->reloc_root_tree.lock);
365	return btrfs_grab_root(root);
366}
367
368/*
369 * For useless nodes, do two major cleanups:
370 *
371 * - Clean up the child edges and nodes
372 *   If a child node is also an orphan (no parent) during cleanup, the
373 *   child node will be cleaned up too.
374 *
375 * - Free leaves (level 0), keep non-leaf nodes detached
376 *   For non-leaf nodes, the node stays cached as "detached".
377 *
378 * Return false if @node is not in the @useless_nodes list.
379 * Return true if @node is in the @useless_nodes list.
380 */
381static bool handle_useless_nodes(struct reloc_control *rc,
382				 struct btrfs_backref_node *node)
383{
384	struct btrfs_backref_cache *cache = &rc->backref_cache;
385	struct list_head *useless_node = &cache->useless_node;
386	bool ret = false;
387
388	while (!list_empty(useless_node)) {
389		struct btrfs_backref_node *cur;
390
391		cur = list_first_entry(useless_node, struct btrfs_backref_node,
392				 list);
393		list_del_init(&cur->list);
394
395		/* Only tree root nodes can be added to @useless_nodes */
396		ASSERT(list_empty(&cur->upper));
397
398		if (cur == node)
399			ret = true;
400
401		/* The node is the lowest node, unlink it from the leaves list */
402		if (cur->lowest) {
403			list_del_init(&cur->lower);
404			cur->lowest = 0;
405		}
406
407		/* Cleanup the lower edges */
408		while (!list_empty(&cur->lower)) {
409			struct btrfs_backref_edge *edge;
410			struct btrfs_backref_node *lower;
411
412			edge = list_entry(cur->lower.next,
413					struct btrfs_backref_edge, list[UPPER]);
414			list_del(&edge->list[UPPER]);
415			list_del(&edge->list[LOWER]);
416			lower = edge->node[LOWER];
417			btrfs_backref_free_edge(cache, edge);
418
419			/* Child node is also orphan, queue for cleanup */
420			if (list_empty(&lower->upper))
421				list_add(&lower->list, useless_node);
422		}
423		/* Mark this block processed for relocation */
424		mark_block_processed(rc, cur);
425
426		/*
427		 * Backref nodes for tree leaves are deleted from the cache.
428		 * Backref nodes for upper level tree blocks are left in the
429		 * cache to avoid unnecessary backref lookup.
430		 */
431		if (cur->level > 0) {
432			list_add(&cur->list, &cache->detached);
433			cur->detached = 1;
434		} else {
435			rb_erase(&cur->rb_node, &cache->rb_root);
436			btrfs_backref_free_node(cache, cur);
437		}
438	}
439	return ret;
440}
441
442/*
443 * Build a backref tree for a given tree block. The root of the backref tree
444 * corresponds to the tree block, and the leaves of the backref tree
445 * correspond to roots of b-trees that reference the tree block.
446 *
447 * The basic idea of this function is to check backrefs of a given block to
448 * find upper level blocks that reference the block, and then check backrefs
449 * of these upper level blocks recursively. The recursion stops when the tree
450 * root is reached or backrefs for the block are cached.
451 *
452 * NOTE: if we find that backrefs for a block are cached, we know backrefs for
453 * all upper level blocks that directly/indirectly reference the block are also
454 * cached.
455 */
456static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
457			struct reloc_control *rc, struct btrfs_key *node_key,
458			int level, u64 bytenr)
459{
460	struct btrfs_backref_iter *iter;
461	struct btrfs_backref_cache *cache = &rc->backref_cache;
462	/* For searching parent of TREE_BLOCK_REF */
463	struct btrfs_path *path;
464	struct btrfs_backref_node *cur;
465	struct btrfs_backref_node *node = NULL;
466	struct btrfs_backref_edge *edge;
467	int ret;
468	int err = 0;
469
470	iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info, GFP_NOFS);
471	if (!iter)
472		return ERR_PTR(-ENOMEM);
473	path = btrfs_alloc_path();
474	if (!path) {
475		err = -ENOMEM;
476		goto out;
477	}
478
479	node = btrfs_backref_alloc_node(cache, bytenr, level);
480	if (!node) {
481		err = -ENOMEM;
482		goto out;
483	}
484
485	node->lowest = 1;
486	cur = node;
487
488	/* Breadth-first search to build backref cache */
489	do {
490		ret = btrfs_backref_add_tree_node(cache, path, iter, node_key,
491						  cur);
492		if (ret < 0) {
493			err = ret;
494			goto out;
495		}
496		edge = list_first_entry_or_null(&cache->pending_edge,
497				struct btrfs_backref_edge, list[UPPER]);
498		/*
499		 * The pending list isn't empty, take the first block to
500		 * process
501		 */
502		if (edge) {
503			list_del_init(&edge->list[UPPER]);
504			cur = edge->node[UPPER];
505		}
506	} while (edge);
507
508	/* Finish the upper linkage of newly added edges/nodes */
509	ret = btrfs_backref_finish_upper_links(cache, node);
510	if (ret < 0) {
511		err = ret;
512		goto out;
513	}
514
515	if (handle_useless_nodes(rc, node))
516		node = NULL;
517out:
518	btrfs_backref_iter_free(iter);
519	btrfs_free_path(path);
520	if (err) {
521		btrfs_backref_error_cleanup(cache, node);
522		return ERR_PTR(err);
523	}
524	ASSERT(!node || !node->detached);
525	ASSERT(list_empty(&cache->useless_node) &&
526	       list_empty(&cache->pending_edge));
527	return node;
528}
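
/*
 * A hedged usage sketch: relocate_tree_blocks() is the in-tree caller of
 * build_backref_tree() and does roughly
 *
 *	node = build_backref_tree(rc, &block->key, block->level,
 *				  block->bytenr);
 *	if (IS_ERR(node)) {
 *		err = PTR_ERR(node);
 *		goto out;
 *	}
 *	ret = relocate_tree_block(trans, rc, node, &block->key, path);
 */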
529
530/*
531 * helper to add backref node for the newly created snapshot.
532 * the backref node is created by cloning the backref node that
533 * corresponds to the root of the source tree
534 */
535static int clone_backref_node(struct btrfs_trans_handle *trans,
536			      struct reloc_control *rc,
537			      struct btrfs_root *src,
538			      struct btrfs_root *dest)
539{
540	struct btrfs_root *reloc_root = src->reloc_root;
541	struct btrfs_backref_cache *cache = &rc->backref_cache;
542	struct btrfs_backref_node *node = NULL;
543	struct btrfs_backref_node *new_node;
544	struct btrfs_backref_edge *edge;
545	struct btrfs_backref_edge *new_edge;
546	struct rb_node *rb_node;
547
548	if (cache->last_trans > 0)
549		update_backref_cache(trans, cache);
550
551	rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start);
552	if (rb_node) {
553		node = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
554		if (node->detached)
555			node = NULL;
556		else
557			BUG_ON(node->new_bytenr != reloc_root->node->start);
558	}
559
560	if (!node) {
561		rb_node = rb_simple_search(&cache->rb_root,
562					   reloc_root->commit_root->start);
563		if (rb_node) {
564			node = rb_entry(rb_node, struct btrfs_backref_node,
565					rb_node);
566			BUG_ON(node->detached);
567		}
568	}
569
570	if (!node)
571		return 0;
572
573	new_node = btrfs_backref_alloc_node(cache, dest->node->start,
574					    node->level);
575	if (!new_node)
576		return -ENOMEM;
577
578	new_node->lowest = node->lowest;
579	new_node->checked = 1;
580	new_node->root = btrfs_grab_root(dest);
581	ASSERT(new_node->root);
582
583	if (!node->lowest) {
584		list_for_each_entry(edge, &node->lower, list[UPPER]) {
585			new_edge = btrfs_backref_alloc_edge(cache);
586			if (!new_edge)
587				goto fail;
588
589			btrfs_backref_link_edge(new_edge, edge->node[LOWER],
590						new_node, LINK_UPPER);
591		}
592	} else {
593		list_add_tail(&new_node->lower, &cache->leaves);
594	}
595
596	rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr,
597				   &new_node->rb_node);
598	if (rb_node)
599		btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST);
600
601	if (!new_node->lowest) {
602		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
603			list_add_tail(&new_edge->list[LOWER],
604				      &new_edge->node[LOWER]->upper);
605		}
606	}
607	return 0;
608fail:
609	while (!list_empty(&new_node->lower)) {
610		new_edge = list_entry(new_node->lower.next,
611				      struct btrfs_backref_edge, list[UPPER]);
612		list_del(&new_edge->list[UPPER]);
613		btrfs_backref_free_edge(cache, new_edge);
614	}
615	btrfs_backref_free_node(cache, new_node);
616	return -ENOMEM;
617}
618
619/*
620 * helper to add 'address of tree root -> reloc tree' mapping
621 */
622static int __must_check __add_reloc_root(struct btrfs_root *root)
623{
624	struct btrfs_fs_info *fs_info = root->fs_info;
625	struct rb_node *rb_node;
626	struct mapping_node *node;
627	struct reloc_control *rc = fs_info->reloc_ctl;
628
629	node = kmalloc(sizeof(*node), GFP_NOFS);
630	if (!node)
631		return -ENOMEM;
632
633	node->bytenr = root->commit_root->start;
634	node->data = root;
635
636	spin_lock(&rc->reloc_root_tree.lock);
637	rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
638				   node->bytenr, &node->rb_node);
639	spin_unlock(&rc->reloc_root_tree.lock);
640	if (rb_node) {
641		btrfs_panic(fs_info, -EEXIST,
642			    "Duplicate root found for start=%llu while inserting into relocation tree",
643			    node->bytenr);
644	}
645
646	list_add_tail(&root->root_list, &rc->reloc_roots);
647	return 0;
648}
649
650/*
651 * helper to delete the 'address of tree root -> reloc tree'
652 * mapping
653 */
654static void __del_reloc_root(struct btrfs_root *root)
655{
656	struct btrfs_fs_info *fs_info = root->fs_info;
657	struct rb_node *rb_node;
658	struct mapping_node *node = NULL;
659	struct reloc_control *rc = fs_info->reloc_ctl;
660	bool put_ref = false;
661
662	if (rc && root->node) {
663		spin_lock(&rc->reloc_root_tree.lock);
664		rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
665					   root->commit_root->start);
666		if (rb_node) {
667			node = rb_entry(rb_node, struct mapping_node, rb_node);
668			rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
669			RB_CLEAR_NODE(&node->rb_node);
670		}
671		spin_unlock(&rc->reloc_root_tree.lock);
672		ASSERT(!node || (struct btrfs_root *)node->data == root);
673	}
674
675	/*
676	 * We only put the reloc root here if it's on the list.  There are a lot
677	 * of places where the pattern is to splice the rc->reloc_roots, process
678	 * the reloc roots, and then add the reloc root back onto
679	 * rc->reloc_roots.  If we call __del_reloc_root while it's off of the
680	 * list we don't want the reference being dropped, because the guy
681	 * messing with the list is in charge of the reference.
682	 */
683	spin_lock(&fs_info->trans_lock);
684	if (!list_empty(&root->root_list)) {
685		put_ref = true;
686		list_del_init(&root->root_list);
687	}
688	spin_unlock(&fs_info->trans_lock);
689	if (put_ref)
690		btrfs_put_root(root);
691	kfree(node);
692}
693
694/*
695 * helper to update the 'address of tree root -> reloc tree'
696 * mapping
697 */
698static int __update_reloc_root(struct btrfs_root *root)
699{
700	struct btrfs_fs_info *fs_info = root->fs_info;
701	struct rb_node *rb_node;
702	struct mapping_node *node = NULL;
703	struct reloc_control *rc = fs_info->reloc_ctl;
704
705	spin_lock(&rc->reloc_root_tree.lock);
706	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
707				   root->commit_root->start);
708	if (rb_node) {
709		node = rb_entry(rb_node, struct mapping_node, rb_node);
710		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
711	}
712	spin_unlock(&rc->reloc_root_tree.lock);
713
714	if (!node)
715		return 0;
716	BUG_ON((struct btrfs_root *)node->data != root);
717
718	spin_lock(&rc->reloc_root_tree.lock);
719	node->bytenr = root->node->start;
720	rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
721				   node->bytenr, &node->rb_node);
722	spin_unlock(&rc->reloc_root_tree.lock);
723	if (rb_node)
724		btrfs_backref_panic(fs_info, node->bytenr, -EEXIST);
725	return 0;
726}
727
728static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
729					struct btrfs_root *root, u64 objectid)
730{
731	struct btrfs_fs_info *fs_info = root->fs_info;
732	struct btrfs_root *reloc_root;
733	struct extent_buffer *eb;
734	struct btrfs_root_item *root_item;
735	struct btrfs_key root_key;
736	int ret = 0;
737	bool must_abort = false;
738
739	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
740	if (!root_item)
741		return ERR_PTR(-ENOMEM);
742
743	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
744	root_key.type = BTRFS_ROOT_ITEM_KEY;
745	root_key.offset = objectid;
746
747	if (root->root_key.objectid == objectid) {
748		u64 commit_root_gen;
749
750		/* called by btrfs_init_reloc_root */
751		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
752				      BTRFS_TREE_RELOC_OBJECTID);
753		if (ret)
754			goto fail;
755
756		/*
757		 * Set the last_snapshot field to the generation of the commit
758		 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
759		 * correctly (returns true) both when the relocation root is
760		 * created inside the critical section of a transaction commit
761		 * (through transaction.c:qgroup_account_snapshot()) and when
762		 * it's created before the transaction commit is started.
763		 */
764		commit_root_gen = btrfs_header_generation(root->commit_root);
765		btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
766	} else {
767		/*
768		 * called by btrfs_reloc_post_snapshot_hook.
769		 * The source tree is a reloc tree, so all tree blocks
770		 * modified after it was created have the RELOC flag
771		 * set in their headers.  So it's OK not to update
772		 * the 'last_snapshot'.
773		 */
774		ret = btrfs_copy_root(trans, root, root->node, &eb,
775				      BTRFS_TREE_RELOC_OBJECTID);
776		if (ret)
777			goto fail;
778	}
779
780	/*
781	 * We have changed references at this point, we must abort the
782	 * transaction if anything fails.
783	 */
784	must_abort = true;
785
786	memcpy(root_item, &root->root_item, sizeof(*root_item));
787	btrfs_set_root_bytenr(root_item, eb->start);
788	btrfs_set_root_level(root_item, btrfs_header_level(eb));
789	btrfs_set_root_generation(root_item, trans->transid);
790
791	if (root->root_key.objectid == objectid) {
792		btrfs_set_root_refs(root_item, 0);
793		memset(&root_item->drop_progress, 0,
794		       sizeof(struct btrfs_disk_key));
795		root_item->drop_level = 0;
796	}
797
798	btrfs_tree_unlock(eb);
799	free_extent_buffer(eb);
800
801	ret = btrfs_insert_root(trans, fs_info->tree_root,
802				&root_key, root_item);
803	if (ret)
804		goto fail;
805
806	kfree(root_item);
807
808	reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
809	if (IS_ERR(reloc_root)) {
810		ret = PTR_ERR(reloc_root);
811		goto abort;
812	}
813	set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
814	reloc_root->last_trans = trans->transid;
815	return reloc_root;
816fail:
817	kfree(root_item);
818abort:
819	if (must_abort)
820		btrfs_abort_transaction(trans, ret);
821	return ERR_PTR(ret);
822}
823
824/*
825 * create a reloc tree for a given fs tree. the reloc tree is just a
826 * snapshot of the fs tree with a special root objectid.
827 *
828 * The reloc_root comes out of here with two references, one for
829 * root->reloc_root, and another for being on the rc->reloc_roots list.
830 */
831int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
832			  struct btrfs_root *root)
833{
834	struct btrfs_fs_info *fs_info = root->fs_info;
835	struct btrfs_root *reloc_root;
836	struct reloc_control *rc = fs_info->reloc_ctl;
837	struct btrfs_block_rsv *rsv;
838	int clear_rsv = 0;
839	int ret;
840
841	if (!rc)
842		return 0;
843
844	/*
845	 * The subvolume has a reloc tree but the swap is finished, so there
846	 * is no need to create/update the dead reloc tree.
847	 */
848	if (reloc_root_is_dead(root))
849		return 0;
850
851	/*
852	 * This is subtle but important.  We do not do
853	 * record_root_in_transaction for reloc roots, instead we record their
854	 * corresponding fs root, and then here we update the last trans for the
855	 * reloc root.  This means that we have to do this for the entire life
856	 * of the reloc root, regardless of which stage of the relocation we are
857	 * in.
858	 */
859	if (root->reloc_root) {
860		reloc_root = root->reloc_root;
861		reloc_root->last_trans = trans->transid;
862		return 0;
863	}
864
865	/*
866	 * We are merging reloc roots, we do not need new reloc trees.  Also
867	 * reloc trees never need their own reloc tree.
868	 */
869	if (!rc->create_reloc_tree ||
870	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
871		return 0;
872
873	if (!trans->reloc_reserved) {
874		rsv = trans->block_rsv;
875		trans->block_rsv = rc->block_rsv;
876		clear_rsv = 1;
877	}
878	reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
879	if (clear_rsv)
880		trans->block_rsv = rsv;
881
882	ret = __add_reloc_root(reloc_root);
883	BUG_ON(ret < 0);
884	root->reloc_root = btrfs_grab_root(reloc_root);
885	return 0;
886}
887
888/*
889 * update root item of reloc tree
890 */
891int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
892			    struct btrfs_root *root)
893{
894	struct btrfs_fs_info *fs_info = root->fs_info;
895	struct btrfs_root *reloc_root;
896	struct btrfs_root_item *root_item;
897	int ret;
898
899	if (!have_reloc_root(root))
900		return 0;
901
902	reloc_root = root->reloc_root;
903	root_item = &reloc_root->root_item;
904
905	/*
906	 * We are probably ok here, but __del_reloc_root() will drop its ref of
907	 * the root.  We have the ref for root->reloc_root, but just in case
908	 * hold it while we update the reloc root.
909	 */
910	btrfs_grab_root(reloc_root);
911
912	/* root->reloc_root will stay until the current relocation finishes */
913	if (fs_info->reloc_ctl->merge_reloc_tree &&
914	    btrfs_root_refs(root_item) == 0) {
915		set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
916		/*
917		 * Mark the tree as dead before we change reloc_root so
918		 * have_reloc_root will not touch it from now on.
919		 */
920		smp_wmb();
921		__del_reloc_root(reloc_root);
922	}
923
924	if (reloc_root->commit_root != reloc_root->node) {
925		__update_reloc_root(reloc_root);
926		btrfs_set_root_node(root_item, reloc_root->node);
927		free_extent_buffer(reloc_root->commit_root);
928		reloc_root->commit_root = btrfs_root_node(reloc_root);
929	}
930
931	ret = btrfs_update_root(trans, fs_info->tree_root,
932				&reloc_root->root_key, root_item);
933	btrfs_put_root(reloc_root);
934	return ret;
935}
936
937/*
938 * helper to find first cached inode with inode number >= objectid
939 * in a subvolume
940 */
941static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
942{
943	struct rb_node *node;
944	struct rb_node *prev;
945	struct btrfs_inode *entry;
946	struct inode *inode;
947
948	spin_lock(&root->inode_lock);
949again:
950	node = root->inode_tree.rb_node;
951	prev = NULL;
952	while (node) {
953		prev = node;
954		entry = rb_entry(node, struct btrfs_inode, rb_node);
955
956		if (objectid < btrfs_ino(entry))
957			node = node->rb_left;
958		else if (objectid > btrfs_ino(entry))
959			node = node->rb_right;
960		else
961			break;
962	}
963	if (!node) {
964		while (prev) {
965			entry = rb_entry(prev, struct btrfs_inode, rb_node);
966			if (objectid <= btrfs_ino(entry)) {
967				node = prev;
968				break;
969			}
970			prev = rb_next(prev);
971		}
972	}
973	while (node) {
974		entry = rb_entry(node, struct btrfs_inode, rb_node);
975		inode = igrab(&entry->vfs_inode);
976		if (inode) {
977			spin_unlock(&root->inode_lock);
978			return inode;
979		}
980
981		objectid = btrfs_ino(entry) + 1;
982		if (cond_resched_lock(&root->inode_lock))
983			goto again;
984
985		node = rb_next(node);
986	}
987	spin_unlock(&root->inode_lock);
988	return NULL;
989}
990
991/*
992 * get new location of data: the file offset in the data reloc inode is the extent bytenr minus the block group start (cached in index_cnt)
993 */
994static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
995			    u64 bytenr, u64 num_bytes)
996{
997	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
998	struct btrfs_path *path;
999	struct btrfs_file_extent_item *fi;
1000	struct extent_buffer *leaf;
1001	int ret;
1002
1003	path = btrfs_alloc_path();
1004	if (!path)
1005		return -ENOMEM;
1006
1007	bytenr -= BTRFS_I(reloc_inode)->index_cnt;
1008	ret = btrfs_lookup_file_extent(NULL, root, path,
1009			btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
1010	if (ret < 0)
1011		goto out;
1012	if (ret > 0) {
1013		ret = -ENOENT;
1014		goto out;
1015	}
1016
1017	leaf = path->nodes[0];
1018	fi = btrfs_item_ptr(leaf, path->slots[0],
1019			    struct btrfs_file_extent_item);
1020
1021	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
1022	       btrfs_file_extent_compression(leaf, fi) ||
1023	       btrfs_file_extent_encryption(leaf, fi) ||
1024	       btrfs_file_extent_other_encoding(leaf, fi));
1025
1026	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
1027		ret = -EINVAL;
1028		goto out;
1029	}
1030
1031	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1032	ret = 0;
1033out:
1034	btrfs_free_path(path);
1035	return ret;
1036}
1037
1038/*
1039 * update file extent items in the tree leaf to point to
1040 * the new locations.
1041 */
1042static noinline_for_stack
1043int replace_file_extents(struct btrfs_trans_handle *trans,
1044			 struct reloc_control *rc,
1045			 struct btrfs_root *root,
1046			 struct extent_buffer *leaf)
1047{
1048	struct btrfs_fs_info *fs_info = root->fs_info;
1049	struct btrfs_key key;
1050	struct btrfs_file_extent_item *fi;
1051	struct inode *inode = NULL;
1052	u64 parent;
1053	u64 bytenr;
1054	u64 new_bytenr = 0;
1055	u64 num_bytes;
1056	u64 end;
1057	u32 nritems;
1058	u32 i;
1059	int ret = 0;
1060	int first = 1;
1061	int dirty = 0;
1062
1063	if (rc->stage != UPDATE_DATA_PTRS)
1064		return 0;
1065
1066	/* reloc trees always use full backref */
1067	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1068		parent = leaf->start;
1069	else
1070		parent = 0;
1071
1072	nritems = btrfs_header_nritems(leaf);
1073	for (i = 0; i < nritems; i++) {
1074		struct btrfs_ref ref = { 0 };
1075
1076		cond_resched();
1077		btrfs_item_key_to_cpu(leaf, &key, i);
1078		if (key.type != BTRFS_EXTENT_DATA_KEY)
1079			continue;
1080		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
1081		if (btrfs_file_extent_type(leaf, fi) ==
1082		    BTRFS_FILE_EXTENT_INLINE)
1083			continue;
1084		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1085		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1086		if (bytenr == 0)
1087			continue;
1088		if (!in_range(bytenr, rc->block_group->start,
1089			      rc->block_group->length))
1090			continue;
1091
1092		/*
1093		 * if we are modifying a block in the fs tree, wait for readpage
1094		 * to complete and drop the extent cache
1095		 */
1096		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
1097			if (first) {
1098				inode = find_next_inode(root, key.objectid);
1099				first = 0;
1100			} else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
1101				btrfs_add_delayed_iput(inode);
1102				inode = find_next_inode(root, key.objectid);
1103			}
1104			if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
1105				end = key.offset +
1106				      btrfs_file_extent_num_bytes(leaf, fi);
1107				WARN_ON(!IS_ALIGNED(key.offset,
1108						    fs_info->sectorsize));
1109				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
1110				end--;
1111				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
1112						      key.offset, end);
1113				if (!ret)
1114					continue;
1115
1116				btrfs_drop_extent_cache(BTRFS_I(inode),
1117						key.offset, end, 1);
1118				unlock_extent(&BTRFS_I(inode)->io_tree,
1119					      key.offset, end);
1120			}
1121		}
1122
1123		ret = get_new_location(rc->data_inode, &new_bytenr,
1124				       bytenr, num_bytes);
1125		if (ret) {
1126			/*
1127			 * Don't have to abort since we've not changed anything
1128			 * in the file extent yet.
1129			 */
1130			break;
1131		}
1132
1133		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
1134		dirty = 1;
1135
1136		key.offset -= btrfs_file_extent_offset(leaf, fi);
1137		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
1138				       num_bytes, parent);
1139		ref.real_root = root->root_key.objectid;
1140		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
1141				    key.objectid, key.offset);
1142		ret = btrfs_inc_extent_ref(trans, &ref);
1143		if (ret) {
1144			btrfs_abort_transaction(trans, ret);
1145			break;
1146		}
1147
1148		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
1149				       num_bytes, parent);
1150		ref.real_root = root->root_key.objectid;
1151		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
1152				    key.objectid, key.offset);
1153		ret = btrfs_free_extent(trans, &ref);
1154		if (ret) {
1155			btrfs_abort_transaction(trans, ret);
1156			break;
1157		}
1158	}
1159	if (dirty)
1160		btrfs_mark_buffer_dirty(leaf);
1161	if (inode)
1162		btrfs_add_delayed_iput(inode);
1163	return ret;
1164}
1165
1166static noinline_for_stack
1167int memcmp_node_keys(struct extent_buffer *eb, int slot,
1168		     struct btrfs_path *path, int level)
1169{
1170	struct btrfs_disk_key key1;
1171	struct btrfs_disk_key key2;
1172	btrfs_node_key(eb, &key1, slot);
1173	btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
1174	return memcmp(&key1, &key2, sizeof(key1));
1175}
1176
1177/*
1178 * try to replace tree blocks in fs tree with the new blocks
1179 * in reloc tree. tree blocks that haven't been modified since the
1180 * reloc tree was created can be replaced.
1181 *
1182 * if a block was replaced, level of the block + 1 is returned.
1183 * if no block got replaced, 0 is returned. if there are other
1184 * errors, a negative error number is returned.
1185 */
1186static noinline_for_stack
1187int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
1188		 struct btrfs_root *dest, struct btrfs_root *src,
1189		 struct btrfs_path *path, struct btrfs_key *next_key,
1190		 int lowest_level, int max_level)
1191{
1192	struct btrfs_fs_info *fs_info = dest->fs_info;
1193	struct extent_buffer *eb;
1194	struct extent_buffer *parent;
1195	struct btrfs_ref ref = { 0 };
1196	struct btrfs_key key;
1197	u64 old_bytenr;
1198	u64 new_bytenr;
1199	u64 old_ptr_gen;
1200	u64 new_ptr_gen;
1201	u64 last_snapshot;
1202	u32 blocksize;
1203	int cow = 0;
1204	int level;
1205	int ret;
1206	int slot;
1207
1208	ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
1209	ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
1210
1211	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
1212again:
1213	slot = path->slots[lowest_level];
1214	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
1215
1216	eb = btrfs_lock_root_node(dest);
1217	btrfs_set_lock_blocking_write(eb);
1218	level = btrfs_header_level(eb);
1219
1220	if (level < lowest_level) {
1221		btrfs_tree_unlock(eb);
1222		free_extent_buffer(eb);
1223		return 0;
1224	}
1225
1226	if (cow) {
1227		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb,
1228				      BTRFS_NESTING_COW);
1229		BUG_ON(ret);
1230	}
1231	btrfs_set_lock_blocking_write(eb);
1232
1233	if (next_key) {
1234		next_key->objectid = (u64)-1;
1235		next_key->type = (u8)-1;
1236		next_key->offset = (u64)-1;
1237	}
1238
1239	parent = eb;
1240	while (1) {
1241		struct btrfs_key first_key;
1242
1243		level = btrfs_header_level(parent);
1244		ASSERT(level >= lowest_level);
1245
1246		ret = btrfs_bin_search(parent, &key, &slot);
1247		if (ret < 0)
1248			break;
1249		if (ret && slot > 0)
1250			slot--;
1251
1252		if (next_key && slot + 1 < btrfs_header_nritems(parent))
1253			btrfs_node_key_to_cpu(parent, next_key, slot + 1);
1254
1255		old_bytenr = btrfs_node_blockptr(parent, slot);
1256		blocksize = fs_info->nodesize;
1257		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
1258		btrfs_node_key_to_cpu(parent, &first_key, slot);
1259
1260		if (level <= max_level) {
1261			eb = path->nodes[level];
1262			new_bytenr = btrfs_node_blockptr(eb,
1263							path->slots[level]);
1264			new_ptr_gen = btrfs_node_ptr_generation(eb,
1265							path->slots[level]);
1266		} else {
1267			new_bytenr = 0;
1268			new_ptr_gen = 0;
1269		}
1270
1271		if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
1272			ret = level;
1273			break;
1274		}
1275
1276		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
1277		    memcmp_node_keys(parent, slot, path, level)) {
1278			if (level <= lowest_level) {
1279				ret = 0;
1280				break;
1281			}
1282
1283			eb = read_tree_block(fs_info, old_bytenr, old_ptr_gen,
1284					     level - 1, &first_key);
1285			if (IS_ERR(eb)) {
1286				ret = PTR_ERR(eb);
1287				break;
1288			} else if (!extent_buffer_uptodate(eb)) {
1289				ret = -EIO;
1290				free_extent_buffer(eb);
1291				break;
1292			}
1293			btrfs_tree_lock(eb);
1294			if (cow) {
1295				ret = btrfs_cow_block(trans, dest, eb, parent,
1296						      slot, &eb,
1297						      BTRFS_NESTING_COW);
1298				BUG_ON(ret);
1299			}
1300			btrfs_set_lock_blocking_write(eb);
1301
1302			btrfs_tree_unlock(parent);
1303			free_extent_buffer(parent);
1304
1305			parent = eb;
1306			continue;
1307		}
1308
1309		if (!cow) {
1310			btrfs_tree_unlock(parent);
1311			free_extent_buffer(parent);
1312			cow = 1;
1313			goto again;
1314		}
1315
1316		btrfs_node_key_to_cpu(path->nodes[level], &key,
1317				      path->slots[level]);
1318		btrfs_release_path(path);
1319
1320		path->lowest_level = level;
1321		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
1322		path->lowest_level = 0;
1323		BUG_ON(ret);
1324
1325		/*
1326		 * Info qgroup to trace both subtrees.
1327		 * Inform qgroup to trace both subtrees.
1328		 * We must trace both trees.
1329		 * 1) Tree reloc subtree
1330		 *    If not traced, we will leak data numbers
1331		 * 2) Fs subtree
1332		 *    If not traced, we will double count old data
1333		 *
1334		 * We don't scan the subtree right now, but only record
1335		 * the swapped tree blocks.
1336		 * The real subtree rescan is delayed until we have new
1337		 * CoW on the subtree root node before transaction commit.
1338		 */
1339		ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
1340				rc->block_group, parent, slot,
1341				path->nodes[level], path->slots[level],
1342				last_snapshot);
1343		if (ret < 0)
1344			break;
1345		/*
1346		 * swap blocks in fs tree and reloc tree.
1347		 */
1348		btrfs_set_node_blockptr(parent, slot, new_bytenr);
1349		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
1350		btrfs_mark_buffer_dirty(parent);
1351
1352		btrfs_set_node_blockptr(path->nodes[level],
1353					path->slots[level], old_bytenr);
1354		btrfs_set_node_ptr_generation(path->nodes[level],
1355					      path->slots[level], old_ptr_gen);
1356		btrfs_mark_buffer_dirty(path->nodes[level]);
1357
1358		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
1359				       blocksize, path->nodes[level]->start);
1360		ref.skip_qgroup = true;
1361		btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid);
1362		ret = btrfs_inc_extent_ref(trans, &ref);
1363		BUG_ON(ret);
1364		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
1365				       blocksize, 0);
1366		ref.skip_qgroup = true;
1367		btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid);
1368		ret = btrfs_inc_extent_ref(trans, &ref);
1369		BUG_ON(ret);
1370
1371		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr,
1372				       blocksize, path->nodes[level]->start);
1373		btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid);
1374		ref.skip_qgroup = true;
1375		ret = btrfs_free_extent(trans, &ref);
1376		BUG_ON(ret);
1377
1378		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr,
1379				       blocksize, 0);
1380		btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid);
1381		ref.skip_qgroup = true;
1382		ret = btrfs_free_extent(trans, &ref);
1383		BUG_ON(ret);
1384
1385		btrfs_unlock_up_safe(path, 0);
1386
1387		ret = level;
1388		break;
1389	}
1390	btrfs_tree_unlock(parent);
1391	free_extent_buffer(parent);
1392	return ret;
1393}
1394
1395/*
1396 * helper to find next relocated block in reloc tree
1397 */
1398static noinline_for_stack
1399int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1400		       int *level)
1401{
1402	struct extent_buffer *eb;
1403	int i;
1404	u64 last_snapshot;
1405	u32 nritems;
1406
1407	last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1408
1409	for (i = 0; i < *level; i++) {
1410		free_extent_buffer(path->nodes[i]);
1411		path->nodes[i] = NULL;
1412	}
1413
1414	for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
1415		eb = path->nodes[i];
1416		nritems = btrfs_header_nritems(eb);
1417		while (path->slots[i] + 1 < nritems) {
1418			path->slots[i]++;
1419			if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
1420			    last_snapshot)
1421				continue;
1422
1423			*level = i;
1424			return 0;
1425		}
1426		free_extent_buffer(path->nodes[i]);
1427		path->nodes[i] = NULL;
1428	}
1429	return 1;
1430}
1431
1432/*
1433 * walk down reloc tree to find relocated block of lowest level
1434 */
1435static noinline_for_stack
1436int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1437			 int *level)
1438{
1439	struct btrfs_fs_info *fs_info = root->fs_info;
1440	struct extent_buffer *eb = NULL;
1441	int i;
1442	u64 bytenr;
1443	u64 ptr_gen = 0;
1444	u64 last_snapshot;
1445	u32 nritems;
1446
1447	last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1448
1449	for (i = *level; i > 0; i--) {
1450		struct btrfs_key first_key;
1451
1452		eb = path->nodes[i];
1453		nritems = btrfs_header_nritems(eb);
1454		while (path->slots[i] < nritems) {
1455			ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
1456			if (ptr_gen > last_snapshot)
1457				break;
1458			path->slots[i]++;
1459		}
1460		if (path->slots[i] >= nritems) {
1461			if (i == *level)
1462				break;
1463			*level = i + 1;
1464			return 0;
1465		}
1466		if (i == 1) {
1467			*level = i;
1468			return 0;
1469		}
1470
1471		bytenr = btrfs_node_blockptr(eb, path->slots[i]);
1472		btrfs_node_key_to_cpu(eb, &first_key, path->slots[i]);
1473		eb = read_tree_block(fs_info, bytenr, ptr_gen, i - 1,
1474				     &first_key);
1475		if (IS_ERR(eb)) {
1476			return PTR_ERR(eb);
1477		} else if (!extent_buffer_uptodate(eb)) {
1478			free_extent_buffer(eb);
1479			return -EIO;
1480		}
1481		BUG_ON(btrfs_header_level(eb) != i - 1);
1482		path->nodes[i - 1] = eb;
1483		path->slots[i - 1] = 0;
1484	}
1485	return 1;
1486}
1487
1488/*
1489 * invalidate the extent cache for file extents whose keys are in the
1490 * range [min_key, max_key)
1491 */
1492static int invalidate_extent_cache(struct btrfs_root *root,
1493				   struct btrfs_key *min_key,
1494				   struct btrfs_key *max_key)
1495{
1496	struct btrfs_fs_info *fs_info = root->fs_info;
1497	struct inode *inode = NULL;
1498	u64 objectid;
1499	u64 start, end;
1500	u64 ino;
1501
1502	objectid = min_key->objectid;
1503	while (1) {
1504		cond_resched();
1505		iput(inode);
1506
1507		if (objectid > max_key->objectid)
1508			break;
1509
1510		inode = find_next_inode(root, objectid);
1511		if (!inode)
1512			break;
1513		ino = btrfs_ino(BTRFS_I(inode));
1514
1515		if (ino > max_key->objectid) {
1516			iput(inode);
1517			break;
1518		}
1519
1520		objectid = ino + 1;
1521		if (!S_ISREG(inode->i_mode))
1522			continue;
1523
1524		if (unlikely(min_key->objectid == ino)) {
1525			if (min_key->type > BTRFS_EXTENT_DATA_KEY)
1526				continue;
1527			if (min_key->type < BTRFS_EXTENT_DATA_KEY)
1528				start = 0;
1529			else {
1530				start = min_key->offset;
1531				WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
1532			}
1533		} else {
1534			start = 0;
1535		}
1536
1537		if (unlikely(max_key->objectid == ino)) {
1538			if (max_key->type < BTRFS_EXTENT_DATA_KEY)
1539				continue;
1540			if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
1541				end = (u64)-1;
1542			} else {
1543				if (max_key->offset == 0)
1544					continue;
1545				end = max_key->offset;
1546				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
1547				end--;
1548			}
1549		} else {
1550			end = (u64)-1;
1551		}
1552
1553		/* the lock_extent waits for readpage to complete */
1554		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
1555		btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1);
1556		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
1557	}
1558	return 0;
1559}
1560
1561static int find_next_key(struct btrfs_path *path, int level,
1562			 struct btrfs_key *key)
1564{
1565	while (level < BTRFS_MAX_LEVEL) {
1566		if (!path->nodes[level])
1567			break;
1568		if (path->slots[level] + 1 <
1569		    btrfs_header_nritems(path->nodes[level])) {
1570			btrfs_node_key_to_cpu(path->nodes[level], key,
1571					      path->slots[level] + 1);
1572			return 0;
1573		}
1574		level++;
1575	}
1576	return 1;
1577}
1578
1579/*
1580 * Insert current subvolume into reloc_control::dirty_subvol_roots
1581 */
1582static void insert_dirty_subvol(struct btrfs_trans_handle *trans,
1583				struct reloc_control *rc,
1584				struct btrfs_root *root)
1585{
1586	struct btrfs_root *reloc_root = root->reloc_root;
1587	struct btrfs_root_item *reloc_root_item;
1588
1589	/* @root must be a subvolume tree root with a valid reloc tree */
1590	ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
1591	ASSERT(reloc_root);
1592
1593	reloc_root_item = &reloc_root->root_item;
1594	memset(&reloc_root_item->drop_progress, 0,
1595		sizeof(reloc_root_item->drop_progress));
1596	reloc_root_item->drop_level = 0;
1597	btrfs_set_root_refs(reloc_root_item, 0);
1598	btrfs_update_reloc_root(trans, root);
1599
1600	if (list_empty(&root->reloc_dirty_list)) {
1601		btrfs_grab_root(root);
1602		list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
1603	}
1604}
1605
1606static int clean_dirty_subvols(struct reloc_control *rc)
1607{
1608	struct btrfs_root *root;
1609	struct btrfs_root *next;
1610	int ret = 0;
1611	int ret2;
1612
1613	list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
1614				 reloc_dirty_list) {
1615		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
1616			/* Merged subvolume, cleanup its reloc root */
1617			struct btrfs_root *reloc_root = root->reloc_root;
1618
1619			list_del_init(&root->reloc_dirty_list);
1620			root->reloc_root = NULL;
1621			/*
1622			 * Need barrier to ensure clear_bit() only happens after
1623			 * root->reloc_root = NULL. Pairs with have_reloc_root.
1624			 */
1625			smp_wmb();
1626			clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
1627			if (reloc_root) {
1628				/*
1629				 * btrfs_drop_snapshot drops our ref we hold for
1630				 * ->reloc_root.  If it fails however we must
1631				 * drop the ref ourselves.
1632				 */
1633				ret2 = btrfs_drop_snapshot(reloc_root, 0, 1);
1634				if (ret2 < 0) {
1635					btrfs_put_root(reloc_root);
1636					if (!ret)
1637						ret = ret2;
1638				}
1639			}
1640			btrfs_put_root(root);
1641		} else {
1642			/* Orphan reloc tree, just clean it up */
1643			ret2 = btrfs_drop_snapshot(root, 0, 1);
1644			if (ret2 < 0) {
1645				btrfs_put_root(root);
1646				if (!ret)
1647					ret = ret2;
1648			}
1649		}
1650	}
1651	return ret;
1652}
1653
1654/*
1655 * merge the relocated tree blocks in the reloc tree with the
1656 * corresponding fs tree.
1657 */
1658static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
1659					       struct btrfs_root *root)
1660{
1661	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
1662	struct btrfs_key key;
1663	struct btrfs_key next_key;
1664	struct btrfs_trans_handle *trans = NULL;
1665	struct btrfs_root *reloc_root;
1666	struct btrfs_root_item *root_item;
1667	struct btrfs_path *path;
1668	struct extent_buffer *leaf;
1669	int reserve_level;
1670	int level;
1671	int max_level;
1672	int replaced = 0;
1673	int ret;
1674	int err = 0;
1675	u32 min_reserved;
1676
1677	path = btrfs_alloc_path();
1678	if (!path)
1679		return -ENOMEM;
1680	path->reada = READA_FORWARD;
1681
1682	reloc_root = root->reloc_root;
1683	root_item = &reloc_root->root_item;
1684
1685	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
1686		level = btrfs_root_level(root_item);
1687		atomic_inc(&reloc_root->node->refs);
1688		path->nodes[level] = reloc_root->node;
1689		path->slots[level] = 0;
1690	} else {
1691		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
1692
1693		level = root_item->drop_level;
1694		BUG_ON(level == 0);
1695		path->lowest_level = level;
1696		ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
1697		path->lowest_level = 0;
1698		if (ret < 0) {
1699			btrfs_free_path(path);
1700			return ret;
1701		}
1702
1703		btrfs_node_key_to_cpu(path->nodes[level], &next_key,
1704				      path->slots[level]);
1705		WARN_ON(memcmp(&key, &next_key, sizeof(key)));
1706
1707		btrfs_unlock_up_safe(path, 0);
1708	}
1709
1710	/*
1711	 * In merge_reloc_root(), we modify the upper level pointer to swap the
1712	 * tree blocks between reloc tree and subvolume tree.  Thus for tree
1713	 * block COW, we COW at most from level 1 to root level for each tree.
1714	 *
1715	 * Thus the needed metadata size is at most root_level * nodesize * 2
1716	 * (two trees to COW), e.g. 16KiB * 3 * 2 = 96KiB for a level-3 root.
1717	 */
1718	reserve_level = max_t(int, 1, btrfs_root_level(root_item));
1719	min_reserved = fs_info->nodesize * reserve_level * 2;
1720	memset(&next_key, 0, sizeof(next_key));
1721
1722	while (1) {
1723		ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
1724					     BTRFS_RESERVE_FLUSH_LIMIT);
1725		if (ret) {
1726			err = ret;
1727			goto out;
1728		}
1729		trans = btrfs_start_transaction(root, 0);
1730		if (IS_ERR(trans)) {
1731			err = PTR_ERR(trans);
1732			trans = NULL;
1733			goto out;
1734		}
1735
1736		/*
1737		 * At this point we no longer have a reloc_control, so we can't
1738		 * depend on btrfs_init_reloc_root to update our last_trans.
1739		 *
1740		 * But that's ok, we started the trans handle on our
1741		 * corresponding fs_root, which means it's been added to the
1742		 * dirty list.  At commit time we'll still call
1743		 * btrfs_update_reloc_root() and update our root item
1744		 * appropriately.
1745		 */
1746		reloc_root->last_trans = trans->transid;
1747		trans->block_rsv = rc->block_rsv;
1748
1749		replaced = 0;
1750		max_level = level;
1751
1752		ret = walk_down_reloc_tree(reloc_root, path, &level);
1753		if (ret < 0) {
1754			err = ret;
1755			goto out;
1756		}
1757		if (ret > 0)
1758			break;
1759
1760		if (!find_next_key(path, level, &key) &&
1761		    btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
1762			ret = 0;
1763		} else {
1764			ret = replace_path(trans, rc, root, reloc_root, path,
1765					   &next_key, level, max_level);
1766		}
1767		if (ret < 0) {
1768			err = ret;
1769			goto out;
1770		}
1771
1772		if (ret > 0) {
1773			level = ret;
1774			btrfs_node_key_to_cpu(path->nodes[level], &key,
1775					      path->slots[level]);
1776			replaced = 1;
1777		}
1778
1779		ret = walk_up_reloc_tree(reloc_root, path, &level);
1780		if (ret > 0)
1781			break;
1782
1783		BUG_ON(level == 0);
1784		/*
1785		 * save the merging progress in the drop_progress.
1786		 * this is OK since root refs == 1 in this case.
1787		 */
1788		btrfs_node_key(path->nodes[level], &root_item->drop_progress,
1789			       path->slots[level]);
1790		root_item->drop_level = level;
1791
1792		btrfs_end_transaction_throttle(trans);
1793		trans = NULL;
1794
1795		btrfs_btree_balance_dirty(fs_info);
1796
1797		if (replaced && rc->stage == UPDATE_DATA_PTRS)
1798			invalidate_extent_cache(root, &key, &next_key);
1799	}
1800
1801	/*
1802	 * handle the case where only one block in the fs tree needs to
1803	 * be relocated and the block is the tree root.
1804	 */
1805	leaf = btrfs_lock_root_node(root);
1806	ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf,
1807			      BTRFS_NESTING_COW);
1808	btrfs_tree_unlock(leaf);
1809	free_extent_buffer(leaf);
1810	if (ret < 0)
1811		err = ret;
1812out:
1813	btrfs_free_path(path);
1814
1815	if (err == 0)
1816		insert_dirty_subvol(trans, rc, root);
1817
1818	if (trans)
1819		btrfs_end_transaction_throttle(trans);
1820
1821	btrfs_btree_balance_dirty(fs_info);
1822
1823	if (replaced && rc->stage == UPDATE_DATA_PTRS)
1824		invalidate_extent_cache(root, &key, &next_key);
1825
1826	return err;
1827}
1828
1829static noinline_for_stack
1830int prepare_to_merge(struct reloc_control *rc, int err)
1831{
1832	struct btrfs_root *root = rc->extent_root;
1833	struct btrfs_fs_info *fs_info = root->fs_info;
1834	struct btrfs_root *reloc_root;
1835	struct btrfs_trans_handle *trans;
1836	LIST_HEAD(reloc_roots);
1837	u64 num_bytes = 0;
1838	int ret;
1839
1840	mutex_lock(&fs_info->reloc_mutex);
1841	rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
1842	rc->merging_rsv_size += rc->nodes_relocated * 2;
1843	mutex_unlock(&fs_info->reloc_mutex);
1844
1845again:
1846	if (!err) {
1847		num_bytes = rc->merging_rsv_size;
1848		ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
1849					  BTRFS_RESERVE_FLUSH_ALL);
1850		if (ret)
1851			err = ret;
1852	}
1853
1854	trans = btrfs_join_transaction(rc->extent_root);
1855	if (IS_ERR(trans)) {
1856		if (!err)
1857			btrfs_block_rsv_release(fs_info, rc->block_rsv,
1858						num_bytes, NULL);
1859		return PTR_ERR(trans);
1860	}
1861
1862	if (!err) {
1863		if (num_bytes != rc->merging_rsv_size) {
1864			btrfs_end_transaction(trans);
1865			btrfs_block_rsv_release(fs_info, rc->block_rsv,
1866						num_bytes, NULL);
1867			goto again;
1868		}
1869	}
1870
1871	rc->merge_reloc_tree = 1;
1872
1873	while (!list_empty(&rc->reloc_roots)) {
1874		reloc_root = list_entry(rc->reloc_roots.next,
1875					struct btrfs_root, root_list);
1876		list_del_init(&reloc_root->root_list);
1877
1878		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
1879				false);
1880		BUG_ON(IS_ERR(root));
1881		BUG_ON(root->reloc_root != reloc_root);
1882
1883		/*
1884		 * set reference count to 1, so btrfs_recover_relocation
1885		 * knows it should resume merging
1886		 */
1887		if (!err)
1888			btrfs_set_root_refs(&reloc_root->root_item, 1);
1889		btrfs_update_reloc_root(trans, root);
1890
1891		list_add(&reloc_root->root_list, &reloc_roots);
1892		btrfs_put_root(root);
1893	}
1894
1895	list_splice(&reloc_roots, &rc->reloc_roots);
1896
1897	if (!err)
1898		err = btrfs_commit_transaction(trans);
1899	else
1900		btrfs_end_transaction(trans);
1901	return err;
1902}
1903
1904static noinline_for_stack
1905void free_reloc_roots(struct list_head *list)
1906{
1907	struct btrfs_root *reloc_root, *tmp;
1908
1909	list_for_each_entry_safe(reloc_root, tmp, list, root_list)
1910		__del_reloc_root(reloc_root);
1911}
1912
1913static noinline_for_stack
1914void merge_reloc_roots(struct reloc_control *rc)
1915{
1916	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
1917	struct btrfs_root *root;
1918	struct btrfs_root *reloc_root;
1919	LIST_HEAD(reloc_roots);
1920	int found = 0;
1921	int ret = 0;
1922again:
1923	root = rc->extent_root;
1924
1925	/*
1926	 * this serializes us with btrfs_record_root_in_transaction;
1927	 * we have to make sure nobody is in the middle of
1928	 * adding their roots to the list while we are
1929	 * doing this splice
1930	 */
1931	mutex_lock(&fs_info->reloc_mutex);
1932	list_splice_init(&rc->reloc_roots, &reloc_roots);
1933	mutex_unlock(&fs_info->reloc_mutex);
1934
1935	while (!list_empty(&reloc_roots)) {
1936		found = 1;
1937		reloc_root = list_entry(reloc_roots.next,
1938					struct btrfs_root, root_list);
1939
1940		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
1941					 false);
1942		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
1943			BUG_ON(IS_ERR(root));
1944			BUG_ON(root->reloc_root != reloc_root);
1945			ret = merge_reloc_root(rc, root);
1946			btrfs_put_root(root);
1947			if (ret) {
1948				if (list_empty(&reloc_root->root_list))
1949					list_add_tail(&reloc_root->root_list,
1950						      &reloc_roots);
1951				goto out;
1952			}
1953		} else {
1954			if (!IS_ERR(root)) {
1955				if (root->reloc_root == reloc_root) {
1956					root->reloc_root = NULL;
1957					btrfs_put_root(reloc_root);
1958				}
1959				clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE,
1960					  &root->state);
1961				btrfs_put_root(root);
1962			}
1963
1964			list_del_init(&reloc_root->root_list);
1965			/* Don't forget to queue this reloc root for cleanup */
1966			list_add_tail(&reloc_root->reloc_dirty_list,
1967				      &rc->dirty_subvol_roots);
1968		}
1969	}
1970
1971	if (found) {
1972		found = 0;
1973		goto again;
1974	}
1975out:
1976	if (ret) {
1977		btrfs_handle_fs_error(fs_info, ret, NULL);
1978		free_reloc_roots(&reloc_roots);
1979
1980		/* new reloc root may be added */
1981		mutex_lock(&fs_info->reloc_mutex);
1982		list_splice_init(&rc->reloc_roots, &reloc_roots);
1983		mutex_unlock(&fs_info->reloc_mutex);
1984		free_reloc_roots(&reloc_roots);
1985	}
1986
1987	/*
1988	 * We used to have
1989	 *
1990	 * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
1991	 *
1992	 * here, but it's wrong.  If we fail to start the transaction in
1993	 * prepare_to_merge() we will have only 0 ref reloc roots, none of which
1994	 * have actually been removed from the reloc_root_tree rb tree.  This is
1995	 * fine because we're bailing here, and we hold a reference on the root
1996	 * for the list that holds it, so these roots will be cleaned up when we
1997	 * do the reloc_dirty_list afterwards.  Meanwhile the root->reloc_root
1998	 * will be cleaned up on unmount.
1999	 *
2000	 * The remaining nodes will be cleaned up by free_reloc_control.
2001	 */
2002}
2003
2004static void free_block_list(struct rb_root *blocks)
2005{
2006	struct tree_block *block;
2007	struct rb_node *rb_node;
2008	while ((rb_node = rb_first(blocks))) {
2009		block = rb_entry(rb_node, struct tree_block, rb_node);
2010		rb_erase(rb_node, blocks);
2011		kfree(block);
2012	}
2013}
2014
2015static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
2016				      struct btrfs_root *reloc_root)
2017{
2018	struct btrfs_fs_info *fs_info = reloc_root->fs_info;
2019	struct btrfs_root *root;
2020	int ret;
2021
2022	if (reloc_root->last_trans == trans->transid)
2023		return 0;
2024
2025	root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false);
2026	BUG_ON(IS_ERR(root));
2027	BUG_ON(root->reloc_root != reloc_root);
2028	ret = btrfs_record_root_in_trans(trans, root);
2029	btrfs_put_root(root);
2030
2031	return ret;
2032}
2033
2034static noinline_for_stack
2035struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
2036				     struct reloc_control *rc,
2037				     struct btrfs_backref_node *node,
2038				     struct btrfs_backref_edge *edges[])
2039{
2040	struct btrfs_backref_node *next;
2041	struct btrfs_root *root;
2042	int index = 0;
2043
2044	next = node;
2045	while (1) {
2046		cond_resched();
2047		next = walk_up_backref(next, edges, &index);
2048		root = next->root;
2049		BUG_ON(!root);
2050		BUG_ON(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state));
2051
2052		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
2053			record_reloc_root_in_trans(trans, root);
2054			break;
2055		}
2056
2057		btrfs_record_root_in_trans(trans, root);
2058		root = root->reloc_root;
2059
2060		if (next->new_bytenr != root->node->start) {
2061			BUG_ON(next->new_bytenr);
2062			BUG_ON(!list_empty(&next->list));
2063			next->new_bytenr = root->node->start;
2064			btrfs_put_root(next->root);
2065			next->root = btrfs_grab_root(root);
2066			ASSERT(next->root);
2067			list_add_tail(&next->list,
2068				      &rc->backref_cache.changed);
2069			mark_block_processed(rc, next);
2070			break;
2071		}
2072
2073		WARN_ON(1);
2074		root = NULL;
2075		next = walk_down_backref(edges, &index);
2076		if (!next || next->level <= node->level)
2077			break;
2078	}
2079	if (!root)
2080		return NULL;
2081
2082	next = node;
2083	/* setup backref node path for btrfs_reloc_cow_block */
2084	while (1) {
2085		rc->backref_cache.path[next->level] = next;
2086		if (--index < 0)
2087			break;
2088		next = edges[index]->node[UPPER];
2089	}
2090	return root;
2091}
2092
2093/*
2094 * Select a tree root for relocation.
2095 *
2096 * Return the root of a non-shareable tree directly if the block belongs
2097 * to such a tree, or the fs root if the block is the root of a shareable
2098 * tree. Return NULL if the block is referenced by upper level blocks; the
2099 * caller should use do_relocation() to update the pointers in that case.
2100 * Return ERR_PTR(-ENOENT) if the block is only referenced by reloc trees.
2101 */
2102static noinline_for_stack
2103struct btrfs_root *select_one_root(struct btrfs_backref_node *node)
2104{
2105	struct btrfs_backref_node *next;
2106	struct btrfs_root *root;
2107	struct btrfs_root *fs_root = NULL;
2108	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2109	int index = 0;
2110
2111	next = node;
2112	while (1) {
2113		cond_resched();
2114		next = walk_up_backref(next, edges, &index);
2115		root = next->root;
2116		BUG_ON(!root);
2117
2118		/* No other choice for non-shareable tree */
2119		if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2120			return root;
2121
2122		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
2123			fs_root = root;
2124
2125		if (next != node)
2126			return NULL;
2127
2128		next = walk_down_backref(edges, &index);
2129		if (!next || next->level <= node->level)
2130			break;
2131	}
2132
2133	if (!fs_root)
2134		return ERR_PTR(-ENOENT);
2135	return fs_root;
2136}
2137
2138static noinline_for_stack
2139u64 calcu_metadata_size(struct reloc_control *rc,
2140			struct btrfs_backref_node *node, int reserve)
2141{
2142	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2143	struct btrfs_backref_node *next = node;
2144	struct btrfs_backref_edge *edge;
2145	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2146	u64 num_bytes = 0;
2147	int index = 0;
2148
2149	BUG_ON(reserve && node->processed);
2150
2151	while (next) {
2152		cond_resched();
2153		while (1) {
2154			if (next->processed && (reserve || next != node))
2155				break;
2156
2157			num_bytes += fs_info->nodesize;
2158
2159			if (list_empty(&next->upper))
2160				break;
2161
2162			edge = list_entry(next->upper.next,
2163					struct btrfs_backref_edge, list[LOWER]);
2164			edges[index++] = edge;
2165			next = edge->node[UPPER];
2166		}
2167		next = walk_down_backref(edges, &index);
2168	}
2169	return num_bytes;
2170}
2171
2172static int reserve_metadata_space(struct btrfs_trans_handle *trans,
2173				  struct reloc_control *rc,
2174				  struct btrfs_backref_node *node)
2175{
2176	struct btrfs_root *root = rc->extent_root;
2177	struct btrfs_fs_info *fs_info = root->fs_info;
2178	u64 num_bytes;
2179	int ret;
2180	u64 tmp;
2181
2182	num_bytes = calcu_metadata_size(rc, node, 1) * 2;
2183
2184	trans->block_rsv = rc->block_rsv;
2185	rc->reserved_bytes += num_bytes;
2186
2187	/*
2188	 * We are under a transaction here so we can only do limited flushing.
2189	 * If we get an enospc just kick back -EAGAIN so we know to drop the
2190	 * transaction and try to refill when we can flush all the things.
2191	 */
2192	ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes,
2193				BTRFS_RESERVE_FLUSH_LIMIT);
2194	if (ret) {
2195		tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
2196		while (tmp <= rc->reserved_bytes)
2197			tmp <<= 1;
2198		/*
2199		 * Only one thread can access block_rsv at this point,
2200		 * so we don't need to hold a lock to protect it.
2201		 * We expand the reservation size here to allow enough
2202		 * space for relocation and we will return early in the
2203		 * ENOSPC case.
2204		 */
2205		rc->block_rsv->size = tmp + fs_info->nodesize *
2206				      RELOCATION_RESERVED_NODES;
2207		return -EAGAIN;
2208	}
2209
2210	return 0;
2211}
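
/*
 * A worked example of the grow-on-ENOSPC path above (a sketch, assuming
 * the default 16K nodesize, so the base reservation is 16384 * 256 = 4M):
 *
 *	rc->reserved_bytes = 6M
 *	tmp = 4M                         (nodesize * RELOCATION_RESERVED_NODES)
 *	tmp <<= 1                        -> 8M, now above reserved_bytes
 *	rc->block_rsv->size = 8M + 4M    -> 12M for the retry after -EAGAIN
 */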
2212
2213/*
2214 * Relocate a tree block, then update pointers in upper level
2215 * blocks that reference the block to point to the new location.
2216 *
2217 * If called by link_to_upper, the block has already been relocated.
2218 * In that case this function just updates pointers.
2219 */
2220static int do_relocation(struct btrfs_trans_handle *trans,
2221			 struct reloc_control *rc,
2222			 struct btrfs_backref_node *node,
2223			 struct btrfs_key *key,
2224			 struct btrfs_path *path, int lowest)
2225{
2226	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2227	struct btrfs_backref_node *upper;
2228	struct btrfs_backref_edge *edge;
2229	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2230	struct btrfs_root *root;
2231	struct extent_buffer *eb;
2232	u32 blocksize;
2233	u64 bytenr;
2234	u64 generation;
2235	int slot;
2236	int ret;
2237	int err = 0;
2238
2239	BUG_ON(lowest && node->eb);
2240
2241	path->lowest_level = node->level + 1;
2242	rc->backref_cache.path[node->level] = node;
2243	list_for_each_entry(edge, &node->upper, list[LOWER]) {
2244		struct btrfs_key first_key;
2245		struct btrfs_ref ref = { 0 };
2246
2247		cond_resched();
2248
2249		upper = edge->node[UPPER];
2250		root = select_reloc_root(trans, rc, upper, edges);
2251		BUG_ON(!root);
2252
2253		if (upper->eb && !upper->locked) {
2254			if (!lowest) {
2255				ret = btrfs_bin_search(upper->eb, key, &slot);
2256				if (ret < 0) {
2257					err = ret;
2258					goto next;
2259				}
2260				BUG_ON(ret);
2261				bytenr = btrfs_node_blockptr(upper->eb, slot);
2262				if (node->eb->start == bytenr)
2263					goto next;
2264			}
2265			btrfs_backref_drop_node_buffer(upper);
2266		}
2267
2268		if (!upper->eb) {
2269			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2270			if (ret) {
2271				if (ret < 0)
2272					err = ret;
2273				else
2274					err = -ENOENT;
2275
2276				btrfs_release_path(path);
2277				break;
2278			}
2279
2280			if (!upper->eb) {
2281				upper->eb = path->nodes[upper->level];
2282				path->nodes[upper->level] = NULL;
2283			} else {
2284				BUG_ON(upper->eb != path->nodes[upper->level]);
2285			}
2286
2287			upper->locked = 1;
2288			path->locks[upper->level] = 0;
2289
2290			slot = path->slots[upper->level];
2291			btrfs_release_path(path);
2292		} else {
2293			ret = btrfs_bin_search(upper->eb, key, &slot);
2294			if (ret < 0) {
2295				err = ret;
2296				goto next;
2297			}
2298			BUG_ON(ret);
2299		}
2300
2301		bytenr = btrfs_node_blockptr(upper->eb, slot);
2302		if (lowest) {
2303			if (bytenr != node->bytenr) {
2304				btrfs_err(root->fs_info,
2305		"lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
2306					  bytenr, node->bytenr, slot,
2307					  upper->eb->start);
2308				err = -EIO;
2309				goto next;
2310			}
2311		} else {
2312			if (node->eb->start == bytenr)
2313				goto next;
2314		}
2315
2316		blocksize = root->fs_info->nodesize;
2317		generation = btrfs_node_ptr_generation(upper->eb, slot);
2318		btrfs_node_key_to_cpu(upper->eb, &first_key, slot);
2319		eb = read_tree_block(fs_info, bytenr, generation,
2320				     upper->level - 1, &first_key);
2321		if (IS_ERR(eb)) {
2322			err = PTR_ERR(eb);
2323			goto next;
2324		} else if (!extent_buffer_uptodate(eb)) {
2325			free_extent_buffer(eb);
2326			err = -EIO;
2327			goto next;
2328		}
2329		btrfs_tree_lock(eb);
2330		btrfs_set_lock_blocking_write(eb);
2331
2332		if (!node->eb) {
2333			ret = btrfs_cow_block(trans, root, eb, upper->eb,
2334					      slot, &eb, BTRFS_NESTING_COW);
2335			btrfs_tree_unlock(eb);
2336			free_extent_buffer(eb);
2337			if (ret < 0) {
2338				err = ret;
2339				goto next;
2340			}
2341			BUG_ON(node->eb != eb);
2342		} else {
2343			btrfs_set_node_blockptr(upper->eb, slot,
2344						node->eb->start);
2345			btrfs_set_node_ptr_generation(upper->eb, slot,
2346						      trans->transid);
2347			btrfs_mark_buffer_dirty(upper->eb);
2348
2349			btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2350					       node->eb->start, blocksize,
2351					       upper->eb->start);
2352			ref.real_root = root->root_key.objectid;
2353			btrfs_init_tree_ref(&ref, node->level,
2354					    btrfs_header_owner(upper->eb));
2355			ret = btrfs_inc_extent_ref(trans, &ref);
2356			BUG_ON(ret);
2357
2358			ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
2359			BUG_ON(ret);
2360		}
2361next:
2362		if (!upper->pending)
2363			btrfs_backref_drop_node_buffer(upper);
2364		else
2365			btrfs_backref_unlock_node_buffer(upper);
2366		if (err)
2367			break;
2368	}
2369
2370	if (!err && node->pending) {
2371		btrfs_backref_drop_node_buffer(node);
2372		list_move_tail(&node->list, &rc->backref_cache.changed);
2373		node->pending = 0;
2374	}
2375
2376	path->lowest_level = 0;
2377	BUG_ON(err == -ENOSPC);
2378	return err;
2379}
2380
2381static int link_to_upper(struct btrfs_trans_handle *trans,
2382			 struct reloc_control *rc,
2383			 struct btrfs_backref_node *node,
2384			 struct btrfs_path *path)
2385{
2386	struct btrfs_key key;
2387
2388	btrfs_node_key_to_cpu(node->eb, &key, 0);
2389	return do_relocation(trans, rc, node, &key, path, 0);
2390}
2391
2392static int finish_pending_nodes(struct btrfs_trans_handle *trans,
2393				struct reloc_control *rc,
2394				struct btrfs_path *path, int err)
2395{
2396	LIST_HEAD(list);
2397	struct btrfs_backref_cache *cache = &rc->backref_cache;
2398	struct btrfs_backref_node *node;
2399	int level;
2400	int ret;
2401
2402	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2403		while (!list_empty(&cache->pending[level])) {
2404			node = list_entry(cache->pending[level].next,
2405					  struct btrfs_backref_node, list);
2406			list_move_tail(&node->list, &list);
2407			BUG_ON(!node->pending);
2408
2409			if (!err) {
2410				ret = link_to_upper(trans, rc, node, path);
2411				if (ret < 0)
2412					err = ret;
2413			}
2414		}
2415		list_splice_init(&list, &cache->pending[level]);
2416	}
2417	return err;
2418}
2419
2420/*
2421 * Mark a block and all blocks that directly/indirectly reference it
2422 * as processed.
2423 */
2424static void update_processed_blocks(struct reloc_control *rc,
2425				    struct btrfs_backref_node *node)
2426{
2427	struct btrfs_backref_node *next = node;
2428	struct btrfs_backref_edge *edge;
2429	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2430	int index = 0;
2431
2432	while (next) {
2433		cond_resched();
2434		while (1) {
2435			if (next->processed)
2436				break;
2437
2438			mark_block_processed(rc, next);
2439
2440			if (list_empty(&next->upper))
2441				break;
2442
2443			edge = list_entry(next->upper.next,
2444					struct btrfs_backref_edge, list[LOWER]);
2445			edges[index++] = edge;
2446			next = edge->node[UPPER];
2447		}
2448		next = walk_down_backref(edges, &index);
2449	}
2450}
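
/*
 * calcu_metadata_size() and update_processed_blocks() above share the
 * same traversal: an iterative DFS over the backref cache, using
 * edges[] as an explicit stack.  A minimal sketch of the pattern, where
 * visit() stands for the per-node work and is hypothetical:
 *
 *	while (next) {
 *		while (1) {
 *			visit(next);
 *			if (list_empty(&next->upper))
 *				break;
 *			edge = list_entry(next->upper.next,
 *					struct btrfs_backref_edge, list[LOWER]);
 *			edges[index++] = edge;			<- push
 *			next = edge->node[UPPER];		<- walk up
 *		}
 *		next = walk_down_backref(edges, &index);	<- pop a branch
 *	}
 */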
2451
2452static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
2453{
2454	u32 blocksize = rc->extent_root->fs_info->nodesize;
2455
2456	if (test_range_bit(&rc->processed_blocks, bytenr,
2457			   bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
2458		return 1;
2459	return 0;
2460}
2461
2462static int get_tree_block_key(struct btrfs_fs_info *fs_info,
2463			      struct tree_block *block)
2464{
2465	struct extent_buffer *eb;
2466
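	/*
	 * block->key.offset still holds the extent generation recorded by
	 * add_tree_block(); use it as the expected transid for the read,
	 * then replace the whole key with the block's real first key below.
	 */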
2467	eb = read_tree_block(fs_info, block->bytenr, block->key.offset,
2468			     block->level, NULL);
2469	if (IS_ERR(eb)) {
2470		return PTR_ERR(eb);
2471	} else if (!extent_buffer_uptodate(eb)) {
2472		free_extent_buffer(eb);
2473		return -EIO;
2474	}
2475	if (block->level == 0)
2476		btrfs_item_key_to_cpu(eb, &block->key, 0);
2477	else
2478		btrfs_node_key_to_cpu(eb, &block->key, 0);
2479	free_extent_buffer(eb);
2480	block->key_ready = 1;
2481	return 0;
2482}
2483
2484/*
2485 * helper function to relocate a tree block
2486 */
2487static int relocate_tree_block(struct btrfs_trans_handle *trans,
2488				struct reloc_control *rc,
2489				struct btrfs_backref_node *node,
2490				struct btrfs_key *key,
2491				struct btrfs_path *path)
2492{
2493	struct btrfs_root *root;
2494	int ret = 0;
2495
2496	if (!node)
2497		return 0;
2498
2499	/*
2500	 * If we fail here we want to drop our backref_node because we are going
2501	 * to start over and regenerate the tree for it.
2502	 */
2503	ret = reserve_metadata_space(trans, rc, node);
2504	if (ret)
2505		goto out;
2506
2507	BUG_ON(node->processed);
2508	root = select_one_root(node);
2509	if (root == ERR_PTR(-ENOENT)) {
2510		update_processed_blocks(rc, node);
2511		goto out;
2512	}
2513
2514	if (root) {
2515		if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
2516			BUG_ON(node->new_bytenr);
2517			BUG_ON(!list_empty(&node->list));
2518			btrfs_record_root_in_trans(trans, root);
2519			root = root->reloc_root;
2520			node->new_bytenr = root->node->start;
2521			btrfs_put_root(node->root);
2522			node->root = btrfs_grab_root(root);
2523			ASSERT(node->root);
2524			list_add_tail(&node->list, &rc->backref_cache.changed);
2525		} else {
2526			path->lowest_level = node->level;
2527			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2528			btrfs_release_path(path);
2529			if (ret > 0)
2530				ret = 0;
2531		}
2532		if (!ret)
2533			update_processed_blocks(rc, node);
2534	} else {
2535		ret = do_relocation(trans, rc, node, key, path, 1);
2536	}
2537out:
2538	if (ret || node->level == 0 || node->cowonly)
2539		btrfs_backref_cleanup_node(&rc->backref_cache, node);
2540	return ret;
2541}
2542
2543/*
2544 * relocate a list of blocks
2545 */
2546static noinline_for_stack
2547int relocate_tree_blocks(struct btrfs_trans_handle *trans,
2548			 struct reloc_control *rc, struct rb_root *blocks)
2549{
2550	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2551	struct btrfs_backref_node *node;
2552	struct btrfs_path *path;
2553	struct tree_block *block;
2554	struct tree_block *next;
2555	int ret;
2556	int err = 0;
2557
2558	path = btrfs_alloc_path();
2559	if (!path) {
2560		err = -ENOMEM;
2561		goto out_free_blocks;
2562	}
2563
2564	/* Kick in readahead for tree blocks with missing keys */
2565	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2566		if (!block->key_ready)
2567			readahead_tree_block(fs_info, block->bytenr);
2568	}
2569
2570	/* Get first keys */
2571	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2572		if (!block->key_ready) {
2573			err = get_tree_block_key(fs_info, block);
2574			if (err)
2575				goto out_free_path;
2576		}
2577	}
2578
2579	/* Do tree relocation */
2580	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
2581		node = build_backref_tree(rc, &block->key,
2582					  block->level, block->bytenr);
2583		if (IS_ERR(node)) {
2584			err = PTR_ERR(node);
2585			goto out;
2586		}
2587
2588		ret = relocate_tree_block(trans, rc, node, &block->key,
2589					  path);
2590		if (ret < 0) {
2591			err = ret;
2592			break;
2593		}
2594	}
2595out:
2596	err = finish_pending_nodes(trans, rc, path, err);
2597
2598out_free_path:
2599	btrfs_free_path(path);
2600out_free_blocks:
2601	free_block_list(blocks);
2602	return err;
2603}
2604
2605static noinline_for_stack int prealloc_file_extent_cluster(
2606				struct btrfs_inode *inode,
2607				struct file_extent_cluster *cluster)
2608{
2609	u64 alloc_hint = 0;
2610	u64 start;
2611	u64 end;
2612	u64 offset = inode->index_cnt;
2613	u64 num_bytes;
2614	int nr;
2615	int ret = 0;
2616	u64 prealloc_start = cluster->start - offset;
2617	u64 prealloc_end = cluster->end - offset;
2618	u64 cur_offset = prealloc_start;
2619
2620	BUG_ON(cluster->start != cluster->boundary[0]);
2621	ret = btrfs_alloc_data_chunk_ondemand(inode,
2622					      prealloc_end + 1 - prealloc_start);
2623	if (ret)
2624		return ret;
2625
2626	inode_lock(&inode->vfs_inode);
2627	for (nr = 0; nr < cluster->nr; nr++) {
2628		start = cluster->boundary[nr] - offset;
2629		if (nr + 1 < cluster->nr)
2630			end = cluster->boundary[nr + 1] - 1 - offset;
2631		else
2632			end = cluster->end - offset;
2633
2634		lock_extent(&inode->io_tree, start, end);
2635		num_bytes = end + 1 - start;
2636		ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start,
2637						num_bytes, num_bytes,
2638						end + 1, &alloc_hint);
2639		cur_offset = end + 1;
2640		unlock_extent(&inode->io_tree, start, end);
2641		if (ret)
2642			break;
2643	}
2644	inode_unlock(&inode->vfs_inode);
2645
2646	if (cur_offset < prealloc_end)
2647		btrfs_free_reserved_data_space_noquota(inode->root->fs_info,
2648					       prealloc_end + 1 - cur_offset);
2649	return ret;
2650}
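
/*
 * A worked example of the offset math above (a sketch): index_cnt is the
 * start of the block group being relocated, so with index_cnt = 1G and a
 * two-extent cluster where boundary[] = {1G+4M, 1G+12M} and
 * cluster->end = 1G+16M-1, the loop preallocates two file ranges in the
 * data reloc inode:
 *
 *	nr = 0:  [4M, 12M-1]	(= boundary[1] - 1 - offset)
 *	nr = 1:  [12M, 16M-1]	(= cluster->end - offset)
 */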
2651
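/*
 * Pre-create a single pinned extent map covering the whole cluster and
 * pointing at the old data location, so that reads during relocation see
 * the existing data while it is copied to the new location.
 */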
2652static noinline_for_stack
2653int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
2654			 u64 block_start)
2655{
2656	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2657	struct extent_map *em;
2658	int ret = 0;
2659
2660	em = alloc_extent_map();
2661	if (!em)
2662		return -ENOMEM;
2663
2664	em->start = start;
2665	em->len = end + 1 - start;
2666	em->block_len = em->len;
2667	em->block_start = block_start;
2668	set_bit(EXTENT_FLAG_PINNED, &em->flags);
2669
2670	lock_extent(&BTRFS_I(inode)->io_tree, start, end);
2671	while (1) {
2672		write_lock(&em_tree->lock);
2673		ret = add_extent_mapping(em_tree, em, 0);
2674		write_unlock(&em_tree->lock);
2675		if (ret != -EEXIST) {
2676			free_extent_map(em);
2677			break;
2678		}
2679		btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
2680	}
2681	unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
2682	return ret;
2683}
2684
2685/*
2686 * Allow error injection to test balance cancellation
2687 */
2688int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
2689{
2690	return atomic_read(&fs_info->balance_cancel_req) ||
2691		fatal_signal_pending(current);
2692}
2693ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
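
/*
 * The TRUE injection type above means an injected failure makes the
 * function return true, i.e. a cancelled balance.  With
 * CONFIG_FAIL_FUNCTION this can be driven from user space, roughly (see
 * Documentation/fault-injection/fault-injection.rst for the knobs):
 *
 *	echo btrfs_should_cancel_balance > /sys/kernel/debug/fail_function/inject
 *	echo 100 > /sys/kernel/debug/fail_function/probability
 *	echo -1 > /sys/kernel/debug/fail_function/times
 */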
2694
2695static int relocate_file_extent_cluster(struct inode *inode,
2696					struct file_extent_cluster *cluster)
2697{
2698	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2699	u64 page_start;
2700	u64 page_end;
2701	u64 offset = BTRFS_I(inode)->index_cnt;
2702	unsigned long index;
2703	unsigned long last_index;
2704	struct page *page;
2705	struct file_ra_state *ra;
2706	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
2707	int nr = 0;
2708	int ret = 0;
2709
2710	if (!cluster->nr)
2711		return 0;
2712
2713	ra = kzalloc(sizeof(*ra), GFP_NOFS);
2714	if (!ra)
2715		return -ENOMEM;
2716
2717	ret = prealloc_file_extent_cluster(BTRFS_I(inode), cluster);
2718	if (ret)
2719		goto out;
2720
2721	file_ra_state_init(ra, inode->i_mapping);
2722
2723	ret = setup_extent_mapping(inode, cluster->start - offset,
2724				   cluster->end - offset, cluster->start);
2725	if (ret)
2726		goto out;
2727
2728	index = (cluster->start - offset) >> PAGE_SHIFT;
2729	last_index = (cluster->end - offset) >> PAGE_SHIFT;
2730	while (index <= last_index) {
2731		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
2732				PAGE_SIZE);
2733		if (ret)
2734			goto out;
2735
2736		page = find_lock_page(inode->i_mapping, index);
2737		if (!page) {
2738			page_cache_sync_readahead(inode->i_mapping,
2739						  ra, NULL, index,
2740						  last_index + 1 - index);
2741			page = find_or_create_page(inode->i_mapping, index,
2742						   mask);
2743			if (!page) {
2744				btrfs_delalloc_release_metadata(BTRFS_I(inode),
2745							PAGE_SIZE, true);
2746				btrfs_delalloc_release_extents(BTRFS_I(inode),
2747							PAGE_SIZE);
2748				ret = -ENOMEM;
2749				goto out;
2750			}
2751		}
2752
2753		if (PageReadahead(page)) {
2754			page_cache_async_readahead(inode->i_mapping,
2755						   ra, NULL, page, index,
2756						   last_index + 1 - index);
2757		}
2758
2759		if (!PageUptodate(page)) {
2760			btrfs_readpage(NULL, page);
2761			lock_page(page);
2762			if (!PageUptodate(page)) {
2763				unlock_page(page);
2764				put_page(page);
2765				btrfs_delalloc_release_metadata(BTRFS_I(inode),
2766							PAGE_SIZE, true);
2767				btrfs_delalloc_release_extents(BTRFS_I(inode),
2768							       PAGE_SIZE);
2769				ret = -EIO;
2770				goto out;
2771			}
2772		}
2773
2774		page_start = page_offset(page);
2775		page_end = page_start + PAGE_SIZE - 1;
2776
2777		lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
2778
2779		set_page_extent_mapped(page);
2780
2781		if (nr < cluster->nr &&
2782		    page_start + offset == cluster->boundary[nr]) {
2783			set_extent_bits(&BTRFS_I(inode)->io_tree,
2784					page_start, page_end,
2785					EXTENT_BOUNDARY);
2786			nr++;
2787		}
2788
2789		ret = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start,
2790						page_end, 0, NULL);
2791		if (ret) {
2792			unlock_page(page);
2793			put_page(page);
2794			btrfs_delalloc_release_metadata(BTRFS_I(inode),
2795							PAGE_SIZE, true);
2796			btrfs_delalloc_release_extents(BTRFS_I(inode),
2797						       PAGE_SIZE);
2798
2799			clear_extent_bits(&BTRFS_I(inode)->io_tree,
2800					  page_start, page_end,
2801					  EXTENT_LOCKED | EXTENT_BOUNDARY);
2802			goto out;
2803
2804		}
2805		set_page_dirty(page);
2806
2807		unlock_extent(&BTRFS_I(inode)->io_tree,
2808			      page_start, page_end);
2809		unlock_page(page);
2810		put_page(page);
2811
2812		index++;
2813		btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
2814		balance_dirty_pages_ratelimited(inode->i_mapping);
2815		btrfs_throttle(fs_info);
2816		if (btrfs_should_cancel_balance(fs_info)) {
2817			ret = -ECANCELED;
2818			goto out;
2819		}
2820	}
2821	WARN_ON(nr != cluster->nr);
2822out:
2823	kfree(ra);
2824	return ret;
2825}
2826
2827static noinline_for_stack
2828int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
2829			 struct file_extent_cluster *cluster)
2830{
2831	int ret;
2832
2833	if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
2834		ret = relocate_file_extent_cluster(inode, cluster);
2835		if (ret)
2836			return ret;
2837		cluster->nr = 0;
2838	}
2839
2840	if (!cluster->nr)
2841		cluster->start = extent_key->objectid;
2842	else
2843		BUG_ON(cluster->nr >= MAX_EXTENTS);
2844	cluster->end = extent_key->objectid + extent_key->offset - 1;
2845	cluster->boundary[cluster->nr] = extent_key->objectid;
2846	cluster->nr++;
2847
2848	if (cluster->nr >= MAX_EXTENTS) {
2849		ret = relocate_file_extent_cluster(inode, cluster);
2850		if (ret)
2851			return ret;
2852		cluster->nr = 0;
2853	}
2854	return 0;
2855}
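
/*
 * An example of the batching above (a sketch): data extents at bytenr
 * 1M (len 1M), 2M (len 1M) and 8M (len 1M) arrive in order.  The second
 * extent starts at cluster->end + 1, so it joins the cluster and
 * boundary[] becomes {1M, 2M}.  The third does not, so the pending
 * cluster is relocated first and a new cluster starting at 8M is opened.
 * A full cluster (MAX_EXTENTS boundaries) is flushed the same way.
 */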
2856
2857/*
2858 * helper to add a tree block to the list.
2859 * the major work is getting the generation and level of the block
2860 */
2861static int add_tree_block(struct reloc_control *rc,
2862			  struct btrfs_key *extent_key,
2863			  struct btrfs_path *path,
2864			  struct rb_root *blocks)
2865{
2866	struct extent_buffer *eb;
2867	struct btrfs_extent_item *ei;
2868	struct btrfs_tree_block_info *bi;
2869	struct tree_block *block;
2870	struct rb_node *rb_node;
2871	u32 item_size;
2872	int level = -1;
2873	u64 generation;
2874
2875	eb = path->nodes[0];
2876	item_size = btrfs_item_size_nr(eb, path->slots[0]);
2877
2878	if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
2879	    item_size >= sizeof(*ei) + sizeof(*bi)) {
2880		ei = btrfs_item_ptr(eb, path->slots[0],
2881				struct btrfs_extent_item);
2882		if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
2883			bi = (struct btrfs_tree_block_info *)(ei + 1);
2884			level = btrfs_tree_block_level(eb, bi);
2885		} else {
2886			level = (int)extent_key->offset;
2887		}
2888		generation = btrfs_extent_generation(eb, ei);
2889	} else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
2890		btrfs_print_v0_err(eb->fs_info);
2891		btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
2892		return -EINVAL;
2893	} else {
2894		BUG();
2895	}
2896
2897	btrfs_release_path(path);
2898
2899	BUG_ON(level == -1);
2900
2901	block = kmalloc(sizeof(*block), GFP_NOFS);
2902	if (!block)
2903		return -ENOMEM;
2904
2905	block->bytenr = extent_key->objectid;
2906	block->key.objectid = rc->extent_root->fs_info->nodesize;
2907	block->key.offset = generation;
2908	block->level = level;
2909	block->key_ready = 0;
2910
2911	rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node);
2912	if (rb_node)
2913		btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr,
2914				    -EEXIST);
2915
2916	return 0;
2917}
2918
2919/*
2920 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
2921 */
2922static int __add_tree_block(struct reloc_control *rc,
2923			    u64 bytenr, u32 blocksize,
2924			    struct rb_root *blocks)
2925{
2926	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2927	struct btrfs_path *path;
2928	struct btrfs_key key;
2929	int ret;
2930	bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
2931
2932	if (tree_block_processed(bytenr, rc))
2933		return 0;
2934
2935	if (rb_simple_search(blocks, bytenr))
2936		return 0;
2937
2938	path = btrfs_alloc_path();
2939	if (!path)
2940		return -ENOMEM;
2941again:
2942	key.objectid = bytenr;
2943	if (skinny) {
2944		key.type = BTRFS_METADATA_ITEM_KEY;
2945		key.offset = (u64)-1;
2946	} else {
2947		key.type = BTRFS_EXTENT_ITEM_KEY;
2948		key.offset = blocksize;
2949	}
2950
2951	path->search_commit_root = 1;
2952	path->skip_locking = 1;
2953	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
2954	if (ret < 0)
2955		goto out;
2956
2957	if (ret > 0 && skinny) {
2958		if (path->slots[0]) {
2959			path->slots[0]--;
2960			btrfs_item_key_to_cpu(path->nodes[0], &key,
2961					      path->slots[0]);
2962			if (key.objectid == bytenr &&
2963			    (key.type == BTRFS_METADATA_ITEM_KEY ||
2964			     (key.type == BTRFS_EXTENT_ITEM_KEY &&
2965			      key.offset == blocksize)))
2966				ret = 0;
2967		}
2968
2969		if (ret) {
2970			skinny = false;
2971			btrfs_release_path(path);
2972			goto again;
2973		}
2974	}
2975	if (ret) {
2976		ASSERT(ret == 1);
2977		btrfs_print_leaf(path->nodes[0]);
2978		btrfs_err(fs_info,
2979	     "tree block extent item (%llu) is not found in extent tree",
2980		     bytenr);
2981		WARN_ON(1);
2982		ret = -EINVAL;
2983		goto out;
2984	}
2985
2986	ret = add_tree_block(rc, &key, path, blocks);
2987out:
2988	btrfs_free_path(path);
2989	return ret;
2990}
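
/*
 * The two key shapes searched above, side by side (a sketch for a tree
 * block at bytenr 30M, level 1, with a 16K nodesize):
 *
 *	skinny:  (30M, BTRFS_METADATA_ITEM_KEY, 1)	(offset = level)
 *	regular: (30M, BTRFS_EXTENT_ITEM_KEY, 16384)	(offset = size)
 *
 * The skinny lookup starts at offset (u64)-1 and steps back one slot
 * because the level is not known in advance.
 */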
2991
2992static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
2993				    struct btrfs_block_group *block_group,
2994				    struct inode *inode,
2995				    u64 ino)
2996{
2997	struct btrfs_root *root = fs_info->tree_root;
2998	struct btrfs_trans_handle *trans;
2999	int ret = 0;
3000
3001	if (inode)
3002		goto truncate;
3003
3004	inode = btrfs_iget(fs_info->sb, ino, root);
3005	if (IS_ERR(inode))
3006		return -ENOENT;
3007
3008truncate:
3009	ret = btrfs_check_trunc_cache_free_space(fs_info,
3010						 &fs_info->global_block_rsv);
3011	if (ret)
3012		goto out;
3013
3014	trans = btrfs_join_transaction(root);
3015	if (IS_ERR(trans)) {
3016		ret = PTR_ERR(trans);
3017		goto out;
3018	}
3019
3020	ret = btrfs_truncate_free_space_cache(trans, block_group, inode);
3021
3022	btrfs_end_transaction(trans);
3023	btrfs_btree_balance_dirty(fs_info);
3024out:
3025	iput(inode);
3026	return ret;
3027}
3028
3029/*
3030 * Locate the free space cache EXTENT_DATA in the root tree leaf and delete
3031 * the cache inode, so its data extent can't block data relocation.
3032 */
3033static int delete_v1_space_cache(struct extent_buffer *leaf,
3034				 struct btrfs_block_group *block_group,
3035				 u64 data_bytenr)
3036{
3037	u64 space_cache_ino;
3038	struct btrfs_file_extent_item *ei;
3039	struct btrfs_key key;
3040	bool found = false;
3041	int i;
3042	int ret;
3043
3044	if (btrfs_header_owner(leaf) != BTRFS_ROOT_TREE_OBJECTID)
3045		return 0;
3046
3047	for (i = 0; i < btrfs_header_nritems(leaf); i++) {
3048		u8 type;
3049
3050		btrfs_item_key_to_cpu(leaf, &key, i);
3051		if (key.type != BTRFS_EXTENT_DATA_KEY)
3052			continue;
3053		ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
3054		type = btrfs_file_extent_type(leaf, ei);
3055
3056		if ((type == BTRFS_FILE_EXTENT_REG ||
3057		     type == BTRFS_FILE_EXTENT_PREALLOC) &&
3058		    btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) {
3059			found = true;
3060			space_cache_ino = key.objectid;
3061			break;
3062		}
3063	}
3064	if (!found)
3065		return -ENOENT;
3066	ret = delete_block_group_cache(leaf->fs_info, block_group, NULL,
3067					space_cache_ino);
3068	return ret;
3069}
3070
3071/*
3072 * helper to find all tree blocks that reference a given data extent
3073 */
3074static noinline_for_stack
3075int add_data_references(struct reloc_control *rc,
3076			struct btrfs_key *extent_key,
3077			struct btrfs_path *path,
3078			struct rb_root *blocks)
3079{
3080	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3081	struct ulist *leaves = NULL;
3082	struct ulist_iterator leaf_uiter;
3083	struct ulist_node *ref_node = NULL;
3084	const u32 blocksize = fs_info->nodesize;
3085	int ret = 0;
3086
3087	btrfs_release_path(path);
3088	ret = btrfs_find_all_leafs(NULL, fs_info, extent_key->objectid,
3089				   0, &leaves, NULL, true);
3090	if (ret < 0)
3091		return ret;
3092
3093	ULIST_ITER_INIT(&leaf_uiter);
3094	while ((ref_node = ulist_next(leaves, &leaf_uiter))) {
3095		struct extent_buffer *eb;
3096
3097		eb = read_tree_block(fs_info, ref_node->val, 0, 0, NULL);
3098		if (IS_ERR(eb)) {
3099			ret = PTR_ERR(eb);
3100			break;
3101		}
3102		ret = delete_v1_space_cache(eb, rc->block_group,
3103					    extent_key->objectid);
3104		free_extent_buffer(eb);
3105		if (ret < 0)
3106			break;
3107		ret = __add_tree_block(rc, ref_node->val, blocksize, blocks);
3108		if (ret < 0)
3109			break;
3110	}
3111	if (ret < 0)
3112		free_block_list(blocks);
3113	ulist_free(leaves);
3114	return ret;
3115}
3116
3117/*
3118 * helper to find next unprocessed extent
3119 */
3120static noinline_for_stack
3121int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
3122		     struct btrfs_key *extent_key)
3123{
3124	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3125	struct btrfs_key key;
3126	struct extent_buffer *leaf;
3127	u64 start, end, last;
3128	int ret;
3129
3130	last = rc->block_group->start + rc->block_group->length;
3131	while (1) {
3132		cond_resched();
3133		if (rc->search_start >= last) {
3134			ret = 1;
3135			break;
3136		}
3137
3138		key.objectid = rc->search_start;
3139		key.type = BTRFS_EXTENT_ITEM_KEY;
3140		key.offset = 0;
3141
3142		path->search_commit_root = 1;
3143		path->skip_locking = 1;
3144		ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
3145					0, 0);
3146		if (ret < 0)
3147			break;
3148next:
3149		leaf = path->nodes[0];
3150		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3151			ret = btrfs_next_leaf(rc->extent_root, path);
3152			if (ret != 0)
3153				break;
3154			leaf = path->nodes[0];
3155		}
3156
3157		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3158		if (key.objectid >= last) {
3159			ret = 1;
3160			break;
3161		}
3162
3163		if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3164		    key.type != BTRFS_METADATA_ITEM_KEY) {
3165			path->slots[0]++;
3166			goto next;
3167		}
3168
3169		if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3170		    key.objectid + key.offset <= rc->search_start) {
3171			path->slots[0]++;
3172			goto next;
3173		}
3174
3175		if (key.type == BTRFS_METADATA_ITEM_KEY &&
3176		    key.objectid + fs_info->nodesize <=
3177		    rc->search_start) {
3178			path->slots[0]++;
3179			goto next;
3180		}
3181
3182		ret = find_first_extent_bit(&rc->processed_blocks,
3183					    key.objectid, &start, &end,
3184					    EXTENT_DIRTY, NULL);
3185
3186		if (ret == 0 && start <= key.objectid) {
3187			btrfs_release_path(path);
3188			rc->search_start = end + 1;
3189		} else {
3190			if (key.type == BTRFS_EXTENT_ITEM_KEY)
3191				rc->search_start = key.objectid + key.offset;
3192			else
3193				rc->search_start = key.objectid +
3194					fs_info->nodesize;
3195			memcpy(extent_key, &key, sizeof(key));
3196			return 0;
3197		}
3198	}
3199	btrfs_release_path(path);
3200	return ret;
3201}
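
/*
 * Note the two ways the search cursor advances above: for an EXTENT_ITEM
 * the key offset is the extent size, so the next search starts at
 * objectid + offset; for a skinny METADATA_ITEM the offset is the tree
 * level, so the cursor advances by nodesize instead.
 */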
3202
3203static void set_reloc_control(struct reloc_control *rc)
3204{
3205	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3206
3207	mutex_lock(&fs_info->reloc_mutex);
3208	fs_info->reloc_ctl = rc;
3209	mutex_unlock(&fs_info->reloc_mutex);
3210}
3211
3212static void unset_reloc_control(struct reloc_control *rc)
3213{
3214	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3215
3216	mutex_lock(&fs_info->reloc_mutex);
3217	fs_info->reloc_ctl = NULL;
3218	mutex_unlock(&fs_info->reloc_mutex);
3219}
3220
3221static int check_extent_flags(u64 flags)
3222{
3223	if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
3224	    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
3225		return 1;
3226	if (!(flags & BTRFS_EXTENT_FLAG_DATA) &&
3227	    !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
3228		return 1;
3229	if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
3230	    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
3231		return 1;
3232	return 0;
3233}
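
/*
 * Flag combinations as seen by check_extent_flags() (1 means invalid):
 *
 *	DATA | TREE_BLOCK		-> 1	(both set)
 *	(neither)			-> 1	(none set)
 *	DATA | FULL_BACKREF		-> 1	(full backref is metadata only)
 *	TREE_BLOCK | FULL_BACKREF	-> 0	(valid)
 *	DATA				-> 0	(valid)
 */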
3234
3235static noinline_for_stack
3236int prepare_to_relocate(struct reloc_control *rc)
3237{
3238	struct btrfs_trans_handle *trans;
3239	int ret;
3240
3241	rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
3242					      BTRFS_BLOCK_RSV_TEMP);
3243	if (!rc->block_rsv)
3244		return -ENOMEM;
3245
3246	memset(&rc->cluster, 0, sizeof(rc->cluster));
3247	rc->search_start = rc->block_group->start;
3248	rc->extents_found = 0;
3249	rc->nodes_relocated = 0;
3250	rc->merging_rsv_size = 0;
3251	rc->reserved_bytes = 0;
3252	rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
3253			      RELOCATION_RESERVED_NODES;
3254	ret = btrfs_block_rsv_refill(rc->extent_root,
3255				     rc->block_rsv, rc->block_rsv->size,
3256				     BTRFS_RESERVE_FLUSH_ALL);
3257	if (ret)
3258		return ret;
3259
3260	rc->create_reloc_tree = 1;
3261	set_reloc_control(rc);
3262
3263	trans = btrfs_join_transaction(rc->extent_root);
3264	if (IS_ERR(trans)) {
3265		unset_reloc_control(rc);
3266		/*
3267		 * The extent tree is not a ref_cow tree and has no reloc_root to
3268		 * clean up.  Callers are responsible for freeing the above
3269		 * block rsv.
3270		 */
3271		return PTR_ERR(trans);
3272	}
3273
3274	ret = btrfs_commit_transaction(trans);
3275	if (ret)
3276		unset_reloc_control(rc);
3277
3278	return ret;
3279}
3280
3281static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3282{
3283	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3284	struct rb_root blocks = RB_ROOT;
3285	struct btrfs_key key;
3286	struct btrfs_trans_handle *trans = NULL;
3287	struct btrfs_path *path;
3288	struct btrfs_extent_item *ei;
3289	u64 flags;
3290	u32 item_size;
3291	int ret;
3292	int err = 0;
3293	int progress = 0;
3294
3295	path = btrfs_alloc_path();
3296	if (!path)
3297		return -ENOMEM;
3298	path->reada = READA_FORWARD;
3299
3300	ret = prepare_to_relocate(rc);
3301	if (ret) {
3302		err = ret;
3303		goto out_free;
3304	}
3305
3306	while (1) {
3307		rc->reserved_bytes = 0;
3308		ret = btrfs_block_rsv_refill(rc->extent_root,
3309					rc->block_rsv, rc->block_rsv->size,
3310					BTRFS_RESERVE_FLUSH_ALL);
3311		if (ret) {
3312			err = ret;
3313			break;
3314		}
3315		progress++;
3316		trans = btrfs_start_transaction(rc->extent_root, 0);
3317		if (IS_ERR(trans)) {
3318			err = PTR_ERR(trans);
3319			trans = NULL;
3320			break;
3321		}
3322restart:
3323		if (update_backref_cache(trans, &rc->backref_cache)) {
3324			btrfs_end_transaction(trans);
3325			trans = NULL;
3326			continue;
3327		}
3328
3329		ret = find_next_extent(rc, path, &key);
3330		if (ret < 0)
3331			err = ret;
3332		if (ret != 0)
3333			break;
3334
3335		rc->extents_found++;
3336
3337		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3338				    struct btrfs_extent_item);
3339		item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
3340		if (item_size >= sizeof(*ei)) {
3341			flags = btrfs_extent_flags(path->nodes[0], ei);
3342			ret = check_extent_flags(flags);
3343			BUG_ON(ret);
3344		} else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
3345			err = -EINVAL;
3346			btrfs_print_v0_err(trans->fs_info);
3347			btrfs_abort_transaction(trans, err);
3348			break;
3349		} else {
3350			BUG();
3351		}
3352
3353		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
3354			ret = add_tree_block(rc, &key, path, &blocks);
3355		} else if (rc->stage == UPDATE_DATA_PTRS &&
3356			   (flags & BTRFS_EXTENT_FLAG_DATA)) {
3357			ret = add_data_references(rc, &key, path, &blocks);
3358		} else {
3359			btrfs_release_path(path);
3360			ret = 0;
3361		}
3362		if (ret < 0) {
3363			err = ret;
3364			break;
3365		}
3366
3367		if (!RB_EMPTY_ROOT(&blocks)) {
3368			ret = relocate_tree_blocks(trans, rc, &blocks);
3369			if (ret < 0) {
3370				if (ret != -EAGAIN) {
3371					err = ret;
3372					break;
3373				}
3374				rc->extents_found--;
3375				rc->search_start = key.objectid;
3376			}
3377		}
3378
3379		btrfs_end_transaction_throttle(trans);
3380		btrfs_btree_balance_dirty(fs_info);
3381		trans = NULL;
3382
3383		if (rc->stage == MOVE_DATA_EXTENTS &&
3384		    (flags & BTRFS_EXTENT_FLAG_DATA)) {
3385			rc->found_file_extent = 1;
3386			ret = relocate_data_extent(rc->data_inode,
3387						   &key, &rc->cluster);
3388			if (ret < 0) {
3389				err = ret;
3390				break;
3391			}
3392		}
3393		if (btrfs_should_cancel_balance(fs_info)) {
3394			err = -ECANCELED;
3395			break;
3396		}
3397	}
3398	if (trans && progress && err == -ENOSPC) {
3399		ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
3400		if (ret == 1) {
3401			err = 0;
3402			progress = 0;
3403			goto restart;
3404		}
3405	}
3406
3407	btrfs_release_path(path);
3408	clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
3409
3410	if (trans) {
3411		btrfs_end_transaction_throttle(trans);
3412		btrfs_btree_balance_dirty(fs_info);
3413	}
3414
3415	if (!err) {
3416		ret = relocate_file_extent_cluster(rc->data_inode,
3417						   &rc->cluster);
3418		if (ret < 0)
3419			err = ret;
3420	}
3421
3422	rc->create_reloc_tree = 0;
3423	set_reloc_control(rc);
3424
3425	btrfs_backref_release_cache(&rc->backref_cache);
3426	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3427
3428	/*
3429	 * Even when the relocation is cancelled, we should still go
3430	 * through prepare_to_merge() and merge_reloc_roots().
3431	 *
3432	 * On error (including a cancelled balance), prepare_to_merge() will
3433	 * mark all reloc trees orphan, then queue them for cleanup in
3434	 * merge_reloc_roots().
3435	 */
3436	err = prepare_to_merge(rc, err);
3437
3438	merge_reloc_roots(rc);
3439
3440	rc->merge_reloc_tree = 0;
3441	unset_reloc_control(rc);
3442	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
3443
3444	/* get rid of pinned extents */
3445	trans = btrfs_join_transaction(rc->extent_root);
3446	if (IS_ERR(trans)) {
3447		err = PTR_ERR(trans);
3448		goto out_free;
3449	}
3450	ret = btrfs_commit_transaction(trans);
3451	if (ret && !err)
3452		err = ret;
3453out_free:
3454	ret = clean_dirty_subvols(rc);
3455	if (ret < 0 && !err)
3456		err = ret;
3457	btrfs_free_block_rsv(fs_info, rc->block_rsv);
3458	btrfs_free_path(path);
3459	return err;
3460}
3461
3462static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
3463				 struct btrfs_root *root, u64 objectid)
3464{
3465	struct btrfs_path *path;
3466	struct btrfs_inode_item *item;
3467	struct extent_buffer *leaf;
3468	int ret;
3469
3470	path = btrfs_alloc_path();
3471	if (!path)
3472		return -ENOMEM;
3473
3474	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
3475	if (ret)
3476		goto out;
3477
3478	leaf = path->nodes[0];
3479	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
3480	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3481	btrfs_set_inode_generation(leaf, item, 1);
3482	btrfs_set_inode_size(leaf, item, 0);
3483	btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
3484	btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
3485					  BTRFS_INODE_PREALLOC);
3486	btrfs_mark_buffer_dirty(leaf);
3487out:
3488	btrfs_free_path(path);
3489	return ret;
3490}
3491
3492/*
3493 * Helper to create an inode for data relocation.
3494 * The inode lives in the data relocation tree and its link count is 0.
3495 */
3496static noinline_for_stack
3497struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
3498				 struct btrfs_block_group *group)
3499{
3500	struct inode *inode = NULL;
3501	struct btrfs_trans_handle *trans;
3502	struct btrfs_root *root;
3503	u64 objectid;
3504	int err = 0;
3505
3506	root = btrfs_grab_root(fs_info->data_reloc_root);
3507	trans = btrfs_start_transaction(root, 6);
3508	if (IS_ERR(trans)) {
3509		btrfs_put_root(root);
3510		return ERR_CAST(trans);
3511	}
3512
3513	err = btrfs_find_free_objectid(root, &objectid);
3514	if (err)
3515		goto out;
3516
3517	err = __insert_orphan_inode(trans, root, objectid);
3518	BUG_ON(err);
3519
3520	inode = btrfs_iget(fs_info->sb, objectid, root);
3521	BUG_ON(IS_ERR(inode));
3522	BTRFS_I(inode)->index_cnt = group->start;
3523
3524	err = btrfs_orphan_add(trans, BTRFS_I(inode));
3525out:
3526	btrfs_put_root(root);
3527	btrfs_end_transaction(trans);
3528	btrfs_btree_balance_dirty(fs_info);
3529	if (err) {
3530		if (inode)
3531			iput(inode);
3532		inode = ERR_PTR(err);
3533	}
3534	return inode;
3535}
3536
3537static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
3538{
3539	struct reloc_control *rc;
3540
3541	rc = kzalloc(sizeof(*rc), GFP_NOFS);
3542	if (!rc)
3543		return NULL;
3544
3545	INIT_LIST_HEAD(&rc->reloc_roots);
3546	INIT_LIST_HEAD(&rc->dirty_subvol_roots);
3547	btrfs_backref_init_cache(fs_info, &rc->backref_cache, 1);
3548	mapping_tree_init(&rc->reloc_root_tree);
3549	extent_io_tree_init(fs_info, &rc->processed_blocks,
3550			    IO_TREE_RELOC_BLOCKS, NULL);
3551	return rc;
3552}
3553
3554static void free_reloc_control(struct reloc_control *rc)
3555{
3556	struct mapping_node *node, *tmp;
3557
3558	free_reloc_roots(&rc->reloc_roots);
3559	rbtree_postorder_for_each_entry_safe(node, tmp,
3560			&rc->reloc_root_tree.rb_root, rb_node)
3561		kfree(node);
3562
3563	kfree(rc);
3564}
3565
3566/*
3567 * Print the block group being relocated
3568 */
3569static void describe_relocation(struct btrfs_fs_info *fs_info,
3570				struct btrfs_block_group *block_group)
3571{
3572	char buf[128] = {'\0'};
3573
3574	btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));
3575
3576	btrfs_info(fs_info,
3577		   "relocating block group %llu flags %s",
3578		   block_group->start, buf);
3579}
3580
3581static const char *stage_to_string(int stage)
3582{
3583	if (stage == MOVE_DATA_EXTENTS)
3584		return "move data extents";
3585	if (stage == UPDATE_DATA_PTRS)
3586		return "update data pointers";
3587	return "unknown";
3588}
3589
3590/*
3591 * function to relocate all extents in a block group.
3592 */
3593int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
3594{
3595	struct btrfs_block_group *bg;
3596	struct btrfs_root *extent_root = fs_info->extent_root;
3597	struct reloc_control *rc;
3598	struct inode *inode;
3599	struct btrfs_path *path;
3600	int ret;
3601	int rw = 0;
3602	int err = 0;
3603
3604	bg = btrfs_lookup_block_group(fs_info, group_start);
3605	if (!bg)
3606		return -ENOENT;
3607
3608	if (btrfs_pinned_by_swapfile(fs_info, bg)) {
3609		btrfs_put_block_group(bg);
3610		return -ETXTBSY;
3611	}
3612
3613	rc = alloc_reloc_control(fs_info);
3614	if (!rc) {
3615		btrfs_put_block_group(bg);
3616		return -ENOMEM;
3617	}
3618
3619	rc->extent_root = extent_root;
3620	rc->block_group = bg;
3621
3622	ret = btrfs_inc_block_group_ro(rc->block_group, true);
3623	if (ret) {
3624		err = ret;
3625		goto out;
3626	}
3627	rw = 1;
3628
3629	path = btrfs_alloc_path();
3630	if (!path) {
3631		err = -ENOMEM;
3632		goto out;
3633	}
3634
3635	inode = lookup_free_space_inode(rc->block_group, path);
3636	btrfs_free_path(path);
3637
3638	if (!IS_ERR(inode))
3639		ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
3640	else
3641		ret = PTR_ERR(inode);
3642
3643	if (ret && ret != -ENOENT) {
3644		err = ret;
3645		goto out;
3646	}
3647
3648	rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
3649	if (IS_ERR(rc->data_inode)) {
3650		err = PTR_ERR(rc->data_inode);
3651		rc->data_inode = NULL;
3652		goto out;
3653	}
3654
3655	describe_relocation(fs_info, rc->block_group);
3656
3657	btrfs_wait_block_group_reservations(rc->block_group);
3658	btrfs_wait_nocow_writers(rc->block_group);
3659	btrfs_wait_ordered_roots(fs_info, U64_MAX,
3660				 rc->block_group->start,
3661				 rc->block_group->length);
3662
3663	while (1) {
3664		int finishes_stage;
3665
3666		mutex_lock(&fs_info->cleaner_mutex);
3667		ret = relocate_block_group(rc);
3668		mutex_unlock(&fs_info->cleaner_mutex);
3669		if (ret < 0)
3670			err = ret;
3671
3672		finishes_stage = rc->stage;
3673		/*
3674		 * We may have gotten ENOSPC after we already dirtied some
3675		 * extents.  If writeout happens while we're relocating a
3676		 * different block group we could end up hitting the
3677		 * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
3678		 * btrfs_reloc_cow_block.  Make sure we write everything out
3679		 * properly so we don't trip over this problem, and then break
3680		 * out of the loop if we hit an error.
3681		 */
3682		if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
3683			ret = btrfs_wait_ordered_range(rc->data_inode, 0,
3684						       (u64)-1);
3685			if (ret)
3686				err = ret;
3687			invalidate_mapping_pages(rc->data_inode->i_mapping,
3688						 0, -1);
3689			rc->stage = UPDATE_DATA_PTRS;
3690		}
3691
3692		if (err < 0)
3693			goto out;
3694
3695		if (rc->extents_found == 0)
3696			break;
3697
3698		btrfs_info(fs_info, "found %llu extents, stage: %s",
3699			   rc->extents_found, stage_to_string(finishes_stage));
3700	}
3701
3702	WARN_ON(rc->block_group->pinned > 0);
3703	WARN_ON(rc->block_group->reserved > 0);
3704	WARN_ON(rc->block_group->used > 0);
3705out:
3706	if (err && rw)
3707		btrfs_dec_block_group_ro(rc->block_group);
3708	iput(rc->data_inode);
3709	btrfs_put_block_group(rc->block_group);
3710	free_reloc_control(rc);
3711	return err;
3712}
3713
3714static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
3715{
3716	struct btrfs_fs_info *fs_info = root->fs_info;
3717	struct btrfs_trans_handle *trans;
3718	int ret, err;
3719
3720	trans = btrfs_start_transaction(fs_info->tree_root, 0);
3721	if (IS_ERR(trans))
3722		return PTR_ERR(trans);
3723
3724	memset(&root->root_item.drop_progress, 0,
3725		sizeof(root->root_item.drop_progress));
3726	root->root_item.drop_level = 0;
3727	btrfs_set_root_refs(&root->root_item, 0);
3728	ret = btrfs_update_root(trans, fs_info->tree_root,
3729				&root->root_key, &root->root_item);
3730
3731	err = btrfs_end_transaction(trans);
3732	if (err)
3733		return err;
3734	return ret;
3735}
3736
3737/*
3738 * Recover relocation interrupted by a system crash.
3739 *
3740 * This function resumes merging reloc trees with their corresponding fs
3741 * trees, which is important for preserving the sharing of tree blocks.
3742 */
3743int btrfs_recover_relocation(struct btrfs_root *root)
3744{
3745	struct btrfs_fs_info *fs_info = root->fs_info;
3746	LIST_HEAD(reloc_roots);
3747	struct btrfs_key key;
3748	struct btrfs_root *fs_root;
3749	struct btrfs_root *reloc_root;
3750	struct btrfs_path *path;
3751	struct extent_buffer *leaf;
3752	struct reloc_control *rc = NULL;
3753	struct btrfs_trans_handle *trans;
3754	int ret;
3755	int err = 0;
3756
3757	path = btrfs_alloc_path();
3758	if (!path)
3759		return -ENOMEM;
3760	path->reada = READA_BACK;
3761
3762	key.objectid = BTRFS_TREE_RELOC_OBJECTID;
3763	key.type = BTRFS_ROOT_ITEM_KEY;
3764	key.offset = (u64)-1;
3765
3766	while (1) {
3767		ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
3768					path, 0, 0);
3769		if (ret < 0) {
3770			err = ret;
3771			goto out;
3772		}
3773		if (ret > 0) {
3774			if (path->slots[0] == 0)
3775				break;
3776			path->slots[0]--;
3777		}
3778		leaf = path->nodes[0];
3779		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3780		btrfs_release_path(path);
3781
3782		if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
3783		    key.type != BTRFS_ROOT_ITEM_KEY)
3784			break;
3785
3786		reloc_root = btrfs_read_tree_root(root, &key);
3787		if (IS_ERR(reloc_root)) {
3788			err = PTR_ERR(reloc_root);
3789			goto out;
3790		}
3791
3792		set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
3793		list_add(&reloc_root->root_list, &reloc_roots);
3794
3795		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
3796			fs_root = btrfs_get_fs_root(fs_info,
3797					reloc_root->root_key.offset, false);
3798			if (IS_ERR(fs_root)) {
3799				ret = PTR_ERR(fs_root);
3800				if (ret != -ENOENT) {
3801					err = ret;
3802					goto out;
3803				}
3804				ret = mark_garbage_root(reloc_root);
3805				if (ret < 0) {
3806					err = ret;
3807					goto out;
3808				}
3809			} else {
3810				btrfs_put_root(fs_root);
3811			}
3812		}
3813
3814		if (key.offset == 0)
3815			break;
3816
3817		key.offset--;
3818	}
3819	btrfs_release_path(path);
3820
3821	if (list_empty(&reloc_roots))
3822		goto out;
3823
3824	rc = alloc_reloc_control(fs_info);
3825	if (!rc) {
3826		err = -ENOMEM;
3827		goto out;
3828	}
3829
3830	rc->extent_root = fs_info->extent_root;
3831
3832	set_reloc_control(rc);
3833
3834	trans = btrfs_join_transaction(rc->extent_root);
3835	if (IS_ERR(trans)) {
3836		err = PTR_ERR(trans);
3837		goto out_unset;
3838	}
3839
3840	rc->merge_reloc_tree = 1;
3841
3842	while (!list_empty(&reloc_roots)) {
3843		reloc_root = list_entry(reloc_roots.next,
3844					struct btrfs_root, root_list);
3845		list_del(&reloc_root->root_list);
3846
3847		if (btrfs_root_refs(&reloc_root->root_item) == 0) {
3848			list_add_tail(&reloc_root->root_list,
3849				      &rc->reloc_roots);
3850			continue;
3851		}
3852
3853		fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
3854					    false);
3855		if (IS_ERR(fs_root)) {
3856			err = PTR_ERR(fs_root);
3857			list_add_tail(&reloc_root->root_list, &reloc_roots);
3858			btrfs_end_transaction(trans);
3859			goto out_unset;
3860		}
3861
3862		err = __add_reloc_root(reloc_root);
3863		BUG_ON(err < 0); /* -ENOMEM or logic error */
3864		fs_root->reloc_root = btrfs_grab_root(reloc_root);
3865		btrfs_put_root(fs_root);
3866	}
3867
3868	err = btrfs_commit_transaction(trans);
3869	if (err)
3870		goto out_unset;
3871
3872	merge_reloc_roots(rc);
3873
3874	unset_reloc_control(rc);
3875
3876	trans = btrfs_join_transaction(rc->extent_root);
3877	if (IS_ERR(trans)) {
3878		err = PTR_ERR(trans);
3879		goto out_clean;
3880	}
3881	err = btrfs_commit_transaction(trans);
3882out_clean:
3883	ret = clean_dirty_subvols(rc);
3884	if (ret < 0 && !err)
3885		err = ret;
3886out_unset:
3887	unset_reloc_control(rc);
3888	free_reloc_control(rc);
3889out:
3890	free_reloc_roots(&reloc_roots);
3891
3892	btrfs_free_path(path);
3893
3894	if (err == 0) {
3895		/* cleanup orphan inode in data relocation tree */
3896		fs_root = btrfs_grab_root(fs_info->data_reloc_root);
3897		ASSERT(fs_root);
3898		err = btrfs_orphan_cleanup(fs_root);
3899		btrfs_put_root(fs_root);
3900	}
3901	return err;
3902}
3903
3904/*
3905 * Helper to add ordered checksums for data relocation.
3906 *
3907 * Cloning the checksums properly handles nodatasum extents.
3908 * It also saves the CPU time of recalculating the checksums.
3909 */
3910int btrfs_reloc_clone_csums(struct btrfs_inode *inode, u64 file_pos, u64 len)
3911{
3912	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3913	struct btrfs_ordered_sum *sums;
3914	struct btrfs_ordered_extent *ordered;
3915	int ret;
3916	u64 disk_bytenr;
3917	u64 new_bytenr;
3918	LIST_HEAD(list);
3919
3920	ordered = btrfs_lookup_ordered_extent(inode, file_pos);
3921	BUG_ON(ordered->file_offset != file_pos || ordered->num_bytes != len);
3922
3923	disk_bytenr = file_pos + inode->index_cnt;
3924	ret = btrfs_lookup_csums_range(fs_info->csum_root, disk_bytenr,
3925				       disk_bytenr + len - 1, &list, 0);
3926	if (ret)
3927		goto out;
3928
3929	while (!list_empty(&list)) {
3930		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
3931		list_del_init(&sums->list);
3932
3933		/*
3934		 * We need to offset new_bytenr based on where the csum sits.
3935		 * We do this because we read in entire prealloc extents, but
3936		 * we may have written to, say, the middle of a prealloc
3937		 * extent, so we must make sure the csum is matched up with
3938		 * the right disk offset.
3939		 *
3940		 * We can do this because the data reloc inode refers strictly
3941		 * to the on-disk bytes, so unlike with regular inodes we do
3942		 * not have to worry about disk_len vs. real length; it is
3943		 * all disk length.
3944		 */
3945		new_bytenr = ordered->disk_bytenr + sums->bytenr - disk_bytenr;
3946		sums->bytenr = new_bytenr;
3947
3948		btrfs_add_ordered_sum(ordered, sums);
3949	}
3950out:
3951	btrfs_put_ordered_extent(ordered);
3952	return ret;
3953}
3954
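/*
 * A minimal, self-contained sketch (hypothetical helper, not part of the
 * btrfs API) of the rebase arithmetic used above: each csum keeps its
 * offset within the extent; only the extent's disk start changes.
 */
static inline u64 reloc_rebase_csum(u64 old_disk_bytenr, u64 new_disk_bytenr,
				    u64 csum_bytenr)
{
	/* position of this csum inside the old (prealloc) extent */
	u64 offset = csum_bytenr - old_disk_bytenr;

	return new_disk_bytenr + offset;
}
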
3955int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
3956			  struct btrfs_root *root, struct extent_buffer *buf,
3957			  struct extent_buffer *cow)
3958{
3959	struct btrfs_fs_info *fs_info = root->fs_info;
3960	struct reloc_control *rc;
3961	struct btrfs_backref_node *node;
3962	int first_cow = 0;
3963	int level;
3964	int ret = 0;
3965
3966	rc = fs_info->reloc_ctl;
3967	if (!rc)
3968		return 0;
3969
3970	BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
3971	       root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
3972
3973	level = btrfs_header_level(buf);
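	/*
	 * A generation not newer than the root's last snapshot transid means
	 * this is the first time the block is COWed since that snapshot.
	 */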
3974	if (btrfs_header_generation(buf) <=
3975	    btrfs_root_last_snapshot(&root->root_item))
3976		first_cow = 1;
3977
3978	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
3979	    rc->create_reloc_tree) {
3980		WARN_ON(!first_cow && level == 0);
3981
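		/*
		 * Keep the backref cache coherent: the node cached for this
		 * level must now reference the freshly COWed buffer.
		 */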
3982		node = rc->backref_cache.path[level];
3983		BUG_ON(node->bytenr != buf->start &&
3984		       node->new_bytenr != buf->start);
3985
3986		btrfs_backref_drop_node_buffer(node);
3987		atomic_inc(&cow->refs);
3988		node->eb = cow;
3989		node->new_bytenr = cow->start;
3990
3991		if (!node->pending) {
3992			list_move_tail(&node->list,
3993				       &rc->backref_cache.pending[level]);
3994			node->pending = 1;
3995		}
3996
3997		if (first_cow)
3998			mark_block_processed(rc, node);
3999
4000		if (first_cow && level > 0)
4001			rc->nodes_relocated += buf->len;
4002	}
4003
4004	if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
4005		ret = replace_file_extents(trans, rc, root, cow);
4006	return ret;
4007}
4008
4009/*
4010 * Called before creating a snapshot: it calculates the metadata
4011 * reservation required for relocating the tree blocks in the snapshot.
4012 */
4013void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
4014			      u64 *bytes_to_reserve)
4015{
4016	struct btrfs_root *root = pending->root;
4017	struct reloc_control *rc = root->fs_info->reloc_ctl;
4018
4019	if (!rc || !have_reloc_root(root))
4020		return;
4021
4022	if (!rc->merge_reloc_tree)
4023		return;
4024
4025	root = root->reloc_root;
4026	BUG_ON(btrfs_root_refs(&root->root_item) == 0);
4027	/*
4028	 * Relocation is in the stage of merging trees. The space used by
4029	 * merging a reloc tree is, in the worst case, twice the size of
4030	 * the relocated tree nodes: half for COWing the reloc tree and
4031	 * half for COWing the fs tree. The space used by COWing the reloc
4032	 * tree will be freed once that tree is dropped. However, if we
4033	 * create a snapshot, COWing the fs tree may use more space than
4034	 * it frees, so we need to reserve extra space for the pending
4035	 * snapshot.
4036	 */
4037	*bytes_to_reserve += rc->nodes_relocated;
4038}
4039
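/*
 * A minimal usage sketch (hypothetical caller and numbers, not from this
 * file): if rc->nodes_relocated is 4 MiB when a snapshot is pending, the
 * helper above grows the snapshot's reservation by exactly those 4 MiB
 * to cover the extra fs-tree COWs during merging.
 */
static u64 __maybe_unused example_snapshot_rsv(struct btrfs_pending_snapshot *pending,
					       u64 base_rsv)
{
	u64 bytes_to_reserve = base_rsv;

	/* adds rc->nodes_relocated when a reloc merge is in progress */
	btrfs_reloc_pre_snapshot(pending, &bytes_to_reserve);
	return bytes_to_reserve;
}
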
4040/*
4041 * Called after a snapshot is created: migrate the block reservation
4042 * and create a reloc root for the newly created snapshot.
4043 *
4044 * This is similar to btrfs_init_reloc_root(); we come out of here with
4045 * two references held on the reloc_root, one for root->reloc_root and
4046 * one for rc->reloc_roots.
4047 */
4048int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4049			       struct btrfs_pending_snapshot *pending)
4050{
4051	struct btrfs_root *root = pending->root;
4052	struct btrfs_root *reloc_root;
4053	struct btrfs_root *new_root;
4054	struct reloc_control *rc = root->fs_info->reloc_ctl;
4055	int ret;
4056
4057	if (!rc || !have_reloc_root(root))
4058		return 0;
4059
4060	/* every pending snapshot adds to the worst-case merge reservation */
4061	rc->merging_rsv_size += rc->nodes_relocated;
4062
4063	if (rc->merge_reloc_tree) {
4064		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
4065					      rc->block_rsv,
4066					      rc->nodes_relocated, true);
4067		if (ret)
4068			return ret;
4069	}
4070
4071	new_root = pending->snap;
4072	reloc_root = create_reloc_root(trans, root->reloc_root,
4073				       new_root->root_key.objectid);
4074	if (IS_ERR(reloc_root))
4075		return PTR_ERR(reloc_root);
4076
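	/*
	 * As noted above: the reference from create_reloc_root() is handed
	 * to rc->reloc_roots by __add_reloc_root(), and btrfs_grab_root()
	 * takes the second reference for new_root->reloc_root.
	 */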
4077	ret = __add_reloc_root(reloc_root);
4078	BUG_ON(ret < 0); /* -ENOMEM or logic error */
4079	new_root->reloc_root = btrfs_grab_root(reloc_root);
4080
4081	if (rc->create_reloc_tree)
4082		ret = clone_backref_node(trans, rc, root, reloc_root);
4083	return ret;
4084}
4085