Lines Matching refs:tree
7 #include "extent-io-tree.h"
46 pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
55 #define btrfs_debug_check_extent_io_range(tree, start, end) \
56 __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
58 struct extent_io_tree *tree,
61 struct btrfs_inode *inode = tree->inode;
84 * the tree lock and get the inode lock when setting delalloc. These two things
97 struct extent_io_tree *tree, unsigned int owner)
99 tree->fs_info = fs_info;
100 tree->state = RB_ROOT;
101 spin_lock_init(&tree->lock);
102 tree->inode = NULL;
103 tree->owner = owner;
105 lockdep_set_class(&tree->lock, &file_extent_tree_class);
108 void extent_io_tree_release(struct extent_io_tree *tree)
110 spin_lock(&tree->lock);
117 while (!RB_EMPTY_ROOT(&tree->state)) {
121 node = rb_first(&tree->state);
123 rb_erase(&state->rb_node, &tree->state);
132 cond_resched_lock(&tree->lock);
134 spin_unlock(&tree->lock);
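The matches above cover setup and teardown of an io tree. As a hedged sketch only (the signatures are taken from lines 97 and 108 of this listing; IO_TREE_SELFTEST is assumed to be one of the owner values defined in extent-io-tree.h), a caller typically pairs the two like this:

#include "extent-io-tree.h"

/*
 * Illustrative only: extent_io_tree_init() seeds fs_info, the rb-root,
 * the spinlock and the owner tag; extent_io_tree_release() drops any
 * extent_state records still left in the tree.
 */
static void example_io_tree_lifetime(struct btrfs_fs_info *fs_info,
				     struct extent_io_tree *tree)
{
	extent_io_tree_init(fs_info, tree, IO_TREE_SELFTEST);

	/* ... populate and query the tree ... */

	extent_io_tree_release(tree);
}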
217 * Search @tree for an entry that contains @offset. Such entry would have
220 * @tree: the tree to search
221 * @offset: offset that should fall within an entry in @tree
223 * entry in the tree)
233 static inline struct extent_state *tree_search_for_insert(struct extent_io_tree *tree,
238 struct rb_root *root = &tree->state;
268 * Search offset in the tree or fill neighbor rbtree node pointers.
270 * @tree: the tree to search
271 * @offset: offset that should fall within an entry in @tree
279 static struct extent_state *tree_search_prev_next(struct extent_io_tree *tree,
284 struct rb_root *root = &tree->state;
317 * Inexact rb-tree search, return the next entry if @offset is not found
319 static inline struct extent_state *tree_search(struct extent_io_tree *tree, u64 offset)
321 return tree_search_for_insert(tree, offset, NULL, NULL);
324 static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
326 btrfs_panic(tree->fs_info, err,
327 "locking error: extent tree was modified by another thread while locked");
333 * tree. Extents with EXTENT_IO in their state field are not merged because
337 * This should be called with the tree lock held.
339 static void merge_state(struct extent_io_tree *tree, struct extent_state *state)
349 if (tree->inode)
350 btrfs_merge_delalloc_extent(tree->inode, state, other);
352 rb_erase(&other->rb_node, &tree->state);
359 if (tree->inode)
360 btrfs_merge_delalloc_extent(tree->inode, state, other);
362 rb_erase(&other->rb_node, &tree->state);
368 static void set_state_bits(struct extent_io_tree *tree,
375 if (tree->inode)
376 btrfs_set_delalloc_extent(tree->inode, state, bits);
384 * Insert an extent_state struct into the tree. 'bits' are set on the
390 * The tree lock is not taken internally. This is a utility function and
393 static int insert_state(struct extent_io_tree *tree,
401 set_state_bits(tree, state, bits, changeset);
403 node = &tree->state.rb_node;
415 btrfs_err(tree->fs_info,
423 rb_insert_color(&state->rb_node, &tree->state);
425 merge_state(tree, state);
430 * Insert state into @tree at the location given by @node and @parent.
432 static void insert_state_fast(struct extent_io_tree *tree,
437 set_state_bits(tree, state, bits, changeset);
439 rb_insert_color(&state->rb_node, &tree->state);
440 merge_state(tree, state);
449 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
450 * are two extent state structs in the tree:
454 * The tree locks are not taken by this function. They need to be held
457 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
463 if (tree->inode)
464 btrfs_split_delalloc_extent(tree->inode, orig, split);
490 rb_insert_color(&prealloc->rb_node, &tree->state);
500 * struct is freed and removed from the tree
502 static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
511 if (tree->inode)
512 btrfs_clear_delalloc_extent(tree->inode, state, bits);
522 rb_erase(&state->rb_node, &tree->state);
529 merge_state(tree, state);
546 * Clear some bits on a range in the tree. This may require splitting or
547 * inserting elements in the tree, so the gfp mask is used to indicate which
551 * range from the tree regardless of state (i.e. for truncate).
555 * This takes the tree lock, and returns 0 on success and < 0 on error.
557 int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
572 btrfs_debug_check_extent_io_range(tree, start, end);
573 trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);
589 * is the case if we only have in the tree extent states that
596 spin_lock(&tree->lock);
617 state = tree_search(tree, start);
651 err = split_state(tree, state, prealloc, start);
653 extent_io_tree_panic(tree, err);
659 state = clear_state_bit(tree, state, bits, wake, changeset);
673 err = split_state(tree, state, prealloc, end + 1);
675 extent_io_tree_panic(tree, err);
680 clear_state_bit(tree, prealloc, bits, wake, changeset);
686 state = clear_state_bit(tree, state, bits, wake, changeset);
697 spin_unlock(&tree->lock);
703 spin_unlock(&tree->lock);
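The listing only shows the lines of __clear_extent_bit() that mention the tree, so here is a hedged sketch of a caller; the argument order (tree, start, end, bits, cached_state, changeset) is inferred from the call visible at line 1717 below and may differ between kernel versions.

/*
 * Illustrative only: clear EXTENT_DIRTY on an inclusive [start, end]
 * range, with no cached state and no changeset accounting.
 */
static int example_clear_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	return __clear_extent_bit(tree, start, end, EXTENT_DIRTY, NULL, NULL);
}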
711 static void wait_on_state(struct extent_io_tree *tree,
713 __releases(tree->lock)
714 __acquires(tree->lock)
718 spin_unlock(&tree->lock);
720 spin_lock(&tree->lock);
725 * Wait for one or more bits to clear on a range in the state tree.
727 * The tree lock is taken by this function
729 void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
734 btrfs_debug_check_extent_io_range(tree, start, end);
736 spin_lock(&tree->lock);
739 * Maintain cached_state, as we may not remove it from the tree if there
753 state = tree_search(tree, start);
763 wait_on_state(tree, state);
772 if (!cond_resched_lock(&tree->lock)) {
784 spin_unlock(&tree->lock);
808 * tree->lock must be held. NULL will be returned if nothing was found after
811 static struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
820 state = tree_search(tree, start);
830 * Find the first offset in the io tree with one or more @bits set.
837 bool find_first_extent_bit(struct extent_io_tree *tree, u64 start,
844 spin_lock(&tree->lock);
860 state = find_first_extent_bit_state(tree, start, bits);
869 spin_unlock(&tree->lock);
876 * @tree: io tree to check
884 * will drop the tree->lock, so use this helper if you want to find the actual
886 * then walk down the tree until we find a non-contiguous area. The area
889 int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
895 spin_lock(&tree->lock);
896 state = find_first_extent_bit_state(tree, start, bits);
907 spin_unlock(&tree->lock);
915 * True is returned if we find something, false if nothing was in the tree.
917 bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
926 spin_lock(&tree->lock);
932 state = tree_search(tree, cur_start);
962 spin_unlock(&tree->lock);
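The find helpers above are usually driven in a loop. A hedged sketch of that pattern follows; only the first two parameters of find_first_extent_bit() are visible at line 837, and the rest (start_ret, end_ret, bits, cached_state) are assumed and may vary by kernel version.

/*
 * Illustrative only: visit every inclusive range in @tree that has
 * EXTENT_DIRTY set, advancing past each one as it is found.
 */
static void example_walk_dirty(struct extent_io_tree *tree)
{
	u64 found_start = 0;
	u64 found_end;

	while (find_first_extent_bit(tree, found_start, &found_start,
				     &found_end, EXTENT_DIRTY, NULL)) {
		/* [found_start, found_end] has EXTENT_DIRTY set. */
		found_start = found_end + 1;
	}
}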
967 * Set some bits on a range in the tree. This may require allocations or
978 * [start, end] is inclusive. This takes the tree lock.
980 static int __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
997 btrfs_debug_check_extent_io_range(tree, start, end);
998 trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);
1009 * is the case if we only have in the tree extent states that
1016 spin_lock(&tree->lock);
1027 state = tree_search_for_insert(tree, start, &p, &parent);
1034 insert_state_fast(tree, prealloc, p, parent, bits, changeset);
1057 set_state_bits(tree, state, bits, changeset);
1059 merge_state(tree, state);
1106 err = split_state(tree, state, prealloc, start);
1108 extent_io_tree_panic(tree, err);
1114 set_state_bits(tree, state, bits, changeset);
1116 merge_state(tree, state);
1151 err = insert_state(tree, prealloc, bits, changeset);
1153 extent_io_tree_panic(tree, err);
1177 err = split_state(tree, state, prealloc, end + 1);
1179 extent_io_tree_panic(tree, err);
1181 set_state_bits(tree, prealloc, bits, changeset);
1183 merge_state(tree, prealloc);
1191 spin_unlock(&tree->lock);
1197 spin_unlock(&tree->lock);
1205 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1208 return __set_extent_bit(tree, start, end, bits, NULL, NULL,
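A hedged sketch of the public wrapper at line 1205; the trailing cached_state parameter is an assumption, since the listing truncates the signature.

/*
 * Illustrative only: mark an inclusive [start, end] range dirty through
 * the set_extent_bit() wrapper, passing no cached state.
 */
static int example_mark_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL);
}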
1215 * @tree: the io tree to search
1230 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1243 btrfs_debug_check_extent_io_range(tree, start, end);
1244 trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
1254 * after locking the tree.
1261 spin_lock(&tree->lock);
1273 state = tree_search_for_insert(tree, start, &p, &parent);
1282 insert_state_fast(tree, prealloc, p, parent, bits, NULL);
1298 set_state_bits(tree, state, bits, NULL);
1300 state = clear_state_bit(tree, state, clear_bits, 0, NULL);
1331 err = split_state(tree, state, prealloc, start);
1333 extent_io_tree_panic(tree, err);
1338 set_state_bits(tree, state, bits, NULL);
1340 state = clear_state_bit(tree, state, clear_bits, 0, NULL);
1376 err = insert_state(tree, prealloc, bits, NULL);
1378 extent_io_tree_panic(tree, err);
1397 err = split_state(tree, state, prealloc, end + 1);
1399 extent_io_tree_panic(tree, err);
1401 set_state_bits(tree, prealloc, bits, NULL);
1403 clear_state_bit(tree, prealloc, clear_bits, 0, NULL);
1411 spin_unlock(&tree->lock);
1417 spin_unlock(&tree->lock);
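convert_extent_bit() sets one group of bits while clearing another in a single pass over the range. A hedged sketch, with everything after the (tree, start, end) shown at line 1230 assumed:

/*
 * Illustrative only: switch a range from EXTENT_DIRTY to EXTENT_NEED_WAIT
 * in one traversal instead of a separate clear and set.
 */
static int example_convert(struct extent_io_tree *tree, u64 start, u64 end)
{
	return convert_extent_bit(tree, start, end, EXTENT_NEED_WAIT,
				  EXTENT_DIRTY, NULL);
}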
1428 * @tree: the tree to search
1439 void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
1445 spin_lock(&tree->lock);
1449 state = tree_search_prev_next(tree, start, &prev, &next);
1529 spin_unlock(&tree->lock);
1533 * Count the number of bytes in the tree that have a given bit(s) set for a
1536 * @tree: The io tree to search.
1557 u64 count_range_bits(struct extent_io_tree *tree,
1572 spin_lock(&tree->lock);
1591 * when there are holes between records in the tree. If there is
1607 state = tree_search(tree, cur_start);
1637 spin_unlock(&tree->lock);
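A hedged sketch of counting bytes with a bit set; only the first parameter of count_range_bits() is visible at line 1557, and the rest of the parameter list (start, search_end, max_bytes, bits, contig, cached_state) is an assumption that is known to vary across kernel versions.

/*
 * Illustrative only: count how many bytes in [start, end] carry
 * EXTENT_DELALLOC, with no byte cap beyond the range itself and without
 * requiring the counted ranges to be contiguous.
 */
static u64 example_count_delalloc(struct extent_io_tree *tree, u64 start, u64 end)
{
	u64 cur = start;

	return count_range_bits(tree, &cur, end, end - start + 1,
				EXTENT_DELALLOC, 0, NULL);
}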
1643 * Search a range in the state tree for a given mask. If 'filled' == 1, this
1644 * returns 1 only if every extent in the tree has the bits set. Otherwise, 1
1647 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1653 spin_lock(&tree->lock);
1658 state = tree_search(tree, start);
1689 spin_unlock(&tree->lock);
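Per the comment at line 1643, 'filled' selects between "every byte has the bit" and "any byte has the bit". A hedged sketch, with the (bit, filled, cached_state) tail of test_range_bit() assumed:

/*
 * Illustrative only: return non-zero only if every byte of the inclusive
 * [start, end] range has EXTENT_DELALLOC set (filled == 1).
 */
static int example_range_is_delalloc(struct extent_io_tree *tree, u64 start, u64 end)
{
	return test_range_bit(tree, start, end, EXTENT_DELALLOC, 1, NULL);
}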
1694 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1705 return __set_extent_bit(tree, start, end, bits, NULL, NULL, NULL, changeset);
1708 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1717 return __clear_extent_bit(tree, start, end, bits, NULL, changeset);
1720 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
1726 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start,
1730 clear_extent_bit(tree, start, failed_start - 1,
1741 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
1748 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start,
1752 clear_extent_bit(tree, start, failed_start - 1,
1755 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED,
1757 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
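Finally, a hedged sketch of the usual lock/unlock pairing around an inclusive range; lock_extent() is visible at line 1741, while unlock_extent() and the cached_state plumbing come from the header this file includes and are assumptions that may differ by kernel version.

/*
 * Illustrative only: take EXTENT_LOCKED on [start, end], do work, then
 * release it, letting the tree cache the state record in between.
 */
static void example_locked_region(struct extent_io_tree *tree, u64 start, u64 end)
{
	struct extent_state *cached = NULL;

	lock_extent(tree, start, end, &cached);

	/* ... the range is exclusively locked here ... */

	unlock_extent(tree, start, end, &cached);
}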