Lines matching "end" in fs/btrfs/extent-io-tree.c

46 		pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
47 state->start, state->end, state->state,
55 #define btrfs_debug_check_extent_io_range(tree, start, end) \
56 __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
59 u64 start, u64 end)
68 if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
71 caller, btrfs_ino(inode), isize, start, end);
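
The check above is a debug heuristic, not a correctness test: extent I/O ranges are inclusive, so a range covering whole pages ends at N*PAGE_SIZE - 1, which is always odd. An even 'end' (other than isize - 1) usually means a caller passed an exclusive end. A minimal standalone model of that heuristic, assuming 4 KiB pages (MODEL_PAGE_SIZE and range_looks_odd are illustrative names, not kernel API):

#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096ULL	/* assumption: 4 KiB pages */

/* Mirrors the predicate above: flag ranges whose inclusive 'end' is even,
 * unless it happens to be the last byte of the file (isize - 1). */
static int range_looks_odd(uint64_t end, uint64_t isize)
{
	return end >= MODEL_PAGE_SIZE && (end % 2) == 0 && end != isize - 1;
}

int main(void)
{
	printf("%d\n", range_looks_odd(8191, 16384));	/* 0: ends at a page boundary - 1 */
	printf("%d\n", range_looks_odd(8192, 16384));	/* 1: likely an exclusive end */
	return 0;
}
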
92 u64 end;
190 changeset->bytes_changed += state->end - state->start + 1;
191 ret = ulist_add(&changeset->range_changed, state->start, state->end,
218 * entry->start <= offset && entry->end >= offset.
249 else if (offset > entry->end)
260 /* Search neighbors until we find the first one past the end */
261 while (entry && offset > entry->end)
297 else if (offset > entry->end)
304 while (entry && offset > entry->end)
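
The comparisons above implement the contract documented at line 218: return the entry containing 'offset', or else the first entry past it. A userspace sketch of the same contract over a sorted array (the kernel walks an rbtree; 'model_search' and 'struct range' are illustrative names only):

#include <inttypes.h>
#include <stddef.h>
#include <stdio.h>

struct range { uint64_t start, end; };	/* inclusive on both sides */

/* First entry with entry->end >= offset: either the range containing
 * 'offset' or the nearest one after it; NULL if offset is past them all. */
static const struct range *model_search(const struct range *r, size_t n,
					uint64_t offset)
{
	for (size_t i = 0; i < n; i++)
		if (offset <= r[i].end)
			return &r[i];
	return NULL;
}

int main(void)
{
	const struct range tree[] = { { 0, 4095 }, { 8192, 12287 } };
	const struct range *hit = model_search(tree, 2, 5000);

	if (hit)	/* prints [8192, 12287]: first range ending after 5000 */
		printf("[%" PRIu64 ", %" PRIu64 "]\n", hit->start, hit->end);
	return 0;
}
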
347 if (other && other->end == state->start - 1 &&
357 if (other && other->start == state->end + 1 &&
361 state->end = other->end;
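
Because both offsets are inclusive, two states are mergeable only when one ends exactly one byte before the other starts and their state bits match, which is what the two checks above test against the previous and next neighbor. A standalone sketch of that adjacency rule (names illustrative):

#include <stdint.h>
#include <stdio.h>

struct state { uint64_t start, end; uint32_t bits; };

/* Fuse 'next' into 'prev' when they are byte-adjacent and carry the same
 * bits; returns 1 on merge. */
static int try_merge(struct state *prev, const struct state *next)
{
	if (prev->end == next->start - 1 && prev->bits == next->bits) {
		prev->end = next->end;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct state a = { 0, 4095, 0x1 };
	struct state b = { 4096, 8191, 0x1 };

	if (try_merge(&a, &b))	/* prints [0, 8191] */
		printf("[%llu, %llu]\n", (unsigned long long)a.start,
		       (unsigned long long)a.end);
	return 0;
}
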
399 const u64 end = state->end;
410 if (end < entry->start) {
412 } else if (end > entry->end) {
417 entry->start, entry->end, state->start, end);
449 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
452 * orig: [ split, orig->end ]
467 prealloc->end = split - 1;
479 if (prealloc->end < entry->start) {
481 } else if (prealloc->end > entry->end) {
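
split_state cuts one inclusive range into two that still cover every byte exactly once: per the comment at lines 449-452, the preallocated state takes [orig->start, split - 1] and the original keeps [split, orig->end]. A worked standalone example of the arithmetic (a model, not the kernel function, which also relinks the rbtree):

#include <stdint.h>
#include <stdio.h>

struct state { uint64_t start, end; };

/* Model of the split: 'pre' becomes the front half, 'orig' the back half. */
static void model_split(struct state *orig, struct state *pre, uint64_t split)
{
	pre->start = orig->start;
	pre->end = split - 1;
	orig->start = split;
}

int main(void)
{
	struct state orig = { 0, 8191 }, pre;

	model_split(&orig, &pre, 4096);
	/* prints [0, 4095] [4096, 8191]: no byte lost, none counted twice */
	printf("[%llu, %llu] [%llu, %llu]\n",
	       (unsigned long long)pre.start, (unsigned long long)pre.end,
	       (unsigned long long)orig.start, (unsigned long long)orig.end);
	return 0;
}
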
553 * The range [start, end] is inclusive.
557 int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
572 btrfs_debug_check_extent_io_range(tree, start, end);
573 trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);
587 * Don't care for allocation failure here because we might end
591 * If we end up needing a new extent state we allocate it later.
606 cached->start <= start && cached->end > start) {
616 /* This search will find the extents that end after our range starts. */
621 if (state->start > end)
623 WARN_ON(state->end < start);
624 last_end = state->end;
658 if (state->end <= end) {
669 if (state->start <= end && state->end > end) {
673 err = split_state(tree, state, prealloc, end + 1);
691 if (start <= end && state && !need_resched())
695 if (start > end)
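
When the clear range covers only the middle of an existing state, __clear_extent_bit splits twice, once at 'start' and once at 'end + 1' (the split visible at line 673), so only the middle piece loses its bits. A sketch of the resulting boundary arithmetic, assuming a single affected state (illustrative model only):

#include <stdint.h>
#include <stdio.h>

/* Given one state [s_start, s_end] and a clear range strictly inside it,
 * print the two surviving pieces, i.e. the outcome of the two splits. */
static void model_clear_middle(uint64_t s_start, uint64_t s_end,
			       uint64_t c_start, uint64_t c_end)
{
	printf("kept: [%llu, %llu] and [%llu, %llu]\n",
	       (unsigned long long)s_start, (unsigned long long)(c_start - 1),
	       (unsigned long long)(c_end + 1), (unsigned long long)s_end);
}

int main(void)
{
	model_clear_middle(0, 8191, 2048, 6143);
	/* kept: [0, 2047] and [6144, 8191] */
	return 0;
}
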
726 * The range [start, end] is inclusive.
729 void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
734 btrfs_debug_check_extent_io_range(tree, start, end);
745 state->start <= start && start < state->end)
750 * This search will find all the extents that end after our
757 if (state->start > end)
767 start = state->end + 1;
769 if (start > end)
817 * This search will find all the extents that end after our range
822 if (state->end >= start && (state->state & bits))
847 if (state->end == start - 1 && extent_state_in_tree(state)) {
865 *end_ret = state->end;
899 *end_ret = state->end;
903 *end_ret = state->end;
913 * than 'max_bytes'. 'start' and 'end' are used to return the range.
918 u64 *end, u64 max_bytes,
929 * This search will find all the extents that end after our range
934 *end = (u64)-1;
945 *end = state->end;
954 *end = state->end;
955 cur_start = state->end + 1;
956 total_bytes += state->end - state->start + 1;
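
btrfs_find_delalloc_range accumulates contiguous states: it starts at the first state ending after *start, keeps extending while each next state begins exactly one byte past the previous one's end, and stops once total_bytes reaches max_bytes or a gap appears. A standalone model of that accumulation over a sorted array (names illustrative):

#include <inttypes.h>
#include <stddef.h>
#include <stdio.h>

struct range { uint64_t start, end; };	/* inclusive, sorted, non-overlapping */

/* Walk 'r' from index 0, taking contiguous ranges until 'max_bytes' is
 * reached; report the covered [out_start, out_end]. */
static int model_find_delalloc(const struct range *r, size_t n,
			       uint64_t max_bytes,
			       uint64_t *out_start, uint64_t *out_end)
{
	uint64_t total = 0;

	for (size_t i = 0; i < n; i++) {
		if (i == 0)
			*out_start = r[i].start;
		else if (r[i].start != r[i - 1].end + 1)
			break;			/* hole: stop at the gap */
		*out_end = r[i].end;
		total += r[i].end - r[i].start + 1;
		if (total >= max_bytes)
			break;			/* found at least max_bytes */
	}
	return total > 0;
}

int main(void)
{
	const struct range delalloc[] = {
		{ 0, 4095 }, { 4096, 8191 }, { 16384, 20479 }
	};
	uint64_t s = 0, e = 0;

	if (model_find_delalloc(delalloc, 3, 1 << 20, &s, &e))
		printf("[%" PRIu64 ", %" PRIu64 "]\n", s, e); /* [0, 8191]: stops at the hole */
	return 0;
}
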
978 * [start, end] is inclusive. This takes the tree lock.
980 static int __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
997 btrfs_debug_check_extent_io_range(tree, start, end);
998 trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);
1007 * Don't care for allocation failure here because we might end
1011 * If we end up needing a new extent state we allocate it later.
1019 if (state->start <= start && state->end > start &&
1024 * This search will find all the extents that end after our range
1033 prealloc->end = end;
1041 last_end = state->end;
1049 if (state->start == start && state->end <= end) {
1064 if (start < end && state && state->start == start &&
1098 start = state->end + 1;
1113 if (state->end <= end) {
1121 if (start < end && state && state->start == start &&
1136 if (end < last_start)
1137 this_end = end;
1150 prealloc->end = this_end;
1166 if (state->start <= end && state->end > end) {
1177 err = split_state(tree, state, prealloc, end + 1);
1189 if (start > end)
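
One subtlety in the loop above: when there is a hole between 'start' and the next existing state, the state inserted to fill it must stop both at the requested 'end' and just before the next state, hence the this_end = min(end, last_start - 1) clamp at lines 1136-1137. A one-function demonstration (illustrative name):

#include <stdint.h>
#include <stdio.h>

/* The gap to fill runs from 'start' to whichever comes first: the caller's
 * inclusive 'end', or the byte just before the next existing state. */
static uint64_t clamp_gap_end(uint64_t end, uint64_t last_start)
{
	return end < last_start ? end : last_start - 1;
}

int main(void)
{
	/* next state begins at 8192, caller asked through 12287:
	 * fill only [start, 8191] now, continue the loop past it */
	printf("%llu\n", (unsigned long long)clamp_gap_end(12287, 8192));
	return 0;
}
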
1205 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1208 return __set_extent_bit(tree, start, end, bits, NULL, NULL,
1217 * @end: the end offset in bytes (inclusive)
1230 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1243 btrfs_debug_check_extent_io_range(tree, start, end);
1244 trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
1264 if (state->start <= start && state->end > start &&
1270 * This search will find all the extents that end after our range
1281 prealloc->end = end;
1289 last_end = state->end;
1297 if (state->start == start && state->end <= end) {
1304 if (start < end && state && state->start == start &&
1337 if (state->end <= end) {
1344 if (start < end && state && state->start == start &&
1359 if (end < last_start)
1360 this_end = end;
1375 prealloc->end = this_end;
1390 if (state->start <= end && state->end > end) {
1397 err = split_state(tree, state, prealloc, end + 1);
1409 if (start > end)
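
convert_extent_bit walks the range the same way as __set_extent_bit, but at each state it both sets 'bits' and clears 'clear_bits' in one pass. The per-state bit update reduces to a single expression; a tiny standalone model (flag values are illustrative, not the kernel's EXTENT_* constants):

#include <stdint.h>
#include <stdio.h>

#define MODEL_BIT_DIRTY  0x1u	/* illustrative values only */
#define MODEL_BIT_LOCKED 0x2u

/* One pass: gain the wanted bits, drop the unwanted ones. */
static uint32_t convert_bits(uint32_t state, uint32_t set, uint32_t clear)
{
	return (state | set) & ~clear;
}

int main(void)
{
	uint32_t s = MODEL_BIT_LOCKED;

	s = convert_bits(s, MODEL_BIT_DIRTY, MODEL_BIT_LOCKED);
	printf("0x%x\n", s);	/* 0x1: DIRTY gained, LOCKED dropped */
	return 0;
}
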
1431 * @end_ret: records the end of the range (inclusive)
1436 * spans (last_range_end, end of device]. In this case it's up to the caller to
1461 * the end of the last extent.
1463 *start_ret = prev->end + 1;
1474 if (in_range(start, state->start, state->end - state->start + 1)) {
1481 start = state->end + 1;
1508 *start_ret = prev->end + 1;
1520 if (state->end >= start && !(state->state & bits)) {
1521 *end_ret = state->end;
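
find_first_clear_extent_bit reports the gap around 'start' in which none of 'bits' are set; as the comment at line 1436 notes, when no later range bounds the gap the end stays open and the caller trims it. A model over a sorted array of set ranges, assuming 'offset' itself is clear (names illustrative):

#include <inttypes.h>
#include <stddef.h>
#include <stdio.h>

struct range { uint64_t start, end; };	/* ranges where the bit IS set */

/* Find the inclusive gap [*gs, *ge] containing 'offset' between (or after)
 * the set ranges; assumes 'offset' is not inside any of them. */
static void model_first_clear(const struct range *r, size_t n, uint64_t offset,
			      uint64_t *gs, uint64_t *ge)
{
	*gs = 0;
	*ge = UINT64_MAX;	/* open-ended until a later range bounds it */
	for (size_t i = 0; i < n; i++) {
		if (r[i].end < offset) {
			*gs = r[i].end + 1;	/* gap begins after this range */
		} else if (r[i].start > offset) {
			*ge = r[i].start - 1;	/* gap ends before this one */
			break;
		}
	}
}

int main(void)
{
	const struct range set[] = { { 0, 4095 }, { 16384, 20479 } };
	uint64_t gs, ge;

	model_first_clear(set, 2, 8192, &gs, &ge);
	printf("[%" PRIu64 ", %" PRIu64 "]\n", gs, ge);	/* [4096, 16383] */
	return 0;
}
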
1539 * can end up being bigger than the initial value.
1540 * @search_end: The end offset (inclusive value) of the search range.
1582 if (cached->start <= cur_start && cur_start <= cached->end) {
1597 else if (prev->start <= cur_start && cur_start <= prev->end)
1602 * This search will find all the extents that end after our range
1614 if (state->end >= cur_start && (state->state & bits) == bits) {
1615 total_bytes += min(search_end, state->end) + 1 -
1623 last = state->end;
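
The accumulation above clips each matching state to the search window before counting it; because both bounds are inclusive, the per-state contribution is min(search_end, state->end) + 1 - max(cur_start, state->start). A worked standalone check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }
static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

/* Bytes that a state [s, e] contributes to the window [ws, we], both
 * ranges inclusive. */
static uint64_t clipped_bytes(uint64_t s, uint64_t e, uint64_t ws, uint64_t we)
{
	return min_u64(we, e) + 1 - max_u64(ws, s);
}

int main(void)
{
	/* state [0, 8191] clipped to window [4096, 12287] -> 4096 bytes */
	printf("%llu\n", (unsigned long long)clipped_bytes(0, 8191, 4096, 12287));
	return 0;
}
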
1647 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1655 cached->end > start)
1659 while (state && start <= end) {
1665 if (state->start > end)
1677 if (state->end == (u64)-1)
1680 start = state->end + 1;
1681 if (start > end)
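
test_range_bit answers "does every byte of [start, end] carry these bits?": it walks state by state and fails on the first hole or missing bit, while a state reaching (u64)-1 or past 'end' ends the walk with success. A standalone model over a sorted array (names illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct state { uint64_t start, end; uint32_t bits; };	/* sorted, inclusive */

/* 1 iff every byte of [start, end] is covered by states carrying 'bit'. */
static int model_test_range_bit(const struct state *s, size_t n,
				uint64_t start, uint64_t end, uint32_t bit)
{
	for (size_t i = 0; i < n && start <= end; i++) {
		if (s[i].end < start)
			continue;		/* entirely before the range */
		if (s[i].start > start || !(s[i].bits & bit))
			return 0;		/* hole or missing bit */
		start = s[i].end + 1;		/* advance past this state */
	}
	return start > end;
}

int main(void)
{
	const struct state tree[] = { { 0, 4095, 0x1 }, { 4096, 8191, 0x1 } };

	printf("%d\n", model_test_range_bit(tree, 2, 0, 8191, 0x1)); /* 1 */
	printf("%d\n", model_test_range_bit(tree, 2, 0, 9000, 0x1)); /* 0: hole after 8191 */
	return 0;
}
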
1694 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1705 return __set_extent_bit(tree, start, end, bits, NULL, NULL, NULL, changeset);
1708 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1717 return __clear_extent_bit(tree, start, end, bits, NULL, changeset);
1720 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
1726 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start,
1738 * Either insert or lock the state struct between start and end; use mask to tell
1741 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
1748 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start,
1755 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED,
1757 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
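
lock_extent loops: it tries to set EXTENT_LOCKED as an exclusive bit; on -EEXIST it releases whatever it managed to lock before the collision ([start, failed_start - 1]), waits at failed_start for the lock bit to clear, then retries the whole range, as the calls at lines 1755-1757 show. A structural sketch with stubbed tree operations (the stubs and names are illustrative; the real calls are __set_extent_bit, clear_extent_bit and wait_extent_bit):

#include <stdint.h>
#include <stdio.h>

#define MODEL_EEXIST 17		/* stand-in for -EEXIST */

static int attempts;

/* Stub: first attempt collides at 4096 after locking [start, 4095],
 * the retry succeeds. */
static int try_set_locked(uint64_t start, uint64_t end, uint64_t *failed_start)
{
	(void)start; (void)end;
	if (attempts++ == 0) {
		*failed_start = 4096;
		return -MODEL_EEXIST;
	}
	return 0;
}

static void release_partial(uint64_t start, uint64_t end)
{
	printf("unlocking partial [%llu, %llu]\n",
	       (unsigned long long)start, (unsigned long long)end);
}

static void wait_unlocked(uint64_t start, uint64_t end)
{
	printf("waiting on [%llu, %llu]\n",
	       (unsigned long long)start, (unsigned long long)end);
}

/* Shape of the loop above: drop the partially locked front, wait from the
 * collision point, then retry the full range. */
static int model_lock(uint64_t start, uint64_t end)
{
	uint64_t failed_start;
	int err = try_set_locked(start, end, &failed_start);

	while (err == -MODEL_EEXIST) {
		if (failed_start != start)
			release_partial(start, failed_start - 1);
		wait_unlocked(failed_start, end);
		err = try_set_locked(start, end, &failed_start);
	}
	return err;
}

int main(void)
{
	return model_lock(0, 8191);
}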