Lines matching defs:end (occurrences of the identifier 'end' in fs/btrfs/extent_io.c)

94 		pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
95 state->start, state->end, state->state,
103 #define btrfs_debug_check_extent_io_range(tree, start, end) \
104 __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
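
Note: the two lines above show the debug-check wrapper, a macro that forwards __func__ so the helper can report the real caller. A minimal user-space sketch of the same pattern (check_range()/__check_range() are hypothetical names, not kernel APIs):

        #include <stdio.h>

        /*
         * The macro expands in the caller, so __func__ names the
         * caller rather than the helper doing the check.
         */
        static void __check_range(const char *caller, unsigned long long start,
                                  unsigned long long end)
        {
                if (end < start)
                        fprintf(stderr, "%s: bad range [%llu, %llu]\n",
                                caller, start, end);
        }

        #define check_range(start, end) \
                __check_range(__func__, (start), (end))

        int main(void)
        {
                check_range(4096ULL, 0ULL);     /* reports "main: bad range ..." */
                return 0;
        }
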
106 struct extent_io_tree *tree, u64 start, u64 end)
115 if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
118 caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
130 u64 end;
157 changeset->bytes_changed += state->end - state->start + 1;
158 ret = ulist_add(&changeset->range_changed, state->start, state->end,
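
Note: the accounting above relies on the inclusive-range convention used throughout this file: a state covering [start, end] spans end - start + 1 bytes. A one-line worked check:

        #include <stdint.h>
        #include <stdio.h>

        /* Inclusive ranges: [0, 4095] is a full 4096-byte page. */
        int main(void)
        {
                uint64_t start = 0, end = 4095;

                printf("%llu bytes\n", (unsigned long long)(end - start + 1));
                return 0;
        }
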
379 else if (offset > entry->end)
393 * entry would have entry->start <= offset && entry->end >= offset.
429 else if (offset > entry->end)
442 while (prev && offset > prev_entry->end) {
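
Note: the search fragments above descend while offset falls outside [entry->start, entry->end], stop on containment, and otherwise fall back to a neighbouring entry. A user-space sketch of the same search over a sorted array of non-overlapping inclusive ranges (the kernel walks an rbtree; range_search() is a hypothetical name):

        #include <stdio.h>
        #include <stdint.h>

        struct range { uint64_t start, end; };

        /*
         * Return the entry with start <= offset <= end, or the first
         * entry that begins after offset, or NULL if none remains.
         */
        static const struct range *range_search(const struct range *r, int n,
                                                uint64_t offset)
        {
                int lo = 0, hi = n;

                while (lo < hi) {
                        int mid = lo + (hi - lo) / 2;

                        if (offset < r[mid].start)
                                hi = mid;
                        else if (offset > r[mid].end)
                                lo = mid + 1;
                        else
                                return &r[mid]; /* start <= offset <= end */
                }
                return lo < n ? &r[lo] : NULL;  /* next range after offset */
        }

        int main(void)
        {
                const struct range tree[] = { {0, 4095}, {8192, 12287} };
                const struct range *hit = range_search(tree, 2, 5000);

                if (hit)
                        printf("next range: [%llu, %llu]\n",
                               (unsigned long long)hit->start,
                               (unsigned long long)hit->end);
                return 0;
        }
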
503 if (other->end == state->start - 1 &&
518 if (other->start == state->end + 1 &&
524 state->end = other->end;
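
Note: the merge checks above (other->end == state->start - 1, other->start == state->end + 1) treat two inclusive ranges as mergeable when one ends exactly one byte before the other begins. A small sketch of that adjacency test (plain structs; the kernel additionally requires matching state bits and removes the absorbed node from the rbtree):

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        struct state { uint64_t start, end; };

        /* Absorb 'other' into 'state' if the two ranges touch. */
        static bool try_merge(struct state *state, const struct state *other)
        {
                if (other->end + 1 == state->start) {   /* other in front */
                        state->start = other->start;
                        return true;
                }
                if (state->end + 1 == other->start) {   /* other behind */
                        state->end = other->end;
                        return true;
                }
                return false;
        }

        int main(void)
        {
                struct state a = { 4096, 8191 };
                const struct state b = { 0, 4095 };

                if (try_merge(&a, &b))
                        printf("merged: [%llu, %llu]\n",
                               (unsigned long long)a.start,
                               (unsigned long long)a.end);
                return 0;
        }
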
547 struct extent_state *state, u64 start, u64 end,
554 if (end < start) {
556 "insert state: end < start %llu %llu", end, start);
560 state->end = end;
564 node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
570 found->start, found->end, start, end);
583 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
586 * orig: [ split, orig->end ]
600 prealloc->end = split - 1;
604 node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
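
Note: the split fragments above describe the arithmetic: splitting [orig->start, orig->end] at 'split' leaves prealloc = [orig->start, split - 1] and shrinks orig to [split, orig->end]. A user-space sketch of just that arithmetic (split_range() is a hypothetical stand-in for split_state(); the tree insertion is omitted):

        #include <assert.h>
        #include <stdint.h>
        #include <stdio.h>

        struct state { uint64_t start, end; };

        /* prealloc takes the front half, orig keeps the back half. */
        static void split_range(struct state *orig, struct state *prealloc,
                                uint64_t split)
        {
                assert(split > orig->start && split <= orig->end);
                prealloc->start = orig->start;
                prealloc->end = split - 1;
                orig->start = split;
        }

        int main(void)
        {
                struct state orig = { 0, 8191 }, pre;

                split_range(&orig, &pre, 4096);
                printf("pre [%llu,%llu] orig [%llu,%llu]\n",
                       (unsigned long long)pre.start,
                       (unsigned long long)pre.end,
                       (unsigned long long)orig.start,
                       (unsigned long long)orig.end);
                return 0;
        }
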
639 u64 range = state->end - state->start + 1;
691 * the range [start, end] is inclusive.
695 int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
708 btrfs_debug_check_extent_io_range(tree, start, end);
709 trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);
722 * Don't care for allocation failure here because we might end
726 * If we end up needing a new extent state we allocate it later.
741 cached->start <= start && cached->end > start) {
751 * this search will find the extents that end after
759 if (state->start > end)
761 WARN_ON(state->end < start);
762 last_end = state->end;
796 if (state->end <= end) {
809 if (state->start <= end && state->end > end) {
812 err = split_state(tree, state, prealloc, end + 1);
830 if (start <= end && state && !need_resched())
834 if (start > end)
865 * The range [start, end] is inclusive.
868 static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
874 btrfs_debug_check_extent_io_range(tree, start, end);
880 * this search will find all the extents that end after
890 if (state->start > end)
900 start = state->end + 1;
902 if (start > end)
925 u64 range = state->end - state->start + 1;
960 * [start, end] is inclusive. This takes the tree lock.
964 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
978 btrfs_debug_check_extent_io_range(tree, start, end);
979 trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);
984 * Don't care for allocation failure here because we might end
988 * If we end up needing a new extent state we allocate it later.
996 if (state->start <= start && state->end > start &&
1003 * this search will find all the extents that end after
1010 err = insert_state(tree, prealloc, start, end,
1022 last_end = state->end;
1030 if (state->start == start && state->end <= end) {
1044 if (start < end && state && state->start == start &&
1078 start = state->end + 1;
1092 if (state->end <= end) {
1100 if (start < end && state && state->start == start &&
1115 if (end < last_start)
1116 this_end = end;
1143 if (state->start <= end && state->end > end) {
1152 err = split_state(tree, state, prealloc, end + 1);
1164 if (start > end)
1180 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1184 return __set_extent_bit(tree, start, end, bits, 0, failed_start,
1194 * @end: the end offset in bytes (inclusive)
1207 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1221 btrfs_debug_check_extent_io_range(tree, start, end);
1222 trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
1242 if (state->start <= start && state->end > start &&
1250 * this search will find all the extents that end after
1260 err = insert_state(tree, prealloc, start, end,
1271 last_end = state->end;
1279 if (state->start == start && state->end <= end) {
1286 if (start < end && state && state->start == start &&
1320 if (state->end <= end) {
1328 if (start < end && state && state->start == start &&
1343 if (end < last_start)
1344 this_end = end;
1373 if (state->start <= end && state->end > end) {
1380 err = split_state(tree, state, prealloc, end + 1);
1392 if (start > end)
1408 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1419 return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
1423 int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
1426 return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL,
1430 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1434 return __clear_extent_bit(tree, start, end, bits, wake, delete,
1438 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1447 return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
1452 * either insert or lock state struct between start and end; use mask to tell
1455 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1462 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
1466 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1470 WARN_ON(start > end);
1475 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1480 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
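
Note: the lock_extent_bits()/try_lock_extent() fragments above share one shape: try to set EXTENT_LOCKED, and on conflict learn failed_start, wait on that range, then retry from it. A user-space model of the retry loop (set_locked_bit()/wait_unlocked() are hypothetical stubs, not kernel APIs; the stub conflicts once and then succeeds):

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        /* Pretend someone else holds the range on the first attempt. */
        static bool set_locked_bit(uint64_t start, uint64_t end,
                                   uint64_t *failed_start)
        {
                static bool conflict = true;

                (void)end;
                if (conflict) {
                        conflict = false;
                        *failed_start = start;
                        return false;
                }
                return true;
        }

        static void wait_unlocked(uint64_t start, uint64_t end)
        {
                printf("waiting on [%llu, %llu]\n",
                       (unsigned long long)start, (unsigned long long)end);
        }

        static void lock_range(uint64_t start, uint64_t end)
        {
                uint64_t failed_start;

                while (!set_locked_bit(start, end, &failed_start)) {
                        wait_unlocked(failed_start, end);
                        start = failed_start;   /* retry from the conflict */
                }
                printf("locked [%llu, %llu]\n",
                       (unsigned long long)start, (unsigned long long)end);
        }

        int main(void)
        {
                lock_range(0, 8191);
                return 0;
        }
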
1491 void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
1494 unsigned long end_index = end >> PAGE_SHIFT;
1506 void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1509 unsigned long end_index = end >> PAGE_SHIFT;
1534 * this search will find all the extents that end after
1543 if (state->end >= start && (state->state & bits))
1571 if (state->end == start - 1 && extent_state_in_tree(state)) {
1589 *end_ret = state->end;
1622 *end_ret = state->end;
1626 *end_ret = state->end;
1641 * @end_ret - records the end of the range (inclusive)
1646 * spans (last_range_end, end of device]. In this case it's up to the caller to
1671 * the end of the last extent.
1674 *start_ret = state->end + 1;
1686 if (in_range(start, state->start, state->end - state->start + 1)) {
1693 start = state->end + 1;
1722 *start_ret = state->end + 1;
1736 if (state->end >= start && !(state->state & bits)) {
1737 *end_ret = state->end;
1753 * more than 'max_bytes'. start and end are used to return the range,
1758 u64 *end, u64 max_bytes,
1770 * this search will find all the extents that end after
1775 *end = (u64)-1;
1787 *end = state->end;
1796 *end = state->end;
1797 cur_start = state->end + 1;
1799 total_bytes += state->end - state->start + 1;
1817 u64 start, u64 end)
1820 unsigned long end_index = end >> PAGE_SHIFT;
1854 * more than @max_bytes. @start and @end are used to return the range,
1862 u64 *end)
1881 *end = delalloc_end;
1936 *end = delalloc_end;
2019 void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
2024 clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL);
2027 start >> PAGE_SHIFT, end >> PAGE_SHIFT,
2056 * this search will find all the extents that end after
2069 if (state->end >= cur_start && (state->state & bits) == bits) {
2070 total_bytes += min(search_end, state->end) + 1 -
2078 last = state->end;
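
Note: the accounting above computes how many bytes of the query [cur_start, search_end] fall inside a matching state: min(search_end, state->end) + 1 - max(cur_start, state->start), i.e. the overlap of two inclusive ranges. A worked check:

        #include <stdint.h>
        #include <stdio.h>

        static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }
        static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

        int main(void)
        {
                uint64_t cur_start = 4096, search_end = 12287;  /* query */
                uint64_t s = 0, e = 8191;                       /* state */
                uint64_t bytes = min_u64(search_end, e) + 1 -
                                 max_u64(cur_start, s);

                /* min(12287, 8191) + 1 - max(4096, 0) = 8192 - 4096 */
                printf("%llu bytes overlap\n", (unsigned long long)bytes);
                return 0;
        }
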
2104 * this search will find all the extents that end after
2131 * this search will find all the extents that end after
2157 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
2166 cached->end > start)
2170 while (node && start <= end) {
2178 if (state->start > end)
2190 if (state->end == (u64)-1)
2193 start = state->end + 1;
2194 if (start > end)
2214 u64 end = start + PAGE_SIZE - 1;
2215 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
2401 state->end >= failrec->start + failrec->len - 1) {
2423 void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
2435 if (state->start > end)
2438 ASSERT(state->end <= end);
2452 u64 start, u64 end)
2483 failrec->len = end - start + 1;
2522 ret = set_extent_bits(failure_tree, start, end,
2527 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED);
2617 * There are a few possible bios that can end up here:
2645 u64 start, u64 end, int failed_mirror,
2664 failrec = btrfs_get_io_failure_record(inode, start, end);
2712 void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2717 btrfs_writepage_endio_finish_ordered(page, start, end, uptodate);
2741 u64 end;
2767 end = start + bvec->bv_offset + bvec->bv_len - 1;
2769 end_extent_writepage(page, error, start, end);
2781 u64 end = start + len - 1;
2784 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
2785 unlock_extent_cached_atomic(tree, start, end, &cached);
2807 u64 end;
2845 end = start + bvec->bv_offset + bvec->bv_len - 1;
2852 start, end, mirror);
2855 offset, page, start, end, mirror);
2873 * created and submitted and will end up in
2882 start, end, mirror,
2905 /* Zero out the end if this page straddles i_size */
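
Note: the comment above marks the read-completion path that zeroes the tail of the last page when i_size ends inside it, so stale data past EOF never becomes visible. A user-space sketch of the offset arithmetic (4K pages assumed; the kernel zeroes the real page with zero_user_segment()):

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        #define PAGE_SHIFT      12
        #define PAGE_SIZE       (1UL << PAGE_SHIFT)

        int main(void)
        {
                uint64_t i_size = 10000;        /* EOF inside page 2 */
                unsigned long end_index = i_size >> PAGE_SHIFT;
                unsigned long off = i_size & (PAGE_SIZE - 1);
                unsigned char page[PAGE_SIZE];

                memset(page, 0xff, sizeof(page));       /* stale bytes */
                if (off)        /* page end_index straddles i_size */
                        memset(page + off, 0, PAGE_SIZE - off);

                printf("page %lu zeroed from byte %lu\n", end_index, off);
                return 0;
        }
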
2926 end - start + 1, 0);
2929 extent_len = end + 1 - start;
2931 extent_len += end + 1 - start;
2936 extent_len = end + 1 - start;
3148 const u64 end = start + PAGE_SIZE - 1;
3169 unlock_extent(tree, start, end);
3186 while (cur <= end) {
3206 end - cur + 1, em_cached);
3209 unlock_extent(tree, cur, end);
3214 BUG_ON(end < cur);
3222 iosize = min(extent_map_end(em) - cur, end - cur + 1);
3223 cur_end = min(extent_map_end(em) - 1, end);
3347 u64 start, u64 end,
3356 btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
3465 u64 end;
3493 end = page_end;
3496 while (cur <= end) {
3505 em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1);
3515 BUG_ON(end < cur);
3516 iosize = min(em_end - cur, end - cur + 1);
3543 "page %lu not writeback, cur %llu end %llu",
3544 page->index, cur, end);
3700 * under IO since we can end up having no IO bits set for a short period
3878 unsigned long start, end;
3889 end = btrfs_node_key_ptr_offset(nritems);
3891 memzero_extent_buffer(eb, end, eb->len - end);
3898 end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb);
3899 memzero_extent_buffer(eb, start, end - start);
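
Note: the memzero_extent_buffer() calls above zero the unused gap of a tree block before it is written: item headers grow from the front, item data grows from the back, and the hole between them must not leak stale memory into the on-disk image. A user-space sketch with made-up offsets:

        #include <stdio.h>
        #include <string.h>

        #define BLOCK_SIZE      4096

        int main(void)
        {
                unsigned char block[BLOCK_SIZE];
                size_t items_end = 512;         /* end of item headers */
                size_t data_start = 3072;       /* start of item data */

                memset(block, 0xaa, sizeof(block));     /* stale contents */
                memset(block + items_end, 0, data_start - items_end);

                printf("zeroed [%zu, %zu)\n", items_end, data_start);
                return 0;
        }
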
3953 pgoff_t end; /* Inclusive */
3960 end = -1;
3968 end = wbc->range_end >> PAGE_SHIFT;
3977 tag_pages_for_writeback(mapping, index, end);
3978 while (!done && !nr_to_write_done && (index <= end) &&
3979 (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
4122 pgoff_t end; /* Inclusive */
4143 end = -1;
4151 end = wbc->range_end >> PAGE_SHIFT;
4175 tag_pages_for_writeback(mapping, index, end);
4177 while (!done && !nr_to_write_done && (index <= end) &&
4179 &index, end, tag))) {
4281 int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
4287 unsigned long nr_pages = (end - start + PAGE_SIZE) >>
4299 .range_end = end + 1,
4306 while (start <= end) {
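
Note: the fragments above convert an inclusive byte range to pages: indexes run from start >> PAGE_SHIFT to end >> PAGE_SHIFT, and (end - start + PAGE_SIZE) >> PAGE_SHIFT is a ceiling division counting those pages (assuming a page-aligned start). A worked check with 4K pages:

        #include <stdint.h>
        #include <stdio.h>

        #define PAGE_SHIFT      12
        #define PAGE_SIZE       (1UL << PAGE_SHIFT)

        int main(void)
        {
                uint64_t start = 0, end = 16383;        /* four 4K pages */
                unsigned long first = start >> PAGE_SHIFT;
                unsigned long last = end >> PAGE_SHIFT;
                unsigned long nr = (end - start + PAGE_SIZE) >> PAGE_SHIFT;

                printf("pages %lu..%lu, nr=%lu\n", first, last, nr); /* 0..3, 4 */
                return 0;
        }
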
4387 u64 end = start + PAGE_SIZE - 1;
4391 if (start > end)
4394 lock_extent_bits(tree, start, end, &cached_state);
4396 clear_extent_bit(tree, start, end, EXTENT_LOCKED | EXTENT_DELALLOC |
4410 u64 end = start + PAGE_SIZE - 1;
4413 if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
4420 ret = __clear_extent_bit(tree, start, end,
4444 u64 end = start + PAGE_SIZE - 1;
4452 while (start <= end) {
4456 len = end - start + 1;
4688 int end = 0;
4737 /* have to trust i_size as the end */
4772 while (!end) {
4808 end = 1;
4811 end = 1;
4848 end = 1;
4859 end = 1;