Lines Matching refs:start

79 	"BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n",
80 eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
94 pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
95 state->start, state->end, state->state,
103 #define btrfs_debug_check_extent_io_range(tree, start, end) \
104 __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
106 struct extent_io_tree *tree, u64 start, u64 end)
118 caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
129 u64 start;
157 changeset->bytes_changed += state->end - state->start + 1;
158 ret = ulist_add(&changeset->range_changed, state->start, state->end,
377 if (offset < entry->start)
393 * entry would have entry->start <= offset && entry->end >= offset.
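
The tree_search fragments above (source lines 377-452) describe the lookup contract: searching for 'offset' returns the entry whose inclusive range contains it (entry->start <= offset && entry->end >= offset) or, failing that, the first entry that begins after it. A minimal userspace sketch of that contract over a sorted array rather than the kernel's rb-tree; struct range and find_range_ge are illustrative names, not kernel API:

#include <stdio.h>

struct range { unsigned long long start, end; };	/* inclusive on both ends */

/*
 * Return the index of the range containing 'offset', or of the first range
 * beginning after 'offset'; return 'n' when every range ends before 'offset'.
 * This mirrors the "containing entry, else next entry" contract quoted above.
 */
static size_t find_range_ge(const struct range *r, size_t n, unsigned long long offset)
{
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (r[mid].end < offset)
			lo = mid + 1;		/* range lies entirely before offset */
		else
			hi = mid;		/* range contains offset or starts after it */
	}
	return lo;
}

int main(void)
{
	const struct range r[] = { { 0, 4095 }, { 8192, 12287 } };
	size_t i = find_range_ge(r, 2, 6000);

	/* 6000 falls in the gap, so the lookup lands on the next range. */
	printf("index %zu, starts at %llu\n", i, r[i].start);
	return 0;
}
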
427 if (offset < entry->start)
452 while (prev && offset < prev_entry->start) {
503 if (other->end == state->start - 1 &&
509 state->start = other->start;
518 if (other->start == state->end + 1 &&
547 struct extent_state *state, u64 start, u64 end,
554 if (end < start) {
556 "insert state: end < start %llu %llu", end, start);
559 state->start = start;
570 found->start, found->end, start, end);
583 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
585 * prealloc: [orig->start, split - 1]
599 prealloc->start = orig->start;
602 orig->start = split;
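
The merge_state and split_state fragments (source lines 503-602) keep every state end-inclusive: two states can merge when one ends exactly one byte before the other begins, and a split at 'split' leaves [orig->start, split - 1] in the preallocated state while the original keeps [split, orig->end]. A standalone sketch of that arithmetic, using a plain struct instead of the kernel's extent_state:

#include <assert.h>
#include <stdio.h>

struct ext { unsigned long long start, end; };	/* inclusive range */

/* Two inclusive ranges are adjacent when one ends one byte before the other begins. */
static int adjacent(const struct ext *a, const struct ext *b)
{
	return a->end + 1 == b->start;
}

/* Split 'orig' at 'split': 'left' covers [orig->start, split - 1], 'orig' keeps [split, orig->end]. */
static void split_range(struct ext *orig, struct ext *left, unsigned long long split)
{
	assert(split > orig->start && split <= orig->end);
	left->start = orig->start;
	left->end = split - 1;
	orig->start = split;
}

int main(void)
{
	struct ext orig = { 0, 8191 }, left;

	split_range(&orig, &left, 4096);
	printf("left [%llu,%llu] orig [%llu,%llu] adjacent=%d\n",
	       left.start, left.end, orig.start, orig.end, adjacent(&left, &orig));
	return 0;
}
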
639 u64 range = state->end - state->start + 1;
691 * the range [start, end] is inclusive.
695 int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
708 btrfs_debug_check_extent_io_range(tree, start, end);
709 trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);
741 cached->start <= start && cached->end > start) {
754 node = tree_search(tree, start);
759 if (state->start > end)
761 WARN_ON(state->end < start);
786 if (state->start < start) {
789 err = split_state(tree, state, prealloc, start);
809 if (state->start <= end && state->end > end) {
829 start = last_end + 1;
830 if (start <= end && state && !need_resched())
834 if (start > end)
865 * The range [start, end] is inclusive.
868 static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
874 btrfs_debug_check_extent_io_range(tree, start, end);
883 node = tree_search(tree, start);
890 if (state->start > end)
894 start = state->start;
900 start = state->end + 1;
902 if (start > end)
925 u64 range = state->end - state->start + 1;
957 * part of the range already has the desired bits set. The start of the
960 * [start, end] is inclusive. This takes the tree lock.
964 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
978 btrfs_debug_check_extent_io_range(tree, start, end);
979 trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);
996 if (state->start <= start && state->end > start &&
1006 node = tree_search_for_insert(tree, start, &p, &parent);
1010 err = insert_state(tree, prealloc, start, end,
1021 last_start = state->start;
1030 if (state->start == start && state->end <= end) {
1032 *failed_start = state->start;
1042 start = last_end + 1;
1044 if (start < end && state && state->start == start &&
1066 if (state->start < start) {
1068 *failed_start = start;
1078 start = state->end + 1;
1085 err = split_state(tree, state, prealloc, start);
1098 start = last_end + 1;
1100 if (start < end && state && state->start == start &&
1113 if (state->start > start) {
1127 err = insert_state(tree, prealloc, start, this_end,
1134 start = this_end + 1;
1143 if (state->start <= end && state->end > end) {
1145 *failed_start = start;
1164 if (start > end)
1180 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1184 return __set_extent_bit(tree, start, end, bits, 0, failed_start,
1193 * @start: the start offset in bytes
1207 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1221 btrfs_debug_check_extent_io_range(tree, start, end);
1222 trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
1242 if (state->start <= start && state->end > start &&
1253 node = tree_search_for_insert(tree, start, &p, &parent);
1260 err = insert_state(tree, prealloc, start, end,
1270 last_start = state->start;
1279 if (state->start == start && state->end <= end) {
1285 start = last_end + 1;
1286 if (start < end && state && state->start == start &&
1308 if (state->start < start) {
1314 err = split_state(tree, state, prealloc, start);
1327 start = last_end + 1;
1328 if (start < end && state && state->start == start &&
1341 if (state->start > start) {
1358 err = insert_state(tree, prealloc, start, this_end,
1364 start = this_end + 1;
1373 if (state->start <= end && state->end > end) {
1392 if (start > end)
1408 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1419 return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
1423 int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
1426 return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL,
1430 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1434 return __clear_extent_bit(tree, start, end, bits, wake, delete,
1438 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1447 return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
1452 * either insert or lock state struct between start and end; use mask to tell
1455 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1462 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
1467 start = failed_start;
1470 WARN_ON(start > end);
1475 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1480 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1483 if (failed_start > start)
1484 clear_extent_bit(tree, start, failed_start - 1,
1491 void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
1493 unsigned long index = start >> PAGE_SHIFT;
1506 void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1508 unsigned long index = start >> PAGE_SHIFT;
1522 /* find the first state struct with 'bits' set after 'start', and
1524 * nothing was found after 'start'
1528 u64 start, unsigned bits)
1537 node = tree_search(tree, start);
1543 if (state->end >= start && (state->state & bits))
1561 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1571 if (state->end == start - 1 && extent_state_in_tree(state)) {
1584 state = find_first_extent_bit_state(tree, start, bits);
1588 *start_ret = state->start;
1600 * @start - offset to start the search from
1612 int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
1619 state = find_first_extent_bit_state(tree, start, bits);
1621 *start_ret = state->start;
1624 if (state->start > (*end_ret + 1))
1636 * This range could start before @start.
1639 * @start - the offset at/after which the found extent should start
1649 void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
1659 node = __etree_search(tree, start, &next, &prev, NULL, NULL);
1670 * We are past the last allocated chunk, set start at
1681 * At this point 'node' either contains 'start' or start is
1686 if (in_range(start, state->start, state->end - state->start + 1)) {
1691 * start
1693 start = state->end + 1;
1696 * 'start' falls within a range that doesn't
1697 * have the bits set, so take its start as
1702 * start
1704 *start_ret = state->start;
1711 * start
1717 * start
1731 * Find the longest stretch from start until an entry which has the
1736 if (state->end >= start && !(state->state & bits)) {
1739 *end_ret = state->start - 1;
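
The find_first_clear_extent_bit fragments (source lines 1636-1739) look for the range with none of the bits set that contains or follows 'start'; note the returned range may begin before 'start'. A userspace sketch of the same search over a sorted array of allocated inclusive ranges; first_hole and its arguments are made-up names for illustration:

#include <stdio.h>

struct range { unsigned long long start, end; };	/* inclusive, sorted, non-overlapping */

/*
 * Find the hole that contains 'from' or the first hole after it, given the
 * allocated ranges in 'r'. As in the fragments above, the hole may begin
 * before 'from'; a trailing hole is reported as ending at ~0ULL.
 */
static void first_hole(const struct range *r, size_t n, unsigned long long from,
		       unsigned long long *hole_start, unsigned long long *hole_end)
{
	unsigned long long cursor = 0;			/* start of the current candidate hole */
	size_t i;

	for (i = 0; i < n; i++) {
		/* Hole [cursor, r[i].start - 1] counts only if it reaches 'from'. */
		if (r[i].start > cursor && r[i].start > from) {
			*hole_start = cursor;
			*hole_end = r[i].start - 1;
			return;
		}
		cursor = r[i].end + 1;			/* next hole can begin only after this range */
	}
	*hole_start = cursor;				/* open-ended hole past the last range */
	*hole_end = ~0ULL;
}

int main(void)
{
	const struct range alloc[] = { { 0, 4095 }, { 16384, 20479 } };
	unsigned long long hs, he;

	first_hole(alloc, 2, 2048, &hs, &he);		/* 2048 is allocated, hole follows it */
	printf("hole [%llu, %llu]\n", hs, he);		/* prints [4096, 16383] */
	return 0;
}
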
1753 * more than 'max_bytes'. start and end are used to return the range,
1757 bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
1763 u64 cur_start = *start;
1781 if (found && (state->start != cur_start ||
1791 *start = state->start;
1799 total_bytes += state->end - state->start + 1;
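
The btrfs_find_delalloc_range fragments (source lines 1753-1799) add up contiguous states byte by byte (state->end - state->start + 1) and stop at the first hole or once max_bytes has been reached. A small sketch of that accumulation, assuming the ranges are already sorted, non-overlapping, and non-empty:

#include <stdio.h>

struct range { unsigned long long start, end; };	/* inclusive, sorted */

/*
 * Walk contiguous ranges starting at r[0] and stop once the next range is not
 * adjacent or once at least 'max_bytes' are covered. Returns the byte count;
 * assumes n > 0.
 */
static unsigned long long accumulate_contiguous(const struct range *r, size_t n,
						unsigned long long max_bytes,
						unsigned long long *out_end)
{
	unsigned long long total = 0, cur_start = r[0].start;
	size_t i;

	for (i = 0; i < n; i++) {
		if (r[i].start != cur_start)
			break;				/* hole: no longer contiguous */
		total += r[i].end - r[i].start + 1;	/* inclusive length */
		*out_end = r[i].end;
		cur_start = r[i].end + 1;		/* next range must begin right here */
		if (total >= max_bytes)
			break;
	}
	return total;
}

int main(void)
{
	const struct range r[] = { { 0, 4095 }, { 4096, 8191 }, { 16384, 20479 } };
	unsigned long long end = 0;
	unsigned long long total = accumulate_contiguous(r, 3, 1ULL << 30, &end);

	printf("found %llu contiguous bytes ending at %llu\n", total, end);	/* 8192, 8191 */
	return 0;
}
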
1817 u64 start, u64 end)
1819 unsigned long index = start >> PAGE_SHIFT;
1861 struct page *locked_page, u64 *start,
1874 /* step one, find a bunch of delalloc bytes starting at start */
1875 delalloc_start = *start;
1879 if (!found || delalloc_end <= *start) {
1880 *start = delalloc_start;
1887 * start comes from the offset of locked_page. We have to lock
1891 if (delalloc_start < *start)
1892 delalloc_start = *start;
1900 /* step two, lock all the pages after the page that has start */
1935 *start = delalloc_start;
2019 void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
2024 clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL);
2027 start >> PAGE_SHIFT, end >> PAGE_SHIFT,
2037 u64 *start, u64 search_end, u64 max_bytes,
2042 u64 cur_start = *start;
2065 if (state->start > search_end)
2067 if (contig && found && state->start > last + 1)
2071 max(cur_start, state->start);
2075 *start = max(cur_start, state->start);
2095 int set_state_failrec(struct extent_io_tree *tree, u64 start,
2107 node = tree_search(tree, start);
2113 if (state->start != start) {
2123 struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 start)
2134 node = tree_search(tree, start);
2140 if (state->start != start) {
2157 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
2165 if (cached && extent_state_in_tree(cached) && cached->start <= start &&
2166 cached->end > start)
2169 node = tree_search(tree, start);
2170 while (node && start <= end) {
2173 if (filled && state->start > start) {
2178 if (state->start > end)
2193 start = state->end + 1;
2194 if (start > end)
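
The test_range_bit fragments (source lines 2157-2194) show the 'filled' walk: any state starting past the current position means a gap, otherwise the position jumps to state->end + 1 until it passes 'end'. A self-contained sketch of that coverage check; range_fully_covered is an illustrative name:

#include <stdio.h>

struct range { unsigned long long start, end; };	/* inclusive, sorted flagged ranges */

/*
 * Return 1 only if every byte of [start, end] is covered by the flagged
 * ranges, i.e. the 'filled' variant of the walk quoted above.
 */
static int range_fully_covered(const struct range *r, size_t n,
			       unsigned long long start, unsigned long long end)
{
	size_t i;

	for (i = 0; i < n && start <= end; i++) {
		if (r[i].end < start)
			continue;		/* entirely before the queried range */
		if (r[i].start > start)
			return 0;		/* gap in front of this range */
		start = r[i].end + 1;		/* everything up to r[i].end is covered */
	}
	return start > end;
}

int main(void)
{
	const struct range r[] = { { 0, 4095 }, { 4096, 8191 } };

	printf("%d %d\n",
	       range_fully_covered(r, 2, 1000, 5000),	/* 1: fully covered */
	       range_fully_covered(r, 2, 1000, 9000));	/* 0: bytes past 8191 uncovered */
	return 0;
}
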
2213 u64 start = page_offset(page);
2214 u64 end = start + PAGE_SIZE - 1;
2215 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
2226 set_state_failrec(failure_tree, rec->start, NULL);
2227 ret = clear_extent_bits(failure_tree, rec->start,
2228 rec->start + rec->len - 1,
2233 ret = clear_extent_bits(io_tree, rec->start,
2234 rec->start + rec->len - 1,
2253 int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
2327 ino, start,
2337 u64 start = eb->start;
2347 ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p,
2348 start - page_offset(p), mirror_num);
2351 start += PAGE_SIZE;
2363 struct extent_io_tree *io_tree, u64 start,
2378 failrec = get_state_failrec(failure_tree, start);
2388 failrec->start);
2396 failrec->start,
2400 if (state && state->start <= failrec->start &&
2401 state->end >= failrec->start + failrec->len - 1) {
2405 repair_io_failure(fs_info, ino, start, failrec->len,
2423 void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
2433 state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
2435 if (state->start > end)
2452 u64 start, u64 end)
2463 failrec = get_state_failrec(failure_tree, start);
2466 "Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d",
2467 failrec->logical, failrec->start, failrec->len,
2482 failrec->start = start;
2483 failrec->len = end - start + 1;
2489 em = lookup_extent_mapping(em_tree, start, failrec->len);
2496 if (em->start > start || em->start + em->len <= start) {
2506 logical = start - em->start;
2515 "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
2516 logical, start, failrec->len);
2522 ret = set_extent_bits(failure_tree, start, end,
2525 ret = set_state_failrec(failure_tree, start, failrec);
2527 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED);
2645 u64 start, u64 end, int failed_mirror,
2660 "repair read error: read error at %llu", start);
2664 failrec = btrfs_get_io_failure_record(inode, start, end);
2694 repair_io_bio->logical = failrec->start;
2712 void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2717 btrfs_writepage_endio_finish_ordered(page, start, end, uptodate);
2740 u64 start;
2766 start = page_offset(page);
2767 end = start + bvec->bv_offset + bvec->bv_len - 1;
2769 end_extent_writepage(page, error, start, end);
2777 endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2781 u64 end = start + len - 1;
2784 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
2785 unlock_extent_cached_atomic(tree, start, end, &cached);
2806 u64 start;
2844 start = page_offset(page);
2845 end = start + bvec->bv_offset + bvec->bv_len - 1;
2852 start, end, mirror);
2855 offset, page, start, end, mirror);
2860 failure_tree, tree, start,
2881 start - page_offset(page),
2882 start, end, mirror,
2925 endio_readpage_release_extent(tree, start,
2926 end - start + 1, 0);
2928 extent_start = start;
2929 extent_len = end + 1 - start;
2930 } else if (extent_start + extent_len == start) {
2931 extent_len += end + 1 - start;
2935 extent_start = start;
2936 extent_len = end + 1 - start;
3111 u64 start, u64 len, struct extent_map **em_cached)
3117 if (extent_map_in_tree(em) && start >= em->start &&
3118 start < extent_map_end(em)) {
3127 em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, start, len);
3147 u64 start = page_offset(page);
3148 const u64 end = start + PAGE_SIZE - 1;
3149 u64 cur = start;
3169 unlock_extent(tree, start, end);
3212 extent_offset = cur - em->start;
3272 *prev_em_start != em->start)
3276 *prev_em_start = em->start;
3347 u64 start, u64 end,
3356 btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
3430 /* did the fill delalloc function already unlock and start
3447 * helper for __extent_writepage. This calls the writepage start hooks,
3463 u64 start = page_offset(page);
3464 u64 page_end = start + PAGE_SIZE - 1;
3466 u64 cur = start;
3478 ret = btrfs_writepage_cow_fixup(page, start, page_end);
3512 extent_offset = cur - em->start;
3579 u64 start = page_offset(page);
3580 u64 page_end = start + PAGE_SIZE - 1;
3615 ret = writepage_delalloc(BTRFS_I(inode), page, wbc, start,
3636 end_extent_writepage(page, ret, start, page_end);
3875 u64 offset = eb->start;
3878 unsigned long start, end;
3897 start = btrfs_item_nr_offset(nritems);
3899 memzero_extent_buffer(eb, start, end - start);
4050 * back to the start of the file
4237 * back to the start of the file
4281 int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
4287 unsigned long nr_pages = (end - start + PAGE_SIZE) >>
4298 .range_start = start,
4306 while (start <= end) {
4307 page = find_get_page(mapping, start >> PAGE_SHIFT);
4311 btrfs_writepage_endio_finish_ordered(page, start,
4312 start + PAGE_SIZE - 1, 1);
4316 start += PAGE_SIZE;
4386 u64 start = page_offset(page);
4387 u64 end = start + PAGE_SIZE - 1;
4390 start += ALIGN(offset, blocksize);
4391 if (start > end)
4394 lock_extent_bits(tree, start, end, &cached_state);
4396 clear_extent_bit(tree, start, end, EXTENT_LOCKED | EXTENT_DELALLOC |
4409 u64 start = page_offset(page);
4410 u64 end = start + PAGE_SIZE - 1;
4413 if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
4420 ret = __clear_extent_bit(tree, start, end,
4443 u64 start = page_offset(page);
4444 u64 end = start + PAGE_SIZE - 1;
4452 while (start <= end) {
4456 len = end - start + 1;
4458 em = lookup_extent_mapping(map, start, len);
4464 em->start != start) {
4469 if (test_range_bit(tree, em->start,
4508 start = extent_map_end(em);
4669 u64 start, u64 len)
4673 u64 max = start + len;
4709 * We can't initialize that to 'start' as this could miss extents due
4713 start = round_down(start, btrfs_inode_sectorsize(inode));
4714 len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
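
The fiemap fragments (source lines 4709-4714) widen the queried range to sector boundaries so extents straddling those boundaries are not missed. A sketch of the same round_down/round_up arithmetic, assuming a power-of-two sector size; the macros here are userspace stand-ins equivalent in effect to the kernel helpers:

#include <stdio.h>

/* Power-of-two alignment helpers, matching the effect of round_down()/round_up(). */
#define ROUND_DOWN(x, a)	((x) & ~((unsigned long long)(a) - 1))
#define ROUND_UP(x, a)		(((x) + (a) - 1) & ~((unsigned long long)(a) - 1))

int main(void)
{
	unsigned long long sectorsize = 4096;	/* assumed sector size */
	unsigned long long start = 5000, len = 3000;
	unsigned long long max = start + len;

	/* Widen the queried range to sector boundaries, as the fiemap fragments do. */
	unsigned long long aligned_start = ROUND_DOWN(start, sectorsize);
	unsigned long long aligned_len = ROUND_UP(max, sectorsize) - aligned_start;

	printf("start %llu len %llu\n", aligned_start, aligned_len);	/* 4096 4096 */
	return 0;
}
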
4742 * remember the start of the last extent. There are a
4753 * extents. so, we trust isize unless the start of the last extent is
4761 lock_extent_bits(&inode->io_tree, start, start + len - 1,
4764 em = get_extent_skip_holes(inode, start, last_for_get_extent);
4776 if (em->start >= max || extent_map_end(em) < off)
4785 em_start = max(em->start, off);
4788 * record the offset from the start of the extent
4794 offset_in_extent = em_start - em->start;
4821 (em->start - em->orig_start);
4874 unlock_extent_cached(&inode->io_tree, start, start + len - 1,
4953 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
4959 eb->start = start;
5001 new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
5025 u64 start, unsigned long len)
5031 eb = __alloc_extent_buffer(fs_info, start, len);
5054 u64 start)
5056 return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
5112 u64 start)
5118 start >> PAGE_SHIFT);
5150 u64 start)
5155 eb = find_extent_buffer(fs_info, start);
5158 eb = alloc_dummy_extent_buffer(fs_info, start);
5170 start >> PAGE_SHIFT, eb);
5174 exists = find_extent_buffer(fs_info, start);
5191 u64 start)
5196 unsigned long index = start >> PAGE_SHIFT;
5204 if (!IS_ALIGNED(start, fs_info->sectorsize)) {
5205 btrfs_err(fs_info, "bad tree block start %llu", start);
5209 eb = find_extent_buffer(fs_info, start);
5213 eb = __alloc_extent_buffer(fs_info, start, len);
5278 start >> PAGE_SHIFT, eb);
5282 exists = find_extent_buffer(fs_info, start);
5334 eb->start >> PAGE_SHIFT);
5597 static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
5601 "access to eb bytenr %llu len %lu out of range start %lu len %lu",
5602 eb->start, eb->len, start, len);
5609 * Check if the [start, start + len) range is valid before reading/writing
5611 * NOTE: @start and @len are offset inside the eb, not logical address.
5616 unsigned long start, unsigned long len)
5620 /* start, start + len should not go beyond eb->len nor overflow */
5621 if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
5622 return report_eb_range(eb, start, len);
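
The check_eb_range fragments (source lines 5609-5622) validate an offset/length pair with an overflow-checked addition before comparing against eb->len. A userspace sketch of the same pattern; __builtin_add_overflow is the GCC/Clang builtin behind the kernel's check_add_overflow(), and range_ok is an illustrative name:

#include <stdbool.h>
#include <stdio.h>

/*
 * Validate that [start, start + len) stays inside a buffer of 'buf_len' bytes,
 * rejecting arithmetic overflow first.
 */
static bool range_ok(unsigned long start, unsigned long len, unsigned long buf_len)
{
	unsigned long end;

	if (__builtin_add_overflow(start, len, &end))
		return false;			/* start + len wrapped around */
	return end <= buf_len;			/* half-open range must not pass the end */
}

int main(void)
{
	printf("%d %d %d\n",
	       range_ok(0, 4096, 16384),	/* 1 */
	       range_ok(16000, 1000, 16384),	/* 0: runs past the buffer */
	       range_ok(~0UL, 2, 16384));	/* 0: addition overflows */
	return 0;
}
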
5628 unsigned long start, unsigned long len)
5635 unsigned long i = start >> PAGE_SHIFT;
5637 if (check_eb_range(eb, start, len)) {
5646 offset = offset_in_page(start);
5664 unsigned long start, unsigned long len)
5671 unsigned long i = start >> PAGE_SHIFT;
5674 WARN_ON(start > eb->len);
5675 WARN_ON(start + len > eb->start + eb->len);
5677 offset = offset_in_page(start);
5699 unsigned long start, unsigned long len)
5706 unsigned long i = start >> PAGE_SHIFT;
5709 if (check_eb_range(eb, start, len))
5712 offset = offset_in_page(start);
5754 unsigned long start, unsigned long len)
5761 unsigned long i = start >> PAGE_SHIFT;
5763 if (check_eb_range(eb, start, len))
5766 offset = offset_in_page(start);
5783 void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
5790 unsigned long i = start >> PAGE_SHIFT;
5792 if (check_eb_range(eb, start, len))
5795 offset = offset_in_page(start);
5865 * @start: offset of the bitmap item in the extent buffer
5875 unsigned long start, unsigned long nr,
5887 offset = start + byte_offset;
5896 * @start: offset of the bitmap item in the extent buffer
5899 int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
5907 eb_bitmap_offset(eb, start, nr, &i, &offset);
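
The bitmap helpers (source lines 5865-5975) locate a bit by adding nr / BITS_PER_BYTE to the bitmap item's byte offset and then splitting the result into a page index and an offset within the page. A small sketch of that arithmetic, assuming 4 KiB pages; the example values are arbitrary:

#include <stdio.h>

#define BITS_PER_BYTE	8
#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long start = 5000;		/* byte offset of the bitmap item inside the buffer (example) */
	unsigned long nr = 20000;		/* bit number within that bitmap */

	/* Byte that holds bit 'nr', split into page index and offset within the page. */
	unsigned long byte_offset = start + nr / BITS_PER_BYTE;
	unsigned long page_index = byte_offset >> PAGE_SHIFT;
	unsigned long in_page = byte_offset & (PAGE_SIZE - 1);
	unsigned long bit_in_byte = nr % BITS_PER_BYTE;

	printf("page %lu, byte %lu, bit %lu\n", page_index, in_page, bit_in_byte);
	return 0;
}
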
5917 * @start: offset of the bitmap item in the extent buffer
5921 void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
5932 eb_bitmap_offset(eb, start, pos, &i, &offset);
5959 * @start: offset of the bitmap item in the extent buffer
5964 unsigned long start, unsigned long pos,
5975 eb_bitmap_offset(eb, start, pos, &i, &offset);