Lines matching defs:end
130 u64 end, struct writeback_control *wbc,
593 * Otherwise we could end up racing with unlink.
651 drop_args.end = fs_info->sectorsize;
710 u64 end;
749 u64 end)
787 !PAGE_ALIGNED(end + 1))
803 return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
808 u64 start, u64 end, u64 num_bytes, u32 small_write)
812 (start > 0 || end + 1 < inode->disk_i_size))
838 u64 end = async_chunk->end;
850 inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);
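Judging by the symbols here (cow_file_range, run_delalloc_nocow, btrfs_finish_ordered_io), these matches appear to come from fs/btrfs/inode.c; the sketches interspersed below are userspace illustrations, not kernel code. For the inode_should_defrag() matches above (and the SZ_64K caller further down), the heuristic treats a write as defrag-worthy only when it is small and is not a head-to-EOF rewrite of the file, i.e. it starts past offset 0 or its inclusive end stops short of disk_i_size. A minimal sketch of that predicate, assuming the kernel's `num_bytes < small_write` guard and with all names as stand-ins:

    #include <stdint.h>
    typedef uint64_t u64;
    typedef uint32_t u32;

    /* A small write that does not rewrite the whole file (it starts past
     * offset 0 or ends before disk_i_size) is an auto-defrag candidate. */
    static int is_defrag_candidate(u64 start, u64 end, u64 num_bytes,
                                   u32 small_write, u64 disk_i_size)
    {
            return num_bytes < small_write &&
                   (start > 0 || end + 1 < disk_i_size);
    }

Note `end + 1 < disk_i_size`: end is inclusive throughout these matches, so end + 1 is the first byte past the range.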
857 extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
871 actual_end = min_t(u64, i_size, end + 1);
874 nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
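The nr_pages expression above works because end is the inclusive offset of the last byte, so end >> PAGE_SHIFT is the index of the last page touched. A self-contained rendering of the arithmetic, with PAGE_SHIFT fixed at 12 purely for illustration:

    #include <stdint.h>
    typedef uint64_t u64;
    #define PAGE_SHIFT 12   /* stand-in: assume 4 KiB pages */

    /* Pages spanned by the inclusive byte range [start, end]. */
    static unsigned long range_nr_pages(u64 start, u64 end)
    {
            return (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
    }

For example, start = 0 with end = 4095 yields one page; an exclusive end of 4096 fed into the same formula would wrongly yield two.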
878 * we don't want to send crud past the end of i_size through
880 * end of the file is before the start of our current
897 (start > 0 || end + 1 < inode->disk_i_size))
903 * Thus we must also check against @actual_end, not just @end.
921 if (!inode_need_compress(inode, start, end))
946 * Zero the tail end of the last page, as we might be sending it down
991 extent_clear_unlock_delalloc(inode, start, end,
1022 if (start + total_in < end) {
1033 add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
1066 u64 end = async_extent->start + async_extent->ram_size - 1;
1071 .range_end = end,
1076 ret = run_delalloc_cow(inode, locked_page, start, end, &wbc, false);
1079 btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1);
1108 u64 end = async_extent->start + async_extent->ram_size - 1;
1121 if (!(start >= locked_page_end || end <= locked_page_start))
1124 lock_extent(io_tree, start, end, NULL);
1171 btrfs_drop_extent_map_range(inode, start, end, false);
1178 extent_clear_unlock_delalloc(inode, start, end,
1197 extent_clear_unlock_delalloc(inode, start, end,
1247 * the callbacks end up in this code. The basic idea is to
1274 struct page *locked_page, u64 start, u64 end,
1299 num_bytes = ALIGN(end - start + 1, blocksize);
1303 inode_should_defrag(inode, start, end, num_bytes, SZ_64K);
1317 end + 1);
1329 extent_clear_unlock_delalloc(inode, start, end,
1490 *done_offset = end;
1503 * `- orig_start `- start `- start + cur_alloc_size `- end
1556 if (start < end) {
1558 extent_clear_unlock_delalloc(inode, start, end, locked_page,
1578 nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
1611 u64 end, struct writeback_control *wbc)
1618 u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
1629 unlock_extent(&inode->io_tree, start, end, NULL);
1636 u64 cur_end = min(end, start + SZ_512K - 1);
1646 async_chunk[i].end = cur_end;
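These three lines split a delalloc range into 512 KiB async chunks. Two details are easy to miss: cur_end is clamped with min(end, start + SZ_512K - 1) because end is inclusive, and DIV_ROUND_UP(end - start, SZ_512K) uses end - start rather than the true length end - start + 1, which appears safe only because delalloc ranges are sector-aligned, so the length can never be exactly one byte past a 512K multiple. A sketch of the chunk walk (process_chunk is a hypothetical callback):

    #include <stdint.h>
    typedef uint64_t u64;
    #define SZ_512K (512 * 1024)

    static void for_each_512k_chunk(u64 start, u64 end,
                                    void (*process_chunk)(u64 s, u64 e))
    {
            while (start <= end) {
                    /* Clamp so the chunk never runs past the inclusive end. */
                    u64 cur_end = start + SZ_512K - 1;

                    if (cur_end > end)
                            cur_end = end;
                    process_chunk(start, cur_end);
                    start = cur_end + 1;
            }
    }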
1699 * Run the delalloc range from start to end, and write back any dirty pages
1704 u64 end, struct writeback_control *wbc,
1707 u64 done_offset = end;
1710 while (start <= end) {
1711 ret = cow_file_range(inode, locked_page, start, end, &done_offset,
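run_delalloc_cow() is the partial-progress loop in this family: done_offset starts at end, cow_file_range() moves it back to the last byte actually processed, and the loop resumes at done_offset + 1. The shape, with do_range() as a hypothetical worker that must either make progress or return an error:

    #include <stdint.h>
    typedef uint64_t u64;

    /* Walk [start, end]; *done reports the last byte handled (inclusive). */
    static int walk_range(u64 start, u64 end,
                          int (*do_range)(u64 s, u64 e, u64 *done))
    {
            u64 done_offset = end;

            while (start <= end) {
                    int ret = do_range(start, end, &done_offset);

                    if (ret)
                            return ret;
                    start = done_offset + 1;  /* resume past handled bytes */
            }
            return 0;
    }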
1747 const u64 start, const u64 end)
1751 const u64 range_bytes = end + 1 - start;
1789 count = count_range_bits(io_tree, &range_start, end, range_bytes,
1804 clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
1813 ret = cow_file_range(inode, locked_page, start, end, NULL, false, true);
1824 u64 end;
1920 * csum_exist_in_range() call below we will end up allocating
1935 args->num_bytes = min(args->end + 1, extent_end) - args->start;
1964 const u64 start, const u64 end)
1979 * writing sequentially and can end up here as well.
1989 nocow_args.end = end;
2052 found_key.offset > end)
2111 if (cur_offset > end)
2196 if (cur_offset > end)
2201 if (cur_offset <= end && cow_start == (u64)-1)
2205 cur_offset = end;
2206 ret = fallback_to_cow(inode, locked_page, cow_start, end);
2223 if (cur_offset < end)
2224 extent_clear_unlock_delalloc(inode, cur_offset, end,
2234 static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
2238 test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG,
2251 u64 start, u64 end, struct writeback_control *wbc)
2260 ASSERT(!(end <= page_offset(locked_page) ||
2263 if (should_nocow(inode, start, end)) {
2264 ret = run_delalloc_nocow(inode, locked_page, start, end);
2269 inode_need_compress(inode, start, end) &&
2270 run_delalloc_compressed(inode, locked_page, start, end, wbc))
2274 ret = run_delalloc_cow(inode, locked_page, start, end, wbc,
2277 ret = cow_file_range(inode, locked_page, start, end, NULL,
2283 end - start + 1);
2297 size = orig->end - orig->start + 1;
2306 new_size = orig->end - split + 1;
2336 new_size = new->end - other->start + 1;
2338 new_size = other->end - new->start + 1;
2366 old_size = other->end - other->start + 1;
2368 old_size = new->end - new->start + 1;
2445 u64 len = state->end + 1 - state->start;
2472 inode->new_delalloc_bytes += state->end + 1 - state->start;
2485 u64 len = state->end + 1 - state->start;
2615 const u64 end = start + len - 1;
2617 while (search_start < end) {
2618 const u64 search_len = end - search_start + 1;
2648 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
2652 WARN_ON(PAGE_ALIGNED(end));
2665 end + 1 - start,
2671 return set_extent_bit(&inode->io_tree, start, end,
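btrfs_set_extent_delalloc() documents the inclusive-end invariant twice: the reservation length is end + 1 - start (the same conversion the extent-state matches above use), and WARN_ON(PAGE_ALIGNED(end)) fires because a valid inclusive end lands on the last byte of a block, so end + 1 is aligned and end itself never is; a page-aligned end usually means an exclusive end leaked into an inclusive-end API. A compact userspace illustration of both points:

    #include <stdint.h>
    #include <assert.h>
    typedef uint64_t u64;
    #define PAGE_SIZE 4096                        /* stand-in constant */
    #define PAGE_ALIGNED(x) (((x) & (PAGE_SIZE - 1)) == 0)

    static u64 range_len(u64 start, u64 end)
    {
            /* end == 4096, 8192, ... would be an exclusive end passed
             * where an inclusive one is expected. */
            assert(!PAGE_ALIGNED(end));
            return end + 1 - start;
    }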
2892 drop_args.end = file_pos + num_bytes;
3020 u64 start, end;
3030 end = start + ordered_extent->num_bytes - 1;
3080 lock_extent(io_tree, start, end, &cached_state);
3134 clear_extent_bit(&inode->io_tree, start, end,
3146 clear_extent_bit(&inode->io_tree, start, end, clear_bits,
3169 clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
3187 end, false);
3289 u64 end = file_offset + bv->bv_len - 1;
3299 test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
3302 clear_extent_bits(&inode->io_tree, file_offset, end,
3710 /* we hit the end of the leaf before we found an xattr or
4133 * If we have a pending delayed iput we could end up with the final iput
4135 * up we can end up burning a lot of time in btrfs-cleaner without any
4840 drop_args.end = offset + len;
5117 * (which would be dropped in the end io callback of each bio).
5118 * Therefore here we effectively end up waiting for those bios and
5129 u64 end;
5135 end = state->end;
5139 lock_extent(io_tree, start, end, &cached_state);
5147 * Note: end is the bytenr of the last byte, so we need the + 1 here.
5151 end - start + 1, NULL);
5153 clear_extent_bit(io_tree, start, end,
6688 * decompression code contains a memset to fill in any space between the end
6689 * of the uncompressed data and the end of max_size in case the decompressed
6691 * the end of an inline extent and the beginning of the next block, so we
7108 nocow_args.end = offset + *len - 1;
7586 * iomap_dio_rw(), we can end up returning less data than what the caller
7632 * We need to unlock only the end area that we aren't using.
8126 u64 end;
8133 end = page_end;
8140 * end up waiting indefinitely to get a lock on the page currently
8196 end = page_start + reserved_space - 1;
8210 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
8214 ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
8232 btrfs_page_set_dirty(fs_info, page, page_start, end + 1 - page_start);
8233 btrfs_page_set_uptodate(fs_info, page, page_start, end + 1 - page_start);
8296 * doesn't end up using space reserved for updating the inode. We also
9011 * now so we don't add too much work to the end of the transaction
9584 u64 end = start + num_bytes - 1;
9592 * If we are severely fragmented we could end up with really
9682 if (clear_offset < end)
9684 end - clear_offset + 1);
9783 void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end)
9787 unsigned long end_index = end >> PAGE_SHIFT;
9791 ASSERT(end + 1 - start <= U32_MAX);
9792 len = end + 1 - start;
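btrfs_set_range_writeback() converts the same inclusive range into page indexes (end >> PAGE_SHIFT is again the last page) and asserts the byte length fits in a u32 before walking the pages. The walk shape, with set_wb() standing in for the per-page helper:

    #include <stdint.h>
    typedef uint64_t u64;
    #define PAGE_SHIFT 12   /* stand-in: 4 KiB pages */

    static void range_set_writeback(u64 start, u64 end,
                                    void (*set_wb)(unsigned long index))
    {
            unsigned long index = start >> PAGE_SHIFT;
            unsigned long end_index = end >> PAGE_SHIFT;

            while (index <= end_index)
                    set_wb(index++);
    }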
10207 u64 start, end;
10277 * The extent must end on a sector boundary. However, we allow a write
10279 * up the extent size and set i_size to the unaligned end.
10291 end = start + num_bytes - 1;
10331 end >> PAGE_SHIFT);
10334 lock_extent(io_tree, start, end, &cached_state);
10337 !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end))
10341 unlock_extent(io_tree, start, end, &cached_state);
10395 btrfs_drop_extent_map_range(inode, start, end, false);
10404 unlock_extent(io_tree, start, end, &cached_state);
10429 unlock_extent(io_tree, start, end, &cached_state);
10864 * @end: End offset (inclusive) of the file range, its value +1 should be
10873 void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
10881 ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
10885 start, end, btrfs_ino(inode), root->root_key.objectid,
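btrfs_assert_inode_range_clean() closes the listing with the conversion in the other direction: its @end parameter is inclusive, but btrfs_lookup_first_ordered_range() takes a length, so the call site passes end + 1 - start. A one-line adapter makes the convention explicit (lookup_by_len() is hypothetical):

    #include <stdint.h>
    typedef uint64_t u64;

    /* Adapt an inclusive-end range to a length-based lookup API. */
    static void lookup_inclusive(u64 start, u64 end,
                                 void (*lookup_by_len)(u64 start, u64 len))
    {
            lookup_by_len(start, end + 1 - start);
    }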