Lines matching "end" (tool query: defs:end), apparently taken from fs/btrfs/inode.c in the Linux kernel. The number leading each entry is the matched line's number within that file.

86 				   u64 start, u64 end, int *page_started,
256 * could end up racing with unlink.
272 u64 end, size_t compressed_size,
280 u64 actual_end = min(end + 1, isize);
282 u64 aligned_end = ALIGN(end, fs_info->sectorsize);
297 end + 1 < isize ||
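
    These offsets follow btrfs's inclusive-end convention: `end` names the
    last byte of the range, so line 280 uses `end + 1` as the exclusive bound
    when clamping to i_size, and line 282 rounds the end up to a sectorsize
    boundary. A minimal userspace check of that arithmetic; ALIGN_UP restates
    the kernel's ALIGN() round-up formula, and the sample values are made up:

    #include <stdint.h>
    #include <stdio.h>

    /* restates the kernel's ALIGN() round-up formula */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

    int main(void)
    {
        uint64_t end = 6143;        /* inclusive last byte of the range */
        uint64_t isize = 5000;      /* hypothetical i_size */
        uint64_t sectorsize = 4096;

        /* min(end + 1, isize), as on line 280 */
        uint64_t actual_end = (end + 1 < isize) ? end + 1 : isize;
        uint64_t aligned_end = ALIGN_UP(end, sectorsize);

        /* prints: actual_end=5000 aligned_end=8192 */
        printf("actual_end=%llu aligned_end=%llu\n",
               (unsigned long long)actual_end,
               (unsigned long long)aligned_end);
        return 0;
    }
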
371 u64 end;
422 u64 end)
444 return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
449 u64 start, u64 end, u64 num_bytes, u64 small_write)
453 (start > 0 || end + 1 < inode->disk_i_size))
480 u64 end = async_chunk->end;
494 inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
509 actual_end = min_t(u64, i_size, end + 1);
512 nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
518 * we don't want to send crud past the end of i_size through
520 * end of the file is before the start of our current
537 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
550 if (inode_need_compress(BTRFS_I(inode), start, end)) {
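
    Line 512 counts how many pages an inclusive byte range touches by
    comparing the page indexes of the first and last bytes, which correctly
    includes partial head and tail pages. A runnable check; the PAGE_SHIFT
    of 12 (4 KiB pages) is an assumption:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumed 4 KiB pages */

    int main(void)
    {
        /* inclusive byte range straddling one page boundary */
        uint64_t start = 4000, end = 4100;
        uint64_t nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;

        printf("%llu\n", (unsigned long long)nr_pages);  /* 2 */
        return 0;
    }
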
574 * has moved, the end is the original one.
577 extent_range_clear_dirty_for_io(inode, start, end);
595 /* zero the tail end of the last page, we might be
614 ret = cow_file_range_inline(BTRFS_I(inode), start, end,
619 ret = cow_file_range_inline(BTRFS_I(inode), start, end,
641 extent_clear_unlock_delalloc(BTRFS_I(inode), start, end,
692 if (start + total_in < end) {
730 page_offset(async_chunk->locked_page)) <= end) {
736 extent_range_redirty_for_io(inode, start, end);
737 add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
898 const u64 end = start + async_extent->ram_size - 1;
901 btrfs_writepage_endio_finish_ordered(p, start, end, 0);
904 extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
966 * the call backs end up in this code. The basic idea is to
979 u64 start, u64 end, int *page_started,
1002 num_bytes = ALIGN(end - start + 1, blocksize);
1006 inode_should_defrag(inode, start, end, num_bytes, SZ_64K);
1010 ret = cow_file_range_inline(inode, start, end, 0,
1019 extent_clear_unlock_delalloc(inode, start, end, NULL,
1026 (end - start + PAGE_SIZE) / PAGE_SIZE;
1165 if (start >= end)
1168 extent_clear_unlock_delalloc(inode, start, end, locked_page,
1201 nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
1239 u64 start, u64 end, int *page_started,
1248 u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
1254 unlock_extent(&inode->io_tree, start, end);
1276 extent_clear_unlock_delalloc(inode, start, end, locked_page,
1286 cur_end = min(end, start + SZ_512K - 1);
1288 cur_end = end;
1298 async_chunk[i].end = cur_end;
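
    Lines 1248-1298 carve the delalloc range into 512 KiB async chunks:
    DIV_ROUND_UP sizes the chunk array, and cur_end = min(end, start +
    SZ_512K - 1) clamps the final chunk to the real end. A userspace sketch
    of the same walk; the constants and loop shape mirror the listed lines,
    everything else is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define SZ_512K (512 * 1024ULL)
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        uint64_t start = 0, end = (1280 * 1024ULL) - 1; /* 1.25 MiB, inclusive */
        uint64_t num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
        uint64_t cur_start = start;

        for (uint64_t i = 0; i < num_chunks; i++) {
            uint64_t cur_end = (end < cur_start + SZ_512K - 1) ?
                               end : cur_start + SZ_512K - 1;
            /* chunk 0: [0, 524287], chunk 1: [524288, 1048575],
             * chunk 2: [1048576, 1310719] */
            printf("chunk %llu: [%llu, %llu]\n",
                   (unsigned long long)i,
                   (unsigned long long)cur_start,
                   (unsigned long long)cur_end);
            cur_start = cur_end + 1;
        }
        return 0;
    }
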
1374 const u64 start, const u64 end,
1380 const u64 range_bytes = end + 1 - start;
1417 count = count_range_bits(io_tree, &range_start, end, range_bytes,
1432 clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
1436 return cow_file_range(inode, locked_page, start, end, page_started,
1449 const u64 start, const u64 end,
1467 extent_clear_unlock_delalloc(inode, start, end, locked_page,
1541 found_key.offset > end)
1622 num_bytes = min(end + 1, extent_end) - cur_offset;
1673 if (cur_offset > end)
1760 if (cur_offset > end)
1765 if (cur_offset <= end && cow_start == (u64)-1)
1769 cur_offset = end;
1770 ret = fallback_to_cow(inode, locked_page, cow_start, end,
1780 if (ret && cur_offset < end)
1781 extent_clear_unlock_delalloc(inode, cur_offset, end,
1792 static inline int need_force_cow(struct btrfs_inode *inode, u64 start, u64 end)
1805 test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG, 0, NULL))
1816 u64 start, u64 end, int *page_started, unsigned long *nr_written,
1820 int force_cow = need_force_cow(inode, start, end);
1823 ret = run_delalloc_nocow(inode, locked_page, start, end,
1826 ret = run_delalloc_nocow(inode, locked_page, start, end,
1829 !inode_need_compress(inode, start, end)) {
1830 ret = cow_file_range(inode, locked_page, start, end,
1834 ret = cow_file_range_async(inode, wbc, locked_page, start, end,
1839 end - start + 1);
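
    Lines 1816-1834 pick one of three delalloc paths: run_delalloc_nocow()
    when the inode allows overwriting in place and nothing forces COW, plain
    cow_file_range() when compression isn't wanted, and cow_file_range_async()
    otherwise. A small userspace model of that dispatch; the flag variables
    stand in for the real inode checks and are not the kernel's names:

    #include <stdio.h>

    /* illustrative stand-ins for the real predicates */
    static int nodatacow_or_prealloc = 1;
    static int force_cow = 1;       /* e.g. EXTENT_DEFRAG set on the range */
    static int need_compress = 0;

    static const char *pick_delalloc_path(void)
    {
        if (nodatacow_or_prealloc && !force_cow)
            return "run_delalloc_nocow";    /* overwrite extents in place */
        if (!need_compress)
            return "cow_file_range";        /* plain COW, no compression */
        return "cow_file_range_async";      /* async compression workers */
    }

    int main(void)
    {
        /* a nocow inode whose range was defragged gets forced back to COW */
        printf("%s\n", pick_delalloc_path());   /* cow_file_range */
        return 0;
    }
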
1852 size = orig->end - orig->start + 1;
1861 new_size = orig->end - split + 1;
1890 new_size = new->end - other->start + 1;
1892 new_size = other->end - new->start + 1;
1920 old_size = other->end - other->start + 1;
1922 old_size = new->end - new->start + 1;
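
    Lines 1852-1922 recompute range sizes as delalloc extent states split and
    merge; reservations are tracked per BTRFS_MAX_EXTENT_SIZE unit, so what
    matters is whether a split or merge changes the unit count. A runnable
    model; the 128 MiB constant matches BTRFS_MAX_EXTENT_SIZE of this era,
    and count_max_extents() is restated locally:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_EXTENT_SIZE (128ULL * 1024 * 1024)  /* BTRFS_MAX_EXTENT_SIZE */

    /* restates count_max_extents(): reserved units for a delalloc run */
    static uint64_t count_max_extents(uint64_t size)
    {
        return (size + MAX_EXTENT_SIZE - 1) / MAX_EXTENT_SIZE;
    }

    int main(void)
    {
        uint64_t orig_start = 0, orig_end = (200ULL << 20) - 1; /* 200 MiB */
        uint64_t split = 100ULL << 20;

        uint64_t size = orig_end - orig_start + 1;
        uint64_t left = split - orig_start;         /* first half */
        uint64_t right = orig_end - split + 1;      /* second half */

        /* 2 units before the split, 1 + 1 after: no extra reservation */
        printf("before=%llu after=%llu\n",
               (unsigned long long)count_max_extents(size),
               (unsigned long long)(count_max_extents(left) +
                                    count_max_extents(right)));
        return 0;
    }
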
2002 u64 len = state->end + 1 - state->start;
2029 BTRFS_I(inode)->new_delalloc_bytes += state->end + 1 -
2044 u64 len = state->end + 1 - state->start;
2261 const u64 end = start + len - 1;
2263 while (search_start < end) {
2264 const u64 search_len = end - search_start + 1;
2295 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
2299 WARN_ON(PAGE_ALIGNED(end));
2312 end + 1 - start,
2318 return set_extent_delalloc(&inode->io_tree, start, end, extra_bits,
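
    The WARN_ON at line 2299 enforces the inclusive-end convention: a correct
    delalloc end is the last byte of a page (page_start + PAGE_SIZE - 1), so
    a page-aligned end almost certainly means a caller passed an exclusive
    bound. A tiny demonstration with local stand-ins for the kernel macros:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ULL
    #define PAGE_ALIGNED(x) (((x) & (PAGE_SIZE - 1)) == 0)

    int main(void)
    {
        uint64_t page_start = 8192;
        uint64_t good_end = page_start + PAGE_SIZE - 1; /* 12287, last byte */
        uint64_t bad_end = page_start + PAGE_SIZE;      /* 12288, exclusive */

        /* prints: good aligned? 0  bad aligned? 1 */
        printf("good aligned? %d  bad aligned? %d\n",
               PAGE_ALIGNED(good_end), PAGE_ALIGNED(bad_end));
        return 0;
    }
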
2469 int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end)
2636 u64 start, end;
2648 end = start + ordered_extent->num_bytes - 1;
2662 btrfs_free_io_failure_record(BTRFS_I(inode), start, end);
2693 lock_extent_bits(io_tree, start, end, &cached_state);
2752 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, clear_bits,
2776 clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
2779 btrfs_drop_extent_cache(BTRFS_I(inode), unwritten_start, end, 0);
2832 u64 end, int uptodate)
2839 trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
2843 end - start + 1, uptodate))
2896 struct page *page, u64 start, u64 end, int mirror)
2912 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
2913 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM);
2919 (size_t)(end - start + 1));
3269 /* we hit the end of the leaf before we found an xattr or
3691 * If we have a pending delayed iput we could end up with the final iput
3693 * up we can end up burning a lot of time in btrfs-cleaner without any
5022 * (which would be dropped in the end io callback of each bio).
5023 * Therefore here we effectively end up waiting for those bios and
5034 u64 end;
5040 end = state->end;
5044 lock_extent_bits(io_tree, start, end, &cached_state);
5052 * Note, end is the bytenr of last byte, so we need + 1 here.
5056 end - start + 1);
5058 clear_extent_bit(io_tree, start, end,
6555 * decompression code contains a memset to fill in any space between the end
6556 * of the uncompressed data and the end of max_size in case the decompressed
6558 * the end of an inline extent and the beginning of the next block, so we
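
    The comment at lines 6555-6558 describes why inline-extent reads are
    safe: the decompression code zero-fills from the end of the uncompressed
    data up to max_size, covering the gap between the end of an inline extent
    and the next block boundary. A sketch of that padding pattern; the helper
    name and signature are illustrative, not the kernel's:

    #include <stddef.h>
    #include <string.h>

    /* After decompressing `actual` bytes into a buffer that maps `max_size`
     * bytes, zero the tail so stale buffer contents never become visible
     * through the page cache. */
    static void pad_decompressed(char *buf, size_t actual, size_t max_size)
    {
        if (actual < max_size)
            memset(buf + actual, 0, max_size - actual);
    }

    int main(void)
    {
        char buf[4096];
        memset(buf, 'x', sizeof(buf));      /* pretend stale contents */
        pad_decompressed(buf, 100, sizeof(buf));
        return buf[100];                    /* 0: the tail was zeroed */
    }
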
6809 u64 end;
6830 end = start + len;
6831 if (end < start)
6832 end = (u64)-1;
6834 end -= 1;
6840 end, len, EXTENT_DELALLOC, 1);
6849 if (delalloc_start > end || delalloc_end <= start) {
6881 if (hole_end <= start || hole_em->start > end) {
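
    Lines 6830-6834 derive an inclusive end from (start, len) while guarding
    against u64 wraparound; the else pairing line 6832 with line 6834 sits on
    line 6833, absent from the listing because it doesn't contain "end". A
    runnable reconstruction of the guard:

    #include <stdint.h>
    #include <stdio.h>

    /* Compute the inclusive end of [start, start + len) without wrapping
     * past (u64)-1. */
    static uint64_t range_last_byte(uint64_t start, uint64_t len)
    {
        uint64_t end = start + len;

        if (end < start)        /* wrapped: clamp to the maximum offset */
            end = UINT64_MAX;
        else
            end -= 1;           /* step back to the last byte */
        return end;
    }

    int main(void)
    {
        /* 4095, then 18446744073709551615 (clamped) */
        printf("%llu\n", (unsigned long long)range_last_byte(0, 4096));
        printf("%llu\n",
               (unsigned long long)range_last_byte(UINT64_MAX - 10, 100));
        return 0;
    }
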
7507 * We need to unlock only the end area that we aren't using.
7934 * the end IO handler won't happen before we increase the
8118 u64 end = start + PAGE_SIZE - 1;
8123 btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
8219 u64 end;
8255 end = min(page_end,
8262 clear_extent_bit(tree, start, end,
8285 end - start + 1, 1))
8291 lock_extent_bits(tree, start, end,
8295 start = end + 1;
8360 u64 end;
8367 end = page_end;
8374 * end up waiting indefinitely to get a lock on the page currently
8424 end = page_start + reserved_space - 1;
8438 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
8442 ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
8519 * doesn't end up using space reserved for updating the inode. We also
9230 * now so we don't add too much work to the end of the transaction
9793 u64 end = start + num_bytes - 1;
9801 * If we are severely fragmented we could end up with really
9901 if (clear_offset < end)
9903 end - clear_offset + 1);
10009 void btrfs_set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
10013 unsigned long end_index = end >> PAGE_SHIFT;
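
    Line 10013 converts the byte range into a page-index range; the function
    then visits every page under that range to tag it for writeback. A
    userspace model of the index walk; the kernel loop operates on struct
    page, this just prints the indexes, and PAGE_SHIFT of 12 is assumed:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumed 4 KiB pages */

    int main(void)
    {
        uint64_t start = 5000, end = 12287;     /* inclusive byte range */
        unsigned long index = start >> PAGE_SHIFT;
        unsigned long end_index = end >> PAGE_SHIFT;

        while (index <= end_index) {
            printf("page %lu\n", index);        /* pages 1 and 2 */
            index++;
        }
        return 0;
    }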