Lines matching defs:end (lines where "end" is defined, used, or mentioned in this file)
169 void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
172 unsigned long end_index = end >> PAGE_SHIFT;
186 unsigned long page_ops, u64 start, u64 end)
190 ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
191 len = end + 1 - start;
207 struct page *locked_page, u64 start, u64 end,
212 pgoff_t end_index = end >> PAGE_SHIFT;
227 page_ops, start, end);
236 u64 start, u64 end)
239 unsigned long end_index = end >> PAGE_SHIFT;
245 __process_pages_contig(inode->i_mapping, locked_page, start, end,
252 u64 end)
257 pgoff_t end_index = end >> PAGE_SHIFT;
276 u32 len = end + 1 - start;
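The helpers listed above (lines 169-276) all treat end as an inclusive byte offset: a range's length is end + 1 - start and its last page index is end >> PAGE_SHIFT. A minimal userspace sketch of that convention follows; the 4K page shift and the sample offsets are assumptions for illustration, not values taken from the file.

/* Inclusive-end range arithmetic as used by the helpers above (illustrative). */
#include <stdio.h>
#include <stdint.h>

#define SHIFT 12                                 /* assume 4K pages for the example */

int main(void)
{
        uint64_t start = 8192, end = 20479;      /* [8K, 20K), end inclusive */
        uint64_t len = end + 1 - start;          /* 12288 bytes */
        unsigned long index = start >> SHIFT;    /* first page index: 2 */
        unsigned long end_index = end >> SHIFT;  /* last page index: 4 */

        printf("len=%llu pages=[%lu, %lu]\n",
               (unsigned long long)len, index, end_index);
        return 0;
}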
311 * @end: The original end bytenr of the search range
312 * Will store the extent range end bytenr.
315 * range, and @start/@end will store the delalloc range start/end.
318 * original range, and @start/@end will be the non-delalloc range start/end.
323 u64 *end)
328 const u64 orig_end = *end;
338 /* Caller should pass a valid @end to indicate the search range end */
354 *end = min(delalloc_end, orig_end);
409 *end = delalloc_end;
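The kernel-doc fragments at lines 311-318 describe the contract of the delalloc search: when a delalloc range starting inside the search range is found, @start/@end are set to it (line 354 caps the reported end at the original search end) and true is returned; otherwise false is returned. The sketch below is a simplified userspace illustration of only that overlap-and-clamp decision; the clamp_delalloc name and values are invented, and the locking and maximum-length cap of the real helper are omitted.

/* Illustration of the contract described above: clamp a found delalloc
 * range to the original search range (all offsets inclusive). */
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

static bool clamp_delalloc(uint64_t *start, uint64_t *end,
                           uint64_t delalloc_start, uint64_t delalloc_end)
{
        const uint64_t orig_start = *start;
        const uint64_t orig_end = *end;

        /* Found range begins past the search range: no overlap. */
        if (delalloc_start > orig_end)
                return false;

        /* Report only the part inside [orig_start, orig_end]. */
        *start = delalloc_start < orig_start ? orig_start : delalloc_start;
        *end = delalloc_end < orig_end ? delalloc_end : orig_end;
        return true;
}

int main(void)
{
        uint64_t start = 4096, end = 65535;
        if (clamp_delalloc(&start, &end, 0, 131071))
                printf("delalloc [%llu, %llu]\n",
                       (unsigned long long)start, (unsigned long long)end);
        return 0;
}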
414 void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
418 clear_extent_bit(&inode->io_tree, start, end, clear_bits, NULL);
421 start, end, page_ops);
506 u64 end;
522 struct btrfs_inode *inode, u64 start, u64 end,
533 * Contiguous to processed extent, just uptodate the end.
541 * Thus we need to do processed->end + 1 >= start check
544 processed->end + 1 >= start && end >= processed->end) {
545 processed->end = end;
554 unlock_extent(tree, processed->start, processed->end, &cached);
560 processed->end = end;
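Lines 506-560 come from the read-completion path that batches extent unlocks: a freshly completed range is merged into the tracked "processed" range when it is contiguous or overlapping (processed->end + 1 >= start), and only a non-mergeable range forces the accumulated one to be unlocked. A small userspace sketch of that coalescing test; the struct and function names and the printf stand-in for the real unlock are invented.

/* Coalesce completed ranges the way the check above does (illustrative). */
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

struct processed { uint64_t start, end; bool valid; };

static void release_range(struct processed *p, uint64_t start, uint64_t end)
{
        if (p->valid && p->end + 1 >= start && end >= p->end) {
                /* Contiguous or overlapping: just extend the tracked end. */
                p->end = end;
                return;
        }
        if (p->valid)
                printf("unlock [%llu, %llu]\n",
                       (unsigned long long)p->start, (unsigned long long)p->end);
        p->start = start;        /* start tracking the new range */
        p->end = end;
        p->valid = true;
}

int main(void)
{
        struct processed p = { 0, 0, false };
        release_range(&p, 0, 4095);
        release_range(&p, 4096, 8191);     /* contiguous: merged, no unlock yet */
        release_range(&p, 65536, 69631);   /* gap: flushes [0, 8191] */
        /* The last tracked range would be released when processing finishes. */
        return 0;
}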
605 u64 end;
631 end = start + bvec->bv_len - 1;
645 * NOTE: i_size is exclusive while end is inclusive.
647 if (page->index == end_index && i_size <= end) {
652 offset_in_page(end) + 1);
659 start, end, uptodate);
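The NOTE at line 645 flags the off-by-one hazard in this zeroing: i_size is an exclusive byte count while end is an inclusive offset, so when the last page straddles i_size the tail from offset_in_page(i_size) up to offset_in_page(end) + 1 must be zeroed. A standalone sketch of that arithmetic, assuming a 4K page and invented offsets:

/* Zero the part of the last page that lies beyond i_size (illustrative). */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define PAGE_SIZE_EX 4096u
#define offset_in_page_ex(off) ((uint32_t)((off) & (PAGE_SIZE_EX - 1)))

int main(void)
{
        unsigned char page[PAGE_SIZE_EX];
        uint64_t i_size = 10000;             /* exclusive: valid bytes are [0, 9999] */
        uint64_t end = 12287;                /* inclusive end of the last page's range */

        memset(page, 0xaa, sizeof(page));
        if (i_size <= end) {
                uint32_t zero_from = offset_in_page_ex(i_size);   /* 1808 */
                uint32_t zero_to = offset_in_page_ex(end) + 1;    /* 4096 */
                memset(page + zero_from, 0, zero_to - zero_from);
                printf("zeroed [%u, %u) within the last page\n", zero_from, zero_to);
        }
        return 0;
}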
837 * sector aligned. alloc_new_bio() then sets it to the end of
978 const u64 end = start + PAGE_SIZE - 1;
992 unlock_extent(tree, start, end, NULL);
1007 while (cur <= end) {
1021 end - cur + 1, em_cached);
1023 unlock_extent(tree, cur, end, NULL);
1024 end_page_read(page, false, cur, end + 1 - cur);
1029 BUG_ON(end < cur);
1034 iosize = min(extent_map_end(em) - cur, end - cur + 1);
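The read loop at lines 1007-1034 walks the page in chunks: each pass looks up the extent map covering cur and issues at most min(extent_map_end(em) - cur, end - cur + 1) bytes, so a chunk never crosses an extent boundary or the inclusive range end. A userspace sketch of the same chunking over a made-up two-extent layout; fake_em and all sizes are invented.

/* Walk [start, end] (end inclusive) in chunks bounded by extent ends
 * and by the range end, mirroring the iosize computation above. */
#include <stdio.h>
#include <stdint.h>

struct fake_em { uint64_t start, len; };     /* invented stand-in for an extent map */

static uint64_t em_end(const struct fake_em *em) { return em->start + em->len; }

int main(void)
{
        /* Two contiguous "extents": [0, 10K) and [10K, 64K). */
        const struct fake_em ems[] = { { 0, 10240 }, { 10240, 55296 } };
        uint64_t start = 0, end = 16383;     /* one 16K range, end inclusive */

        for (uint64_t cur = start; cur <= end; ) {
                const struct fake_em *em = (cur < em_end(&ems[0])) ? &ems[0] : &ems[1];
                uint64_t iosize = em_end(em) - cur;
                if (iosize > end - cur + 1)
                        iosize = end - cur + 1;
                printf("submit [%llu, %llu)\n", (unsigned long long)cur,
                       (unsigned long long)(cur + iosize));
                cur += iosize;
        }
        return 0;
}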
1129 u64 end = start + PAGE_SIZE - 1;
1133 btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
1145 u64 start, u64 end,
1153 btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
1238 * Return the next dirty range in [@start, @end).
1242 struct page *page, u64 *start, u64 *end)
1258 *end = page_offset(page) + PAGE_SIZE;
1275 *end = page_offset(page) + range_end_bit * fs_info->sectorsize;
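Lines 1238-1275 belong to the helper that reports the next dirty range of a page as a half-open interval [start, end): in the subpage case a per-page dirty bitmap is scanned and the bit positions are scaled by the sector size back into byte offsets. A hedged userspace sketch of that bit-to-byte conversion; the bitmap value, sector size, and page offset are invented.

/* Turn a run of set bits in a per-page dirty bitmap into a half-open
 * byte range [start, end), one bit per sector (illustrative values). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint32_t sectorsize = 4096;
        const uint64_t page_offset = 262144;   /* byte offset of the page */
        /* 16 sectors tracked; sectors 3..6 are dirty. */
        uint16_t dirty_bitmap = 0x0078;

        unsigned int first = 0, last;
        while (first < 16 && !(dirty_bitmap & (1u << first)))
                first++;
        last = first;
        while (last < 16 && (dirty_bitmap & (1u << last)))
                last++;

        uint64_t start = page_offset + (uint64_t)first * sectorsize;
        uint64_t end = page_offset + (uint64_t)last * sectorsize;   /* exclusive */
        printf("dirty range [%llu, %llu)\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
}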
1294 u64 end = cur + PAGE_SIZE - 1;
1310 while (cur <= end) {
1311 u32 len = end - cur + 1;
1349 ASSERT(cur < end);
1364 iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
1371 "page %lu not writeback, cur %llu end %llu",
1372 page->index, cur, end);
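In the write loop at lines 1310-1364 a chunk is clamped by three exclusive bounds at once: the extent map end, end + 1 (the inclusive page end converted back to exclusive), and the end of the current dirty range. A tiny arithmetic sketch with invented numbers:

/* The three-way clamp used when sizing a write chunk (illustrative numbers). */
#include <stdio.h>
#include <stdint.h>

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
        uint64_t cur = 4096;
        uint64_t em_end = 1048576;            /* extent map ends at 1M */
        uint64_t end = 65535;                 /* inclusive page/range end */
        uint64_t dirty_range_end = 16384;     /* exclusive end of the dirty run */

        uint64_t iosize = min_u64(min_u64(em_end, end + 1), dirty_range_end) - cur;
        printf("iosize=%llu\n", (unsigned long long)iosize);    /* 12288 */
        return 0;
}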
1499 * under IO since we can end up having no IO bits set for a short period
1643 unsigned long end;
1650 end = btrfs_node_key_ptr_offset(eb, nritems);
1651 memzero_extent_buffer(eb, end, eb->len - end);
1658 end = btrfs_item_nr_offset(eb, 0);
1660 end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
1662 end += btrfs_item_offset(eb, nritems - 1);
1663 memzero_extent_buffer(eb, start, end - start);
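Lines 1643-1663 zero the unused portion of an extent buffer before it is written, so stale memory never reaches disk; for a node that is everything past the last used key pointer. The sketch below mirrors only that node case in userspace; the header and key-pointer sizes are invented stand-ins, not the real on-disk values.

/* Node case from the lines above: everything after the last used key
 * pointer is zeroed before the buffer goes to disk (sizes invented). */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define EB_LEN        16384u
#define HEADER_SIZE   128u     /* illustrative stand-in for the real header size  */
#define KEY_PTR_SIZE  32u      /* illustrative stand-in for a key pointer's size  */

int main(void)
{
        unsigned char eb[EB_LEN];
        uint32_t nritems = 120;
        uint32_t end = HEADER_SIZE + nritems * KEY_PTR_SIZE;   /* first unused byte */

        memset(eb, 0xff, sizeof(eb));
        memset(eb + end, 0, EB_LEN - end);     /* mirrors memzero_extent_buffer() */
        printf("zeroed %u trailing bytes\n", EB_LEN - end);
        return 0;
}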
1882 pgoff_t end; /* Inclusive */
1889 end = -1;
1897 end = wbc->range_end >> PAGE_SHIFT;
1907 tag_pages_for_writeback(mapping, index, end);
1908 while (!done && !nr_to_write_done && (index <= end) &&
1909 (nr_folios = filemap_get_folios_tag(mapping, &index, end,
2010 pgoff_t end; /* Inclusive */
2031 end = -1;
2039 end = wbc->range_end >> PAGE_SHIFT;
2063 tag_pages_for_writeback(mapping, index, end);
2065 while (!done && !nr_to_write_done && (index <= end) &&
2067 end, tag, &fbatch))) {
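The two writeback walkers above (lines 1882-1909 and 2010-2067) convert the byte range in the writeback_control into an inclusive page-index window, with (pgoff_t)-1 serving as the no-upper-bound sentinel for cyclic writeback, and then iterate tagged folios in batches. The sketch below shows only the index arithmetic in userspace; the 4K page shift and the sample range are assumptions.

/* Map a byte range to an inclusive page-index range; (pgoff_t)-1 acts as
 * "no upper bound" because the type is unsigned (illustrative). */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT_EX 12

int main(void)
{
        typedef unsigned long pgoff_ex_t;
        uint64_t range_start = 1 << 20;            /* 1M */
        uint64_t range_end = (8 << 20) - 1;        /* 8M - 1, inclusive */

        pgoff_ex_t index = range_start >> PAGE_SHIFT_EX;   /* 256  */
        pgoff_ex_t end = range_end >> PAGE_SHIFT_EX;       /* 2047 */
        pgoff_ex_t whole_file_end = (pgoff_ex_t)-1;        /* wraps to ULONG_MAX */

        printf("pages [%lu, %lu], cyclic end %lu\n", index, end, whole_file_end);
        return 0;
}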
2157 u64 start, u64 end, struct writeback_control *wbc,
2175 ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
2177 while (cur <= end) {
2178 u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
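Lines 2157-2178 split a sector-aligned byte range into per-page chunks with inclusive ends: cur_end is the last byte of the page containing cur, capped at end. A userspace sketch of that split, assuming 4K pages and a range whose end is not page aligned (possible in the real code because the sector size may be smaller than the page size):

/* Split a byte range into per-page chunks with inclusive ends,
 * as the loop above does (4K pages assumed for the example). */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE_EX 4096ull
#define round_down_ex(x, a) ((x) & ~((a) - 1))

int main(void)
{
        uint64_t start = 4096, end = 13311;    /* [4K, 13K), end inclusive */

        for (uint64_t cur = start; cur <= end; ) {
                uint64_t cur_end = round_down_ex(cur, PAGE_SIZE_EX) + PAGE_SIZE_EX - 1;
                if (cur_end > end)
                        cur_end = end;
                printf("page chunk [%llu, %llu]\n",
                       (unsigned long long)cur, (unsigned long long)cur_end);
                cur = cur_end + 1;
        }
        return 0;
}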
2268 u64 end = start + folio_size(folio) - 1;
2275 if (start > end)
2278 lock_extent(tree, start, end, &cached_state);
2286 unlock_extent(tree, start, end, &cached_state);
2299 u64 end = start + PAGE_SIZE - 1;
2302 if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
2315 ret = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
2337 u64 end = start + PAGE_SIZE - 1;
2345 while (start <= end) {
2349 len = end - start + 1;
2443 /* Set at the end of extent_fiemap(). */
2451 * find an extent that starts at an offset behind the end offset of the
2462 * range [768K, 2M[, which may end up as the last item of leaf X or as
2496 * to end at the offset of the file extent item we have
2521 * cached extent's end. In this case just ignore the
2527 * cached extent but its end offset goes beyond the
2528 * end offset of the cached extent. We don't want to
2533 * going the cached extent's end to the end of the
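The comments at lines 2443-2533 describe how the fiemap extent cache handles an extent that starts before the end of the previously cached one: ignore it if it is fully covered, otherwise emit only the part beyond the cached extent's end. The sketch below is a simplified userspace rendering of just that decision, reusing the 768K-2M range mentioned in the comment; the offset-and-length representation and the printf stand-ins are assumptions, and the real code additionally decides whether the remainder can be merged with the cached extent.

/* Simplified version of the overlap handling the comments describe:
 * the cache holds the most recently found extent as (offset, len). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t cache_offset = 786432, cache_len = 1310720;  /* cached [768K, 2M) */
        uint64_t cache_end = cache_offset + cache_len;

        /* New extent found by a later search, possibly racing with writes. */
        uint64_t offset = 1048576, len = 2097152;             /* [1M, 3M) */
        uint64_t end = offset + len;

        if (offset < cache_end) {
                if (end <= cache_end) {
                        printf("fully covered by the cached extent, ignore\n");
                        return 0;
                }
                /* Keep only the part beyond the cached extent's end. */
                len = end - cache_end;
                offset = cache_end;
        }
        printf("emit [%llu, %llu)\n", (unsigned long long)offset,
               (unsigned long long)end);
        return 0;
}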
2715 * extent. The end offset (@end) is inclusive.
2724 u64 start, u64 end)
2737 while (cur_offset < end && cur_offset < i_size) {
2744 delalloc = btrfs_find_delalloc_in_range(inode, cur_offset, end,
2803 if (disk_bytenr != 0 && last_delalloc_end < end) {
2809 prealloc_len = end + 1 - start;
2812 prealloc_len = end + 1 - prealloc_start;
3003 /* We've reached the end of the fiemap range, stop. */
4581 /* Already beyond page end */
4600 const u64 end = page_offset(page) + PAGE_SIZE;
4603 while (cur < end) {
4647 * check the page private at the end. And