/kernel/linux/linux-5.10/fs/btrfs/

extent_map.c
  109  } else if (em->start >= extent_map_end(entry)) {                   in tree_insert()
  118  while (parent && em->start >= extent_map_end(entry)) {             in tree_insert()
  123  if (end > entry->start && em->start < extent_map_end(entry))       in tree_insert()
  133  if (end > entry->start && em->start < extent_map_end(entry))       in tree_insert()
  162  else if (offset >= extent_map_end(entry))                          in __tree_search()
  170  while (prev && offset >= extent_map_end(prev_entry)) {             in __tree_search()
  221  if (extent_map_end(prev) == next->start &&                         in mergable_maps()
  439  if (strict && !(end > em->start && start < extent_map_end(em)))    in __lookup_extent_mapping()
  551  BUG_ON(map_start < em->start || map_start >= extent_map_end(em));  in merge_extent_mapping()
  561  start = prev ? extent_map_end(pre...                               in merge_extent_mapping()
  (further matches truncated)

extent_map.h
   63  static inline u64 extent_map_end(struct extent_map *em)            definition of extent_map_end()
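Only the signature of the helper survives in the listing above. As a point of reference, here is a minimal sketch of what the inline in fs/btrfs/extent_map.h amounts to in these trees; the struct is trimmed to the two fields the helper touches, so treat it as an illustration rather than a verbatim copy of the kernel header.

```c
#include <linux/types.h>

/* Trimmed sketch; the real struct extent_map carries many more fields. */
struct extent_map {
	u64 start;	/* file offset where the mapping begins */
	u64 len;	/* length of the mapping in bytes */
	/* ... */
};

/*
 * First byte past the mapping, i.e. the mapping covers the half-open
 * range [em->start, extent_map_end(em)).  The overflow check saturates
 * to (u64)-1 so "to the end of the address space" mappings stay ordered.
 */
static inline u64 extent_map_end(struct extent_map *em)
{
	if (em->start + em->len < em->start)
		return (u64)-1;
	return em->start + em->len;
}
```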
extent_io.c
  3118  start < extent_map_end(em)) {                                     in __get_extent_map()
  3213  BUG_ON(extent_map_end(em) <= cur);                                in btrfs_do_readpage()
  3222  iosize = min(extent_map_end(em) - cur, end - cur + 1);            in btrfs_do_readpage()
  3223  cur_end = min(extent_map_end(em) - 1, end);                       in btrfs_do_readpage()
  3513  em_end = extent_map_end(em);                                      in __extent_writepage_io()
  4470  extent_map_end(em) - 1,                                           in try_release_extent_mapping()
  4508  start = extent_map_end(em);                                       in try_release_extent_mapping()
  4548  offset = extent_map_end(em);                                      in get_extent_skip_holes()
  4776  if (em->start >= max || extent_map_end(em) < off)                 in extent_fiemap()
  4795  em_end = extent_map_end(e...                                      in extent_fiemap()
  (further matches truncated)

ioctl.c
  1078  end = extent_map_end(em);                                         in check_defrag_in_cache()
  1259  *defrag_end = extent_map_end(em);                                 in should_defrag_range()
  1262  *skip = extent_map_end(em);                                       in should_defrag_range()
  1420  search_start = extent_map_end(em);                                in cluster_pages_for_defrag()

compression.c
   566  (last_offset + PAGE_SIZE > extent_map_end(em)) ||                 in add_ra_bio_pages()

file.c
  3416  last_byte = min(extent_map_end(em), alloc_end);                   in btrfs_fallocate()
  3417  actual_end = min_t(u64, extent_map_end(em), offset + len);        in btrfs_fallocate()

inode.c
  2287  search_start = extent_map_end(em);                                in btrfs_find_new_delalloc_bytes()
  4793  last_byte = min(extent_map_end(em), block_end);                   in btrfs_cont_expand()
  6769  extent_map_end(em) - 1, NULL, GFP_NOFS);                          in btrfs_get_extent()
  6780  if (em->start > start || extent_map_end(em) <= start) {           in btrfs_get_extent()
  6865  const u64 hole_end = extent_map_end(hole_em);                     in btrfs_get_extent_fiemap()

volumes.c
  7142  next_start = extent_map_end(em);                                  in btrfs_check_rw_degradable()
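Two idioms recur across the call sites above: clamping the current step to the end of the mapping (btrfs_do_readpage, btrfs_fallocate, btrfs_cont_expand) and advancing a search cursor past it (try_release_extent_mapping, get_extent_skip_holes, btrfs_find_new_delalloc_bytes, cluster_pages_for_defrag). The userspace sketch below models that walk under simplified assumptions; struct extent_map, lookup() and the sample mappings are illustrative stand-ins, not the btrfs API.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct extent_map { uint64_t start, len; };

/* Same semantics as the kernel helper: first byte past the mapping. */
static inline uint64_t extent_map_end(const struct extent_map *em)
{
	if (em->start + em->len < em->start)	/* saturate on u64 overflow */
		return UINT64_MAX;
	return em->start + em->len;
}

/* Hypothetical lookup: find the mapping that covers @offset, if any. */
static const struct extent_map *lookup(const struct extent_map *maps,
				       size_t nr, uint64_t offset)
{
	for (size_t i = 0; i < nr; i++)
		if (offset >= maps[i].start && offset < extent_map_end(&maps[i]))
			return &maps[i];
	return NULL;
}

int main(void)
{
	const struct extent_map maps[] = {
		{ 0, 16384 }, { 16384, 4096 }, { 20480, 65536 },
	};
	uint64_t cur = 4096, end = 40960;	/* walk the range [4096, 40960) */

	while (cur < end) {
		const struct extent_map *em = lookup(maps, 3, cur);

		if (!em)
			break;
		/* Clamp, like iosize = min(extent_map_end(em) - cur, ...). */
		uint64_t step = extent_map_end(em) - cur;

		if (step > end - cur)
			step = end - cur;
		printf("range [%llu, %llu) from mapping at %llu\n",
		       (unsigned long long)cur,
		       (unsigned long long)(cur + step),
		       (unsigned long long)em->start);
		cur += step;	/* advance, like start = extent_map_end(em) */
	}
	return 0;
}
```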
/kernel/linux/linux-6.6/fs/btrfs/

extent_map.c
   99  } else if (em->start >= extent_map_end(entry)) {                   in tree_insert()
  108  while (parent && em->start >= extent_map_end(entry)) {             in tree_insert()
  113  if (end > entry->start && em->start < extent_map_end(entry))       in tree_insert()
  123  if (end > entry->start && em->start < extent_map_end(entry))       in tree_insert()
  153  else if (offset >= extent_map_end(entry))                          in __tree_search()
  160  while (prev && offset >= extent_map_end(prev_entry)) {             in __tree_search()
  217  if (extent_map_end(prev) == next->start &&                         in mergable_maps()
  442  if (strict && !(end > em->start && start < extent_map_end(em)))    in __lookup_extent_mapping()
  561  BUG_ON(map_start < em->start || map_start >= extent_map_end(em));  in merge_extent_mapping()
  571  start = prev ? extent_map_end(pre...                               in merge_extent_mapping()
  (further matches truncated)

extent_map.h
   73  static inline u64 extent_map_end(struct extent_map *em)            definition of extent_map_end()
extent_io.c
   948  start < extent_map_end(em)) {                                     in __get_extent_map()
  1028  BUG_ON(extent_map_end(em) <= cur);                                in btrfs_do_readpage()
  1034  iosize = min(extent_map_end(em) - cur, end - cur + 1);            in btrfs_do_readpage()
  1347  em_end = extent_map_end(em);                                      in __extent_writepage_io()
  1361  * Note that em_end from extent_map_end() and dirty_range_end from in __extent_writepage_io()
  2363  extent_map_end(em) - 1,                                           in try_release_extent_mapping()
  2401  start = extent_map_end(em);                                       in try_release_extent_mapping()

defrag.c
   944  range_len = min(extent_map_end(em), start + len) - cur;           in defrag_collect_targets()
   975  cur = extent_map_end(em);                                         in defrag_collect_targets()

compression.c
   415  (cur + fs_info->sectorsize > extent_map_end(em)) ||               in add_ra_bio_pages()

file.c
  3129  last_byte = min(extent_map_end(em), alloc_end);                   in btrfs_fallocate()
  3130  actual_end = min_t(u64, extent_map_end(em), offset + len);        in btrfs_fallocate()

inode.c
   2640  search_start = extent_map_end(em);                               in btrfs_find_new_delalloc_bytes()
   4904  last_byte = min(extent_map_end(em), block_end);                  in btrfs_cont_expand()
   6918  if (em->start > start || extent_map_end(em) <= start) {          in btrfs_get_extent()
  10122  encoded->len = min_t(u64, extent_map_end(em),                    in btrfs_encoded_read()

volumes.c
  7209  next_start = extent_map_end(em);                                  in btrfs_check_rw_degradable()
/kernel/linux/linux-5.10/fs/btrfs/tests/

extent-map-tests.c
  122  (em->start != 0 || extent_map_end(em) != SZ_16K ||                   in test_case_1()
  210  (em->start != 0 || extent_map_end(em) != SZ_1K ||                    in test_case_2()
  277  (start < em->start || start + len > extent_map_end(em) ||            in __test_case_3()
  391  if (em && (start < em->start || start + len > extent_map_end(em))) { in __test_case_4()

/kernel/linux/linux-6.6/fs/btrfs/tests/

extent-map-tests.c
  125  (em->start != 0 || extent_map_end(em) != SZ_16K ||                   in test_case_1()
  213  (em->start != 0 || extent_map_end(em) != SZ_1K ||                    in test_case_2()
  280  (start < em->start || start + len > extent_map_end(em) ||            in __test_case_3()
  394  if (em && (start < em->start || start + len > extent_map_end(em))) { in __test_case_4()
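The strict lookup in __lookup_extent_mapping() and the range assertions in these self-tests boil down to the same half-open interval test: a query [start, start + len) hits a mapping only when end > em->start and start < extent_map_end(em). Below is a hedged sketch of that predicate, reusing the trimmed struct and helper sketched earlier in this listing; em_covers_range() is an illustrative name, not a kernel function.

```c
#include <linux/types.h>

/*
 * Half-open overlap test in the spirit of __lookup_extent_mapping()'s
 * strict mode: [start, start + len) must intersect
 * [em->start, extent_map_end(em)).  Depends on the struct extent_map
 * and extent_map_end() sketch shown earlier.
 */
static inline bool em_covers_range(struct extent_map *em, u64 start, u64 len)
{
	u64 end = start + len;

	return end > em->start && start < extent_map_end(em);
}
```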