Lines matching refs: start

46 			   cache->start);
79 key.objectid = block_group->start;
111 key.objectid = block_group->start;
120 block_group->start);
184 static void le_bitmap_set(unsigned long *map, unsigned int start, int len)
186 u8 *p = ((u8 *)map) + BIT_BYTE(start);
187 const unsigned int size = start + len;
188 int bits_to_set = BITS_PER_BYTE - (start % BITS_PER_BYTE);
189 u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(start);
216 u64 start, end;
230 start = block_group->start;
231 end = block_group->start + block_group->length;
249 ASSERT(found_key.objectid == block_group->start);
256 ASSERT(found_key.objectid >= start);
260 first = div_u64(found_key.objectid - start,
262 last = div_u64(found_key.objectid + found_key.offset - start,
296 block_group->start, extent_count,
305 i = start;
353 u64 start, end;
367 start = block_group->start;
368 end = block_group->start + block_group->length;
386 ASSERT(found_key.objectid == block_group->start);
395 ASSERT(found_key.objectid >= start);
399 bitmap_pos = div_u64(found_key.objectid - start,
443 key.objectid = start + start_bit * block_group->fs_info->sectorsize;
460 block_group->start, extent_count,
538 struct btrfs_path *path, u64 *start, u64 *size,
544 u64 end = *start + *size;
554 ASSERT(*start >= found_start && *start < found_end);
561 first = (*start - found_start) >> fs_info->sectorsize_bits;
569 *size -= end - *start;
570 *start = end;
607 u64 start, u64 size, int remove)
611 u64 end = start + size;
621 if (start > block_group->start) {
622 u64 prev_block = start - block_group->fs_info->sectorsize;
636 if (start >= key.objectid + key.offset) {
642 key.objectid = start;
657 cur_start = start;
673 if (end < block_group->start + block_group->length) {
720 u64 start, u64 size)
725 u64 end = start + size;
729 key.objectid = start;
743 ASSERT(start >= found_start && end <= found_end);
770 if (start > found_start) {
773 key.offset = start - found_start;
806 struct btrfs_path *path, u64 start, u64 size)
826 start, size, 1);
829 start, size);
834 u64 start, u64 size)
849 block_group = btrfs_lookup_block_group(trans->fs_info, start);
857 ret = __remove_from_free_space_tree(trans, block_group, path, start,
872 u64 start, u64 size)
877 u64 end = start + size;
899 new_key.objectid = start;
904 if (start == block_group->start)
906 key.objectid = start - 1;
924 ASSERT(found_start >= block_group->start &&
925 found_end > block_group->start);
926 ASSERT(found_start < start && found_end <= start);
932 if (found_end == start) {
944 if (end == block_group->start + block_group->length)
964 ASSERT(found_start >= block_group->start &&
965 found_end > block_group->start);
966 ASSERT((found_start < start && found_end <= start) ||
999 struct btrfs_path *path, u64 start, u64 size)
1019 start, size, 0);
1021 return add_free_space_extent(trans, block_group, path, start,
1027 u64 start, u64 size)
1042 block_group = btrfs_lookup_block_group(trans->fs_info, start);
1050 ret = __add_to_free_space_tree(trans, block_group, path, start, size);
1072 u64 start, end;
1099 key.objectid = block_group->start;
1109 start = block_group->start;
1110 end = block_group->start + block_group->length;
1119 if (start < key.objectid) {
1122 path2, start,
1124 start);
1128 start = key.objectid;
1130 start += trans->fs_info->nodesize;
1132 start += key.offset;
1134 if (key.objectid != block_group->start)
1144 if (start < end) {
1146 start, end - start);
1366 block_group->start,
1407 u64 start, end;
1425 start = block_group->start;
1426 end = block_group->start + block_group->length;
1444 ASSERT(found_key.objectid == block_group->start);
1452 ASSERT(found_key.objectid >= start);
1496 end = block_group->start + block_group->length;
1548 block_group->start, extent_count,
1577 end = block_group->start + block_group->length;
1612 block_group->start, extent_count,
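
The only cluster above that shows part of a routine's body rather than isolated references is lines 184-189, the opening of le_bitmap_set(), the byte-granular helper this code (apparently btrfs's free space tree code) uses to set a run of bits in a little-endian bitmap, e.g. when free space extents are converted into bitmap form. The sketch below is a minimal, self-contained userspace reconstruction of that pattern, not the kernel's code: only the declarations matching lines 186-189 come from the listing; the loop body, the FIRST_BYTE_MASK/LAST_BYTE_MASK stand-in macros, and the demo main() are assumptions added for illustration.

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of a le_bitmap_set()-style helper: set `len` bits starting at
 * bit `start` in a little-endian bitmap (bit 0 = lowest bit of byte 0).
 * The macros are local stand-ins, not the kernel's BITMAP_*_BYTE_MASK.
 */
#define BITS_PER_BYTE		8
#define BIT_BYTE(nr)		((nr) / BITS_PER_BYTE)
#define FIRST_BYTE_MASK(start)	((uint8_t)(0xff << ((start) % BITS_PER_BYTE)))
#define LAST_BYTE_MASK(nbits)	\
	((uint8_t)(0xff >> ((BITS_PER_BYTE - (nbits) % BITS_PER_BYTE) % BITS_PER_BYTE)))

static void le_bitmap_set(uint8_t *map, unsigned int start, int len)
{
	uint8_t *p = map + BIT_BYTE(start);
	const unsigned int size = start + len;	/* first bit past the range */
	int bits_to_set = BITS_PER_BYTE - (start % BITS_PER_BYTE);
	uint8_t mask_to_set = FIRST_BYTE_MASK(start);

	/* The (possibly partial) first byte, then any whole bytes. */
	while (len - bits_to_set >= 0) {
		*p |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = 0xff;
		p++;
	}
	/* Trailing partial byte, if the range does not end on a boundary. */
	if (len) {
		mask_to_set &= LAST_BYTE_MASK(size);
		*p |= mask_to_set;
	}
}

int main(void)
{
	uint8_t map[4] = { 0 };

	le_bitmap_set(map, 3, 10);	/* set bits 3..12 */
	for (int i = 0; i < 4; i++)
		printf("byte %d: 0x%02x\n", i, map[i]);
	return 0;
}

Compiled as-is, the demo sets bits 3..12 and leaves the bytes 0xf8, 0x1f, 0x00, 0x00. The point of the byte-at-a-time masks is that only the first and last bytes of the range may be partially covered; everything in between is OR-ed with 0xff.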