Lines matching references to "start" (a short sketch of the recurring sector/byte arithmetic follows the listing):
74 (zone->wp + SUPER_INFO_SECTORS > zone->start + zone->capacity);
119 *wp_ret = zones[0].start << SECTOR_SHIFT;
129 u64 zone_end = (zones[i].start + zones[i].capacity) << SECTOR_SHIFT;
145 sector = zones[1].start;
147 sector = zones[0].start;
207 zones[i].start = i * zone_sectors + pos;
210 zones[i].wp = zones[i].start + zone_sectors;
507 sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
824 *bytenr_ret = zones[0].start << SECTOR_SHIFT;
835 if (wp == zones[0].start << SECTOR_SHIFT)
837 else if (wp == zones[1].start << SECTOR_SHIFT)
844 reset->start, reset->len,
850 reset->wp = reset->start;
859 if (wp == zones[0].start << SECTOR_SHIFT)
860 zone_end = zones[1].start + zones[1].capacity;
861 else if (wp == zones[1].start << SECTOR_SHIFT)
862 zone_end = zones[0].start + zones[0].capacity;
990 if (zone->wp != zone->start + zone->capacity) {
994 REQ_OP_ZONE_FINISH, zone->start,
1000 zone->wp = zone->start + zone->len;
1161 int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
1165 unsigned long begin = start >> shift;
1170 ASSERT(IS_ALIGNED(start, zinfo->zone_size));
1185 for (pos = start; pos < start + size; pos += zinfo->zone_size) {
1244 key.objectid = cache->start + cache->length;
1256 ret = btrfs_previous_extent_item(root, path, cache->start);
1272 if (!(found_key.objectid >= cache->start &&
1273 found_key.objectid + length <= cache->start + cache->length)) {
1277 *offset_ret = found_key.objectid + length - cache->start;
1292 u64 logical = cache->start;
1418 zone.start << SECTOR_SHIFT,
1444 ((zone.wp - zone.start) << SECTOR_SHIFT);
1460 cache->start);
1539 cache->alloc_offset, cache->start);
1547 cache->start);
1560 cache->meta_write_pointer = cache->alloc_offset + cache->start;
1611 set_extent_bit(&trans->dirty_pages, eb->start, eb->start + eb->len - 1,
1617 u64 start = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT);
1643 cache = btrfs_lookup_block_group(fs_info, start);
1764 if (fs_info->treelog_bg == block_group->start) {
1782 if (tgt->meta_write_pointer < tgt->start + tgt->alloc_offset) {
1829 if (block_group->start > eb->start ||
1830 block_group->start + block_group->length <= eb->start) {
1838 block_group = btrfs_lookup_block_group(fs_info, eb->start);
1844 if (block_group->meta_write_pointer == eb->start) {
1860 * start writing this eb. In that case, we can just bail out.
1862 if (block_group->meta_write_pointer > eb->start)
1944 wp = physical_start + ((zone.wp - zone.start) << SECTOR_SHIFT);
2042 const u64 end = block_group->start + block_group->length;
2049 block_group->start >> fs_info->sectorsize_bits) {
2058 if (eb->start < block_group->start)
2060 if (eb->start >= end)
2088 block_group->start + block_group->alloc_offset > block_group->meta_write_pointer) {
2114 btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start,
2145 block_group->meta_write_pointer = block_group->start +
2264 block_group->start + block_group->zone_capacity)
2280 btrfs_zone_finish_endio(bg->fs_info, bg->start, bg->length);
2288 eb->start + eb->len * 2 <= bg->start + bg->zone_capacity)
2293 bg->start);
2310 if (fs_info->data_reloc_bg == bg->start)
2377 if (block_group->start + block_group->alloc_offset == logical + length) {
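The matches above repeat the same arithmetic over and over: zone report fields (start, len, wp, capacity) are expressed in sectors and are shifted by SECTOR_SHIFT to obtain byte addresses, the bytes already written in a zone are (wp - start) << SECTOR_SHIFT, and a zone counts as full once wp reaches start + capacity. Below is a minimal user-space sketch of that pattern; struct zone_stub, the helper names, and the example values are illustrative stand-ins, not the kernel's struct blk_zone API.

    /*
     * Stand-alone sketch of the sector<->byte conversions seen in the
     * listing. SECTOR_SHIFT is 9 (512-byte sectors), as in the kernel;
     * everything else here is a simplified stand-in.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9 /* 512-byte sectors */

    struct zone_stub {
            uint64_t start;    /* first sector of the zone */
            uint64_t len;      /* zone size, in sectors */
            uint64_t wp;       /* write pointer, in sectors */
            uint64_t capacity; /* usable sectors (may be < len) */
    };

    /* Byte address of the zone start, cf. "zones[0].start << SECTOR_SHIFT". */
    static uint64_t zone_start_bytes(const struct zone_stub *z)
    {
            return z->start << SECTOR_SHIFT;
    }

    /* Bytes written so far, cf. "(zone.wp - zone.start) << SECTOR_SHIFT". */
    static uint64_t zone_written_bytes(const struct zone_stub *z)
    {
            return (z->wp - z->start) << SECTOR_SHIFT;
    }

    /* Full when the write pointer reaches start + capacity. */
    static bool zone_is_full(const struct zone_stub *z)
    {
            return z->wp == z->start + z->capacity;
    }

    int main(void)
    {
            /* Hypothetical zone: made-up sector values for illustration. */
            struct zone_stub z = {
                    .start = 0x10000,
                    .len = 0x20000,
                    .wp = 0x14000,
                    .capacity = 0x20000,
            };

            printf("start: %llu bytes, written: %llu bytes, full: %d\n",
                   (unsigned long long)zone_start_bytes(&z),
                   (unsigned long long)zone_written_bytes(&z),
                   zone_is_full(&z));
            return 0;
    }

The same shape explains the less obvious matches: the superblock check at line 74 treats a zone as effectively full once wp + SUPER_INFO_SECTORS would cross start + capacity, and the meta_write_pointer comparisons (lines 1782, 1844, 1862, 2088) apply the full/ordering test to byte addresses rather than sectors.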