Lines matching defs:cache (btrfs zoned-mode code; the leading numbers are line numbers in the source file)
238 /* Check cache */
280 /* Populate cache */
466 * Enable zone cache only for a zoned device. On a non-zoned device, we
467 * fill the zone info with emulated CONVENTIONAL zones, so no need to
468 * use the cache.
475 "zoned: failed to allocate zone cache for %s",
798 * Space cache writing is not COWed. Disable that to avoid write errors
799 * in sequential zones.
802 btrfs_err(info, "zoned: space cache v1 is not supported");
1214 static int calculate_alloc_pointer(struct btrfs_block_group *cache,
1217 struct btrfs_fs_info *fs_info = cache->fs_info;
1244 key.objectid = cache->start + cache->length;
1256 ret = btrfs_previous_extent_item(root, path, cache->start);
1272 if (!(found_key.objectid >= cache->start &&
1273 found_key.objectid + length <= cache->start + cache->length)) {
1277 *offset_ret = found_key.objectid + length - cache->start;
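The calculate_alloc_pointer() fragments above (listed lines 1214-1277) recover a zoned block group's write pointer from the extent tree: search backwards from the group's end, take the last extent item found, and the alloc pointer is that extent's end, relative to the group's start. The standalone sketch below reproduces only the range check and the offset arithmetic; the extent values are hypothetical stand-ins for what btrfs_previous_extent_item() would actually find.

/*
 * Standalone sketch of the offset arithmetic in calculate_alloc_pointer()
 * (listed lines 1244-1277). Extent values are hypothetical; the real
 * function walks the extent tree backwards from the block group's end.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bg_start  = 1ULL << 30;		/* cache->start */
	uint64_t bg_length = 256ULL << 20;		/* cache->length */
	uint64_t objectid  = bg_start + (64ULL << 20);	/* last extent's logical start */
	uint64_t ext_len   = 16ULL << 20;		/* last extent's length */

	/* Mirrors the sanity check: the extent must sit inside the group. */
	if (!(objectid >= bg_start &&
	      objectid + ext_len <= bg_start + bg_length)) {
		fprintf(stderr, "extent outside block group\n");
		return 1;
	}

	/* The alloc pointer is the end of the last extent, group-relative. */
	printf("offset = %llu\n",
	       (unsigned long long)(objectid + ext_len - bg_start));
	return 0;
}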
1285 int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
1287 struct btrfs_fs_info *fs_info = cache->fs_info;
1292 u64 logical = cache->start;
1293 u64 length = cache->length;
1325 cache->physical_map = kmemdup(map, map_lookup_size(map->num_stripes), GFP_NOFS);
1326 if (!cache->physical_map) {
1451 set_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
1455 cache->zone_capacity = cache->length;
1456 ret = calculate_alloc_pointer(cache, &last_alloc, new);
1460 cache->start);
1463 cache->alloc_offset = last_alloc;
1464 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
1478 cache->alloc_offset = alloc_offsets[0];
1479 cache->zone_capacity = caps[0];
1481 set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
1510 if (!btrfs_zone_activate(cache)) {
1517 &cache->runtime_flags);
1519 cache->alloc_offset = alloc_offsets[0];
1520 cache->zone_capacity = min(caps[0], caps[1]);
1536 if (cache->alloc_offset > fs_info->zone_size) {
1539 cache->alloc_offset, cache->start);
1543 if (cache->alloc_offset > cache->zone_capacity) {
1546 cache->alloc_offset, cache->zone_capacity,
1547 cache->start);
1552 if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
1555 logical, last_alloc, cache->alloc_offset);
1560 cache->meta_write_pointer = cache->alloc_offset + cache->start;
1561 if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags)) {
1562 btrfs_get_block_group(cache);
1564 list_add_tail(&cache->active_bg_list,
1569 kfree(cache->physical_map);
1570 cache->physical_map = NULL;
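Near the end of btrfs_load_block_group_zone_info() (listed lines 1536-1547), two consistency checks reject a block group whose recovered write pointer is impossible: it may not exceed the zone size, and it may not land in the gap between usable zone capacity and zone size (for a mirrored layout, listed line 1520 already clamped the capacity to the smaller of the two zones). A minimal standalone form of those checks, with the btrfs_err() reporting elided:

/*
 * Simplified form of the alloc-offset sanity checks (listed lines
 * 1536-1547). A sketch, not the kernel code.
 */
#include <errno.h>
#include <stdint.h>

static int check_alloc_offset(uint64_t alloc_offset, uint64_t zone_size,
			      uint64_t zone_capacity)
{
	if (alloc_offset > zone_size)
		return -EIO;	/* write pointer past the zone end */
	if (alloc_offset > zone_capacity)
		return -EIO;	/* write pointer inside the capacity gap */
	return 0;
}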
1581 void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
1585 if (!btrfs_is_zoned(cache->fs_info))
1588 WARN_ON(cache->bytes_super != 0);
1589 unusable = (cache->alloc_offset - cache->used) +
1590 (cache->length - cache->zone_capacity);
1591 free = cache->zone_capacity - cache->alloc_offset;
1594 cache->cached = BTRFS_CACHE_FINISHED;
1595 cache->free_space_ctl->free_space = free;
1596 cache->zone_unusable = unusable;
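The btrfs_calc_zone_unusable() arithmetic (listed lines 1589-1591) follows from two zoned-device facts: space behind the write pointer that is no longer referenced cannot be reclaimed until the zone is reset, and the region between zone capacity and zone size is never writable at all. A worked example with hypothetical numbers:

/*
 * Worked form of the btrfs_calc_zone_unusable() arithmetic (listed lines
 * 1589-1591). All sizes below are hypothetical.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t length        = 256ULL << 20;	/* cache->length: 256 MiB */
	uint64_t zone_capacity = 250ULL << 20;	/* cache->zone_capacity: 250 MiB */
	uint64_t alloc_offset  = 100ULL << 20;	/* cache->alloc_offset: 100 MiB */
	uint64_t used          = 80ULL << 20;	/* cache->used: 80 MiB */

	/* Dead space behind the write pointer plus the capacity gap. */
	uint64_t unusable = (alloc_offset - used) + (length - zone_capacity);
	/* Only the region between the write pointer and capacity is free. */
	uint64_t free_space = zone_capacity - alloc_offset;

	printf("unusable = %" PRIu64 " MiB, free = %" PRIu64 " MiB\n",
	       unusable >> 20, free_space >> 20);	/* 26 MiB, 150 MiB */
	return 0;
}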
1620 struct btrfs_block_group *cache;
1643 cache = btrfs_lookup_block_group(fs_info, start);
1644 ASSERT(cache);
1645 if (!cache)
1648 ret = !!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
1649 btrfs_put_block_group(cache);
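The final fragments (listed lines 1620-1649) show the standard refcounted lookup used to ask whether a logical address lives in a sequential-required zone: look the block group up, test the flag, drop the reference. A condensed sketch of that pattern, built from the helpers named in the listing; the function name is hypothetical and this builds only against kernel headers:

/*
 * Shape of the lookup pattern (kernel-flavored illustration, not the
 * kernel function). The reference taken by btrfs_lookup_block_group()
 * pins the block group while its runtime flags are inspected, and must
 * be dropped with btrfs_put_block_group() on every path.
 */
static bool logical_is_seq_zone(struct btrfs_fs_info *fs_info, u64 start)
{
	struct btrfs_block_group *cache;
	bool seq;

	cache = btrfs_lookup_block_group(fs_info, start);
	if (!cache)
		return false;

	seq = test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
	btrfs_put_block_group(cache);
	return seq;
}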