Lines matching defs:cache (references to the block group "cache" variable in fs/btrfs/block-group.c; the number at the start of each entry is the line number in that file)
10 #include "free-space-cache.h"
141 void btrfs_get_block_group(struct btrfs_block_group *cache)
143 refcount_inc(&cache->refs);
146 void btrfs_put_block_group(struct btrfs_block_group *cache)
148 if (refcount_dec_and_test(&cache->refs)) {
149 WARN_ON(cache->pinned > 0);
157 if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) ||
158 !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info))
159 WARN_ON(cache->reserved > 0);
166 if (WARN_ON(!list_empty(&cache->discard_list)))
167 btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
168 cache);
170 kfree(cache->free_space_ctl);
171 kfree(cache->physical_map);
172 kfree(cache);
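The lines above (141-172) are btrfs_get_block_group() and btrfs_put_block_group(): a plain refcount pair, where the final put sanity-checks that nothing is still pinned or reserved, cancels any queued discard work, and frees the free-space ctl, the physical map and the group itself. A minimal usage sketch, assuming a caller that resolves a group by byte number; example_use_block_group() is illustrative, while btrfs_lookup_block_group() is the real helper and already returns with a reference held:

/* Hedged sketch: look up a block group, use it, drop the reference. */
static void example_use_block_group(struct btrfs_fs_info *fs_info, u64 bytenr)
{
        struct btrfs_block_group *bg;

        bg = btrfs_lookup_block_group(fs_info, bytenr);
        if (!bg)
                return;                 /* no block group covers bytenr */

        /* ... inspect bg->start, bg->length, bg->used under bg->lock ... */

        btrfs_put_block_group(bg);      /* may free bg on the last reference */
}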
177 * This adds the block group to the fs_info rb tree for the block group cache
184 struct btrfs_block_group *cache;
194 cache = rb_entry(parent, struct btrfs_block_group, cache_node);
195 if (block_group->start < cache->start) {
197 } else if (block_group->start > cache->start) {
222 struct btrfs_block_group *cache, *ret = NULL;
230 cache = rb_entry(n, struct btrfs_block_group, cache_node);
231 end = cache->start + cache->length - 1;
232 start = cache->start;
236 ret = cache;
240 ret = cache;
245 ret = cache;
275 struct btrfs_block_group *cache)
277 struct btrfs_fs_info *fs_info = cache->fs_info;
283 if (RB_EMPTY_NODE(&cache->cache_node)) {
284 const u64 next_bytenr = cache->start + cache->length;
287 btrfs_put_block_group(cache);
290 node = rb_next(&cache->cache_node);
291 btrfs_put_block_group(cache);
293 cache = rb_entry(node, struct btrfs_block_group, cache_node);
294 btrfs_get_block_group(cache);
296 cache = NULL;
298 return cache;
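Lines 275-298 are btrfs_next_block_group(): it drops the caller's reference, advances to the next node in the block group rb tree (re-searching from start + length if the current group was removed in the meantime), and returns the next group with a fresh reference held. That gives the usual whole-filesystem walk, sketched below with a hypothetical helper; btrfs_lookup_first_block_group() is the real starting point:

/*
 * Hedged sketch: visit every block group. Each iteration holds exactly
 * one reference; btrfs_next_block_group() drops it and takes the next
 * one, so only an early exit needs an explicit put.
 */
static void example_walk_block_groups(struct btrfs_fs_info *fs_info)
{
        struct btrfs_block_group *bg;

        for (bg = btrfs_lookup_first_block_group(fs_info, 0); bg;
             bg = btrfs_next_block_group(bg)) {
                /* ... look at bg ...; to stop early:
                 *      btrfs_put_block_group(bg);
                 *      break;
                 */
        }
}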
405 struct btrfs_block_group *cache)
409 spin_lock(&cache->lock);
410 if (!cache->caching_ctl) {
411 spin_unlock(&cache->lock);
415 ctl = cache->caching_ctl;
417 spin_unlock(&cache->lock);
437 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
440 void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
446 caching_ctl = btrfs_get_caching_control(cache);
458 wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
460 (cache->free_space_ctl->free_space >= num_bytes)));
465 static int btrfs_caching_ctl_wait_done(struct btrfs_block_group *cache,
468 wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
469 return cache->cached == BTRFS_CACHE_ERROR ? -EIO : 0;
472 static int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
477 caching_ctl = btrfs_get_caching_control(cache);
479 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
480 ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
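Lines 405-480 are the caching-control helpers: btrfs_get_caching_control() hands back the group's caching_ctl if a caching run is still in flight, btrfs_wait_block_group_cache_progress() sleeps until caching finishes or at least num_bytes of free space has been found, and btrfs_wait_block_group_cache_done() turns BTRFS_CACHE_ERROR into -EIO. Per the comment at line 437, users of the progress variant must re-check the error state themselves; a hedged sketch of such a caller (the function name is made up):

/*
 * Hedged sketch: wait until @bg has cached enough free space for an
 * allocation of @num_bytes, honouring the rule from line 437.
 */
static int example_wait_for_space(struct btrfs_block_group *bg, u64 num_bytes)
{
        btrfs_wait_block_group_cache_progress(bg, num_bytes);

        if (bg->cached == BTRFS_CACHE_ERROR)
                return -EIO;
        return 0;
}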
507 * Add a free space range to the in memory free space cache of a block group.
509 * locations are not added to the free space cache.
515 * added to the block group's free space cache.
867 * We failed to load the space cache, set ourselves to
878 * can't actually cache from the free space tree as our commit root and
918 int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
920 struct btrfs_fs_info *fs_info = cache->fs_info;
924 /* Allocator for zoned filesystems does not use the cache at all */
935 caching_ctl->block_group = cache;
940 spin_lock(&cache->lock);
941 if (cache->cached != BTRFS_CACHE_NO) {
944 caching_ctl = cache->caching_ctl;
947 spin_unlock(&cache->lock);
950 WARN_ON(cache->caching_ctl);
951 cache->caching_ctl = caching_ctl;
952 cache->cached = BTRFS_CACHE_STARTED;
953 spin_unlock(&cache->lock);
960 btrfs_get_block_group(cache);
965 ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
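Lines 918-965 are btrfs_cache_block_group(cache, wait): allocate a caching_ctl, move cache->cached from BTRFS_CACHE_NO to BTRFS_CACHE_STARTED under cache->lock (or reuse the ctl of a run already in flight), take a block-group reference for the caching worker, and, when wait is true, finish with btrfs_caching_ctl_wait_done(). The call pattern used elsewhere in this file (see lines 3626-3627 further down) looks roughly like this hedged sketch:

/* Hedged sketch: make sure @bg's free-space cache is fully loaded. */
static int example_ensure_cached(struct btrfs_block_group *bg)
{
        int ret = 0;

        if (!btrfs_block_group_done(bg))
                ret = btrfs_cache_block_group(bg, true);        /* wait = true */

        return ret;     /* -EIO if caching ended in BTRFS_CACHE_ERROR */
}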
1113 * Make sure our free space cache IO is done before removing the
1335 * Mark block group @cache read-only, so later write won't happen to block
1336 * group @cache.
1347 static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
1349 struct btrfs_space_info *sinfo = cache->space_info;
1354 spin_lock(&cache->lock);
1356 if (cache->swap_extents) {
1361 if (cache->ro) {
1362 cache->ro++;
1367 num_bytes = cache->length - cache->reserved - cache->pinned -
1368 cache->bytes_super - cache->zone_unusable - cache->used;
1392 if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes,
1399 if (btrfs_is_zoned(cache->fs_info)) {
1401 sinfo->bytes_readonly += cache->zone_unusable;
1402 sinfo->bytes_zone_unusable -= cache->zone_unusable;
1403 cache->zone_unusable = 0;
1405 cache->ro++;
1406 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
1409 spin_unlock(&cache->lock);
1411 if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
1412 btrfs_info(cache->fs_info,
1413 "unable to make block group %llu ro", cache->start);
1414 btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
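Lines 1335-1414 are inc_block_group_ro(), which marks a group read-only only if the space_info can absorb the loss of its remaining allocatable bytes (or if force/overcommit allows it), adding them to sinfo->bytes_readonly and, on zoned filesystems, folding zone_unusable into that as well. The key arithmetic at lines 1367-1368, restated as a hedged helper with an illustrative name:

/*
 * Hedged sketch: bytes of @bg that are still allocatable and would be
 * withdrawn from the space_info if the group went read-only.
 */
static u64 example_ro_delta(const struct btrfs_block_group *bg)
{
        return bg->length - bg->reserved - bg->pinned -
               bg->bytes_super - bg->zone_unusable - bg->used;
}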
2146 static int exclude_super_stripes(struct btrfs_block_group *cache)
2148 struct btrfs_fs_info *fs_info = cache->fs_info;
2155 if (cache->start < BTRFS_SUPER_INFO_OFFSET) {
2156 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start;
2157 cache->bytes_super += stripe_len;
2158 ret = set_extent_bit(&fs_info->excluded_extents, cache->start,
2159 cache->start + stripe_len - 1,
2167 ret = btrfs_rmap_block(fs_info, cache->start,
2177 cache->start);
2183 cache->start + cache->length - logical[nr]);
2185 cache->bytes_super += len;
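Lines 2146-2185 are exclude_super_stripes(): any superblock copies that fall inside the group are added to cache->bytes_super and recorded in fs_info->excluded_extents so the free-space code never hands them out. The simplest case is the carve-out at lines 2155-2157 for the group that starts at byte 0; a hedged restatement with an illustrative helper:

/*
 * Hedged sketch of lines 2155-2157: everything below the primary
 * superblock at BTRFS_SUPER_INFO_OFFSET (64K) is unusable.
 */
static u64 example_primary_super_bytes(const struct btrfs_block_group *bg)
{
        if (bg->start < BTRFS_SUPER_INFO_OFFSET)
                return BTRFS_SUPER_INFO_OFFSET - bg->start;
        return 0;
}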
2203 struct btrfs_block_group *cache;
2205 cache = kzalloc(sizeof(*cache), GFP_NOFS);
2206 if (!cache)
2209 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
2211 if (!cache->free_space_ctl) {
2212 kfree(cache);
2216 cache->start = start;
2218 cache->fs_info = fs_info;
2219 cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
2221 cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED;
2223 refcount_set(&cache->refs, 1);
2224 spin_lock_init(&cache->lock);
2225 init_rwsem(&cache->data_rwsem);
2226 INIT_LIST_HEAD(&cache->list);
2227 INIT_LIST_HEAD(&cache->cluster_list);
2228 INIT_LIST_HEAD(&cache->bg_list);
2229 INIT_LIST_HEAD(&cache->ro_list);
2230 INIT_LIST_HEAD(&cache->discard_list);
2231 INIT_LIST_HEAD(&cache->dirty_list);
2232 INIT_LIST_HEAD(&cache->io_list);
2233 INIT_LIST_HEAD(&cache->active_bg_list);
2234 btrfs_init_free_space_ctl(cache, cache->free_space_ctl);
2235 atomic_set(&cache->frozen, 0);
2236 mutex_init(&cache->free_space_lock);
2238 return cache;
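Lines 2203-2238 are btrfs_create_block_group_cache(): two GFP_NOFS allocations (the group and its free_space_ctl), then field, lock and list-head initialization, with the refcount starting at 1. Both callers in this listing (lines 2306-2307 and 2795-2796) only check for NULL; a hedged sketch of that pattern with an illustrative wrapper (the real caller at lines 2388-2391 also tears down the free-space cache on failure):

/*
 * Hedged sketch: allocate a group descriptor, add it to the rb tree,
 * and drop the initial reference if that fails.
 */
static int example_new_bg(struct btrfs_fs_info *fs_info, u64 start, u64 length)
{
        struct btrfs_block_group *bg;
        int ret;

        bg = btrfs_create_block_group_cache(fs_info, start);
        if (!bg)
                return -ENOMEM;

        bg->length = length;

        ret = btrfs_add_block_group_cache(fs_info, bg);
        if (ret)
                btrfs_put_block_group(bg);      /* drops the initial reference */
        return ret;
}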
2300 struct btrfs_block_group *cache;
2306 cache = btrfs_create_block_group_cache(info, key->objectid);
2307 if (!cache)
2310 cache->length = key->offset;
2311 cache->used = btrfs_stack_block_group_used(bgi);
2312 cache->commit_used = cache->used;
2313 cache->flags = btrfs_stack_block_group_flags(bgi);
2314 cache->global_root_id = btrfs_stack_block_group_chunk_objectid(bgi);
2316 set_free_space_tree_thresholds(cache);
2320 * When we mount with old space cache, we need to
2324 * truncate the old free space cache inode and
2327 * the new space cache info onto disk.
2330 cache->disk_cache_state = BTRFS_DC_CLEAR;
2332 if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
2333 (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
2336 cache->start);
2341 ret = btrfs_load_block_group_zone_info(cache, false);
2344 cache->start);
2353 ret = exclude_super_stripes(cache);
2356 btrfs_free_excluded_extents(cache);
2373 btrfs_calc_zone_unusable(cache);
2375 btrfs_free_excluded_extents(cache);
2376 } else if (cache->length == cache->used) {
2377 cache->cached = BTRFS_CACHE_FINISHED;
2378 btrfs_free_excluded_extents(cache);
2379 } else if (cache->used == 0) {
2380 cache->cached = BTRFS_CACHE_FINISHED;
2381 ret = btrfs_add_new_free_space(cache, cache->start,
2382 cache->start + cache->length, NULL);
2383 btrfs_free_excluded_extents(cache);
2388 ret = btrfs_add_block_group_cache(info, cache);
2390 btrfs_remove_free_space_cache(cache);
2393 trace_btrfs_add_block_group(info, cache, 0);
2394 btrfs_add_bg_to_space_info(info, cache);
2396 set_avail_alloc_bits(info, cache->flags);
2397 if (btrfs_chunk_writeable(info, cache->start)) {
2398 if (cache->used == 0) {
2399 ASSERT(list_empty(&cache->bg_list));
2401 btrfs_discard_queue_work(&info->discard_ctl, cache);
2403 btrfs_mark_bg_unused(cache);
2406 inc_block_group_ro(cache, 1);
2411 btrfs_put_block_group(cache);
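Lines 2300-2411 are read_one_block_group(), which rebuilds one in-memory group from its on-disk item at mount. The branch at lines 2376-2383 pre-seeds the caching state so that completely full and completely empty groups never need the expensive caching walk, and an empty one gets its whole range added as free space up front. Restated as a hedged helper (zoned handling from line 2373 omitted):

/* Hedged sketch of lines 2376-2383. */
static int example_preset_cached_state(struct btrfs_block_group *bg)
{
        int ret = 0;

        if (bg->length == bg->used) {
                /* no free space at all: nothing to cache */
                bg->cached = BTRFS_CACHE_FINISHED;
        } else if (bg->used == 0) {
                /* completely free: one entry spans the whole group */
                bg->cached = BTRFS_CACHE_FINISHED;
                ret = btrfs_add_new_free_space(bg, bg->start,
                                               bg->start + bg->length, NULL);
        }
        btrfs_free_excluded_extents(bg);
        return ret;
}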
2434 /* Fill dummy cache as FULL */
2442 * We may have some valid block group cache added already, in
2471 struct btrfs_block_group *cache;
2536 cache = list_first_entry(&space_info->block_groups[i],
2539 btrfs_sysfs_add_block_group_type(cache);
2552 list_for_each_entry(cache,
2555 inc_block_group_ro(cache, 1);
2556 list_for_each_entry(cache,
2559 inc_block_group_ro(cache, 1);
2790 struct btrfs_block_group *cache;
2795 cache = btrfs_create_block_group_cache(fs_info, chunk_offset);
2796 if (!cache)
2804 set_bit(BLOCK_GROUP_FLAG_NEW, &cache->runtime_flags);
2806 cache->length = size;
2807 set_free_space_tree_thresholds(cache);
2808 cache->flags = type;
2809 cache->cached = BTRFS_CACHE_FINISHED;
2810 cache->global_root_id = calculate_global_root_id(fs_info, cache->start);
2813 set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags);
2815 ret = btrfs_load_block_group_zone_info(cache, true);
2817 btrfs_put_block_group(cache);
2821 ret = exclude_super_stripes(cache);
2824 btrfs_free_excluded_extents(cache);
2825 btrfs_put_block_group(cache);
2829 ret = btrfs_add_new_free_space(cache, chunk_offset, chunk_offset + size, NULL);
2830 btrfs_free_excluded_extents(cache);
2832 btrfs_put_block_group(cache);
2841 cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
2842 ASSERT(cache->space_info);
2844 ret = btrfs_add_block_group_cache(fs_info, cache);
2846 btrfs_remove_free_space_cache(cache);
2847 btrfs_put_block_group(cache);
2855 trace_btrfs_add_block_group(fs_info, cache, 1);
2856 btrfs_add_bg_to_space_info(fs_info, cache);
2860 if (btrfs_should_fragment_free_space(cache)) {
2861 cache->space_info->bytes_used += size >> 1;
2862 fragment_free_space(cache);
2866 list_add_tail(&cache->bg_list, &trans->new_bgs);
2871 return cache;
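Lines 2790-2871 are btrfs_make_block_group(), the chunk-allocation path: a fresh group is created in memory, marked BLOCK_GROUP_FLAG_NEW and BTRFS_CACHE_FINISHED (it is empty by definition), its whole range is added as free space, and it is queued on trans->new_bgs so the item is inserted at commit time. A hedged caller sketch; the pointer-or-error convention and the parameter list (transaction, type flags, chunk start, size) are assumptions read off the fields set at lines 2806-2809:

/* Hedged sketch: how a chunk-allocation caller might consume the result. */
static int example_alloc_bg(struct btrfs_trans_handle *trans, u64 type,
                            u64 chunk_offset, u64 size)
{
        struct btrfs_block_group *bg;

        bg = btrfs_make_block_group(trans, type, chunk_offset, size);
        if (IS_ERR(bg))
                return PTR_ERR(bg);

        /* the new group is already queued on trans->new_bgs (line 2866) */
        return 0;
}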
2878 * @cache: the destination block group
2883 int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
2886 struct btrfs_fs_info *fs_info = cache->fs_info;
2901 ret = inc_block_group_ro(cache, 0);
2915 * block group cache has started writing. If it already started,
2937 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
2938 if (alloc_flags != cache->flags) {
2952 ret = inc_block_group_ro(cache, 0);
2964 (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM))
2967 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
2975 ret = btrfs_zoned_activate_one_bg(fs_info, cache->space_info, true);
2979 ret = inc_block_group_ro(cache, 0);
2983 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
2984 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
2996 void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
2998 struct btrfs_space_info *sinfo = cache->space_info;
3001 BUG_ON(!cache->ro);
3004 spin_lock(&cache->lock);
3005 if (!--cache->ro) {
3006 if (btrfs_is_zoned(cache->fs_info)) {
3008 cache->zone_unusable =
3009 (cache->alloc_offset - cache->used) +
3010 (cache->length - cache->zone_capacity);
3011 sinfo->bytes_zone_unusable += cache->zone_unusable;
3012 sinfo->bytes_readonly -= cache->zone_unusable;
3014 num_bytes = cache->length - cache->reserved -
3015 cache->pinned - cache->bytes_super -
3016 cache->zone_unusable - cache->used;
3018 list_del_init(&cache->ro_list);
3020 spin_unlock(&cache->lock);
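Lines 2878-3020 pair btrfs_inc_block_group_ro() with btrfs_dec_block_group_ro(): the inc side may allocate a new chunk first so the space_info keeps enough writable space, then retries inc_block_group_ro(); the dec side recomputes the unused bytes (or zone_unusable on zoned filesystems) and takes the group back off sinfo->ro_bgs. A hedged sketch of the pairing as used by work that must keep a group stable while it runs; the function name is made up and the second argument is the do_chunk_alloc flag:

/*
 * Hedged sketch: hold @bg read-only around work that must not race
 * with new allocations landing in it (relocation/scrub style).
 */
static int example_with_bg_ro(struct btrfs_block_group *bg)
{
        int ret;

        ret = btrfs_inc_block_group_ro(bg, true);       /* may allocate a chunk */
        if (ret)
                return ret;

        /* ... operate on the extents of this group ... */

        btrfs_dec_block_group_ro(bg);
        return 0;
}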
3026 struct btrfs_block_group *cache)
3041 * We cannot use cache->used directly outside of the spin lock, as it
3044 spin_lock(&cache->lock);
3045 old_commit_used = cache->commit_used;
3046 used = cache->used;
3048 if (cache->commit_used == used) {
3049 spin_unlock(&cache->lock);
3052 cache->commit_used = used;
3053 spin_unlock(&cache->lock);
3055 key.objectid = cache->start;
3057 key.offset = cache->length;
3070 cache->global_root_id);
3071 btrfs_set_stack_block_group_flags(&bgi, cache->flags);
3086 spin_lock(&cache->lock);
3087 cache->commit_used = old_commit_used;
3088 spin_unlock(&cache->lock);
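Lines 3026-3088 are update_block_group_item(). Two details are visible above: the item is keyed by (start, BLOCK_GROUP_ITEM, length), and cache->commit_used lets the function skip the tree update entirely when the used byte count has not changed since the last commit, restoring the old value if the update later fails (lines 3086-3088). A hedged restatement of the early-out, wrapped in an illustrative helper:

/*
 * Hedged sketch of lines 3044-3057: build the item key and decide
 * whether the on-disk item actually needs rewriting.
 */
static bool example_bg_item_needs_update(struct btrfs_block_group *cache,
                                         struct btrfs_key *key)
{
        bool needs_update = true;

        key->objectid = cache->start;
        key->type = BTRFS_BLOCK_GROUP_ITEM_KEY;
        key->offset = cache->length;

        spin_lock(&cache->lock);
        if (cache->commit_used == cache->used)
                needs_update = false;   /* nothing changed since last commit */
        else
                cache->commit_used = cache->used;
        spin_unlock(&cache->lock);

        return needs_update;
}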
3147 * from here on out we know not to trust this cache when we load up next
3155 * super cache generation to 0 so we know to invalidate the
3156 * cache, but then we'd have to keep track of the block groups
3157 * that fail this way so we know we _have_ to reset this cache
3158 * before the next commit or risk reading stale cache. So to
3202 * We hit an ENOSPC when setting up the cache in this transaction, just
3203 * skip doing the setup, we've already cleared the cache so we're safe.
3214 * cache.
3232 * Our cache requires contiguous chunks so that we don't modify a bunch
3233 * of metadata or split extents when writing the cache out, which means
3262 struct btrfs_block_group *cache, *tmp;
3275 list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3277 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3278 cache_save_setup(cache, trans, path);
3286 * Transaction commit does final block group cache writeback during a critical
3288 * order for the cache to actually match the block group, but can introduce a
3291 * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO.
3300 struct btrfs_block_group *cache;
3332 * writing out the cache
3338 cache = list_first_entry(&dirty, struct btrfs_block_group,
3345 if (!list_empty(&cache->io_list)) {
3346 list_del_init(&cache->io_list);
3347 btrfs_wait_cache_io(trans, cache, path);
3348 btrfs_put_block_group(cache);
3353 * btrfs_wait_cache_io uses the cache->dirty_list to decide if
3361 list_del_init(&cache->dirty_list);
3366 cache_save_setup(cache, trans, path);
3368 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3369 cache->io_ctl.inode = NULL;
3370 ret = btrfs_write_out_cache(trans, cache, path);
3371 if (ret == 0 && cache->io_ctl.inode) {
3379 list_add_tail(&cache->io_list, io);
3382 * If we failed to write the cache, the
3389 ret = update_block_group_item(trans, path, cache);
3402 if (list_empty(&cache->dirty_list)) {
3403 list_add_tail(&cache->dirty_list,
3405 btrfs_get_block_group(cache);
3416 btrfs_put_block_group(cache);
3466 struct btrfs_block_group *cache;
3482 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
3494 cache = list_first_entry(&cur_trans->dirty_bgs,
3503 if (!list_empty(&cache->io_list)) {
3505 list_del_init(&cache->io_list);
3506 btrfs_wait_cache_io(trans, cache, path);
3507 btrfs_put_block_group(cache);
3515 list_del_init(&cache->dirty_list);
3519 cache_save_setup(cache, trans, path);
3525 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3526 cache->io_ctl.inode = NULL;
3527 ret = btrfs_write_out_cache(trans, cache, path);
3528 if (ret == 0 && cache->io_ctl.inode) {
3530 list_add_tail(&cache->io_list, io);
3533 * If we failed to write the cache, the
3540 ret = update_block_group_item(trans, path, cache);
3544 * cache's inode (at inode.c:btrfs_finish_ordered_io())
3557 ret = update_block_group_item(trans, path, cache);
3565 btrfs_put_block_group(cache);
3576 cache = list_first_entry(io, struct btrfs_block_group,
3578 list_del_init(&cache->io_list);
3579 btrfs_wait_cache_io(trans, cache, path);
3580 btrfs_put_block_group(cache);
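Lines 3262-3580 cover the space-cache setup pass and the two writeout passes: btrfs_start_dirty_block_groups() runs before the commit's critical section and drains a spliced copy of cur_trans->dirty_bgs (line 3338), while btrfs_write_dirty_block_groups() drains cur_trans->dirty_bgs itself during the commit (line 3494). Both do the same per-group work: finish any earlier space-cache IO queued on io_list, run cache_save_setup(), write out the v1 space cache, then update the block group item. A hedged sketch of that shared skeleton; the function name is made up, it would have to live next to the static helpers it calls, and the list locking and error unwinding are trimmed:

/*
 * Hedged sketch of the per-group work in the dirty_bgs loops
 * (lines 3338-3416 and 3494-3565). @io stands for &cur_trans->io_bgs.
 */
static int example_write_one_dirty_bg(struct btrfs_trans_handle *trans,
                                      struct btrfs_block_group *cache,
                                      struct btrfs_path *path,
                                      struct list_head *io)
{
        bool should_put = true;
        int ret;

        /* finish a space-cache write started by an earlier pass */
        if (!list_empty(&cache->io_list)) {
                list_del_init(&cache->io_list);
                btrfs_wait_cache_io(trans, cache, path);
                btrfs_put_block_group(cache);
        }

        list_del_init(&cache->dirty_list);
        cache_save_setup(cache, trans, path);

        if (cache->disk_cache_state == BTRFS_DC_SETUP) {
                cache->io_ctl.inode = NULL;
                ret = btrfs_write_out_cache(trans, cache, path);
                if (ret == 0 && cache->io_ctl.inode) {
                        /*
                         * The reference moves to the io list and is dropped
                         * after btrfs_wait_cache_io() at the end of the pass.
                         */
                        should_put = false;
                        list_add_tail(&cache->io_list, io);
                }
        }

        ret = update_block_group_item(trans, path, cache);
        if (should_put)
                btrfs_put_block_group(cache);   /* ref taken when dirtied */
        return ret;
}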
3591 struct btrfs_block_group *cache = NULL;
3612 cache = btrfs_lookup_block_group(info, bytenr);
3613 if (!cache) {
3617 space_info = cache->space_info;
3618 factor = btrfs_bg_type_to_factor(cache->flags);
3621 * If this block group has free space cache written out, we
3626 if (!alloc && !btrfs_block_group_done(cache))
3627 btrfs_cache_block_group(cache, true);
3629 byte_in_group = bytenr - cache->start;
3630 WARN_ON(byte_in_group > cache->length);
3633 spin_lock(&cache->lock);
3636 cache->disk_cache_state < BTRFS_DC_CLEAR)
3637 cache->disk_cache_state = BTRFS_DC_CLEAR;
3639 old_val = cache->used;
3640 num_bytes = min(total, cache->length - byte_in_group);
3643 cache->used = old_val;
3644 cache->reserved -= num_bytes;
3648 spin_unlock(&cache->lock);
3652 cache->used = old_val;
3653 cache->pinned += num_bytes;
3659 reclaim = should_reclaim_block_group(cache, num_bytes);
3661 spin_unlock(&cache->lock);
3670 if (list_empty(&cache->dirty_list)) {
3671 list_add_tail(&cache->dirty_list,
3674 btrfs_get_block_group(cache);
3682 * cache writeout.
3686 btrfs_mark_bg_unused(cache);
3688 btrfs_mark_bg_to_reclaim(cache);
3691 btrfs_put_block_group(cache);
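Lines 3591-3691 are btrfs_update_block_group(): it looks up the group containing bytenr, makes sure the free-space cache is loaded on the free path, and then moves bytes between the group's counters, dirtying the group so its item is rewritten at commit. The two directions of that accounting, restated as a hedged helper (the space_info counters, dirty-list handling and pinned extent bits at lines 3655-3688 are trimmed):

/* Hedged sketch of the accounting at lines 3639-3653. */
static void example_update_bg_used(struct btrfs_block_group *cache,
                                   u64 num_bytes, bool alloc)
{
        spin_lock(&cache->lock);
        if (alloc) {
                /* the extent moves from reserved to used */
                cache->used += num_bytes;
                cache->reserved -= num_bytes;
        } else {
                /* a freed extent stays pinned until the transaction commits */
                cache->used -= num_bytes;
                cache->pinned += num_bytes;
        }
        spin_unlock(&cache->lock);
}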
3704 * @cache: The cache we are manipulating
3714 int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
3718 struct btrfs_space_info *space_info = cache->space_info;
3723 spin_lock(&cache->lock);
3724 if (cache->ro) {
3729 if (btrfs_block_group_should_use_size_class(cache)) {
3731 ret = btrfs_use_block_group_size_class(cache, size_class, force_wrong_size_class);
3735 cache->reserved += num_bytes;
3737 trace_btrfs_space_reservation(cache->fs_info, "space_info",
3739 btrfs_space_info_update_bytes_may_use(cache->fs_info,
3742 cache->delalloc_bytes += num_bytes;
3749 btrfs_try_granting_tickets(cache->fs_info, space_info);
3751 spin_unlock(&cache->lock);
3759 * @cache: The cache we are manipulating
3768 void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
3771 struct btrfs_space_info *space_info = cache->space_info;
3774 spin_lock(&cache->lock);
3775 if (cache->ro)
3777 cache->reserved -= num_bytes;
3782 cache->delalloc_bytes -= num_bytes;
3783 spin_unlock(&cache->lock);
3785 btrfs_try_granting_tickets(cache->fs_info, space_info);
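Lines 3704-3785 pair btrfs_add_reserved_bytes() with btrfs_free_reserved_bytes(): the allocator reserves space in a chosen group (refusing read-only groups and, where size classes apply, wrong-sized allocations unless forced), and the free side gives the bytes back when the reservation is released or the extent is dropped. The counter moves on the reserve side, restated as a hedged helper; the real code holds both space_info->lock and cache->lock across this:

/* Hedged sketch of the counter moves at lines 3735-3742. */
static void example_account_reservation(struct btrfs_block_group *cache,
                                        u64 ram_bytes, u64 num_bytes,
                                        int delalloc)
{
        struct btrfs_space_info *space_info = cache->space_info;

        cache->reserved += num_bytes;
        btrfs_space_info_update_bytes_may_use(cache->fs_info, space_info,
                                              -ram_bytes);
        if (delalloc)
                cache->delalloc_bytes += num_bytes;
}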
3889 * then adds back the entry to the block group cache).
4460 void btrfs_freeze_block_group(struct btrfs_block_group *cache)
4462 atomic_inc(&cache->frozen);
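Lines 4460-4462 show btrfs_freeze_block_group(), which only bumps cache->frozen (initialized to 0 at line 2235); the matching btrfs_unfreeze_block_group() drops the count and, once the group has also been removed, performs the deferred cleanup of its free-space bookkeeping. A hedged sketch of the pairing (the wrapper name is illustrative):

/*
 * Hedged sketch: keep @bg's bookkeeping alive while an operation
 * inspects it, even if the group is removed in the meantime.
 */
static void example_with_bg_frozen(struct btrfs_block_group *bg)
{
        btrfs_freeze_block_group(bg);

        /* ... work that must not race with the group's final cleanup ... */

        btrfs_unfreeze_block_group(bg);
}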