Lines Matching refs:cache
8 #include "free-space-cache.h"
123 void btrfs_get_block_group(struct btrfs_block_group *cache)
125 refcount_inc(&cache->refs);
128 void btrfs_put_block_group(struct btrfs_block_group *cache)
130 if (refcount_dec_and_test(&cache->refs)) {
131 WARN_ON(cache->pinned > 0);
132 WARN_ON(cache->reserved > 0);
139 if (WARN_ON(!list_empty(&cache->discard_list)))
140 btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
141 cache);
151 WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
152 kfree(cache->free_space_ctl);
153 kfree(cache);
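
The get/put pair above is the reference counting that keeps a block group alive while a caller holds it. A minimal caller sketch, with a hypothetical helper name: btrfs_lookup_block_group() hands back the group with a reference already taken, and the caller drops it when done.

	/* Hypothetical helper, illustrative only. */
	static u64 example_block_group_used_bytes(struct btrfs_fs_info *fs_info,
						  u64 bytenr)
	{
		struct btrfs_block_group *bg;
		u64 used;

		bg = btrfs_lookup_block_group(fs_info, bytenr);
		if (!bg)
			return 0;

		spin_lock(&bg->lock);
		used = bg->used;
		spin_unlock(&bg->lock);

		/* Drop the reference taken by the lookup. */
		btrfs_put_block_group(bg);
		return used;
	}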
158 * This adds the block group to the fs_info rb tree for the block group cache
165 struct btrfs_block_group *cache;
174 cache = rb_entry(parent, struct btrfs_block_group, cache_node);
175 if (block_group->start < cache->start) {
177 } else if (block_group->start > cache->start) {
204 struct btrfs_block_group *cache, *ret = NULL;
212 cache = rb_entry(n, struct btrfs_block_group, cache_node);
213 end = cache->start + cache->length - 1;
214 start = cache->start;
218 ret = cache;
222 ret = cache;
227 ret = cache;
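
The search above walks an rb tree keyed by block group start. A self-contained sketch of the simple "which group contains this bytenr" case, assuming the tree root and the cache_node linkage shown above (the real helper additionally supports returning the next group when nothing contains the address):

	static struct btrfs_block_group *example_find_containing_group(
			struct rb_root *root, u64 bytenr)
	{
		struct rb_node *n = root->rb_node;

		while (n) {
			struct btrfs_block_group *cache;

			cache = rb_entry(n, struct btrfs_block_group, cache_node);
			if (bytenr < cache->start)
				n = n->rb_left;
			else if (bytenr > cache->start + cache->length - 1)
				n = n->rb_right;
			else
				return cache;	/* start <= bytenr <= end */
		}
		return NULL;
	}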
260 struct btrfs_block_group *cache)
262 struct btrfs_fs_info *fs_info = cache->fs_info;
268 if (RB_EMPTY_NODE(&cache->cache_node)) {
269 const u64 next_bytenr = cache->start + cache->length;
272 btrfs_put_block_group(cache);
273 cache = btrfs_lookup_first_block_group(fs_info, next_bytenr); return cache;
275 node = rb_next(&cache->cache_node);
276 btrfs_put_block_group(cache);
278 cache = rb_entry(node, struct btrfs_block_group, cache_node);
279 btrfs_get_block_group(cache);
281 cache = NULL;
283 return cache;
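
btrfs_next_block_group() consumes the reference on the group it is given and returns the next group with a fresh reference (or NULL at the end), so the usual iteration looks like the sketch below; the loop body and the surrounding function are illustrative only.

	/* Illustrative walk over every block group in the filesystem. */
	static void example_for_each_block_group(struct btrfs_fs_info *fs_info)
	{
		struct btrfs_block_group *bg;

		for (bg = btrfs_lookup_first_block_group(fs_info, 0); bg;
		     bg = btrfs_next_block_group(bg)) {
			/*
			 * Use bg here; btrfs_next_block_group() drops this
			 * reference and takes one on the next group.  Breaking
			 * out early means the caller still owns the reference
			 * and must btrfs_put_block_group(bg) itself.
			 */
		}
	}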
368 struct btrfs_block_group *cache)
372 spin_lock(&cache->lock);
373 if (!cache->caching_ctl) {
374 spin_unlock(&cache->lock);
378 ctl = cache->caching_ctl;
380 spin_unlock(&cache->lock);
400 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
403 void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
408 caching_ctl = btrfs_get_caching_control(cache);
412 wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
413 (cache->free_space_ctl->free_space >= num_bytes));
418 int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
423 caching_ctl = btrfs_get_caching_control(cache);
425 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
427 wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
428 if (cache->cached == BTRFS_CACHE_ERROR)
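
The requirement in the comment at line 400 above is easy to miss, so here is an illustrative caller pattern: start caching if it has not finished, wait for it, and treat the -EIO return as "do not trust this group's free space". The wrapper function is hypothetical.

	/* Illustrative only: make sure a group's free space is usable. */
	static int example_ensure_cached(struct btrfs_block_group *cache)
	{
		int ret = 0;

		if (!btrfs_block_group_done(cache)) {
			ret = btrfs_cache_block_group(cache, 0);
			if (ret)
				return ret;
			ret = btrfs_wait_block_group_cache_done(cache);
		}
		return ret;	/* -EIO means cache->cached == BTRFS_CACHE_ERROR */
	}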
651 * can't actually cache from the free space tree as our commit root and
693 int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only)
696 struct btrfs_fs_info *fs_info = cache->fs_info;
707 caching_ctl->block_group = cache;
708 caching_ctl->progress = cache->start;
712 spin_lock(&cache->lock);
715 * case where one thread starts to load the space cache info, and then
717 * allocation while the other thread is still loading the space cache
722 * from a block group whose cache gets evicted for one reason or
725 while (cache->cached == BTRFS_CACHE_FAST) {
728 ctl = cache->caching_ctl;
731 spin_unlock(&cache->lock);
737 spin_lock(&cache->lock);
740 if (cache->cached != BTRFS_CACHE_NO) {
741 spin_unlock(&cache->lock);
745 WARN_ON(cache->caching_ctl);
746 cache->caching_ctl = caching_ctl;
747 cache->cached = BTRFS_CACHE_FAST;
748 spin_unlock(&cache->lock);
752 ret = load_free_space_cache(cache);
754 spin_lock(&cache->lock);
756 cache->caching_ctl = NULL;
757 cache->cached = BTRFS_CACHE_FINISHED;
758 cache->last_byte_to_unpin = (u64)-1;
762 cache->caching_ctl = NULL;
763 cache->cached = BTRFS_CACHE_NO;
765 cache->cached = BTRFS_CACHE_STARTED;
766 cache->has_caching_ctl = 1;
769 spin_unlock(&cache->lock);
772 btrfs_should_fragment_free_space(cache)) {
775 spin_lock(&cache->space_info->lock);
776 spin_lock(&cache->lock);
777 bytes_used = cache->length - cache->used;
778 cache->space_info->bytes_used += bytes_used >> 1;
779 spin_unlock(&cache->lock);
780 spin_unlock(&cache->space_info->lock);
781 fragment_free_space(cache);
789 btrfs_free_excluded_extents(cache);
797 spin_lock(&cache->lock);
799 cache->caching_ctl = NULL;
800 cache->cached = BTRFS_CACHE_NO;
802 cache->cached = BTRFS_CACHE_STARTED;
803 cache->has_caching_ctl = 1;
805 spin_unlock(&cache->lock);
819 btrfs_get_block_group(cache);
966 * Make sure our free space cache IO is done before removing the
1218 * Mark block group @cache read-only, so later writes won't happen to block
1219 * group @cache.
1230 static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
1232 struct btrfs_space_info *sinfo = cache->space_info;
1237 spin_lock(&cache->lock);
1239 if (cache->swap_extents) {
1244 if (cache->ro) {
1245 cache->ro++;
1250 num_bytes = cache->length - cache->reserved - cache->pinned -
1251 cache->bytes_super - cache->used;
1275 if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes,
1282 cache->ro++;
1283 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
1286 spin_unlock(&cache->lock);
1288 if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
1289 btrfs_info(cache->fs_info,
1290 "unable to make block group %llu ro", cache->start);
1291 btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
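
For the arithmetic at lines 1250-1251 a worked example may help; the numbers below are purely illustrative.

	/*
	 * Worked example: length = 1GiB, used = 600MiB, reserved = 100MiB,
	 * pinned = 0, bytes_super = 0:
	 *   num_bytes = 1024 - 100 - 0 - 0 - 600 = 324MiB
	 * i.e. 324MiB of still-free space stops being allocatable while the
	 * group is read-only, which is the amount the overcommit check above
	 * has to be able to absorb.
	 */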
1741 static int exclude_super_stripes(struct btrfs_block_group *cache)
1743 struct btrfs_fs_info *fs_info = cache->fs_info;
1749 if (cache->start < BTRFS_SUPER_INFO_OFFSET) {
1750 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start;
1751 cache->bytes_super += stripe_len;
1752 ret = btrfs_add_excluded_extent(fs_info, cache->start,
1760 ret = btrfs_rmap_block(fs_info, cache->start,
1767 cache->start + cache->length - logical[nr]);
1769 cache->bytes_super += len;
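
For context on why lines 1749-1751 special-case the first 64KiB: the btrfs superblock copies live at fixed physical offsets and must be carved out of any block group whose chunk maps over them. A comment-style summary (the offsets are the well-known superblock mirror locations, not taken from the lines listed here):

	/*
	 * Superblock mirror locations (physical): 64KiB, 64MiB and 256GiB.
	 * For each mirror the loop above maps the offset back into this
	 * chunk with btrfs_rmap_block(), adds the overlapping stripes to
	 * ->bytes_super and excludes them from free space so they can never
	 * be handed out to an allocation.
	 */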
1783 static void link_block_group(struct btrfs_block_group *cache)
1785 struct btrfs_space_info *space_info = cache->space_info;
1786 int index = btrfs_bg_flags_to_raid_index(cache->flags);
1789 list_add_tail(&cache->list, &space_info->block_groups[index]);
1796 struct btrfs_block_group *cache;
1798 cache = kzalloc(sizeof(*cache), GFP_NOFS);
1799 if (!cache)
1802 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
1804 if (!cache->free_space_ctl) {
1805 kfree(cache);
1809 cache->start = start;
1811 cache->fs_info = fs_info;
1812 cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
1814 cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED;
1816 refcount_set(&cache->refs, 1);
1817 spin_lock_init(&cache->lock);
1818 init_rwsem(&cache->data_rwsem);
1819 INIT_LIST_HEAD(&cache->list);
1820 INIT_LIST_HEAD(&cache->cluster_list);
1821 INIT_LIST_HEAD(&cache->bg_list);
1822 INIT_LIST_HEAD(&cache->ro_list);
1823 INIT_LIST_HEAD(&cache->discard_list);
1824 INIT_LIST_HEAD(&cache->dirty_list);
1825 INIT_LIST_HEAD(&cache->io_list);
1826 btrfs_init_free_space_ctl(cache);
1827 atomic_set(&cache->frozen, 0);
1828 mutex_init(&cache->free_space_lock);
1829 btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);
1831 return cache;
1888 static void read_block_group_item(struct btrfs_block_group *cache,
1896 cache->length = key->offset;
1900 cache->used = btrfs_stack_block_group_used(&bgi);
1901 cache->flags = btrfs_stack_block_group_flags(&bgi);
1909 struct btrfs_block_group *cache;
1916 cache = btrfs_create_block_group_cache(info, key->objectid);
1917 if (!cache)
1920 read_block_group_item(cache, path, key);
1922 set_free_space_tree_thresholds(cache);
1926 * When we mount with old space cache, we need to
1930 * truncate the old free space cache inode and
1933 * the new space cache info onto disk.
1936 cache->disk_cache_state = BTRFS_DC_CLEAR;
1938 if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
1939 (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
1942 cache->start);
1952 ret = exclude_super_stripes(cache);
1955 btrfs_free_excluded_extents(cache);
1965 if (cache->length == cache->used) {
1966 cache->last_byte_to_unpin = (u64)-1;
1967 cache->cached = BTRFS_CACHE_FINISHED;
1968 btrfs_free_excluded_extents(cache);
1969 } else if (cache->used == 0) {
1970 cache->last_byte_to_unpin = (u64)-1;
1971 cache->cached = BTRFS_CACHE_FINISHED;
1972 add_new_free_space(cache, cache->start,
1973 cache->start + cache->length);
1974 btrfs_free_excluded_extents(cache);
1977 ret = btrfs_add_block_group_cache(info, cache);
1979 btrfs_remove_free_space_cache(cache);
1982 trace_btrfs_add_block_group(info, cache, 0);
1983 btrfs_update_space_info(info, cache->flags, cache->length,
1984 cache->used, cache->bytes_super, &space_info);
1986 cache->space_info = space_info;
1988 link_block_group(cache);
1990 set_avail_alloc_bits(info, cache->flags);
1991 if (btrfs_chunk_readonly(info, cache->start)) {
1992 inc_block_group_ro(cache, 1);
1993 } else if (cache->used == 0) {
1994 ASSERT(list_empty(&cache->bg_list));
1996 btrfs_discard_queue_work(&info->discard_ctl, cache);
1998 btrfs_mark_bg_unused(cache);
2002 btrfs_put_block_group(cache);
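
A reading of the three branches shown above, summarised as a comment; no behaviour beyond what the listed lines already do is implied.

	/*
	 * Mount-time caching outcome for a freshly read block group:
	 *   length == used -> no free space at all: mark BTRFS_CACHE_FINISHED,
	 *                     nothing to add to the free space ctl;
	 *   used == 0      -> the whole range is free: add
	 *                     [start, start + length) immediately and mark
	 *                     BTRFS_CACHE_FINISHED;
	 *   otherwise      -> leave it uncached (BTRFS_CACHE_NO); free space
	 *                     is built lazily by btrfs_cache_block_group().
	 */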
2010 struct btrfs_block_group *cache;
2053 cache = list_first_entry(&space_info->block_groups[i],
2056 btrfs_sysfs_add_block_group_type(cache);
2069 list_for_each_entry(cache,
2072 inc_block_group_ro(cache, 1);
2073 list_for_each_entry(cache,
2076 inc_block_group_ro(cache, 1);
2158 struct btrfs_block_group *cache;
2163 cache = btrfs_create_block_group_cache(fs_info, chunk_offset);
2164 if (!cache)
2167 cache->length = size;
2168 set_free_space_tree_thresholds(cache);
2169 cache->used = bytes_used;
2170 cache->flags = type;
2171 cache->last_byte_to_unpin = (u64)-1;
2172 cache->cached = BTRFS_CACHE_FINISHED;
2173 cache->needs_free_space = 1;
2174 ret = exclude_super_stripes(cache);
2177 btrfs_free_excluded_extents(cache);
2178 btrfs_put_block_group(cache);
2182 add_new_free_space(cache, chunk_offset, chunk_offset + size);
2184 btrfs_free_excluded_extents(cache);
2187 if (btrfs_should_fragment_free_space(cache)) {
2191 fragment_free_space(cache);
2199 cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
2200 ASSERT(cache->space_info);
2202 ret = btrfs_add_block_group_cache(fs_info, cache);
2204 btrfs_remove_free_space_cache(cache);
2205 btrfs_put_block_group(cache);
2213 trace_btrfs_add_block_group(fs_info, cache, 1);
2214 btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
2215 cache->bytes_super, &cache->space_info);
2218 link_block_group(cache);
2220 list_add_tail(&cache->bg_list, &trans->new_bgs);
2232 * @cache: the destination block group
2237 int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
2240 struct btrfs_fs_info *fs_info = cache->fs_info;
2252 * block groups cache has started writing. If it already started,
2273 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
2274 if (alloc_flags != cache->flags) {
2288 ret = inc_block_group_ro(cache, 0);
2300 (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM))
2303 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
2307 ret = inc_block_group_ro(cache, 0);
2311 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
2312 alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
2324 void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
2326 struct btrfs_space_info *sinfo = cache->space_info;
2329 BUG_ON(!cache->ro);
2332 spin_lock(&cache->lock);
2333 if (!--cache->ro) {
2334 num_bytes = cache->length - cache->reserved -
2335 cache->pinned - cache->bytes_super - cache->used;
2337 list_del_init(&cache->ro_list);
2339 spin_unlock(&cache->lock);
2345 struct btrfs_block_group *cache)
2355 key.objectid = cache->start;
2357 key.offset = cache->length;
2368 btrfs_set_stack_block_group_used(&bgi, cache->used);
2371 btrfs_set_stack_block_group_flags(&bgi, cache->flags);
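
The setters above fill a stack copy of the on-disk block group item before it is written into the leaf. A sketch of the whole key/item layout, assuming the usual BTRFS_BLOCK_GROUP_ITEM_KEY convention; the chunk_objectid setter is not among the lines listed here.

	struct btrfs_block_group_item bgi;
	struct btrfs_key key;

	key.objectid = cache->start;	/* logical start of the block group */
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = cache->length;	/* size of the block group */

	btrfs_set_stack_block_group_used(&bgi, cache->used);
	btrfs_set_stack_block_group_chunk_objectid(&bgi,
					BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_stack_block_group_flags(&bgi, cache->flags);
	/* bgi is then copied into the tree block with write_extent_buffer(). */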
2430 * from here on out we know not to trust this cache when we load up next
2438 * super cache generation to 0 so we know to invalidate the
2439 * cache, but then we'd have to keep track of the block groups
2440 * that fail this way so we know we _have_ to reset this cache
2441 * before the next commit or risk reading stale cache. So to
2485 * We hit an ENOSPC when setting up the cache in this transaction, just
2486 * skip doing the setup, we've already cleared the cache so we're safe.
2497 * cache.
2515 * Our cache requires contiguous chunks so that we don't modify a bunch
2516 * of metadata or split extents when writing the cache out, which means
2545 struct btrfs_block_group *cache, *tmp;
2558 list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
2560 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
2561 cache_save_setup(cache, trans, path);
2569 * Transaction commit does final block group cache writeback during a critical
2571 * order for the cache to actually match the block group, but can introduce a
2574 * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO.
2583 struct btrfs_block_group *cache;
2615 * writing out the cache
2621 cache = list_first_entry(&dirty, struct btrfs_block_group,
2628 if (!list_empty(&cache->io_list)) {
2629 list_del_init(&cache->io_list);
2630 btrfs_wait_cache_io(trans, cache, path);
2631 btrfs_put_block_group(cache);
2636 * btrfs_wait_cache_io uses the cache->dirty_list to decide if
2644 list_del_init(&cache->dirty_list);
2649 cache_save_setup(cache, trans, path);
2651 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
2652 cache->io_ctl.inode = NULL;
2653 ret = btrfs_write_out_cache(trans, cache, path);
2654 if (ret == 0 && cache->io_ctl.inode) {
2662 list_add_tail(&cache->io_list, io);
2665 * If we failed to write the cache, the
2672 ret = update_block_group_item(trans, path, cache);
2685 if (list_empty(&cache->dirty_list)) {
2686 list_add_tail(&cache->dirty_list,
2688 btrfs_get_block_group(cache);
2699 btrfs_put_block_group(cache);
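
One detail worth spelling out from the fragment above: membership on the per-transaction dirty list pins the block group. A sketch of the invariant, mirroring the lines around 2685-2688; locking is elided.

	/*
	 * Adding to cur_trans->dirty_bgs takes a reference so the group
	 * cannot be freed while write-out is pending; whoever removes it
	 * from the list (here, or the commit-time writeback) drops it.
	 */
	if (list_empty(&cache->dirty_list)) {
		list_add_tail(&cache->dirty_list, &cur_trans->dirty_bgs);
		btrfs_get_block_group(cache);
	}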
2749 struct btrfs_block_group *cache;
2765 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
2777 cache = list_first_entry(&cur_trans->dirty_bgs,
2786 if (!list_empty(&cache->io_list)) {
2788 list_del_init(&cache->io_list);
2789 btrfs_wait_cache_io(trans, cache, path);
2790 btrfs_put_block_group(cache);
2798 list_del_init(&cache->dirty_list);
2802 cache_save_setup(cache, trans, path);
2808 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
2809 cache->io_ctl.inode = NULL;
2810 ret = btrfs_write_out_cache(trans, cache, path);
2811 if (ret == 0 && cache->io_ctl.inode) {
2813 list_add_tail(&cache->io_list, io);
2816 * If we failed to write the cache, the
2823 ret = update_block_group_item(trans, path, cache);
2827 * cache's inode (at inode.c:btrfs_finish_ordered_io())
2840 ret = update_block_group_item(trans, path, cache);
2848 btrfs_put_block_group(cache);
2859 cache = list_first_entry(io, struct btrfs_block_group,
2861 list_del_init(&cache->io_list);
2862 btrfs_wait_cache_io(trans, cache, path);
2863 btrfs_put_block_group(cache);
2874 struct btrfs_block_group *cache = NULL;
2892 cache = btrfs_lookup_block_group(info, bytenr);
2893 if (!cache) {
2897 factor = btrfs_bg_type_to_factor(cache->flags);
2900 * If this block group has free space cache written out, we
2905 if (!alloc && !btrfs_block_group_done(cache))
2906 btrfs_cache_block_group(cache, 1);
2908 byte_in_group = bytenr - cache->start;
2909 WARN_ON(byte_in_group > cache->length);
2911 spin_lock(&cache->space_info->lock);
2912 spin_lock(&cache->lock);
2915 cache->disk_cache_state < BTRFS_DC_CLEAR)
2916 cache->disk_cache_state = BTRFS_DC_CLEAR;
2918 old_val = cache->used;
2919 num_bytes = min(total, cache->length - byte_in_group);
2922 cache->used = old_val;
2923 cache->reserved -= num_bytes;
2924 cache->space_info->bytes_reserved -= num_bytes;
2925 cache->space_info->bytes_used += num_bytes;
2926 cache->space_info->disk_used += num_bytes * factor;
2927 spin_unlock(&cache->lock);
2928 spin_unlock(&cache->space_info->lock);
2931 cache->used = old_val;
2932 cache->pinned += num_bytes;
2934 cache->space_info, num_bytes);
2935 cache->space_info->bytes_used -= num_bytes;
2936 cache->space_info->disk_used -= num_bytes * factor;
2937 spin_unlock(&cache->lock);
2938 spin_unlock(&cache->space_info->lock);
2940 __btrfs_mod_total_bytes_pinned(cache->space_info,
2948 if (list_empty(&cache->dirty_list)) {
2949 list_add_tail(&cache->dirty_list,
2952 btrfs_get_block_group(cache);
2960 * cache writeout.
2964 btrfs_mark_bg_unused(cache);
2967 btrfs_put_block_group(cache);
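
The alloc and free paths above move the same bytes between different counters; a worked example with illustrative numbers may make the bookkeeping easier to follow.

	/*
	 * Worked example: num_bytes = 1MiB in a DUP group, so factor = 2.
	 *   alloc: cache->used += 1MiB, cache->reserved -= 1MiB,
	 *          space_info->bytes_reserved -= 1MiB,
	 *          space_info->bytes_used += 1MiB,
	 *          space_info->disk_used += 2MiB.
	 *   free:  cache->used -= 1MiB, cache->pinned += 1MiB,
	 *          space_info->bytes_used -= 1MiB,
	 *          space_info->disk_used -= 2MiB;
	 *          the pinned bytes only become reusable once the transaction
	 *          commits and unpins them.
	 */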
2979 * @cache: The cache we are manipulating
2989 int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
2992 struct btrfs_space_info *space_info = cache->space_info;
2996 spin_lock(&cache->lock);
2997 if (cache->ro) {
3000 cache->reserved += num_bytes;
3002 trace_btrfs_space_reservation(cache->fs_info, "space_info",
3004 btrfs_space_info_update_bytes_may_use(cache->fs_info,
3007 cache->delalloc_bytes += num_bytes;
3014 btrfs_try_granting_tickets(cache->fs_info, space_info);
3016 spin_unlock(&cache->lock);
3023 * @cache: The cache we are manipulating
3032 void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
3035 struct btrfs_space_info *space_info = cache->space_info;
3038 spin_lock(&cache->lock);
3039 if (cache->ro)
3041 cache->reserved -= num_bytes;
3046 cache->delalloc_bytes -= num_bytes;
3047 spin_unlock(&cache->lock);
3049 btrfs_try_granting_tickets(cache->fs_info, space_info);
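
A minimal pairing sketch for the two helpers above. The ram_bytes/num_bytes/delalloc parameter names are assumptions based on how the helpers are called elsewhere, not taken from the lines listed here, and the wrapper function is hypothetical.

	/* Illustrative only; parameter names are assumptions. */
	static int example_reserve_then_back_out(struct btrfs_block_group *cache,
						 u64 ram_bytes, u64 num_bytes,
						 int delalloc)
	{
		int ret;

		ret = btrfs_add_reserved_bytes(cache, ram_bytes, num_bytes,
					       delalloc);
		if (ret)
			return ret;	/* e.g. the group went read-only */

		/* ... the allocation is abandoned for some reason ... */
		btrfs_free_reserved_bytes(cache, num_bytes, delalloc);
		return 0;
	}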
3416 void btrfs_freeze_block_group(struct btrfs_block_group *cache)
3418 atomic_inc(&cache->frozen);
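
The frozen counter above has a matching btrfs_unfreeze_block_group() that is not part of this listing; the intended pairing, sketched:

	btrfs_freeze_block_group(cache);
	/* ... work that must not race with removal of the underlying chunk ... */
	btrfs_unfreeze_block_group(cache);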