Lines matching refs:block_group
26 int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group)
28 struct btrfs_fs_info *fs_info = block_group->fs_info;
31 block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
33 block_group->flags & BTRFS_BLOCK_GROUP_DATA);
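For context, lines 26-33 are btrfs_should_fragment_free_space(), a CONFIG_BTRFS_DEBUG helper. A minimal reconstruction of the whole function, with the non-matching btrfs_test_opt() lines filled in from the surrounding source (a sketch, not authoritative):

	int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group)
	{
		struct btrfs_fs_info *fs_info = block_group->fs_info;

		/* Fragment only when the matching debug mount option is set. */
		return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
			block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
		       (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
			block_group->flags & BTRFS_BLOCK_GROUP_DATA);
	}
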
162 * A block_group shouldn't be on the discard_list anymore.
163 * Remove the block_group from the discard_list to prevent us
180 struct btrfs_block_group *block_group)
187 ASSERT(block_group->length != 0);
195 if (block_group->start < cache->start) {
197 } else if (block_group->start > cache->start) {
206 rb_link_node(&block_group->cache_node, parent, p);
207 rb_insert_color_cached(&block_group->cache_node,
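Lines 180-207 are the rb-tree insertion in btrfs_add_block_group_cache(), keyed by block_group->start. A hedged sketch of the surrounding walk; the loop scaffolding and locking are filled in from context and may differ in detail:

	/* Caller holds fs_info->block_group_cache_lock (elided here). */
	struct rb_node **p = &fs_info->block_group_cache_tree.rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	ASSERT(block_group->length != 0);

	while (*p) {
		struct btrfs_block_group *cache;

		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group, cache_node);
		if (block_group->start < cache->start) {
			p = &(*p)->rb_left;
		} else if (block_group->start > cache->start) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return -EEXIST;	/* duplicate start offset */
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color_cached(&block_group->cache_node,
			       &fs_info->block_group_cache_tree, leftmost);
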
486 static void fragment_free_space(struct btrfs_block_group *block_group)
488 struct btrfs_fs_info *fs_info = block_group->fs_info;
489 u64 start = block_group->start;
490 u64 len = block_group->length;
491 u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
496 btrfs_remove_free_space(block_group, start, chunk);
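Lines 486-496 are the debug-only fragment_free_space(). A reconstruction of the full helper; the loop body is inferred from the surrounding source, so treat it as a sketch:

	static void fragment_free_space(struct btrfs_block_group *block_group)
	{
		struct btrfs_fs_info *fs_info = block_group->fs_info;
		u64 start = block_group->start;
		u64 len = block_group->length;
		u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
			fs_info->nodesize : fs_info->sectorsize;
		u64 step = chunk << 1;

		/* Punch out every other chunk-sized range of free space. */
		while (len > chunk) {
			btrfs_remove_free_space(block_group, start, chunk);
			start += step;
			if (len < step)
				len = 0;
			else
				len -= step;
		}
	}
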
511 * @block_group: The target block group.
519 int btrfs_add_new_free_space(struct btrfs_block_group *block_group, u64 start,
522 struct btrfs_fs_info *info = block_group->fs_info;
540 ret = btrfs_add_free_space_async_trimmed(block_group,
554 ret = btrfs_add_free_space_async_trimmed(block_group, start,
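Lines 511-554 are from btrfs_add_new_free_space(), which adds every sub-range of [start, end) not marked in the excluded_extents io tree as free space. A hedged sketch of that gap walk; the find_first_extent_bit() plumbing and the u64 temporaries are assumptions from context:

	/* Body sketch: start, end and the temporaries are function locals. */
	while (start < end) {
		/* Find the next excluded range at or after 'start'. */
		if (find_first_extent_bit(&info->excluded_extents, start,
					  &extent_start, &extent_end,
					  EXTENT_DIRTY | EXTENT_UPTODATE, NULL))
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			/* The gap before the excluded range is free space. */
			size = extent_start - start;
			ret = btrfs_add_free_space_async_trimmed(block_group,
								 start, size);
			if (ret)
				return ret;
			start = extent_end + 1;
		} else {
			break;
		}
	}

	/* Whatever remains past the last excluded range is also free. */
	if (start < end) {
		size = end - start;
		ret = btrfs_add_free_space_async_trimmed(block_group, start,
							 size);
	}
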
568 * @block_group: the block group to sample from
581 struct btrfs_block_group *block_group,
585 struct btrfs_fs_info *fs_info = block_group->fs_info;
588 u64 search_end = block_group->start + block_group->length;
603 extent_root = btrfs_extent_root(fs_info, max_t(u64, block_group->start,
610 search_offset = index * div_u64(block_group->length, max_index);
611 search_key.objectid = block_group->start + search_offset;
618 found_key->objectid >= block_group->start &&
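Lines 568-618 come from sample_block_group_extent_item(), which probes the extent tree at max_index evenly spaced offsets within the block group. A sketch of how the search key is derived and validated; the btrfs_for_each_slot() iteration is filled in from context and hedged:

	search_offset = index * div_u64(block_group->length, max_index);
	search_key.objectid = block_group->start + search_offset;
	search_key.type = BTRFS_EXTENT_ITEM_KEY;
	search_key.offset = 0;

	btrfs_for_each_slot(extent_root, &search_key, found_key, path, ret) {
		/* Accept only an extent item fully inside this block group. */
		if (found_key->type == BTRFS_EXTENT_ITEM_KEY &&
		    found_key->objectid >= block_group->start &&
		    found_key->objectid + found_key->offset <= search_end)
			break;
	}
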
638 * @block_group: the block group we are caching
670 struct btrfs_block_group *block_group)
672 struct btrfs_fs_info *fs_info = block_group->fs_info;
675 u64 min_size = block_group->length;
679 if (!btrfs_block_group_should_use_size_class(block_group))
685 ret = sample_block_group_extent_item(caching_ctl, block_group, i, 5, &key);
694 spin_lock(&block_group->lock);
695 block_group->size_class = size_class;
696 spin_unlock(&block_group->lock);
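Lines 638-696 are load_block_group_size_class(): five extent items are sampled and the size class is derived from the smallest. A hedged sketch of the sampling loop; error paths are trimmed and btrfs_calc_block_group_size_class() is assumed from the surrounding source:

	if (!btrfs_block_group_should_use_size_class(block_group))
		return 0;

	for (i = 0; i < 5; i++) {
		ret = sample_block_group_extent_item(caching_ctl, block_group,
						     i, 5, &key);
		if (ret < 0)
			return ret;
		if (ret > 0)	/* no extent item for this sample point */
			continue;
		min_size = min_t(u64, min_size, key.offset);
		size_class = btrfs_calc_block_group_size_class(min_size);
	}
	if (size_class) {
		spin_lock(&block_group->lock);
		block_group->size_class = size_class;
		spin_unlock(&block_group->lock);
	}
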
704 struct btrfs_block_group *block_group = caching_ctl->block_group;
705 struct btrfs_fs_info *fs_info = block_group->fs_info;
720 last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);
729 if (btrfs_should_fragment_free_space(block_group))
796 if (key.objectid < block_group->start) {
801 if (key.objectid >= block_group->start + block_group->length)
808 ret = btrfs_add_new_free_space(block_group, last,
830 ret = btrfs_add_new_free_space(block_group, last,
831 block_group->start + block_group->length,
846 struct btrfs_block_group *block_group;
852 block_group = caching_ctl->block_group;
853 fs_info = block_group->fs_info;
858 load_block_group_size_class(caching_ctl, block_group);
860 ret = load_free_space_cache(block_group);
870 spin_lock(&block_group->lock);
871 block_group->cached = BTRFS_CACHE_STARTED;
872 spin_unlock(&block_group->lock);
889 spin_lock(&block_group->lock);
890 block_group->caching_ctl = NULL;
891 block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
892 spin_unlock(&block_group->lock);
895 if (btrfs_should_fragment_free_space(block_group)) {
898 spin_lock(&block_group->space_info->lock);
899 spin_lock(&block_group->lock);
900 bytes_used = block_group->length - block_group->used;
901 block_group->space_info->bytes_used += bytes_used >> 1;
902 spin_unlock(&block_group->lock);
903 spin_unlock(&block_group->space_info->lock);
904 fragment_free_space(block_group);
909 btrfs_free_excluded_extents(block_group);
915 btrfs_put_block_group(block_group);
935 caching_ctl->block_group = cache;
1027 struct btrfs_block_group *block_group)
1035 key.objectid = block_group->start;
1037 key.offset = block_group->length;
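Lines 1027-1037 build the search key in remove_block_group_item(). A reconstruction of the whole helper; the root lookup and deletion are filled in from context, so treat the details as a sketch:

	static int remove_block_group_item(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   struct btrfs_block_group *block_group)
	{
		struct btrfs_root *root = btrfs_block_group_root(trans->fs_info);
		struct btrfs_key key;
		int ret;

		/* Items are keyed (start, BLOCK_GROUP_ITEM, length). */
		key.objectid = block_group->start;
		key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
		key.offset = block_group->length;

		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
		if (ret < 0)
			return ret;

		return btrfs_del_item(trans, root, path);
	}
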
1054 struct btrfs_block_group *block_group;
1065 block_group = btrfs_lookup_block_group(fs_info, group_start);
1066 BUG_ON(!block_group);
1067 BUG_ON(!block_group->ro);
1069 trace_btrfs_remove_block_group(block_group);
1074 btrfs_free_excluded_extents(block_group);
1075 btrfs_free_ref_tree_range(fs_info, block_group->start,
1076 block_group->length);
1078 index = btrfs_bg_flags_to_raid_index(block_group->flags);
1079 factor = btrfs_bg_type_to_factor(block_group->flags);
1084 btrfs_return_cluster_to_free_space(block_group, cluster);
1093 btrfs_return_cluster_to_free_space(block_group, cluster);
1096 btrfs_clear_treelog_bg(block_group);
1097 btrfs_clear_data_reloc_bg(block_group);
1109 inode = lookup_free_space_inode(block_group, path);
1117 if (!list_empty(&block_group->io_list)) {
1118 list_del_init(&block_group->io_list);
1120 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
1123 btrfs_wait_cache_io(trans, block_group, path);
1124 btrfs_put_block_group(block_group);
1128 if (!list_empty(&block_group->dirty_list)) {
1129 list_del_init(&block_group->dirty_list);
1131 btrfs_put_block_group(block_group);
1136 ret = btrfs_remove_free_space_inode(trans, inode, block_group);
1141 rb_erase_cached(&block_group->cache_node,
1143 RB_CLEAR_NODE(&block_group->cache_node);
1146 btrfs_put_block_group(block_group);
1150 down_write(&block_group->space_info->groups_sem);
1155 list_del_init(&block_group->list);
1156 if (list_empty(&block_group->space_info->block_groups[index])) {
1157 kobj = block_group->space_info->block_group_kobjs[index];
1158 block_group->space_info->block_group_kobjs[index] = NULL;
1159 clear_avail_alloc_bits(fs_info, block_group->flags);
1161 up_write(&block_group->space_info->groups_sem);
1162 clear_incompat_bg_bits(fs_info, block_group->flags);
1168 if (block_group->cached == BTRFS_CACHE_STARTED)
1169 btrfs_wait_block_group_cache_done(block_group);
1172 caching_ctl = btrfs_get_caching_control(block_group);
1177 if (ctl->block_group == block_group) {
1195 WARN_ON(!list_empty(&block_group->dirty_list));
1196 WARN_ON(!list_empty(&block_group->io_list));
1199 btrfs_remove_free_space_cache(block_group);
1201 spin_lock(&block_group->space_info->lock);
1202 list_del_init(&block_group->ro_list);
1205 WARN_ON(block_group->space_info->total_bytes
1206 < block_group->length);
1207 WARN_ON(block_group->space_info->bytes_readonly
1208 < block_group->length - block_group->zone_unusable);
1209 WARN_ON(block_group->space_info->bytes_zone_unusable
1210 < block_group->zone_unusable);
1211 WARN_ON(block_group->space_info->disk_total
1212 < block_group->length * factor);
1214 block_group->space_info->total_bytes -= block_group->length;
1215 block_group->space_info->bytes_readonly -=
1216 (block_group->length - block_group->zone_unusable);
1217 block_group->space_info->bytes_zone_unusable -=
1218 block_group->zone_unusable;
1219 block_group->space_info->disk_total -= block_group->length * factor;
1221 spin_unlock(&block_group->space_info->lock);
1234 ret = remove_block_group_free_space(trans, block_group);
1238 ret = remove_block_group_item(trans, path, block_group);
1242 spin_lock(&block_group->lock);
1243 set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags);
1250 * from the rbtree, they have already incremented block_group->frozen -
1271 remove_em = (atomic_read(&block_group->frozen) == 0);
1272 spin_unlock(&block_group->lock);
1287 btrfs_put_block_group(block_group);
1471 struct btrfs_block_group *block_group;
1495 block_group = list_first_entry(&fs_info->unused_bgs,
1498 list_del_init(&block_group->bg_list);
1500 space_info = block_group->space_info;
1503 btrfs_put_block_group(block_group);
1508 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
1519 !btrfs_is_free_space_trimmed(block_group)) {
1520 trace_btrfs_skip_unused_block_group(block_group);
1524 block_group);
1529 spin_lock(&block_group->lock);
1530 if (btrfs_is_block_group_used(block_group) || block_group->ro ||
1531 list_is_singular(&block_group->list)) {
1538 trace_btrfs_skip_unused_block_group(block_group);
1539 spin_unlock(&block_group->lock);
1565 if (space_info->total_bytes - block_group->length < used) {
1571 btrfs_get_block_group(block_group);
1572 list_add_tail(&block_group->bg_list, &retry_list);
1574 trace_btrfs_skip_unused_block_group(block_group);
1575 spin_unlock(&block_group->lock);
1581 spin_unlock(&block_group->lock);
1585 ret = inc_block_group_ro(block_group, 0);
1592 ret = btrfs_zone_finish(block_group);
1594 btrfs_dec_block_group_ro(block_group);
1605 block_group->start);
1607 btrfs_dec_block_group_ro(block_group);
1616 if (!clean_pinned_extents(trans, block_group)) {
1617 btrfs_dec_block_group_ro(block_group);
1622 * At this point, the block_group is read only and should fail
1624 * cause this block_group to be placed back on the discard
1625 * lists because now the block_group isn't fully discarded.
1629 if (!list_empty(&block_group->discard_list)) {
1631 btrfs_dec_block_group_ro(block_group);
1633 block_group);
1640 spin_lock(&block_group->lock);
1643 -block_group->pinned);
1644 space_info->bytes_readonly += block_group->pinned;
1645 block_group->pinned = 0;
1647 spin_unlock(&block_group->lock);
1669 btrfs_freeze_block_group(block_group);
1675 ret = btrfs_remove_chunk(trans, block_group->start);
1679 btrfs_unfreeze_block_group(block_group);
1695 list_move(&block_group->bg_list,
1698 btrfs_get_block_group(block_group);
1703 btrfs_put_block_group(block_group);
1717 btrfs_put_block_group(block_group);
2585 struct btrfs_block_group *block_group)
2594 spin_lock(&block_group->lock);
2595 btrfs_set_stack_block_group_used(&bgi, block_group->used);
2597 block_group->global_root_id);
2598 btrfs_set_stack_block_group_flags(&bgi, block_group->flags);
2599 old_commit_used = block_group->commit_used;
2600 block_group->commit_used = block_group->used;
2601 key.objectid = block_group->start;
2603 key.offset = block_group->length;
2604 spin_unlock(&block_group->lock);
2608 spin_lock(&block_group->lock);
2609 block_group->commit_used = old_commit_used;
2610 spin_unlock(&block_group->lock);
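Lines 2585-2610 are update_block_group_item(): commit_used is updated optimistically under the lock and rolled back if writing the on-disk item fails. A hedged sketch of that save/restore pattern (the search and leaf write in the middle are filled in from context):

	spin_lock(&block_group->lock);
	btrfs_set_stack_block_group_used(&bgi, block_group->used);
	btrfs_set_stack_block_group_chunk_objectid(&bgi,
						   block_group->global_root_id);
	btrfs_set_stack_block_group_flags(&bgi, block_group->flags);
	old_commit_used = block_group->commit_used;
	block_group->commit_used = block_group->used;	/* optimistic */
	key.objectid = block_group->start;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = block_group->length;
	spin_unlock(&block_group->lock);

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto fail;
	}
	/* ... copy 'bgi' into the leaf and mark the buffer dirty ... */
fail:
	btrfs_release_path(path);
	if (ret < 0 && ret != -ENOENT) {
		/* The item was not updated: undo the optimistic change. */
		spin_lock(&block_group->lock);
		block_group->commit_used = old_commit_used;
		spin_unlock(&block_group->lock);
	}
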
2715 struct btrfs_block_group *block_group;
2721 block_group = list_first_entry(&trans->new_bgs,
2727 index = btrfs_bg_flags_to_raid_index(block_group->flags);
2729 ret = insert_block_group_item(trans, block_group);
2733 &block_group->runtime_flags)) {
2735 ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group);
2740 ret = insert_dev_extents(trans, block_group->start,
2741 block_group->length);
2744 add_block_group_free_space(trans, block_group);
2752 if (block_group->space_info->block_group_kobjs[index] == NULL)
2753 btrfs_sysfs_add_block_group_type(block_group);
2758 list_del_init(&block_group->bg_list);
2759 clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags);
3094 static int cache_save_setup(struct btrfs_block_group *block_group,
3098 struct btrfs_fs_info *fs_info = block_group->fs_info;
3115 if (block_group->length < (100 * SZ_1M)) {
3116 spin_lock(&block_group->lock);
3117 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3118 spin_unlock(&block_group->lock);
3125 inode = lookup_free_space_inode(block_group, path);
3136 if (block_group->ro)
3139 ret = create_free_space_inode(trans, block_group, path);
3169 if (block_group->cache_generation == trans->transid &&
3186 spin_lock(&block_group->lock);
3187 if (block_group->cached != BTRFS_CACHE_FINISHED ||
3196 spin_unlock(&block_group->lock);
3199 spin_unlock(&block_group->lock);
3216 cache_size = div_u64(block_group->length, SZ_256M);
3249 spin_lock(&block_group->lock);
3251 block_group->cache_generation = trans->transid;
3252 block_group->disk_cache_state = dcs;
3253 spin_unlock(&block_group->lock);
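Line 3216 is the sizing heuristic in cache_save_setup(): the free space cache inode gets roughly 16 sectors of space per 256MiB of block group. A hedged sketch of that computation; the multiplier lines around the div_u64() hit are filled in from context:

	cache_size = div_u64(block_group->length, SZ_256M);
	if (!cache_size)
		cache_size = 1;
	cache_size *= 16;
	cache_size *= fs_info->sectorsize;
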
3702 * Update the block_group and space info counters.
3757 * Update the block_group and space info counters.
4309 struct btrfs_block_group *block_group;
4311 block_group = btrfs_lookup_first_block_group(info, 0);
4312 while (block_group) {
4313 btrfs_wait_block_group_cache_done(block_group);
4314 spin_lock(&block_group->lock);
4316 &block_group->runtime_flags)) {
4317 struct inode *inode = block_group->inode;
4319 block_group->inode = NULL;
4320 spin_unlock(&block_group->lock);
4322 ASSERT(block_group->io_ctl.inode == NULL);
4325 spin_unlock(&block_group->lock);
4327 block_group = btrfs_next_block_group(block_group);
4338 struct btrfs_block_group *block_group;
4365 block_group = list_first_entry(&info->unused_bgs,
4368 list_del_init(&block_group->bg_list);
4369 btrfs_put_block_group(block_group);
4373 block_group = list_first_entry(&info->reclaim_bgs,
4376 list_del_init(&block_group->bg_list);
4377 btrfs_put_block_group(block_group);
4383 block_group = list_first_entry(&info->zone_active_bgs,
4386 list_del_init(&block_group->active_bg_list);
4387 btrfs_put_block_group(block_group);
4393 block_group = rb_entry(n, struct btrfs_block_group,
4395 rb_erase_cached(&block_group->cache_node,
4397 RB_CLEAR_NODE(&block_group->cache_node);
4400 down_write(&block_group->space_info->groups_sem);
4401 list_del(&block_group->list);
4402 up_write(&block_group->space_info->groups_sem);
4408 if (block_group->cached == BTRFS_CACHE_NO ||
4409 block_group->cached == BTRFS_CACHE_ERROR)
4410 btrfs_free_excluded_extents(block_group);
4412 btrfs_remove_free_space_cache(block_group);
4413 ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
4414 ASSERT(list_empty(&block_group->dirty_list));
4415 ASSERT(list_empty(&block_group->io_list));
4416 ASSERT(list_empty(&block_group->bg_list));
4417 ASSERT(refcount_read(&block_group->refs) == 1);
4418 ASSERT(block_group->swap_extents == 0);
4419 btrfs_put_block_group(block_group);
4465 void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
4467 struct btrfs_fs_info *fs_info = block_group->fs_info;
4472 spin_lock(&block_group->lock);
4473 cleanup = (atomic_dec_and_test(&block_group->frozen) &&
4474 test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags));
4475 spin_unlock(&block_group->lock);
4480 em = lookup_extent_mapping(em_tree, block_group->start,
4495 btrfs_remove_free_space_cache(block_group);
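Lines 4465-4495 are btrfs_unfreeze_block_group(): the last task to unfreeze a block group that was removed while frozen also tears down its extent map. A reconstruction with the extent-map cleanup filled in from context (a sketch, not authoritative):

	void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
	{
		struct btrfs_fs_info *fs_info = block_group->fs_info;
		struct extent_map_tree *em_tree;
		struct extent_map *em;
		bool cleanup;

		spin_lock(&block_group->lock);
		cleanup = (atomic_dec_and_test(&block_group->frozen) &&
			   test_bit(BLOCK_GROUP_FLAG_REMOVED,
				    &block_group->runtime_flags));
		spin_unlock(&block_group->lock);

		if (cleanup) {
			em_tree = &fs_info->mapping_tree;
			write_lock(&em_tree->lock);
			em = lookup_extent_mapping(em_tree, block_group->start, 1);
			remove_extent_mapping(em_tree, em);
			write_unlock(&em_tree->lock);

			/* Once for the tree, once for the lookup reference. */
			free_extent_map(em);
			free_extent_map(em);

			/* Drop free-space entries left by trimming tasks. */
			btrfs_remove_free_space_cache(block_group);
		}
	}
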
4539 * Returns: 0 if the size class was valid for this block_group, -EAGAIN in the
4564 * and hit the same empty block_group. Make the loser try again.
4576 * one in the block_group so we set size_class.
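Lines 4539-4576 are comment fragments from btrfs_use_block_group_size_class(). A hedged reconstruction of the decision logic those comments describe:

	int btrfs_use_block_group_size_class(struct btrfs_block_group *bg,
					     enum btrfs_block_group_size_class size_class,
					     bool force_wrong_size_class)
	{
		ASSERT(size_class != BTRFS_BG_SZ_NONE);

		/* Already in the right size class: nothing to do. */
		if (bg->size_class == size_class)
			return 0;

		/*
		 * Mismatched size class: either two allocators raced on the
		 * same empty block group (the loser retries with -EAGAIN) or
		 * the caller explicitly tolerates the mismatch.
		 */
		if (bg->size_class != BTRFS_BG_SZ_NONE) {
			if (force_wrong_size_class)
				return 0;
			return -EAGAIN;
		}

		/* First allocation in this block group: claim the class. */
		bg->size_class = size_class;
		return 0;
	}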