Lines matching refs: block_group (fs/btrfs/block-group.c)
135 * A block_group shouldn't be on the discard_list anymore.
136 * Remove the block_group from the discard_list to prevent us
161 struct btrfs_block_group *block_group)
167 ASSERT(block_group->length != 0);
175 if (block_group->start < cache->start) {
177 } else if (block_group->start > cache->start) {
185 rb_link_node(&block_group->cache_node, parent, p);
186 rb_insert_color(&block_group->cache_node,
189 if (info->first_logical_byte > block_group->start)
190 info->first_logical_byte = block_group->start;
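Note: the fragment above (source lines 161-190) is the block-group cache insertion: groups live in an rbtree keyed by logical start offset, and the smallest start seen is cached in first_logical_byte. A minimal sketch of that insert, assuming the cache_node/start fields and kernel rbtree API visible in the fragments; locking is omitted:

/* Sketch of the rbtree insert behind source lines 161-190. */
static int add_to_cache_tree(struct rb_root *root,
			     struct btrfs_block_group *block_group)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_block_group *cache;

	ASSERT(block_group->length != 0);

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group, cache_node);
		if (block_group->start < cache->start)
			p = &(*p)->rb_left;
		else if (block_group->start > cache->start)
			p = &(*p)->rb_right;
		else
			return -EEXIST;	/* same start: already cached */
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node, root);
	return 0;
}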
435 static void fragment_free_space(struct btrfs_block_group *block_group)
437 struct btrfs_fs_info *fs_info = block_group->fs_info;
438 u64 start = block_group->start;
439 u64 len = block_group->length;
440 u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
445 btrfs_remove_free_space(block_group, start, chunk);
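Note: fragment_free_space() (source lines 435-445) is a debug helper: it deletes every other chunk of a new group's free space so allocator paths that handle fragmentation get exercised. A sketch of the loop those fragments come from; the stride of 2*chunk is an assumption inferred from the visible start/len/chunk setup:

	/* Punch out every other chunk so the group's free space becomes
	 * a comb pattern. chunk is nodesize for metadata block groups,
	 * sectorsize for data; step is assumed to be twice that. */
	u64 step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		if (len < step)
			len = 0;
		else
			len -= step;
	}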
461 u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end)
463 struct btrfs_fs_info *info = block_group->fs_info;
480 ret = btrfs_add_free_space_async_trimmed(block_group,
492 ret = btrfs_add_free_space_async_trimmed(block_group, start,
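Note: add_new_free_space() (source lines 461-492) publishes only the holes of [start, end) that do not overlap excluded extents (superblock mirrors and the like). A simplified sketch of the carve-around loop, assuming find_first_extent_bit() reports excluded ranges as the mainline helper does; the real code BUG_ONs on a nonzero ret:

	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		/* Next excluded range at or after start, if any. */
		if (find_first_extent_bit(&info->excluded_extents, start,
					  &extent_start, &extent_end,
					  EXTENT_DIRTY | EXTENT_UPTODATE,
					  NULL))
			break;

		if (extent_start <= start) {
			/* start sits inside an excluded range: hop over it */
			start = extent_end + 1;
		} else if (extent_start < end) {
			/* free gap before the excluded range */
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space_async_trimmed(block_group,
								 start, size);
			start = extent_end + 1;
		} else {
			break;
		}
	}
	/* Tail gap after the last excluded range. */
	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space_async_trimmed(block_group, start,
							 size);
	}
	return total_added;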
502 struct btrfs_block_group *block_group = caching_ctl->block_group;
503 struct btrfs_fs_info *fs_info = block_group->fs_info;
518 last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);
526 if (btrfs_should_fragment_free_space(block_group))
598 if (key.objectid < block_group->start) {
603 if (key.objectid >= block_group->start + block_group->length)
608 total_found += add_new_free_space(block_group, last,
626 total_found += add_new_free_space(block_group, last,
627 block_group->start + block_group->length);
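Note: inside the caching scan (source lines 502-627), each extent key is clamped to the group: keys below block_group->start are skipped, a key at or past start + length ends the scan, and the gap since the previous extent (tracked in last, initialized at source line 518) becomes free space. Roughly, ignoring the metadata-item case where the extent size is nodesize rather than key.offset:

		if (key.objectid < block_group->start) {
			path->slots[0]++;	/* before the group: skip */
			continue;
		}
		if (key.objectid >= block_group->start + block_group->length)
			break;			/* past the group: done */

		/* The hole between the previous extent and this one is
		 * free space within the group. */
		total_found += add_new_free_space(block_group, last,
						  key.objectid);
		last = key.objectid + key.offset;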
637 struct btrfs_block_group *block_group;
643 block_group = caching_ctl->block_group;
644 fs_info = block_group->fs_info;
662 spin_lock(&block_group->lock);
663 block_group->caching_ctl = NULL;
664 block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
665 spin_unlock(&block_group->lock);
668 if (btrfs_should_fragment_free_space(block_group)) {
671 spin_lock(&block_group->space_info->lock);
672 spin_lock(&block_group->lock);
673 bytes_used = block_group->length - block_group->used;
674 block_group->space_info->bytes_used += bytes_used >> 1;
675 spin_unlock(&block_group->lock);
676 spin_unlock(&block_group->space_info->lock);
677 fragment_free_space(block_group);
684 btrfs_free_excluded_extents(block_group);
690 btrfs_put_block_group(block_group);
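Note: when fragmentation debugging is enabled (source lines 668-677), the cache-done path charges half of the group's free space to space_info->bytes_used before calling fragment_free_space(), since that helper is about to delete every other chunk. Annotated:

	u64 bytes_used;

	/* fragment_free_space() will remove roughly half of
	 * (length - used), so account that half as used up front to
	 * keep the space_info counters consistent. */
	spin_lock(&block_group->space_info->lock);
	spin_lock(&block_group->lock);
	bytes_used = block_group->length - block_group->used;
	block_group->space_info->bytes_used += bytes_used >> 1;
	spin_unlock(&block_group->lock);
	spin_unlock(&block_group->space_info->lock);
	fragment_free_space(block_group);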
707 caching_ctl->block_group = cache;
881 struct btrfs_block_group *block_group)
889 key.objectid = block_group->start;
891 key.offset = block_group->length;
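Note: source lines 881-891 build the key that addresses a BLOCK_GROUP_ITEM: objectid is the group's start bytenr and offset encodes its length. A sketch of remove_block_group_item()'s delete, assuming the item lives in the extent root (true before the block-group-tree feature) and that trans/path are supplied by the caller:

	struct btrfs_key key;
	int ret;

	key.objectid = block_group->start;	/* group's logical start */
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = block_group->length;	/* offset encodes the size */

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path,
				-1, 1);
	if (ret > 0)
		ret = -ENOENT;	/* exact match required for deletion */
	if (ret < 0)
		return ret;

	return btrfs_del_item(trans, fs_info->extent_root, path);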
908 struct btrfs_block_group *block_group;
921 block_group = btrfs_lookup_block_group(fs_info, group_start);
922 BUG_ON(!block_group);
923 BUG_ON(!block_group->ro);
925 trace_btrfs_remove_block_group(block_group);
930 btrfs_free_excluded_extents(block_group);
931 btrfs_free_ref_tree_range(fs_info, block_group->start,
932 block_group->length);
934 index = btrfs_bg_flags_to_raid_index(block_group->flags);
935 factor = btrfs_bg_type_to_factor(block_group->flags);
940 btrfs_return_cluster_to_free_space(block_group, cluster);
949 btrfs_return_cluster_to_free_space(block_group, cluster);
962 inode = lookup_free_space_inode(block_group, path);
970 if (!list_empty(&block_group->io_list)) {
971 list_del_init(&block_group->io_list);
973 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
976 btrfs_wait_cache_io(trans, block_group, path);
977 btrfs_put_block_group(block_group);
981 if (!list_empty(&block_group->dirty_list)) {
982 list_del_init(&block_group->dirty_list);
984 btrfs_put_block_group(block_group);
997 spin_lock(&block_group->lock);
998 if (block_group->iref) {
999 block_group->iref = 0;
1000 block_group->inode = NULL;
1001 spin_unlock(&block_group->lock);
1004 spin_unlock(&block_group->lock);
1012 key.offset = block_group->start;
1027 rb_erase(&block_group->cache_node,
1029 RB_CLEAR_NODE(&block_group->cache_node);
1032 btrfs_put_block_group(block_group);
1034 if (fs_info->first_logical_byte == block_group->start)
1038 down_write(&block_group->space_info->groups_sem);
1043 list_del_init(&block_group->list);
1044 if (list_empty(&block_group->space_info->block_groups[index])) {
1045 kobj = block_group->space_info->block_group_kobjs[index];
1046 block_group->space_info->block_group_kobjs[index] = NULL;
1047 clear_avail_alloc_bits(fs_info, block_group->flags);
1049 up_write(&block_group->space_info->groups_sem);
1050 clear_incompat_bg_bits(fs_info, block_group->flags);
1056 if (block_group->has_caching_ctl)
1057 caching_ctl = btrfs_get_caching_control(block_group);
1058 if (block_group->cached == BTRFS_CACHE_STARTED)
1059 btrfs_wait_block_group_cache_done(block_group);
1060 if (block_group->has_caching_ctl) {
1067 if (ctl->block_group == block_group) {
1084 WARN_ON(!list_empty(&block_group->dirty_list));
1085 WARN_ON(!list_empty(&block_group->io_list));
1088 btrfs_remove_free_space_cache(block_group);
1090 spin_lock(&block_group->space_info->lock);
1091 list_del_init(&block_group->ro_list);
1094 WARN_ON(block_group->space_info->total_bytes
1095 < block_group->length);
1096 WARN_ON(block_group->space_info->bytes_readonly
1097 < block_group->length);
1098 WARN_ON(block_group->space_info->disk_total
1099 < block_group->length * factor);
1101 block_group->space_info->total_bytes -= block_group->length;
1102 block_group->space_info->bytes_readonly -= block_group->length;
1103 block_group->space_info->disk_total -= block_group->length * factor;
1105 spin_unlock(&block_group->space_info->lock);
1118 ret = remove_block_group_free_space(trans, block_group);
1122 ret = remove_block_group_item(trans, path, block_group);
1126 spin_lock(&block_group->lock);
1127 block_group->removed = 1;
1133 * from the rbtree, they have already incremented block_group->frozen -
1154 remove_em = (atomic_read(&block_group->frozen) == 0);
1155 spin_unlock(&block_group->lock);
1170 btrfs_put_block_group(block_group);
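Note: the removal path (source lines 1126-1155) cannot always drop the chunk's extent map immediately, because concurrent tasks that froze the group still need it. The gate is a simple check under the group's lock; if it fails, the last btrfs_unfreeze_block_group() call (source line 3421 below) performs the cleanup instead:

	/* Sketch of the freeze gate at removal time. */
	spin_lock(&block_group->lock);
	block_group->removed = 1;
	/* Anyone who froze the group before it left the rbtree still
	 * needs the extent map; defer its removal to the final
	 * btrfs_unfreeze_block_group() if frozen != 0. */
	remove_em = (atomic_read(&block_group->frozen) == 0);
	spin_unlock(&block_group->lock);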
1347 struct btrfs_block_group *block_group;
1360 block_group = list_first_entry(&fs_info->unused_bgs,
1363 list_del_init(&block_group->bg_list);
1365 space_info = block_group->space_info;
1368 btrfs_put_block_group(block_group);
1373 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
1386 !btrfs_is_free_space_trimmed(block_group)) {
1387 trace_btrfs_skip_unused_block_group(block_group);
1391 block_group);
1395 spin_lock(&block_group->lock);
1396 if (block_group->reserved || block_group->pinned ||
1397 block_group->used || block_group->ro ||
1398 list_is_singular(&block_group->list)) {
1405 trace_btrfs_skip_unused_block_group(block_group);
1406 spin_unlock(&block_group->lock);
1410 spin_unlock(&block_group->lock);
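Note: source lines 1395-1410 are the eligibility test for the unused-bg worker: anything reserved, pinned, used, read-only, or that is the last group of its raid profile is skipped. As a predicate (hypothetical helper name; caller holds block_group->lock):

/* Sketch: can this block group be deleted by the unused-bg worker?
 * list_is_singular() keeps the last block group of each raid
 * profile so allocation never starves. */
static bool bg_is_removable(struct btrfs_block_group *bg)
{
	return !bg->reserved && !bg->pinned && !bg->used && !bg->ro &&
	       !list_is_singular(&bg->list);
}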
1413 ret = inc_block_group_ro(block_group, 0);
1425 block_group->start);
1427 btrfs_dec_block_group_ro(block_group);
1436 if (!clean_pinned_extents(trans, block_group)) {
1437 btrfs_dec_block_group_ro(block_group);
1442 * At this point, the block_group is read only and should fail
1444 * cause this block_group to be placed back on the discard
1445 * lists because now the block_group isn't fully discarded.
1449 if (!list_empty(&block_group->discard_list)) {
1451 btrfs_dec_block_group_ro(block_group);
1453 block_group);
1460 spin_lock(&block_group->lock);
1463 -block_group->pinned);
1464 space_info->bytes_readonly += block_group->pinned;
1465 __btrfs_mod_total_bytes_pinned(space_info, -block_group->pinned);
1466 block_group->pinned = 0;
1468 spin_unlock(&block_group->lock);
1486 btrfs_freeze_block_group(block_group);
1492 ret = btrfs_remove_chunk(trans, block_group->start);
1496 btrfs_unfreeze_block_group(block_group);
1512 list_move(&block_group->bg_list,
1515 btrfs_get_block_group(block_group);
1521 btrfs_put_block_group(block_group);
1530 btrfs_put_block_group(block_group);
2087 struct btrfs_block_group *block_group)
2094 spin_lock(&block_group->lock);
2095 btrfs_set_stack_block_group_used(&bgi, block_group->used);
2098 btrfs_set_stack_block_group_flags(&bgi, block_group->flags);
2099 key.objectid = block_group->start;
2101 key.offset = block_group->length;
2102 spin_unlock(&block_group->lock);
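Note: insert_block_group_item() (source lines 2087-2102) snapshots the on-disk item under the group's spinlock and inserts it after dropping the lock. A sketch; the chunk_objectid setter and the extent root destination are assumed from the mainline stack-item layout:

	struct btrfs_block_group_item bgi;
	struct btrfs_key key;

	spin_lock(&block_group->lock);
	/* Snapshot ->used and ->flags atomically w.r.t. writers. */
	btrfs_set_stack_block_group_used(&bgi, block_group->used);
	btrfs_set_stack_block_group_chunk_objectid(&bgi,
					BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_stack_block_group_flags(&bgi, block_group->flags);
	key.objectid = block_group->start;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = block_group->length;
	spin_unlock(&block_group->lock);

	return btrfs_insert_item(trans, fs_info->extent_root, &key, &bgi,
				 sizeof(bgi));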
2111 struct btrfs_block_group *block_group;
2120 block_group = list_first_entry(&trans->new_bgs,
2126 index = btrfs_bg_flags_to_raid_index(block_group->flags);
2128 ret = insert_block_group_item(trans, block_group);
2131 ret = btrfs_finish_chunk_alloc(trans, block_group->start,
2132 block_group->length);
2135 add_block_group_free_space(trans, block_group);
2143 if (block_group->space_info->block_group_kobjs[index] == NULL)
2144 btrfs_sysfs_add_block_group_type(block_group);
2149 list_del_init(&block_group->bg_list);
2380 static int cache_save_setup(struct btrfs_block_group *block_group,
2384 struct btrfs_fs_info *fs_info = block_group->fs_info;
2398 if (block_group->length < (100 * SZ_1M)) {
2399 spin_lock(&block_group->lock);
2400 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2401 spin_unlock(&block_group->lock);
2408 inode = lookup_free_space_inode(block_group, path);
2419 if (block_group->ro)
2422 ret = create_free_space_inode(trans, block_group, path);
2452 if (block_group->cache_generation == trans->transid &&
2469 spin_lock(&block_group->lock);
2470 if (block_group->cached != BTRFS_CACHE_FINISHED ||
2479 spin_unlock(&block_group->lock);
2482 spin_unlock(&block_group->lock);
2499 num_pages = div_u64(block_group->length, SZ_256M);
2532 spin_lock(&block_group->lock);
2534 block_group->cache_generation = trans->transid;
2535 block_group->disk_cache_state = dcs;
2536 spin_unlock(&block_group->lock);
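Note: cache_save_setup() (source lines 2380-2536) skips the v1 free space cache entirely for groups under 100MiB and otherwise sizes the cache file at one page per 256MiB of group. A condensed sketch of the sizing; the real function takes block_group->lock around the early-out:

	u64 num_pages;

	/* Tiny groups aren't worth a v1 cache file. */
	if (block_group->length < (100 * SZ_1M)) {
		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		return 0;
	}

	/* One page of cache per 256MiB of block group, minimum one. */
	num_pages = div_u64(block_group->length, SZ_256M);
	if (!num_pages)
		num_pages = 1;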
2978 * btrfs_add_reserved_bytes - update the block_group and space info counters
3022 * btrfs_free_reserved_bytes - update the block_group and space info counters
3296 struct btrfs_block_group *block_group;
3302 block_group = btrfs_lookup_first_block_group(info, last);
3303 while (block_group) {
3304 btrfs_wait_block_group_cache_done(block_group);
3305 spin_lock(&block_group->lock);
3306 if (block_group->iref)
3308 spin_unlock(&block_group->lock);
3309 block_group = btrfs_next_block_group(block_group);
3311 if (!block_group) {
3318 inode = block_group->inode;
3319 block_group->iref = 0;
3320 block_group->inode = NULL;
3321 spin_unlock(&block_group->lock);
3322 ASSERT(block_group->io_ctl.inode == NULL);
3324 last = block_group->start + block_group->length;
3325 btrfs_put_block_group(block_group);
3336 struct btrfs_block_group *block_group;
3352 block_group = list_first_entry(&info->unused_bgs,
3355 list_del_init(&block_group->bg_list);
3356 btrfs_put_block_group(block_group);
3362 block_group = rb_entry(n, struct btrfs_block_group,
3364 rb_erase(&block_group->cache_node,
3366 RB_CLEAR_NODE(&block_group->cache_node);
3369 down_write(&block_group->space_info->groups_sem);
3370 list_del(&block_group->list);
3371 up_write(&block_group->space_info->groups_sem);
3377 if (block_group->cached == BTRFS_CACHE_NO ||
3378 block_group->cached == BTRFS_CACHE_ERROR)
3379 btrfs_free_excluded_extents(block_group);
3381 btrfs_remove_free_space_cache(block_group);
3382 ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
3383 ASSERT(list_empty(&block_group->dirty_list));
3384 ASSERT(list_empty(&block_group->io_list));
3385 ASSERT(list_empty(&block_group->bg_list));
3386 ASSERT(refcount_read(&block_group->refs) == 1);
3387 ASSERT(block_group->swap_extents == 0);
3388 btrfs_put_block_group(block_group);
3421 void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
3423 struct btrfs_fs_info *fs_info = block_group->fs_info;
3428 spin_lock(&block_group->lock);
3429 cleanup = (atomic_dec_and_test(&block_group->frozen) &&
3430 block_group->removed);
3431 spin_unlock(&block_group->lock);
3436 em = lookup_extent_mapping(em_tree, block_group->start,
3451 __btrfs_remove_free_space_cache(block_group->free_space_ctl);
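Note: btrfs_unfreeze_block_group() (source lines 3421-3451) is the other half of the freeze protocol gated at source line 1154: whoever drops the last frozen reference on an already-removed group removes the chunk's extent map and empties the free space ctl. A sketch, with the mainline's BUG_ON on a missing mapping elided:

	bool cleanup;

	spin_lock(&block_group->lock);
	/* Last freezer of a removed group does the deferred cleanup. */
	cleanup = (atomic_dec_and_test(&block_group->frozen) &&
		   block_group->removed);
	spin_unlock(&block_group->lock);

	if (cleanup) {
		struct extent_map *em;

		write_lock(&fs_info->mapping_tree.lock);
		em = lookup_extent_mapping(&fs_info->mapping_tree,
					   block_group->start, 1);
		remove_extent_mapping(&fs_info->mapping_tree, em);
		write_unlock(&fs_info->mapping_tree.lock);
		/* One ref for the lookup above, one for the tree. */
		free_extent_map(em);
		free_extent_map(em);

		__btrfs_remove_free_space_cache(block_group->free_space_ctl);
	}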