Lines matching refs: bg
319 struct btrfs_block_group *bg;
322 bg = btrfs_lookup_block_group(fs_info, bytenr);
323 if (!bg)
326 spin_lock(&bg->lock);
327 if (bg->ro)
330 atomic_inc(&bg->nocow_writers);
331 spin_unlock(&bg->lock);
334 btrfs_put_block_group(bg);
339 return bg;
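
The fragments at lines 319-339 come from the helper that takes a NOCOW-writer reference on a block group. A minimal sketch of how they fit together, assuming the usual lookup/lock/refcount pattern (the exact signature and error path are inferred, not verbatim source):

struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info,
						  u64 bytenr)
{
	struct btrfs_block_group *bg;
	bool can_nocow = true;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return NULL;

	spin_lock(&bg->lock);
	if (bg->ro)
		can_nocow = false;	/* read-only groups reject NOCOW writes */
	else
		atomic_inc(&bg->nocow_writers);
	spin_unlock(&bg->lock);

	if (!can_nocow) {
		btrfs_put_block_group(bg);
		return NULL;
	}

	/* The returned reference is dropped by btrfs_dec_nocow_writers(). */
	return bg;
}
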
353 void btrfs_dec_nocow_writers(struct btrfs_block_group *bg)
355 if (atomic_dec_and_test(&bg->nocow_writers))
356 wake_up_var(&bg->nocow_writers);
359 btrfs_put_block_group(bg);
362 void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
364 wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
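
btrfs_dec_nocow_writers() drops the writer count, wakes anyone blocked in btrfs_wait_nocow_writers() via wake_up_var() on the same counter, and releases the reference taken at lookup time. A hedged usage sketch of the pairing (do_nocow_write() is a hypothetical stand-in for the caller's actual work):

	bg = btrfs_inc_nocow_writers(fs_info, bytenr);
	if (bg) {
		do_nocow_write(inode, bytenr);	/* hypothetical writer work */
		btrfs_dec_nocow_writers(bg);	/* may wake btrfs_wait_nocow_writers() */
	}
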
370 struct btrfs_block_group *bg;
372 bg = btrfs_lookup_block_group(fs_info, start);
373 ASSERT(bg);
374 if (atomic_dec_and_test(&bg->reservations))
375 wake_up_var(&bg->reservations);
376 btrfs_put_block_group(bg);
379 void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
381 struct btrfs_space_info *space_info = bg->space_info;
383 ASSERT(bg->ro);
385 if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
401 wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
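
The reservations counter uses the same wait_var_event() pattern. Assembled from the fragments at lines 370-401, the pair looks roughly like this; the dec side's signature (fs_info plus a start offset) is inferred from the lookup, and the groups_sem barrier is an assumption about what sits between lines 385 and 401:

void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info, u64 start)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, start);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->reservations))
		wake_up_var(&bg->reservations);
	btrfs_put_block_group(bg);
}

void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
{
	struct btrfs_space_info *space_info = bg->space_info;

	/* Only read-only DATA groups are waited on (lines 383-385). */
	ASSERT(bg->ro);
	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
		return;

	/*
	 * Assumed: a write lock/unlock of space_info->groups_sem acts as a
	 * barrier against allocators that already picked this group, before
	 * the actual wait at line 401.
	 */
	down_write(&space_info->groups_sem);
	up_write(&space_info->groups_sem);

	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
}
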
838 static inline void btrfs_free_excluded_extents(const struct btrfs_block_group *bg)
840 clear_extent_bits(&bg->fs_info->excluded_extents, bg->start,
841 bg->start + bg->length - 1, EXTENT_UPTODATE);
1380 * Here we make sure that if we mark this bg RO, we still have enough
1420 struct btrfs_block_group *bg)
1422 struct btrfs_fs_info *fs_info = bg->fs_info;
1424 const u64 start = bg->start;
1425 const u64 end = start + bg->length - 1;
1721 void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
1723 struct btrfs_fs_info *fs_info = bg->fs_info;
1726 if (list_empty(&bg->bg_list)) {
1727 btrfs_get_block_group(bg);
1728 trace_btrfs_add_unused_block_group(bg);
1729 list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
1730 } else if (!test_bit(BLOCK_GROUP_FLAG_NEW, &bg->runtime_flags)) {
1732 trace_btrfs_add_unused_block_group(bg);
1733 list_move_tail(&bg->bg_list, &fs_info->unused_bgs);
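
Lines 1721-1733 are the body of btrfs_mark_bg_unused(). Reassembled as a sketch, assuming the unused_bgs list is protected by fs_info->unused_bgs_lock:

void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	spin_lock(&fs_info->unused_bgs_lock);	/* assumed lock */
	if (list_empty(&bg->bg_list)) {
		/* First time on any list: pin the group and queue it. */
		btrfs_get_block_group(bg);
		trace_btrfs_add_unused_block_group(bg);
		list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
	} else if (!test_bit(BLOCK_GROUP_FLAG_NEW, &bg->runtime_flags)) {
		/* Already queued elsewhere: requeue at the tail of unused_bgs. */
		trace_btrfs_add_unused_block_group(bg);
		list_move_tail(&bg->bg_list, &fs_info->unused_bgs);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
}
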
1760 static bool should_reclaim_block_group(struct btrfs_block_group *bg, u64 bytes_freed)
1762 const struct btrfs_space_info *space_info = bg->space_info;
1764 const u64 new_val = bg->used;
1771 thresh = mult_perc(bg->length, reclaim_thresh);
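
should_reclaim_block_group() compares the group's usage against a percentage threshold of its length. A sketch filling in the plumbing around lines 1760-1771; the threshold field name and the old_val/new_val crossing test are assumptions about how a free is detected to drop usage below the threshold:

static bool should_reclaim_block_group(struct btrfs_block_group *bg, u64 bytes_freed)
{
	const struct btrfs_space_info *space_info = bg->space_info;
	const int reclaim_thresh = READ_ONCE(space_info->bg_reclaim_threshold);
	const u64 new_val = bg->used;
	const u64 old_val = new_val + bytes_freed;
	u64 thresh;

	if (reclaim_thresh == 0)
		return false;

	thresh = mult_perc(bg->length, reclaim_thresh);

	/* Reclaim only when this free drops usage below the threshold. */
	return old_val >= thresh && new_val < thresh;
}
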
1788 struct btrfs_block_group *bg;
1828 bg = list_first_entry(&fs_info->reclaim_bgs,
1831 list_del_init(&bg->bg_list);
1833 space_info = bg->space_info;
1839 spin_lock(&bg->lock);
1840 if (bg->reserved || bg->pinned || bg->ro) {
1847 spin_unlock(&bg->lock);
1851 if (bg->used == 0) {
1864 btrfs_mark_bg_unused(bg);
1865 spin_unlock(&bg->lock);
1880 if (!should_reclaim_block_group(bg, bg->length)) {
1881 spin_unlock(&bg->lock);
1885 spin_unlock(&bg->lock);
1906 zone_unusable = bg->zone_unusable;
1907 ret = inc_block_group_ro(bg, 0);
1914 bg->start,
1915 div64_u64(bg->used * 100, bg->length),
1916 div64_u64(zone_unusable * 100, bg->length));
1917 trace_btrfs_reclaim_block_group(bg);
1918 ret = btrfs_relocate_chunk(fs_info, bg->start);
1920 btrfs_dec_block_group_ro(bg);
1922 bg->start);
1927 btrfs_mark_bg_to_reclaim(bg);
1928 btrfs_put_block_group(bg);
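
The fragments from lines 1788-1928 belong to the reclaim worker. Condensed into a control-flow sketch (the reclaim-list locking, the zoned zone_unusable accounting, and the progress message at lines 1914-1916 are elided):

	while (!list_empty(&fs_info->reclaim_bgs)) {
		bg = list_first_entry(&fs_info->reclaim_bgs,
				      struct btrfs_block_group, bg_list);
		list_del_init(&bg->bg_list);

		spin_lock(&bg->lock);
		if (bg->reserved || bg->pinned || bg->ro) {
			/* Busy or already read-only: leave it alone for now. */
			spin_unlock(&bg->lock);
			goto next;
		}
		if (bg->used == 0) {
			/* Fully empty: hand it to the unused-bg deletion path. */
			btrfs_mark_bg_unused(bg);
			spin_unlock(&bg->lock);
			goto next;
		}
		if (!should_reclaim_block_group(bg, bg->length)) {
			spin_unlock(&bg->lock);
			goto next;
		}
		spin_unlock(&bg->lock);

		if (inc_block_group_ro(bg, 0))
			goto next;

		trace_btrfs_reclaim_block_group(bg);
		ret = btrfs_relocate_chunk(fs_info, bg->start);
		if (ret) {
			/* Relocation failed: undo RO and requeue for later. */
			btrfs_dec_block_group_ro(bg);
			btrfs_mark_bg_to_reclaim(bg);
		}
next:
		btrfs_put_block_group(bg);
	}

btrfs_mark_bg_to_reclaim() (lines 1959-1967) mirrors btrfs_mark_bg_unused() above, queueing onto fs_info->reclaim_bgs instead.
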
1959 void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg)
1961 struct btrfs_fs_info *fs_info = bg->fs_info;
1964 if (list_empty(&bg->bg_list)) {
1965 btrfs_get_block_group(bg);
1966 trace_btrfs_add_reclaim_block_group(bg);
1967 list_add_tail(&bg->bg_list, &fs_info->reclaim_bgs);
1977 struct btrfs_block_group_item bg;
1992 "logical %llu len %llu found bg but no related chunk",
2005 read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot),
2006 sizeof(bg));
2007 flags = btrfs_stack_block_group_flags(&bg) &
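
Note that the bg at line 1977 is a different animal: it is an on-disk struct btrfs_block_group_item on the stack, not the in-memory block group; it matched this listing only because the variable is also named bg. The fragments copy the item out of a leaf and mask its flags, roughly (the continuation of the mask expression at line 2007 is an assumption):

	struct btrfs_block_group_item bg;
	u64 flags;

	read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot),
			   sizeof(bg));
	flags = btrfs_stack_block_group_flags(&bg) &
		BTRFS_BLOCK_GROUP_TYPE_MASK;
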
2249 struct btrfs_block_group *bg;
2265 bg = btrfs_lookup_block_group(fs_info, em->start);
2266 if (!bg) {
2274 if (bg->start != em->start || bg->length != em->len ||
2275 (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
2281 bg->start, bg->length,
2282 bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
2285 btrfs_put_block_group(bg);
2290 btrfs_put_block_group(bg);
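
Lines 2249-2290 cross-check each chunk mapping against its block group: the group must exist and must agree with the mapping on start, length, and type flags. A sketch of the check inside the loop over mappings; the error code and message wording are assumptions:

	bg = btrfs_lookup_block_group(fs_info, em->start);
	if (!bg) {
		btrfs_err(fs_info,
	"chunk start=%llu len=%llu has no corresponding block group",
			  em->start, em->len);
		ret = -EUCLEAN;	/* assumed error code */
		break;
	}
	if (bg->start != em->start || bg->length != em->len ||
	    (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
	    (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
		btrfs_err(fs_info,
	"chunk/block group mismatch: start=%llu len=%llu flags=0x%llx",
			  bg->start, bg->length,
			  bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
		ret = -EUCLEAN;
		btrfs_put_block_group(bg);
		break;
	}
	btrfs_put_block_group(bg);
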
2335 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
2343 btrfs_err(info, "zoned: failed to load zone info of bg %llu",
2424 struct btrfs_block_group *bg;
2428 bg = btrfs_create_block_group_cache(fs_info, em->start);
2429 if (!bg) {
2435 bg->length = em->len;
2436 bg->flags = map->type;
2437 bg->cached = BTRFS_CACHE_FINISHED;
2438 bg->used = em->len;
2439 bg->flags = map->type;
2440 ret = btrfs_add_block_group_cache(fs_info, bg);
2447 btrfs_put_block_group(bg);
2452 btrfs_remove_free_space_cache(bg);
2453 btrfs_put_block_group(bg);
2457 btrfs_add_bg_to_space_info(fs_info, bg);
2459 set_avail_alloc_bits(fs_info, bg->flags);
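
Lines 2424-2459 populate a dummy block group per chunk mapping, likely the rescue path used when the extent tree cannot be read: each group is created pretending to be fully used and already cached. The listing shows bg->flags = map->type assigned twice (lines 2436 and 2439); the second assignment is redundant. A cleaned-up sketch with the error paths compressed:

	bg = btrfs_create_block_group_cache(fs_info, em->start);
	if (!bg) {
		ret = -ENOMEM;
		break;
	}

	/* Pretend the group is fully used and its caching is finished. */
	bg->length = em->len;
	bg->flags = map->type;
	bg->cached = BTRFS_CACHE_FINISHED;
	bg->used = em->len;

	ret = btrfs_add_block_group_cache(fs_info, bg);
	if (ret) {
		btrfs_remove_free_space_cache(bg);
		btrfs_put_block_group(bg);
		break;
	}

	btrfs_add_bg_to_space_info(fs_info, bg);
	set_avail_alloc_bits(fs_info, bg->flags);
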
2838 * assigned to our block group. We want our bg to be added to the rbtree
2959 * Skip chunk allocation if the bg is SYSTEM, this is to avoid system
3835 struct btrfs_block_group *bg;
3846 bg = btrfs_create_chunk(trans, flags);
3847 if (IS_ERR(bg)) {
3848 ret = PTR_ERR(bg);
3852 ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
3908 ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
3923 btrfs_get_block_group(bg);
3924 return bg;
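
Lines 3835-3924 come from the chunk-allocation helper: create the chunk, insert its chunk item, and return the new group with an extra reference for the caller. The second btrfs_chunk_alloc_add_chunk_item() call at line 3908 suggests a retry after running out of SYSTEM space; the -ENOSPC handling below is an assumption about that path:

	bg = btrfs_create_chunk(trans, flags);
	if (IS_ERR(bg)) {
		ret = PTR_ERR(bg);
		return ERR_PTR(ret);
	}

	ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
	if (ret == -ENOSPC) {
		/*
		 * Assumed: a new SYSTEM chunk is allocated first (elided
		 * here), then the original insert is retried; this is the
		 * second call at line 3908.
		 */
		ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
	}
	if (ret)
		return ERR_PTR(ret);

	/* Hand the caller a reference to the freshly created group. */
	btrfs_get_block_group(bg);
	return bg;
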
4215 struct btrfs_block_group *bg;
4223 bg = btrfs_create_chunk(trans, flags);
4224 if (IS_ERR(bg)) {
4225 ret = PTR_ERR(bg);
4245 btrfs_chunk_alloc_add_chunk_item(trans, bg);
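
Lines 4215-4245 allocate a chunk while reserving space; note the bare call at line 4245 whose return value is not checked. A sketch of that spot, with the rationale for ignoring the error given as an assumption:

	bg = btrfs_create_chunk(trans, flags);
	if (IS_ERR(bg)) {
		ret = PTR_ERR(bg);
	} else {
		/*
		 * Assumed: the return value is ignored on purpose because a
		 * failed chunk-item insert is retried later, when pending
		 * block groups are created at transaction commit time.
		 */
		btrfs_chunk_alloc_add_chunk_item(trans, bg);
	}
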
4499 bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg)
4503 spin_lock(&bg->lock);
4504 if (bg->ro)
4507 bg->swap_extents++;
4508 spin_unlock(&bg->lock);
4513 void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount)
4515 spin_lock(&bg->lock);
4516 ASSERT(!bg->ro);
4517 ASSERT(bg->swap_extents >= amount);
4518 bg->swap_extents -= amount;
4519 spin_unlock(&bg->lock);
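
The swap-extent counters guard against activating a swapfile in a group that is, or later becomes, read-only: the increment fails on ro groups, and the decrement asserts the group never went ro while swap extents were pinned. Assembled from lines 4499-4519:

bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg)
{
	bool ret = true;

	spin_lock(&bg->lock);
	if (bg->ro)
		ret = false;	/* cannot pin swap extents in a ro group */
	else
		bg->swap_extents++;
	spin_unlock(&bg->lock);

	return ret;
}

void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount)
{
	spin_lock(&bg->lock);
	ASSERT(!bg->ro);
	ASSERT(bg->swap_extents >= amount);
	bg->swap_extents -= amount;
	spin_unlock(&bg->lock);
}
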
4534 * @bg: The block group we allocated in.
4550 int btrfs_use_block_group_size_class(struct btrfs_block_group *bg,
4557 if (bg->size_class == size_class)
4569 if (bg->size_class != BTRFS_BG_SZ_NONE) {
4578 bg->size_class = size_class;
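
btrfs_use_block_group_size_class() either confirms the group's existing size class, adopts one if none is set yet, or refuses the allocation so the caller can look for a better-matching group. A sketch around lines 4550-4578; the force parameter and the -EAGAIN return are inferred from the surrounding structure:

int btrfs_use_block_group_size_class(struct btrfs_block_group *bg,
				     enum btrfs_block_group_size_class size_class,
				     bool force_wrong_size_class)
{
	ASSERT(size_class != BTRFS_BG_SZ_NONE);

	/* Already the right class: nothing to do (line 4557). */
	if (bg->size_class == size_class)
		return 0;

	/* Assumed: callers may override the mismatch check under pressure. */
	if (force_wrong_size_class)
		return 0;

	/* A different class is set (line 4569): tell the caller to retry. */
	if (bg->size_class != BTRFS_BG_SZ_NONE)
		return -EAGAIN;

	bg->size_class = size_class;
	return 0;
}
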
4583 bool btrfs_block_group_should_use_size_class(struct btrfs_block_group *bg)
4585 if (btrfs_is_zoned(bg->fs_info))
4587 if (!btrfs_is_block_group_data_only(bg))
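
Finally, size classes only apply to non-zoned, data-only block groups; lines 4585 and 4587 are the two early-outs. Completed as a sketch:

bool btrfs_block_group_should_use_size_class(struct btrfs_block_group *bg)
{
	/* Zoned filesystems manage allocation per zone instead. */
	if (btrfs_is_zoned(bg->fs_info))
		return false;
	/* Only pure DATA groups participate in size classes. */
	if (!btrfs_is_block_group_data_only(bg))
		return false;
	return true;
}
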