Lines matching refs: ac_sb (fs/ext4/mballoc.c)
875 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
885 for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
917 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
949 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
958 for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
959 i < MB_NUM_ORDERS(ac->ac_sb); i++) {
994 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1051 frag_order = mb_avg_fragment_size_order(ac->ac_sb,
1069 if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
2133 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2183 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2240 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2241 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2302 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2306 ext4_lock_group(ac->ac_sb, group);
2318 ext4_unlock_group(ac->ac_sb, group);
2329 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2330 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2340 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2344 ext4_lock_group(ac->ac_sb, group);
2356 start = ext4_grp_offs_to_block(ac->ac_sb, &ex);
2381 ext4_unlock_group(ac->ac_sb, group);
2395 struct super_block *sb = ac->ac_sb;
2414 ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
2417 ext4_mark_group_bitmap_corrupted(ac->ac_sb,
2449 struct super_block *sb = ac->ac_sb;
2532 struct super_block *sb = ac->ac_sb;
2577 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
2578 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2606 if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb))
2645 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2646 struct super_block *sb = ac->ac_sb;
2802 sb = ac->ac_sb;
4000 sb = ac->ac_sb;
4222 struct super_block *sb = ac->ac_sb;
4252 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4290 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4429 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4455 bsbits = ac->ac_sb->s_blocksize_bits;
4516 (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
4534 if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
4535 size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
4560 ext4_msg(ac->ac_sb, KERN_ERR,
4566 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
4581 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
4589 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
4595 mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
4601 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4647 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
4656 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4659 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4676 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4686 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
4698 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
4709 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
4722 mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
4762 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4776 if (ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex) != start)
4792 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4962 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
5167 struct super_block *sb = ac->ac_sb;
5267 struct super_block *sb = ac->ac_sb;
5704 struct super_block *sb = ac->ac_sb;
5742 ext4_mb_show_pa(ac->ac_sb);
5755 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5756 int bsbits = ac->ac_sb->s_blocksize_bits;
5769 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
5834 ac->ac_sb = sb;
5944 struct super_block *sb = ac->ac_sb;
5991 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
6017 ext4_mb_put_pa(ac, ac->ac_sb, pa);
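
Every match above reads the same field: ac_sb, the VFS super_block back-pointer carried in struct ext4_allocation_context. The allocator helpers use it to reach filesystem-wide state, most often via EXT4_SB() and the per-group locking and geometry macros. The sketch below is illustrative only, not kernel source: the function name example_use_of_ac_sb is invented for this note, while every helper it calls appears verbatim in the matches above.

/*
 * Illustrative sketch (assumes the usual fs/ext4 build context,
 * i.e. "ext4.h" and "mballoc.h"): the common ac->ac_sb pattern.
 */
static void example_use_of_ac_sb(struct ext4_allocation_context *ac,
				 ext4_group_t group)
{
	struct super_block *sb = ac->ac_sb;	/* VFS superblock for this allocation */
	struct ext4_sb_info *sbi = EXT4_SB(sb);	/* ext4-private superblock info */

	mb_debug(sb, "sbi %p, clusters per group %lu\n",
		 sbi, (unsigned long)EXT4_CLUSTERS_PER_GROUP(sb));

	/* per-group buddy/bitmap work is done under the group lock */
	ext4_lock_group(sb, group);
	ext4_unlock_group(sb, group);
}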