Lines matching refs: sbi (references to the f2fs_sb_info pointer in the f2fs garbage-collection code, fs/f2fs/gc.c; the leading number on each entry is its line number in that source file)
33 struct f2fs_sb_info *sbi = data;
34 struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
35 wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
36 wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
55 if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
62 if (try_to_freeze() || f2fs_readonly(sbi->sb)) {
63 stat_other_skip_bggc_count(sbi);
69 if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
71 stat_other_skip_bggc_count(sbi);
75 if (time_to_inject(sbi, FAULT_CHECKPOINT))
76 f2fs_stop_checkpoint(sbi, false,
79 if (!sb_start_write_trylock(sbi->sb)) {
80 stat_other_skip_bggc_count(sbi);
97 if (sbi->gc_mode == GC_URGENT_HIGH ||
98 sbi->gc_mode == GC_URGENT_MID) {
100 f2fs_down_write(&sbi->gc_lock);
105 f2fs_down_write(&sbi->gc_lock);
107 } else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
108 stat_other_skip_bggc_count(sbi);
112 if (!is_idle(sbi, GC_TIME)) {
114 f2fs_up_write(&sbi->gc_lock);
115 stat_io_skip_bggc_count(sbi);
119 if (has_enough_invalid_blocks(sbi))
124 stat_inc_gc_call_count(sbi, foreground ?
127 sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
138 if (f2fs_gc(sbi, &gc_control)) {
151 trace_f2fs_background_gc(sbi->sb, wait_ms,
152 prefree_segments(sbi), free_segments(sbi));
155 f2fs_balance_fs_bg(sbi, true);
157 if (sbi->gc_mode != GC_NORMAL) {
158 spin_lock(&sbi->gc_remaining_trials_lock);
159 if (sbi->gc_remaining_trials) {
160 sbi->gc_remaining_trials--;
161 if (!sbi->gc_remaining_trials)
162 sbi->gc_mode = GC_NORMAL;
164 spin_unlock(&sbi->gc_remaining_trials_lock);
166 sb_end_write(sbi->sb);
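
The gc_remaining_trials lines from gc_thread_func above show the GC thread consuming one "trial" per pass while an urgent/boosted gc_mode is active and reverting to GC_NORMAL once the budget runs out. A minimal userspace sketch of that countdown, with simplified stand-in types rather than the kernel structures:

	#include <stdio.h>

	enum gc_mode { GC_NORMAL, GC_URGENT_HIGH };

	struct gc_state {
		enum gc_mode gc_mode;
		unsigned int gc_remaining_trials;	/* trials left in boosted mode */
	};

	static void end_of_gc_pass(struct gc_state *s)
	{
		if (s->gc_mode == GC_NORMAL)
			return;
		/* the kernel holds gc_remaining_trials_lock around this */
		if (s->gc_remaining_trials && --s->gc_remaining_trials == 0)
			s->gc_mode = GC_NORMAL;
	}

	int main(void)
	{
		struct gc_state s = { GC_URGENT_HIGH, 3 };

		for (int pass = 1; pass <= 5; pass++) {
			end_of_gc_pass(&s);
			printf("pass %d: mode=%s trials=%u\n", pass,
			       s.gc_mode == GC_NORMAL ? "NORMAL" : "URGENT_HIGH",
			       s.gc_remaining_trials);
		}
		return 0;
	}
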
172 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
175 dev_t dev = sbi->sb->s_bdev->bd_dev;
177 gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
188 sbi->gc_thread = gc_th;
189 init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
190 init_waitqueue_head(&sbi->gc_thread->fggc_wq);
191 sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
197 sbi->gc_thread = NULL;
204 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
206 struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
213 sbi->gc_thread = NULL;
216 static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
221 if (sbi->am.atgc_enabled)
229 switch (sbi->gc_mode) {
245 static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
248 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
261 p->gc_mode = select_gc_type(sbi, gc_type);
262 p->ofs_unit = sbi->segs_per_sec;
263 if (__is_large_section(sbi)) {
266 0, MAIN_SECS(sbi));
278 (sbi->gc_mode != GC_URGENT_HIGH) &&
280 p->max_search > sbi->max_victim_search)
281 p->max_search = sbi->max_victim_search;
284 if (f2fs_need_rand_seg(sbi))
285 p->offset = get_random_u32_below(MAIN_SECS(sbi) * sbi->segs_per_sec);
286 else if (test_opt(sbi, NOHEAP) &&
290 p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
293 static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
298 return sbi->blocks_per_seg;
304 return 2 * sbi->blocks_per_seg * p->ofs_unit;
313 static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
315 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
323 for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
324 if (sec_usage_check(sbi, secno))
327 return GET_SEG_FROM_SEC(sbi, secno);
332 static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
334 struct sit_info *sit_i = SIT_I(sbi);
335 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
336 unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
342 unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);
345 mtime += get_seg_entry(sbi, start + i)->mtime;
346 vblocks = get_valid_blocks(sbi, segno, true);
351 u = (vblocks * 100) >> sbi->log_blocks_per_seg;
365 static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
369 return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
373 return get_valid_blocks(sbi, segno, true);
375 return get_cb_cost(sbi, segno);
377 f2fs_bug_on(sbi, 1);
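
get_cb_cost(), whose sbi references appear above, scores a candidate by its valid-block percentage u and a normalized age, so the cheapest victim is the coldest and emptiest one. A self-contained sketch of that cost-benefit score; the candidate numbers in main() are made up for the example:

	#include <stdio.h>
	#include <limits.h>

	/* u: percentage of still-valid blocks in the candidate (0..100),
	 * age: 0..100, where 100 is the oldest candidate seen. */
	static unsigned int cb_cost(unsigned int u, unsigned int age)
	{
		/* lower cost == better victim: little live data, long untouched */
		return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
	}

	int main(void)
	{
		printf("hot, mostly valid : %u\n", cb_cost(90, 10));
		printf("cold, mostly free : %u\n", cb_cost(10, 90));
		return 0;
	}
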
393 static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi,
409 f2fs_info(sbi, "broken victim_rbtree, "
420 static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi,
423 struct atgc_management *am = &sbi->am;
438 static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi,
441 struct atgc_management *am = &sbi->am;
455 static void __insert_victim_entry(struct f2fs_sb_info *sbi,
458 struct atgc_management *am = &sbi->am;
478 ve = __create_victim_entry(sbi, mtime, segno);
484 static void add_victim_entry(struct f2fs_sb_info *sbi,
487 struct sit_info *sit_i = SIT_I(sbi);
488 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
489 unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
493 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
495 get_valid_blocks(sbi, segno, true) == 0)
499 for (i = 0; i < sbi->segs_per_sec; i++)
500 mtime += get_seg_entry(sbi, start + i)->mtime;
501 mtime = div_u64(mtime, sbi->segs_per_sec);
517 __insert_victim_entry(sbi, mtime, segno);
520 static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
523 struct sit_info *sit_i = SIT_I(sbi);
524 struct atgc_management *am = &sbi->am;
532 unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi);
564 vblocks = get_valid_blocks(sbi, ve->segno, true);
565 f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);
571 f2fs_bug_on(sbi, age + u >= UINT_MAX);
593 static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
596 struct sit_info *sit_i = SIT_I(sbi);
597 struct atgc_management *am = &sbi->am;
602 unsigned int seg_blocks = sbi->blocks_per_seg;
615 ve = __lookup_victim_entry(sbi, p->age);
628 vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
629 f2fs_bug_on(sbi, !vblocks);
658 static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
661 f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root));
664 atgc_lookup_victim(sbi, p);
666 atssr_lookup_victim(sbi, p);
668 f2fs_bug_on(sbi, 1);
671 static void release_victim_entry(struct f2fs_sb_info *sbi)
673 struct atgc_management *am = &sbi->am;
684 f2fs_bug_on(sbi, am->victim_count);
685 f2fs_bug_on(sbi, !list_empty(&am->victim_list));
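
The atgc_lookup_victim() lines above mix a candidate's age with its unused-block ratio, weighted by an age weight (60% by default), and again treat the smallest cost as the best victim. The sketch below is a simplified fixed-point model of that mix, not the kernel's exact accumulator arithmetic:

	#include <stdio.h>
	#include <limits.h>

	/* score one candidate: older and emptier -> lower cost (better victim) */
	static unsigned int atgc_cost(unsigned long long mtime,
				      unsigned long long min_mtime,
				      unsigned long long max_mtime,
				      unsigned int valid_blks,
				      unsigned int sec_blks,
				      unsigned int age_weight)	/* 0..100 */
	{
		unsigned long long total_time = max_mtime - min_mtime;
		unsigned long long scale = 10000;	/* coarse fixed-point scale */
		unsigned long long age, unused;

		/* age term: how close mtime is to the oldest candidate seen */
		age = scale * (max_mtime - mtime) / (total_time ? total_time : 1)
			* age_weight;
		/* utilization term: blocks a migration would NOT have to copy */
		unused = scale * (sec_blks - valid_blks) / sec_blks
			* (100 - age_weight);

		return UINT_MAX - (unsigned int)(age + unused);
	}

	int main(void)
	{
		/* old, nearly empty section vs. recently written, nearly full one */
		printf("old & empty : %u\n", atgc_cost(100, 100, 1000, 10, 512, 60));
		printf("new & full  : %u\n", atgc_cost(990, 100, 1000, 500, 512, 60));
		return 0;
	}
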
688 static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
690 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
691 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
713 static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
715 unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
717 if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
718 memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
719 DIRTY_I(sbi)->pinned_secmap_cnt = 0;
721 DIRTY_I(sbi)->enable_pin_section = enable;
744 int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
748 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
749 struct sit_info *sm = SIT_I(sbi);
758 last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;
762 p.age_threshold = sbi->am.age_threshold;
765 select_policy(sbi, gc_type, type, &p);
768 p.min_cost = get_max_cost(sbi, &p);
774 SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;
777 if (!get_valid_blocks(sbi, *result, false)) {
782 if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
793 if (__is_large_section(sbi) && p.alloc_mode == LFS) {
794 if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
795 p.min_segno = sbi->next_victim_seg[BG_GC];
797 sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
801 sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
802 p.min_segno = sbi->next_victim_seg[FG_GC];
804 sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
811 p.min_segno = check_bg_victims(sbi);
849 secno = GET_SEC_FROM_SEG(sbi, segno);
851 if (sec_usage_check(sbi, secno))
855 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
861 if (get_ckpt_valid_blocks(sbi, segno, true))
869 if (!f2fs_segment_has_free_slot(sbi, segno))
881 add_victim_entry(sbi, &p, segno);
885 cost = get_gc_cost(sbi, segno, &p);
899 (MAIN_SECS(sbi) * sbi->segs_per_sec);
906 lookup_victim_by_age(sbi, &p);
907 release_victim_entry(sbi);
921 secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
923 sbi->cur_victim_sec = secno;
932 trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
933 sbi->cur_victim_sec,
934 prefree_segments(sbi), free_segments(sbi));
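
At its core, f2fs_get_victim() above is a minimum-cost scan over dirty candidates under the policy chosen by select_policy(). A toy standalone version using a greedy cost (valid blocks per candidate, the typical foreground-GC policy); the candidate data is invented:

	#include <stdio.h>
	#include <limits.h>

	#define NULL_SEGNO UINT_MAX

	static unsigned int pick_victim(const unsigned int *valid_blocks,
					unsigned int nr_segs)
	{
		unsigned int min_cost = UINT_MAX;
		unsigned int min_segno = NULL_SEGNO;

		for (unsigned int segno = 0; segno < nr_segs; segno++) {
			unsigned int cost = valid_blocks[segno];	/* GC_GREEDY */

			if (cost < min_cost) {
				min_cost = cost;
				min_segno = segno;
			}
		}
		return min_segno;
	}

	int main(void)
	{
		unsigned int valid_blocks[] = { 400, 12, 300, 512, 7 };

		/* expect segno 4: only 7 valid blocks to migrate */
		printf("victim segno = %u\n", pick_victim(valid_blocks, 5));
		return 0;
	}
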
978 static int check_valid_map(struct f2fs_sb_info *sbi,
981 struct sit_info *sit_i = SIT_I(sbi);
986 sentry = get_seg_entry(sbi, segno);
997 static int gc_node_segment(struct f2fs_sb_info *sbi,
1006 unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
1008 start_addr = START_BLOCK(sbi, segno);
1014 atomic_inc(&sbi->wb_sync_req[NODE]);
1023 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
1026 if (check_valid_map(sbi, segno, off) == 0)
1030 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
1036 f2fs_ra_node_page(sbi, nid);
1041 node_page = f2fs_get_node_page(sbi, nid);
1046 if (check_valid_map(sbi, segno, off) == 0) {
1051 if (f2fs_get_node_info(sbi, nid, &ni, false)) {
1064 stat_inc_node_blk_count(sbi, 1, gc_type);
1071 atomic_dec(&sbi->wb_sync_req[NODE]);
1104 static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
1115 node_page = f2fs_get_node_page(sbi, nid);
1119 if (f2fs_get_node_info(sbi, nid, dni, false)) {
1125 f2fs_warn(sbi, "%s: valid data with mismatched node version.",
1127 set_sbi_flag(sbi, SBI_NEED_FSCK);
1130 if (f2fs_check_nid_range(sbi, dni->ino)) {
1144 f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
1156 unsigned int segno = GET_SEGNO(sbi, blkaddr);
1157 unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
1159 if (unlikely(check_valid_map(sbi, segno, offset))) {
1160 if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
1161 f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
1163 set_sbi_flag(sbi, SBI_NEED_FSCK);
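
is_alive() above validates the reverse mapping recorded in the segment summary: a data block is only still live if the owning node (nid) still points at that block address at ofs_in_node. A toy model of that check, with an invented node layout:

	#include <stdio.h>
	#include <stdbool.h>

	#define ADDRS_PER_NODE 4

	struct node_block {
		unsigned int addr[ADDRS_PER_NODE];	/* data block pointers */
	};

	/* stale summaries happen when the block was since rewritten elsewhere */
	static bool is_alive(const struct node_block *node,
			     unsigned int ofs_in_node, unsigned int blkaddr)
	{
		return node->addr[ofs_in_node] == blkaddr;
	}

	int main(void)
	{
		struct node_block node = { { 100, 205, 0, 317 } };

		printf("blk 205 via slot 1: %s\n", is_alive(&node, 1, 205) ? "live" : "stale");
		printf("blk 206 via slot 1: %s\n", is_alive(&node, 1, 206) ? "live" : "stale");
		return 0;
	}
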
1174 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1179 .sbi = sbi,
1197 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
1200 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
1216 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
1219 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
1235 fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
1249 f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
1250 f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE);
1268 .sbi = F2FS_I_SB(inode),
1284 bool lfs_mode = f2fs_lfs_mode(fio.sbi);
1285 int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
1286 (fio.sbi->gc_mode != GC_URGENT_HIGH) ?
1322 err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
1331 f2fs_down_write(&fio.sbi->io_order_lock);
1333 mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
1350 f2fs_update_iostat(fio.sbi, inode, FS_DATA_READ_IO,
1352 f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO,
1356 if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
1367 f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
1370 fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
1384 f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr);
1388 dec_page_count(fio.sbi, F2FS_DIRTY_META);
1403 f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE);
1411 f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
1415 f2fs_up_write(&fio.sbi->io_order_lock);
1451 .sbi = F2FS_I_SB(inode),
1499 static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
1503 struct super_block *sb = sbi->sb;
1509 unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
1511 start_addr = START_BLOCK(sbi, segno);
1529 if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
1530 (!force_migrate && get_valid_blocks(sbi, segno, true) ==
1531 CAP_BLKS_PER_SEC(sbi)))
1534 if (check_valid_map(sbi, segno, off) == 0)
1538 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
1544 f2fs_ra_node_page(sbi, nid);
1549 if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
1553 f2fs_ra_node_page(sbi, dni.ino);
1576 sbi->skipped_gc_rwsem++;
1617 sbi->skipped_gc_rwsem++;
1622 sbi->skipped_gc_rwsem++;
1650 stat_inc_data_blk_count(sbi, 1, gc_type);
1660 static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
1663 struct sit_info *sit_i = SIT_I(sbi);
1667 ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE, LFS, 0);
1672 static int do_garbage_collect(struct f2fs_sb_info *sbi,
1681 unsigned int end_segno = start_segno + sbi->segs_per_sec;
1683 unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
1688 if (__is_large_section(sbi))
1689 end_segno = rounddown(end_segno, sbi->segs_per_sec);
1696 if (f2fs_sb_has_blkzoned(sbi))
1697 end_segno -= sbi->segs_per_sec -
1698 f2fs_usable_segs_in_sec(sbi, segno);
1700 sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);
1703 if (__is_large_section(sbi))
1704 f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
1709 sum_page = f2fs_get_sum_page(sbi, segno++);
1715 sum_page = find_get_page(META_MAPPING(sbi),
1716 GET_SUM_BLOCK(sbi, segno));
1730 sum_page = find_get_page(META_MAPPING(sbi),
1731 GET_SUM_BLOCK(sbi, segno));
1734 if (get_valid_blocks(sbi, segno, false) == 0)
1736 if (gc_type == BG_GC && __is_large_section(sbi) &&
1737 migrated >= sbi->migration_granularity)
1739 if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
1744 f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
1746 set_sbi_flag(sbi, SBI_NEED_FSCK);
1747 f2fs_stop_checkpoint(sbi, false,
1760 submitted += gc_node_segment(sbi, sum->entries, segno,
1763 submitted += gc_data_segment(sbi, sum->entries, gc_list,
1767 stat_inc_gc_seg_count(sbi, data_type, gc_type);
1768 sbi->gc_reclaimed_segs[sbi->gc_mode]++;
1773 get_valid_blocks(sbi, segno, false) == 0)
1776 if (__is_large_section(sbi))
1777 sbi->next_victim_seg[gc_type] =
1784 f2fs_submit_merged_write(sbi, data_type);
1789 stat_inc_gc_sec_count(sbi, data_type, gc_type);
1794 int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
1808 trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
1810 get_pages(sbi, F2FS_DIRTY_NODES),
1811 get_pages(sbi, F2FS_DIRTY_DENTS),
1812 get_pages(sbi, F2FS_DIRTY_IMETA),
1813 free_sections(sbi),
1814 free_segments(sbi),
1815 reserved_segments(sbi),
1816 prefree_segments(sbi));
1818 cpc.reason = __get_cp_reason(sbi);
1820 sbi->skipped_gc_rwsem = 0;
1821 if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
1825 if (unlikely(f2fs_cp_error(sbi))) {
1831 if (has_not_enough_free_secs(sbi, 0, 0)) {
1839 if (prefree_segments(sbi)) {
1840 stat_inc_cp_call_count(sbi, TOTAL_CALL);
1841 ret = f2fs_write_checkpoint(sbi, &cpc);
1855 ret = __get_victim(sbi, &segno, gc_type);
1859 f2fs_pinned_section_exists(DIRTY_I(sbi))) {
1860 f2fs_unpin_all_sections(sbi, false);
1866 seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
1870 if (seg_freed == f2fs_usable_segs_in_sec(sbi, segno)) {
1876 sbi->cur_victim_sec = NULL_SEGNO;
1878 if (has_enough_free_secs(sbi, sec_freed, 0)) {
1884 if (sbi->skipped_gc_rwsem)
1889 stat_inc_cp_call_count(sbi, TOTAL_CALL);
1890 ret = f2fs_write_checkpoint(sbi, &cpc);
1893 } else if (has_enough_free_secs(sbi, 0, 0)) {
1897 __get_secs_required(sbi, NULL, &upper_secs, NULL);
1903 if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS &&
1904 prefree_segments(sbi)) {
1905 stat_inc_cp_call_count(sbi, TOTAL_CALL);
1906 ret = f2fs_write_checkpoint(sbi, &cpc);
1917 SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
1918 SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;
1921 f2fs_unpin_all_sections(sbi, true);
1923 trace_f2fs_gc_end(sbi->sb, ret, total_freed, total_sec_freed,
1924 get_pages(sbi, F2FS_DIRTY_NODES),
1925 get_pages(sbi, F2FS_DIRTY_DENTS),
1926 get_pages(sbi, F2FS_DIRTY_IMETA),
1927 free_sections(sbi),
1928 free_segments(sbi),
1929 reserved_segments(sbi),
1930 prefree_segments(sbi));
1932 f2fs_up_write(&sbi->gc_lock);
1953 static void init_atgc_management(struct f2fs_sb_info *sbi)
1955 struct atgc_management *am = &sbi->am;
1957 if (test_opt(sbi, ATGC) &&
1958 SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
1971 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
1973 sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
1976 if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
1977 SIT_I(sbi)->last_victim[ALLOC_NEXT] =
1978 GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
1980 init_atgc_management(sbi);
1983 static int free_segment_range(struct f2fs_sb_info *sbi,
1993 MAIN_SECS(sbi) -= secs;
1994 start = MAIN_SECS(sbi) * sbi->segs_per_sec;
1995 end = MAIN_SEGS(sbi) - 1;
1997 mutex_lock(&DIRTY_I(sbi)->seglist_lock);
1999 if (SIT_I(sbi)->last_victim[gc_mode] >= start)
2000 SIT_I(sbi)->last_victim[gc_mode] = 0;
2003 if (sbi->next_victim_seg[gc_type] >= start)
2004 sbi->next_victim_seg[gc_type] = NULL_SEGNO;
2005 mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2009 f2fs_allocate_segment_for_resize(sbi, type, start, end);
2012 for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
2018 do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
2021 if (!gc_only && get_valid_blocks(sbi, segno, true)) {
2033 stat_inc_cp_call_count(sbi, TOTAL_CALL);
2034 err = f2fs_write_checkpoint(sbi, &cpc);
2038 next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
2040 f2fs_err(sbi, "segno %u should be free but still inuse!",
2042 f2fs_bug_on(sbi, 1);
2045 MAIN_SECS(sbi) += secs;
2049 static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
2051 struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
2056 int segs = secs * sbi->segs_per_sec;
2058 f2fs_down_write(&sbi->sb_lock);
2069 (long long)segs * sbi->blocks_per_seg);
2070 if (f2fs_is_multi_device(sbi)) {
2071 int last_dev = sbi->s_ndevs - 1;
2079 f2fs_up_write(&sbi->sb_lock);
2082 static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
2084 int segs = secs * sbi->segs_per_sec;
2085 long long blks = (long long)segs * sbi->blocks_per_seg;
2087 le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
2089 SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
2090 MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
2091 MAIN_SECS(sbi) += secs;
2092 FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
2093 FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
2094 F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
2096 if (f2fs_is_multi_device(sbi)) {
2097 int last_dev = sbi->s_ndevs - 1;
2105 div_u64(blks, sbi->blocks_per_blkz);
2112 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2119 old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
2123 if (f2fs_is_multi_device(sbi)) {
2124 int last_dev = sbi->s_ndevs - 1;
2127 if (block_count + last_segs * sbi->blocks_per_seg <=
2133 div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
2140 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
2141 f2fs_err(sbi, "Should run fsck to repair first.");
2145 if (test_opt(sbi, DISABLE_CHECKPOINT)) {
2146 f2fs_err(sbi, "Checkpoint should be enabled.");
2155 secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
2158 if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2164 f2fs_lock_op(sbi);
2166 spin_lock(&sbi->stat_lock);
2167 if (shrunk_blocks + valid_user_blocks(sbi) +
2168 sbi->current_reserved_blocks + sbi->unusable_block_count +
2169 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2171 spin_unlock(&sbi->stat_lock);
2176 err = free_segment_range(sbi, secs, true);
2179 f2fs_unlock_op(sbi);
2180 f2fs_up_write(&sbi->gc_lock);
2186 err = freeze_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
2190 if (f2fs_readonly(sbi->sb)) {
2191 err = thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
2197 f2fs_down_write(&sbi->gc_lock);
2198 f2fs_down_write(&sbi->cp_global_sem);
2200 spin_lock(&sbi->stat_lock);
2201 if (shrunk_blocks + valid_user_blocks(sbi) +
2202 sbi->current_reserved_blocks + sbi->unusable_block_count +
2203 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2206 sbi->user_block_count -= shrunk_blocks;
2207 spin_unlock(&sbi->stat_lock);
2211 set_sbi_flag(sbi, SBI_IS_RESIZEFS);
2212 err = free_segment_range(sbi, secs, false);
2216 update_sb_metadata(sbi, -secs);
2218 err = f2fs_commit_super(sbi, false);
2220 update_sb_metadata(sbi, secs);
2224 update_fs_metadata(sbi, -secs);
2225 clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
2226 set_sbi_flag(sbi, SBI_IS_DIRTY);
2228 stat_inc_cp_call_count(sbi, TOTAL_CALL);
2229 err = f2fs_write_checkpoint(sbi, &cpc);
2231 update_fs_metadata(sbi, secs);
2232 update_sb_metadata(sbi, secs);
2233 f2fs_commit_super(sbi, false);
2236 clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
2238 set_sbi_flag(sbi, SBI_NEED_FSCK);
2239 f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
2241 spin_lock(&sbi->stat_lock);
2242 sbi->user_block_count += shrunk_blocks;
2243 spin_unlock(&sbi->stat_lock);
2246 f2fs_up_write(&sbi->cp_global_sem);
2247 f2fs_up_write(&sbi->gc_lock);
2248 thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
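
The shrink path above (f2fs_resize_fs with update_sb_metadata / update_fs_metadata) rejects target sizes that are not section-aligned and then propagates the delta as sections, segments, and blocks. A standalone sketch of that unit conversion, using illustrative default geometry (2 MiB segments of 512 four-KiB blocks, one segment per section) rather than values read from a real filesystem:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		const uint64_t blocks_per_seg = 512;	/* 2 MiB segment / 4 KiB block */
		const uint64_t segs_per_sec   = 1;
		const uint64_t blks_per_sec   = blocks_per_seg * segs_per_sec;

		uint64_t old_block_count = 4 << 18;	/* 4 GiB worth of 4 KiB blocks */
		uint64_t new_block_count = 3 << 18;	/* shrink to 3 GiB */

		if (new_block_count % blks_per_sec) {
			fprintf(stderr, "new size must be section-aligned\n");
			return 1;
		}

		uint64_t shrunk_blocks = old_block_count - new_block_count;
		uint64_t secs = shrunk_blocks / blks_per_sec;	/* sections to free */
		uint64_t segs = secs * segs_per_sec;
		uint64_t blks = segs * blocks_per_seg;

		printf("free %llu sections = %llu segments = %llu blocks\n",
		       (unsigned long long)secs, (unsigned long long)segs,
		       (unsigned long long)blks);
		return 0;
	}
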