Lines matching refs:sbi — references to the struct f2fs_sb_info pointer throughout the f2fs garbage-collection code
32 struct f2fs_sb_info *sbi = data;
33 struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
34 wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
35 wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
50 if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
58 stat_other_skip_bggc_count(sbi);
64 if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
66 stat_other_skip_bggc_count(sbi);
70 if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
71 f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
72 f2fs_stop_checkpoint(sbi, false);
75 if (!sb_start_write_trylock(sbi->sb)) {
76 stat_other_skip_bggc_count(sbi);
93 if (sbi->gc_mode == GC_URGENT_HIGH) {
95 down_write(&sbi->gc_lock);
100 down_write(&sbi->gc_lock);
102 } else if (!down_write_trylock(&sbi->gc_lock)) {
103 stat_other_skip_bggc_count(sbi);
107 if (!is_idle(sbi, GC_TIME)) {
109 up_write(&sbi->gc_lock);
110 stat_io_skip_bggc_count(sbi);
114 if (has_enough_invalid_blocks(sbi))
120 stat_inc_bggc_count(sbi->stat_info);
122 sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
129 if (f2fs_gc(sbi, sync_mode, !foreground, false, NULL_SEGNO))
135 trace_f2fs_background_gc(sbi->sb, wait_ms,
136 prefree_segments(sbi), free_segments(sbi));
139 f2fs_balance_fs_bg(sbi, true);
141 sb_end_write(sbi->sb);
147 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
150 dev_t dev = sbi->sb->s_bdev->bd_dev;
153 gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
166 sbi->gc_thread = gc_th;
167 init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
168 init_waitqueue_head(&sbi->gc_thread->fggc_wq);
169 sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
174 sbi->gc_thread = NULL;
180 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
182 struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
188 sbi->gc_thread = NULL;
191 static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
196 if (sbi->am.atgc_enabled)
204 switch (sbi->gc_mode) {
220 static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
223 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
236 p->gc_mode = select_gc_type(sbi, gc_type);
237 p->ofs_unit = sbi->segs_per_sec;
238 if (__is_large_section(sbi)) {
241 0, MAIN_SECS(sbi));
253 (sbi->gc_mode != GC_URGENT_HIGH) &&
255 p->max_search > sbi->max_victim_search)
256 p->max_search = sbi->max_victim_search;
259 if (test_opt(sbi, NOHEAP) &&
263 p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
266 static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
271 return sbi->blocks_per_seg;
277 return 2 * sbi->blocks_per_seg * p->ofs_unit;
286 static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
288 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
296 for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
297 if (sec_usage_check(sbi, secno))
300 return GET_SEG_FROM_SEC(sbi, secno);
305 static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
307 struct sit_info *sit_i = SIT_I(sbi);
308 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
309 unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
315 unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);
318 mtime += get_seg_entry(sbi, start + i)->mtime;
319 vblocks = get_valid_blocks(sbi, segno, true);
324 u = (vblocks * 100) >> sbi->log_blocks_per_seg;
338 static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
342 return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
346 return get_valid_blocks(sbi, segno, true);
348 return get_cb_cost(sbi, segno);
350 f2fs_bug_on(sbi, 1);
366 static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi,
371 struct atgc_management *am = &sbi->am;
389 static void insert_victim_entry(struct f2fs_sb_info *sbi,
392 struct atgc_management *am = &sbi->am;
397 p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most);
398 attach_victim_entry(sbi, mtime, segno, parent, p, left_most);
401 static void add_victim_entry(struct f2fs_sb_info *sbi,
404 struct sit_info *sit_i = SIT_I(sbi);
405 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
406 unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
410 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
412 get_valid_blocks(sbi, segno, true) == 0)
416 for (i = 0; i < sbi->segs_per_sec; i++)
417 mtime += get_seg_entry(sbi, start + i)->mtime;
418 mtime = div_u64(mtime, sbi->segs_per_sec);
434 insert_victim_entry(sbi, mtime, segno);
437 static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi,
440 struct atgc_management *am = &sbi->am;
444 f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most);
449 static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
452 struct sit_info *sit_i = SIT_I(sbi);
453 struct atgc_management *am = &sbi->am;
462 unsigned int sec_blocks = BLKS_PER_SEC(sbi);
496 vblocks = get_valid_blocks(sbi, ve->segno, true);
497 f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);
503 f2fs_bug_on(sbi, age + u >= UINT_MAX);
525 static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
528 struct sit_info *sit_i = SIT_I(sbi);
529 struct atgc_management *am = &sbi->am;
536 unsigned int seg_blocks = sbi->blocks_per_seg;
549 node = lookup_central_victim(sbi, p);
565 vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
566 f2fs_bug_on(sbi, !vblocks);
598 static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
601 f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
602 &sbi->am.root, true));
605 atgc_lookup_victim(sbi, p);
607 atssr_lookup_victim(sbi, p);
609 f2fs_bug_on(sbi, 1);
612 static void release_victim_entry(struct f2fs_sb_info *sbi)
614 struct atgc_management *am = &sbi->am;
625 f2fs_bug_on(sbi, am->victim_count);
626 f2fs_bug_on(sbi, !list_empty(&am->victim_list));
637 static int get_victim_by_default(struct f2fs_sb_info *sbi,
641 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
642 struct sit_info *sm = SIT_I(sbi);
651 last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;
655 p.age_threshold = sbi->am.age_threshold;
658 select_policy(sbi, gc_type, type, &p);
661 p.min_cost = get_max_cost(sbi, &p);
667 SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;
670 if (!get_valid_blocks(sbi, *result, false)) {
675 if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
686 if (__is_large_section(sbi) && p.alloc_mode == LFS) {
687 if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
688 p.min_segno = sbi->next_victim_seg[BG_GC];
690 sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
694 sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
695 p.min_segno = sbi->next_victim_seg[FG_GC];
697 sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
704 p.min_segno = check_bg_victims(sbi);
742 secno = GET_SEC_FROM_SEG(sbi, segno);
744 if (sec_usage_check(sbi, secno))
748 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
754 if (get_ckpt_valid_blocks(sbi, segno, true))
762 if (!f2fs_segment_has_free_slot(sbi, segno))
771 add_victim_entry(sbi, &p, segno);
775 cost = get_gc_cost(sbi, segno, &p);
789 (MAIN_SECS(sbi) * sbi->segs_per_sec);
796 lookup_victim_by_age(sbi, &p);
797 release_victim_entry(sbi);
811 secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
813 sbi->cur_victim_sec = secno;
822 trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
823 sbi->cur_victim_sec,
824 prefree_segments(sbi), free_segments(sbi));
870 static int check_valid_map(struct f2fs_sb_info *sbi,
873 struct sit_info *sit_i = SIT_I(sbi);
878 sentry = get_seg_entry(sbi, segno);
889 static int gc_node_segment(struct f2fs_sb_info *sbi,
898 unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
900 start_addr = START_BLOCK(sbi, segno);
906 atomic_inc(&sbi->wb_sync_req[NODE]);
915 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
918 if (check_valid_map(sbi, segno, off) == 0)
922 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
928 f2fs_ra_node_page(sbi, nid);
933 node_page = f2fs_get_node_page(sbi, nid);
938 if (check_valid_map(sbi, segno, off) == 0) {
943 if (f2fs_get_node_info(sbi, nid, &ni)) {
956 stat_inc_node_blk_count(sbi, 1, gc_type);
963 atomic_dec(&sbi->wb_sync_req[NODE]);
994 static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
1005 node_page = f2fs_get_node_page(sbi, nid);
1009 if (f2fs_get_node_info(sbi, nid, dni)) {
1015 f2fs_warn(sbi, "%s: valid data with mismatched node version.",
1017 set_sbi_flag(sbi, SBI_NEED_FSCK);
1020 if (f2fs_check_nid_range(sbi, dni->ino)) {
1034 f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
1046 unsigned int segno = GET_SEGNO(sbi, blkaddr);
1047 unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
1049 if (unlikely(check_valid_map(sbi, segno, offset))) {
1050 if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
1051 f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u\n",
1053 set_sbi_flag(sbi, SBI_NEED_FSCK);
1064 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1070 .sbi = sbi,
1088 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
1106 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
1124 fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
1138 f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
1139 f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);
1157 .sbi = F2FS_I_SB(inode),
1173 bool lfs_mode = f2fs_lfs_mode(fio.sbi);
1174 int type = fio.sbi->am.atgc_enabled ?
1220 err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
1231 down_write(&fio.sbi->io_order_lock);
1233 mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
1250 f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
1251 f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);
1254 if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
1262 f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
1265 fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
1278 invalidate_mapping_pages(META_MAPPING(fio.sbi),
1283 dec_page_count(fio.sbi, F2FS_DIRTY_META);
1302 f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);
1312 f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
1316 up_write(&fio.sbi->io_order_lock);
1361 .sbi = F2FS_I_SB(inode),
1410 static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
1414 struct super_block *sb = sbi->sb;
1420 unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
1422 start_addr = START_BLOCK(sbi, segno);
1440 if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
1441 (!force_migrate && get_valid_blocks(sbi, segno, true) ==
1442 BLKS_PER_SEC(sbi)))
1445 if (check_valid_map(sbi, segno, off) == 0)
1449 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
1455 f2fs_ra_node_page(sbi, nid);
1460 if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
1464 f2fs_ra_node_page(sbi, dni.ino);
1474 set_sbi_flag(sbi, SBI_NEED_FSCK);
1481 sbi->skipped_gc_rwsem++;
1522 sbi->skipped_gc_rwsem++;
1527 sbi->skipped_gc_rwsem++;
1555 stat_inc_data_blk_count(sbi, 1, gc_type);
1565 static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
1568 struct sit_info *sit_i = SIT_I(sbi);
1572 ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
1578 static int do_garbage_collect(struct f2fs_sb_info *sbi,
1587 unsigned int end_segno = start_segno + sbi->segs_per_sec;
1589 unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
1593 if (__is_large_section(sbi))
1594 end_segno = rounddown(end_segno, sbi->segs_per_sec);
1601 if (f2fs_sb_has_blkzoned(sbi))
1602 end_segno -= sbi->segs_per_sec -
1603 f2fs_usable_segs_in_sec(sbi, segno);
1605 sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);
1608 if (__is_large_section(sbi))
1609 f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
1614 sum_page = f2fs_get_sum_page(sbi, segno++);
1620 sum_page = find_get_page(META_MAPPING(sbi),
1621 GET_SUM_BLOCK(sbi, segno));
1635 sum_page = find_get_page(META_MAPPING(sbi),
1636 GET_SUM_BLOCK(sbi, segno));
1639 if (get_valid_blocks(sbi, segno, false) == 0)
1641 if (gc_type == BG_GC && __is_large_section(sbi) &&
1642 migrated >= sbi->migration_granularity)
1644 if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
1649 f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
1651 set_sbi_flag(sbi, SBI_NEED_FSCK);
1652 f2fs_stop_checkpoint(sbi, false);
1664 submitted += gc_node_segment(sbi, sum->entries, segno,
1667 submitted += gc_data_segment(sbi, sum->entries, gc_list,
1671 stat_inc_seg_count(sbi, type, gc_type);
1676 get_valid_blocks(sbi, segno, false) == 0)
1679 if (__is_large_section(sbi))
1680 sbi->next_victim_seg[gc_type] =
1687 f2fs_submit_merged_write(sbi,
1692 stat_inc_call_count(sbi->stat_info);
1697 int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
1709 unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
1713 trace_f2fs_gc_begin(sbi->sb, sync, background,
1714 get_pages(sbi, F2FS_DIRTY_NODES),
1715 get_pages(sbi, F2FS_DIRTY_DENTS),
1716 get_pages(sbi, F2FS_DIRTY_IMETA),
1717 free_sections(sbi),
1718 free_segments(sbi),
1719 reserved_segments(sbi),
1720 prefree_segments(sbi));
1722 cpc.reason = __get_cp_reason(sbi);
1723 sbi->skipped_gc_rwsem = 0;
1726 if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
1730 if (unlikely(f2fs_cp_error(sbi))) {
1735 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
1741 if (prefree_segments(sbi) &&
1742 !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
1743 ret = f2fs_write_checkpoint(sbi, &cpc);
1747 if (has_not_enough_free_secs(sbi, 0, 0))
1756 ret = __get_victim(sbi, &segno, gc_type);
1760 seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, force);
1762 seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
1767 if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
1768 sbi->skipped_gc_rwsem)
1770 last_skipped = sbi->skipped_atomic_files[FG_GC];
1775 sbi->cur_victim_sec = NULL_SEGNO;
1780 if (!has_not_enough_free_secs(sbi, sec_freed, 0))
1786 if (free_sections(sbi) < NR_CURSEG_PERSIST_TYPE &&
1787 prefree_segments(sbi) &&
1788 !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
1789 ret = f2fs_write_checkpoint(sbi, &cpc);
1798 sbi->skipped_gc_rwsem) {
1799 f2fs_drop_inmem_pages_all(sbi, true);
1803 if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
1804 ret = f2fs_write_checkpoint(sbi, &cpc);
1806 SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
1807 SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;
1809 trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
1810 get_pages(sbi, F2FS_DIRTY_NODES),
1811 get_pages(sbi, F2FS_DIRTY_DENTS),
1812 get_pages(sbi, F2FS_DIRTY_IMETA),
1813 free_sections(sbi),
1814 free_segments(sbi),
1815 reserved_segments(sbi),
1816 prefree_segments(sbi));
1818 up_write(&sbi->gc_lock);
1841 static void init_atgc_management(struct f2fs_sb_info *sbi)
1843 struct atgc_management *am = &sbi->am;
1845 if (test_opt(sbi, ATGC) &&
1846 SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
1859 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
1861 DIRTY_I(sbi)->v_ops = &default_v_ops;
1863 sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
1866 if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
1867 SIT_I(sbi)->last_victim[ALLOC_NEXT] =
1868 GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
1870 init_atgc_management(sbi);
1873 static int free_segment_range(struct f2fs_sb_info *sbi,
1883 MAIN_SECS(sbi) -= secs;
1884 start = MAIN_SECS(sbi) * sbi->segs_per_sec;
1885 end = MAIN_SEGS(sbi) - 1;
1887 mutex_lock(&DIRTY_I(sbi)->seglist_lock);
1889 if (SIT_I(sbi)->last_victim[gc_mode] >= start)
1890 SIT_I(sbi)->last_victim[gc_mode] = 0;
1893 if (sbi->next_victim_seg[gc_type] >= start)
1894 sbi->next_victim_seg[gc_type] = NULL_SEGNO;
1895 mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
1899 f2fs_allocate_segment_for_resize(sbi, type, start, end);
1902 for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
1908 do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
1911 if (!gc_only && get_valid_blocks(sbi, segno, true)) {
1923 err = f2fs_write_checkpoint(sbi, &cpc);
1927 next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
1929 f2fs_err(sbi, "segno %u should be free but still inuse!",
1931 f2fs_bug_on(sbi, 1);
1934 MAIN_SECS(sbi) += secs;
1938 static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
1940 struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
1945 int segs = secs * sbi->segs_per_sec;
1947 down_write(&sbi->sb_lock);
1958 (long long)segs * sbi->blocks_per_seg);
1959 if (f2fs_is_multi_device(sbi)) {
1960 int last_dev = sbi->s_ndevs - 1;
1968 up_write(&sbi->sb_lock);
1971 static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
1973 int segs = secs * sbi->segs_per_sec;
1974 long long blks = (long long)segs * sbi->blocks_per_seg;
1976 le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
1978 SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
1979 MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
1980 MAIN_SECS(sbi) += secs;
1981 FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
1982 FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
1983 F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
1985 if (f2fs_is_multi_device(sbi)) {
1986 int last_dev = sbi->s_ndevs - 1;
1994 (int)(blks >> sbi->log_blocks_per_blkz);
2001 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2008 old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
2012 if (f2fs_is_multi_device(sbi)) {
2013 int last_dev = sbi->s_ndevs - 1;
2016 if (block_count + last_segs * sbi->blocks_per_seg <=
2022 div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
2029 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
2030 f2fs_err(sbi, "Should run fsck to repair first.");
2034 if (test_opt(sbi, DISABLE_CHECKPOINT)) {
2035 f2fs_err(sbi, "Checkpoint should be enabled.");
2044 secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
2047 if (!down_write_trylock(&sbi->gc_lock)) {
2053 f2fs_lock_op(sbi);
2055 spin_lock(&sbi->stat_lock);
2056 if (shrunk_blocks + valid_user_blocks(sbi) +
2057 sbi->current_reserved_blocks + sbi->unusable_block_count +
2058 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2060 spin_unlock(&sbi->stat_lock);
2065 err = free_segment_range(sbi, secs, true);
2068 f2fs_unlock_op(sbi);
2069 up_write(&sbi->gc_lock);
2075 freeze_super(sbi->sb);
2077 if (f2fs_readonly(sbi->sb)) {
2078 thaw_super(sbi->sb);
2082 down_write(&sbi->gc_lock);
2083 mutex_lock(&sbi->cp_mutex);
2085 spin_lock(&sbi->stat_lock);
2086 if (shrunk_blocks + valid_user_blocks(sbi) +
2087 sbi->current_reserved_blocks + sbi->unusable_block_count +
2088 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2091 sbi->user_block_count -= shrunk_blocks;
2092 spin_unlock(&sbi->stat_lock);
2096 set_sbi_flag(sbi, SBI_IS_RESIZEFS);
2097 err = free_segment_range(sbi, secs, false);
2101 update_sb_metadata(sbi, -secs);
2103 err = f2fs_commit_super(sbi, false);
2105 update_sb_metadata(sbi, secs);
2109 update_fs_metadata(sbi, -secs);
2110 clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
2111 set_sbi_flag(sbi, SBI_IS_DIRTY);
2113 err = f2fs_write_checkpoint(sbi, &cpc);
2115 update_fs_metadata(sbi, secs);
2116 update_sb_metadata(sbi, secs);
2117 f2fs_commit_super(sbi, false);
2120 clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
2122 set_sbi_flag(sbi, SBI_NEED_FSCK);
2123 f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
2125 spin_lock(&sbi->stat_lock);
2126 sbi->user_block_count += shrunk_blocks;
2127 spin_unlock(&sbi->stat_lock);
2130 mutex_unlock(&sbi->cp_mutex);
2131 up_write(&sbi->gc_lock);
2132 thaw_super(sbi->sb);
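
Note: the listing above is raw search output, so no surrounding explanation is available. As a hedged illustration of the pattern it shows (every GC entry point receives the per-filesystem context pointer `sbi`, and the GC thread state hangs off it, as in lines 153, 166, 174, and 188 above), the standalone sketch below uses simplified stand-in types and function names; it is not the real kernel implementation and none of these identifiers are part of the f2fs API.

	/*
	 * Minimal userspace sketch, assuming only what the listing shows:
	 * a per-filesystem info struct (stand-in for struct f2fs_sb_info)
	 * owns the GC thread state, and start/stop helpers take that
	 * struct as their single argument. All names here are illustrative.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	struct gc_kthread {            /* stand-in for struct f2fs_gc_kthread */
		int running;
	};

	struct sb_info {               /* stand-in for struct f2fs_sb_info */
		struct gc_kthread *gc_thread;
	};

	static int start_gc_thread(struct sb_info *sbi)
	{
		struct gc_kthread *gc_th = malloc(sizeof(*gc_th));

		if (!gc_th)
			return -1;             /* the kernel code returns -ENOMEM */
		gc_th->running = 1;
		sbi->gc_thread = gc_th;    /* mirrors "sbi->gc_thread = gc_th" (line 166) */
		return 0;
	}

	static void stop_gc_thread(struct sb_info *sbi)
	{
		struct gc_kthread *gc_th = sbi->gc_thread;

		if (!gc_th)
			return;
		free(gc_th);
		sbi->gc_thread = NULL;     /* mirrors "sbi->gc_thread = NULL" (line 188) */
	}

	int main(void)
	{
		struct sb_info sbi = { 0 };

		if (start_gc_thread(&sbi) == 0)
			printf("gc thread state attached to sbi\n");
		stop_gc_thread(&sbi);
		return 0;
	}

The point of the sketch is only the ownership pattern visible in the listing: one context pointer is threaded through every function, so teardown (as in f2fs_stop_gc_thread above) can clear the thread state in a single place.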