Lines matching refs: sbi (f2fs segment manager, fs/f2fs/segment.c)

187 bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
189 int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
190 int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
191 int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
193 if (f2fs_lfs_mode(sbi))
195 if (sbi->gc_mode == GC_URGENT_HIGH)
197 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
200 return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
201 SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
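
The f2fs_need_SSR() hits above (source lines 187-201) form the global SSR (Slack Space Recycling) trigger: LFS mode never uses SSR, urgent-high GC always does, and otherwise SSR starts once the free sections could be consumed by the dirty node/dentry/imeta sections plus the SSR floor and the reserve. A minimal user-space model of that comparison (the counts are made-up inputs, not kernel state):

#include <stdbool.h>
#include <stdio.h>

/* Model of the f2fs_need_SSR() threshold at source lines 200-201. */
static bool need_ssr(int free_secs, int node_secs, int dent_secs,
                     int imeta_secs, int min_ssr_secs, int reserved_secs)
{
        return free_secs <= node_secs + 2 * dent_secs + imeta_secs +
                            min_ssr_secs + reserved_secs;
}

int main(void)
{
        /* 40 free vs. 10 + 2*8 + 4 + 0 + 20 = 50 -> SSR kicks in */
        printf("need SSR: %d\n", need_ssr(40, 10, 8, 4, 0, 20));
        return 0;
}
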
205 static bool need_ssr_by_type(struct f2fs_sb_info *sbi, int type, int contig_level)
207 int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
208 int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
209 int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
210 u64 valid_blocks = sbi->total_valid_block_count;
211 u64 total_blocks = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
213 unsigned int free_segs = free_segments(sbi);
214 unsigned int ovp_segments = overprovision_segments(sbi);
219 if (sbi->hot_cold_params.enable == GRADING_SSR_OFF)
220 return f2fs_need_SSR(sbi);
221 if (f2fs_lfs_mode(sbi))
223 if (sbi->gc_mode == GC_URGENT_HIGH)
226 free_sections(sbi) > dirty_sum + 3 * reserved_sections(sbi) / 2)
228 if (free_sections(sbi) <= (unsigned int)(dirty_sum + 2 * reserved_sections(sbi)))
239 lower_limit = sbi->hot_cold_params.hot_data_lower_limit;
240 waterline = sbi->hot_cold_params.hot_data_waterline;
243 lower_limit = sbi->hot_cold_params.warm_data_lower_limit;
244 waterline = sbi->hot_cold_params.warm_data_waterline;
247 lower_limit = sbi->hot_cold_params.hot_node_lower_limit;
248 waterline = sbi->hot_cold_params.hot_node_waterline;
251 lower_limit = sbi->hot_cold_params.warm_node_lower_limit;
252 waterline = sbi->hot_cold_params.warm_node_waterline;
264 (le64_to_cpu(sbi->raw_super->block_count) - sbi->total_valid_block_count),
265 free_segments(sbi), contig_level);
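
need_ssr_by_type() (source lines 205-265) is a vendor "grading SSR" extension: with GRADING_SSR_OFF it falls back to f2fs_need_SSR(); otherwise each log type (hot/warm data, hot/warm node) gets its own lower_limit and waterline from sbi->hot_cold_params, and the decision also weighs free and reserved sections. Only the parameter selection is visible above, so the final comparison in this sketch is an illustrative assumption, not the vendor formula:

#include <stdbool.h>
#include <stdio.h>

struct grading { unsigned long long lower_limit; unsigned int waterline; };

/* Assumed shape of the per-type check: stay in LFS while free space is
 * above the type's lower limit; below it, compare the valid-block ratio
 * (scaled to 1/10000) against the type's waterline. */
static bool need_ssr_graded(unsigned long long valid_blocks,
                            unsigned long long total_blocks,
                            unsigned long long free_blocks,
                            const struct grading *g)
{
        if (free_blocks > g->lower_limit)
                return false;
        return valid_blocks * 10000 / total_blocks > g->waterline;
}

int main(void)
{
        struct grading warm_data = { 1ULL << 20, 6000 };

        /* 70% valid, free space under the limit -> prefer SSR */
        printf("%d\n", need_ssr_graded(700000, 1000000, 500000, &warm_data));
        return 0;
}
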
301 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
344 err = f2fs_get_node_info(sbi, dn.nid, &ni);
351 f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
354 f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
374 void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure)
376 struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
379 unsigned int count = sbi->atomic_files;
382 spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
384 spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
391 spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
414 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
422 spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
427 sbi->atomic_files--;
429 spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
443 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
448 f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));
458 f2fs_bug_on(sbi, !cur);
462 dec_page_count(sbi, F2FS_INMEM_PAGES);
474 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
478 .sbi = sbi,
530 f2fs_submit_merged_write_cond(sbi, inode, NULL, 0, DATA);
557 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
561 f2fs_balance_fs(sbi, true);
565 f2fs_lock_op(sbi);
574 f2fs_unlock_op(sbi);
584 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
586 if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
587 f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
588 f2fs_stop_checkpoint(sbi, false);
592 if (need && excess_cached_nats(sbi))
593 f2fs_balance_fs_bg(sbi, false);
595 if (!f2fs_is_checkpoint_ready(sbi))
602 if (has_not_enough_free_secs(sbi, 0, 0)) {
603 if (test_opt(sbi, GC_MERGE) && sbi->gc_thread &&
604 sbi->gc_thread->f2fs_gc_task) {
607 prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait,
609 wake_up(&sbi->gc_thread->gc_wait_queue_head);
611 finish_wait(&sbi->gc_thread->fggc_wq, &wait);
613 down_write(&sbi->gc_lock);
614 f2fs_gc(sbi, false, false, false, NULL_SEGNO);
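
f2fs_balance_fs() (source lines 584-614) is the foreground balancer: after the FAULT_CHECKPOINT injection hook and a background pass for excess cached NATs, it reacts to a shortage of free sections either by waking the background GC thread and sleeping on fggc_wq (GC_MERGE mounts) or by taking gc_lock and running foreground GC itself. A stand-alone model of that branch (the helpers are stand-ins, not kernel APIs):

#include <stdbool.h>
#include <stdio.h>

enum balance_action { BAL_NONE, BAL_WAKE_GC_THREAD, BAL_FOREGROUND_GC };

/* Stand-in for the tail of f2fs_balance_fs(). */
static enum balance_action balance(bool enough_free_secs, bool gc_merge,
                                   bool gc_thread_running)
{
        if (enough_free_secs)
                return BAL_NONE;
        if (gc_merge && gc_thread_running)
                return BAL_WAKE_GC_THREAD;  /* then wait on fggc_wq */
        return BAL_FOREGROUND_GC;           /* down_write(gc_lock); f2fs_gc() */
}

int main(void)
{
        printf("%d\n", balance(false, true, true)); /* -> wake GC thread */
        return 0;
}
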
619 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
621 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
625 if (!f2fs_available_free_memory(sbi, EXTENT_CACHE))
626 f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
629 if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
630 f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
632 if (!f2fs_available_free_memory(sbi, FREE_NIDS))
633 f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS);
635 f2fs_build_free_nids(sbi, false, false);
637 if (excess_dirty_nats(sbi) || excess_dirty_nodes(sbi) ||
638 excess_prefree_segs(sbi))
642 if (is_inflight_io(sbi, REQ_TIME) ||
643 (!f2fs_time_over(sbi, REQ_TIME) && rwsem_is_locked(&sbi->cp_rwsem)))
647 if (f2fs_time_over(sbi, CP_TIME))
651 if (f2fs_available_free_memory(sbi, NAT_ENTRIES) &&
652 f2fs_available_free_memory(sbi, INO_ENTRIES))
656 if (test_opt(sbi, DATA_FLUSH) && from_bg) {
659 mutex_lock(&sbi->flush_lock);
662 f2fs_sync_dirty_inodes(sbi, FILE_INODE, false);
665 mutex_unlock(&sbi->flush_lock);
667 f2fs_sync_fs(sbi->sb, true);
668 stat_inc_bg_cp_count(sbi->stat_info);
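
f2fs_balance_fs_bg() (source lines 619-668) trims caches first (extent tree, NAT entries, free nids) and only then decides whether a background checkpoint is due: excess dirty NATs/nodes/prefree segments, or the REQ_TIME/CP_TIME timers with no I/O in flight. The timer gate can be modeled as a plain "interval since last activity" check:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Model of the f2fs_time_over()-style gate used above. */
static bool time_over(time_t last_active, time_t interval_sec)
{
        return time(NULL) - last_active >= interval_sec;
}

int main(void)
{
        time_t last = time(NULL) - 90;

        printf("cp due: %d\n", time_over(last, 60)); /* 90s idle vs 60s -> 1 */
        return 0;
}
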
671 static int __submit_flush_wait(struct f2fs_sb_info *sbi,
677 bio = f2fs_bio_alloc(sbi, 0, false);
686 trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
687 test_opt(sbi, FLUSH_MERGE), ret);
691 static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
696 if (!f2fs_is_multi_device(sbi))
697 return __submit_flush_wait(sbi, sbi->sb->s_bdev);
699 for (i = 0; i < sbi->s_ndevs; i++) {
700 if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO))
702 ret = __submit_flush_wait(sbi, FDEV(i).bdev);
711 struct f2fs_sb_info *sbi = data;
712 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
718 sb_start_intwrite(sbi->sb);
729 ret = submit_flush_wait(sbi, cmd->ino);
740 sb_end_intwrite(sbi->sb);
747 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
749 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
753 if (test_opt(sbi, NOBARRIER))
756 if (!test_opt(sbi, FLUSH_MERGE)) {
758 ret = submit_flush_wait(sbi, ino);
765 f2fs_is_multi_device(sbi)) {
766 ret = submit_flush_wait(sbi, ino);
797 ret = submit_flush_wait(sbi, ino);
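
Source lines 671-797 are the flush-merge machinery: __submit_flush_wait() sends one empty preflush bio per device, issue_flush_thread() drains a command queue under sb_start_intwrite(), and f2fs_issue_flush() either flushes synchronously (NOBARRIER, no FLUSH_MERGE, or a dirty multi-device inode) or queues a flush_cmd so that many fsync callers ride a single device flush. A toy model of the merging effect:

#include <stdio.h>

/* Toy model: requests queue up while a flush is in flight; one device
 * flush then completes all of them. */
struct flush_ctrl { int queued; int device_flushes; };

static void issue_flush(struct flush_ctrl *fcc)
{
        fcc->queued++;
}

static int flush_worker_pass(struct flush_ctrl *fcc)
{
        int completed = fcc->queued;

        if (completed) {
                fcc->device_flushes++;  /* one preflush bio serves them all */
                fcc->queued = 0;
        }
        return completed;
}

int main(void)
{
        struct flush_ctrl fcc = { 0, 0 };

        issue_flush(&fcc); issue_flush(&fcc); issue_flush(&fcc);
        printf("completed %d with %d device flush(es)\n",
               flush_worker_pass(&fcc), fcc.device_flushes);
        return 0;
}
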
814 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
816 dev_t dev = sbi->sb->s_bdev->bd_dev;
820 if (SM_I(sbi)->fcc_info) {
821 fcc = SM_I(sbi)->fcc_info;
827 fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
834 SM_I(sbi)->fcc_info = fcc;
835 if (!test_opt(sbi, FLUSH_MERGE))
839 fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
844 SM_I(sbi)->fcc_info = NULL;
851 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
853 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
863 SM_I(sbi)->fcc_info = NULL;
867 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
871 if (!f2fs_is_multi_device(sbi))
874 if (test_opt(sbi, NOBARRIER))
877 for (i = 1; i < sbi->s_ndevs; i++) {
878 if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
880 ret = __submit_flush_wait(sbi, FDEV(i).bdev);
884 spin_lock(&sbi->dev_lock);
885 f2fs_clear_bit(i, (char *)&sbi->dirty_device);
886 spin_unlock(&sbi->dev_lock);
892 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
895 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
898 if (IS_CURSEG(sbi, segno))
905 struct seg_entry *sentry = get_seg_entry(sbi, segno);
909 f2fs_bug_on(sbi, 1);
915 if (__is_large_section(sbi)) {
916 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
918 get_valid_blocks(sbi, segno, true);
920 f2fs_bug_on(sbi, unlikely(!valid_blocks ||
921 valid_blocks == BLKS_PER_SEC(sbi)));
923 if (!IS_CURSEC(sbi, secno))
929 static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
932 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
939 struct seg_entry *sentry = get_seg_entry(sbi, segno);
945 valid_blocks = get_valid_blocks(sbi, segno, true);
947 clear_bit(GET_SEC_FROM_SEG(sbi, segno),
950 clear_bit(segno, SIT_I(sbi)->invalid_segmap);
953 if (__is_large_section(sbi)) {
954 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
957 valid_blocks == BLKS_PER_SEC(sbi)) {
962 if (!IS_CURSEC(sbi, secno))
973 static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
975 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
979 if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
982 usable_blocks = f2fs_usable_blks_in_seg(sbi, segno);
985 valid_blocks = get_valid_blocks(sbi, segno, false);
986 ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false);
988 if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
990 __locate_dirty_segment(sbi, segno, PRE);
991 __remove_dirty_segment(sbi, segno, DIRTY);
993 __locate_dirty_segment(sbi, segno, DIRTY);
996 __remove_dirty_segment(sbi, segno, DIRTY);
1003 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
1005 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1009 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
1010 if (get_valid_blocks(sbi, segno, false))
1012 if (IS_CURSEG(sbi, segno))
1014 __locate_dirty_segment(sbi, segno, PRE);
1015 __remove_dirty_segment(sbi, segno, DIRTY);
1020 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
1023 (overprovision_segments(sbi) - reserved_segments(sbi));
1024 block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg;
1025 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1032 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
1033 se = get_seg_entry(sbi, segno);
1035 holes[NODE] += f2fs_usable_blks_in_seg(sbi, segno) -
1038 holes[DATA] += f2fs_usable_blks_in_seg(sbi, segno) -
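
f2fs_get_unusable_blocks() (source lines 1020-1038) sums, per dirty segment, the usable blocks that are neither valid nor checkpoint-valid, split into NODE and DATA holes, and weighs them against the overprovision budget (overprovision segments minus reserved, in blocks). The returned value is not shown above; the sketch below assumes the usual "larger hole minus the OVP budget, clamped at zero" shape:

#include <stdio.h>

typedef unsigned int block_t;

/* Assumed tail of f2fs_get_unusable_blocks(). */
static block_t unusable_blocks(block_t hole_data, block_t hole_node,
                               block_t ovp_holes)
{
        block_t hole = hole_data > hole_node ? hole_data : hole_node;

        return hole > ovp_holes ? hole - ovp_holes : 0;
}

int main(void)
{
        printf("%u\n", unusable_blocks(5000, 3000, 4096)); /* -> 904 */
        return 0;
}
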
1049 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
1052 (overprovision_segments(sbi) - reserved_segments(sbi));
1053 if (unusable > F2FS_OPTION(sbi).unusable_cap)
1055 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
1056 dirty_segments(sbi) > ovp_hole_segs)
1062 static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
1064 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1068 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
1069 if (get_valid_blocks(sbi, segno, false))
1071 if (get_ckpt_valid_blocks(sbi, segno, false))
1080 static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
1084 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1088 f2fs_bug_on(sbi, !len);
1112 static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
1118 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1121 dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
1144 static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
1147 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1159 f2fs_bug_on(sbi, dc->ref);
1167 KERN_INFO, sbi->sb->s_id,
1189 static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
1196 unsigned long offset, size, max_blocks = sbi->blocks_per_seg;
1200 segno = GET_SEGNO(sbi, blk);
1201 sentry = get_seg_entry(sbi, segno);
1202 offset = GET_BLKOFF_FROM_SEG0(sbi, blk);
1204 if (end < START_BLOCK(sbi, segno + 1))
1205 size = GET_BLKOFF_FROM_SEG0(sbi, end);
1210 f2fs_bug_on(sbi, offset != size);
1211 blk = START_BLOCK(sbi, segno + 1);
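
__check_sit_bitmap() (source lines 1189-1211) is a debug walk over a discard range: each segment the range touches is reduced to an (offset, size) bit window in its cur_valid_map, and a set bit inside the window means a valid block is about to be discarded (f2fs_bug_on). The per-segment windowing, modeled with a fixed segment size:

#include <stdio.h>

#define BLKS_PER_SEG 512U /* 2MB segments of 4KB blocks; example value */

/* For a span [blk, end), compute the bitmap window to check inside the
 * segment containing blk, and where the next segment starts. */
static void seg_window(unsigned int blk, unsigned int end,
                       unsigned int *offset, unsigned int *size,
                       unsigned int *next_blk)
{
        unsigned int seg_end = (blk / BLKS_PER_SEG + 1) * BLKS_PER_SEG;

        *offset = blk % BLKS_PER_SEG;
        *size = end < seg_end ? end % BLKS_PER_SEG : BLKS_PER_SEG;
        *next_blk = seg_end;
}

int main(void)
{
        unsigned int off, size, next;

        seg_window(1000, 1100, &off, &size, &next);
        printf("check bits [%u, %u), next blk %u\n", off, size, next);
        return 0;
}
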
1216 static void __init_discard_policy(struct f2fs_sb_info *sbi,
1220 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1254 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1258 static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
1268 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1281 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
1309 if (time_to_inject(sbi, FAULT_DISCARD)) {
1310 f2fs_show_injection_info(sbi, FAULT_DISCARD);
1328 f2fs_bug_on(sbi, !bio);
1347 __check_sit_bitmap(sbi, lstart, lstart + len);
1356 f2fs_update_iostat(sbi, FS_DISCARD, 1);
1366 __update_discard_tree_range(sbi, bdev, lstart, start, len);
1371 static void __insert_discard_tree(struct f2fs_sb_info *sbi,
1377 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1388 p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent,
1391 __attach_discard_cmd(sbi, bdev, lstart, start, len, parent,
1401 static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
1404 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1409 __remove_discard_cmd(sbi, dc);
1424 __insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
1438 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1442 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1511 __remove_discard_cmd(sbi, tdc);
1516 __insert_discard_tree(sbi, bdev, di.lstart, di.start,
1529 static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
1539 if (f2fs_is_multi_device(sbi)) {
1540 int devi = f2fs_target_device_index(sbi, blkstart);
1544 mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
1545 __update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
1546 mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
1550 static unsigned int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
1554 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1584 if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) {
1590 err = __submit_discard_cmd(sbi, dpolicy, spolicy_index, dc, &issued);
1597 __remove_discard_cmd(sbi, dc);
1613 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1616 static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
1619 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1628 f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT);
1633 f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi, &dcc->root, false));
1641 f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
1650 issued = __issue_discard_cmd_orderly(sbi, dpolicy, i);
1661 f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
1664 f2fs_bug_on(sbi, dc->state != D_PREP);
1667 f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
1671 !is_idle(sbi, DISCARD_TIME)) {
1675 __submit_discard_cmd(sbi, dpolicy, i, dc, &issued);
1692 __wait_all_discard_cmd(sbi, dpolicy);
1702 static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
1704 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1714 f2fs_bug_on(sbi, dc->state != D_PREP);
1715 __remove_discard_cmd(sbi, dc);
1724 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi)
1726 __drop_discard_cmd(sbi);
1729 static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
1732 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1737 f2fs_bug_on(sbi, dc->state != D_DONE);
1742 __remove_discard_cmd(sbi, dc);
1749 static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
1753 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1773 __remove_discard_cmd(sbi, dc);
1783 trimmed += __wait_one_discard_bio(sbi, dc);
1790 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1797 return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
1800 __init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, 1);
1801 discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1802 __init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, 1);
1803 discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1809 static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
1811 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1820 __punch_discard_cmd(sbi, dc, blkaddr);
1829 __wait_one_discard_bio(sbi, dc);
1832 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
1834 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1845 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
1847 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1851 __init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT, 0);
1852 __issue_discard_cmd(sbi, &dpolicy);
1853 dropped = __drop_discard_cmd(sbi);
1856 __wait_all_discard_cmd(sbi, NULL);
1858 f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
1862 static int select_discard_type(struct f2fs_sb_info *sbi)
1864 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1865 block_t user_block_count = sbi->user_block_count;
1866 block_t ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
1868 valid_user_blocks(sbi) + ovp_count;
1871 if (fs_available_blocks >= fs_free_space_threshold(sbi) &&
1873 device_free_space_threshold(sbi)) {
1875 } else if (fs_available_blocks < fs_free_space_threshold(sbi) &&
1877 device_free_space_threshold(sbi)) {
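
select_discard_type() (source lines 1862-1877) is another vendor addition: it grades discard aggressiveness by comparing filesystem-available blocks (user blocks minus valid, plus overprovision) and device free space against two thresholds. The two visible conditions suggest a three-way split; the mapping to policy names below is an assumption:

#include <stdio.h>

enum dpolicy { DPOLICY_BG, DPOLICY_BALANCE, DPOLICY_FORCE };

/* Assumed grading: relaxed when both FS and device have room, balanced
 * when only the device has room, forced otherwise. */
static enum dpolicy select_discard_type(unsigned long long fs_avail,
                                        unsigned long long fs_thresh,
                                        unsigned long long dev_free,
                                        unsigned long long dev_thresh)
{
        if (fs_avail >= fs_thresh && dev_free >= dev_thresh)
                return DPOLICY_BG;
        if (fs_avail < fs_thresh && dev_free >= dev_thresh)
                return DPOLICY_BALANCE;
        return DPOLICY_FORCE;
}

int main(void)
{
        printf("%d\n", select_discard_type(100, 200, 300, 250)); /* BALANCE */
        return 0;
}
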
1887 struct f2fs_sb_info *sbi = data;
1888 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1897 discard_type = select_discard_type(sbi);
1898 __init_discard_policy(sbi, &dpolicy, discard_type, 0);
1910 __wait_all_discard_cmd(sbi, NULL);
1914 if (f2fs_readonly(sbi->sb))
1918 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
1923 if (sbi->gc_mode == GC_URGENT_HIGH)
1924 __init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 0);
1926 sb_start_intwrite(sbi->sb);
1928 issued = __issue_discard_cmd(sbi, &dpolicy);
1930 __wait_all_discard_cmd(sbi, &dpolicy);
1933 wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME);
1940 sb_end_intwrite(sbi->sb);
1947 static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
1954 if (f2fs_is_multi_device(sbi)) {
1955 devi = f2fs_target_device_index(sbi, blkstart);
1958 f2fs_err(sbi, "Invalid block %x", blkstart);
1965 if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
1971 f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
1972 devi, sbi->s_ndevs ? FDEV(devi).path : "",
1982 return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
1986 static int __issue_discard_async(struct f2fs_sb_info *sbi,
1990 if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
1991 return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
1993 return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
1996 static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
2006 bdev = f2fs_target_device(sbi, blkstart, NULL);
2011 f2fs_target_device(sbi, i, NULL);
2014 err = __issue_discard_async(sbi, bdev,
2024 se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
2025 offset = GET_BLKOFF_FROM_SEG0(sbi, i);
2028 sbi->discard_blks--;
2032 err = __issue_discard_async(sbi, bdev, start, len);
2036 static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
2040 int max_blocks = sbi->blocks_per_seg;
2041 struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
2045 unsigned long *dmap = SIT_I(sbi)->tmp_map;
2049 struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
2052 if (se->valid_blocks == max_blocks || !f2fs_hw_support_discard(sbi))
2056 if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
2057 SM_I(sbi)->dcc_info->nr_discards >=
2058 SM_I(sbi)->dcc_info->max_discards)
2067 while (force || SM_I(sbi)->dcc_info->nr_discards <=
2068 SM_I(sbi)->dcc_info->max_discards) {
2084 de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
2091 SM_I(sbi)->dcc_info->nr_discards += end - start;
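
add_discard_addrs() (source lines 2036-2091) derives discard candidates for one segment by diffing its current and checkpointed valid bitmaps into tmp_map, then scanning for runs of set bits; each run becomes a discard_entry charged against dcc_info->nr_discards/max_discards. The run scan is ordinary bitmap work, sketched here with byte-wise bit tests in place of the kernel's find_next_(zero_)bit:

#include <stdio.h>

#define NBITS 64

static int test_bit(const unsigned char *map, int i)
{
        return (map[i / 8] >> (i % 8)) & 1;
}

/* Emit each [start, end) run of set bits in dmap -- one discard per run. */
static void scan_runs(const unsigned char *dmap)
{
        int i = 0;

        while (i < NBITS) {
                int start;

                while (i < NBITS && !test_bit(dmap, i))
                        i++;
                if (i == NBITS)
                        break;
                start = i;
                while (i < NBITS && test_bit(dmap, i))
                        i++;
                printf("discard blocks [%d, %d)\n", start, i);
        }
}

int main(void)
{
        unsigned char dmap[NBITS / 8] = { 0x0f, 0x00, 0xf0 };

        scan_runs(dmap); /* -> [0, 4) and [20, 24) */
        return 0;
}
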
2102 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi)
2104 struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
2115 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
2117 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2121 for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
2122 __set_test_and_free(sbi, segno, false);
2126 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
2129 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2132 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2137 bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
2146 start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
2147 if (start >= MAIN_SEGS(sbi))
2149 end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
2153 start = rounddown(start, sbi->segs_per_sec);
2154 end = roundup(end, sbi->segs_per_sec);
2162 if (!f2fs_realtime_discard_enable(sbi))
2169 if (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi)) {
2170 f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
2171 (end - start) << sbi->log_blocks_per_seg);
2175 secno = GET_SEC_FROM_SEG(sbi, start);
2176 start_segno = GET_SEG_FROM_SEC(sbi, secno);
2177 if (!IS_CURSEC(sbi, secno) &&
2178 !get_valid_blocks(sbi, start, true))
2179 f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
2180 sbi->segs_per_sec << sbi->log_blocks_per_seg);
2182 start = start_segno + sbi->segs_per_sec;
2198 sbi->blocks_per_seg, cur_pos);
2201 if (f2fs_sb_has_blkzoned(sbi) ||
2205 f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
2210 sbi->blocks_per_seg, cur_pos);
2216 if (cur_pos < sbi->blocks_per_seg)
2223 wake_up_discard_thread(sbi, false);
2226 static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
2228 dev_t dev = sbi->sb->s_bdev->bd_dev;
2232 if (SM_I(sbi)->dcc_info) {
2233 dcc = SM_I(sbi)->dcc_info;
2237 dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL);
2252 dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
2259 SM_I(sbi)->dcc_info = dcc;
2261 dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
2266 SM_I(sbi)->dcc_info = NULL;
2273 static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
2275 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2280 f2fs_stop_discard_thread(sbi);
2287 f2fs_issue_discard_timeout(sbi);
2290 SM_I(sbi)->dcc_info = NULL;
2293 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
2295 struct sit_info *sit_i = SIT_I(sbi);
2305 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
2308 struct seg_entry *se = get_seg_entry(sbi, segno);
2311 __mark_sit_entry_dirty(sbi, segno);
2314 static inline unsigned long long get_segment_mtime(struct f2fs_sb_info *sbi,
2317 unsigned int segno = GET_SEGNO(sbi, blkaddr);
2321 return get_seg_entry(sbi, segno)->mtime;
2324 static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr,
2328 unsigned int segno = GET_SEGNO(sbi, blkaddr);
2329 unsigned long long ctime = get_mtime(sbi, false);
2335 se = get_seg_entry(sbi, segno);
2343 if (ctime > SIT_I(sbi)->max_mtime)
2344 SIT_I(sbi)->max_mtime = ctime;
2347 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
2357 segno = GET_SEGNO(sbi, blkaddr);
2359 se = get_seg_entry(sbi, segno);
2361 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2363 f2fs_bug_on(sbi, (new_vblocks < 0 ||
2364 (new_vblocks > f2fs_usable_blks_in_seg(sbi, segno))));
2375 f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
2377 f2fs_bug_on(sbi, 1);
2381 f2fs_err(sbi, "Bitmap was wrongly set, blk:%u",
2383 f2fs_bug_on(sbi, 1);
2389 sbi->discard_blks--;
2395 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
2405 f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
2407 f2fs_bug_on(sbi, 1);
2411 f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u",
2413 f2fs_bug_on(sbi, 1);
2416 } else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2424 spin_lock(&sbi->stat_lock);
2425 sbi->unusable_block_count++;
2426 spin_unlock(&sbi->stat_lock);
2431 sbi->discard_blks++;
2436 __mark_sit_entry_dirty(sbi, segno);
2439 SIT_I(sbi)->written_valid_blocks += del;
2441 if (__is_large_section(sbi))
2442 get_sec_entry(sbi, segno)->valid_blocks += del;
2445 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
2447 unsigned int segno = GET_SEGNO(sbi, addr);
2448 struct sit_info *sit_i = SIT_I(sbi);
2450 f2fs_bug_on(sbi, addr == NULL_ADDR);
2454 invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
2459 update_segment_mtime(sbi, addr, 0);
2460 update_sit_entry(sbi, addr, -1);
2463 locate_dirty_segment(sbi, segno);
2468 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
2470 struct sit_info *sit_i = SIT_I(sbi);
2480 segno = GET_SEGNO(sbi, blkaddr);
2481 se = get_seg_entry(sbi, segno);
2482 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2495 static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
2498 struct curseg_info *curseg = CURSEG_I(sbi, type);
2507 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
2513 if (sbi->ckpt->alloc_type[i] == SSR)
2514 valid_sum_count += sbi->blocks_per_seg;
2518 F2FS_CKPT(sbi)->cur_data_blkoff[i]);
2520 valid_sum_count += curseg_blkoff(sbi, i);
2537 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
2539 if (unlikely(f2fs_cp_error(sbi)))
2541 return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno));
2544 void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
2547 struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2554 static void write_sum_page(struct f2fs_sb_info *sbi,
2557 f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr);
2560 static void write_current_sum_page(struct f2fs_sb_info *sbi,
2563 struct curseg_info *curseg = CURSEG_I(sbi, type);
2564 struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2586 static int is_next_segment_free(struct f2fs_sb_info *sbi,
2590 struct free_segmap_info *free_i = FREE_I(sbi);
2592 if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
2601 static void get_new_segment(struct f2fs_sb_info *sbi,
2604 struct free_segmap_info *free_i = FREE_I(sbi);
2606 unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
2607 unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
2608 unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
2616 if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
2618 GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
2619 if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
2623 secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
2624 if (secno >= MAIN_SECS(sbi)) {
2627 MAIN_SECS(sbi), 0);
2628 f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
2643 MAIN_SECS(sbi), 0);
2644 f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
2649 segno = GET_SEG_FROM_SEC(sbi, secno);
2650 zoneno = GET_ZONE_FROM_SEC(sbi, secno);
2655 if (sbi->secs_per_zone == 1)
2666 if (CURSEG_I(sbi, i)->zone == zoneno)
2672 hint = zoneno * sbi->secs_per_zone - 1;
2676 hint = (zoneno + 1) * sbi->secs_per_zone;
2682 f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
2683 __set_inuse(sbi, segno);
2688 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
2690 struct curseg_info *curseg = CURSEG_I(sbi, type);
2696 curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
2703 sanity_check_seg_type(sbi, seg_type);
2709 __set_sit_entry_type(sbi, seg_type, curseg->segno, modified);
2712 static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
2714 struct curseg_info *curseg = CURSEG_I(sbi, type);
2717 sanity_check_seg_type(sbi, seg_type);
2720 if (__is_large_section(sbi))
2727 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2730 if (test_opt(sbi, NOHEAP) &&
2734 if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
2735 return SIT_I(sbi)->last_victim[ALLOC_NEXT];
2738 if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
2748 static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
2750 struct curseg_info *curseg = CURSEG_I(sbi, type);
2756 write_sum_page(sbi, curseg->sum_blk,
2757 GET_SUM_BLOCK(sbi, segno));
2761 if (test_opt(sbi, NOHEAP))
2764 segno = __get_next_segno(sbi, type);
2765 get_new_segment(sbi, &segno, new_sec, dir);
2767 reset_curseg(sbi, type, 1);
2771 static void __next_free_blkoff(struct f2fs_sb_info *sbi,
2774 struct seg_entry *se = get_seg_entry(sbi, seg->segno);
2776 unsigned long *target_map = SIT_I(sbi)->tmp_map;
2784 pos = find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
2794 static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
2798 __next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
2803 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
2805 struct seg_entry *se = get_seg_entry(sbi, segno);
2807 unsigned long *target_map = SIT_I(sbi)->tmp_map;
2815 pos = find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, 0);
2817 return pos < sbi->blocks_per_seg;
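
Under SSR the allocator cannot simply bump next_blkoff: __next_free_blkoff() (source lines 2771-2784) ORs the segment's current and checkpointed valid maps into tmp_map and searches the union for the next zero bit, so a slot is reused only when it is free in both views; f2fs_segment_has_free_slot() (lines 2803-2817) is the same scan asking whether any slot remains. The union-and-scan in miniature:

#include <stdio.h>

#define NBITS 16

static int test_bit(unsigned int map, int i) { return (map >> i) & 1; }

/* A block offset is allocatable under SSR only if it is unused in both
 * the current and the checkpointed valid bitmaps. */
static int next_free_blkoff(unsigned int cur_map, unsigned int ckpt_map,
                            int start)
{
        unsigned int target = cur_map | ckpt_map;

        for (int i = start; i < NBITS; i++)
                if (!test_bit(target, i))
                        return i;
        return NBITS; /* segment full: caller must pick a new victim */
}

int main(void)
{
        /* cur: bits 0-3 used; ckpt: bits 2-5 used -> first free slot is 6 */
        printf("%d\n", next_free_blkoff(0x000f, 0x003c, 0));
        return 0;
}
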
2824 static void change_curseg(struct f2fs_sb_info *sbi, int type, bool flush)
2826 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2827 struct curseg_info *curseg = CURSEG_I(sbi, type);
2833 write_sum_page(sbi, curseg->sum_blk,
2834 GET_SUM_BLOCK(sbi, curseg->segno));
2836 __set_test_and_inuse(sbi, new_segno);
2839 __remove_dirty_segment(sbi, new_segno, PRE);
2840 __remove_dirty_segment(sbi, new_segno, DIRTY);
2843 reset_curseg(sbi, type, 1);
2845 __next_free_blkoff(sbi, curseg, 0);
2847 sum_page = f2fs_get_sum_page(sbi, new_segno);
2858 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2861 static void get_atssr_segment(struct f2fs_sb_info *sbi, int type,
2865 struct curseg_info *curseg = CURSEG_I(sbi, type);
2869 if (get_ssr_segment(sbi, type, alloc_mode, age)) {
2870 struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno);
2873 change_curseg(sbi, type, true);
2877 new_curseg(sbi, type, true);
2879 stat_inc_seg_type(sbi, curseg);
2882 static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
2884 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC);
2886 if (!sbi->am.atgc_enabled)
2889 down_read(&SM_I(sbi)->curseg_lock);
2892 down_write(&SIT_I(sbi)->sentry_lock);
2894 get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC, CURSEG_COLD_DATA, SSR, 0);
2896 up_write(&SIT_I(sbi)->sentry_lock);
2899 up_read(&SM_I(sbi)->curseg_lock);
2902 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
2904 __f2fs_init_atgc_curseg(sbi);
2907 static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2909 struct curseg_info *curseg = CURSEG_I(sbi, type);
2915 if (get_valid_blocks(sbi, curseg->segno, false)) {
2916 write_sum_page(sbi, curseg->sum_blk,
2917 GET_SUM_BLOCK(sbi, curseg->segno));
2919 mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2920 __set_test_and_free(sbi, curseg->segno, true);
2921 mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2927 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi)
2929 __f2fs_save_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
2931 if (sbi->am.atgc_enabled)
2932 __f2fs_save_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
2935 static void __f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2937 struct curseg_info *curseg = CURSEG_I(sbi, type);
2942 if (get_valid_blocks(sbi, curseg->segno, false))
2945 mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2946 __set_test_and_inuse(sbi, curseg->segno);
2947 mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2952 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi)
2954 __f2fs_restore_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
2956 if (sbi->am.atgc_enabled)
2957 __f2fs_restore_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
2960 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2963 struct curseg_info *curseg = CURSEG_I(sbi, type);
2964 const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
2970 sanity_check_seg_type(sbi, seg_type);
2973 if (!v_ops->get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) {
3000 if (!v_ops->get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) {
3007 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
3008 segno = get_free_segment(sbi);
3021 static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
3024 struct curseg_info *curseg = CURSEG_I(sbi, type);
3027 new_curseg(sbi, type, true);
3028 else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
3030 new_curseg(sbi, type, false);
3032 is_next_segment_free(sbi, curseg, type) &&
3033 likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
3034 new_curseg(sbi, type, false);
3036 else if (need_ssr_by_type(sbi, type, contig_level) && get_ssr_segment(sbi, type, SSR, 0))
3038 else if (f2fs_need_SSR(sbi) &&
3039 get_ssr_segment(sbi, type, SSR, 0))
3041 change_curseg(sbi, type, true);
3043 new_curseg(sbi, type, false);
3045 stat_inc_seg_type(sbi, curseg);
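
allocate_segment_by_default() (source lines 3021-3045) is the policy ladder run when a log fills: force a fresh segment when required, prefer the adjacent free segment to stay sequential, then try grading SSR (need_ssr_by_type) or plain f2fs_need_SSR() with get_ssr_segment(), and fall back to a new segment. Condensed into a stand-alone decision function (guard details simplified):

#include <stdio.h>

enum alloc_choice { NEW_SEG, NEXT_FREE_SEG, SSR_SEG };

/* Simplified ladder of allocate_segment_by_default(). */
static enum alloc_choice pick(int force_new, int next_seg_free,
                              int want_ssr, int ssr_victim_found)
{
        if (force_new)
                return NEW_SEG;
        if (next_seg_free)
                return NEXT_FREE_SEG;   /* stay sequential when possible */
        if (want_ssr && ssr_victim_found)
                return SSR_SEG;         /* change_curseg() to the victim */
        return NEW_SEG;                 /* last resort: new_curseg() */
}

int main(void)
{
        printf("%d\n", pick(0, 0, 1, 1)); /* -> SSR_SEG */
        return 0;
}
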
3048 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
3051 struct curseg_info *curseg = CURSEG_I(sbi, type);
3054 down_read(&SM_I(sbi)->curseg_lock);
3056 down_write(&SIT_I(sbi)->sentry_lock);
3058 segno = CURSEG_I(sbi, type)->segno;
3062 if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0))
3063 change_curseg(sbi, type, true);
3065 new_curseg(sbi, type, true);
3067 stat_inc_seg_type(sbi, curseg);
3069 locate_dirty_segment(sbi, segno);
3071 up_write(&SIT_I(sbi)->sentry_lock);
3074 f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u",
3078 up_read(&SM_I(sbi)->curseg_lock);
3081 static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
3084 struct curseg_info *curseg = CURSEG_I(sbi, type);
3091 get_valid_blocks(sbi, curseg->segno, new_sec))
3094 if (!get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
3098 SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true, SEQ_NONE);
3099 locate_dirty_segment(sbi, old_segno);
3102 static void __allocate_new_section(struct f2fs_sb_info *sbi, int type)
3104 __allocate_new_segment(sbi, type, true);
3107 void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type)
3109 down_read(&SM_I(sbi)->curseg_lock);
3110 down_write(&SIT_I(sbi)->sentry_lock);
3111 __allocate_new_section(sbi, type);
3112 up_write(&SIT_I(sbi)->sentry_lock);
3113 up_read(&SM_I(sbi)->curseg_lock);
3116 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
3120 down_read(&SM_I(sbi)->curseg_lock);
3121 down_write(&SIT_I(sbi)->sentry_lock);
3123 __allocate_new_segment(sbi, i, false);
3124 up_write(&SIT_I(sbi)->sentry_lock);
3125 up_read(&SM_I(sbi)->curseg_lock);
3132 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
3138 down_write(&SIT_I(sbi)->sentry_lock);
3140 if (add_discard_addrs(sbi, cpc, true)) {
3145 up_write(&SIT_I(sbi)->sentry_lock);
3151 static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
3155 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
3168 f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi, &dcc->root, false));
3177 f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
3202 err = __submit_discard_cmd(sbi, dpolicy, 0, dc, &issued);
3208 __remove_discard_cmd(sbi, dc);
3212 trimmed += __wait_all_discard_cmd(sbi, NULL);
3219 __remove_discard_cmd(sbi, dc);
3232 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
3242 bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
3244 if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
3247 if (end < MAIN_BLKADDR(sbi))
3250 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
3251 f2fs_warn(sbi, "Found FS corruption, run fsck to fix.");
3256 start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
3257 end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
3258 GET_SEGNO(sbi, end);
3260 start_segno = rounddown(start_segno, sbi->segs_per_sec);
3261 end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
3269 if (sbi->discard_blks == 0)
3272 down_write(&sbi->gc_lock);
3273 err = f2fs_write_checkpoint(sbi, &cpc);
3274 up_write(&sbi->gc_lock);
3284 if (f2fs_realtime_discard_enable(sbi))
3287 start_block = START_BLOCK(sbi, start_segno);
3288 end_block = START_BLOCK(sbi, end_segno + 1);
3290 __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
3291 trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
3294 trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
3302 static bool __has_curseg_space(struct f2fs_sb_info *sbi,
3305 return curseg->next_blkoff < f2fs_usable_blks_in_seg(sbi,
3382 enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
3385 if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER) {
3396 } else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS) {
3447 if (fio->sbi->am.atgc_enabled)
3472 switch (F2FS_OPTION(fio->sbi).active_logs) {
3483 f2fs_bug_on(fio->sbi, true);
3495 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3500 struct sit_info *sit_i = SIT_I(sbi);
3501 struct curseg_info *curseg = CURSEG_I(sbi, type);
3510 down_read(&SM_I(sbi)->curseg_lock);
3516 f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO);
3517 se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr));
3518 sanity_check_seg_type(sbi, se->type);
3519 f2fs_bug_on(sbi, IS_NODESEG(se->type));
3521 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3523 f2fs_bug_on(sbi, curseg->next_blkoff >= sbi->blocks_per_seg);
3525 f2fs_wait_discard_bio(sbi, *new_blkaddr);
3532 __add_sum_entry(sbi, type, sum);
3534 __refresh_next_blkoff(sbi, curseg);
3536 stat_inc_block_count(sbi, curseg);
3539 old_mtime = get_segment_mtime(sbi, old_blkaddr);
3541 update_segment_mtime(sbi, old_blkaddr, 0);
3544 update_segment_mtime(sbi, *new_blkaddr, old_mtime);
3550 update_sit_entry(sbi, *new_blkaddr, 1);
3551 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
3552 update_sit_entry(sbi, old_blkaddr, -1);
3554 if (!__has_curseg_space(sbi, curseg)) {
3556 get_atssr_segment(sbi, type, se->type,
3565 if (page && page->mapping && page->mapping != NODE_MAPPING(sbi) &&
3566 page->mapping != META_MAPPING(sbi)) {
3572 sit_i->s_ops->allocate_segment(sbi, type, false, contig);
3580 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3581 locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
3586 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
3588 f2fs_inode_chksum_set(sbi, page);
3594 if (F2FS_IO_ALIGNED(sbi))
3599 io = sbi->write_io[fio->type] + fio->temp;
3607 up_read(&SM_I(sbi)->curseg_lock);
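
f2fs_allocate_data_block() (source lines 3495-3607) is the heart of the allocator: under curseg_lock (read) and sentry_lock it takes the next free address in the current segment, waits out any pending discard on it, records the summary, refreshes next_blkoff, updates segment mtime and SIT entries for both the new and old addresses, and opens a new segment or section when the log is full. The address arithmetic behind NEXT_FREE_BLKADDR is just segment start plus write offset:

#include <stdio.h>

typedef unsigned int block_t;

#define BLKS_PER_SEG 512U /* example value */

/* NEXT_FREE_BLKADDR: segment start plus the in-segment write offset. */
static block_t next_free_blkaddr(block_t main_blkaddr, unsigned int segno,
                                 unsigned int next_blkoff)
{
        return main_blkaddr + segno * BLKS_PER_SEG + next_blkoff;
}

int main(void)
{
        /* main area at 0x1000, segment 3, offset 17 -> 0x1611 */
        printf("0x%x\n", next_free_blkaddr(0x1000, 3, 17));
        return 0;
}
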
3612 struct f2fs_sb_info *sbi = fio->sbi;
3615 if (!f2fs_is_multi_device(sbi))
3618 devidx = f2fs_target_device_index(sbi, fio->new_blkaddr);
3621 f2fs_set_dirty_device(sbi, fio->ino, devidx, FLUSH_INO);
3624 if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
3625 spin_lock(&sbi->dev_lock);
3626 f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
3627 spin_unlock(&sbi->dev_lock);
3634 bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA);
3637 down_read(&fio->sbi->io_order_lock);
3639 f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
3641 if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
3642 invalidate_mapping_pages(META_MAPPING(fio->sbi),
3655 up_read(&fio->sbi->io_order_lock);
3658 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3662 .sbi = sbi,
3674 if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
3681 stat_inc_meta_count(sbi, page->index);
3682 f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
3692 f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
3698 struct f2fs_sb_info *sbi = fio->sbi;
3701 f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
3706 f2fs_update_iostat(sbi, fio->io_type, F2FS_BLKSIZE);
3712 struct f2fs_sb_info *sbi = fio->sbi;
3719 segno = GET_SEGNO(sbi, fio->new_blkaddr);
3721 if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
3722 set_sbi_flag(sbi, SBI_NEED_FSCK);
3723 f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.",
3728 stat_inc_inplace_blocks(fio->sbi);
3730 if (fio->bio && !(SM_I(sbi)->ipu_policy & (1 << F2FS_IPU_NOCACHE)))
3736 f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
3742 static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi,
3748 if (CURSEG_I(sbi, i)->segno == segno)
3754 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
3759 struct sit_info *sit_i = SIT_I(sbi);
3766 segno = GET_SEGNO(sbi, new_blkaddr);
3767 se = get_seg_entry(sbi, segno);
3770 down_write(&SM_I(sbi)->curseg_lock);
3774 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
3781 if (IS_CURSEG(sbi, segno)) {
3783 type = __f2fs_get_curseg(sbi, segno);
3784 f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
3790 f2fs_bug_on(sbi, !IS_DATASEG(type));
3791 curseg = CURSEG_I(sbi, type);
3802 change_curseg(sbi, type, true);
3805 curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
3806 __add_sum_entry(sbi, type, sum);
3810 update_segment_mtime(sbi, new_blkaddr, 0);
3811 update_sit_entry(sbi, new_blkaddr, 1);
3813 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
3814 invalidate_mapping_pages(META_MAPPING(sbi),
3817 update_segment_mtime(sbi, old_blkaddr, 0);
3818 update_sit_entry(sbi, old_blkaddr, -1);
3821 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3822 locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
3824 locate_dirty_segment(sbi, old_cursegno);
3829 change_curseg(sbi, type, true);
3836 up_write(&SM_I(sbi)->curseg_lock);
3839 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3848 f2fs_do_replace_block(sbi, &sum, old_addr, new_addr,
3858 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
3861 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
3863 f2fs_submit_merged_ipu_write(sbi, NULL, page);
3866 f2fs_bug_on(sbi, locked && PageWriteback(page));
3875 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3884 cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
3900 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
3902 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3909 start = start_sum_block(sbi);
3911 page = f2fs_get_meta_page(sbi, start++);
3917 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
3921 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
3930 seg_i = CURSEG_I(sbi, i);
3934 reset_curseg(sbi, i, 0);
3939 blk_off = sbi->blocks_per_seg;
3953 page = f2fs_get_meta_page(sbi, start++);
3964 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
3966 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3980 if (__exist_node_summaries(sbi))
3981 blk_addr = sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type);
3983 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
3989 if (__exist_node_summaries(sbi))
3990 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
3993 blk_addr = GET_SUM_BLOCK(sbi, segno);
3996 new = f2fs_get_meta_page(sbi, blk_addr);
4002 if (__exist_node_summaries(sbi)) {
4005 for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
4010 err = f2fs_restore_node_summary(sbi, segno, sum);
4017 curseg = CURSEG_I(sbi, type);
4028 reset_curseg(sbi, type, 0);
4037 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
4039 struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
4040 struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
4044 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
4045 int npages = f2fs_npages_for_summary_flush(sbi, true);
4048 f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages,
4052 err = read_compacted_summaries(sbi);
4058 if (__exist_node_summaries(sbi))
4059 f2fs_ra_meta_pages(sbi,
4060 sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type),
4064 err = read_normal_summaries(sbi, type);
4072 f2fs_err(sbi, "invalid journal entries nats %u sits %u\n",
4080 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
4089 page = f2fs_grab_meta_page(sbi, blkaddr++);
4094 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
4099 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
4106 seg_i = CURSEG_I(sbi, i);
4107 if (sbi->ckpt->alloc_type[i] == SSR)
4108 blkoff = sbi->blocks_per_seg;
4110 blkoff = curseg_blkoff(sbi, i);
4114 page = f2fs_grab_meta_page(sbi, blkaddr++);
4138 static void write_normal_summaries(struct f2fs_sb_info *sbi,
4148 write_current_sum_page(sbi, i, blkaddr + (i - type));
4151 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4153 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
4154 write_compacted_summaries(sbi, start_blk);
4156 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
4159 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4161 write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
4186 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
4189 return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
4192 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
4195 struct sit_info *sit_i = SIT_I(sbi);
4199 src_off = current_sit_addr(sbi, start);
4200 dst_off = next_sit_addr(sbi, src_off);
4202 page = f2fs_grab_meta_page(sbi, dst_off);
4203 seg_info_to_sit_page(sbi, page, start);
4262 static void add_sits_in_set(struct f2fs_sb_info *sbi)
4264 struct f2fs_sm_info *sm_info = SM_I(sbi);
4266 unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
4269 for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
4273 static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
4275 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4285 dirtied = __mark_sit_entry_dirty(sbi, segno);
4288 add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
4298 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
4300 struct sit_info *sit_i = SIT_I(sbi);
4302 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4305 struct list_head *head = &SM_I(sbi)->sit_entry_set;
4306 bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS);
4318 add_sits_in_set(sbi);
4327 remove_sits_in_journal(sbi);
4339 (unsigned long)MAIN_SEGS(sbi));
4349 page = get_next_sit_page(sbi, start_segno);
4357 se = get_seg_entry(sbi, segno);
4361 f2fs_bug_on(sbi, 1);
4367 add_discard_addrs(sbi, cpc, false);
4373 f2fs_bug_on(sbi, offset < 0);
4378 check_block_count(sbi, segno,
4384 check_block_count(sbi, segno,
4398 f2fs_bug_on(sbi, ses->entry_cnt);
4402 f2fs_bug_on(sbi, !list_empty(head));
4403 f2fs_bug_on(sbi, sit_i->dirty_sentries);
4409 add_discard_addrs(sbi, cpc, false);
4415 set_prefree_as_free_segments(sbi);
4418 static int build_sit_info(struct f2fs_sb_info *sbi)
4420 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4427 sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
4431 SM_I(sbi)->sit_info = sit_i;
4434 f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry),
4435 MAIN_SEGS(sbi)),
4440 main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4441 sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size,
4447 bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * 4;
4449 bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * 3;
4451 sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4457 for (start = 0; start < MAIN_SEGS(sbi); start++) {
4473 sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
4477 if (__is_large_section(sbi)) {
4479 f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
4480 MAIN_SECS(sbi)),
4490 sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
4491 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
4503 sit_i->invalid_segmap = f2fs_kvzalloc(sbi,
4513 sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
4518 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
4524 static int build_free_segmap(struct f2fs_sb_info *sbi)
4530 free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
4534 SM_I(sbi)->free_info = free_i;
4536 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4537 free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
4541 sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4542 free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
4551 free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
4558 static int build_curseg(struct f2fs_sb_info *sbi)
4563 array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE,
4568 SM_I(sbi)->curseg_array = array;
4572 array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL);
4576 array[i].journal = f2fs_kzalloc(sbi,
4590 return restore_curseg_summaries(sbi);
4593 static int build_sit_entries(struct f2fs_sb_info *sbi)
4595 struct sit_info *sit_i = SIT_I(sbi);
4596 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4600 int sit_blk_cnt = SIT_BLK_CNT(sbi);
4607 readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
4613 for (; start < end && start < MAIN_SEGS(sbi); start++) {
4618 page = get_current_sit_page(sbi, start);
4625 err = check_block_count(sbi, start, &sit);
4631 f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
4639 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4646 sbi->discard_blks +=
4647 sbi->blocks_per_seg -
4651 if (__is_large_section(sbi))
4652 get_sec_entry(sbi, start)->valid_blocks +=
4663 if (start >= MAIN_SEGS(sbi)) {
4664 f2fs_err(sbi, "Wrong journal entry on segno %u",
4677 err = check_block_count(sbi, start, &sit);
4683 f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
4691 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4696 sbi->discard_blks += old_valid_blocks;
4697 sbi->discard_blks -= se->valid_blocks;
4700 if (__is_large_section(sbi)) {
4701 get_sec_entry(sbi, start)->valid_blocks +=
4703 get_sec_entry(sbi, start)->valid_blocks -=
4712 if (sit_valid_blocks[NODE] != valid_node_count(sbi)) {
4713 f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
4714 sit_valid_blocks[NODE], valid_node_count(sbi));
4719 valid_user_blocks(sbi)) {
4720 f2fs_err(sbi, "SIT is corrupted data# %u %u vs %u",
4722 valid_user_blocks(sbi));
4729 static void init_free_segmap(struct f2fs_sb_info *sbi)
4735 for (start = 0; start < MAIN_SEGS(sbi); start++) {
4736 if (f2fs_usable_blks_in_seg(sbi, start) == 0)
4738 sentry = get_seg_entry(sbi, start);
4740 __set_free(sbi, start);
4742 SIT_I(sbi)->written_valid_blocks +=
4748 struct curseg_info *curseg_t = CURSEG_I(sbi, type);
4749 __set_test_and_inuse(sbi, curseg_t->segno);
4753 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
4755 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4756 struct free_segmap_info *free_i = FREE_I(sbi);
4759 block_t blks_per_sec = BLKS_PER_SEC(sbi);
4763 segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
4764 if (segno >= MAIN_SEGS(sbi))
4767 valid_blocks = get_valid_blocks(sbi, segno, false);
4768 usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
4772 f2fs_bug_on(sbi, 1);
4776 __locate_dirty_segment(sbi, segno, DIRTY);
4780 if (!__is_large_section(sbi))
4784 for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
4785 valid_blocks = get_valid_blocks(sbi, segno, true);
4786 secno = GET_SEC_FROM_SEG(sbi, segno);
4790 if (IS_CURSEC(sbi, secno))
4797 static int init_victim_secmap(struct f2fs_sb_info *sbi)
4799 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4800 unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4802 dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4808 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
4814 dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
4819 SM_I(sbi)->dirty_info = dirty_i;
4822 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4825 dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
4831 if (__is_large_section(sbi)) {
4832 bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4833 dirty_i->dirty_secmap = f2fs_kvzalloc(sbi,
4839 init_dirty_segmap(sbi);
4840 return init_victim_secmap(sbi);
4843 static int sanity_check_curseg(struct f2fs_sb_info *sbi)
4852 struct curseg_info *curseg = CURSEG_I(sbi, i);
4853 struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
4856 sanity_check_seg_type(sbi, curseg->seg_type);
4859 f2fs_err(sbi,
4871 for (blkofs += 1; blkofs < sbi->blocks_per_seg; blkofs++) {
4875 f2fs_err(sbi,
4887 static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
4893 unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4901 wp_segno = GET_SEGNO(sbi, wp_block);
4902 wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
4904 zone_segno = GET_SEGNO(sbi, zone_block);
4905 zone_secno = GET_SEC_FROM_SEG(sbi, zone_segno);
4907 if (zone_segno >= MAIN_SEGS(sbi))
4915 if (zone_secno == GET_SEC_FROM_SEG(sbi,
4916 CURSEG_I(sbi, i)->segno))
4923 for (s = sbi->segs_per_sec - 1; s >= 0; s--) {
4925 se = get_seg_entry(sbi, segno);
4926 for (b = sbi->blocks_per_seg - 1; b >= 0; b--)
4928 last_valid_block = START_BLOCK(sbi, segno) + b;
4942 f2fs_notice(sbi, "Valid block beyond write pointer: "
4944 GET_SEGNO(sbi, last_valid_block),
4945 GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
4955 f2fs_notice(sbi,
4959 ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
4962 f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
4971 static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi,
4976 for (i = 0; i < sbi->s_ndevs; i++) {
4979 if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr &&
4993 static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
4995 struct curseg_info *cs = CURSEG_I(sbi, type);
5000 unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
5004 cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
5005 cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
5007 zbd = get_target_zoned_dev(sbi, cs_zone_block);
5017 f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
5026 wp_segno = GET_SEGNO(sbi, wp_block);
5027 wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
5034 f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
5038 f2fs_notice(sbi, "Assign new section to curseg[%d]: "
5040 allocate_segment_by_default(sbi, type, true, SEQ_NONE);
5043 if (check_zone_write_pointer(sbi, zbd, &zone))
5047 cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
5048 cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
5050 zbd = get_target_zoned_dev(sbi, cs_zone_block);
5059 f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
5068 f2fs_notice(sbi,
5072 err = __f2fs_issue_discard_zone(sbi, zbd->bdev,
5076 f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
5085 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
5090 ret = fix_curseg_write_pointer(sbi, i);
5099 struct f2fs_sb_info *sbi;
5108 return check_zone_write_pointer(args->sbi, args->fdev, zone);
5111 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
5116 for (i = 0; i < sbi->s_ndevs; i++) {
5120 args.sbi = sbi;
5140 struct f2fs_sb_info *sbi, unsigned int segno)
5145 if (!sbi->unusable_blocks_per_sec)
5146 return sbi->blocks_per_seg;
5148 secno = GET_SEC_FROM_SEG(sbi, segno);
5149 seg_start = START_BLOCK(sbi, segno);
5150 sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
5151 sec_cap_blkaddr = sec_start_blkaddr + CAP_BLKS_PER_SEC(sbi);
5161 if (seg_start + sbi->blocks_per_seg > sec_cap_blkaddr)
5164 return sbi->blocks_per_seg;
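
The zoned-device helper at source lines 5140-5164 handles zones whose capacity is smaller than the zone size: a segment's usable block count is clamped against the section's capacity end. Segments entirely past the boundary are unusable; the one straddling it gets the remainder. The clamp, with a fixed segment size for illustration:

#include <stdio.h>

typedef unsigned int block_t;

#define BLKS_PER_SEG 512U /* example value */

/* Usable blocks of a segment inside a capacity-limited section. */
static unsigned int usable_blks(block_t seg_start, block_t sec_cap_end)
{
        if (seg_start >= sec_cap_end)
                return 0;                       /* fully beyond capacity */
        if (seg_start + BLKS_PER_SEG > sec_cap_end)
                return sec_cap_end - seg_start; /* straddles the cap */
        return BLKS_PER_SEG;
}

int main(void)
{
        printf("%u %u %u\n", usable_blks(0, 1300), usable_blks(1024, 1300),
               usable_blks(1536, 1300)); /* -> 512 276 0 */
        return 0;
}
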
5167 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
5172 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
5177 static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi,
5184 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
5187 if (f2fs_sb_has_blkzoned(sbi))
5188 return f2fs_usable_zone_blks_in_seg(sbi, segno);
5190 return sbi->blocks_per_seg;
5193 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
5196 if (f2fs_sb_has_blkzoned(sbi))
5197 return CAP_SEGS_PER_SEC(sbi);
5199 return sbi->segs_per_sec;
5205 static void init_min_max_mtime(struct f2fs_sb_info *sbi)
5207 struct sit_info *sit_i = SIT_I(sbi);
5214 for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
5218 for (i = 0; i < sbi->segs_per_sec; i++)
5219 mtime += get_seg_entry(sbi, segno + i)->mtime;
5221 mtime = div_u64(mtime, sbi->segs_per_sec);
5226 sit_i->max_mtime = get_mtime(sbi, false);
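
init_min_max_mtime() (source lines 5205-5226) seeds the SIT mtime range used by GC victim selection: min_mtime is the smallest per-section average of segment mtimes (sum over segs_per_sec, then div_u64), and max_mtime is simply the current mount-relative time. The averaging step:

#include <stdio.h>

/* Per-section mtime as in init_min_max_mtime(): average the section's
 * segment mtimes. */
static unsigned long long section_mtime(const unsigned long long *seg_mtime,
                                        unsigned int segs_per_sec)
{
        unsigned long long sum = 0;

        for (unsigned int i = 0; i < segs_per_sec; i++)
                sum += seg_mtime[i];
        return sum / segs_per_sec;
}

int main(void)
{
        unsigned long long m[4] = { 100, 120, 80, 100 };

        printf("%llu\n", section_mtime(m, 4)); /* -> 100 */
        return 0;
}
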
5231 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
5233 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
5234 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
5238 sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
5243 sbi->sm_info = sm_info;
5256 if (!f2fs_lfs_mode(sbi))
5260 sm_info->min_seq_blocks = sbi->blocks_per_seg * sbi->segs_per_sec;
5262 sm_info->min_ssr_sections = reserved_sections(sbi);
5268 if (!f2fs_readonly(sbi->sb)) {
5269 err = f2fs_create_flush_cmd_control(sbi);
5274 err = create_discard_cmd_control(sbi);
5278 err = build_sit_info(sbi);
5281 err = build_free_segmap(sbi);
5284 err = build_curseg(sbi);
5289 err = build_sit_entries(sbi);
5293 init_free_segmap(sbi);
5294 err = build_dirty_segmap(sbi);
5298 err = sanity_check_curseg(sbi);
5302 init_min_max_mtime(sbi);
5306 static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
5309 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5317 static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
5319 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5323 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
5325 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5333 discard_dirty_segmap(sbi, i);
5335 if (__is_large_section(sbi)) {
5341 destroy_victim_secmap(sbi);
5342 SM_I(sbi)->dirty_info = NULL;
5346 static void destroy_curseg(struct f2fs_sb_info *sbi)
5348 struct curseg_info *array = SM_I(sbi)->curseg_array;
5353 SM_I(sbi)->curseg_array = NULL;
5361 static void destroy_free_segmap(struct f2fs_sb_info *sbi)
5363 struct free_segmap_info *free_i = SM_I(sbi)->free_info;
5366 SM_I(sbi)->free_info = NULL;
5372 static void destroy_sit_info(struct f2fs_sb_info *sbi)
5374 struct sit_info *sit_i = SIT_I(sbi);
5387 SM_I(sbi)->sit_info = NULL;
5396 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
5398 struct f2fs_sm_info *sm_info = SM_I(sbi);
5402 f2fs_destroy_flush_cmd_control(sbi, true);
5403 destroy_discard_cmd_control(sbi);
5404 destroy_dirty_segmap(sbi);
5405 destroy_curseg(sbi);
5406 destroy_free_segmap(sbi);
5407 destroy_sit_info(sbi);
5408 sbi->sm_info = NULL;