Lines Matching refs:sbi (f2fs segment manager, fs/f2fs/segment.c)
171 bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
173 int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
174 int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
175 int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
177 if (f2fs_lfs_mode(sbi))
179 if (sbi->gc_mode == GC_URGENT_HIGH)
181 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
184 return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
185 SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
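Aside: f2fs_need_SSR() (lines 171-185) decides whether slack space recycling should reuse partially valid segments: once free sections drop to the sum of sections pinned by dirty nodes, dentries (weighted twice), and inode metadata, plus the SSR and reserved minimums, SSR is needed. A minimal user-space sketch of that comparison; need_ssr() and all input values are hypothetical stand-ins:

#include <stdbool.h>
#include <stdio.h>

static bool need_ssr(int free_secs, int node_secs, int dent_secs,
		     int imeta_secs, int min_ssr_secs, int reserved_secs)
{
	/* free_sections(sbi) <= node + 2*dent + imeta + min_ssr + reserved */
	return free_secs <= node_secs + 2 * dent_secs + imeta_secs +
			    min_ssr_secs + reserved_secs;
}

int main(void)
{
	/* 10 free sections vs. 3 + 2*2 + 1 + 1 + 2 = 11: SSR kicks in */
	printf("need SSR: %d\n", need_ssr(10, 3, 2, 1, 1, 2));
	return 0;
}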
215 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
231 err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
241 dec_valid_block_count(sbi, inode, 1);
242 f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
245 f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
251 err = inc_valid_block_count(sbi, inode, &count, true);
259 dec_valid_block_count(sbi, F2FS_I(inode)->cow_inode, count);
261 f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
298 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
333 } else if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
337 f2fs_handle_error(sbi,
365 sbi->revoked_atomic_block += fi->atomic_write_cnt;
367 sbi->committed_atomic_block += fi->atomic_write_cnt;
378 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
387 f2fs_lock_op(sbi);
391 f2fs_unlock_op(sbi);
401 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
403 if (time_to_inject(sbi, FAULT_CHECKPOINT))
404 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_FAULT_INJECT);
407 if (need && excess_cached_nats(sbi))
408 f2fs_balance_fs_bg(sbi, false);
410 if (!f2fs_is_checkpoint_ready(sbi))
417 if (has_enough_free_secs(sbi, 0, 0))
420 if (test_opt(sbi, GC_MERGE) && sbi->gc_thread &&
421 sbi->gc_thread->f2fs_gc_task) {
424 prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait,
426 wake_up(&sbi->gc_thread->gc_wait_queue_head);
428 finish_wait(&sbi->gc_thread->fggc_wq, &wait);
437 f2fs_down_write(&sbi->gc_lock);
438 stat_inc_gc_call_count(sbi, FOREGROUND);
439 f2fs_gc(sbi, &gc_control);
443 static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi)
445 int factor = f2fs_rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2;
446 unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS);
447 unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA);
448 unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES);
449 unsigned int meta = get_pages(sbi, F2FS_DIRTY_META);
450 unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
451 unsigned int threshold = sbi->blocks_per_seg * factor *
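Aside: excess_dirty_threshold() (lines 443-451) sums the dirty dent/qdata/node/meta/imeta page counts and compares the total against a per-segment threshold scaled by 3 while cp_rwsem is held and by 2 otherwise; the final multiplier on line 451 is truncated in this listing. A sketch under those assumptions, with DIRTY_SCALE standing in for the truncated constant:

#include <stdbool.h>
#include <stdio.h>

#define DIRTY_SCALE 64	/* hypothetical stand-in for the truncated line-451 constant */

static bool excess_dirty(unsigned int dirty_pages, unsigned int blocks_per_seg,
			 bool cp_locked)
{
	int factor = cp_locked ? 3 : 2;	/* line 445 */
	unsigned int threshold = blocks_per_seg * factor * DIRTY_SCALE;

	return dirty_pages > threshold;
}

int main(void)
{
	/* dirty_pages here would be dents + qdata + nodes + meta + imeta */
	printf("excess: %d\n", excess_dirty(70000, 512, false));
	return 0;
}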
462 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
464 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
468 if (!f2fs_available_free_memory(sbi, READ_EXTENT_CACHE))
469 f2fs_shrink_read_extent_tree(sbi,
473 if (!f2fs_available_free_memory(sbi, AGE_EXTENT_CACHE))
474 f2fs_shrink_age_extent_tree(sbi,
478 if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
479 f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
481 if (!f2fs_available_free_memory(sbi, FREE_NIDS))
482 f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS);
484 f2fs_build_free_nids(sbi, false, false);
486 if (excess_dirty_nats(sbi) || excess_dirty_threshold(sbi) ||
487 excess_prefree_segs(sbi) || !f2fs_space_for_roll_forward(sbi))
491 if (is_inflight_io(sbi, REQ_TIME) ||
492 (!f2fs_time_over(sbi, REQ_TIME) && f2fs_rwsem_is_locked(&sbi->cp_rwsem)))
496 if (f2fs_time_over(sbi, CP_TIME))
500 if (f2fs_available_free_memory(sbi, NAT_ENTRIES) &&
501 f2fs_available_free_memory(sbi, INO_ENTRIES))
505 if (test_opt(sbi, DATA_FLUSH) && from_bg) {
508 mutex_lock(&sbi->flush_lock);
511 f2fs_sync_dirty_inodes(sbi, FILE_INODE, false);
514 mutex_unlock(&sbi->flush_lock);
516 stat_inc_cp_call_count(sbi, BACKGROUND);
517 f2fs_sync_fs(sbi->sb, 1);
520 static int __submit_flush_wait(struct f2fs_sb_info *sbi,
525 trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
526 test_opt(sbi, FLUSH_MERGE), ret);
528 f2fs_update_iostat(sbi, NULL, FS_FLUSH_IO, 0);
532 static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
537 if (!f2fs_is_multi_device(sbi))
538 return __submit_flush_wait(sbi, sbi->sb->s_bdev);
540 for (i = 0; i < sbi->s_ndevs; i++) {
541 if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO))
543 ret = __submit_flush_wait(sbi, FDEV(i).bdev);
552 struct f2fs_sb_info *sbi = data;
553 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
568 ret = submit_flush_wait(sbi, cmd->ino);
584 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
586 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
590 if (test_opt(sbi, NOBARRIER))
593 if (!test_opt(sbi, FLUSH_MERGE)) {
595 ret = submit_flush_wait(sbi, ino);
602 f2fs_is_multi_device(sbi)) {
603 ret = submit_flush_wait(sbi, ino);
638 ret = submit_flush_wait(sbi, ino);
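Aside: f2fs_issue_flush() (lines 584-638) picks one of three paths: NOBARRIER skips the flush entirely, no FLUSH_MERGE (or a multi-device fs, lines 602-603) submits and waits inline, and otherwise the command is queued for issue_flush_thread to merge. A hypothetical user-space model of that dispatch:

#include <stdbool.h>
#include <stdio.h>

enum flush_path { FLUSH_SKIP, FLUSH_INLINE, FLUSH_MERGE_QUEUE };

static enum flush_path pick_flush_path(bool nobarrier, bool flush_merge,
				       bool multi_device)
{
	if (nobarrier)
		return FLUSH_SKIP;		/* line 590 */
	if (!flush_merge || multi_device)
		return FLUSH_INLINE;		/* lines 593, 602-603 */
	return FLUSH_MERGE_QUEUE;		/* handled by issue_flush_thread */
}

int main(void)
{
	printf("%d\n", pick_flush_path(false, true, false)); /* 2: merged */
	return 0;
}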
655 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
657 dev_t dev = sbi->sb->s_bdev->bd_dev;
660 if (SM_I(sbi)->fcc_info) {
661 fcc = SM_I(sbi)->fcc_info;
667 fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
674 SM_I(sbi)->fcc_info = fcc;
675 if (!test_opt(sbi, FLUSH_MERGE))
679 fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
691 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
693 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
703 SM_I(sbi)->fcc_info = NULL;
707 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
711 if (!f2fs_is_multi_device(sbi))
714 if (test_opt(sbi, NOBARRIER))
717 for (i = 1; i < sbi->s_ndevs; i++) {
720 if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
724 ret = __submit_flush_wait(sbi, FDEV(i).bdev);
730 f2fs_stop_checkpoint(sbi, false,
735 spin_lock(&sbi->dev_lock);
736 f2fs_clear_bit(i, (char *)&sbi->dirty_device);
737 spin_unlock(&sbi->dev_lock);
743 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
746 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
749 if (IS_CURSEG(sbi, segno))
756 struct seg_entry *sentry = get_seg_entry(sbi, segno);
760 f2fs_bug_on(sbi, 1);
766 if (__is_large_section(sbi)) {
767 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
769 get_valid_blocks(sbi, segno, true);
771 f2fs_bug_on(sbi, unlikely(!valid_blocks ||
772 valid_blocks == CAP_BLKS_PER_SEC(sbi)));
774 if (!IS_CURSEC(sbi, secno))
780 static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
783 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
790 struct seg_entry *sentry = get_seg_entry(sbi, segno);
796 valid_blocks = get_valid_blocks(sbi, segno, true);
798 clear_bit(GET_SEC_FROM_SEG(sbi, segno),
801 clear_bit(segno, SIT_I(sbi)->invalid_segmap);
804 if (__is_large_section(sbi)) {
805 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
808 valid_blocks == CAP_BLKS_PER_SEC(sbi)) {
813 if (!IS_CURSEC(sbi, secno))
824 static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
826 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
830 if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
833 usable_blocks = f2fs_usable_blks_in_seg(sbi, segno);
836 valid_blocks = get_valid_blocks(sbi, segno, false);
837 ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false);
839 if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
841 __locate_dirty_segment(sbi, segno, PRE);
842 __remove_dirty_segment(sbi, segno, DIRTY);
844 __locate_dirty_segment(sbi, segno, DIRTY);
847 __remove_dirty_segment(sbi, segno, DIRTY);
854 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
856 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
860 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
861 if (get_valid_blocks(sbi, segno, false))
863 if (IS_CURSEG(sbi, segno))
865 __locate_dirty_segment(sbi, segno, PRE);
866 __remove_dirty_segment(sbi, segno, DIRTY);
871 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
874 (overprovision_segments(sbi) - reserved_segments(sbi));
875 block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg;
876 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
883 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
884 se = get_seg_entry(sbi, segno);
886 holes[NODE] += f2fs_usable_blks_in_seg(sbi, segno) -
889 holes[DATA] += f2fs_usable_blks_in_seg(sbi, segno) -
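Aside: f2fs_get_unusable_blocks() (lines 871-889) walks every dirty segment and accumulates per-type "holes", i.e. usable-but-invalid blocks, then offsets them against the overprovision hole budget (ovp_hole_segs << log_blocks_per_seg). The listing omits the final combination step; the sketch below assumes the larger of the two hole counts is charged against the budget:

#include <stdio.h>

static unsigned int unusable_blocks(unsigned int holes_data,
				    unsigned int holes_node,
				    unsigned int ovp_holes)
{
	unsigned int unusable =
		holes_data > holes_node ? holes_data : holes_node;

	return unusable > ovp_holes ? unusable - ovp_holes : 0;
}

int main(void)
{
	/* hypothetical: 5000 data holes, 1200 node holes, 4096-block budget */
	printf("unusable: %u\n", unusable_blocks(5000, 1200, 4096));
	return 0;
}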
900 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
903 (overprovision_segments(sbi) - reserved_segments(sbi));
904 if (unusable > F2FS_OPTION(sbi).unusable_cap)
906 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
907 dirty_segments(sbi) > ovp_hole_segs)
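Aside: f2fs_disable_cp_again() (lines 900-907) refuses to keep checkpointing disabled when the unusable block count exceeds the configured unusable_cap, or, in the quick-disable case, when dirty segments outnumber the overprovision hole segments. A compact model; the error value is assumed, since the return lines are not shown:

#include <stdbool.h>
#include <stdio.h>

static int disable_cp_again(unsigned int unusable, unsigned int unusable_cap,
			    bool quick, unsigned int dirty_segs,
			    unsigned int ovp_hole_segs)
{
	if (unusable > unusable_cap)			/* line 904 */
		return -1;				/* assumed error return */
	if (quick && dirty_segs > ovp_hole_segs)	/* lines 906-907 */
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", disable_cp_again(100, 4096, true, 10, 32)); /* 0: ok */
	return 0;
}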
913 static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
915 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
919 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
920 if (get_valid_blocks(sbi, segno, false))
922 if (get_ckpt_valid_blocks(sbi, segno, false))
931 static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
935 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
939 f2fs_bug_on(sbi, !len);
963 static bool f2fs_check_discard_tree(struct f2fs_sb_info *sbi)
966 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
979 f2fs_info(sbi, "broken discard_rbtree, "
991 static struct discard_cmd *__lookup_discard_cmd(struct f2fs_sb_info *sbi,
994 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1083 static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
1086 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1098 f2fs_bug_on(sbi, dc->ref);
1106 KERN_INFO, sbi->sb->s_id,
1128 static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
1135 unsigned long offset, size, max_blocks = sbi->blocks_per_seg;
1139 segno = GET_SEGNO(sbi, blk);
1140 sentry = get_seg_entry(sbi, segno);
1141 offset = GET_BLKOFF_FROM_SEG0(sbi, blk);
1143 if (end < START_BLOCK(sbi, segno + 1))
1144 size = GET_BLKOFF_FROM_SEG0(sbi, end);
1149 f2fs_bug_on(sbi, offset != size);
1150 blk = START_BLOCK(sbi, segno + 1);
1155 static void __init_discard_policy(struct f2fs_sb_info *sbi,
1159 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1178 if (utilization(sbi) > dcc->discard_urgent_util) {
1199 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1204 static void __submit_zone_reset_cmd(struct f2fs_sb_info *sbi,
1209 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1229 __check_sit_bitmap(sbi, dc->di.lstart, dc->di.lstart + dc->di.len);
1237 f2fs_update_iostat(sbi, NULL, FS_ZONE_RESET_IO, dc->di.len * F2FS_BLKSIZE);
1242 static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
1249 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1259 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
1263 if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev)) {
1264 int devi = f2fs_bdev_index(sbi, bdev);
1269 if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
1270 __submit_zone_reset_cmd(sbi, dc, flag,
1302 if (time_to_inject(sbi, FAULT_DISCARD)) {
1319 f2fs_bug_on(sbi, !bio);
1338 __check_sit_bitmap(sbi, lstart, lstart + len);
1347 f2fs_update_iostat(sbi, NULL, FS_DISCARD_IO, len * F2FS_BLKSIZE);
1357 __update_discard_tree_range(sbi, bdev, lstart, start, len);
1362 static void __insert_discard_cmd(struct f2fs_sb_info *sbi,
1366 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1383 f2fs_bug_on(sbi, 1);
1387 dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
1399 static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
1402 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1407 __remove_discard_cmd(sbi, dc);
1422 __insert_discard_cmd(sbi, dc->bdev, blkaddr + 1,
1435 static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1439 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1504 __remove_discard_cmd(sbi, tdc);
1509 __insert_discard_cmd(sbi, bdev,
1522 static void __queue_zone_reset_cmd(struct f2fs_sb_info *sbi,
1528 mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
1529 __insert_discard_cmd(sbi, bdev, lblkstart, blkstart, blklen);
1530 mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
1534 static void __queue_discard_cmd(struct f2fs_sb_info *sbi,
1544 if (f2fs_is_multi_device(sbi)) {
1545 int devi = f2fs_target_device_index(sbi, blkstart);
1549 mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
1550 __update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
1551 mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
1554 static void __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
1557 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1579 if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) {
1585 err = __submit_discard_cmd(sbi, dpolicy, dc, issued);
1592 __remove_discard_cmd(sbi, dc);
1606 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1609 static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
1612 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1620 f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT);
1626 f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
1633 __issue_discard_cmd_orderly(sbi, dpolicy, &issued);
1643 f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi));
1646 f2fs_bug_on(sbi, dc->state != D_PREP);
1649 f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
1653 !is_idle(sbi, DISCARD_TIME)) {
1658 __submit_discard_cmd(sbi, dpolicy, dc, &issued);
1672 __wait_all_discard_cmd(sbi, dpolicy);
1682 static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
1684 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1694 f2fs_bug_on(sbi, dc->state != D_PREP);
1695 __remove_discard_cmd(sbi, dc);
1704 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi)
1706 __drop_discard_cmd(sbi);
1709 static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
1712 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1717 f2fs_bug_on(sbi, dc->state != D_DONE);
1722 __remove_discard_cmd(sbi, dc);
1729 static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
1733 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1753 __remove_discard_cmd(sbi, iter);
1763 trimmed += __wait_one_discard_bio(sbi, dc);
1770 static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1777 return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
1780 __init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, MIN_DISCARD_GRANULARITY);
1781 discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1782 __init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, MIN_DISCARD_GRANULARITY);
1783 discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1789 static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
1791 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1796 dc = __lookup_discard_cmd(sbi, blkaddr);
1798 if (dc && f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(dc->bdev)) {
1799 int devi = f2fs_bdev_index(sbi, dc->bdev);
1806 if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
1809 __submit_zone_reset_cmd(sbi, dc, REQ_SYNC,
1814 __wait_one_discard_bio(sbi, dc);
1821 __punch_discard_cmd(sbi, dc, blkaddr);
1830 __wait_one_discard_bio(sbi, dc);
1833 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
1835 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1847 * @sbi: the f2fs_sb_info data for discard cmd to issue
1853 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
1855 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1862 __init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
1864 __issue_discard_cmd(sbi, &dpolicy);
1865 dropped = __drop_discard_cmd(sbi);
1868 __wait_all_discard_cmd(sbi, NULL);
1870 f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
1876 struct f2fs_sb_info *sbi = data;
1877 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1891 if (sbi->gc_mode == GC_URGENT_HIGH ||
1892 !f2fs_available_free_memory(sbi, DISCARD_CACHE))
1893 __init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE,
1896 __init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
1904 __wait_all_discard_cmd(sbi, NULL);
1908 if (f2fs_readonly(sbi->sb))
1912 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) ||
1918 sb_start_intwrite(sbi->sb);
1920 issued = __issue_discard_cmd(sbi, &dpolicy);
1922 __wait_all_discard_cmd(sbi, &dpolicy);
1925 wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME);
1934 sb_end_intwrite(sbi->sb);
1941 static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
1949 if (f2fs_is_multi_device(sbi)) {
1950 devi = f2fs_target_device_index(sbi, blkstart);
1953 f2fs_err(sbi, "Invalid block %x", blkstart);
1960 if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
1966 f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
1967 devi, sbi->s_ndevs ? FDEV(devi).path : "",
1972 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) {
1978 __queue_zone_reset_cmd(sbi, bdev, blkstart, lblkstart, blklen);
1983 __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
1988 static int __issue_discard_async(struct f2fs_sb_info *sbi,
1992 if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
1993 return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
1995 __queue_discard_cmd(sbi, bdev, blkstart, blklen);
1999 static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
2009 bdev = f2fs_target_device(sbi, blkstart, NULL);
2014 f2fs_target_device(sbi, i, NULL);
2017 err = __issue_discard_async(sbi, bdev,
2027 se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
2028 offset = GET_BLKOFF_FROM_SEG0(sbi, i);
2030 if (f2fs_block_unit_discard(sbi) &&
2032 sbi->discard_blks--;
2036 err = __issue_discard_async(sbi, bdev, start, len);
2040 static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
2044 int max_blocks = sbi->blocks_per_seg;
2045 struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
2049 unsigned long *dmap = SIT_I(sbi)->tmp_map;
2053 struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
2056 if (se->valid_blocks == max_blocks || !f2fs_hw_support_discard(sbi) ||
2057 !f2fs_block_unit_discard(sbi))
2061 if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
2062 SM_I(sbi)->dcc_info->nr_discards >=
2063 SM_I(sbi)->dcc_info->max_discards)
2072 while (force || SM_I(sbi)->dcc_info->nr_discards <=
2073 SM_I(sbi)->dcc_info->max_discards) {
2089 de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
2096 SM_I(sbi)->dcc_info->nr_discards += end - start;
2107 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi)
2109 struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
2120 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
2122 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2126 for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
2127 __set_test_and_free(sbi, segno, false);
2131 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
2134 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2137 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2142 bool section_alignment = F2FS_OPTION(sbi).discard_unit ==
2145 if (f2fs_lfs_mode(sbi) && __is_large_section(sbi))
2155 start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
2156 if (start >= MAIN_SEGS(sbi))
2158 end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
2162 start = rounddown(start, sbi->segs_per_sec);
2163 end = roundup(end, sbi->segs_per_sec);
2171 if (!f2fs_realtime_discard_enable(sbi))
2179 if (!f2fs_sb_has_blkzoned(sbi) &&
2180 (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi))) {
2181 f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
2182 (end - start) << sbi->log_blocks_per_seg);
2186 secno = GET_SEC_FROM_SEG(sbi, start);
2187 start_segno = GET_SEG_FROM_SEC(sbi, secno);
2188 if (!IS_CURSEC(sbi, secno) &&
2189 !get_valid_blocks(sbi, start, true))
2190 f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
2191 sbi->segs_per_sec << sbi->log_blocks_per_seg);
2193 start = start_segno + sbi->segs_per_sec;
2201 if (!f2fs_block_unit_discard(sbi))
2212 sbi->blocks_per_seg, cur_pos);
2215 if (f2fs_sb_has_blkzoned(sbi) ||
2219 f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
2224 sbi->blocks_per_seg, cur_pos);
2230 if (cur_pos < sbi->blocks_per_seg)
2238 wake_up_discard_thread(sbi, false);
2241 int f2fs_start_discard_thread(struct f2fs_sb_info *sbi)
2243 dev_t dev = sbi->sb->s_bdev->bd_dev;
2244 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2247 if (!f2fs_realtime_discard_enable(sbi))
2250 dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
2260 static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
2265 if (SM_I(sbi)->dcc_info) {
2266 dcc = SM_I(sbi)->dcc_info;
2270 dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL);
2277 if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
2278 dcc->discard_granularity = sbi->blocks_per_seg;
2279 else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
2280 dcc->discard_granularity = BLKS_PER_SEC(sbi);
2292 dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
2304 SM_I(sbi)->dcc_info = dcc;
2306 err = f2fs_start_discard_thread(sbi);
2309 SM_I(sbi)->dcc_info = NULL;
2315 static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
2317 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2322 f2fs_stop_discard_thread(sbi);
2328 f2fs_issue_discard_timeout(sbi);
2331 SM_I(sbi)->dcc_info = NULL;
2334 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
2336 struct sit_info *sit_i = SIT_I(sbi);
2346 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
2349 struct seg_entry *se = get_seg_entry(sbi, segno);
2353 __mark_sit_entry_dirty(sbi, segno);
2356 static inline unsigned long long get_segment_mtime(struct f2fs_sb_info *sbi,
2359 unsigned int segno = GET_SEGNO(sbi, blkaddr);
2363 return get_seg_entry(sbi, segno)->mtime;
2366 static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr,
2370 unsigned int segno = GET_SEGNO(sbi, blkaddr);
2371 unsigned long long ctime = get_mtime(sbi, false);
2377 se = get_seg_entry(sbi, segno);
2385 if (ctime > SIT_I(sbi)->max_mtime)
2386 SIT_I(sbi)->max_mtime = ctime;
2389 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
2399 segno = GET_SEGNO(sbi, blkaddr);
2401 se = get_seg_entry(sbi, segno);
2403 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2405 f2fs_bug_on(sbi, (new_vblocks < 0 ||
2406 (new_vblocks > f2fs_usable_blks_in_seg(sbi, segno))));
2417 f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
2419 f2fs_bug_on(sbi, 1);
2423 f2fs_err(sbi, "Bitmap was wrongly set, blk:%u",
2425 f2fs_bug_on(sbi, 1);
2430 if (f2fs_block_unit_discard(sbi) &&
2432 sbi->discard_blks--;
2438 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
2448 f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
2450 f2fs_bug_on(sbi, 1);
2454 f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u",
2456 f2fs_bug_on(sbi, 1);
2459 } else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2467 spin_lock(&sbi->stat_lock);
2468 sbi->unusable_block_count++;
2469 spin_unlock(&sbi->stat_lock);
2473 if (f2fs_block_unit_discard(sbi) &&
2475 sbi->discard_blks++;
2480 __mark_sit_entry_dirty(sbi, segno);
2483 SIT_I(sbi)->written_valid_blocks += del;
2485 if (__is_large_section(sbi))
2486 get_sec_entry(sbi, segno)->valid_blocks += del;
2489 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
2491 unsigned int segno = GET_SEGNO(sbi, addr);
2492 struct sit_info *sit_i = SIT_I(sbi);
2494 f2fs_bug_on(sbi, addr == NULL_ADDR);
2498 f2fs_invalidate_internal_cache(sbi, addr);
2503 update_segment_mtime(sbi, addr, 0);
2504 update_sit_entry(sbi, addr, -1);
2507 locate_dirty_segment(sbi, segno);
2512 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
2514 struct sit_info *sit_i = SIT_I(sbi);
2524 segno = GET_SEGNO(sbi, blkaddr);
2525 se = get_seg_entry(sbi, segno);
2526 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2536 static unsigned short f2fs_curseg_valid_blocks(struct f2fs_sb_info *sbi, int type)
2538 struct curseg_info *curseg = CURSEG_I(sbi, type);
2540 if (sbi->ckpt->alloc_type[type] == SSR)
2541 return sbi->blocks_per_seg;
2548 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
2554 if (sbi->ckpt->alloc_type[i] != SSR && for_ra)
2556 le16_to_cpu(F2FS_CKPT(sbi)->cur_data_blkoff[i]);
2558 valid_sum_count += f2fs_curseg_valid_blocks(sbi, i);
2574 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
2576 if (unlikely(f2fs_cp_error(sbi)))
2578 return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno));
2581 void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
2584 struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2591 static void write_sum_page(struct f2fs_sb_info *sbi,
2594 f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr);
2597 static void write_current_sum_page(struct f2fs_sb_info *sbi,
2600 struct curseg_info *curseg = CURSEG_I(sbi, type);
2601 struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2623 static int is_next_segment_free(struct f2fs_sb_info *sbi,
2627 struct free_segmap_info *free_i = FREE_I(sbi);
2629 if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
2638 static void get_new_segment(struct f2fs_sb_info *sbi,
2641 struct free_segmap_info *free_i = FREE_I(sbi);
2643 unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
2644 unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
2645 unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
2653 if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
2655 GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
2656 if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
2660 secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
2661 if (secno >= MAIN_SECS(sbi)) {
2664 MAIN_SECS(sbi));
2665 f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
2680 MAIN_SECS(sbi));
2681 f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
2686 segno = GET_SEG_FROM_SEC(sbi, secno);
2687 zoneno = GET_ZONE_FROM_SEC(sbi, secno);
2692 if (sbi->secs_per_zone == 1)
2703 if (CURSEG_I(sbi, i)->zone == zoneno)
2709 hint = zoneno * sbi->secs_per_zone - 1;
2713 hint = (zoneno + 1) * sbi->secs_per_zone;
2719 f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
2720 __set_inuse(sbi, segno);
2725 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
2727 struct curseg_info *curseg = CURSEG_I(sbi, type);
2733 curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
2740 sanity_check_seg_type(sbi, seg_type);
2746 __set_sit_entry_type(sbi, seg_type, curseg->segno, modified);
2749 static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
2751 struct curseg_info *curseg = CURSEG_I(sbi, type);
2754 sanity_check_seg_type(sbi, seg_type);
2755 if (f2fs_need_rand_seg(sbi))
2756 return get_random_u32_below(MAIN_SECS(sbi) * sbi->segs_per_sec);
2759 if (__is_large_section(sbi))
2766 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2769 if (test_opt(sbi, NOHEAP) &&
2773 if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
2774 return SIT_I(sbi)->last_victim[ALLOC_NEXT];
2777 if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
2787 static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
2789 struct curseg_info *curseg = CURSEG_I(sbi, type);
2795 write_sum_page(sbi, curseg->sum_blk,
2796 GET_SUM_BLOCK(sbi, segno));
2800 if (test_opt(sbi, NOHEAP))
2803 segno = __get_next_segno(sbi, type);
2804 get_new_segment(sbi, &segno, new_sec, dir);
2806 reset_curseg(sbi, type, 1);
2808 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
2810 get_random_u32_inclusive(1, sbi->max_fragment_chunk);
2813 static int __next_free_blkoff(struct f2fs_sb_info *sbi,
2816 struct seg_entry *se = get_seg_entry(sbi, segno);
2818 unsigned long *target_map = SIT_I(sbi)->tmp_map;
2826 return __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
2829 static int f2fs_find_next_ssr_block(struct f2fs_sb_info *sbi,
2832 return __next_free_blkoff(sbi, seg->segno, seg->next_blkoff + 1);
2835 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
2837 return __next_free_blkoff(sbi, segno, 0) < sbi->blocks_per_seg;
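Aside: __next_free_blkoff() (lines 2813-2826) scans for a block slot that is free in both the current and the checkpointed validity bitmaps, which is what makes a segment reusable for SSR; f2fs_segment_has_free_slot() (line 2837) just checks that such an offset exists before blocks_per_seg. A user-space sketch with byte arrays standing in for the kernel bitmaps:

#include <stdio.h>

static int next_free_blkoff(const unsigned char *cur, const unsigned char *ckpt,
			    int blocks_per_seg, int start)
{
	int i;

	/* the kernel ORs the two maps and finds the next zero bit */
	for (i = start; i < blocks_per_seg; i++)
		if (!cur[i] && !ckpt[i])
			return i;
	return blocks_per_seg;	/* no free slot in this segment */
}

int main(void)
{
	unsigned char cur[8]  = {1, 1, 0, 1, 0, 0, 1, 0};
	unsigned char ckpt[8] = {1, 0, 1, 0, 0, 1, 0, 0};

	printf("%d\n", next_free_blkoff(cur, ckpt, 8, 0)); /* prints 4 */
	return 0;
}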
2844 static void change_curseg(struct f2fs_sb_info *sbi, int type)
2846 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2847 struct curseg_info *curseg = CURSEG_I(sbi, type);
2852 write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, curseg->segno));
2854 __set_test_and_inuse(sbi, new_segno);
2857 __remove_dirty_segment(sbi, new_segno, PRE);
2858 __remove_dirty_segment(sbi, new_segno, DIRTY);
2861 reset_curseg(sbi, type, 1);
2863 curseg->next_blkoff = __next_free_blkoff(sbi, curseg->segno, 0);
2865 sum_page = f2fs_get_sum_page(sbi, new_segno);
2876 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2879 static void get_atssr_segment(struct f2fs_sb_info *sbi, int type,
2883 struct curseg_info *curseg = CURSEG_I(sbi, type);
2887 if (get_ssr_segment(sbi, type, alloc_mode, age)) {
2888 struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno);
2891 change_curseg(sbi, type);
2895 new_curseg(sbi, type, true);
2897 stat_inc_seg_type(sbi, curseg);
2900 static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
2902 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC);
2904 if (!sbi->am.atgc_enabled)
2907 f2fs_down_read(&SM_I(sbi)->curseg_lock);
2910 down_write(&SIT_I(sbi)->sentry_lock);
2912 get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC, CURSEG_COLD_DATA, SSR, 0);
2914 up_write(&SIT_I(sbi)->sentry_lock);
2917 f2fs_up_read(&SM_I(sbi)->curseg_lock);
2920 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
2922 __f2fs_init_atgc_curseg(sbi);
2925 static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2927 struct curseg_info *curseg = CURSEG_I(sbi, type);
2933 if (get_valid_blocks(sbi, curseg->segno, false)) {
2934 write_sum_page(sbi, curseg->sum_blk,
2935 GET_SUM_BLOCK(sbi, curseg->segno));
2937 mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2938 __set_test_and_free(sbi, curseg->segno, true);
2939 mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2945 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi)
2947 __f2fs_save_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
2949 if (sbi->am.atgc_enabled)
2950 __f2fs_save_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
2953 static void __f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2955 struct curseg_info *curseg = CURSEG_I(sbi, type);
2960 if (get_valid_blocks(sbi, curseg->segno, false))
2963 mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2964 __set_test_and_inuse(sbi, curseg->segno);
2965 mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2970 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi)
2972 __f2fs_restore_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
2974 if (sbi->am.atgc_enabled)
2975 __f2fs_restore_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
2978 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2981 struct curseg_info *curseg = CURSEG_I(sbi, type);
2987 sanity_check_seg_type(sbi, seg_type);
2990 if (!f2fs_get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) {
3017 if (!f2fs_get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) {
3024 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
3025 segno = get_free_segment(sbi);
3034 static bool need_new_seg(struct f2fs_sb_info *sbi, int type)
3036 struct curseg_info *curseg = CURSEG_I(sbi, type);
3038 if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
3042 is_next_segment_free(sbi, curseg, type) &&
3043 likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
3045 if (!f2fs_need_SSR(sbi) || !get_ssr_segment(sbi, type, SSR, 0))
3050 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
3053 struct curseg_info *curseg = CURSEG_I(sbi, type);
3056 f2fs_down_read(&SM_I(sbi)->curseg_lock);
3058 down_write(&SIT_I(sbi)->sentry_lock);
3060 segno = CURSEG_I(sbi, type)->segno;
3064 if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0))
3065 change_curseg(sbi, type);
3067 new_curseg(sbi, type, true);
3069 stat_inc_seg_type(sbi, curseg);
3071 locate_dirty_segment(sbi, segno);
3073 up_write(&SIT_I(sbi)->sentry_lock);
3076 f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u",
3080 f2fs_up_read(&SM_I(sbi)->curseg_lock);
3083 static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
3086 struct curseg_info *curseg = CURSEG_I(sbi, type);
3091 !get_valid_blocks(sbi, curseg->segno, new_sec) &&
3092 !get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
3096 new_curseg(sbi, type, true);
3097 stat_inc_seg_type(sbi, curseg);
3098 locate_dirty_segment(sbi, old_segno);
3101 void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force)
3103 f2fs_down_read(&SM_I(sbi)->curseg_lock);
3104 down_write(&SIT_I(sbi)->sentry_lock);
3105 __allocate_new_segment(sbi, type, true, force);
3106 up_write(&SIT_I(sbi)->sentry_lock);
3107 f2fs_up_read(&SM_I(sbi)->curseg_lock);
3110 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
3114 f2fs_down_read(&SM_I(sbi)->curseg_lock);
3115 down_write(&SIT_I(sbi)->sentry_lock);
3117 __allocate_new_segment(sbi, i, false, false);
3118 up_write(&SIT_I(sbi)->sentry_lock);
3119 f2fs_up_read(&SM_I(sbi)->curseg_lock);
3122 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
3128 down_write(&SIT_I(sbi)->sentry_lock);
3130 if (add_discard_addrs(sbi, cpc, true)) {
3135 up_write(&SIT_I(sbi)->sentry_lock);
3141 static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
3145 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
3158 f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi));
3179 err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
3185 __remove_discard_cmd(sbi, dc);
3189 trimmed += __wait_all_discard_cmd(sbi, NULL);
3196 __remove_discard_cmd(sbi, dc);
3209 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
3219 bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
3221 if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
3224 if (end < MAIN_BLKADDR(sbi))
3227 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
3228 f2fs_warn(sbi, "Found FS corruption, run fsck to fix.");
3233 start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
3234 end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
3235 GET_SEGNO(sbi, end);
3237 start_segno = rounddown(start_segno, sbi->segs_per_sec);
3238 end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
3246 if (sbi->discard_blks == 0)
3249 f2fs_down_write(&sbi->gc_lock);
3250 stat_inc_cp_call_count(sbi, TOTAL_CALL);
3251 err = f2fs_write_checkpoint(sbi, &cpc);
3252 f2fs_up_write(&sbi->gc_lock);
3262 if (f2fs_realtime_discard_enable(sbi))
3265 start_block = START_BLOCK(sbi, start_segno);
3266 end_block = START_BLOCK(sbi, end_segno + 1);
3268 __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
3269 trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
3272 trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
3319 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3325 if (ei.age <= sbi->hot_data_age_threshold)
3327 if (ei.age <= sbi->warm_data_age_threshold)
3344 if (fio->sbi->am.atgc_enabled &&
3346 (fio->sbi->gc_mode != GC_URGENT_HIGH))
3375 switch (F2FS_OPTION(fio->sbi).active_logs) {
3386 f2fs_bug_on(fio->sbi, true);
3398 static void f2fs_randomize_chunk(struct f2fs_sb_info *sbi,
3406 get_random_u32_inclusive(1, sbi->max_fragment_chunk);
3408 get_random_u32_inclusive(1, sbi->max_fragment_hole);
3411 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3416 struct sit_info *sit_i = SIT_I(sbi);
3417 struct curseg_info *curseg = CURSEG_I(sbi, type);
3423 f2fs_down_read(&SM_I(sbi)->curseg_lock);
3429 f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO);
3430 se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr));
3431 sanity_check_seg_type(sbi, se->type);
3432 f2fs_bug_on(sbi, IS_NODESEG(se->type));
3434 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3436 f2fs_bug_on(sbi, curseg->next_blkoff >= sbi->blocks_per_seg);
3438 f2fs_wait_discard_bio(sbi, *new_blkaddr);
3442 curseg->next_blkoff = f2fs_find_next_ssr_block(sbi, curseg);
3445 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
3446 f2fs_randomize_chunk(sbi, curseg);
3448 if (curseg->next_blkoff >= f2fs_usable_blks_in_seg(sbi, curseg->segno))
3450 stat_inc_block_count(sbi, curseg);
3453 old_mtime = get_segment_mtime(sbi, old_blkaddr);
3455 update_segment_mtime(sbi, old_blkaddr, 0);
3458 update_segment_mtime(sbi, *new_blkaddr, old_mtime);
3464 update_sit_entry(sbi, *new_blkaddr, 1);
3465 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
3466 update_sit_entry(sbi, old_blkaddr, -1);
3474 get_atssr_segment(sbi, type, se->type,
3477 if (need_new_seg(sbi, type))
3478 new_curseg(sbi, type, false);
3480 change_curseg(sbi, type);
3481 stat_inc_seg_type(sbi, curseg);
3489 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3490 locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
3493 atomic64_inc(&sbi->allocated_data_blocks);
3498 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
3500 f2fs_inode_chksum_set(sbi, page);
3506 if (F2FS_IO_ALIGNED(sbi))
3511 io = sbi->write_io[fio->type] + fio->temp;
3519 f2fs_up_read(&SM_I(sbi)->curseg_lock);
3522 void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
3525 if (!f2fs_is_multi_device(sbi))
3529 unsigned int devidx = f2fs_target_device_index(sbi, blkaddr);
3533 f2fs_set_dirty_device(sbi, ino, devidx, FLUSH_INO);
3536 if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
3537 spin_lock(&sbi->dev_lock);
3538 f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
3539 spin_unlock(&sbi->dev_lock);
3552 bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA);
3555 f2fs_down_read(&fio->sbi->io_order_lock);
3557 f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
3559 if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
3560 f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr);
3569 f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1);
3572 f2fs_up_read(&fio->sbi->io_order_lock);
3575 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3579 .sbi = sbi,
3591 if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
3597 stat_inc_meta_count(sbi, page->index);
3598 f2fs_update_iostat(sbi, NULL, io_type, F2FS_BLKSIZE);
3608 f2fs_update_iostat(fio->sbi, NULL, fio->io_type, F2FS_BLKSIZE);
3614 struct f2fs_sb_info *sbi = fio->sbi;
3617 f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
3624 f2fs_update_iostat(sbi, dn->inode, fio->io_type, F2FS_BLKSIZE);
3630 struct f2fs_sb_info *sbi = fio->sbi;
3637 segno = GET_SEGNO(sbi, fio->new_blkaddr);
3639 if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
3640 set_sbi_flag(sbi, SBI_NEED_FSCK);
3641 f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.",
3644 f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE);
3648 if (f2fs_cp_error(sbi)) {
3654 f2fs_truncate_meta_inode_pages(sbi, fio->new_blkaddr, 1);
3656 stat_inc_inplace_blocks(fio->sbi);
3658 if (fio->bio && !IS_F2FS_IPU_NOCACHE(sbi))
3663 f2fs_update_device_state(fio->sbi, fio->ino,
3665 f2fs_update_iostat(fio->sbi, fio->page->mapping->host,
3681 static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi,
3687 if (CURSEG_I(sbi, i)->segno == segno)
3693 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
3698 struct sit_info *sit_i = SIT_I(sbi);
3706 segno = GET_SEGNO(sbi, new_blkaddr);
3707 se = get_seg_entry(sbi, segno);
3710 f2fs_down_write(&SM_I(sbi)->curseg_lock);
3714 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
3721 if (IS_CURSEG(sbi, segno)) {
3723 type = __f2fs_get_curseg(sbi, segno);
3724 f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
3730 f2fs_bug_on(sbi, !IS_DATASEG(type));
3731 curseg = CURSEG_I(sbi, type);
3743 change_curseg(sbi, type);
3746 curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
3751 update_segment_mtime(sbi, new_blkaddr, 0);
3752 update_sit_entry(sbi, new_blkaddr, 1);
3754 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
3755 f2fs_invalidate_internal_cache(sbi, old_blkaddr);
3757 update_segment_mtime(sbi, old_blkaddr, 0);
3758 update_sit_entry(sbi, old_blkaddr, -1);
3761 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3762 locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
3764 locate_dirty_segment(sbi, old_cursegno);
3769 change_curseg(sbi, type);
3777 f2fs_up_write(&SM_I(sbi)->curseg_lock);
3780 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3789 f2fs_do_replace_block(sbi, &sum, old_addr, new_addr,
3799 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
3802 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
3804 f2fs_submit_merged_ipu_write(sbi, NULL, page);
3807 f2fs_bug_on(sbi, locked && PageWriteback(page));
3816 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3825 cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
3835 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3844 f2fs_truncate_meta_inode_pages(sbi, blkaddr, len);
3847 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
3849 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3856 start = start_sum_block(sbi);
3858 page = f2fs_get_meta_page(sbi, start++);
3864 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
3868 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
3877 seg_i = CURSEG_I(sbi, i);
3881 reset_curseg(sbi, i, 0);
3886 blk_off = sbi->blocks_per_seg;
3901 page = f2fs_get_meta_page(sbi, start++);
3912 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
3914 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3928 if (__exist_node_summaries(sbi))
3929 blk_addr = sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type);
3931 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
3937 if (__exist_node_summaries(sbi))
3938 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
3941 blk_addr = GET_SUM_BLOCK(sbi, segno);
3944 new = f2fs_get_meta_page(sbi, blk_addr);
3950 if (__exist_node_summaries(sbi)) {
3954 for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
3959 err = f2fs_restore_node_summary(sbi, segno, sum);
3966 curseg = CURSEG_I(sbi, type);
3977 reset_curseg(sbi, type, 0);
3986 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
3988 struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
3989 struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
3993 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
3994 int npages = f2fs_npages_for_summary_flush(sbi, true);
3997 f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages,
4001 err = read_compacted_summaries(sbi);
4007 if (__exist_node_summaries(sbi))
4008 f2fs_ra_meta_pages(sbi,
4009 sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type),
4013 err = read_normal_summaries(sbi, type);
4021 f2fs_err(sbi, "invalid journal entries nats %u sits %u",
4029 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
4038 page = f2fs_grab_meta_page(sbi, blkaddr++);
4043 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
4048 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
4054 seg_i = CURSEG_I(sbi, i);
4055 for (j = 0; j < f2fs_curseg_valid_blocks(sbi, i); j++) {
4057 page = f2fs_grab_meta_page(sbi, blkaddr++);
4081 static void write_normal_summaries(struct f2fs_sb_info *sbi,
4092 write_current_sum_page(sbi, i, blkaddr + (i - type));
4095 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4097 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
4098 write_compacted_summaries(sbi, start_blk);
4100 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
4103 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4105 write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
4130 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
4133 return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
4136 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
4139 struct sit_info *sit_i = SIT_I(sbi);
4143 src_off = current_sit_addr(sbi, start);
4144 dst_off = next_sit_addr(sbi, src_off);
4146 page = f2fs_grab_meta_page(sbi, dst_off);
4147 seg_info_to_sit_page(sbi, page, start);
4209 static void add_sits_in_set(struct f2fs_sb_info *sbi)
4211 struct f2fs_sm_info *sm_info = SM_I(sbi);
4213 unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
4216 for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
4220 static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
4222 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4232 dirtied = __mark_sit_entry_dirty(sbi, segno);
4235 add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
4245 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
4247 struct sit_info *sit_i = SIT_I(sbi);
4249 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4252 struct list_head *head = &SM_I(sbi)->sit_entry_set;
4253 bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS);
4265 add_sits_in_set(sbi);
4274 remove_sits_in_journal(sbi);
4286 (unsigned long)MAIN_SEGS(sbi));
4296 page = get_next_sit_page(sbi, start_segno);
4304 se = get_seg_entry(sbi, segno);
4308 f2fs_bug_on(sbi, 1);
4314 add_discard_addrs(sbi, cpc, false);
4320 f2fs_bug_on(sbi, offset < 0);
4325 check_block_count(sbi, segno,
4331 check_block_count(sbi, segno,
4345 f2fs_bug_on(sbi, ses->entry_cnt);
4349 f2fs_bug_on(sbi, !list_empty(head));
4350 f2fs_bug_on(sbi, sit_i->dirty_sentries);
4356 add_discard_addrs(sbi, cpc, false);
4362 set_prefree_as_free_segments(sbi);
4365 static int build_sit_info(struct f2fs_sb_info *sbi)
4367 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4372 unsigned int discard_map = f2fs_block_unit_discard(sbi) ? 1 : 0;
4375 sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
4379 SM_I(sbi)->sit_info = sit_i;
4382 f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry),
4383 MAIN_SEGS(sbi)),
4388 main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4389 sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size,
4395 bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (3 + discard_map);
4397 bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * (2 + discard_map);
4399 sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4405 for (start = 0; start < MAIN_SEGS(sbi); start++) {
4423 sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
4427 if (__is_large_section(sbi)) {
4429 f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
4430 MAIN_SECS(sbi)),
4440 sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
4441 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
4453 sit_i->invalid_segmap = f2fs_kvzalloc(sbi,
4460 sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
4465 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
4471 static int build_free_segmap(struct f2fs_sb_info *sbi)
4477 free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
4481 SM_I(sbi)->free_info = free_i;
4483 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4484 free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
4488 sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4489 free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
4498 free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
4505 static int build_curseg(struct f2fs_sb_info *sbi)
4510 array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE,
4515 SM_I(sbi)->curseg_array = array;
4519 array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL);
4523 array[i].journal = f2fs_kzalloc(sbi,
4537 return restore_curseg_summaries(sbi);
4540 static int build_sit_entries(struct f2fs_sb_info *sbi)
4542 struct sit_info *sit_i = SIT_I(sbi);
4543 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4547 int sit_blk_cnt = SIT_BLK_CNT(sbi);
4554 readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS,
4560 for (; start < end && start < MAIN_SEGS(sbi); start++) {
4565 page = get_current_sit_page(sbi, start);
4572 err = check_block_count(sbi, start, &sit);
4578 f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
4580 f2fs_handle_error(sbi,
4587 if (f2fs_block_unit_discard(sbi)) {
4589 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4596 sbi->discard_blks +=
4597 sbi->blocks_per_seg -
4602 if (__is_large_section(sbi))
4603 get_sec_entry(sbi, start)->valid_blocks +=
4614 if (start >= MAIN_SEGS(sbi)) {
4615 f2fs_err(sbi, "Wrong journal entry on segno %u",
4618 f2fs_handle_error(sbi, ERROR_CORRUPTED_JOURNAL);
4629 err = check_block_count(sbi, start, &sit);
4635 f2fs_err(sbi, "Invalid segment type: %u, segno: %u",
4638 f2fs_handle_error(sbi, ERROR_INCONSISTENT_SUM_TYPE);
4644 if (f2fs_block_unit_discard(sbi)) {
4645 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4650 sbi->discard_blks += old_valid_blocks;
4651 sbi->discard_blks -= se->valid_blocks;
4655 if (__is_large_section(sbi)) {
4656 get_sec_entry(sbi, start)->valid_blocks +=
4658 get_sec_entry(sbi, start)->valid_blocks -=
4667 if (sit_valid_blocks[NODE] != valid_node_count(sbi)) {
4668 f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
4669 sit_valid_blocks[NODE], valid_node_count(sbi));
4670 f2fs_handle_error(sbi, ERROR_INCONSISTENT_NODE_COUNT);
4675 valid_user_blocks(sbi)) {
4676 f2fs_err(sbi, "SIT is corrupted data# %u %u vs %u",
4678 valid_user_blocks(sbi));
4679 f2fs_handle_error(sbi, ERROR_INCONSISTENT_BLOCK_COUNT);
4686 static void init_free_segmap(struct f2fs_sb_info *sbi)
4692 for (start = 0; start < MAIN_SEGS(sbi); start++) {
4693 if (f2fs_usable_blks_in_seg(sbi, start) == 0)
4695 sentry = get_seg_entry(sbi, start);
4697 __set_free(sbi, start);
4699 SIT_I(sbi)->written_valid_blocks +=
4705 struct curseg_info *curseg_t = CURSEG_I(sbi, type);
4707 __set_test_and_inuse(sbi, curseg_t->segno);
4711 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
4713 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4714 struct free_segmap_info *free_i = FREE_I(sbi);
4720 segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
4721 if (segno >= MAIN_SEGS(sbi))
4724 valid_blocks = get_valid_blocks(sbi, segno, false);
4725 usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
4729 f2fs_bug_on(sbi, 1);
4733 __locate_dirty_segment(sbi, segno, DIRTY);
4737 if (!__is_large_section(sbi))
4741 for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
4742 valid_blocks = get_valid_blocks(sbi, segno, true);
4743 secno = GET_SEC_FROM_SEG(sbi, segno);
4745 if (!valid_blocks || valid_blocks == CAP_BLKS_PER_SEC(sbi))
4747 if (IS_CURSEC(sbi, secno))
4754 static int init_victim_secmap(struct f2fs_sb_info *sbi)
4756 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4757 unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4759 dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4763 dirty_i->pinned_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4772 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
4778 dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
4783 SM_I(sbi)->dirty_info = dirty_i;
4786 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4789 dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
4795 if (__is_large_section(sbi)) {
4796 bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4797 dirty_i->dirty_secmap = f2fs_kvzalloc(sbi,
4803 init_dirty_segmap(sbi);
4804 return init_victim_secmap(sbi);
4807 static int sanity_check_curseg(struct f2fs_sb_info *sbi)
4816 struct curseg_info *curseg = CURSEG_I(sbi, i);
4817 struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
4820 if (f2fs_sb_has_readonly(sbi) &&
4824 sanity_check_seg_type(sbi, curseg->seg_type);
4827 f2fs_err(sbi,
4830 f2fs_handle_error(sbi, ERROR_INVALID_CURSEG);
4840 for (blkofs += 1; blkofs < sbi->blocks_per_seg; blkofs++) {
4844 f2fs_err(sbi,
4848 f2fs_handle_error(sbi, ERROR_INVALID_CURSEG);
4857 static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
4863 unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4871 wp_segno = GET_SEGNO(sbi, wp_block);
4872 wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
4874 zone_segno = GET_SEGNO(sbi, zone_block);
4875 zone_secno = GET_SEC_FROM_SEG(sbi, zone_segno);
4877 if (zone_segno >= MAIN_SEGS(sbi))
4885 if (zone_secno == GET_SEC_FROM_SEG(sbi,
4886 CURSEG_I(sbi, i)->segno))
4893 for (s = sbi->segs_per_sec - 1; s >= 0; s--) {
4895 se = get_seg_entry(sbi, segno);
4896 for (b = sbi->blocks_per_seg - 1; b >= 0; b--)
4898 last_valid_block = START_BLOCK(sbi, segno) + b;
4918 f2fs_notice(sbi,
4922 ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
4925 f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
4938 f2fs_notice(sbi, "Valid blocks are not aligned with write pointer: "
4940 GET_SEGNO(sbi, last_valid_block),
4941 GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
4951 f2fs_err(sbi, "Fill up zone failed: %s (errno=%d)",
4954 f2fs_err(sbi, "Finishing zone failed: %s (errno=%d)",
4961 static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi,
4966 for (i = 0; i < sbi->s_ndevs; i++) {
4969 if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr &&
4984 static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
4986 struct curseg_info *cs = CURSEG_I(sbi, type);
4991 unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4995 cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
4996 cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
4998 zbd = get_target_zoned_dev(sbi, cs_zone_block);
5008 f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
5017 wp_segno = GET_SEGNO(sbi, wp_block);
5018 wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
5025 f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
5029 f2fs_notice(sbi, "Assign new section to curseg[%d]: "
5032 f2fs_allocate_new_section(sbi, type, true);
5035 if (check_zone_write_pointer(sbi, zbd, &zone))
5039 cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
5040 cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
5042 zbd = get_target_zoned_dev(sbi, cs_zone_block);
5051 f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
5060 f2fs_notice(sbi,
5064 err = __f2fs_issue_discard_zone(sbi, zbd->bdev, cs_zone_block,
5067 f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
5076 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
5081 ret = fix_curseg_write_pointer(sbi, i);
5090 struct f2fs_sb_info *sbi;
5101 return check_zone_write_pointer(args->sbi, args->fdev, zone);
5104 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
5109 for (i = 0; i < sbi->s_ndevs; i++) {
5113 args.sbi = sbi;
5133 struct f2fs_sb_info *sbi, unsigned int segno)
5138 if (!sbi->unusable_blocks_per_sec)
5139 return sbi->blocks_per_seg;
5141 secno = GET_SEC_FROM_SEG(sbi, segno);
5142 seg_start = START_BLOCK(sbi, segno);
5143 sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
5144 sec_cap_blkaddr = sec_start_blkaddr + CAP_BLKS_PER_SEC(sbi);
5154 if (seg_start + sbi->blocks_per_seg > sec_cap_blkaddr)
5157 return sbi->blocks_per_seg;
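Aside: the zoned-capacity helper around lines 5133-5157 caps a segment's usable block count at the section's capacity boundary (sec_cap_blkaddr); segments straddling the boundary are truncated and, presumably, segments entirely past it yield zero (that branch is not shown in this listing). A sketch of that clamping:

#include <stdio.h>

static unsigned int usable_blks_in_seg(unsigned int seg_start,
				       unsigned int blocks_per_seg,
				       unsigned int sec_cap_blkaddr)
{
	if (seg_start >= sec_cap_blkaddr)
		return 0;	/* assumed: segment lies past zone capacity */
	if (seg_start + blocks_per_seg > sec_cap_blkaddr)	/* line 5154 */
		return sec_cap_blkaddr - seg_start;
	return blocks_per_seg;					/* line 5157 */
}

int main(void)
{
	/* hypothetical 512-block segment crossing a capacity edge at 768 */
	printf("%u\n", usable_blks_in_seg(512, 512, 768)); /* prints 256 */
	return 0;
}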
5160 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
5165 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
5170 static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi,
5177 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
5180 if (f2fs_sb_has_blkzoned(sbi))
5181 return f2fs_usable_zone_blks_in_seg(sbi, segno);
5183 return sbi->blocks_per_seg;
5186 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
5189 if (f2fs_sb_has_blkzoned(sbi))
5190 return CAP_SEGS_PER_SEC(sbi);
5192 return sbi->segs_per_sec;
5198 static void init_min_max_mtime(struct f2fs_sb_info *sbi)
5200 struct sit_info *sit_i = SIT_I(sbi);
5207 for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
5211 for (i = 0; i < sbi->segs_per_sec; i++)
5212 mtime += get_seg_entry(sbi, segno + i)->mtime;
5214 mtime = div_u64(mtime, sbi->segs_per_sec);
5219 sit_i->max_mtime = get_mtime(sbi, false);
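Aside: init_min_max_mtime() (lines 5198-5219) seeds GC age tracking by averaging the per-segment modification times across each section (the min-tracking lines are not shown here, but the function name implies the smallest average becomes min_mtime), while max_mtime is simply the current fs mtime (line 5219). The averaging step, sketched:

#include <stdio.h>

static unsigned long long section_mtime(const unsigned long long *seg_mtime,
					unsigned int segs_per_sec)
{
	unsigned long long sum = 0;
	unsigned int i;

	for (i = 0; i < segs_per_sec; i++)	/* lines 5211-5212 */
		sum += seg_mtime[i];
	return sum / segs_per_sec;		/* line 5214 */
}

int main(void)
{
	unsigned long long mtimes[4] = {100, 140, 120, 160}; /* hypothetical */

	printf("%llu\n", section_mtime(mtimes, 4)); /* prints 130 */
	return 0;
}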
5224 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
5226 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
5227 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
5231 sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
5236 sbi->sm_info = sm_info;
5249 if (!f2fs_lfs_mode(sbi))
5253 sm_info->min_seq_blocks = sbi->blocks_per_seg;
5255 sm_info->min_ssr_sections = reserved_sections(sbi);
5261 err = f2fs_create_flush_cmd_control(sbi);
5265 err = create_discard_cmd_control(sbi);
5269 err = build_sit_info(sbi);
5272 err = build_free_segmap(sbi);
5275 err = build_curseg(sbi);
5280 err = build_sit_entries(sbi);
5284 init_free_segmap(sbi);
5285 err = build_dirty_segmap(sbi);
5289 err = sanity_check_curseg(sbi);
5293 init_min_max_mtime(sbi);
5297 static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
5300 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5308 static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
5310 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5316 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
5318 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5326 discard_dirty_segmap(sbi, i);
5328 if (__is_large_section(sbi)) {
5334 destroy_victim_secmap(sbi);
5335 SM_I(sbi)->dirty_info = NULL;
5339 static void destroy_curseg(struct f2fs_sb_info *sbi)
5341 struct curseg_info *array = SM_I(sbi)->curseg_array;
5346 SM_I(sbi)->curseg_array = NULL;
5354 static void destroy_free_segmap(struct f2fs_sb_info *sbi)
5356 struct free_segmap_info *free_i = SM_I(sbi)->free_info;
5360 SM_I(sbi)->free_info = NULL;
5366 static void destroy_sit_info(struct f2fs_sb_info *sbi)
5368 struct sit_info *sit_i = SIT_I(sbi);
5381 SM_I(sbi)->sit_info = NULL;
5390 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
5392 struct f2fs_sm_info *sm_info = SM_I(sbi);
5396 f2fs_destroy_flush_cmd_control(sbi, true);
5397 destroy_discard_cmd_control(sbi);
5398 destroy_dirty_segmap(sbi);
5399 destroy_curseg(sbi);
5400 destroy_free_segmap(sbi);
5401 destroy_sit_info(sbi);
5402 sbi->sm_info = NULL;