Lines matching refs:sbi (fs/f2fs/checkpoint.c)

26 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
28 f2fs_build_fault_attr(sbi, 0, 0);
29 set_ckpt_flags(sbi, CP_ERROR_FLAG);
31 f2fs_flush_merged_writes(sbi);
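
The three fragments above are f2fs_stop_checkpoint(): it disables fault injection, latches CP_ERROR_FLAG into the checkpoint flags, and flushes merged writes so nothing half-finished lingers in the bio caches. A minimal sketch of a call site; the surrounding error test is an assumption, not taken from the listing:

    /* Sketch: on an unrecoverable metadata error, freeze checkpointing.
     * Afterwards f2fs_cp_error(sbi) is true and writers bail with -EIO,
     * as the __f2fs_write_meta_page() fragments below show. */
    if (unlikely(fatal_io_error))               /* hypothetical condition */
            f2fs_stop_checkpoint(sbi, false);   /* false: not in bio end_io */
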
37 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
39 struct address_space *mapping = META_MAPPING(sbi);
53 static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
56 struct address_space *mapping = META_MAPPING(sbi);
59 .sbi = sbi,
89 f2fs_update_iostat(sbi, FS_META_READ_IO, F2FS_BLKSIZE);
105 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
107 return __get_meta_page(sbi, index, true);
110 struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index)
116 page = __get_meta_page(sbi, index, true);
121 f2fs_stop_checkpoint(sbi, false);
127 struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
129 return __get_meta_page(sbi, index, false);
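
Lines 37-129 are the meta-page accessors: f2fs_grab_meta_page() takes a locked page in META_MAPPING(sbi) without issuing I/O, __get_meta_page() reads the block (accounting it under FS_META_READ_IO), and the public wrappers differ only in whether the address is verified (f2fs_get_tmp_page passes false) and whether -ENOMEM is retried (f2fs_get_meta_page_retry, which stops the checkpoint if the read ultimately fails). A minimal caller-side sketch, assuming the usual locked-page return convention; dst and blkaddr are placeholders:

    struct page *page = f2fs_get_meta_page(sbi, blkaddr);

    if (IS_ERR(page))
            return PTR_ERR(page);       /* e.g. -EIO on a failed read */
    /* page is locked and up to date: copy out, then unlock and release */
    memcpy(dst, page_address(page), F2FS_BLKSIZE);
    f2fs_put_page(page, 1);
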
132 static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr,
142 segno = GET_SEGNO(sbi, blkaddr);
143 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
144 se = get_seg_entry(sbi, segno);
148 f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d",
150 set_sbi_flag(sbi, SBI_NEED_FSCK);
155 f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d",
157 set_sbi_flag(sbi, SBI_NEED_FSCK);
163 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
170 if (unlikely(blkaddr >= SIT_BLK_CNT(sbi)))
174 if (unlikely(blkaddr >= MAIN_BLKADDR(sbi) ||
175 blkaddr < SM_I(sbi)->ssa_blkaddr))
179 if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr ||
180 blkaddr < __start_cp_addr(sbi)))
184 if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
185 blkaddr < MAIN_BLKADDR(sbi)))
192 if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
193 blkaddr < MAIN_BLKADDR(sbi))) {
194 f2fs_warn(sbi, "access invalid blkaddr:%u",
196 set_sbi_flag(sbi, SBI_NEED_FSCK);
200 return __is_bitmap_valid(sbi, blkaddr, type);
204 if (unlikely(blkaddr < SEG0_BLKADDR(sbi) ||
205 blkaddr >= MAIN_BLKADDR(sbi)))
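
f2fs_is_valid_blkaddr() (lines 163-205) range-checks a block address per meta type against the on-disk layout: SIT indices below SIT_BLK_CNT(), SSA blocks between ssa_blkaddr and MAIN_BLKADDR(), CP blocks between __start_cp_addr() and sit_base_addr, and META_POR/DATA_GENERIC addresses inside the main area, with the generic case additionally cross-checked against the SIT bitmap by __is_bitmap_valid(). A self-contained userspace model of the range logic; the boundary values are invented, not from any real superblock:

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned int block_t;

    /* Illustrative layout: cp | sit | nat | ssa | main, matching the
     * ordering the checks above assume. */
    enum { CP_ADDR = 512, SIT_ADDR = 1024, NAT_ADDR = 2048,
           SSA_ADDR = 4096, MAIN_ADDR = 8192, MAX_ADDR = 262144 };

    static bool valid_cp(block_t b)  { return b >= CP_ADDR  && b < SIT_ADDR;  }
    static bool valid_ssa(block_t b) { return b >= SSA_ADDR && b < MAIN_ADDR; }
    static bool valid_por(block_t b) { return b >= MAIN_ADDR && b < MAX_ADDR; }

    int main(void)
    {
            printf("cp(600)=%d ssa(9000)=%d por(9000)=%d\n",
                   valid_cp(600), valid_ssa(9000), valid_por(9000));
            return 0;   /* prints: cp(600)=1 ssa(9000)=0 por(9000)=1 */
    }
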
218 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
224 .sbi = sbi,
241 if (!f2fs_is_valid_blkaddr(sbi, blkno, type))
247 NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid)))
250 fio.new_blkaddr = current_nat_addr(sbi,
254 if (unlikely(blkno >= TOTAL_SEGS(sbi)))
257 fio.new_blkaddr = current_sit_addr(sbi,
269 page = f2fs_grab_cache_page(META_MAPPING(sbi),
283 f2fs_update_iostat(sbi, FS_META_READ_IO, F2FS_BLKSIZE);
290 void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
295 page = find_get_page(META_MAPPING(sbi), index);
301 f2fs_ra_meta_pages(sbi, index, BIO_MAX_PAGES, META_POR, true);
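
f2fs_ra_meta_pages() (lines 218-283) issues batched readahead for up to nrpages meta blocks, translating NAT and SIT indices through current_nat_addr()/current_sit_addr() so the copy currently in effect is the one fetched; f2fs_ra_meta_pages_cond() (lines 290-301) fires only when the target page is not already cached and up to date. A sketch of the warm-then-read pattern, condensed from the orphan-recovery fragments further down; i and nr_blocks are placeholders:

    f2fs_ra_meta_pages(sbi, start_blk, nr_blocks, META_CP, true);
    for (i = 0; i < nr_blocks; i++) {
            struct page *page = f2fs_get_meta_page(sbi, start_blk + i);

            if (IS_ERR(page))
                    return PTR_ERR(page);
            /* consume page_address(page) here */
            f2fs_put_page(page, 1);
    }
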
308 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
312 if (unlikely(f2fs_cp_error(sbi))) {
313 if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
315 dec_page_count(sbi, F2FS_DIRTY_META);
321 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
323 if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
326 f2fs_do_write_meta_page(sbi, page, io_type);
327 dec_page_count(sbi, F2FS_DIRTY_META);
330 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, META);
334 if (unlikely(f2fs_cp_error(sbi)))
335 f2fs_submit_merged_write(sbi, META);
353 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
356 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
361 get_pages(sbi, F2FS_DIRTY_META) <
362 nr_pages_to_skip(sbi, META))
366 if (!mutex_trylock(&sbi->cp_mutex))
370 diff = nr_pages_to_write(sbi, META, wbc);
371 written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO);
372 mutex_unlock(&sbi->cp_mutex);
377 wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
382 long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
385 struct address_space *mapping = META_MAPPING(sbi);
444 f2fs_submit_merged_write(sbi, type);
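
f2fs_write_meta_pages() (lines 353-377) throttles background meta writeback: it bails during power-on recovery, skips batches smaller than nr_pages_to_skip(), and only proceeds if it can trylock cp_mutex, so it never stalls a running checkpoint. f2fs_sync_meta_pages() (lines 382-444) is the workhorse that walks META_MAPPING and submits merged writes. The checkpoint path drives it directly; this pairing mirrors lines 1532-1534 below:

    /* Flush every dirty meta page, then wait for all of them, before
     * any CP block of the new pack is committed. */
    f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
    f2fs_wait_on_all_pages(sbi, F2FS_DIRTY_META);
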
478 static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino,
481 struct inode_management *im = &sbi->im[type];
493 f2fs_bug_on(sbi, 1);
513 static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
515 struct inode_management *im = &sbi->im[type];
531 void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
534 __add_ino_entry(sbi, ino, 0, type);
537 void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
540 __remove_ino_entry(sbi, ino, type);
544 bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
546 struct inode_management *im = &sbi->im[mode];
555 void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all)
561 struct inode_management *im = &sbi->im[i];
574 void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
577 __add_ino_entry(sbi, ino, devidx, type);
580 bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
583 struct inode_management *im = &sbi->im[type];
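
Lines 478-583 are the generic ino-set machinery: each sbi->im[type] (struct inode_management) pairs a radix tree with a lock and a count. The sets built on it let fsync answer "did this inode write anything since the last checkpoint?" cheaply via f2fs_exist_written_data(), and f2fs_set_dirty_device()/f2fs_is_dirty_device() record per-device dirtiness through the devidx bit in __add_ino_entry(). A hedged usage sketch; the fsync-side decision is an assumption about the caller, not part of the listing:

    /* Record the inode in the APPEND_INO set... */
    f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);

    /* ...so fsync can test set membership instead of scanning page state. */
    if (f2fs_exist_written_data(sbi, inode->i_ino, APPEND_INO))
            need_writeback = true;      /* hypothetical local flag */
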
595 int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi)
597 struct inode_management *im = &sbi->im[ORPHAN_INO];
602 if (time_to_inject(sbi, FAULT_ORPHAN)) {
604 f2fs_show_injection_info(sbi, FAULT_ORPHAN);
608 if (unlikely(im->ino_num >= sbi->max_orphans))
617 void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi)
619 struct inode_management *im = &sbi->im[ORPHAN_INO];
622 f2fs_bug_on(sbi, im->ino_num == 0);
634 void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
637 __remove_ino_entry(sbi, ino, ORPHAN_INO);
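
The orphan helpers (lines 595-637) manage im[ORPHAN_INO]: f2fs_acquire_orphan_inode() is where FAULT_ORPHAN injection and the sbi->max_orphans ceiling are enforced (returning -ENOSPC once the pack can hold no more), while the release/remove helpers undo a reservation or drop a recorded ino. A simplified sketch of the reserve-then-record ordering a deletion path follows; error handling is condensed:

    err = f2fs_acquire_orphan_inode(sbi);   /* -ENOSPC or injected failure */
    if (err)
            return err;
    /* Only after the slot is guaranteed is the ino actually recorded,
     * so a checkpoint never sees more orphans than it can store. */
    f2fs_add_ino_entry(sbi, inode->i_ino, ORPHAN_INO);
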
640 static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
646 inode = f2fs_iget_retry(sbi->sb, ino);
652 f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT);
667 err = f2fs_get_node_info(sbi, ino, &ni);
679 set_sbi_flag(sbi, SBI_NEED_FSCK);
680 f2fs_warn(sbi, "%s: orphan failed (ino=%x), run fsck to fix.",
685 int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
688 unsigned int s_flags = sbi->sb->s_flags;
694 if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
697 if (bdev_read_only(sbi->sb->s_bdev)) {
698 f2fs_info(sbi, "write access unavailable, skipping orphan cleanup");
703 f2fs_info(sbi, "orphan cleanup on readonly fs");
704 sbi->sb->s_flags &= ~SB_RDONLY;
709 sbi->sb->s_flags |= SB_ACTIVE;
715 quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
718 start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
719 orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);
721 f2fs_ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);
727 page = f2fs_get_meta_page(sbi, start_blk + i);
736 err = recover_orphan_inode(sbi, ino);
745 clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
747 set_sbi_flag(sbi, SBI_IS_RECOVERED);
752 f2fs_quota_off_umount(sbi->sb);
754 sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
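
f2fs_recover_orphan_inodes() (lines 685-754) temporarily drops SB_RDONLY, enables quotas if needed, derives the orphan range from the pack layout (start_blk = CP address + 1 + payload; orphan_blocks = the blocks before the summaries), reads each block after a readahead pass, and recovers every recorded ino. A sketch of walking one orphan block, using struct f2fs_orphan_block from f2fs_fs.h; this mirrors the loop around line 736:

    struct f2fs_orphan_block *orphan_blk = page_address(page);
    int j;

    for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
            nid_t ino = le32_to_cpu(orphan_blk->ino[j]);

            err = recover_orphan_inode(sbi, ino);   /* iget + iput -> evict */
            if (err)
                    break;
    }
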
759 static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
768 struct inode_management *im = &sbi->im[ORPHAN_INO];
782 page = f2fs_grab_meta_page(sbi, start_blk++);
816 static __u32 f2fs_checkpoint_chksum(struct f2fs_sb_info *sbi,
822 chksum = f2fs_crc32(sbi, ckpt, chksum_ofs);
825 chksum = f2fs_chksum(sbi, chksum, (__u8 *)ckpt + chksum_ofs,
831 static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
838 *cp_page = f2fs_get_meta_page(sbi, cp_addr);
848 f2fs_warn(sbi, "invalid crc_offset: %zu", crc_offset);
852 crc = f2fs_checkpoint_chksum(sbi, *cp_block);
855 f2fs_warn(sbi, "invalid crc value");
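
f2fs_checkpoint_chksum() (lines 816-825) hashes the CP block up to checksum_offset and, when that offset is not the legacy end-of-block slot, continues past the 4-byte field so the checksum never covers itself; get_checkpoint_version() (lines 831-855) rejects blocks with a bad offset or mismatched CRC. A self-contained model of skip-the-slot hashing, with sum32() standing in for the kernel's f2fs_crc32()/f2fs_chksum():

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>
    #include <stdio.h>

    /* Toy rolling checksum; only the skip-the-slot structure matters. */
    static uint32_t sum32(uint32_t acc, const uint8_t *p, size_t len)
    {
            while (len--)
                    acc = acc * 31 + *p++;
            return acc;
    }

    /* Mirror f2fs_checkpoint_chksum(): hash [0, ofs), then resume after
     * the 4-byte checksum slot unless the slot sits at the block end. */
    static uint32_t blk_chksum(const uint8_t *blk, size_t blksz, size_t ofs)
    {
            uint32_t c = sum32(0, blk, ofs);

            if (ofs + sizeof(uint32_t) < blksz)
                    c = sum32(c, blk + ofs + sizeof(uint32_t),
                              blksz - ofs - sizeof(uint32_t));
            return c;
    }

    int main(void)
    {
            uint8_t blk[4096];
            size_t i;
            uint32_t c;

            for (i = 0; i < sizeof(blk); i++)
                    blk[i] = (uint8_t)i;
            c = blk_chksum(blk, sizeof(blk), 1024);
            memcpy(blk + 1024, &c, sizeof(c)); /* store w/o changing coverage */
            printf("verified=%d\n", blk_chksum(blk, sizeof(blk), 1024) == c);
            return 0;
    }
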
863 static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
872 err = get_checkpoint_version(sbi, cp_addr, &cp_block,
879 if (cp_blocks > sbi->blocks_per_seg || cp_blocks <= F2FS_CP_PACKS) {
880 f2fs_warn(sbi, "invalid cp_pack_total_block_count:%u",
887 err = get_checkpoint_version(sbi, cp_addr, &cp_block,
904 int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
907 struct f2fs_super_block *fsb = sbi->raw_super;
909 unsigned long blk_size = sbi->blocksize;
912 unsigned int cp_blks = 1 + __cp_payload(sbi);
917 sbi->ckpt = f2fs_kvzalloc(sbi, array_size(blk_size, cp_blks),
919 if (!sbi->ckpt)
926 cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);
931 cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);
948 memcpy(sbi->ckpt, cp_block, blk_size);
951 sbi->cur_cp_pack = 1;
953 sbi->cur_cp_pack = 2;
956 if (f2fs_sanity_check_ckpt(sbi)) {
970 unsigned char *ckpt = (unsigned char *)sbi->ckpt;
972 cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i);
990 kvfree(sbi->ckpt);
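
f2fs_get_valid_checkpoint() (lines 904-990) validates both checkpoint packs: validate_checkpoint() accepts a pack only when the versions in its first and last CP blocks agree and cp_pack_total_block_count is sane, then the pack carrying the newer version wins and cur_cp_pack records which one. A compact model of the selection, ignoring the version wraparound the kernel handles with ver_after():

    #include <stdbool.h>
    #include <stdio.h>

    struct cp_pack { unsigned long long ver; bool valid; };

    /* Prefer the valid pack with the larger version, as in lines 948-953. */
    static int pick_pack(struct cp_pack cp1, struct cp_pack cp2)
    {
            if (cp1.valid && cp2.valid)
                    return cp2.ver > cp1.ver ? 2 : 1;
            if (cp1.valid)
                    return 1;
            if (cp2.valid)
                    return 2;
            return -1;      /* neither usable: the mount fails */
    }

    int main(void)
    {
            struct cp_pack a = { 41, true }, b = { 42, true };

            printf("cur_cp_pack = %d\n", pick_pack(a, b));  /* 2 */
            return 0;
    }
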
996 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1005 &sbi->inode_list[type]);
1006 stat_inc_dirty_inode(sbi, type);
1023 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1030 spin_lock(&sbi->inode_lock[type]);
1031 if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH))
1034 spin_unlock(&sbi->inode_lock[type]);
1042 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1049 if (type == FILE_INODE && !test_opt(sbi, DATA_FLUSH))
1052 spin_lock(&sbi->inode_lock[type]);
1054 spin_unlock(&sbi->inode_lock[type]);
1057 int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type,
1066 trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir,
1067 get_pages(sbi, is_dir ?
1070 if (unlikely(f2fs_cp_error(sbi))) {
1071 trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
1072 get_pages(sbi, is_dir ?
1077 spin_lock(&sbi->inode_lock[type]);
1079 head = &sbi->inode_list[type];
1081 spin_unlock(&sbi->inode_lock[type]);
1082 trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
1083 get_pages(sbi, is_dir ?
1089 spin_unlock(&sbi->inode_lock[type]);
1114 f2fs_submit_merged_write(sbi, DATA);
1120 int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
1122 struct list_head *head = &sbi->inode_list[DIRTY_META];
1125 s64 total = get_pages(sbi, F2FS_DIRTY_IMETA);
1128 if (unlikely(f2fs_cp_error(sbi)))
1131 spin_lock(&sbi->inode_lock[DIRTY_META]);
1133 spin_unlock(&sbi->inode_lock[DIRTY_META]);
1139 spin_unlock(&sbi->inode_lock[DIRTY_META]);
1152 static void __prepare_cp_block(struct f2fs_sb_info *sbi)
1154 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1155 struct f2fs_nm_info *nm_i = NM_I(sbi);
1158 next_free_nid(sbi, &last_nid);
1159 ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
1160 ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
1161 ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
1165 static bool __need_flush_quota(struct f2fs_sb_info *sbi)
1169 if (!is_journalled_quota(sbi))
1172 if (!down_write_trylock(&sbi->quota_sem))
1174 if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) {
1176 } else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) {
1178 } else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_FLUSH)) {
1179 clear_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
1181 } else if (get_pages(sbi, F2FS_DIRTY_QDATA)) {
1184 up_write(&sbi->quota_sem);
1191 static int block_operations(struct f2fs_sb_info *sbi)
1203 f2fs_flush_inline_data(sbi);
1206 f2fs_lock_all(sbi);
1207 if (__need_flush_quota(sbi)) {
1211 set_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
1212 set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
1215 f2fs_unlock_all(sbi);
1218 locked = down_read_trylock(&sbi->sb->s_umount);
1219 f2fs_quota_sync(sbi->sb, -1);
1221 up_read(&sbi->sb->s_umount);
1228 if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
1229 f2fs_unlock_all(sbi);
1230 err = f2fs_sync_dirty_inodes(sbi, DIR_INODE, true);
1241 down_write(&sbi->node_change);
1243 if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
1244 up_write(&sbi->node_change);
1245 f2fs_unlock_all(sbi);
1246 err = f2fs_sync_inode_meta(sbi);
1254 down_write(&sbi->node_write);
1256 if (get_pages(sbi, F2FS_DIRTY_NODES)) {
1257 up_write(&sbi->node_write);
1258 atomic_inc(&sbi->wb_sync_req[NODE]);
1259 err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
1260 atomic_dec(&sbi->wb_sync_req[NODE]);
1262 up_write(&sbi->node_change);
1263 f2fs_unlock_all(sbi);
1271 * sbi->node_change is used only for AIO write_begin path which produces
1274 __prepare_cp_block(sbi);
1275 up_write(&sbi->node_change);
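
block_operations() (lines 1191-1275) freezes the filesystem in stages: flush quotas if __need_flush_quota() says so, then dirty dentry pages, then dirty inode metadata, then dirty node pages, re-taking f2fs_lock_all() and re-checking the counters after every flush because each one had to drop the lock. Only once everything is clean are node_change and node_write held and the CP counters snapshotted by __prepare_cp_block(). A heavily condensed sketch of one rung of that retry ladder; the real code uses per-stage labels:

    retry:
            f2fs_lock_all(sbi);
            if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
                    f2fs_unlock_all(sbi);       /* flushing needs the lock */
                    err = f2fs_sync_dirty_inodes(sbi, DIR_INODE, true);
                    if (err)
                            return err;
                    cond_resched();
                    goto retry;                 /* counters may have grown back */
            }
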
1279 static void unblock_operations(struct f2fs_sb_info *sbi)
1281 up_write(&sbi->node_write);
1282 f2fs_unlock_all(sbi);
1285 void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type)
1290 if (!get_pages(sbi, type))
1293 if (unlikely(f2fs_cp_error(sbi) &&
1294 !is_sbi_flag_set(sbi, SBI_IS_CLOSE)))
1298 f2fs_sync_meta_pages(sbi, META, LONG_MAX,
1301 f2fs_submit_merged_write(sbi, DATA);
1303 prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);
1306 finish_wait(&sbi->cp_wait, &wait);
1309 static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1311 unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
1312 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1315 spin_lock_irqsave(&sbi->cp_lock, flags);
1319 sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks)
1320 disable_nat_bits(sbi, false);
1342 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
1345 if (is_sbi_flag_set(sbi, SBI_IS_RESIZEFS))
1350 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
1355 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK))
1360 if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH))
1365 if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR))
1372 spin_unlock_irqrestore(&sbi->cp_lock, flags);
1375 static void commit_checkpoint(struct f2fs_sb_info *sbi,
1387 struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
1396 f2fs_bug_on(sbi, 1);
1400 if (unlikely(err && f2fs_cp_error(sbi))) {
1405 f2fs_bug_on(sbi, err);
1409 f2fs_submit_merged_write(sbi, META_FLUSH);
1412 static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1414 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1415 struct f2fs_nm_info *nm_i = NM_I(sbi);
1416 unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num, flags;
1421 int cp_payload_blks = __cp_payload(sbi);
1422 struct super_block *sb = sbi->sb;
1423 struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
1428 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
1431 ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi, true));
1432 ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
1435 cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
1437 cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
1439 curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
1443 cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
1445 cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA));
1447 curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
1451 data_sum_blocks = f2fs_npages_for_summary_flush(sbi, false);
1452 spin_lock_irqsave(&sbi->cp_lock, flags);
1457 spin_unlock_irqrestore(&sbi->cp_lock, flags);
1473 update_ckpt_flags(sbi, cpc);
1476 get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
1477 get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));
1479 crc32 = f2fs_checkpoint_chksum(sbi, ckpt);
1484 start_blk = __start_cp_next_addr(sbi);
1487 if (enabled_nat_bits(sbi, cpc)) {
1494 blk = start_blk + sbi->blocks_per_seg - nm_i->nat_bits_blocks;
1496 f2fs_update_meta_page(sbi, nm_i->nat_bits +
1501 f2fs_update_meta_page(sbi, ckpt, start_blk++);
1504 f2fs_update_meta_page(sbi, (char *)ckpt + i * F2FS_BLKSIZE,
1508 write_orphan_inodes(sbi, start_blk);
1512 f2fs_write_data_summaries(sbi, start_blk);
1516 kbytes_written = sbi->kbytes_written;
1518 kbytes_written += BD_PART_WRITTEN(sbi);
1523 f2fs_write_node_summaries(sbi, start_blk);
1528 sbi->last_valid_block_count = sbi->total_valid_block_count;
1529 percpu_counter_set(&sbi->alloc_valid_block_count, 0);
1532 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
1534 f2fs_wait_on_all_pages(sbi, F2FS_DIRTY_META);
1537 f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
1540 err = f2fs_flush_device_cache(sbi);
1545 commit_checkpoint(sbi, ckpt, start_blk);
1546 f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
1552 if (f2fs_sb_has_encrypt(sbi) || f2fs_sb_has_verity(sbi) ||
1553 f2fs_sb_has_compression(sbi))
1554 invalidate_mapping_pages(META_MAPPING(sbi),
1555 MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);
1557 f2fs_release_ino_entry(sbi, false);
1559 f2fs_reset_fsync_node_info(sbi);
1561 clear_sbi_flag(sbi, SBI_IS_DIRTY);
1562 clear_sbi_flag(sbi, SBI_NEED_CP);
1563 clear_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
1565 spin_lock(&sbi->stat_lock);
1566 sbi->unusable_block_count = 0;
1567 spin_unlock(&sbi->stat_lock);
1569 __set_cp_next_pack(sbi);
1575 if (get_pages(sbi, F2FS_DIRTY_NODES) ||
1576 get_pages(sbi, F2FS_DIRTY_IMETA))
1577 set_sbi_flag(sbi, SBI_IS_DIRTY);
1579 f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS));
1581 return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0;
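
do_checkpoint() (lines 1412-1581) serializes one CP pack in a fixed order: the CP block (plus cp_payload blocks of NAT bitmap spill), orphan blocks, data summaries, node summaries on umount, and finally the same CP block again through commit_checkpoint(), whose META_FLUSH submission barriers the tail write so the pack only becomes valid once all of its contents are durable (nat_bits, when enabled, are tucked into the end of the segment first). A toy model of the start_blk walk; the per-section counts are illustrative:

    #include <stdio.h>

    int main(void)
    {
            unsigned int blk = 0, cp_payload = 0, orphan_blocks = 1;
            unsigned int data_sums = 3, node_sums = 3;  /* node: umount only */

            printf("head cp block  @ %u\n", blk);
            blk += 1 + cp_payload;
            printf("orphan blocks  @ %u (%u)\n", blk, orphan_blocks);
            blk += orphan_blocks;
            printf("data summaries @ %u (%u)\n", blk, data_sums);
            blk += data_sums;
            printf("node summaries @ %u (%u)\n", blk, node_sums);
            blk += node_sums;
            printf("tail cp block  @ %u\n", blk);
            return 0;
    }
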
1584 int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1586 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1590 if (f2fs_readonly(sbi->sb) || f2fs_hw_is_readonly(sbi))
1593 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
1596 f2fs_warn(sbi, "Start checkpoint disabled!");
1599 mutex_lock(&sbi->cp_mutex);
1601 if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
1603 ((cpc->reason & CP_DISCARD) && !sbi->discard_blks)))
1605 if (unlikely(f2fs_cp_error(sbi))) {
1610 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");
1612 err = block_operations(sbi);
1616 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops");
1618 f2fs_flush_merged_writes(sbi);
1622 if (!f2fs_exist_trim_candidates(sbi, cpc)) {
1623 unblock_operations(sbi);
1627 if (NM_I(sbi)->nat_cnt[DIRTY_NAT] == 0 &&
1628 SIT_I(sbi)->dirty_sentries == 0 &&
1629 prefree_segments(sbi) == 0) {
1630 f2fs_flush_sit_entries(sbi, cpc);
1631 f2fs_clear_prefree_segments(sbi, cpc);
1632 unblock_operations(sbi);
1646 err = f2fs_flush_nat_entries(sbi, cpc);
1650 f2fs_flush_sit_entries(sbi, cpc);
1653 f2fs_save_inmem_curseg(sbi);
1655 err = do_checkpoint(sbi, cpc);
1657 f2fs_release_discard_addrs(sbi);
1659 f2fs_clear_prefree_segments(sbi, cpc);
1661 f2fs_restore_inmem_curseg(sbi);
1663 unblock_operations(sbi);
1664 stat_inc_cp_count(sbi->stat_info);
1667 f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver);
1670 f2fs_update_time(sbi, CP_TIME);
1671 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
1674 mutex_unlock(&sbi->cp_mutex);
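
f2fs_write_checkpoint() (lines 1584-1674) is the entry point: it refuses on read-only or CP-disabled filesystems, exits early when nothing is dirty, runs block_operations(), flushes NAT and SIT entries, calls do_checkpoint(), and unblocks. A minimal caller sketch, assuming the standard cp_control usage:

    struct cp_control cpc = { .reason = CP_SYNC };  /* CP_UMOUNT, CP_DISCARD also exist */
    int err = f2fs_write_checkpoint(sbi, &cpc);
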
1678 void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi)
1683 struct inode_management *im = &sbi->im[i];
1691 sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
1692 NR_CURSEG_PERSIST_TYPE - __cp_payload(sbi)) *
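
f2fs_init_ino_entry_info() (lines 1678-1692) initializes the im[] trees and derives sbi->max_orphans: every block of the one-segment CP pack that is not a CP block, payload, or current-segment summary can hold orphan inos. A worked example under assumed defaults (512 blocks per segment, F2FS_CP_PACKS = 2, NR_CURSEG_PERSIST_TYPE = 6, cp_payload = 0, F2FS_ORPHANS_PER_BLOCK = 1020):

    #include <stdio.h>

    int main(void)
    {
            /* Assumed defaults; real values come from the superblock. */
            unsigned int blocks_per_seg = 512, cp_packs = 2;
            unsigned int cursegs = 6, payload = 0, per_block = 1020;

            unsigned int max_orphans =
                    (blocks_per_seg - cp_packs - cursegs - payload) * per_block;

            printf("max_orphans = %u\n", max_orphans);  /* 514080 */
            return 0;
    }
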