1// SPDX-License-Identifier: GPL-2.0 2/* 3 * fs/f2fs/inode.c 4 * 5 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 6 * http://www.samsung.com/ 7 */ 8#include <linux/fs.h> 9#include <linux/f2fs_fs.h> 10#include <linux/buffer_head.h> 11#include <linux/backing-dev.h> 12#include <linux/writeback.h> 13 14#include "f2fs.h" 15#include "node.h" 16#include "segment.h" 17#include "xattr.h" 18 19#include <trace/events/f2fs.h> 20 21void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync) 22{ 23 if (is_inode_flag_set(inode, FI_NEW_INODE)) 24 return; 25 26 if (f2fs_readonly(F2FS_I_SB(inode)->sb)) 27 return; 28 29 if (f2fs_inode_dirtied(inode, sync)) 30 return; 31 32 mark_inode_dirty_sync(inode); 33} 34 35void f2fs_set_inode_flags(struct inode *inode) 36{ 37 unsigned int flags = F2FS_I(inode)->i_flags; 38 unsigned int new_fl = 0; 39 40 if (flags & F2FS_SYNC_FL) 41 new_fl |= S_SYNC; 42 if (flags & F2FS_APPEND_FL) 43 new_fl |= S_APPEND; 44 if (flags & F2FS_IMMUTABLE_FL) 45 new_fl |= S_IMMUTABLE; 46 if (flags & F2FS_NOATIME_FL) 47 new_fl |= S_NOATIME; 48 if (flags & F2FS_DIRSYNC_FL) 49 new_fl |= S_DIRSYNC; 50 if (file_is_encrypt(inode)) 51 new_fl |= S_ENCRYPTED; 52 if (file_is_verity(inode)) 53 new_fl |= S_VERITY; 54 if (flags & F2FS_CASEFOLD_FL) 55 new_fl |= S_CASEFOLD; 56 inode_set_flags(inode, new_fl, 57 S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC| 58 S_ENCRYPTED|S_VERITY|S_CASEFOLD); 59} 60 61static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri) 62{ 63 int extra_size = get_extra_isize(inode); 64 65 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || 66 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { 67 if (ri->i_addr[extra_size]) 68 inode->i_rdev = old_decode_dev( 69 le32_to_cpu(ri->i_addr[extra_size])); 70 else 71 inode->i_rdev = new_decode_dev( 72 le32_to_cpu(ri->i_addr[extra_size + 1])); 73 } 74} 75 76static int __written_first_block(struct f2fs_sb_info *sbi, 77 struct f2fs_inode *ri) 78{ 79 block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]); 80 81 if (!__is_valid_data_blkaddr(addr)) 82 return 1; 83 if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE)) 84 return -EFSCORRUPTED; 85 return 0; 86} 87 88static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri) 89{ 90 int extra_size = get_extra_isize(inode); 91 92 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 93 if (old_valid_dev(inode->i_rdev)) { 94 ri->i_addr[extra_size] = 95 cpu_to_le32(old_encode_dev(inode->i_rdev)); 96 ri->i_addr[extra_size + 1] = 0; 97 } else { 98 ri->i_addr[extra_size] = 0; 99 ri->i_addr[extra_size + 1] = 100 cpu_to_le32(new_encode_dev(inode->i_rdev)); 101 ri->i_addr[extra_size + 2] = 0; 102 } 103 } 104} 105 106static void __recover_inline_status(struct inode *inode, struct page *ipage) 107{ 108 void *inline_data = inline_data_addr(inode, ipage); 109 __le32 *start = inline_data; 110 __le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32); 111 112 while (start < end) { 113 if (*start++) { 114 f2fs_wait_on_page_writeback(ipage, NODE, true, true); 115 116 set_inode_flag(inode, FI_DATA_EXIST); 117 set_raw_inline(inode, F2FS_INODE(ipage)); 118 set_page_dirty(ipage); 119 return; 120 } 121 } 122 return; 123} 124 125static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page) 126{ 127 struct f2fs_inode *ri = &F2FS_NODE(page)->i; 128 129 if (!f2fs_sb_has_inode_chksum(sbi)) 130 return false; 131 132 if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR)) 133 return false; 134 135 if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize), 136 i_inode_checksum)) 137 return false; 138 139 return true; 140} 141 142static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page) 143{ 144 struct f2fs_node *node = F2FS_NODE(page); 145 struct f2fs_inode *ri = &node->i; 146 __le32 ino = node->footer.ino; 147 __le32 gen = ri->i_generation; 148 __u32 chksum, chksum_seed; 149 __u32 dummy_cs = 0; 150 unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum); 151 unsigned int cs_size = sizeof(dummy_cs); 152 153 chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino, 154 sizeof(ino)); 155 chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen)); 156 157 chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset); 158 chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size); 159 offset += cs_size; 160 chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset, 161 F2FS_BLKSIZE - offset); 162 return chksum; 163} 164 165bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page) 166{ 167 struct f2fs_inode *ri; 168 __u32 provided, calculated; 169 170 if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN))) 171 return true; 172 173#ifdef CONFIG_F2FS_CHECK_FS 174 if (!f2fs_enable_inode_chksum(sbi, page)) 175#else 176 if (!f2fs_enable_inode_chksum(sbi, page) || 177 PageDirty(page) || PageWriteback(page)) 178#endif 179 return true; 180 181 ri = &F2FS_NODE(page)->i; 182 provided = le32_to_cpu(ri->i_inode_checksum); 183 calculated = f2fs_inode_chksum(sbi, page); 184 185 if (provided != calculated) 186 f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x", 187 page->index, ino_of_node(page), provided, calculated); 188 189 return provided == calculated; 190} 191 192void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page) 193{ 194 struct f2fs_inode *ri = &F2FS_NODE(page)->i; 195 196 if (!f2fs_enable_inode_chksum(sbi, page)) 197 return; 198 199 ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page)); 200} 201 202static bool sanity_check_inode(struct inode *inode, struct page *node_page) 203{ 204 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 205 struct f2fs_inode_info *fi = F2FS_I(inode); 206 struct f2fs_inode *ri = F2FS_INODE(node_page); 207 unsigned long long iblocks; 208 209 iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks); 210 if (!iblocks) { 211 set_sbi_flag(sbi, SBI_NEED_FSCK); 212 f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.", 213 __func__, inode->i_ino, iblocks); 214 return false; 215 } 216 217 if (ino_of_node(node_page) != nid_of_node(node_page)) { 218 set_sbi_flag(sbi, SBI_NEED_FSCK); 219 f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.", 220 __func__, inode->i_ino, 221 ino_of_node(node_page), nid_of_node(node_page)); 222 return false; 223 } 224 225 if (f2fs_sb_has_flexible_inline_xattr(sbi) 226 && !f2fs_has_extra_attr(inode)) { 227 set_sbi_flag(sbi, SBI_NEED_FSCK); 228 f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.", 229 __func__, inode->i_ino); 230 return false; 231 } 232 233 if (f2fs_has_extra_attr(inode) && 234 !f2fs_sb_has_extra_attr(sbi)) { 235 set_sbi_flag(sbi, SBI_NEED_FSCK); 236 f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off", 237 __func__, inode->i_ino); 238 return false; 239 } 240 241 if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE || 242 fi->i_extra_isize % sizeof(__le32)) { 243 set_sbi_flag(sbi, SBI_NEED_FSCK); 244 f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu", 245 __func__, inode->i_ino, fi->i_extra_isize, 246 F2FS_TOTAL_EXTRA_ATTR_SIZE); 247 return false; 248 } 249 250 if (f2fs_has_extra_attr(inode) && 251 f2fs_sb_has_flexible_inline_xattr(sbi) && 252 f2fs_has_inline_xattr(inode) && 253 (!fi->i_inline_xattr_size || 254 fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) { 255 set_sbi_flag(sbi, SBI_NEED_FSCK); 256 f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu", 257 __func__, inode->i_ino, fi->i_inline_xattr_size, 258 MAX_INLINE_XATTR_SIZE); 259 return false; 260 } 261 262 if (F2FS_I(inode)->extent_tree) { 263 struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest; 264 265 if (ei->len && 266 (!f2fs_is_valid_blkaddr(sbi, ei->blk, 267 DATA_GENERIC_ENHANCE) || 268 !f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1, 269 DATA_GENERIC_ENHANCE))) { 270 set_sbi_flag(sbi, SBI_NEED_FSCK); 271 f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix", 272 __func__, inode->i_ino, 273 ei->blk, ei->fofs, ei->len); 274 return false; 275 } 276 } 277 278 if (f2fs_sanity_check_inline_data(inode)) { 279 set_sbi_flag(sbi, SBI_NEED_FSCK); 280 f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix", 281 __func__, inode->i_ino, inode->i_mode); 282 return false; 283 } 284 285 if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) { 286 set_sbi_flag(sbi, SBI_NEED_FSCK); 287 f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix", 288 __func__, inode->i_ino, inode->i_mode); 289 return false; 290 } 291 292 if ((fi->i_flags & F2FS_CASEFOLD_FL) && !f2fs_sb_has_casefold(sbi)) { 293 set_sbi_flag(sbi, SBI_NEED_FSCK); 294 f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off", 295 __func__, inode->i_ino); 296 return false; 297 } 298 299 if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) && 300 fi->i_flags & F2FS_COMPR_FL && 301 F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, 302 i_log_cluster_size)) { 303 if (ri->i_compress_algorithm >= COMPRESS_MAX) { 304 set_sbi_flag(sbi, SBI_NEED_FSCK); 305 f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported " 306 "compress algorithm: %u, run fsck to fix", 307 __func__, inode->i_ino, 308 ri->i_compress_algorithm); 309 return false; 310 } 311 if (le64_to_cpu(ri->i_compr_blocks) > 312 SECTOR_TO_BLOCK(inode->i_blocks)) { 313 set_sbi_flag(sbi, SBI_NEED_FSCK); 314 f2fs_warn(sbi, "%s: inode (ino=%lx) has inconsistent " 315 "i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix", 316 __func__, inode->i_ino, 317 le64_to_cpu(ri->i_compr_blocks), 318 SECTOR_TO_BLOCK(inode->i_blocks)); 319 return false; 320 } 321 if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE || 322 ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) { 323 set_sbi_flag(sbi, SBI_NEED_FSCK); 324 f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported " 325 "log cluster size: %u, run fsck to fix", 326 __func__, inode->i_ino, 327 ri->i_log_cluster_size); 328 return false; 329 } 330 } 331 332 if (fi->i_xattr_nid && f2fs_check_nid_range(sbi, fi->i_xattr_nid)) { 333 f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_xattr_nid: %u, run fsck to fix.", 334 __func__, inode->i_ino, fi->i_xattr_nid); 335 return false; 336 } 337 338 return true; 339} 340 341static int do_read_inode(struct inode *inode) 342{ 343 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 344 struct f2fs_inode_info *fi = F2FS_I(inode); 345 struct page *node_page; 346 struct f2fs_inode *ri; 347 projid_t i_projid; 348 int err; 349 350 /* Check if ino is within scope */ 351 if (f2fs_check_nid_range(sbi, inode->i_ino)) 352 return -EINVAL; 353 354 node_page = f2fs_get_node_page(sbi, inode->i_ino); 355 if (IS_ERR(node_page)) 356 return PTR_ERR(node_page); 357 358 ri = F2FS_INODE(node_page); 359 360 inode->i_mode = le16_to_cpu(ri->i_mode); 361 i_uid_write(inode, le32_to_cpu(ri->i_uid)); 362 i_gid_write(inode, le32_to_cpu(ri->i_gid)); 363 set_nlink(inode, le32_to_cpu(ri->i_links)); 364 inode->i_size = le64_to_cpu(ri->i_size); 365 inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1); 366 367 inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime); 368 inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime); 369 inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime); 370 inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec); 371 inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec); 372 inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec); 373 inode->i_generation = le32_to_cpu(ri->i_generation); 374 if (S_ISDIR(inode->i_mode)) 375 fi->i_current_depth = le32_to_cpu(ri->i_current_depth); 376 else if (S_ISREG(inode->i_mode)) 377 fi->i_gc_failures[GC_FAILURE_PIN] = 378 le16_to_cpu(ri->i_gc_failures); 379 fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid); 380 fi->i_flags = le32_to_cpu(ri->i_flags); 381 if (S_ISREG(inode->i_mode)) 382 fi->i_flags &= ~F2FS_PROJINHERIT_FL; 383 bitmap_zero(fi->flags, FI_MAX); 384 fi->i_advise = ri->i_advise; 385 fi->i_pino = le32_to_cpu(ri->i_pino); 386 fi->i_dir_level = ri->i_dir_level; 387 388 f2fs_init_extent_tree(inode, node_page); 389 390 get_inline_info(inode, ri); 391 392 fi->i_extra_isize = f2fs_has_extra_attr(inode) ? 393 le16_to_cpu(ri->i_extra_isize) : 0; 394 395 if (f2fs_sb_has_flexible_inline_xattr(sbi)) { 396 fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size); 397 } else if (f2fs_has_inline_xattr(inode) || 398 f2fs_has_inline_dentry(inode)) { 399 fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS; 400 } else { 401 402 /* 403 * Previous inline data or directory always reserved 200 bytes 404 * in inode layout, even if inline_xattr is disabled. In order 405 * to keep inline_dentry's structure for backward compatibility, 406 * we get the space back only from inline_data. 407 */ 408 fi->i_inline_xattr_size = 0; 409 } 410 411 if (!sanity_check_inode(inode, node_page)) { 412 f2fs_put_page(node_page, 1); 413 return -EFSCORRUPTED; 414 } 415 416 /* check data exist */ 417 if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode)) 418 __recover_inline_status(inode, node_page); 419 420 /* try to recover cold bit for non-dir inode */ 421 if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) { 422 f2fs_wait_on_page_writeback(node_page, NODE, true, true); 423 set_cold_node(node_page, false); 424 set_page_dirty(node_page); 425 } 426 427 /* get rdev by using inline_info */ 428 __get_inode_rdev(inode, ri); 429 430 if (S_ISREG(inode->i_mode)) { 431 err = __written_first_block(sbi, ri); 432 if (err < 0) { 433 f2fs_put_page(node_page, 1); 434 return err; 435 } 436 if (!err) 437 set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN); 438 } 439 440 if (!f2fs_need_inode_block_update(sbi, inode->i_ino)) 441 fi->last_disk_size = inode->i_size; 442 443 if (fi->i_flags & F2FS_PROJINHERIT_FL) 444 set_inode_flag(inode, FI_PROJ_INHERIT); 445 446 if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) && 447 F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid)) 448 i_projid = (projid_t)le32_to_cpu(ri->i_projid); 449 else 450 i_projid = F2FS_DEF_PROJID; 451 fi->i_projid = make_kprojid(&init_user_ns, i_projid); 452 453 if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) && 454 F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) { 455 fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime); 456 fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec); 457 } 458 459 if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) && 460 (fi->i_flags & F2FS_COMPR_FL)) { 461 if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, 462 i_log_cluster_size)) { 463 atomic_set(&fi->i_compr_blocks, 464 le64_to_cpu(ri->i_compr_blocks)); 465 fi->i_compress_algorithm = ri->i_compress_algorithm; 466 fi->i_log_cluster_size = ri->i_log_cluster_size; 467 fi->i_cluster_size = 1 << fi->i_log_cluster_size; 468 set_inode_flag(inode, FI_COMPRESSED_FILE); 469 } 470 } 471 472 F2FS_I(inode)->i_disk_time[0] = inode->i_atime; 473 F2FS_I(inode)->i_disk_time[1] = inode->i_ctime; 474 F2FS_I(inode)->i_disk_time[2] = inode->i_mtime; 475 F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime; 476 f2fs_put_page(node_page, 1); 477 478 stat_inc_inline_xattr(inode); 479 stat_inc_inline_inode(inode); 480 stat_inc_inline_dir(inode); 481 stat_inc_compr_inode(inode); 482 stat_add_compr_blocks(inode, atomic_read(&fi->i_compr_blocks)); 483 484 return 0; 485} 486 487struct inode *f2fs_iget(struct super_block *sb, unsigned long ino) 488{ 489 struct f2fs_sb_info *sbi = F2FS_SB(sb); 490 struct inode *inode; 491 int ret = 0; 492 493 inode = iget_locked(sb, ino); 494 if (!inode) 495 return ERR_PTR(-ENOMEM); 496 497 if (!(inode->i_state & I_NEW)) { 498 trace_f2fs_iget(inode); 499 return inode; 500 } 501 if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi)) 502 goto make_now; 503 504 ret = do_read_inode(inode); 505 if (ret) 506 goto bad_inode; 507make_now: 508 if (ino == F2FS_NODE_INO(sbi)) { 509 inode->i_mapping->a_ops = &f2fs_node_aops; 510 mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); 511 } else if (ino == F2FS_META_INO(sbi)) { 512 inode->i_mapping->a_ops = &f2fs_meta_aops; 513 mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); 514 } else if (S_ISREG(inode->i_mode)) { 515 inode->i_op = &f2fs_file_inode_operations; 516 inode->i_fop = &f2fs_file_operations; 517 inode->i_mapping->a_ops = &f2fs_dblock_aops; 518 } else if (S_ISDIR(inode->i_mode)) { 519 inode->i_op = &f2fs_dir_inode_operations; 520 inode->i_fop = &f2fs_dir_operations; 521 inode->i_mapping->a_ops = &f2fs_dblock_aops; 522 mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); 523 } else if (S_ISLNK(inode->i_mode)) { 524 if (file_is_encrypt(inode)) 525 inode->i_op = &f2fs_encrypted_symlink_inode_operations; 526 else 527 inode->i_op = &f2fs_symlink_inode_operations; 528 inode_nohighmem(inode); 529 inode->i_mapping->a_ops = &f2fs_dblock_aops; 530 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || 531 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { 532 inode->i_op = &f2fs_special_inode_operations; 533 init_special_inode(inode, inode->i_mode, inode->i_rdev); 534 } else { 535 ret = -EIO; 536 goto bad_inode; 537 } 538 f2fs_set_inode_flags(inode); 539 unlock_new_inode(inode); 540 trace_f2fs_iget(inode); 541 return inode; 542 543bad_inode: 544 f2fs_inode_synced(inode); 545 iget_failed(inode); 546 trace_f2fs_iget_exit(inode, ret); 547 return ERR_PTR(ret); 548} 549 550struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino) 551{ 552 struct inode *inode; 553retry: 554 inode = f2fs_iget(sb, ino); 555 if (IS_ERR(inode)) { 556 if (PTR_ERR(inode) == -ENOMEM) { 557 congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT); 558 goto retry; 559 } 560 } 561 return inode; 562} 563 564void f2fs_update_inode(struct inode *inode, struct page *node_page) 565{ 566 struct f2fs_inode *ri; 567 struct extent_tree *et = F2FS_I(inode)->extent_tree; 568 569 f2fs_wait_on_page_writeback(node_page, NODE, true, true); 570 set_page_dirty(node_page); 571 572 f2fs_inode_synced(inode); 573 574 ri = F2FS_INODE(node_page); 575 576 ri->i_mode = cpu_to_le16(inode->i_mode); 577 ri->i_advise = F2FS_I(inode)->i_advise; 578 ri->i_uid = cpu_to_le32(i_uid_read(inode)); 579 ri->i_gid = cpu_to_le32(i_gid_read(inode)); 580 ri->i_links = cpu_to_le32(inode->i_nlink); 581 ri->i_size = cpu_to_le64(i_size_read(inode)); 582 ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1); 583 584 if (et) { 585 read_lock(&et->lock); 586 set_raw_extent(&et->largest, &ri->i_ext); 587 read_unlock(&et->lock); 588 } else { 589 memset(&ri->i_ext, 0, sizeof(ri->i_ext)); 590 } 591 set_raw_inline(inode, ri); 592 593 ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec); 594 ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec); 595 ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec); 596 ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec); 597 ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); 598 ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); 599 if (S_ISDIR(inode->i_mode)) 600 ri->i_current_depth = 601 cpu_to_le32(F2FS_I(inode)->i_current_depth); 602 else if (S_ISREG(inode->i_mode)) 603 ri->i_gc_failures = 604 cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]); 605 ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid); 606 ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags); 607 ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino); 608 ri->i_generation = cpu_to_le32(inode->i_generation); 609 ri->i_dir_level = F2FS_I(inode)->i_dir_level; 610 611 if (f2fs_has_extra_attr(inode)) { 612 ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize); 613 614 if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode))) 615 ri->i_inline_xattr_size = 616 cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size); 617 618 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) && 619 F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize, 620 i_projid)) { 621 projid_t i_projid; 622 623 i_projid = from_kprojid(&init_user_ns, 624 F2FS_I(inode)->i_projid); 625 ri->i_projid = cpu_to_le32(i_projid); 626 } 627 628 if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) && 629 F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize, 630 i_crtime)) { 631 ri->i_crtime = 632 cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec); 633 ri->i_crtime_nsec = 634 cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec); 635 } 636 637 if (f2fs_sb_has_compression(F2FS_I_SB(inode)) && 638 F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize, 639 i_log_cluster_size)) { 640 ri->i_compr_blocks = 641 cpu_to_le64(atomic_read( 642 &F2FS_I(inode)->i_compr_blocks)); 643 ri->i_compress_algorithm = 644 F2FS_I(inode)->i_compress_algorithm; 645 ri->i_log_cluster_size = 646 F2FS_I(inode)->i_log_cluster_size; 647 } 648 } 649 650 __set_inode_rdev(inode, ri); 651 652 /* deleted inode */ 653 if (inode->i_nlink == 0) 654 clear_inline_node(node_page); 655 656 F2FS_I(inode)->i_disk_time[0] = inode->i_atime; 657 F2FS_I(inode)->i_disk_time[1] = inode->i_ctime; 658 F2FS_I(inode)->i_disk_time[2] = inode->i_mtime; 659 F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime; 660 661#ifdef CONFIG_F2FS_CHECK_FS 662 f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page); 663#endif 664} 665 666void f2fs_update_inode_page(struct inode *inode) 667{ 668 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 669 struct page *node_page; 670retry: 671 node_page = f2fs_get_node_page(sbi, inode->i_ino); 672 if (IS_ERR(node_page)) { 673 int err = PTR_ERR(node_page); 674 if (err == -ENOMEM) { 675 cond_resched(); 676 goto retry; 677 } else if (err != -ENOENT) { 678 f2fs_stop_checkpoint(sbi, false); 679 } 680 return; 681 } 682 f2fs_update_inode(inode, node_page); 683 f2fs_put_page(node_page, 1); 684} 685 686int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc) 687{ 688 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 689 690 if (inode->i_ino == F2FS_NODE_INO(sbi) || 691 inode->i_ino == F2FS_META_INO(sbi)) 692 return 0; 693 694 /* 695 * atime could be updated without dirtying f2fs inode in lazytime mode 696 */ 697 if (f2fs_is_time_consistent(inode) && 698 !is_inode_flag_set(inode, FI_DIRTY_INODE)) 699 return 0; 700 701 if (!f2fs_is_checkpoint_ready(sbi)) 702 return -ENOSPC; 703 704 /* 705 * We need to balance fs here to prevent from producing dirty node pages 706 * during the urgent cleaning time when runing out of free sections. 707 */ 708 f2fs_update_inode_page(inode); 709 if (wbc && wbc->nr_to_write) 710 f2fs_balance_fs(sbi, true); 711 return 0; 712} 713 714/* 715 * Called at the last iput() if i_nlink is zero 716 */ 717void f2fs_evict_inode(struct inode *inode) 718{ 719 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 720 nid_t xnid = F2FS_I(inode)->i_xattr_nid; 721 int err = 0; 722 723 /* some remained atomic pages should discarded */ 724 if (f2fs_is_atomic_file(inode)) 725 f2fs_drop_inmem_pages(inode); 726 727 trace_f2fs_evict_inode(inode); 728 truncate_inode_pages_final(&inode->i_data); 729 730 if (inode->i_ino == F2FS_NODE_INO(sbi) || 731 inode->i_ino == F2FS_META_INO(sbi)) 732 goto out_clear; 733 734 f2fs_bug_on(sbi, get_dirty_pages(inode)); 735 f2fs_remove_dirty_inode(inode); 736 737 f2fs_destroy_extent_tree(inode); 738 739 if (inode->i_nlink || is_bad_inode(inode)) 740 goto no_delete; 741 742 err = dquot_initialize(inode); 743 if (err) { 744 err = 0; 745 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); 746 } 747 748 f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO); 749 f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO); 750 f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO); 751 752 sb_start_intwrite(inode->i_sb); 753 set_inode_flag(inode, FI_NO_ALLOC); 754 i_size_write(inode, 0); 755retry: 756 if (F2FS_HAS_BLOCKS(inode)) 757 err = f2fs_truncate(inode); 758 759 if (time_to_inject(sbi, FAULT_EVICT_INODE)) { 760 f2fs_show_injection_info(sbi, FAULT_EVICT_INODE); 761 err = -EIO; 762 } 763 764 if (!err) { 765 f2fs_lock_op(sbi); 766 err = f2fs_remove_inode_page(inode); 767 f2fs_unlock_op(sbi); 768 if (err == -ENOENT) { 769 err = 0; 770 771 /* 772 * in fuzzed image, another node may has the same 773 * block address as inode's, if it was truncated 774 * previously, truncation of inode node will fail. 775 */ 776 if (is_inode_flag_set(inode, FI_DIRTY_INODE)) { 777 f2fs_warn(F2FS_I_SB(inode), 778 "f2fs_evict_inode: inconsistent node id, ino:%lu", 779 inode->i_ino); 780 f2fs_inode_synced(inode); 781 set_sbi_flag(sbi, SBI_NEED_FSCK); 782 } 783 } 784 } 785 786 /* give more chances, if ENOMEM case */ 787 if (err == -ENOMEM) { 788 err = 0; 789 goto retry; 790 } 791 792 if (err) { 793 f2fs_update_inode_page(inode); 794 if (dquot_initialize_needed(inode)) 795 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); 796 } 797 sb_end_intwrite(inode->i_sb); 798no_delete: 799 dquot_drop(inode); 800 801 stat_dec_inline_xattr(inode); 802 stat_dec_inline_dir(inode); 803 stat_dec_inline_inode(inode); 804 stat_dec_compr_inode(inode); 805 stat_sub_compr_blocks(inode, 806 atomic_read(&F2FS_I(inode)->i_compr_blocks)); 807 808 if (likely(!f2fs_cp_error(sbi) && 809 !is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 810 f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE)); 811 else 812 f2fs_inode_synced(inode); 813 814 /* for the case f2fs_new_inode() was failed, .i_ino is zero, skip it */ 815 if (inode->i_ino) 816 invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, 817 inode->i_ino); 818 if (xnid) 819 invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid); 820 if (inode->i_nlink) { 821 if (is_inode_flag_set(inode, FI_APPEND_WRITE)) 822 f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO); 823 if (is_inode_flag_set(inode, FI_UPDATE_WRITE)) 824 f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO); 825 } 826 if (is_inode_flag_set(inode, FI_FREE_NID)) { 827 f2fs_alloc_nid_failed(sbi, inode->i_ino); 828 clear_inode_flag(inode, FI_FREE_NID); 829 } else { 830 /* 831 * If xattr nid is corrupted, we can reach out error condition, 832 * err & !f2fs_exist_written_data(sbi, inode->i_ino, ORPHAN_INO)). 833 * In that case, f2fs_check_nid_range() is enough to give a clue. 834 */ 835 } 836out_clear: 837 fscrypt_put_encryption_info(inode); 838 fsverity_cleanup_inode(inode); 839 clear_inode(inode); 840} 841 842/* caller should call f2fs_lock_op() */ 843void f2fs_handle_failed_inode(struct inode *inode) 844{ 845 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 846 struct node_info ni; 847 int err; 848 849 /* 850 * clear nlink of inode in order to release resource of inode 851 * immediately. 852 */ 853 clear_nlink(inode); 854 855 /* 856 * we must call this to avoid inode being remained as dirty, resulting 857 * in a panic when flushing dirty inodes in gdirty_list. 858 */ 859 f2fs_update_inode_page(inode); 860 f2fs_inode_synced(inode); 861 862 /* don't make bad inode, since it becomes a regular file. */ 863 unlock_new_inode(inode); 864 865 /* 866 * Note: we should add inode to orphan list before f2fs_unlock_op() 867 * so we can prevent losing this orphan when encoutering checkpoint 868 * and following suddenly power-off. 869 */ 870 err = f2fs_get_node_info(sbi, inode->i_ino, &ni); 871 if (err) { 872 set_sbi_flag(sbi, SBI_NEED_FSCK); 873 set_inode_flag(inode, FI_FREE_NID); 874 f2fs_warn(sbi, "May loss orphan inode, run fsck to fix."); 875 goto out; 876 } 877 878 if (ni.blk_addr != NULL_ADDR) { 879 err = f2fs_acquire_orphan_inode(sbi); 880 if (err) { 881 set_sbi_flag(sbi, SBI_NEED_FSCK); 882 f2fs_warn(sbi, "Too many orphan inodes, run fsck to fix."); 883 } else { 884 f2fs_add_orphan_inode(inode); 885 } 886 f2fs_alloc_nid_done(sbi, inode->i_ino); 887 } else { 888 set_inode_flag(inode, FI_FREE_NID); 889 } 890 891out: 892 f2fs_unlock_op(sbi); 893 894 /* iput will drop the inode object */ 895 iput(inode); 896} 897