Lines matching refs: inode

3  * fs/f2fs/inode.c
27 void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
29 if (is_inode_flag_set(inode, FI_NEW_INODE))
32 if (f2fs_inode_dirtied(inode, sync))
35 mark_inode_dirty_sync(inode);
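f2fs_mark_inode_dirty_sync() skips inodes still flagged FI_NEW_INODE, records the dirty state via f2fs_inode_dirtied(), and only then calls the VFS mark_inode_dirty_sync(). A minimal caller-side sketch; the helper around it is hypothetical, only f2fs_mark_inode_dirty_sync() itself is from the file:

	/* Hypothetical caller: change an in-core field, then make sure the
	 * change is scheduled to reach the on-disk inode. */
	static void example_set_size(struct inode *inode, loff_t new_size)
	{
		i_size_write(inode, new_size);
		f2fs_mark_inode_dirty_sync(inode, true);
	}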
38 void f2fs_set_inode_flags(struct inode *inode)
40 unsigned int flags = F2FS_I(inode)->i_flags;
53 if (file_is_encrypt(inode))
55 if (file_is_verity(inode))
59 inode_set_flags(inode, new_fl,
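f2fs_set_inode_flags() translates the on-disk F2FS_*_FL bits held in i_flags into the generic S_* inode flags and applies them with inode_set_flags(). A sketch of the translation pattern for a subset of flags; the exact mapping is reconstructed from memory of the upstream code and should be read as an assumption:

	unsigned int new_fl = 0;

	if (flags & F2FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & F2FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & F2FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & F2FS_NOATIME_FL)
		new_fl |= S_NOATIME;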
64 static void __get_inode_rdev(struct inode *inode, struct page *node_page)
66 __le32 *addr = get_dnode_addr(inode, node_page);
68 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
69 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
71 inode->i_rdev = old_decode_dev(le32_to_cpu(addr[0]));
73 inode->i_rdev = new_decode_dev(le32_to_cpu(addr[1]));
77 static void __set_inode_rdev(struct inode *inode, struct page *node_page)
79 __le32 *addr = get_dnode_addr(inode, node_page);
81 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
82 if (old_valid_dev(inode->i_rdev)) {
83 addr[0] = cpu_to_le32(old_encode_dev(inode->i_rdev));
87 addr[1] = cpu_to_le32(new_encode_dev(inode->i_rdev));
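The rdev helpers store the device number of special inodes in the first two data-address slots of the raw inode: slot 0 holds the legacy 16-bit encoding, slot 1 the 32-bit one. A hedged reconstruction of the read side around the visible lines; the branch structure is an assumption:

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
	    S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		if (addr[0])
			inode->i_rdev = old_decode_dev(le32_to_cpu(addr[0]));
		else
			inode->i_rdev = new_decode_dev(le32_to_cpu(addr[1]));
	}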
93 static void __recover_inline_status(struct inode *inode, struct page *ipage)
95 void *inline_data = inline_data_addr(inode, ipage);
97 __le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);
103 set_inode_flag(inode, FI_DATA_EXIST);
104 set_raw_inline(inode, F2FS_INODE(ipage));
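__recover_inline_status() scans the inline data area word by word; any non-zero word means inline data survived, so FI_DATA_EXIST is set and the raw inline flags are rewritten. A condensed sketch of that scan, with the page-writeback handling of the real function omitted:

	__le32 *start = inline_data;

	while (start < end) {
		if (*start++) {
			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}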
189 static bool sanity_check_compress_inode(struct inode *inode,
192 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
197 "%s: inode (ino=%lx) has unsupported compress algorithm: %u, run fsck to fix",
198 __func__, inode->i_ino, ri->i_compress_algorithm);
202 SECTOR_TO_BLOCK(inode->i_blocks)) {
204 "%s: inode (ino=%lx) has inconsistent i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
205 __func__, inode->i_ino, le64_to_cpu(ri->i_compr_blocks),
206 SECTOR_TO_BLOCK(inode->i_blocks));
212 "%s: inode (ino=%lx) has unsupported log cluster size: %u, run fsck to fix",
213 __func__, inode->i_ino, ri->i_log_cluster_size);
256 f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported compress level: %u, run fsck to fix",
257 __func__, inode->i_ino, clevel);
261 static bool sanity_check_inode(struct inode *inode, struct page *node_page)
263 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
264 struct f2fs_inode_info *fi = F2FS_I(inode);
270 f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
271 __func__, inode->i_ino, iblocks);
276 f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
277 __func__, inode->i_ino,
282 if (f2fs_has_extra_attr(inode)) {
284 f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
285 __func__, inode->i_ino);
291 f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
292 __func__, inode->i_ino, fi->i_extra_isize,
297 f2fs_has_inline_xattr(inode) &&
300 f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
301 __func__, inode->i_ino, fi->i_inline_xattr_size,
309 if (!sanity_check_compress_inode(inode, ri))
313 f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
314 __func__, inode->i_ino);
320 f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
321 __func__, inode->i_ino, F2FS_FEATURE_PRJQUOTA);
325 f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
326 __func__, inode->i_ino, F2FS_FEATURE_INODE_CHKSUM);
330 f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
331 __func__, inode->i_ino, F2FS_FEATURE_FLEXIBLE_INLINE_XATTR);
335 f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
336 __func__, inode->i_ino, F2FS_FEATURE_INODE_CRTIME);
340 f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
341 __func__, inode->i_ino, F2FS_FEATURE_COMPRESSION);
346 if (f2fs_sanity_check_inline_data(inode)) {
347 f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
348 __func__, inode->i_ino, inode->i_mode);
352 if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
353 f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
354 __func__, inode->i_ino, inode->i_mode);
359 f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off",
360 __func__, inode->i_ino);
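Every check in sanity_check_inode() and sanity_check_compress_inode() follows the same shape: test one on-disk invariant, log a "run fsck" warning keyed by the inode number, and return false so that do_read_inode() fails the lookup with -EFSCORRUPTED. The casefold check, reconstructed around the visible warning; the condition is filled in from memory and is an assumption:

	if (fi->i_flags & F2FS_CASEFOLD_FL && !f2fs_sb_has_casefold(sbi)) {
		f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off",
			  __func__, inode->i_ino);
		return false;
	}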
367 static void init_idisk_time(struct inode *inode)
369 struct f2fs_inode_info *fi = F2FS_I(inode);
371 fi->i_disk_time[0] = inode->i_atime;
372 fi->i_disk_time[1] = inode_get_ctime(inode);
373 fi->i_disk_time[2] = inode->i_mtime;
376 static int do_read_inode(struct inode *inode)
378 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
379 struct f2fs_inode_info *fi = F2FS_I(inode);
385 if (f2fs_check_nid_range(sbi, inode->i_ino))
388 node_page = f2fs_get_node_page(sbi, inode->i_ino);
394 inode->i_mode = le16_to_cpu(ri->i_mode);
395 i_uid_write(inode, le32_to_cpu(ri->i_uid));
396 i_gid_write(inode, le32_to_cpu(ri->i_gid));
397 set_nlink(inode, le32_to_cpu(ri->i_links));
398 inode->i_size = le64_to_cpu(ri->i_size);
399 inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);
401 inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
402 inode_set_ctime(inode, le64_to_cpu(ri->i_ctime),
404 inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
405 inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
406 inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
407 inode->i_generation = le32_to_cpu(ri->i_generation);
408 if (S_ISDIR(inode->i_mode))
410 else if (S_ISREG(inode->i_mode))
415 if (S_ISREG(inode->i_mode))
422 get_inline_info(inode, ri);
424 fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
429 } else if (f2fs_has_inline_xattr(inode) ||
430 f2fs_has_inline_dentry(inode)) {
436 * in inode layout, even if inline_xattr is disabled. In order
443 if (!sanity_check_inode(inode, node_page)) {
451 if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
452 __recover_inline_status(inode, node_page);
454 /* try to recover cold bit for non-dir inode */
455 if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
462 __get_inode_rdev(inode, node_page);
464 if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
465 fi->last_disk_size = inode->i_size;
468 set_inode_flag(inode, FI_PROJ_INHERIT);
470 if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
477 if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
483 if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
499 set_inode_flag(inode, FI_COMPRESSED_FILE);
503 init_idisk_time(inode);
506 f2fs_init_read_extent_tree(inode, node_page);
507 f2fs_init_age_extent_tree(inode);
509 if (!sanity_check_extent_cache(inode)) {
517 stat_inc_inline_xattr(inode);
518 stat_inc_inline_inode(inode);
519 stat_inc_inline_dir(inode);
520 stat_inc_compr_inode(inode);
521 stat_add_compr_blocks(inode, atomic_read(&fi->i_compr_blocks));
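do_read_inode() copies the raw little-endian f2fs_inode fields from the inode's node page into the VFS inode and the in-core f2fs_inode_info, then runs the sanity checks, recovers inline/rdev state, and seeds the extent trees and stats. A heavily condensed sketch of that shape; the function name is hypothetical and error handling is abbreviated:

	static int example_read_inode(struct inode *inode)
	{
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
		struct page *node_page;
		struct f2fs_inode *ri;

		node_page = f2fs_get_node_page(sbi, inode->i_ino);
		if (IS_ERR(node_page))
			return PTR_ERR(node_page);
		ri = F2FS_INODE(node_page);

		inode->i_mode = le16_to_cpu(ri->i_mode);
		/* ...remaining raw fields copied the same way... */

		if (!sanity_check_inode(inode, node_page)) {
			f2fs_put_page(node_page, 1);
			return -EFSCORRUPTED;
		}

		f2fs_put_page(node_page, 1);
		return 0;
	}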
532 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
535 struct inode *inode;
538 inode = iget_locked(sb, ino);
539 if (!inode)
542 if (!(inode->i_state & I_NEW)) {
544 f2fs_err(sbi, "inaccessible inode: %lu, run fsck to repair", ino);
547 trace_f2fs_iget_exit(inode, ret);
548 iput(inode);
553 trace_f2fs_iget(inode);
554 return inode;
560 ret = do_read_inode(inode);
565 inode->i_mapping->a_ops = &f2fs_node_aops;
566 mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
568 inode->i_mapping->a_ops = &f2fs_meta_aops;
569 mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
572 inode->i_mapping->a_ops = &f2fs_compress_aops;
575 * inode
577 inode->i_mode |= S_IFREG;
579 mapping_set_gfp_mask(inode->i_mapping,
581 } else if (S_ISREG(inode->i_mode)) {
582 inode->i_op = &f2fs_file_inode_operations;
583 inode->i_fop = &f2fs_file_operations;
584 inode->i_mapping->a_ops = &f2fs_dblock_aops;
585 } else if (S_ISDIR(inode->i_mode)) {
586 inode->i_op = &f2fs_dir_inode_operations;
587 inode->i_fop = &f2fs_dir_operations;
588 inode->i_mapping->a_ops = &f2fs_dblock_aops;
589 mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
590 } else if (S_ISLNK(inode->i_mode)) {
591 if (file_is_encrypt(inode))
592 inode->i_op = &f2fs_encrypted_symlink_inode_operations;
594 inode->i_op = &f2fs_symlink_inode_operations;
595 inode_nohighmem(inode);
596 inode->i_mapping->a_ops = &f2fs_dblock_aops;
597 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
598 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
599 inode->i_op = &f2fs_special_inode_operations;
600 init_special_inode(inode, inode->i_mode, inode->i_rdev);
605 f2fs_set_inode_flags(inode);
607 if (file_should_truncate(inode) &&
609 ret = f2fs_truncate(inode);
612 file_dont_truncate(inode);
615 unlock_new_inode(inode);
616 trace_f2fs_iget(inode);
617 return inode;
620 f2fs_inode_synced(inode);
621 iget_failed(inode);
622 trace_f2fs_iget_exit(inode, ret);
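f2fs_iget() is the usual iget_locked() pattern: a cached inode without I_NEW is returned immediately, otherwise do_read_inode() fills it in and the file mode selects the inode_operations and address_space_operations before unlock_new_inode(). An illustrative caller fragment:

	struct inode *inode = f2fs_iget(sb, ino);

	if (IS_ERR(inode))
		return PTR_ERR(inode);
	/* ... use the inode ... */
	iput(inode);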
626 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
628 struct inode *inode;
630 inode = f2fs_iget(sb, ino);
631 if (IS_ERR(inode)) {
632 if (PTR_ERR(inode) == -ENOMEM) {
637 return inode;
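f2fs_iget_retry() only retries when f2fs_iget() fails with -ENOMEM; other errors are passed straight through. A sketch of that loop; the exact wait between attempts is an assumption:

	struct inode *inode;
retry:
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode) && PTR_ERR(inode) == -ENOMEM) {
		memalloc_retry_wait(GFP_NOFS);	/* assumption: brief backoff before retrying */
		goto retry;
	}
	return inode;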
640 void f2fs_update_inode(struct inode *inode, struct page *node_page)
643 struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
648 f2fs_inode_synced(inode);
652 ri->i_mode = cpu_to_le16(inode->i_mode);
653 ri->i_advise = F2FS_I(inode)->i_advise;
654 ri->i_uid = cpu_to_le32(i_uid_read(inode));
655 ri->i_gid = cpu_to_le32(i_gid_read(inode));
656 ri->i_links = cpu_to_le32(inode->i_nlink);
657 ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);
659 if (!f2fs_is_atomic_file(inode) ||
660 is_inode_flag_set(inode, FI_ATOMIC_COMMITTED))
661 ri->i_size = cpu_to_le64(i_size_read(inode));
670 set_raw_inline(inode, ri);
672 ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
673 ri->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
674 ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
675 ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
676 ri->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
677 ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
678 if (S_ISDIR(inode->i_mode))
680 cpu_to_le32(F2FS_I(inode)->i_current_depth);
681 else if (S_ISREG(inode->i_mode))
683 cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
684 ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
685 ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
686 ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
687 ri->i_generation = cpu_to_le32(inode->i_generation);
688 ri->i_dir_level = F2FS_I(inode)->i_dir_level;
690 if (f2fs_has_extra_attr(inode)) {
691 ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);
693 if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
695 cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);
697 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
698 F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
703 F2FS_I(inode)->i_projid);
707 if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
708 F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
711 cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
713 cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
716 if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
717 F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
723 &F2FS_I(inode)->i_compr_blocks));
725 F2FS_I(inode)->i_compress_algorithm;
726 compress_flag = F2FS_I(inode)->i_compress_flag |
727 F2FS_I(inode)->i_compress_level <<
731 F2FS_I(inode)->i_log_cluster_size;
735 __set_inode_rdev(inode, node_page);
737 /* deleted inode */
738 if (inode->i_nlink == 0)
741 init_idisk_time(inode);
743 f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
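f2fs_update_inode() is the inverse of do_read_inode(): it converts the in-core fields back to little-endian in the raw inode on the node page, clears the dirty state with f2fs_inode_synced(), and recomputes the inode checksum. The caller must already hold the node page, as in this illustrative pattern, which mirrors what f2fs_update_inode_page() below does:

	struct page *node_page = f2fs_get_node_page(sbi, inode->i_ino);

	if (!IS_ERR(node_page)) {
		f2fs_update_inode(inode, node_page);
		f2fs_put_page(node_page, 1);
	}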
747 void f2fs_update_inode_page(struct inode *inode)
749 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
753 node_page = f2fs_get_node_page(sbi, inode->i_ino);
766 f2fs_update_inode(inode, node_page);
770 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
772 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
774 if (inode->i_ino == F2FS_NODE_INO(sbi) ||
775 inode->i_ino == F2FS_META_INO(sbi))
779 * atime could be updated without dirtying f2fs inode in lazytime mode
781 if (f2fs_is_time_consistent(inode) &&
782 !is_inode_flag_set(inode, FI_DIRTY_INODE))
792 f2fs_update_inode_page(inode);
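f2fs_write_inode() is the ->write_inode callback: the node and meta inodes are never written this way, and a pure lazytime atime update (times still consistent, FI_DIRTY_INODE not set) is skipped as well; otherwise the on-disk inode page is refreshed. A condensed sketch of that early-exit shape; checkpoint and fs-balancing handling in the real function are omitted:

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
	    inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	if (f2fs_is_time_consistent(inode) &&
	    !is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	f2fs_update_inode_page(inode);
	return 0;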
801 void f2fs_evict_inode(struct inode *inode)
803 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
804 struct f2fs_inode_info *fi = F2FS_I(inode);
808 f2fs_abort_atomic_write(inode, true);
816 trace_f2fs_evict_inode(inode);
817 truncate_inode_pages_final(&inode->i_data);
819 if ((inode->i_nlink || is_bad_inode(inode)) &&
820 test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
821 f2fs_invalidate_compress_pages(sbi, inode->i_ino);
823 if (inode->i_ino == F2FS_NODE_INO(sbi) ||
824 inode->i_ino == F2FS_META_INO(sbi) ||
825 inode->i_ino == F2FS_COMPRESS_INO(sbi))
828 f2fs_bug_on(sbi, get_dirty_pages(inode));
829 f2fs_remove_dirty_inode(inode);
831 f2fs_destroy_extent_tree(inode);
833 if (inode->i_nlink || is_bad_inode(inode))
836 err = f2fs_dquot_initialize(inode);
842 f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
843 f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
844 f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);
847 sb_start_intwrite(inode->i_sb);
848 set_inode_flag(inode, FI_NO_ALLOC);
849 i_size_write(inode, 0);
851 if (F2FS_HAS_BLOCKS(inode))
852 err = f2fs_truncate(inode);
859 err = f2fs_remove_inode_page(inode);
866 * block address as inode's, if it was truncated
867 * previously, truncation of inode node will fail.
869 if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
870 f2fs_warn(F2FS_I_SB(inode),
872 inode->i_ino);
873 f2fs_inode_synced(inode);
886 f2fs_update_inode_page(inode);
887 if (dquot_initialize_needed(inode))
891 sb_end_intwrite(inode->i_sb);
893 dquot_drop(inode);
895 stat_dec_inline_xattr(inode);
896 stat_dec_inline_dir(inode);
897 stat_dec_inline_inode(inode);
898 stat_dec_compr_inode(inode);
899 stat_sub_compr_blocks(inode,
904 f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
906 f2fs_inode_synced(inode);
909 if (inode->i_ino)
910 invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
911 inode->i_ino);
914 if (inode->i_nlink) {
915 if (is_inode_flag_set(inode, FI_APPEND_WRITE))
916 f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
917 if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
918 f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
920 if (is_inode_flag_set(inode, FI_FREE_NID)) {
921 f2fs_alloc_nid_failed(sbi, inode->i_ino);
922 clear_inode_flag(inode, FI_FREE_NID);
926 * err & !f2fs_exist_written_data(sbi, inode->i_ino, ORPHAN_INO)).
931 fscrypt_put_encryption_info(inode);
932 fsverity_cleanup_inode(inode);
933 clear_inode(inode);
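f2fs_evict_inode() truncates the page cache, and for an unlinked (and not bad) inode it also releases the data blocks and the inode's node page before dropping quota, stats and crypto/verity state. A condensed sketch of the core unlink path, illustrative only; locking, quota and orphan handling are omitted:

	int err = 0;

	truncate_inode_pages_final(&inode->i_data);

	if (inode->i_nlink == 0 && !is_bad_inode(inode)) {
		set_inode_flag(inode, FI_NO_ALLOC);
		i_size_write(inode, 0);
		if (F2FS_HAS_BLOCKS(inode))
			err = f2fs_truncate(inode);
		if (!err)
			err = f2fs_remove_inode_page(inode);
	}

	clear_inode(inode);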
937 void f2fs_handle_failed_inode(struct inode *inode)
939 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
944 * clear nlink of inode in order to release resource of inode
947 clear_nlink(inode);
950 * we must call this to avoid inode being remained as dirty, resulting
953 f2fs_update_inode_page(inode);
954 f2fs_inode_synced(inode);
956 /* don't make bad inode, since it becomes a regular file. */
957 unlock_new_inode(inode);
960 * Note: we should add inode to orphan list before f2fs_unlock_op()
964 err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
967 set_inode_flag(inode, FI_FREE_NID);
968 f2fs_warn(sbi, "May loss orphan inode, run fsck to fix.");
978 f2fs_add_orphan_inode(inode);
980 f2fs_alloc_nid_done(sbi, inode->i_ino);
982 set_inode_flag(inode, FI_FREE_NID);
988 /* iput will drop the inode object */
989 iput(inode);
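f2fs_handle_failed_inode() is the cleanup hook for a creation path that fails after f2fs_new_inode(): nlink is cleared, the inode page is brought up to date and marked synced, and the inode is either added to the orphan list or flagged FI_FREE_NID so its nid is released; the final iput() drops the object. An illustrative caller, with the failing step purely hypothetical:

	err = f2fs_example_post_create_step(inode);	/* hypothetical step that may fail */
	if (err) {
		f2fs_handle_failed_inode(inode);
		return err;
	}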