Lines matching refs:ii (references to the local variable ii, a struct nilfs_inode_info pointer, in the nilfs2 inode code)
78 struct nilfs_inode_info *ii = NILFS_I(inode);
85 ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
101 err = nilfs_bmap_insert(ii->i_bmap, blkoff,
329 struct nilfs_inode_info *ii;
343 ii = NILFS_I(inode);
344 ii->i_state = BIT(NILFS_I_NEW);
345 ii->i_root = root;
364 ii->i_bh = bh;
372 err = nilfs_bmap_read(ii->i_bmap, NULL);
376 set_bit(NILFS_I_BMAP, &ii->i_state);
380 ii->i_flags = nilfs_mask_flags(
383 /* ii->i_file_acl = 0; */
384 /* ii->i_dir_acl = 0; */
385 ii->i_dir_start_lookup = 0;
444 struct nilfs_inode_info *ii = NILFS_I(inode);
464 ii->i_flags = le32_to_cpu(raw_inode->i_flags);
466 ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
467 ii->i_dir_acl = S_ISREG(inode->i_mode) ?
470 ii->i_dir_start_lookup = 0;
475 err = nilfs_bmap_read(ii->i_bmap, raw_inode);
478 set_bit(NILFS_I_BMAP, &ii->i_state);
542 struct nilfs_inode_info *ii;
547 ii = NILFS_I(inode);
548 if (test_bit(NILFS_I_BTNC, &ii->i_state)) {
554 if (test_bit(NILFS_I_SHADOW, &ii->i_state)) {
561 if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
564 return args->for_gc && args->cno == ii->i_cno;
669 struct nilfs_inode_info *ii = NILFS_I(inode);
673 if (ii->i_assoc_inode)
677 args.root = ii->i_root;
678 args.cno = ii->i_cno;
679 args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
681 args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0;
692 NILFS_I(btnc_inode)->i_bmap = ii->i_bmap;
693 ii->i_assoc_inode = btnc_inode;
707 struct nilfs_inode_info *ii = NILFS_I(inode);
708 struct inode *btnc_inode = ii->i_assoc_inode;
712 ii->i_assoc_inode = NULL;
764 struct nilfs_inode_info *ii = NILFS_I(inode);
777 raw_inode->i_flags = cpu_to_le32(ii->i_flags);
791 nilfs_bmap_write(ii->i_bmap, raw_inode);
804 struct nilfs_inode_info *ii = NILFS_I(inode);
805 struct inode *ifile = ii->i_root->ifile;
810 if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
813 set_bit(NILFS_I_INODE_SYNC, &ii->i_state);
827 static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
833 if (!test_bit(NILFS_I_BMAP, &ii->i_state))
836 ret = nilfs_bmap_last_key(ii->i_bmap, &b);
846 ret = nilfs_bmap_truncate(ii->i_bmap, b);
847 nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
849 nilfs_bmap_truncate(ii->i_bmap, b) == 0))
853 nilfs_warn(ii->vfs_inode.i_sb, "error %d truncating bmap (ino=%lu)",
854 ret, ii->vfs_inode.i_ino);
863 struct nilfs_inode_info *ii = NILFS_I(inode);
865 if (!test_bit(NILFS_I_BMAP, &ii->i_state))
876 nilfs_truncate_bmap(ii, blkoff);
893 struct nilfs_inode_info *ii = NILFS_I(inode);
898 BUG_ON(!list_empty(&ii->i_dirty));
899 brelse(ii->i_bh);
900 ii->i_bh = NULL;
905 if (test_bit(NILFS_I_BMAP, &ii->i_state))
906 nilfs_bmap_clear(ii->i_bmap);
908 if (!test_bit(NILFS_I_BTNC, &ii->i_state))
911 if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
912 nilfs_put_root(ii->i_root);
919 struct nilfs_inode_info *ii = NILFS_I(inode);
923 if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
951 nilfs_truncate_bmap(ii, 0);
955 ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
957 atomic64_dec(&ii->i_root->inodes_count);
1024 struct nilfs_inode_info *ii = NILFS_I(inode);
1028 if (ii->i_bh == NULL || unlikely(!buffer_uptodate(ii->i_bh))) {
1030 err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
1035 if (ii->i_bh == NULL)
1036 ii->i_bh = *pbh;
1037 else if (unlikely(!buffer_uptodate(ii->i_bh))) {
1038 __brelse(ii->i_bh);
1039 ii->i_bh = *pbh;
1042 *pbh = ii->i_bh;
1045 *pbh = ii->i_bh;
1054 struct nilfs_inode_info *ii = NILFS_I(inode);
1058 if (!list_empty(&ii->i_dirty)) {
1060 ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
1061 test_bit(NILFS_I_BUSY, &ii->i_state);
1069 struct nilfs_inode_info *ii = NILFS_I(inode);
1074 if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
1078 if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
1079 !test_bit(NILFS_I_BUSY, &ii->i_state)) {
1084 if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
1098 list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
1099 set_bit(NILFS_I_QUEUED, &ii->i_state);
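The listing above repeats two idioms: NILFS_I(inode) to recover the filesystem-private nilfs_inode_info from an embedded VFS inode, and set_bit()/test_bit() on ii->i_state to track per-inode flags such as NILFS_I_NEW, NILFS_I_BMAP and NILFS_I_DIRTY. Below is a minimal, self-contained userspace sketch of those two idioms only; the struct layout, the enum values and the bit helpers are simplified stand-ins for illustration, not the definitions from the nilfs2 headers, and the helpers are non-atomic unlike the kernel's.

/*
 * Userspace sketch (not kernel code) of the patterns seen in the
 * listing: a container_of-style NILFS_I() cast and bit-flag state
 * kept in i_state. All names below are illustrative stand-ins.
 */
#include <stddef.h>
#include <stdio.h>

struct inode {                      /* stand-in for the VFS inode */
	unsigned long i_ino;
};

struct nilfs_inode_info {           /* simplified; the real struct has many more fields */
	unsigned long i_state;      /* bit flags manipulated with the helpers below */
	unsigned long i_flags;
	struct inode vfs_inode;     /* VFS inode embedded in the private structure */
};

/* Illustrative bit numbers mirroring the NILFS_I_* names in the listing. */
enum { NILFS_I_NEW = 0, NILFS_I_BMAP, NILFS_I_DIRTY, NILFS_I_QUEUED };

#define BIT(nr) (1UL << (nr))

/* container_of-style cast: from &vfs_inode back to the owning info struct. */
static struct nilfs_inode_info *NILFS_I(struct inode *inode)
{
	return (struct nilfs_inode_info *)
		((char *)inode - offsetof(struct nilfs_inode_info, vfs_inode));
}

/* Simplified, non-atomic stand-ins for the kernel's set_bit()/test_bit(). */
static void set_bit(int nr, unsigned long *addr)  { *addr |= BIT(nr); }
static int  test_bit(int nr, unsigned long *addr) { return (*addr >> nr) & 1; }

int main(void)
{
	struct nilfs_inode_info info = { .i_state = BIT(NILFS_I_NEW) };
	struct inode *inode = &info.vfs_inode;

	/* The recurring pattern from the listing: get ii, then flip state bits. */
	struct nilfs_inode_info *ii = NILFS_I(inode);
	set_bit(NILFS_I_BMAP, &ii->i_state);

	printf("NEW=%d BMAP=%d DIRTY=%d\n",
	       test_bit(NILFS_I_NEW, &ii->i_state),
	       test_bit(NILFS_I_BMAP, &ii->i_state),
	       test_bit(NILFS_I_DIRTY, &ii->i_state));
	return 0;
}

In the kernel, NILFS_I() is a container_of() cast and the bit helpers are the atomic set_bit()/test_bit()/test_and_set_bit() family; the sketch only mirrors the shape of the pattern, not its concurrency guarantees.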