Lines matching refs:inode (fs/reiserfs/inode.c)

27 void reiserfs_evict_inode(struct inode *inode)
35 2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb);
39 if (!inode->i_nlink && !is_bad_inode(inode))
40 dquot_initialize(inode);
42 truncate_inode_pages_final(&inode->i_data);
43 if (inode->i_nlink)
47 * The = 0 happens when we abort creating a new inode
51 if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) {
53 reiserfs_delete_xattrs(inode);
55 reiserfs_write_lock(inode->i_sb);
57 if (journal_begin(&th, inode->i_sb, jbegin_count))
59 reiserfs_update_inode_transaction(inode);
61 reiserfs_discard_prealloc(&th, inode);
63 err = reiserfs_delete_object(&th, inode);
71 int depth = reiserfs_write_unlock_nested(inode->i_sb);
72 dquot_free_inode(inode);
73 reiserfs_write_lock_nested(inode->i_sb, depth);
91 remove_save_link(inode, 0 /* not truncate */);
93 reiserfs_write_unlock(inode->i_sb);
100 clear_inode(inode);
102 dquot_drop(inode);
103 inode->i_blocks = 0;
107 clear_inode(inode);
108 dquot_drop(inode);
124 * take base of inode_key (it comes from inode always) (dirid, objectid)
125 * and version from an inode, set offset and type of key
127 void make_cpu_key(struct cpu_key *key, struct inode *inode, loff_t offset,
130 _make_cpu_key(key, get_inode_item_key_version(inode),
131 le32_to_cpu(INODE_PKEY(inode)->k_dir_id),
132 le32_to_cpu(INODE_PKEY(inode)->k_objectid), offset, type,
163 * I don't want a bloated inode struct..., and I don't like code complexity....
232 b_blocknr_t block, struct inode *inode)
234 map_bh(bh, inode->i_sb, block);
241 static int file_capable(struct inode *inode, sector_t block)
244 if (get_inode_item_key_version(inode) != KEY_FORMAT_3_5 ||
246 block < (1 << (31 - inode->i_sb->s_blocksize_bits)))
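The file_capable() lines above (241-246) enforce the old KEY_FORMAT_3_5 limit: 3.5-format keys hold the byte offset in a 32-bit field, so a block is only addressable while block * blocksize stays below 2^31 (newer 3.6-format keys use a much wider offset field and skip the check). A small userspace sketch of the same arithmetic, assuming 4 KiB blocks; it is an illustration, not the kernel function:

#include <stdio.h>

/* Model of the KEY_FORMAT_3_5 test: block < 1 << (31 - blocksize_bits). */
static int old_format_capable(unsigned long block, unsigned int blocksize_bits)
{
    return block < (1UL << (31 - blocksize_bits));
}

int main(void)
{
    unsigned int bits = 12;                     /* 4096-byte blocks (assumed) */
    unsigned long limit = 1UL << (31 - bits);

    printf("last addressable block: %lu (file limit %lu bytes)\n",
           limit - 1, limit << bits);
    printf("block %lu capable: %d\n", limit - 1, old_format_capable(limit - 1, bits));
    printf("block %lu capable: %d\n", limit, old_format_capable(limit, bits));
    return 0;
}

With 4 KiB blocks the last capable block is 524287, i.e. old-format files top out at 2 GiB.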
253 struct inode *inode, struct treepath *path)
267 reiserfs_update_sd(th, inode);
272 reiserfs_update_inode_transaction(inode);
285 static int _get_block_create_0(struct inode *inode, sector_t block,
301 make_cpu_key(&key, inode,
302 (loff_t) block * inode->i_sb->s_blocksize + 1, TYPE_ANY,
305 result = search_for_position_by_key(inode->i_sb, &key, &path);
331 * the inode to avoid search_by_key in case of subsequent
337 map_bh(bh_result, inode->i_sb, blocknr);
403 memset(p, 0, inode->i_sb->s_blocksize);
414 if ((le_ih_k_offset(ih) + path.pos_in_item) > inode->i_size)
416 if ((le_ih_k_offset(ih) - 1 + ih_item_len(ih)) > inode->i_size) {
418 inode->i_size - (le_ih_k_offset(ih) - 1) -
442 result = search_for_position_by_key(inode->i_sb, &key, &path);
463 map_bh(bh_result, inode->i_sb, 0);
472 static int reiserfs_bmap(struct inode *inode, sector_t block,
475 if (!file_capable(inode, block))
478 reiserfs_write_lock(inode->i_sb);
480 _get_block_create_0(inode, block, bh_result, 0);
481 reiserfs_write_unlock(inode->i_sb);
503 static int reiserfs_get_block_create_0(struct inode *inode, sector_t block,
507 return reiserfs_get_block(inode, block, bh_result, GET_BLOCK_NO_HOLE);
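Both _get_block_create_0() (line 302) and reiserfs_get_block() (line 686, further down) turn a logical block number into the key offset they search for, and both add 1: reiserfs item keys address file bodies with a 1-based byte offset, while the stat-data key built at line 1458 uses SD_OFFSET (0). A trivial standalone check of that conversion, purely illustrative:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    unsigned int blocksize_bits = 12;                   /* assumed 4096-byte blocks */
    unsigned long long blocksize = 1ULL << blocksize_bits;
    unsigned long long block = 42;

    /* the two equivalent forms seen at lines 302 and 686 */
    unsigned long long off_mul   = block * blocksize + 1;
    unsigned long long off_shift = (block << blocksize_bits) + 1;

    assert(off_mul == off_shift);
    printf("block %llu starts at key offset %llu\n", block, off_mul);
    return 0;
}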
514 static int reiserfs_get_blocks_direct_io(struct inode *inode,
528 bh_result->b_size = i_blocksize(inode);
530 ret = reiserfs_get_block(inode, iblock, bh_result,
549 if (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) {
552 reiserfs_write_lock(inode->i_sb);
554 err = reiserfs_commit_for_inode(inode);
555 REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
557 reiserfs_write_unlock(inode->i_sb);
576 static int convert_tail_for_hole(struct inode *inode,
600 tail_page = grab_cache_page(inode->i_mapping, index);
641 struct inode *inode,
649 return reiserfs_new_unf_blocknrs2(th, inode, allocated_block_nr,
653 return reiserfs_new_unf_blocknrs(th, inode, allocated_block_nr, path,
657 int reiserfs_get_block(struct inode *inode, sector_t block,
682 2 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb);
686 (((loff_t) block) << inode->i_sb->s_blocksize_bits) + 1;
688 reiserfs_write_lock(inode->i_sb);
689 version = get_inode_item_key_version(inode);
691 if (!file_capable(inode, block)) {
692 reiserfs_write_unlock(inode->i_sb);
703 ret = _get_block_create_0(inode, block, bh_result,
705 reiserfs_write_unlock(inode->i_sb);
714 reiserfs_transaction_running(inode->i_sb))
722 if ((have_large_tails(inode->i_sb)
723 && inode->i_size < i_block_size(inode) * 4)
724 || (have_small_tails(inode->i_sb)
725 && inode->i_size < i_block_size(inode)))
726 REISERFS_I(inode)->i_flags |= i_pack_on_close_mask;
729 make_cpu_key(&key, inode, new_offset, TYPE_ANY, 3 /*key length */ );
730 if ((new_offset + inode->i_sb->s_blocksize - 1) > inode->i_size) {
732 th = reiserfs_persistent_transaction(inode->i_sb, jbegin_count);
737 reiserfs_update_inode_transaction(inode);
741 retval = search_for_position_by_key(inode->i_sb, &key, &path);
752 fs_gen = get_generation(inode->i_sb);
764 _allocate_block(th, block, inode, &allocated_block_nr,
773 SB_JOURNAL(inode->i_sb)->j_next_async_flush = 1;
774 retval = restart_transaction(th, inode, &path);
778 _allocate_block(th, block, inode,
791 if (fs_changed(fs_gen, inode->i_sb)
807 reiserfs_prepare_for_journal(inode->i_sb, bh, 1);
808 if (fs_changed(fs_gen, inode->i_sb)
810 reiserfs_restore_prepared_buffer(inode->i_sb,
816 && reiserfs_data_ordered(inode->i_sb))
817 reiserfs_add_ordered_list(inode, bh_result);
821 reiserfs_update_sd(th, inode);
823 set_block_dev_mapped(bh_result, unfm_ptr, inode);
829 reiserfs_write_unlock(inode->i_sb);
833 * there is no need to make sure the inode is updated with this
867 allocated_block_nr, inode);
877 inode, (char *)&unp);
879 reiserfs_free_block(th, inode,
893 1) & ~(inode->i_sb->s_blocksize - 1)) + 1;
902 allocated_block_nr, inode);
929 convert_tail_for_hole(inode, bh_result,
933 reiserfs_error(inode->i_sb,
936 "inode %lu, error %d",
937 inode->i_ino,
945 th = reiserfs_persistent_transaction(inode->i_sb, 3);
948 inode,
957 direct2indirect(th, inode, &path, unbh,
961 reiserfs_free_block(th, inode,
987 reiserfs_add_tail_list(inode, unbh);
991 * from adding this buffer to the inode's
1018 MAX_ITEM_LEN(inode->i_sb->s_blocksize) /
1029 make_cpu_key(&tmp_key, inode,
1033 inode->i_sb->s_blocksize),
1041 cpu_key_k_offset(&tmp_key)) >> inode->i_sb->
1063 allocated_block_nr, inode);
1077 reiserfs_paste_into_item(th, &path, &tmp_key, inode,
1086 reiserfs_free_block(th, inode,
1097 inode->i_size +=
1098 inode->i_sb->s_blocksize * blocks_needed;
1108 * if it is too big or too full. Update the inode so things
1114 retval = restart_transaction(th, inode, &path);
1123 reiserfs_cond_resched(inode->i_sb);
1125 retval = search_for_position_by_key(inode->i_sb, &key, &path);
1131 reiserfs_warning(inode->i_sb, "vs-825",
1135 reiserfs_free_block(th, inode,
1152 reiserfs_update_sd(th, inode);
1158 reiserfs_write_unlock(inode->i_sb);
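One policy decision buried in the reiserfs_get_block() matches above is the tail-packing hint at lines 722-726: with the large-tails mount option a file smaller than four blocks is flagged for re-packing on close, with small tails only a file smaller than one block is; the write_end paths further down (2935-2939 and 3024-3028) clear the flag again once the file grows past the same thresholds. A standalone sketch of the predicate, with the mount-option tests reduced to plain booleans (an assumption made for illustration):

#include <stdbool.h>
#include <stdio.h>

static bool pack_on_close(long long i_size, unsigned int blocksize,
                          bool large_tails, bool small_tails)
{
    return (large_tails && i_size < (long long)blocksize * 4) ||
           (small_tails && i_size < blocksize);
}

int main(void)
{
    printf("%d\n", pack_on_close(5000, 4096, true, false));   /* 1: under 4 blocks */
    printf("%d\n", pack_on_close(20000, 4096, true, false));  /* 0: past 4 blocks  */
    printf("%d\n", pack_on_close(5000, 4096, false, true));   /* 0: past 1 block   */
    return 0;
}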
1173 static int real_space_diff(struct inode *inode, int sd_size)
1176 loff_t blocksize = inode->i_sb->s_blocksize;
1178 if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode))
1191 ((inode->i_size +
1192 (blocksize - 1)) >> inode->i_sb->s_blocksize_bits) * UNFM_P_SIZE +
1197 static inline loff_t to_real_used_space(struct inode *inode, ulong blocks,
1200 if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode)) {
1201 return inode->i_size +
1202 (loff_t) (real_space_diff(inode, sd_size));
1204 return ((loff_t) real_space_diff(inode, sd_size)) +
1209 static inline ulong to_fake_used_blocks(struct inode *inode, int sd_size)
1211 loff_t bytes = inode_get_bytes(inode);
1212 loff_t real_space = real_space_diff(inode, sd_size);
1215 if (S_ISLNK(inode->i_mode) || S_ISDIR(inode->i_mode)) {
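The accounting helpers at 1173-1215 translate between the byte count kept via inode_get_bytes()/inode_set_bytes() and the fake 512-byte block counts written into stat data. For a regular file the overhead charged on top of the data is the formula at lines 1191-1192: one UNFM_P_SIZE unformatted-node pointer per data block, rounded up, plus the stat-data item; symlinks and directories take the early return at line 1178 instead. A worked example of that regular-file formula; sd_size stays a parameter because the real values (SD_V1_SIZE/SD_V2_SIZE) come from the reiserfs headers:

#include <stdio.h>

#define UNFM_P_SIZE 4   /* one on-disk unformatted-node pointer (32-bit block number) */

static long long regular_file_overhead(long long i_size,
                                       unsigned int blocksize_bits,
                                       int sd_size)
{
    long long blocksize = 1LL << blocksize_bits;

    /* ceil(i_size / blocksize) pointers plus the stat-data item */
    return ((i_size + blocksize - 1) >> blocksize_bits) * UNFM_P_SIZE + sd_size;
}

int main(void)
{
    /* 10000-byte file, 4096-byte blocks, stat-data size assumed to be 44 bytes */
    printf("%lld bytes of overhead\n", regular_file_overhead(10000, 12, 44));  /* 3*4 + 44 = 56 */
    return 0;
}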
1231 * of old type. Version stored in the inode says about body items, so
1232 * in update_stat_data we can not rely on inode, but have to check
1237 static void init_inode(struct inode *inode, struct treepath *path)
1246 copy_key(INODE_PKEY(inode), &ih->ih_key);
1248 INIT_LIST_HEAD(&REISERFS_I(inode)->i_prealloc_list);
1249 REISERFS_I(inode)->i_flags = 0;
1250 REISERFS_I(inode)->i_prealloc_block = 0;
1251 REISERFS_I(inode)->i_prealloc_count = 0;
1252 REISERFS_I(inode)->i_trans_id = 0;
1253 REISERFS_I(inode)->i_jl = NULL;
1254 reiserfs_init_xattr_rwsem(inode);
1261 set_inode_item_key_version(inode, KEY_FORMAT_3_5);
1262 set_inode_sd_version(inode, STAT_DATA_V1);
1263 inode->i_mode = sd_v1_mode(sd);
1264 set_nlink(inode, sd_v1_nlink(sd));
1265 i_uid_write(inode, sd_v1_uid(sd));
1266 i_gid_write(inode, sd_v1_gid(sd));
1267 inode->i_size = sd_v1_size(sd);
1268 inode->i_atime.tv_sec = sd_v1_atime(sd);
1269 inode->i_mtime.tv_sec = sd_v1_mtime(sd);
1270 inode->i_ctime.tv_sec = sd_v1_ctime(sd);
1271 inode->i_atime.tv_nsec = 0;
1272 inode->i_ctime.tv_nsec = 0;
1273 inode->i_mtime.tv_nsec = 0;
1275 inode->i_blocks = sd_v1_blocks(sd);
1276 inode->i_generation = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
1277 blocks = (inode->i_size + 511) >> 9;
1278 blocks = _ROUND_UP(blocks, inode->i_sb->s_blocksize >> 9);
1286 * updated if file's inode will ever change
1288 if (inode->i_blocks > blocks) {
1289 inode->i_blocks = blocks;
1293 REISERFS_I(inode)->i_first_direct_byte =
1300 if (inode->i_blocks & 1) {
1301 inode->i_blocks++;
1303 inode_set_bytes(inode,
1304 to_real_used_space(inode, inode->i_blocks,
1310 REISERFS_I(inode)->i_flags &= ~i_nopack_mask;
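For old (v1) stat data, init_inode() does not take the stored block count at face value: lines 1277-1278 compute an upper bound from i_size (512-byte sectors, rounded up to a whole filesystem block), lines 1288-1289 clamp i_blocks to that bound, lines 1300-1301 round an odd count up by one sector, and lines 1303-1304 then derive inode_set_bytes() from the result. A small sketch of just that bound, assuming 4 KiB blocks:

#include <stdio.h>

static unsigned long blocks_upper_bound(long long i_size, unsigned int blocksize)
{
    unsigned long blocks = (i_size + 511) >> 9;        /* size in 512-byte sectors */
    unsigned long per_fs_block = blocksize >> 9;       /* sectors per fs block     */

    /* _ROUND_UP(blocks, per_fs_block): round up to a whole fs block */
    return (blocks + per_fs_block - 1) / per_fs_block * per_fs_block;
}

int main(void)
{
    /* 10000 bytes -> 20 sectors -> rounded up to 24 (three 4 KiB blocks) */
    printf("%lu\n", blocks_upper_bound(10000, 4096));
    return 0;
}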
1318 inode->i_mode = sd_v2_mode(sd);
1319 set_nlink(inode, sd_v2_nlink(sd));
1320 i_uid_write(inode, sd_v2_uid(sd));
1321 inode->i_size = sd_v2_size(sd);
1322 i_gid_write(inode, sd_v2_gid(sd));
1323 inode->i_mtime.tv_sec = sd_v2_mtime(sd);
1324 inode->i_atime.tv_sec = sd_v2_atime(sd);
1325 inode->i_ctime.tv_sec = sd_v2_ctime(sd);
1326 inode->i_ctime.tv_nsec = 0;
1327 inode->i_mtime.tv_nsec = 0;
1328 inode->i_atime.tv_nsec = 0;
1329 inode->i_blocks = sd_v2_blocks(sd);
1331 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1332 inode->i_generation =
1333 le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
1335 inode->i_generation = sd_v2_generation(sd);
1337 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
1338 set_inode_item_key_version(inode, KEY_FORMAT_3_5);
1340 set_inode_item_key_version(inode, KEY_FORMAT_3_6);
1341 REISERFS_I(inode)->i_first_direct_byte = 0;
1342 set_inode_sd_version(inode, STAT_DATA_V2);
1343 inode_set_bytes(inode,
1344 to_real_used_space(inode, inode->i_blocks,
1347 * read persistent inode attributes from sd and initialise
1348 * generic inode flags from them
1350 REISERFS_I(inode)->i_attrs = sd_v2_attrs(sd);
1351 sd_attrs_to_i_attrs(sd_v2_attrs(sd), inode);
1355 if (S_ISREG(inode->i_mode)) {
1356 inode->i_op = &reiserfs_file_inode_operations;
1357 inode->i_fop = &reiserfs_file_operations;
1358 inode->i_mapping->a_ops = &reiserfs_address_space_operations;
1359 } else if (S_ISDIR(inode->i_mode)) {
1360 inode->i_op = &reiserfs_dir_inode_operations;
1361 inode->i_fop = &reiserfs_dir_operations;
1362 } else if (S_ISLNK(inode->i_mode)) {
1363 inode->i_op = &reiserfs_symlink_inode_operations;
1364 inode_nohighmem(inode);
1365 inode->i_mapping->a_ops = &reiserfs_address_space_operations;
1367 inode->i_blocks = 0;
1368 inode->i_op = &reiserfs_special_inode_operations;
1369 init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
1373 /* update new stat data with inode fields */
1374 static void inode2sd(void *sd, struct inode *inode, loff_t size)
1378 set_sd_v2_mode(sd_v2, inode->i_mode);
1379 set_sd_v2_nlink(sd_v2, inode->i_nlink);
1380 set_sd_v2_uid(sd_v2, i_uid_read(inode));
1382 set_sd_v2_gid(sd_v2, i_gid_read(inode));
1383 set_sd_v2_mtime(sd_v2, inode->i_mtime.tv_sec);
1384 set_sd_v2_atime(sd_v2, inode->i_atime.tv_sec);
1385 set_sd_v2_ctime(sd_v2, inode->i_ctime.tv_sec);
1386 set_sd_v2_blocks(sd_v2, to_fake_used_blocks(inode, SD_V2_SIZE));
1387 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1388 set_sd_v2_rdev(sd_v2, new_encode_dev(inode->i_rdev));
1390 set_sd_v2_generation(sd_v2, inode->i_generation);
1391 set_sd_v2_attrs(sd_v2, REISERFS_I(inode)->i_attrs);
1394 /* used to copy inode's fields to old stat data */
1395 static void inode2sd_v1(void *sd, struct inode *inode, loff_t size)
1399 set_sd_v1_mode(sd_v1, inode->i_mode);
1400 set_sd_v1_uid(sd_v1, i_uid_read(inode));
1401 set_sd_v1_gid(sd_v1, i_gid_read(inode));
1402 set_sd_v1_nlink(sd_v1, inode->i_nlink);
1404 set_sd_v1_atime(sd_v1, inode->i_atime.tv_sec);
1405 set_sd_v1_ctime(sd_v1, inode->i_ctime.tv_sec);
1406 set_sd_v1_mtime(sd_v1, inode->i_mtime.tv_sec);
1408 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1409 set_sd_v1_rdev(sd_v1, new_encode_dev(inode->i_rdev));
1411 set_sd_v1_blocks(sd_v1, to_fake_used_blocks(inode, SD_V1_SIZE));
1415 REISERFS_I(inode)->i_first_direct_byte);
1422 static void update_stat_data(struct treepath *path, struct inode *inode,
1432 reiserfs_panic(inode->i_sb, "vs-13065", "key %k, found item %h",
1433 INODE_PKEY(inode), ih);
1437 inode2sd_v1(ih_item_body(bh, ih), inode, size);
1439 inode2sd(ih_item_body(bh, ih), inode, size);
1446 struct inode *inode, loff_t size)
1458 make_cpu_key(&key, inode, SD_OFFSET, TYPE_STAT_DATA, 3);
1463 retval = search_item(inode->i_sb, &key, &path);
1465 reiserfs_error(inode->i_sb, "vs-13050",
1473 if (inode->i_nlink == 0) {
1474 /*reiserfs_warning (inode->i_sb, "vs-13050: reiserfs_update_sd: i_nlink == 0, stat data not found"); */
1477 reiserfs_warning(inode->i_sb, "vs-13060",
1480 INODE_PKEY(inode), inode->i_nlink,
1494 fs_gen = get_generation(inode->i_sb);
1495 reiserfs_prepare_for_journal(inode->i_sb, bh, 1);
1498 if (fs_changed(fs_gen, inode->i_sb)
1500 reiserfs_restore_prepared_buffer(inode->i_sb, bh);
1505 update_stat_data(&path, inode, size);
1512 * reiserfs_read_locked_inode is called to read the inode off disk, and it
1514 * and clear the key in the private portion of the inode, otherwise a
1515 * corresponding iput might try to delete whatever object the inode last
1518 static void reiserfs_make_bad_inode(struct inode *inode)
1520 memset(INODE_PKEY(inode), 0, KEY_SIZE);
1521 make_bad_inode(inode);
1528 int reiserfs_init_locked_inode(struct inode *inode, void *p)
1531 inode->i_ino = args->objectid;
1532 INODE_PKEY(inode)->k_dir_id = cpu_to_le32(args->dirid);
1538 * inode stat data fields
1540 void reiserfs_read_locked_inode(struct inode *inode,
1554 _make_cpu_key(&key, KEY_FORMAT_3_5, dirino, inode->i_ino, 0, 0, 3);
1557 retval = search_item(inode->i_sb, &key, &path_to_sd);
1559 reiserfs_error(inode->i_sb, "vs-13070",
1562 reiserfs_make_bad_inode(inode);
1569 reiserfs_make_bad_inode(inode);
1570 clear_nlink(inode);
1574 init_inode(inode, &path_to_sd);
1577 * It is possible that knfsd is trying to access inode of a file
1584 * More logical fix would require changes in fs/inode.c:iput() to
1585 * remove inode from hash-table _after_ fs cleaned disk stuff up and
1586 * in iget() to return NULL if I_FREEING inode is found in
1591 * Currently there is one place where it's ok to meet inode with
1595 if ((inode->i_nlink == 0) &&
1596 !REISERFS_SB(inode->i_sb)->s_is_unlinked_ok) {
1597 reiserfs_warning(inode->i_sb, "vs-13075",
1598 "dead inode read from disk %K. "
1601 reiserfs_make_bad_inode(inode);
1604 /* init inode should be relsing */
1610 if (get_inode_sd_version(inode) == STAT_DATA_V1)
1611 cache_no_acl(inode);
1617 * @inode: inode from hash table to check
1621 * having the same inode numbers. Such inodes can only exist due to some
1623 * inode numbers (objectids) are distinguished by parent directory ids.
1626 int reiserfs_find_actor(struct inode *inode, void *opaque)
1632 return (inode->i_ino == args->objectid) &&
1633 (le32_to_cpu(INODE_PKEY(inode)->k_dir_id) == args->dirid);
1636 struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key)
1638 struct inode *inode;
1645 inode = iget5_locked(s, key->on_disk_key.k_objectid,
1649 if (!inode)
1652 if (inode->i_state & I_NEW) {
1653 reiserfs_read_locked_inode(inode, &args);
1654 unlock_new_inode(inode);
1657 if (comp_short_keys(INODE_PKEY(inode), key) || is_bad_inode(inode)) {
1659 iput(inode);
1660 inode = NULL;
1662 return inode;
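The comment at 1617-1623 explains why the icache lookup cannot key on the inode number alone: inodes with the same objectid can exist (only through corruption) and are told apart by the parent directory id, so reiserfs_iget() passes reiserfs_find_actor() to iget5_locked() and the actor compares both fields (lines 1632-1633). A self-contained model of that two-field identity check; the struct and field names below are stand-ins, not the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct key_args {           /* what reiserfs_iget() passes as the opaque argument */
    uint32_t objectid;
    uint32_t dirid;
};

struct cached_inode {       /* the two fields the actor reads from a cached inode */
    uint32_t i_ino;         /* objectid */
    uint32_t k_dir_id;      /* parent directory id from the on-disk key */
};

static bool find_actor(const struct cached_inode *inode, const struct key_args *args)
{
    return inode->i_ino == args->objectid && inode->k_dir_id == args->dirid;
}

int main(void)
{
    struct cached_inode c = { .i_ino = 1234, .k_dir_id = 2 };
    struct key_args same = { 1234, 2 }, clash = { 1234, 99 };

    printf("%d %d\n", find_actor(&c, &same), find_actor(&c, &clash));  /* 1 0 */
    return 0;
}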
1670 struct inode *inode;
1675 inode = reiserfs_iget(sb, &key);
1676 if (inode && !IS_ERR(inode) && generation != 0 &&
1677 generation != inode->i_generation) {
1678 iput(inode);
1679 inode = NULL;
1683 return d_obtain_alias(inode);
1730 int reiserfs_encode_fh(struct inode *inode, __u32 * data, int *lenp,
1731 struct inode *parent)
1743 data[0] = inode->i_ino;
1744 data[1] = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
1745 data[2] = inode->i_generation;
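The reiserfs_encode_fh() matches (1743-1745) show the first three words of an exported NFS file handle: objectid, parent directory id from the key, and generation; the parts of the function that encode the parent for longer handles reference "parent" rather than "inode" and so do not appear here. A trivial pack-and-print of that three-word layout, for illustration only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* illustrative values standing in for i_ino, k_dir_id and i_generation */
    uint32_t data[3] = { 1234, 2, 7 };

    printf("fh: objectid=%u dirid=%u generation=%u\n", data[0], data[1], data[2]);
    return 0;
}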
1764 * reiserfs inodes are never really dirty, since the dirty inode call
1765 * always logs them. This call allows the VFS inode marking routines
1769 int reiserfs_write_inode(struct inode *inode, struct writeback_control *wbc)
1774 if (sb_rdonly(inode->i_sb))
1780 * inode needs to reach disk for safety, and they can safely be
1781 * ignored because the altered inode has already been logged.
1784 reiserfs_write_lock(inode->i_sb);
1785 if (!journal_begin(&th, inode->i_sb, jbegin_count)) {
1786 reiserfs_update_sd(&th, inode);
1789 reiserfs_write_unlock(inode->i_sb);
1799 struct inode *inode,
1801 struct inode *dir)
1854 return reiserfs_insert_item(th, path, &key, ih, inode, body);
1862 struct inode *inode,
1897 return reiserfs_insert_item(th, path, &key, ih, inode, symname);
1906 * NOTE! uid and gid must already be set in the inode. If we return
1908 * for the fresh inode. This can only be done outside a transaction, so
1912 * @dir: parent directory for new inode
1913 * @mode: mode of new inode
1914 * @symname: symlink contents if inode is symlink
1917 * @inode: inode to be filled
1918 * @security: optional security context to associate with this inode
1921 struct inode *dir, umode_t mode, const char *symname,
1925 struct inode *inode,
1941 err = dquot_alloc_inode(inode);
1957 args.objectid = inode->i_ino = le32_to_cpu(ih.ih_key.k_objectid);
1964 memcpy(INODE_PKEY(inode), &ih.ih_key, KEY_SIZE);
1967 depth = reiserfs_write_unlock_nested(inode->i_sb);
1968 err = insert_inode_locked4(inode, args.objectid,
1970 reiserfs_write_lock_nested(inode->i_sb, depth);
1980 * note that the private part of inode isn't filled in yet,
1983 inode->i_generation = le32_to_cpu(INODE_PKEY(dir)->k_objectid);
1986 inode->i_generation =
1989 inode->i_generation = ++event;
1993 set_nlink(inode, (S_ISDIR(mode) ? 2 : 1));
1997 inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
1998 inode->i_size = i_size;
1999 inode->i_blocks = 0;
2000 inode->i_bytes = 0;
2001 REISERFS_I(inode)->i_first_direct_byte = S_ISLNK(mode) ? 1 :
2004 INIT_LIST_HEAD(&REISERFS_I(inode)->i_prealloc_list);
2005 REISERFS_I(inode)->i_flags = 0;
2006 REISERFS_I(inode)->i_prealloc_block = 0;
2007 REISERFS_I(inode)->i_prealloc_count = 0;
2008 REISERFS_I(inode)->i_trans_id = 0;
2009 REISERFS_I(inode)->i_jl = NULL;
2010 REISERFS_I(inode)->i_attrs =
2012 sd_attrs_to_i_attrs(REISERFS_I(inode)->i_attrs, inode);
2013 reiserfs_init_xattr_rwsem(inode);
2033 if (i_uid_read(inode) & ~0xffff || i_gid_read(inode) & ~0xffff) {
2038 inode2sd_v1(&sd, inode, inode->i_size);
2040 inode2sd(&sd, inode, inode->i_size);
2043 * store in in-core inode the key of stat data and version all
2048 set_inode_item_key_version(inode, KEY_FORMAT_3_5);
2050 set_inode_item_key_version(inode, KEY_FORMAT_3_6);
2052 set_inode_sd_version(inode, STAT_DATA_V1);
2054 set_inode_sd_version(inode, STAT_DATA_V2);
2062 reiserfs_insert_item(th, &path_to_key, &key, &ih, inode,
2076 reiserfs_new_directory(th, inode, &ih, &path_to_key, dir);
2084 reiserfs_new_symlink(th, inode, &ih, &path_to_key, symname,
2099 inode->i_flags |= S_PRIVATE;
2100 inode->i_opflags &= ~IOP_XATTR;
2103 if (reiserfs_posixacl(inode->i_sb)) {
2104 reiserfs_write_unlock(inode->i_sb);
2105 retval = reiserfs_inherit_default_acl(th, dir, dentry, inode);
2106 reiserfs_write_lock(inode->i_sb);
2113 } else if (inode->i_sb->s_flags & SB_POSIXACL) {
2114 reiserfs_warning(inode->i_sb, "jdm-13090",
2120 reiserfs_write_unlock(inode->i_sb);
2121 retval = reiserfs_security_write(th, inode, security);
2122 reiserfs_write_lock(inode->i_sb);
2133 reiserfs_update_sd(th, inode);
2140 INODE_PKEY(inode)->k_objectid = 0;
2143 depth = reiserfs_write_unlock_nested(inode->i_sb);
2144 dquot_free_inode(inode);
2145 reiserfs_write_lock_nested(inode->i_sb, depth);
2153 depth = reiserfs_write_unlock_nested(inode->i_sb);
2154 dquot_drop(inode);
2155 reiserfs_write_lock_nested(inode->i_sb, depth);
2156 inode->i_flags |= S_NOQUOTA;
2157 make_bad_inode(inode);
2160 clear_nlink(inode);
2162 if (inode->i_state & I_NEW)
2163 unlock_new_inode(inode);
2164 iput(inode);
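Inside reiserfs_new_inode(), the test at line 2033 rejects owners that cannot be represented when the filesystem uses old-format stat data: v1 stat data stores uid and gid in 16-bit fields, hence the & ~0xffff masks right before the inode2sd_v1()/inode2sd() choice at 2038-2040. A one-line illustration of the fit test:

#include <stdint.h>
#include <stdio.h>

static int fits_v1_id(uint32_t id)
{
    return !(id & ~0xffffu);    /* representable in a 16-bit stat-data field */
}

int main(void)
{
    printf("%d %d\n", fits_v1_id(1000), fits_v1_id(70000));  /* 1 0 */
    return 0;
}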
2181 static int grab_tail_page(struct inode *inode,
2190 unsigned long index = (inode->i_size - 1) >> PAGE_SHIFT;
2193 unsigned long blocksize = inode->i_sb->s_blocksize;
2194 unsigned long offset = (inode->i_size) & (PAGE_SIZE - 1);
2201 * we know that we are only called with inode->i_size > 0.
2209 page = grab_cache_page(inode->i_mapping, index);
2239 reiserfs_error(inode->i_sb, "clm-6000",
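grab_tail_page() (2181-2239) locates the page-cache page holding the last byte of the file: index = (i_size - 1) >> PAGE_SHIFT and offset = i_size & (PAGE_SIZE - 1), so a size that is an exact multiple of the page size yields offset 0, and as the comment at 2201 notes the helper is only called with i_size > 0. A quick standalone check of that arithmetic, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12                 /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
    unsigned long long sizes[] = { 1, 4096, 4097, 10000 };

    for (int i = 0; i < 4; i++) {
        unsigned long long sz = sizes[i];
        unsigned long index  = (unsigned long)((sz - 1) >> PAGE_SHIFT);
        unsigned long offset = (unsigned long)(sz & (PAGE_SIZE - 1));

        printf("i_size %llu -> tail page index %lu, offset %lu\n", sz, index, offset);
    }
    return 0;
}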
2262 int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
2266 unsigned long offset = inode->i_size & (PAGE_SIZE - 1);
2267 unsigned blocksize = inode->i_sb->s_blocksize;
2274 reiserfs_write_lock(inode->i_sb);
2276 if (inode->i_size > 0) {
2277 error = grab_tail_page(inode, &page, &bh);
2285 reiserfs_error(inode->i_sb, "clm-6001",
2305 error = journal_begin(&th, inode->i_sb,
2309 reiserfs_update_inode_transaction(inode);
2317 add_save_link(&th, inode, 1);
2318 err2 = reiserfs_do_truncate(&th, inode, page, update_timestamps);
2330 error = remove_save_link(inode, 1 /* truncate */);
2349 reiserfs_write_unlock(inode->i_sb);
2358 reiserfs_write_unlock(inode->i_sb);
2363 static int map_block_for_writepage(struct inode *inode,
2377 loff_t byte_offset = ((loff_t)block << inode->i_sb->s_blocksize_bits)+1;
2396 reiserfs_write_lock(inode->i_sb);
2397 make_cpu_key(&key, inode, byte_offset, TYPE_ANY, 3);
2400 retval = search_for_position_by_key(inode->i_sb, &key, &path);
2414 reiserfs_warning(inode->i_sb, "clm-6002",
2423 get_block_num(item, pos_in_item), inode);
2430 fs_gen = get_generation(inode->i_sb);
2435 retval = journal_begin(&th, inode->i_sb, jbegin_count);
2438 reiserfs_update_inode_transaction(inode);
2440 if (fs_changed(fs_gen, inode->i_sb)
2442 reiserfs_restore_prepared_buffer(inode->i_sb,
2448 reiserfs_prepare_for_journal(inode->i_sb, bh, 1);
2450 if (fs_changed(fs_gen, inode->i_sb)
2452 reiserfs_restore_prepared_buffer(inode->i_sb, bh);
2461 set_block_dev_mapped(bh_result, 0, inode);
2465 (byte_offset + bytes_copied) < inode->i_size) {
2472 reiserfs_warning(inode->i_sb, "clm-6003",
2473 "bad item inode %lu", inode->i_ino);
2487 reiserfs_write_unlock(inode->i_sb);
2491 retval = reiserfs_get_block(inode, block, bh_result,
2525 struct inode *inode = page->mapping->host;
2526 unsigned long end_index = inode->i_size >> PAGE_SHIFT;
2535 struct super_block *s = inode->i_sb;
2565 last_offset = inode->i_size & (PAGE_SIZE - 1);
2575 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
2595 if ((error = map_block_for_writepage(inode, bh, block))) {
2617 reiserfs_update_inode_transaction(inode);
2745 struct inode *inode = page->mapping->host;
2746 reiserfs_wait_on_write_block(inode->i_sb);
2750 static void reiserfs_truncate_failed_write(struct inode *inode)
2752 truncate_inode_pages(inode->i_mapping, inode->i_size);
2753 reiserfs_truncate_file(inode, 0);
2761 struct inode *inode;
2767 inode = mapping->host;
2770 (pos & (inode->i_sb->s_blocksize - 1)) == 0) {
2781 reiserfs_wait_on_write_block(inode->i_sb);
2783 if (reiserfs_transaction_running(inode->i_sb)) {
2793 if (ret && reiserfs_transaction_running(inode->i_sb)) {
2813 reiserfs_write_lock(inode->i_sb);
2815 reiserfs_write_unlock(inode->i_sb);
2825 reiserfs_truncate_failed_write(inode);
2832 struct inode *inode = page->mapping->host;
2837 depth = reiserfs_write_unlock_nested(inode->i_sb);
2838 reiserfs_wait_on_write_block(inode->i_sb);
2839 reiserfs_write_lock_nested(inode->i_sb, depth);
2842 if (reiserfs_transaction_running(inode->i_sb)) {
2853 if (ret && reiserfs_transaction_running(inode->i_sb)) {
2873 reiserfs_write_lock(inode->i_sb);
2875 reiserfs_write_unlock(inode->i_sb);
2894 struct inode *inode = page->mapping->host;
2904 reiserfs_wait_on_write_block(inode->i_sb);
2905 if (reiserfs_transaction_running(inode->i_sb))
2919 reiserfs_commit_page(inode, page, start, start + copied);
2926 if (pos + copied > inode->i_size) {
2928 reiserfs_write_lock(inode->i_sb);
2935 if ((have_large_tails(inode->i_sb)
2936 && inode->i_size > i_block_size(inode) * 4)
2937 || (have_small_tails(inode->i_sb)
2938 && inode->i_size > i_block_size(inode)))
2939 REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
2941 ret = journal_begin(&myth, inode->i_sb, 1);
2945 reiserfs_update_inode_transaction(inode);
2946 inode->i_size = pos + copied;
2949 * to use mark_inode_dirty so the inode gets pushed around on
2952 mark_inode_dirty(inode);
2953 reiserfs_update_sd(&myth, inode);
2961 reiserfs_write_lock(inode->i_sb);
2965 mark_inode_dirty(inode);
2973 reiserfs_write_unlock(inode->i_sb);
2977 if (pos + len > inode->i_size)
2978 reiserfs_truncate_failed_write(inode);
2983 reiserfs_write_unlock(inode->i_sb);
2987 reiserfs_update_sd(th, inode);
2996 struct inode *inode = page->mapping->host;
3003 depth = reiserfs_write_unlock_nested(inode->i_sb);
3004 reiserfs_wait_on_write_block(inode->i_sb);
3005 reiserfs_write_lock_nested(inode->i_sb, depth);
3007 if (reiserfs_transaction_running(inode->i_sb)) {
3010 reiserfs_commit_page(inode, page, from, to);
3017 if (pos > inode->i_size) {
3024 if ((have_large_tails(inode->i_sb)
3025 && inode->i_size > i_block_size(inode) * 4)
3026 || (have_small_tails(inode->i_sb)
3027 && inode->i_size > i_block_size(inode)))
3028 REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
3030 ret = journal_begin(&myth, inode->i_sb, 1);
3034 reiserfs_update_inode_transaction(inode);
3035 inode->i_size = pos;
3038 * to use mark_inode_dirty so the inode gets pushed around
3041 mark_inode_dirty(inode);
3042 reiserfs_update_sd(&myth, inode);
3050 mark_inode_dirty(inode);
3062 reiserfs_update_sd(th, inode);
3069 void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode)
3071 if (reiserfs_attrs(inode->i_sb)) {
3073 inode->i_flags |= S_SYNC;
3075 inode->i_flags &= ~S_SYNC;
3077 inode->i_flags |= S_IMMUTABLE;
3079 inode->i_flags &= ~S_IMMUTABLE;
3081 inode->i_flags |= S_APPEND;
3083 inode->i_flags &= ~S_APPEND;
3085 inode->i_flags |= S_NOATIME;
3087 inode->i_flags &= ~S_NOATIME;
3089 REISERFS_I(inode)->i_flags |= i_nopack_mask;
3091 REISERFS_I(inode)->i_flags &= ~i_nopack_mask;
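sd_attrs_to_i_attrs() (3069-3091) only acts when the attrs mount option is enabled (line 3071) and then mirrors each persistent attribute bit into the matching in-core flag: sync, immutable, append-only, noatime, plus the reiserfs-private nopack bit. A small model of that mirroring; the bit values below are stand-ins, since the real attribute constants and S_* flags come from the kernel headers, and the nopack bit is omitted:

#include <stdint.h>
#include <stdio.h>

/* stand-in attribute bits and inode flags (illustrative values only) */
#define ATTR_SYNC      0x01
#define ATTR_IMMUTABLE 0x02
#define ATTR_APPEND    0x04
#define ATTR_NOATIME   0x08

#define FL_SYNC        0x01
#define FL_IMMUTABLE   0x02
#define FL_APPEND      0x04
#define FL_NOATIME     0x08

static uint32_t attrs_to_flags(uint16_t sd_attrs, uint32_t i_flags)
{
    static const struct { uint16_t attr; uint32_t flag; } map[] = {
        { ATTR_SYNC, FL_SYNC }, { ATTR_IMMUTABLE, FL_IMMUTABLE },
        { ATTR_APPEND, FL_APPEND }, { ATTR_NOATIME, FL_NOATIME },
    };

    for (unsigned i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
        if (sd_attrs & map[i].attr)
            i_flags |= map[i].flag;     /* attribute set: raise the flag */
        else
            i_flags &= ~map[i].flag;    /* attribute clear: drop the flag */
    }
    return i_flags;
}

int main(void)
{
    /* immutable + noatime set on disk, starting from an inode that had sync */
    printf("%#x\n", attrs_to_flags(ATTR_IMMUTABLE | ATTR_NOATIME, FL_SYNC));  /* 0xa */
    return 0;
}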
3099 static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh)
3102 struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
3113 if (reiserfs_file_data_log(inode)) {
3140 && jl != SB_JOURNAL(inode->i_sb)->j_current_jl)
3157 struct inode *inode = page->mapping->host;
3184 if (invalidatepage_can_drop(inode, bh))
3208 struct inode *inode = page->mapping->host;
3209 if (reiserfs_file_data_log(inode)) {
3227 struct inode *inode = page->mapping->host;
3228 struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
3261 struct inode *inode = file->f_mapping->host;
3265 ret = blockdev_direct_IO(iocb, inode, iter,
3273 loff_t isize = i_size_read(inode);
3276 if ((end > isize) && inode_newsize_ok(inode, isize) == 0) {
3277 truncate_setsize(inode, isize);
3278 reiserfs_vfs_truncate_file(inode);
3287 struct inode *inode = d_inode(dentry);
3298 if (is_quota_modification(inode, attr)) {
3299 error = dquot_initialize(inode);
3303 reiserfs_write_lock(inode->i_sb);
3309 if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5 &&
3311 reiserfs_write_unlock(inode->i_sb);
3316 inode_dio_wait(inode);
3319 if (attr->ia_size > inode->i_size) {
3320 error = generic_cont_expand_simple(inode, attr->ia_size);
3321 if (REISERFS_I(inode)->i_prealloc_count > 0) {
3324 /* we're changing at most 2 bitmaps, inode + super */
3325 err = journal_begin(&th, inode->i_sb, 4);
3327 reiserfs_discard_prealloc(&th, inode);
3334 reiserfs_write_unlock(inode->i_sb);
3344 reiserfs_write_unlock(inode->i_sb);
3348 (get_inode_sd_version(inode) == STAT_DATA_V1)) {
3354 if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
3355 (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
3359 (REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb) +
3360 REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb)) +
3363 error = reiserfs_chown_xattrs(inode, attr);
3370 * info and , inode write (sb, inode)
3372 reiserfs_write_lock(inode->i_sb);
3373 error = journal_begin(&th, inode->i_sb, jbegin_count);
3374 reiserfs_write_unlock(inode->i_sb);
3377 error = dquot_transfer(inode, attr);
3378 reiserfs_write_lock(inode->i_sb);
3381 reiserfs_write_unlock(inode->i_sb);
3386 * Update corresponding info in inode so that everything
3390 inode->i_uid = attr->ia_uid;
3392 inode->i_gid = attr->ia_gid;
3393 mark_inode_dirty(inode);
3395 reiserfs_write_unlock(inode->i_sb);
3401 attr->ia_size != i_size_read(inode)) {
3402 error = inode_newsize_ok(inode, attr->ia_size);
3408 mutex_lock(&REISERFS_I(inode)->tailpack);
3409 truncate_setsize(inode, attr->ia_size);
3410 reiserfs_truncate_file(inode, 1);
3411 mutex_unlock(&REISERFS_I(inode)->tailpack);
3416 setattr_copy(inode, attr);
3417 mark_inode_dirty(inode);
3420 if (!error && reiserfs_posixacl(inode->i_sb)) {
3422 error = reiserfs_acl_chmod(inode);