// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ufs/inode.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 *  from
 *
 *  linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/uaccess.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/iversion.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
{
	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
	int ptrs = uspi->s_apb;
	int ptrs_bits = uspi->s_apbshift;
	const long direct_blocks = UFS_NDADDR,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;

	UFSD("ptrs=uspi->s_apb = %d, double_blocks=%ld\n", ptrs, double_blocks);
	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = UFS_IND_BLOCK;
		offsets[n++] = i_block;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = UFS_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = UFS_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
	} else {
		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
	}
	return n;
}
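
/*
 * Editorial worked example (hypothetical UFS1 geometry, not taken from
 * any particular superblock): with 8 KiB blocks and 4-byte block
 * pointers, s_apb == 2048 and s_apbshift == 11; with UFS_NDADDR == 12:
 *
 *	block    5 -> { 5 }				depth 1 (direct)
 *	block  100 -> { UFS_IND_BLOCK, 88 }		depth 2
 *	block 5000 -> { UFS_DIND_BLOCK, 1, 892 }	depth 3
 *
 * since 5000 - 12 - 2048 = 2940, 2940 >> 11 == 1 and 2940 & 2047 == 892.
 */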

typedef struct {
	void	*p;
	union {
		__fs32	key32;
		__fs64	key64;
	};
	struct buffer_head *bh;
} Indirect;

static inline int grow_chain32(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs32 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key32 = *(__fs32 *)(to->p = v);
		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}

static inline int grow_chain64(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs64 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;
	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key64 = *(__fs64 *)(to->p = v);
		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}
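
/*
 * Editorial note on the two helpers above: each appends one entry to the
 * Indirect chain and, under ufsi->meta_lock, re-checks every key collected
 * so far against its in-memory location.  The seqlock retry only
 * guarantees a consistent snapshot; it is the whole-chain comparison that
 * catches a concurrent truncate invalidating an earlier link, in which
 * case the caller (ufs_frag_map() below) drops its buffers and restarts.
 */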

/*
 * Returns the location of the fragment from
 * the beginning of the filesystem.
 */
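
/*
 * Editorial example of the mask/shift arithmetic below, assuming 8 KiB
 * blocks and 1 KiB fragments (s_apbshift == 11, s_fpbshift == 3): shift
 * is 8 and mask is 255, because indirect blocks are read one fragment at
 * a time, so pointer n lives in fragment n >> 8 of the indirect block,
 * at index n & 255 among that fragment's 256 pointers.
 */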

static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 mask = (u64) uspi->s_apbmask >> uspi->s_fpbshift;
	int shift = uspi->s_apbshift - uspi->s_fpbshift;
	Indirect chain[4], *q = chain;
	unsigned *p;
	unsigned flags = UFS_SB(sb)->s_flags;
	u64 res = 0;

	UFSD(": uspi->s_fpbshift = %d, uspi->s_apbmask = %x, mask=%llx\n",
		uspi->s_fpbshift, uspi->s_apbmask,
		(unsigned long long)mask);

	if (depth == 0)
		goto no_block;

again:
	p = offsets;

	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;

	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
		goto changed;
	if (!q->key32)
		goto no_block;
	while (--depth) {
		__fs32 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
				  fs32_to_cpu(sb, q->key32) + (n >> shift));
		if (!bh)
			goto no_block;
		ptr = (__fs32 *)bh->b_data + (n & mask);
		if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key32)
			goto no_block;
	}
	res = fs32_to_cpu(sb, q->key32);
	goto found;

ufs2:
	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
		goto changed;
	if (!q->key64)
		goto no_block;

	while (--depth) {
		__fs64 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
				  fs64_to_cpu(sb, q->key64) + (n >> shift));
		if (!bh)
			goto no_block;
		ptr = (__fs64 *)bh->b_data + (n & mask);
		if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key64)
			goto no_block;
	}
	res = fs64_to_cpu(sb, q->key64);
found:
	res += uspi->s_sbbase;
no_block:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	return res;

changed:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	goto again;
}

/*
 * Unpacking tails: we have a file with a partial final block and
 * we have been asked to extend it.  If the fragment being written
 * is within the same block, we need to extend the tail just to cover
 * that fragment.  Otherwise the tail is extended to a full block.
 *
 * Note that we might need to create a _new_ tail, but that will
 * be handled elsewhere; this is strictly for resizing old
 * ones.
 */
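/*
 * A worked example of the sizing below (illustrative numbers): with 8
 * fragments per block (s_fpbmask == 7) and i_lastfrag == 13, the tail
 * holds 13 & 7 == 5 fragments.  A write to fragment 14 satisfies
 * 14 < (13 | 7), so new_size = (14 & 7) + 1 == 7 and ufs_new_fragments()
 * is asked for 7 - 5 == 2 more fragments; a write at or past fragment 15
 * grows the tail to a full block (new_size == s_fpb).
 */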
static bool
ufs_extend_tail(struct inode *inode, u64 writes_to,
		  int *err, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned lastfrag = ufsi->i_lastfrag;	/* it's a short file, so unsigned is enough */
	unsigned block = ufs_fragstoblks(lastfrag);
	unsigned new_size;
	void *p;
	u64 tmp;

	if (writes_to < (lastfrag | uspi->s_fpbmask))
		new_size = (writes_to & uspi->s_fpbmask) + 1;
	else
		new_size = uspi->s_fpb;

	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
	tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
				new_size - (lastfrag & uspi->s_fpbmask), err,
				locked_page);
	return tmp != 0;
}

/**
 * ufs_inode_getfrag() - allocate new fragment(s)
 * @inode: pointer to inode
 * @index: number of the block pointer within the inode's array
 * @new_fragment: number of the newly allocated fragment(s)
 * @err: set on failure
 * @new: set if a new block is allocated
 * @locked_page: for ufs_new_fragments()
 */
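/*
 * Editorial sketch of the sizing below: for a brand-new tail (a direct
 * fragment at or past i_lastfrag) only (new_fragment & s_fpbmask) + 1
 * fragments are allocated rather than a whole block; e.g. fragment 10
 * with s_fpbmask == 7 allocates 3.  The allocation goal is the block
 * just past the previous direct pointer, which keeps a sequentially
 * written file physically contiguous when possible.
 */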
static u64
ufs_inode_getfrag(struct inode *inode, unsigned index,
		  sector_t new_fragment, int *err,
		  int *new, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 tmp, goal, lastfrag;
	unsigned nfrags = uspi->s_fpb;
	void *p;

	/* TODO : to be done for write support
	if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
	     goto ufs2;
	 */

	p = ufs_get_direct_data_ptr(uspi, ufsi, index);
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp)
		goto out;

	lastfrag = ufsi->i_lastfrag;

	/* will that be a new tail? */
	if (new_fragment < UFS_NDIR_FRAGMENT && new_fragment >= lastfrag)
		nfrags = (new_fragment & uspi->s_fpbmask) + 1;

	goal = 0;
	if (index) {
		goal = ufs_data_ptr_to_cpu(sb,
				 ufs_get_direct_data_ptr(uspi, ufsi, index - 1));
		if (goal)
			goal += uspi->s_fpb;
	}
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
				goal, nfrags, err, locked_page);

	if (!tmp) {
		*err = -ENOSPC;
		return 0;
	}

	if (new)
		*new = 1;
	inode_set_ctime_current(inode);
	if (IS_SYNC(inode))
		ufs_sync_inode(inode);
	mark_inode_dirty(inode);
out:
	return tmp + uspi->s_sbbase;

     /* This part : To be implemented ....
        Required only for writing, not required for READ-ONLY.
ufs2:

	u2_block = ufs_fragstoblks(fragment);
	u2_blockoff = ufs_fragnum(fragment);
	p = ufsi->i_u1.u2_i_data + block;
	goal = 0;

repeat2:
	tmp = fs32_to_cpu(sb, *p);
	lastfrag = ufsi->i_lastfrag;

     */
}

/**
 * ufs_inode_getblock() - allocate new block
 * @inode: pointer to inode
 * @ind_block: block number of the indirect block
 * @index: number of the pointer within the indirect block
 * @new_fragment: number of the newly allocated fragment
 *  (the block will hold this fragment and also uspi->s_fpb-1 others)
 * @err: see ufs_inode_getfrag()
 * @new: see ufs_inode_getfrag()
 * @locked_page: see ufs_inode_getfrag()
 */
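/*
 * Editorial note (assumed geometry as in the examples above, 8 KiB
 * blocks, 1 KiB fragments, shift == 8): the indirect block is read one
 * fragment at a time, so index 300 reads fragment ind_block + (300 >> 8)
 * == ind_block + 1 and then, after masking with s_apbmask >> s_fpbshift
 * == 255, picks pointer 300 & 255 == 44 within that fragment.
 */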
static u64
ufs_inode_getblock(struct inode *inode, u64 ind_block,
		  unsigned index, sector_t new_fragment, int *err,
		  int *new, struct page *locked_page)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	int shift = uspi->s_apbshift - uspi->s_fpbshift;
	u64 tmp = 0, goal;
	struct buffer_head *bh;
	void *p;

	if (!ind_block)
		return 0;

	bh = sb_bread(sb, ind_block + (index >> shift));
	if (unlikely(!bh)) {
		*err = -EIO;
		return 0;
	}

	index &= uspi->s_apbmask >> uspi->s_fpbshift;
	if (uspi->fs_magic == UFS2_MAGIC)
		p = (__fs64 *)bh->b_data + index;
	else
		p = (__fs32 *)bh->b_data + index;

	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp)
		goto out;

	if (index && (uspi->fs_magic == UFS2_MAGIC ?
		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[index-1])) :
		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[index-1]))))
		goal = tmp + uspi->s_fpb;
	else
		goal = bh->b_blocknr + uspi->s_fpb;
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
				uspi->s_fpb, err, locked_page);
	if (!tmp)
		goto out;

	if (new)
		*new = 1;

	mark_buffer_dirty(bh);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
	inode_set_ctime_current(inode);
	mark_inode_dirty(inode);
out:
	brelse(bh);
	UFSD("EXIT\n");
	if (tmp)
		tmp += uspi->s_sbbase;
	return tmp;
}

/**
 * ufs_getfrag_block() - `get_block_t' function; the interface between UFS
 * and read_folio, writepage, etc.
 */

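/*
 * Editorial sketch of the contract (standard get_block_t semantics, not
 * specific to UFS): the caller passes an inode-relative fragment number;
 * on success the buffer_head is mapped to the on-disk location via
 * map_bh(), and set_buffer_new() marks freshly allocated fragments so
 * callers know to zero them or clean block-device aliases.  With 8
 * fragments per block, fragment 17 resolves through block 17 >> 3 == 2,
 * and the in-block offset 17 & 7 == 1 is added back to phys64 at the end.
 */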
static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	int err = 0, new = 0;
	unsigned offsets[4];
	int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
	u64 phys64 = 0;
	unsigned frag = fragment & uspi->s_fpbmask;

	phys64 = ufs_frag_map(inode, offsets, depth);
	if (!create)
		goto done;

	if (phys64) {
		if (fragment >= UFS_NDIR_FRAGMENT)
			goto done;
		read_seqlock_excl(&UFS_I(inode)->meta_lock);
		if (fragment < UFS_I(inode)->i_lastfrag) {
			read_sequnlock_excl(&UFS_I(inode)->meta_lock);
			goto done;
		}
		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
	}
	/* presumably this code is entered only when writing */

	mutex_lock(&UFS_I(inode)->truncate_mutex);

	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
	if (unlikely(!depth)) {
		ufs_warning(sb, "ufs_get_block", "block > big");
		err = -EIO;
		goto out;
	}

	if (UFS_I(inode)->i_lastfrag < UFS_NDIR_FRAGMENT) {
		unsigned lastfrag = UFS_I(inode)->i_lastfrag;
		unsigned tailfrags = lastfrag & uspi->s_fpbmask;
		if (tailfrags && fragment >= lastfrag) {
			if (!ufs_extend_tail(inode, fragment,
					     &err, bh_result->b_page))
				goto out;
		}
	}

	if (depth == 1) {
		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
					   &err, &new, bh_result->b_page);
	} else {
		int i;
		phys64 = ufs_inode_getfrag(inode, offsets[0], fragment,
					   &err, NULL, NULL);
		for (i = 1; i < depth - 1; i++)
			phys64 = ufs_inode_getblock(inode, phys64, offsets[i],
						fragment, &err, NULL, NULL);
		phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 1],
					fragment, &err, &new, bh_result->b_page);
	}
out:
	if (phys64) {
		phys64 += frag;
		map_bh(bh_result, sb, phys64);
		if (new)
			set_buffer_new(bh_result);
	}
	mutex_unlock(&UFS_I(inode)->truncate_mutex);
	return err;

done:
	if (phys64)
		map_bh(bh_result, sb, phys64 + frag);
	return 0;
}

static int ufs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, ufs_getfrag_block, wbc);
}

static int ufs_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, ufs_getfrag_block);
}

int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len, ufs_getfrag_block);
}

static void ufs_truncate_blocks(struct inode *);

static void ufs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		ufs_truncate_blocks(inode);
	}
}

static int ufs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len,
			struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, pagep, ufs_getfrag_block);
	if (unlikely(ret))
		ufs_write_failed(mapping, pos + len);

	return ret;
}

static int ufs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (ret < len)
		ufs_write_failed(mapping, pos + len);
	return ret;
}

static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, ufs_getfrag_block);
}

const struct address_space_operations ufs_aops = {
	.dirty_folio = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio = ufs_read_folio,
	.writepage = ufs_writepage,
	.write_begin = ufs_write_begin,
	.write_end = ufs_write_end,
	.bmap = ufs_bmap
};

static void ufs_set_inode_ops(struct inode *inode)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ufs_file_inode_operations;
		inode->i_fop = &ufs_file_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ufs_dir_inode_operations;
		inode->i_fop = &ufs_dir_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (!inode->i_blocks) {
			inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
			inode->i_op = &simple_symlink_inode_operations;
		} else {
			inode->i_mapping->a_ops = &ufs_aops;
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
		}
	} else
		init_special_inode(inode, inode->i_mode,
				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}

static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
	if (inode->i_nlink == 0)
		return -ESTALE;

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));

	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
	inode->i_atime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
	inode_set_ctime(inode,
			(signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec),
			0);
	inode->i_mtime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
	inode->i_mtime.tv_nsec = 0;
	inode->i_atime.tv_nsec = 0;
	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}

static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
	if (inode->i_nlink == 0)
		return -ESTALE;

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
	i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));

	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
	inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
	inode_set_ctime(inode, fs64_to_cpu(sb, ufs2_inode->ui_ctime),
			fs32_to_cpu(sb, ufs2_inode->ui_ctimensec));
	inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
	inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
	inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
	/*
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
	*/

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
		       sizeof(ufs2_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
		       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}

struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
{
	struct ufs_inode_info *ufsi;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head *bh;
	struct inode *inode;
	int err = -EIO;

	UFSD("ENTER, ino %lu\n", ino);

	if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
			    ino);
		return ERR_PTR(-EIO);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ufsi = UFS_I(inode);

	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
			    inode->i_ino);
		goto bad_inode;
	}
	if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		err = ufs2_read_inode(inode,
				      ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

		err = ufs1_read_inode(inode,
				      ufs_inode + ufs_inotofsbo(inode->i_ino));
	}
	brelse(bh);
	if (err)
		goto bad_inode;

	inode_inc_iversion(inode);
	ufsi->i_lastfrag =
		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
	ufsi->i_dir_start_lookup = 0;
	ufsi->i_osync = 0;

	ufs_set_inode_ops(inode);

	UFSD("EXIT\n");
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(err);
}

static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
	ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atime.tv_usec = 0;
	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb,
						 inode_get_ctime(inode).tv_sec);
	ufs_inode->ui_ctime.tv_usec = 0;
	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtime.tv_usec = 0;
	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset(ufs_inode, 0, sizeof(struct ufs_inode));
}

static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	UFSD("ENTER\n");
	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
	ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
	ufs_inode->ui_ctime = cpu_to_fs64(sb, inode_get_ctime(inode).tv_sec);
	ufs_inode->ui_ctimensec = cpu_to_fs32(sb,
					      inode_get_ctime(inode).tv_nsec);
	ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);

	ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset(ufs_inode, 0, sizeof(struct ufs2_inode));
	UFSD("EXIT\n");
}

static int ufs_update_inode(struct inode *inode, int do_sync)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head *bh;

	UFSD("ENTER, ino %lu\n", inode->i_ino);

	if (inode->i_ino < UFS_ROOTINO ||
	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning(sb, "ufs_update_inode", "bad inode number (%lu)\n", inode->i_ino);
		return -1;
	}

	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning(sb, "ufs_update_inode", "unable to read inode %lu\n", inode->i_ino);
		return -1;
	}
	if (uspi->fs_magic == UFS2_MAGIC) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		ufs2_update_inode(inode,
				  ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
	}

	mark_buffer_dirty(bh);
	if (do_sync)
		sync_dirty_buffer(bh);
	brelse(bh);

	UFSD("EXIT\n");
	return 0;
}

int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

int ufs_sync_inode(struct inode *inode)
{
	return ufs_update_inode(inode, 1);
}

void ufs_evict_inode(struct inode *inode)
{
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode))
		want_delete = 1;

	truncate_inode_pages_final(&inode->i_data);
	if (want_delete) {
		inode->i_size = 0;
		if (inode->i_blocks &&
		    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		     S_ISLNK(inode->i_mode)))
			ufs_truncate_blocks(inode);
		ufs_update_inode(inode, inode_needs_sync(inode));
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);

	if (want_delete)
		ufs_free_inode(inode);
}

struct to_free {
	struct inode *inode;
	u64 to;
	unsigned count;
};

static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
{
	if (ctx->count && ctx->to != from) {
		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
		ctx->count = 0;
	}
	ctx->count += count;
	ctx->to = from + count;
}
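
/*
 * Editorial trace of free_data() (illustrative numbers): it batches runs
 * of physically contiguous blocks before calling ufs_free_blocks().
 * free_data(&ctx, 100, 8) followed by free_data(&ctx, 108, 8) merge into
 * one pending run, and the terminating free_data(&ctx, 0, 0) flushes it
 * as ufs_free_blocks(inode, 100, 16): ctx->to (116) minus ctx->count (16)
 * recovers the start of the run.
 */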

#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
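/*
 * Hedged example: with 1 KiB fragments (s_fshift == 10), an i_size of
 * 5000 bytes gives DIRECT_FRAGMENT == 5, the first fragment past the
 * end of the file's data.
 */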

static void ufs_trunc_direct(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb;
	struct ufs_sb_private_info *uspi;
	void *p;
	u64 frag1, frag2, frag3, frag4, block1, block2;
	struct to_free ctx = {.inode = inode};
	unsigned i, tmp;

	UFSD("ENTER: ino %lu\n", inode->i_ino);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	frag1 = DIRECT_FRAGMENT;
	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
	frag3 = frag4 & ~uspi->s_fpbmask;
	block1 = block2 = 0;
	if (frag2 > frag3) {
		frag2 = frag4;
		frag3 = frag4 = 0;
	} else if (frag2 < frag3) {
		block1 = ufs_fragstoblks(frag2);
		block2 = ufs_fragstoblks(frag3);
	}

	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
	     (unsigned long long)frag1, (unsigned long long)frag2,
	     (unsigned long long)block1, (unsigned long long)block2,
	     (unsigned long long)frag3, (unsigned long long)frag4);

	if (frag1 >= frag2)
		goto next1;

	/*
	 * Free the first free fragments
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic(sb, "ufs_trunc_direct", "internal error");
	frag2 -= frag1;
	frag1 = ufs_fragnum(frag1);

	ufs_free_fragments(inode, tmp + frag1, frag2);

next1:
	/*
	 * Free whole blocks
	 */
	for (i = block1; i < block2; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		tmp = ufs_data_ptr_to_cpu(sb, p);
		if (!tmp)
			continue;
		write_seqlock(&ufsi->meta_lock);
		ufs_data_ptr_clear(uspi, p);
		write_sequnlock(&ufsi->meta_lock);

		free_data(&ctx, tmp, uspi->s_fpb);
	}

	free_data(&ctx, 0, 0);

	if (frag3 >= frag4)
		goto next3;

	/*
	 * Free the last free fragments
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic(sb, "ufs_trunc_direct", "internal error");
	frag4 = ufs_fragnum(frag4);
	write_seqlock(&ufsi->meta_lock);
	ufs_data_ptr_clear(uspi, p);
	write_sequnlock(&ufsi->meta_lock);

	ufs_free_fragments(inode, tmp, frag4);
next3:

	UFSD("EXIT: ino %lu\n", inode->i_ino);
}

static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
	unsigned i;

	if (!ubh)
		return;

	if (--depth) {
		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_full_branch(inode, block, depth);
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_data(&ctx, block, uspi->s_fpb);
		}
		free_data(&ctx, 0, 0);
	}

	ubh_bforget(ubh);
	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
}

static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i;

	if (--depth) {
		for (i = from; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_full_branch(inode, block, depth);
			}
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = from; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_data(&ctx, block, uspi->s_fpb);
			}
		}
		free_data(&ctx, 0, 0);
	}
	if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
		ubh_sync_block(ubh);
	ubh_brelse(ubh);
}

static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
{
	int err = 0;
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i, end;
	sector_t lastfrag;
	struct page *lastpage;
	struct buffer_head *bh;
	u64 phys64;

	lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;

	if (!lastfrag)
		goto out;

	lastfrag--;

	lastpage = ufs_get_locked_page(mapping, lastfrag >>
				       (PAGE_SHIFT - inode->i_blkbits));
	if (IS_ERR(lastpage)) {
		err = -EIO;
		goto out;
	}

	end = lastfrag & ((1 << (PAGE_SHIFT - inode->i_blkbits)) - 1);
	bh = page_buffers(lastpage);
	for (i = 0; i < end; ++i)
		bh = bh->b_this_page;

	err = ufs_getfrag_block(inode, lastfrag, bh, 1);

	if (unlikely(err))
		goto out_unlock;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		clean_bdev_bh_alias(bh);
		/*
		 * we do not zero the fragment: if it was mapped
		 * to a hole, it already contains zeroes
		 */
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		set_page_dirty(lastpage);
	}

	if (lastfrag >= UFS_IND_FRAGMENT) {
		end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
		phys64 = bh->b_blocknr + 1;
		for (i = 0; i < end; ++i) {
			bh = sb_getblk(sb, i + phys64);
			lock_buffer(bh);
			memset(bh->b_data, 0, sb->s_blocksize);
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			unlock_buffer(bh);
			sync_dirty_buffer(bh);
			brelse(bh);
		}
	}
out_unlock:
	ufs_put_locked_page(lastpage);
out:
	return err;
}

static void ufs_truncate_blocks(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned offsets[4];
	int depth;
	int depth2;
	unsigned i;
	struct ufs_buffer_head *ubh[3];
	void *p;
	u64 block;

	if (inode->i_size) {
		sector_t last = (inode->i_size - 1) >> uspi->s_bshift;
		depth = ufs_block_to_path(inode, last, offsets);
		if (!depth)
			return;
	} else {
		depth = 1;
	}

	for (depth2 = depth - 1; depth2; depth2--)
		if (offsets[depth2] != uspi->s_apb - 1)
			break;
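
	/*
	 * Editorial note on the loop above: depth2 ends at the deepest
	 * level whose offset is not the last pointer of its indirect
	 * block (s_apb - 1).  Below that level the truncation path hugs
	 * the right edge of the tree, so only depth2 indirect blocks
	 * need partial emptying via free_branch_tail(); everything to
	 * their right is torn down whole by free_full_branch().
	 */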

	mutex_lock(&ufsi->truncate_mutex);
	if (depth == 1) {
		ufs_trunc_direct(inode);
		offsets[0] = UFS_IND_BLOCK;
	} else {
		/* get the blocks that should be partially emptied */
		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++);
		for (i = 0; i < depth2; i++) {
			block = ufs_data_ptr_to_cpu(sb, p);
			if (!block)
				break;
			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
			if (!ubh[i]) {
				write_seqlock(&ufsi->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&ufsi->meta_lock);
				break;
			}
			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++);
		}
		while (i--)
			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
	}
	for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		block = ufs_data_ptr_to_cpu(sb, p);
		if (block) {
			write_seqlock(&ufsi->meta_lock);
			ufs_data_ptr_clear(uspi, p);
			write_sequnlock(&ufsi->meta_lock);
			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
		}
	}
	read_seqlock_excl(&ufsi->meta_lock);
	ufsi->i_lastfrag = DIRECT_FRAGMENT;
	read_sequnlock_excl(&ufsi->meta_lock);
	mark_inode_dirty(inode);
	mutex_unlock(&ufsi->truncate_mutex);
}

static int ufs_truncate(struct inode *inode, loff_t size)
{
	int err = 0;

	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
	     inode->i_ino, (unsigned long long)size,
	     (unsigned long long)i_size_read(inode));

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	err = ufs_alloc_lastblock(inode, size);

	if (err)
		goto out;

	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);

	truncate_setsize(inode, size);

	ufs_truncate_blocks(inode);
	inode->i_mtime = inode_set_ctime_current(inode);
	mark_inode_dirty(inode);
out:
	UFSD("EXIT: err %d\n", err);
	return err;
}

int ufs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	unsigned int ia_valid = attr->ia_valid;
	int error;

	error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
		error = ufs_truncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(&nop_mnt_idmap, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

const struct inode_operations ufs_file_inode_operations = {
	.setattr = ufs_setattr,
};