// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_error.h"

#include <linux/iversion.h>

kmem_zone_t	*xfs_ili_zone;		/* inode log item zone */

static inline struct xfs_inode_log_item *INODE_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_inode_log_item, ili_item);
}

/*
 * The logged size of an inode fork is always the current size of the inode
 * fork. This means that when an inode fork is relogged, the size of the logged
 * region is determined by the current state, not the combination of the
 * previously logged state + the current state. This is different relogging
 * behaviour to most other log items which will retain the size of the
 * previously logged changes when smaller regions are relogged.
 *
 * Hence, for operations that remove data from the inode fork (e.g. shortform
 * dir/attr removal, extent removal from an extent format fork, etc.), the
 * relogged inode gets -smaller- rather than staying the same size as the
 * previously logged region, and this can result in the committing transaction
 * reducing the amount of space being consumed by the CIL.
 */
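
/*
 * For example (illustrative sizes only): if a shortform directory fork was
 * previously logged at 512 bytes and a subsequent entry removal shrinks it
 * to 256 bytes, only 256 bytes are relogged. Committing that transaction
 * then releases the 256 byte difference from the CIL space accounting
 * instead of carrying the older, larger size forward.
 */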
STATIC void
xfs_inode_item_data_fork_size(
	struct xfs_inode_log_item *iip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_inode	*ip = iip->ili_inode;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		if ((iip->ili_fields & XFS_ILOG_DEXT) &&
		    ip->i_df.if_nextents > 0 &&
		    ip->i_df.if_bytes > 0) {
			/* worst case, doesn't subtract delalloc extents */
			*nbytes += XFS_IFORK_DSIZE(ip);
			*nvecs += 1;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		if ((iip->ili_fields & XFS_ILOG_DBROOT) &&
		    ip->i_df.if_broot_bytes > 0) {
			*nbytes += ip->i_df.if_broot_bytes;
			*nvecs += 1;
		}
		break;
	case XFS_DINODE_FMT_LOCAL:
		if ((iip->ili_fields & XFS_ILOG_DDATA) &&
		    ip->i_df.if_bytes > 0) {
			*nbytes += roundup(ip->i_df.if_bytes, 4);
			*nvecs += 1;
		}
		break;
	case XFS_DINODE_FMT_DEV:
		break;
	default:
		ASSERT(0);
		break;
	}
}

STATIC void
xfs_inode_item_attr_fork_size(
	struct xfs_inode_log_item *iip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_inode	*ip = iip->ili_inode;

	switch (ip->i_afp->if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		if ((iip->ili_fields & XFS_ILOG_AEXT) &&
		    ip->i_afp->if_nextents > 0 &&
		    ip->i_afp->if_bytes > 0) {
			/* worst case, doesn't subtract unused space */
			*nbytes += XFS_IFORK_ASIZE(ip);
			*nvecs += 1;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		if ((iip->ili_fields & XFS_ILOG_ABROOT) &&
		    ip->i_afp->if_broot_bytes > 0) {
			*nbytes += ip->i_afp->if_broot_bytes;
			*nvecs += 1;
		}
		break;
	case XFS_DINODE_FMT_LOCAL:
		if ((iip->ili_fields & XFS_ILOG_ADATA) &&
		    ip->i_afp->if_bytes > 0) {
			*nbytes += roundup(ip->i_afp->if_bytes, 4);
			*nvecs += 1;
		}
		break;
	default:
		ASSERT(0);
		break;
	}
}

/*
 * This returns the number of iovecs needed to log the given inode item.
 *
 * We need one iovec for the inode log format structure, one for the
 * inode core, and possibly one for the inode data/extents/b-tree root
 * and one for the inode attribute data/extents/b-tree root.
 */
STATIC void
xfs_inode_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;

	*nvecs += 2;
	*nbytes += sizeof(struct xfs_inode_log_format) +
		   xfs_log_dinode_size(ip->i_mount);

	xfs_inode_item_data_fork_size(iip, nvecs, nbytes);
	if (XFS_IFORK_Q(ip))
		xfs_inode_item_attr_fork_size(iip, nvecs, nbytes);
}
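
/*
 * Worked example (hypothetical inode): an extent format data fork with
 * logged extents and no attr fork yields *nvecs == 3 (format, core and data
 * extents) and *nbytes == sizeof(struct xfs_inode_log_format) +
 * xfs_log_dinode_size(mp) + XFS_IFORK_DSIZE(ip). The vector count always
 * falls between 2 (just the format and core regions) and 4 (both forks
 * contribute a region).
 */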

STATIC void
xfs_inode_item_format_data_fork(
	struct xfs_inode_log_item *iip,
	struct xfs_inode_log_format *ilf,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp)
{
	struct xfs_inode	*ip = iip->ili_inode;
	size_t			data_bytes;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		iip->ili_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | XFS_ILOG_DEV);

		if ((iip->ili_fields & XFS_ILOG_DEXT) &&
		    ip->i_df.if_nextents > 0 &&
		    ip->i_df.if_bytes > 0) {
			struct xfs_bmbt_rec *p;

			ASSERT(xfs_iext_count(&ip->i_df) > 0);

			p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IEXT);
			data_bytes = xfs_iextents_copy(ip, p, XFS_DATA_FORK);
			xlog_finish_iovec(lv, *vecp, data_bytes);

			ASSERT(data_bytes <= ip->i_df.if_bytes);

			ilf->ilf_dsize = data_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_DEXT;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		iip->ili_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DEXT | XFS_ILOG_DEV);

		if ((iip->ili_fields & XFS_ILOG_DBROOT) &&
		    ip->i_df.if_broot_bytes > 0) {
			ASSERT(ip->i_df.if_broot != NULL);
			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IBROOT,
					ip->i_df.if_broot,
					ip->i_df.if_broot_bytes);
			ilf->ilf_dsize = ip->i_df.if_broot_bytes;
			ilf->ilf_size++;
		} else {
			ASSERT(!(iip->ili_fields &
				 XFS_ILOG_DBROOT));
			iip->ili_fields &= ~XFS_ILOG_DBROOT;
		}
		break;
	case XFS_DINODE_FMT_LOCAL:
		iip->ili_fields &=
			~(XFS_ILOG_DEXT | XFS_ILOG_DBROOT | XFS_ILOG_DEV);
		if ((iip->ili_fields & XFS_ILOG_DDATA) &&
		    ip->i_df.if_bytes > 0) {
			/*
			 * Round if_bytes up to a word boundary.
			 * The underlying memory is guaranteed
			 * to be there by xfs_idata_realloc().
			 */
			data_bytes = roundup(ip->i_df.if_bytes, 4);
			ASSERT(ip->i_df.if_u1.if_data != NULL);
			ASSERT(ip->i_d.di_size > 0);
			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_ILOCAL,
					ip->i_df.if_u1.if_data, data_bytes);
			ilf->ilf_dsize = (unsigned)data_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_DDATA;
		}
		break;
	case XFS_DINODE_FMT_DEV:
		iip->ili_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | XFS_ILOG_DEXT);
		if (iip->ili_fields & XFS_ILOG_DEV)
			ilf->ilf_u.ilfu_rdev = sysv_encode_dev(VFS_I(ip)->i_rdev);
		break;
	default:
		ASSERT(0);
		break;
	}
}
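
/*
 * Note that each format case above (and in the attr fork variant below)
 * first clears the ili_fields bits that cannot apply to the fork's current
 * format. This is what lets xfs_inode_item_format() write back an ilf_fields
 * value describing exactly the regions that were logged.
 */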

STATIC void
xfs_inode_item_format_attr_fork(
	struct xfs_inode_log_item *iip,
	struct xfs_inode_log_format *ilf,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp)
{
	struct xfs_inode	*ip = iip->ili_inode;
	size_t			data_bytes;

	switch (ip->i_afp->if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		iip->ili_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT);

		if ((iip->ili_fields & XFS_ILOG_AEXT) &&
		    ip->i_afp->if_nextents > 0 &&
		    ip->i_afp->if_bytes > 0) {
			struct xfs_bmbt_rec *p;

			ASSERT(xfs_iext_count(ip->i_afp) ==
				ip->i_afp->if_nextents);

			p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_EXT);
			data_bytes = xfs_iextents_copy(ip, p, XFS_ATTR_FORK);
			xlog_finish_iovec(lv, *vecp, data_bytes);

			ilf->ilf_asize = data_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_AEXT;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		iip->ili_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_AEXT);

		if ((iip->ili_fields & XFS_ILOG_ABROOT) &&
		    ip->i_afp->if_broot_bytes > 0) {
			ASSERT(ip->i_afp->if_broot != NULL);

			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_BROOT,
					ip->i_afp->if_broot,
					ip->i_afp->if_broot_bytes);
			ilf->ilf_asize = ip->i_afp->if_broot_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_ABROOT;
		}
		break;
	case XFS_DINODE_FMT_LOCAL:
		iip->ili_fields &=
			~(XFS_ILOG_AEXT | XFS_ILOG_ABROOT);

		if ((iip->ili_fields & XFS_ILOG_ADATA) &&
		    ip->i_afp->if_bytes > 0) {
			/*
			 * Round if_bytes up to a word boundary.
			 * The underlying memory is guaranteed
			 * to be there by xfs_idata_realloc().
			 */
			data_bytes = roundup(ip->i_afp->if_bytes, 4);
			ASSERT(ip->i_afp->if_u1.if_data != NULL);
			xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_LOCAL,
					ip->i_afp->if_u1.if_data,
					data_bytes);
			ilf->ilf_asize = (unsigned)data_bytes;
			ilf->ilf_size++;
		} else {
			iip->ili_fields &= ~XFS_ILOG_ADATA;
		}
		break;
	default:
		ASSERT(0);
		break;
	}
}

/*
 * Convert an incore timestamp to a log timestamp.  Note that the log format
 * specifies host endian format!
 */
static inline xfs_ictimestamp_t
xfs_inode_to_log_dinode_ts(
	struct xfs_inode		*ip,
	const struct timespec64		tv)
{
	struct xfs_legacy_ictimestamp	*lits;
	xfs_ictimestamp_t		its;

	if (xfs_inode_has_bigtime(ip))
		return xfs_inode_encode_bigtime(tv);

	lits = (struct xfs_legacy_ictimestamp *)&its;
	lits->t_sec = tv.tv_sec;
	lits->t_nsec = tv.tv_nsec;

	return its;
}
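
/*
 * Example: with bigtime disabled, a timestamp of { .tv_sec = 1, .tv_nsec = 2 }
 * is packed as two host endian 32 bit words, t_sec = 1 followed by
 * t_nsec = 2, overlaid on the 64 bit xfs_ictimestamp_t. With bigtime
 * enabled, the full 64 bits instead hold a nanosecond counter relative to
 * the bigtime epoch, as produced by xfs_inode_encode_bigtime().
 */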

static void
xfs_inode_to_log_dinode(
	struct xfs_inode	*ip,
	struct xfs_log_dinode	*to,
	xfs_lsn_t		lsn)
{
	struct xfs_icdinode	*from = &ip->i_d;
	struct inode		*inode = VFS_I(ip);

	to->di_magic = XFS_DINODE_MAGIC;
	to->di_format = xfs_ifork_format(&ip->i_df);
	to->di_uid = i_uid_read(inode);
	to->di_gid = i_gid_read(inode);
	to->di_projid_lo = from->di_projid & 0xffff;
	to->di_projid_hi = from->di_projid >> 16;

	memset(to->di_pad, 0, sizeof(to->di_pad));
	memset(to->di_pad3, 0, sizeof(to->di_pad3));
	to->di_atime = xfs_inode_to_log_dinode_ts(ip, inode->i_atime);
	to->di_mtime = xfs_inode_to_log_dinode_ts(ip, inode->i_mtime);
	to->di_ctime = xfs_inode_to_log_dinode_ts(ip, inode->i_ctime);
	to->di_nlink = inode->i_nlink;
	to->di_gen = inode->i_generation;
	to->di_mode = inode->i_mode;

	to->di_size = from->di_size;
	to->di_nblocks = from->di_nblocks;
	to->di_extsize = from->di_extsize;
	to->di_nextents = xfs_ifork_nextents(&ip->i_df);
	to->di_anextents = xfs_ifork_nextents(ip->i_afp);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = xfs_ifork_format(ip->i_afp);
	to->di_dmevmask = from->di_dmevmask;
	to->di_dmstate = from->di_dmstate;
	to->di_flags = from->di_flags;

	/* log a dummy value to ensure log structure is fully initialised */
	to->di_next_unlinked = NULLAGINO;

	if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
		to->di_version = 3;
		to->di_changecount = inode_peek_iversion(inode);
		to->di_crtime = xfs_inode_to_log_dinode_ts(ip, from->di_crtime);
		to->di_flags2 = from->di_flags2;
		to->di_cowextsize = from->di_cowextsize;
		to->di_ino = ip->i_ino;
		to->di_lsn = lsn;
		memset(to->di_pad2, 0, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_version = 2;
		to->di_flushiter = from->di_flushiter;
	}
}

/*
 * Format the inode core. Current timestamp data is only in the VFS inode
 * fields, so we need to grab them from there. Hence rather than just copying
 * the XFS inode core structure, format the fields directly into the iovec.
 */
static void
xfs_inode_item_format_core(
	struct xfs_inode	*ip,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp)
{
	struct xfs_log_dinode	*dic;

	dic = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_ICORE);
	xfs_inode_to_log_dinode(ip, dic, ip->i_itemp->ili_item.li_lsn);
	xlog_finish_iovec(lv, *vecp, xfs_log_dinode_size(ip->i_mount));
}

/*
 * This is called to fill in the vector of log iovecs for the given inode
 * log item.  It fills the first item with an inode log format structure,
 * the second with the on-disk inode structure, and a possible third and/or
 * fourth with the inode data/extents/b-tree root and inode attributes
 * data/extents/b-tree root.
 *
 * Note: Always use the 64 bit inode log format structure so we don't
 * leave an uninitialised hole in the format item on 64 bit systems. Log
 * recovery on 32 bit systems handles this just fine, so there's no reason
 * not to use and initialise the properly padded structure all the time.
 */
STATIC void
xfs_inode_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	struct xfs_log_iovec	*vecp = NULL;
	struct xfs_inode_log_format *ilf;

	ilf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_IFORMAT);
	ilf->ilf_type = XFS_LI_INODE;
	ilf->ilf_ino = ip->i_ino;
	ilf->ilf_blkno = ip->i_imap.im_blkno;
	ilf->ilf_len = ip->i_imap.im_len;
	ilf->ilf_boffset = ip->i_imap.im_boffset;
	ilf->ilf_fields = XFS_ILOG_CORE;
	ilf->ilf_size = 2; /* format + core */

	/*
	 * make sure we don't leak uninitialised data into the log in the case
	 * when we don't log every field in the inode.
	 */
	ilf->ilf_dsize = 0;
	ilf->ilf_asize = 0;
	ilf->ilf_pad = 0;
	memset(&ilf->ilf_u, 0, sizeof(ilf->ilf_u));

	xlog_finish_iovec(lv, vecp, sizeof(*ilf));

	xfs_inode_item_format_core(ip, lv, &vecp);
	xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp);
	if (XFS_IFORK_Q(ip)) {
		xfs_inode_item_format_attr_fork(iip, ilf, lv, &vecp);
	} else {
		iip->ili_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT);
	}

	/* update the format with the exact fields we actually logged */
	ilf->ilf_fields |= (iip->ili_fields & ~XFS_ILOG_TIMESTAMP);
}

/*
 * This is called to pin the inode associated with the inode log
 * item in memory so it cannot be written out.
 */
STATIC void
xfs_inode_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_inode	*ip = INODE_ITEM(lip)->ili_inode;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(lip->li_buf);

	trace_xfs_inode_pin(ip, _RET_IP_);
	atomic_inc(&ip->i_pincount);
}

/*
 * This is called to unpin the inode associated with the inode log
 * item which was previously pinned with a call to xfs_inode_item_pin().
 *
 * Also wake up anyone in xfs_iunpin_wait() if the count goes to 0.
 *
 * Note that unpin can race with inode cluster buffer freeing marking the buffer
 * stale. In that case, flush completions are run from the buffer unpin call,
 * which may happen before the inode is unpinned. If we lose the race, there
 * will be no buffer attached to the log item, but the inode will be marked
 * XFS_ISTALE.
 */
STATIC void
xfs_inode_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_inode	*ip = INODE_ITEM(lip)->ili_inode;

	trace_xfs_inode_unpin(ip, _RET_IP_);
	ASSERT(lip->li_buf || xfs_iflags_test(ip, XFS_ISTALE));
	ASSERT(atomic_read(&ip->i_pincount) > 0);
	if (atomic_dec_and_test(&ip->i_pincount))
		wake_up_bit(&ip->i_flags, __XFS_IPINNED_BIT);
}

STATIC uint
xfs_inode_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
		__releases(&lip->li_ailp->ail_lock)
		__acquires(&lip->li_ailp->ail_lock)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	struct xfs_buf		*bp = lip->li_buf;
	uint			rval = XFS_ITEM_SUCCESS;
	int			error;

	ASSERT(iip->ili_item.li_buf);

	if (xfs_ipincount(ip) > 0 || xfs_buf_ispinned(bp) ||
	    (ip->i_flags & XFS_ISTALE))
		return XFS_ITEM_PINNED;

	if (xfs_iflags_test(ip, XFS_IFLUSHING))
		return XFS_ITEM_FLUSHING;

	if (!xfs_buf_trylock(bp))
		return XFS_ITEM_LOCKED;

	spin_unlock(&lip->li_ailp->ail_lock);

	/*
	 * We need to hold a reference for flushing the cluster buffer as it
	 * may fail the buffer without IO submission. If that happens, we need
	 * a reference for that completion, because otherwise we don't get a
	 * reference for IO until we queue the buffer for delwri submission.
	 */
	xfs_buf_hold(bp);
	error = xfs_iflush_cluster(bp);
	if (!error) {
		if (!xfs_buf_delwri_queue(bp, buffer_list))
			rval = XFS_ITEM_FLUSHING;
		xfs_buf_relse(bp);
	} else {
		/*
		 * Release the buffer if we were unable to flush anything. On
		 * any other error, the buffer has already been released.
		 */
		if (error == -EAGAIN)
			xfs_buf_relse(bp);
		rval = XFS_ITEM_LOCKED;
	}

	spin_lock(&lip->li_ailp->ail_lock);
	return rval;
}

/*
 * Unlock the inode associated with the inode log item.
 */
STATIC void
xfs_inode_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	unsigned short		lock_flags;

	ASSERT(ip->i_itemp != NULL);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	lock_flags = iip->ili_lock_flags;
	iip->ili_lock_flags = 0;
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
}

/*
 * This is called to find out where the oldest active copy of the inode log
 * item in the on disk log resides now that the last log write of it completed
 * at the given lsn.  Since we always re-log all dirty data in an inode, the
 * latest copy in the on disk log is the only one that matters.  Therefore,
 * simply return the given lsn.
 *
 * If the inode has been marked stale because the cluster is being freed, we
 * don't want to (re-)insert this inode into the AIL. There is a race condition
 * where the cluster buffer may be unpinned before the inode is inserted into
 * the AIL during transaction committed processing. If the buffer is unpinned
 * before the inode item has been committed and inserted, then it is possible
 * for the buffer to be written and IO to complete before the inode is inserted
 * into the AIL. In that case, we'd be inserting a clean, stale inode into the
 * AIL which will never get removed. It will, however, get reclaimed, which
 * triggers an assert in xfs_inode_free() complaining about freeing an inode
 * still in the AIL.
 *
 * To avoid this, just unpin the inode directly and return an LSN of -1 so the
 * transaction committed code knows that it does not need to do any further
 * processing on the item.
 */
STATIC xfs_lsn_t
xfs_inode_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;

	if (xfs_iflags_test(ip, XFS_ISTALE)) {
		xfs_inode_item_unpin(lip, 0);
		return -1;
	}
	return lsn;
}

STATIC void
xfs_inode_item_committing(
	struct xfs_log_item	*lip,
	xfs_csn_t		seq)
{
	INODE_ITEM(lip)->ili_commit_seq = seq;
	return xfs_inode_item_release(lip);
}

static const struct xfs_item_ops xfs_inode_item_ops = {
	.iop_size	= xfs_inode_item_size,
	.iop_format	= xfs_inode_item_format,
	.iop_pin	= xfs_inode_item_pin,
	.iop_unpin	= xfs_inode_item_unpin,
	.iop_release	= xfs_inode_item_release,
	.iop_committed	= xfs_inode_item_committed,
	.iop_push	= xfs_inode_item_push,
	.iop_committing	= xfs_inode_item_committing,
};

/*
 * Initialize the inode log item for a newly allocated (in-core) inode.
 */
void
xfs_inode_item_init(
	struct xfs_inode	*ip,
	struct xfs_mount	*mp)
{
	struct xfs_inode_log_item *iip;

	ASSERT(ip->i_itemp == NULL);
	iip = ip->i_itemp = kmem_cache_zalloc(xfs_ili_zone,
					      GFP_KERNEL | __GFP_NOFAIL);

	iip->ili_inode = ip;
	spin_lock_init(&iip->ili_lock);
	xfs_log_item_init(mp, &iip->ili_item, XFS_LI_INODE,
						&xfs_inode_item_ops);
}

/*
 * Free the inode log item and any memory hanging off of it.
 */
void
xfs_inode_item_destroy(
	struct xfs_inode	*ip)
{
	struct xfs_inode_log_item *iip = ip->i_itemp;

	ASSERT(iip->ili_item.li_buf == NULL);

	ip->i_itemp = NULL;
	kmem_free(iip->ili_item.li_lv_shadow);
	kmem_cache_free(xfs_ili_zone, iip);
}

/*
 * We only want to pull the item from the AIL if it is actually there
 * and its location in the log has not changed since we started the
 * flush.  Thus, we only bother if the inode's lsn has not changed.
 */
static void
xfs_iflush_ail_updates(
	struct xfs_ail		*ailp,
	struct list_head	*list)
{
	struct xfs_log_item	*lip;
	xfs_lsn_t		tail_lsn = 0;

	/* this is an opencoded batch version of xfs_trans_ail_delete */
	spin_lock(&ailp->ail_lock);
	list_for_each_entry(lip, list, li_bio_list) {
		xfs_lsn_t	lsn;

		clear_bit(XFS_LI_FAILED, &lip->li_flags);
		if (INODE_ITEM(lip)->ili_flush_lsn != lip->li_lsn)
			continue;

		lsn = xfs_ail_delete_one(ailp, lip);
		if (!tail_lsn && lsn)
			tail_lsn = lsn;
	}
	xfs_ail_update_finish(ailp, tail_lsn);
}

/*
 * Walk the list of inodes that have completed their IOs. If they are clean,
 * remove them from the list and dissociate them from the buffer. Inodes that
 * are still dirty remain linked to the buffer and on the list. Caller must
 * handle them appropriately.
 */
static void
xfs_iflush_finish(
	struct xfs_buf		*bp,
	struct list_head	*list)
{
	struct xfs_log_item	*lip, *n;

	list_for_each_entry_safe(lip, n, list, li_bio_list) {
		struct xfs_inode_log_item *iip = INODE_ITEM(lip);
		bool	drop_buffer = false;

		spin_lock(&iip->ili_lock);

		/*
		 * Remove the reference to the cluster buffer if the inode is
		 * clean in memory and drop the buffer reference once we've
		 * dropped the locks we hold.
		 */
		ASSERT(iip->ili_item.li_buf == bp);
		if (!iip->ili_fields) {
			iip->ili_item.li_buf = NULL;
			list_del_init(&lip->li_bio_list);
			drop_buffer = true;
		}
		iip->ili_last_fields = 0;
		iip->ili_flush_lsn = 0;
		spin_unlock(&iip->ili_lock);
		xfs_iflags_clear(iip->ili_inode, XFS_IFLUSHING);
		if (drop_buffer)
			xfs_buf_rele(bp);
	}
}

/*
 * Inode buffer IO completion routine.  It is responsible for removing inodes
 * attached to the buffer from the AIL if they have not been re-logged and
 * completing the inode flush.
 */
void
xfs_buf_inode_iodone(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip, *n;
	LIST_HEAD(flushed_inodes);
	LIST_HEAD(ail_updates);

	/*
	 * Pull the attached inodes from the buffer one at a time and take the
	 * appropriate action on them.
	 */
	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
		struct xfs_inode_log_item *iip = INODE_ITEM(lip);

		if (xfs_iflags_test(iip->ili_inode, XFS_ISTALE)) {
			xfs_iflush_abort(iip->ili_inode);
			continue;
		}
		if (!iip->ili_last_fields)
			continue;

		/* Do an unlocked check for needing the AIL lock. */
		if (iip->ili_flush_lsn == lip->li_lsn ||
		    test_bit(XFS_LI_FAILED, &lip->li_flags))
			list_move_tail(&lip->li_bio_list, &ail_updates);
		else
			list_move_tail(&lip->li_bio_list, &flushed_inodes);
	}

	if (!list_empty(&ail_updates)) {
		xfs_iflush_ail_updates(bp->b_mount->m_ail, &ail_updates);
		list_splice_tail(&ail_updates, &flushed_inodes);
	}

	xfs_iflush_finish(bp, &flushed_inodes);
	if (!list_empty(&flushed_inodes))
		list_splice_tail(&flushed_inodes, &bp->b_li_list);
}
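
/*
 * To summarise the dispositions above: stale inodes are aborted, inodes that
 * were flushed at their current lsn (or have failed) go through the batched
 * AIL update before being finished, and everything else goes straight to
 * xfs_iflush_finish(). Inodes still dirty at the end are spliced back onto
 * the buffer list for a future flush.
 */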

void
xfs_buf_inode_io_fail(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip;

	list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
		set_bit(XFS_LI_FAILED, &lip->li_flags);
}

/*
 * This is the inode flushing abort routine.  It is called when
 * the filesystem is shutting down to clean up the inode state.  It is
 * responsible for removing the inode item from the AIL if it has not been
 * re-logged and clearing the inode's flush state.
 */
void
xfs_iflush_abort(
	struct xfs_inode	*ip)
{
	struct xfs_inode_log_item *iip = ip->i_itemp;
	struct xfs_buf		*bp = NULL;

	if (iip) {
		/*
		 * Clear the failed bit before removing the item from the AIL so
		 * xfs_trans_ail_delete() doesn't try to clear and release the
		 * buffer attached to the log item before we are done with it.
		 */
		clear_bit(XFS_LI_FAILED, &iip->ili_item.li_flags);
		xfs_trans_ail_delete(&iip->ili_item, 0);

		/*
		 * Clear the inode logging fields so no more flushes are
		 * attempted.
		 */
		spin_lock(&iip->ili_lock);
		iip->ili_last_fields = 0;
		iip->ili_fields = 0;
		iip->ili_fsync_fields = 0;
		iip->ili_flush_lsn = 0;
		bp = iip->ili_item.li_buf;
		iip->ili_item.li_buf = NULL;
		list_del_init(&iip->ili_item.li_bio_list);
		spin_unlock(&iip->ili_lock);
	}
	xfs_iflags_clear(ip, XFS_IFLUSHING);
	if (bp)
		xfs_buf_rele(bp);
}

/*
 * Convert an xfs_inode_log_format struct from the old 32 bit version
 * (which can have different field alignments) to the native 64 bit version.
 */
int
xfs_inode_item_format_convert(
	struct xfs_log_iovec		*buf,
	struct xfs_inode_log_format	*in_f)
{
	struct xfs_inode_log_format_32	*in_f32 = buf->i_addr;

	if (buf->i_len != sizeof(*in_f32)) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
		return -EFSCORRUPTED;
	}

	in_f->ilf_type = in_f32->ilf_type;
	in_f->ilf_size = in_f32->ilf_size;
	in_f->ilf_fields = in_f32->ilf_fields;
	in_f->ilf_asize = in_f32->ilf_asize;
	in_f->ilf_dsize = in_f32->ilf_dsize;
	in_f->ilf_ino = in_f32->ilf_ino;
	memcpy(&in_f->ilf_u, &in_f32->ilf_u, sizeof(in_f->ilf_u));
	in_f->ilf_blkno = in_f32->ilf_blkno;
	in_f->ilf_len = in_f32->ilf_len;
	in_f->ilf_boffset = in_f32->ilf_boffset;
	return 0;
}

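/*
 * Note on the copy above: the 32 bit variant of the structure is laid out
 * with different (packed) field alignment, so the 64 bit fields that follow
 * the leading 16 and 32 bit members can sit at different offsets than in
 * the native structure. A straight memcpy() of the whole record could
 * therefore misplace them, which is why the field by field copy is used.
 */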