// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020-2022, Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_ag.h"
#include "xfs_iunlink_item.h"
#include "xfs_trace.h"
#include "xfs_error.h"

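/* Slab cache used to allocate iunlink log items. */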
struct kmem_cache	*xfs_iunlink_cache;

static inline struct xfs_iunlink_item *IUL_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_iunlink_item, item);
}

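/*
 * Freeing the iunlink item also drops the per-AG reference that was taken
 * when the item was created.
 */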
static void
xfs_iunlink_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_iunlink_item	*iup = IUL_ITEM(lip);

	xfs_perag_put(iup->pag);
	kmem_cache_free(xfs_iunlink_cache, iup);
}

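/*
 * Sort iunlink items by the inode number of the inode being unlinked so that
 * precommit processes the items, and hence locks the inode cluster buffers,
 * in a consistent order.
 */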
static uint64_t
xfs_iunlink_item_sort(
	struct xfs_log_item	*lip)
{
	return IUL_ITEM(lip)->ip->i_ino;
}

/*
 * Look up the inode cluster buffer and log the on-disk unlinked inode change
 * we need to make.
 */
static int
xfs_iunlink_log_dinode(
	struct xfs_trans	*tp,
	struct xfs_iunlink_item	*iup)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_inode	*ip = iup->ip;
	struct xfs_dinode	*dip;
	struct xfs_buf		*ibp;
	int			offset;
	int			error;

	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &ibp);
	if (error)
		return error;
	/*
	 * Don't log the unlinked field on stale buffers as this may be the
	 * transaction that frees the inode cluster and relogging the buffer
	 * here will incorrectly remove the stale state.
	 */
	if (ibp->b_flags & XBF_STALE)
		goto out;

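	/* Find the on-disk inode within the cluster buffer. */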
	dip = xfs_buf_offset(ibp, ip->i_imap.im_boffset);

	/* Make sure the old pointer isn't garbage. */
	if (be32_to_cpu(dip->di_next_unlinked) != iup->old_agino) {
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
				sizeof(*dip), __this_address);
		error = -EFSCORRUPTED;
		goto out;
	}

	trace_xfs_iunlink_update_dinode(mp, iup->pag->pag_agno,
			XFS_INO_TO_AGINO(mp, ip->i_ino),
			be32_to_cpu(dip->di_next_unlinked), iup->next_agino);

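	/*
	 * Update the on-disk pointer, then recalculate the inode CRC and log
	 * only the next_unlinked field of the cluster buffer.
	 */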
	dip->di_next_unlinked = cpu_to_be32(iup->next_agino);
	offset = ip->i_imap.im_boffset +
			offsetof(struct xfs_dinode, di_next_unlinked);

	xfs_dinode_calc_crc(mp, dip);
	xfs_trans_inode_buf(tp, ibp);
	xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
	return 0;
out:
	xfs_trans_brelse(tp, ibp);
	return error;
}

/*
 * On precommit, we grab the inode cluster buffer for the inode number we were
 * passed, then update the next unlinked field for that inode in the buffer and
 * log the buffer. This ensures that the inode cluster buffer is logged in the
 * correct order w.r.t. other inode cluster buffers. We can then remove the
 * iunlink item from the transaction and release it as it has now served its
 * purpose.
 */
static int
xfs_iunlink_item_precommit(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	struct xfs_iunlink_item	*iup = IUL_ITEM(lip);
	int			error;

	error = xfs_iunlink_log_dinode(tp, iup);
	list_del(&lip->li_trans);
	xfs_iunlink_item_release(lip);
	return error;
}

static const struct xfs_item_ops xfs_iunlink_item_ops = {
	.iop_release	= xfs_iunlink_item_release,
	.iop_sort	= xfs_iunlink_item_sort,
	.iop_precommit	= xfs_iunlink_item_precommit,
};

/*
 * Record the change to the on-disk unlinked list pointer of an inode so that
 * it can be applied to the inode cluster buffer when the transaction runs
 * its precommit operations.
 *
 * This joins the item to the transaction and marks it dirty so
 * that we don't need a separate call to do this, nor does the
 * caller need to know anything about the iunlink item.
 */
int
xfs_iunlink_log_inode(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	xfs_agino_t		next_agino)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_iunlink_item	*iup;

	ASSERT(xfs_verify_agino_or_null(pag, next_agino));
	ASSERT(xfs_verify_agino_or_null(pag, ip->i_next_unlinked));

	/*
	 * Since we're updating a linked list, we should never find that the
	 * current pointer is the same as the new value, unless we're
	 * terminating the list.
	 */
	if (ip->i_next_unlinked == next_agino) {
		if (next_agino != NULLAGINO)
			return -EFSCORRUPTED;
		return 0;
	}

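	/*
	 * There is no way to recover from an allocation failure here, so the
	 * item is allocated with __GFP_NOFAIL.
	 */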
	iup = kmem_cache_zalloc(xfs_iunlink_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(mp, &iup->item, XFS_LI_IUNLINK,
			  &xfs_iunlink_item_ops);

	iup->ip = ip;
	iup->next_agino = next_agino;
	iup->old_agino = ip->i_next_unlinked;
	iup->pag = xfs_perag_hold(pag);

	xfs_trans_add_item(tp, &iup->item);
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &iup->item.li_flags);
	return 0;
}