// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_buf.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_attr.h"

static struct kmem_cache	*xfs_defer_pending_cache;

/*
 * Deferred Operations in XFS
 *
 * Due to the way locking rules work in XFS, certain transactions (block
 * mapping and unmapping, typically) have permanent reservations so that
 * we can roll the transaction to adhere to AG locking order rules and
 * to unlock buffers between metadata updates.  Prior to rmap/reflink,
 * the mapping code had a mechanism to perform these deferrals for
 * extents that were going to be freed; this code makes that facility
 * more generic.
 *
 * When adding the reverse mapping and reflink features, it became
 * necessary to perform complex remapping multi-transactions to comply
 * with AG locking order rules, and to be able to spread a single
 * refcount update operation (an operation on an n-block extent can
 * update as many as n records!) among multiple transactions.  XFS can
 * roll a transaction to facilitate this, but using this facility
 * requires us to log "intent" items in case log recovery needs to
 * redo the operation, and to log "done" items to indicate that redo
 * is not necessary.
 *
 * Deferred work is tracked in xfs_defer_pending items.  Each pending
 * item tracks one type of deferred work.  Incoming work items (which
 * have not yet had an intent logged) are attached to a pending item
 * on the dop_intake list, where they wait for the caller to finish
 * the deferred operations.
 *
 * Finishing a set of deferred operations is an involved process.  To
 * start, we define "rolling a deferred-op transaction" as follows:
 *
 * > For each xfs_defer_pending item on the dop_intake list,
 *   - Sort the work items in AG order.  XFS locking
 *     order rules require us to lock buffers in AG order.
 *   - Create a log intent item for that type.
 *   - Attach it to the pending item.
 *   - Move the pending item from the dop_intake list to the
 *     dop_pending list.
 * > Roll the transaction.
 *
 * NOTE: To avoid exceeding the transaction reservation, we limit the
 * number of items that we attach to a given xfs_defer_pending.
 *
 * The actual finishing process looks like this:
 *
 * > For each xfs_defer_pending in the dop_pending list,
 *   - Roll the deferred-op transaction as above.
 *   - Create a log done item for that type, and attach it to the
 *     log intent item.
 *   - For each work item attached to the log intent item,
 *     * Perform the described action.
 *     * Attach the work item to the log done item.
 *     * If the result of doing the work was -EAGAIN, ->finish_item
 *       wants a new transaction.  See the "Requesting a Fresh
 *       Transaction while Finishing Deferred Work" section below for
 *       details.
 *
 * The key here is that we must log an intent item for all pending
 * work items every time we roll the transaction, and that we must log
 * a done item as soon as the work is completed.  With this mechanism
 * we can perform complex remapping operations, chaining intent items
 * as needed.
 *
 * Requesting a Fresh Transaction while Finishing Deferred Work
 *
 * If ->finish_item decides that it needs a fresh transaction to
 * finish the work, it must ask its caller (xfs_defer_finish) for a
 * continuation.  The most likely cause of this circumstance is a
 * refcount adjust function deciding that it has logged enough items
 * to be at risk of exceeding the transaction reservation.
 *
 * To get a fresh transaction, we want to log the existing log done
 * item to prevent the log intent item from replaying, immediately log
 * a new log intent item with the unfinished work items, roll the
 * transaction, and re-call ->finish_item wherever it left off.  The
 * log done item and the new log intent item must be in the same
 * transaction or atomicity cannot be guaranteed; defer_finish ensures
 * that this happens.
 *
 * This requires some coordination between ->finish_item and
 * defer_finish.  Upon deciding to request a new transaction,
 * ->finish_item should update the current work item to reflect the
 * unfinished work.  Next, it should reset the log done item's list
 * count to the number of items finished, and return -EAGAIN.
 * defer_finish sees the -EAGAIN, logs the new log intent item
 * with the remaining work items, and leaves the xfs_defer_pending
 * item at the head of the dop_work queue.  Then it rolls the
 * transaction and picks up processing where it left off.
 * ->finish_item must be careful to leave enough transaction
 * reservation to fit the new log intent item.
 *
 * This is an example of remapping the extent (E, E+B) into file X at
 * offset A and dealing with the extent (C, C+B) already being mapped
 * there:
 * +-------------------------------------------------+
 * | Unmap file X startblock C offset A length B     | t0
 * | Intent to reduce refcount for extent (C, B)     |
 * | Intent to remove rmap (X, C, A, B)              |
 * | Intent to free extent (D, 1) (bmbt block)       |
 * | Intent to map (X, A, B) at startblock E         |
 * +-------------------------------------------------+
 * | Map file X startblock E offset A length B       | t1
 * | Done mapping (X, E, A, B)                       |
 * | Intent to increase refcount for extent (E, B)   |
 * | Intent to add rmap (X, E, A, B)                 |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C, B)               | t2
 * | Done reducing refcount for extent (C, 9)        |
 * | Intent to reduce refcount for extent (C+9, B-9) |
 * | (ran out of space after 9 refcount updates)     |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C+9, B-9)           | t3
 * | Done reducing refcount for extent (C+9, B-9)    |
 * | Increase refcount for extent (E, B)             |
 * | Done increasing refcount for extent (E, B)      |
 * | Intent to free extent (C, B)                    |
 * | Intent to free extent (F, 1) (refcountbt block) |
 * | Intent to remove rmap (F, 1, REFC)              |
 * +-------------------------------------------------+
 * | Remove rmap (X, C, A, B)                        | t4
 * | Done removing rmap (X, C, A, B)                 |
 * | Add rmap (X, E, A, B)                           |
 * | Done adding rmap (X, E, A, B)                   |
 * | Remove rmap (F, 1, REFC)                        |
 * | Done removing rmap (F, 1, REFC)                 |
 * +-------------------------------------------------+
 * | Free extent (C, B)                              | t5
 * | Done freeing extent (C, B)                      |
 * | Free extent (D, 1)                              |
 * | Done freeing extent (D, 1)                      |
 * | Free extent (F, 1)                              |
 * | Done freeing extent (F, 1)                      |
 * +-------------------------------------------------+
 *
 * If we should crash before t2 commits, log recovery replays
 * the following intent items:
 *
 * - Intent to reduce refcount for extent (C, B)
 * - Intent to remove rmap (X, C, A, B)
 * - Intent to free extent (D, 1) (bmbt block)
 * - Intent to increase refcount for extent (E, B)
 * - Intent to add rmap (X, E, A, B)
 *
 * In the process of recovering, it should also generate and take care
 * of these intent items:
 *
 * - Intent to free extent (C, B)
 * - Intent to free extent (F, 1) (refcountbt block)
 * - Intent to remove rmap (F, 1, REFC)
 *
 * Note that the continuation requested between t2 and t3 is likely to
 * reoccur.
 */
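
/*
 * For illustration only: a minimal sketch of the caller-side pattern the
 * scheme above implies, assuming a hypothetical work item type.  The item
 * and list-member names below are made up; real callers go through the
 * type-specific helpers in the rmap/refcount/bmap/alloc code rather than
 * hand-rolling work items, but the xfs_defer_add and xfs_defer_finish
 * calls match the functions defined in this file.
 *
 *	struct xfs_hypothetical_item	*hi;	// made-up work item
 *
 *	// Queue the work; this only attaches it to the intake list.
 *	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &hi->hi_list);
 *
 *	// Log intents, roll, and finish everything queued so far,
 *	// including any work items created while finishing.
 *	error = xfs_defer_finish(&tp);
 *	if (error)
 *		goto out_cancel;
 *	error = xfs_trans_commit(tp);
 */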

static const struct xfs_defer_op_type *defer_op_types[] = {
	[XFS_DEFER_OPS_TYPE_BMAP]	= &xfs_bmap_update_defer_type,
	[XFS_DEFER_OPS_TYPE_REFCOUNT]	= &xfs_refcount_update_defer_type,
	[XFS_DEFER_OPS_TYPE_RMAP]	= &xfs_rmap_update_defer_type,
	[XFS_DEFER_OPS_TYPE_FREE]	= &xfs_extent_free_defer_type,
	[XFS_DEFER_OPS_TYPE_AGFL_FREE]	= &xfs_agfl_free_defer_type,
	[XFS_DEFER_OPS_TYPE_ATTR]	= &xfs_attr_defer_type,
};
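
/*
 * Each entry above points to a vtable describing how one kind of deferred
 * work is logged, finished, and cancelled.  For illustration, a hypothetical
 * defer op type would be wired up roughly as below; the "foo" names are
 * made up, but the member names match how this file invokes them:
 *
 *	const struct xfs_defer_op_type xfs_foo_defer_type = {
 *		.max_items	= 16,	// cap work items per intent
 *		.create_intent	= xfs_foo_create_intent,
 *		.abort_intent	= xfs_foo_abort_intent,
 *		.create_done	= xfs_foo_create_done,
 *		.finish_item	= xfs_foo_finish_item,
 *		.finish_cleanup	= xfs_foo_finish_cleanup,	// optional
 *		.cancel_item	= xfs_foo_cancel_item,
 *	};
 */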

/*
 * Ensure there's a log intent item associated with this deferred work item if
 * the operation must be restarted on crash.  Returns 1 if there's a log item;
 * 0 if there isn't; or a negative errno.
 */
static int
xfs_defer_create_intent(
	struct xfs_trans		*tp,
	struct xfs_defer_pending	*dfp,
	bool				sort)
{
	const struct xfs_defer_op_type	*ops = defer_op_types[dfp->dfp_type];
	struct xfs_log_item		*lip;

	if (dfp->dfp_intent)
		return 1;

	lip = ops->create_intent(tp, &dfp->dfp_work, dfp->dfp_count, sort);
	if (!lip)
		return 0;
	if (IS_ERR(lip))
		return PTR_ERR(lip);

	dfp->dfp_intent = lip;
	return 1;
}

/*
 * For each pending item in the intake list, log its intent item and the
 * associated extents, then add the entire intake list to the end of
 * the pending list.
 *
 * Returns 1 if at least one log item was associated with the deferred work;
 * 0 if there are no log items; or a negative errno.
 */
static int
xfs_defer_create_intents(
	struct xfs_trans		*tp)
{
	struct xfs_defer_pending	*dfp;
	int				ret = 0;

	list_for_each_entry(dfp, &tp->t_dfops, dfp_list) {
		int			ret2;

		trace_xfs_defer_create_intent(tp->t_mountp, dfp);
		ret2 = xfs_defer_create_intent(tp, dfp, true);
		if (ret2 < 0)
			return ret2;
		ret |= ret2;
	}
	return ret;
}

STATIC void
xfs_defer_pending_abort(
	struct xfs_mount		*mp,
	struct list_head		*dop_list)
{
	struct xfs_defer_pending	*dfp;
	const struct xfs_defer_op_type	*ops;

	/* Abort intent items that don't have a done item. */
	list_for_each_entry(dfp, dop_list, dfp_list) {
		ops = defer_op_types[dfp->dfp_type];
		trace_xfs_defer_pending_abort(mp, dfp);
		if (dfp->dfp_intent && !dfp->dfp_done) {
			ops->abort_intent(dfp->dfp_intent);
			dfp->dfp_intent = NULL;
		}
	}
}

/* Abort all the intents that were committed. */
STATIC void
xfs_defer_trans_abort(
	struct xfs_trans		*tp,
	struct list_head		*dop_pending)
{
	trace_xfs_defer_trans_abort(tp, _RET_IP_);
	xfs_defer_pending_abort(tp->t_mountp, dop_pending);
}

/*
 * Capture resources that the caller said not to release ("held") when the
 * transaction commits.  Caller is responsible for zero-initializing @dres.
 */
static int
xfs_defer_save_resources(
	struct xfs_defer_resources	*dres,
	struct xfs_trans		*tp)
{
	struct xfs_buf_log_item		*bli;
	struct xfs_inode_log_item	*ili;
	struct xfs_log_item		*lip;

	BUILD_BUG_ON(NBBY * sizeof(dres->dr_ordered) < XFS_DEFER_OPS_NR_BUFS);

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		switch (lip->li_type) {
		case XFS_LI_BUF:
			bli = container_of(lip, struct xfs_buf_log_item,
					   bli_item);
			if (bli->bli_flags & XFS_BLI_HOLD) {
				if (dres->dr_bufs >= XFS_DEFER_OPS_NR_BUFS) {
					ASSERT(0);
					return -EFSCORRUPTED;
				}
				if (bli->bli_flags & XFS_BLI_ORDERED)
					dres->dr_ordered |=
							(1U << dres->dr_bufs);
				else
					xfs_trans_dirty_buf(tp, bli->bli_buf);
				dres->dr_bp[dres->dr_bufs++] = bli->bli_buf;
			}
			break;
		case XFS_LI_INODE:
			ili = container_of(lip, struct xfs_inode_log_item,
					   ili_item);
			if (ili->ili_lock_flags == 0) {
				if (dres->dr_inos >= XFS_DEFER_OPS_NR_INODES) {
					ASSERT(0);
					return -EFSCORRUPTED;
				}
				xfs_trans_log_inode(tp, ili->ili_inode,
						    XFS_ILOG_CORE);
				dres->dr_ip[dres->dr_inos++] = ili->ili_inode;
			}
			break;
		default:
			break;
		}
	}

	return 0;
}

/* Attach the held resources to the transaction. */
static void
xfs_defer_restore_resources(
	struct xfs_trans		*tp,
	struct xfs_defer_resources	*dres)
{
	unsigned short			i;

	/* Rejoin the joined inodes. */
	for (i = 0; i < dres->dr_inos; i++)
		xfs_trans_ijoin(tp, dres->dr_ip[i], 0);

	/* Rejoin the buffers and dirty them so the log moves forward. */
	for (i = 0; i < dres->dr_bufs; i++) {
		xfs_trans_bjoin(tp, dres->dr_bp[i]);
		if (dres->dr_ordered & (1U << i))
			xfs_trans_ordered_buf(tp, dres->dr_bp[i]);
		xfs_trans_bhold(tp, dres->dr_bp[i]);
	}
}

/* Roll a transaction so we can do some deferred op processing. */
STATIC int
xfs_defer_trans_roll(
	struct xfs_trans		**tpp)
{
	struct xfs_defer_resources	dres = { };
	int				error;

	error = xfs_defer_save_resources(&dres, *tpp);
	if (error)
		return error;

	trace_xfs_defer_trans_roll(*tpp, _RET_IP_);

	/*
	 * Roll the transaction.  Rolling always gives back a new transaction
	 * (even if committing the old one fails!) to hand to the caller, so
	 * we join the held resources to the new transaction so that we always
	 * return with the held resources joined to @tpp, no matter what
	 * happened.
	 */
	error = xfs_trans_roll(tpp);

	xfs_defer_restore_resources(*tpp, &dres);

	if (error)
		trace_xfs_defer_trans_roll_error(*tpp, error);
	return error;
}
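
/*
 * For illustration: the save/restore dance above is what makes
 * xfs_trans_bhold() and lock_flags==0 inode joins survive a roll.  A
 * sketch of the caller-side pattern, with made-up variable names:
 *
 *	xfs_trans_ijoin(tp, ip, 0);	// lock_flags 0: stays joined
 *	xfs_trans_bhold(tp, bp);	// held across each commit
 *
 *	error = xfs_defer_finish(&tp);	// may roll many times
 *	// On return, ip and bp are joined to the transaction now in tp,
 *	// no matter how many rolls happened in between.
 */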

/*
 * Free up any items left in the list.
 */
static void
xfs_defer_cancel_list(
	struct xfs_mount		*mp,
	struct list_head		*dop_list)
{
	struct xfs_defer_pending	*dfp;
	struct xfs_defer_pending	*pli;
	struct list_head		*pwi;
	struct list_head		*n;
	const struct xfs_defer_op_type	*ops;

	/*
	 * Free the pending items.  Caller should already have arranged
	 * for the intent items to be released.
	 */
	list_for_each_entry_safe(dfp, pli, dop_list, dfp_list) {
		ops = defer_op_types[dfp->dfp_type];
		trace_xfs_defer_cancel_list(mp, dfp);
		list_del(&dfp->dfp_list);
		list_for_each_safe(pwi, n, &dfp->dfp_work) {
			list_del(pwi);
			dfp->dfp_count--;
			trace_xfs_defer_cancel_item(mp, dfp, pwi);
			ops->cancel_item(pwi);
		}
		ASSERT(dfp->dfp_count == 0);
		kmem_cache_free(xfs_defer_pending_cache, dfp);
	}
}

/*
 * Prevent a log intent item from pinning the tail of the log by logging a
 * done item to release the intent item; and then log a new intent item.
 * The caller should provide a fresh transaction and roll it after we're done.
 */
static int
xfs_defer_relog(
	struct xfs_trans		**tpp,
	struct list_head		*dfops)
{
	struct xlog			*log = (*tpp)->t_mountp->m_log;
	struct xfs_defer_pending	*dfp;
	xfs_lsn_t			threshold_lsn = NULLCOMMITLSN;

	ASSERT((*tpp)->t_flags & XFS_TRANS_PERM_LOG_RES);

	list_for_each_entry(dfp, dfops, dfp_list) {
		/*
		 * If the log intent item for this deferred op is not a part of
		 * the current log checkpoint, relog the intent item to keep
		 * the log tail moving forward.  We're ok with this being racy
		 * because an incorrect decision means we'll be a little slower
		 * at pushing the tail.
		 */
		if (dfp->dfp_intent == NULL ||
		    xfs_log_item_in_current_chkpt(dfp->dfp_intent))
			continue;

		/*
		 * Figure out where we need the tail to be in order to maintain
		 * the minimum required free space in the log.  Only sample
		 * the log threshold once per call.
		 */
		if (threshold_lsn == NULLCOMMITLSN) {
			threshold_lsn = xlog_grant_push_threshold(log, 0);
			if (threshold_lsn == NULLCOMMITLSN)
				break;
		}
		if (XFS_LSN_CMP(dfp->dfp_intent->li_lsn, threshold_lsn) >= 0)
			continue;

		trace_xfs_defer_relog_intent((*tpp)->t_mountp, dfp);
		XFS_STATS_INC((*tpp)->t_mountp, defer_relog);
		dfp->dfp_intent = xfs_trans_item_relog(dfp->dfp_intent, *tpp);
	}

	if ((*tpp)->t_flags & XFS_TRANS_DIRTY)
		return xfs_defer_trans_roll(tpp);
	return 0;
}

/*
 * Log an intent-done item for the first pending intent, and finish the work
 * items.
 */
static int
xfs_defer_finish_one(
	struct xfs_trans		*tp,
	struct xfs_defer_pending	*dfp)
{
	const struct xfs_defer_op_type	*ops = defer_op_types[dfp->dfp_type];
	struct xfs_btree_cur		*state = NULL;
	struct list_head		*li, *n;
	int				error;

	trace_xfs_defer_pending_finish(tp->t_mountp, dfp);

	dfp->dfp_done = ops->create_done(tp, dfp->dfp_intent, dfp->dfp_count);
	list_for_each_safe(li, n, &dfp->dfp_work) {
		list_del(li);
		dfp->dfp_count--;
		trace_xfs_defer_finish_item(tp->t_mountp, dfp, li);
		error = ops->finish_item(tp, dfp->dfp_done, li, &state);
		if (error == -EAGAIN) {
			int		ret;

			/*
			 * Caller wants a fresh transaction; put the work item
			 * back on the list and log a new log intent item to
			 * replace the old one.  See "Requesting a Fresh
			 * Transaction while Finishing Deferred Work" above.
			 */
			list_add(li, &dfp->dfp_work);
			dfp->dfp_count++;
			dfp->dfp_done = NULL;
			dfp->dfp_intent = NULL;
			ret = xfs_defer_create_intent(tp, dfp, false);
			if (ret < 0)
				error = ret;
		}

		if (error)
			goto out;
	}

	/* Done with the dfp, free it. */
	list_del(&dfp->dfp_list);
	kmem_cache_free(xfs_defer_pending_cache, dfp);
out:
	if (ops->finish_cleanup)
		ops->finish_cleanup(tp, state, error);
	return error;
}
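
/*
 * For illustration: a hypothetical ->finish_item that uses the -EAGAIN
 * continuation protocol handled above.  The "foo" names are made up; the
 * prototype matches how finish_item is invoked from this file:
 *
 *	STATIC int
 *	xfs_foo_finish_item(
 *		struct xfs_trans	*tp,
 *		struct xfs_log_item	*done,
 *		struct list_head	*item,
 *		struct xfs_btree_cur	**state)
 *	{
 *		struct xfs_foo_intent	*fi = xfs_foo_entry(item);
 *		int			error;
 *
 *		error = xfs_foo_do_one_step(tp, fi, state);
 *		if (error == -EAGAIN) {
 *			// Out of reservation.  fi now describes only the
 *			// unfinished work; returning -EAGAIN makes
 *			// xfs_defer_finish_one() requeue the item and log
 *			// a fresh intent to cover it.
 *			return -EAGAIN;
 *		}
 *		kmem_cache_free(xfs_foo_intent_cache, fi);
 *		return error;
 *	}
 */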

/*
 * Finish all the pending work.  This involves logging intent items for
 * any work items that wandered in since the last transaction roll (if
 * one has even happened), rolling the transaction, and finishing the
 * work items in the first item on the logged-and-pending list.
 */
int
xfs_defer_finish_noroll(
	struct xfs_trans		**tp)
{
	struct xfs_defer_pending	*dfp = NULL;
	int				error = 0;
	LIST_HEAD(dop_pending);

	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);

	trace_xfs_defer_finish(*tp, _RET_IP_);

	/* Until we run out of pending work to finish... */
	while (!list_empty(&dop_pending) || !list_empty(&(*tp)->t_dfops)) {
		/*
		 * Deferred items that are created in the process of finishing
		 * other deferred work items should be queued at the head of
		 * the pending list, which puts them ahead of the deferred work
		 * that was created by the caller.  This keeps the number of
		 * pending work items to a minimum, which decreases the amount
		 * of time that any one intent item can stick around in memory,
		 * pinning the log tail.
		 */
		int has_intents = xfs_defer_create_intents(*tp);

		list_splice_init(&(*tp)->t_dfops, &dop_pending);

		if (has_intents < 0) {
			error = has_intents;
			goto out_shutdown;
		}
		if (has_intents || dfp) {
			error = xfs_defer_trans_roll(tp);
			if (error)
				goto out_shutdown;

			/* Relog intent items to keep the log moving. */
			error = xfs_defer_relog(tp, &dop_pending);
			if (error)
				goto out_shutdown;
		}

		dfp = list_first_entry(&dop_pending, struct xfs_defer_pending,
				       dfp_list);
		error = xfs_defer_finish_one(*tp, dfp);
		if (error && error != -EAGAIN)
			goto out_shutdown;
	}

	trace_xfs_defer_finish_done(*tp, _RET_IP_);
	return 0;

out_shutdown:
	xfs_defer_trans_abort(*tp, &dop_pending);
	xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE);
	trace_xfs_defer_finish_error(*tp, error);
	xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending);
	xfs_defer_cancel(*tp);
	return error;
}

int
xfs_defer_finish(
	struct xfs_trans	**tp)
{
	int			error;

	/*
	 * Finish and roll the transaction once more to avoid returning to the
	 * caller with a dirty transaction.
	 */
	error = xfs_defer_finish_noroll(tp);
	if (error)
		return error;
	if ((*tp)->t_flags & XFS_TRANS_DIRTY) {
		error = xfs_defer_trans_roll(tp);
		if (error) {
			xfs_force_shutdown((*tp)->t_mountp,
					   SHUTDOWN_CORRUPT_INCORE);
			return error;
		}
	}

	/* Reset LOWMODE now that we've finished all the dfops. */
	ASSERT(list_empty(&(*tp)->t_dfops));
	(*tp)->t_flags &= ~XFS_TRANS_LOWMODE;
	return 0;
}

void
xfs_defer_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	trace_xfs_defer_cancel(tp, _RET_IP_);
	xfs_defer_cancel_list(mp, &tp->t_dfops);
}

/* Add an item for later deferred processing. */
void
xfs_defer_add(
	struct xfs_trans		*tp,
	enum xfs_defer_ops_type		type,
	struct list_head		*li)
{
	struct xfs_defer_pending	*dfp = NULL;
	const struct xfs_defer_op_type	*ops = defer_op_types[type];

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	BUILD_BUG_ON(ARRAY_SIZE(defer_op_types) != XFS_DEFER_OPS_TYPE_MAX);

	/*
	 * Add the item to a pending item at the end of the intake list.
	 * If the last pending item has the same type, reuse it.  Else,
	 * create a new pending item at the end of the intake list.
	 */
	if (!list_empty(&tp->t_dfops)) {
		dfp = list_last_entry(&tp->t_dfops,
				struct xfs_defer_pending, dfp_list);
		if (dfp->dfp_type != type ||
		    (ops->max_items && dfp->dfp_count >= ops->max_items))
			dfp = NULL;
	}
	if (!dfp) {
		dfp = kmem_cache_zalloc(xfs_defer_pending_cache,
				GFP_NOFS | __GFP_NOFAIL);
		dfp->dfp_type = type;
		dfp->dfp_intent = NULL;
		dfp->dfp_done = NULL;
		dfp->dfp_count = 0;
		INIT_LIST_HEAD(&dfp->dfp_work);
		list_add_tail(&dfp->dfp_list, &tp->t_dfops);
	}

	list_add_tail(li, &dfp->dfp_work);
	trace_xfs_defer_add_item(tp->t_mountp, dfp, li);
	dfp->dfp_count++;
}
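
/*
 * For illustration: consecutive adds of the same type coalesce into the
 * tail pending item until ->max_items is reached, so a (made-up) loop
 * like this ends up with roughly n / max_items intent items logged at
 * the next roll:
 *
 *	for (i = 0; i < n; i++)
 *		xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_RMAP, &ri[i].ri_list);
 */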

/*
 * Move deferred ops from one transaction to another and reset the source to
 * initial state. This is primarily used to carry state forward across
 * transaction rolls with pending dfops.
 */
void
xfs_defer_move(
	struct xfs_trans	*dtp,
	struct xfs_trans	*stp)
{
	list_splice_init(&stp->t_dfops, &dtp->t_dfops);

	/*
	 * Low free space mode was historically controlled by a dfops field.
	 * This meant that low mode state potentially carried across multiple
	 * transaction rolls. Transfer low mode on a dfops move to preserve
	 * that behavior.
	 */
	dtp->t_flags |= (stp->t_flags & XFS_TRANS_LOWMODE);
	stp->t_flags &= ~XFS_TRANS_LOWMODE;
}

/*
 * Prepare a chain of fresh deferred ops work items to be completed later.  Log
 * recovery requires the ability to put off until later the actual finishing
 * work so that it can process unfinished items recovered from the log in
 * correct order.
 *
 * Create and log intent items for all the work that we're capturing so that we
 * can be assured that the items will get replayed if the system goes down
 * before log recovery gets a chance to finish the work it put off.  The entire
 * deferred ops state is transferred to the capture structure and the
 * transaction is then ready for the caller to commit it.  If there are no
 * intent items to capture, this function returns NULL.
 *
 * The capture structure obtains its own references to any held inodes and
 * buffers, since callers are expected to drop their own references once the
 * transaction commits.
 */
static struct xfs_defer_capture *
xfs_defer_ops_capture(
	struct xfs_trans		*tp)
{
	struct xfs_defer_capture	*dfc;
	unsigned short			i;
	int				error;

	if (list_empty(&tp->t_dfops))
		return NULL;

	error = xfs_defer_create_intents(tp);
	if (error < 0)
		return ERR_PTR(error);

	/* Create an object to capture the defer ops. */
	dfc = kmem_zalloc(sizeof(*dfc), KM_NOFS);
	INIT_LIST_HEAD(&dfc->dfc_list);
	INIT_LIST_HEAD(&dfc->dfc_dfops);

	/* Move the dfops chain and transaction state to the capture struct. */
	list_splice_init(&tp->t_dfops, &dfc->dfc_dfops);
	dfc->dfc_tpflags = tp->t_flags & XFS_TRANS_LOWMODE;
	tp->t_flags &= ~XFS_TRANS_LOWMODE;

	/* Capture the remaining block reservations along with the dfops. */
	dfc->dfc_blkres = tp->t_blk_res - tp->t_blk_res_used;
	dfc->dfc_rtxres = tp->t_rtx_res - tp->t_rtx_res_used;

	/* Preserve the log reservation size. */
	dfc->dfc_logres = tp->t_log_res;

	error = xfs_defer_save_resources(&dfc->dfc_held, tp);
	if (error) {
		/*
		 * Resource capture should never fail, but if it does, we
		 * still have to shut down the log and release things
		 * properly.
		 */
		xfs_force_shutdown(tp->t_mountp, SHUTDOWN_CORRUPT_INCORE);
	}

	/*
	 * Grab extra references to the inodes and buffers because callers are
	 * expected to release their held references after we commit the
	 * transaction.
	 */
	for (i = 0; i < dfc->dfc_held.dr_inos; i++) {
		ASSERT(xfs_isilocked(dfc->dfc_held.dr_ip[i], XFS_ILOCK_EXCL));
		ihold(VFS_I(dfc->dfc_held.dr_ip[i]));
	}

	for (i = 0; i < dfc->dfc_held.dr_bufs; i++)
		xfs_buf_hold(dfc->dfc_held.dr_bp[i]);

	return dfc;
}

/* Release all resources that we used to capture deferred ops. */
void
xfs_defer_ops_capture_abort(
	struct xfs_mount		*mp,
	struct xfs_defer_capture	*dfc)
{
	unsigned short			i;

	xfs_defer_pending_abort(mp, &dfc->dfc_dfops);
	xfs_defer_cancel_list(mp, &dfc->dfc_dfops);

	for (i = 0; i < dfc->dfc_held.dr_bufs; i++)
		xfs_buf_relse(dfc->dfc_held.dr_bp[i]);

	for (i = 0; i < dfc->dfc_held.dr_inos; i++)
		xfs_irele(dfc->dfc_held.dr_ip[i]);

	kmem_free(dfc);
}

/*
 * Capture any deferred ops and commit the transaction.  This is the last step
 * needed to finish a log intent item that we recovered from the log.  If any
 * of the deferred ops operate on an inode, that inode must already be joined
 * to the transaction so that its reference can be transferred to the capture
 * structure.  The caller must hold ILOCK_EXCL on each such inode, and must
 * unlock it before calling xfs_defer_ops_continue.
 */
int
xfs_defer_ops_capture_and_commit(
	struct xfs_trans		*tp,
	struct list_head		*capture_list)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_defer_capture	*dfc;
	int				error;

	/* If we don't capture anything, commit transaction and exit. */
	dfc = xfs_defer_ops_capture(tp);
	if (IS_ERR(dfc)) {
		xfs_trans_cancel(tp);
		return PTR_ERR(dfc);
	}
	if (!dfc)
		return xfs_trans_commit(tp);

	/* Commit the transaction and add the capture structure to the list. */
	error = xfs_trans_commit(tp);
	if (error) {
		xfs_defer_ops_capture_abort(mp, dfc);
		return error;
	}

	list_add_tail(&dfc->dfc_list, capture_list);
	return 0;
}

/*
 * Attach a chain of captured deferred ops to a new transaction and free the
 * capture structure.  If an inode was captured, it will be passed back to the
 * caller with ILOCK_EXCL held and joined to the transaction with lockflags==0.
 * The caller now owns the inode reference.
 */
void
xfs_defer_ops_continue(
	struct xfs_defer_capture	*dfc,
	struct xfs_trans		*tp,
	struct xfs_defer_resources	*dres)
{
	unsigned int			i;

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(!(tp->t_flags & XFS_TRANS_DIRTY));

	/* Lock the captured resources to the new transaction. */
	if (dfc->dfc_held.dr_inos == 2)
		xfs_lock_two_inodes(dfc->dfc_held.dr_ip[0], XFS_ILOCK_EXCL,
				    dfc->dfc_held.dr_ip[1], XFS_ILOCK_EXCL);
	else if (dfc->dfc_held.dr_inos == 1)
		xfs_ilock(dfc->dfc_held.dr_ip[0], XFS_ILOCK_EXCL);

	for (i = 0; i < dfc->dfc_held.dr_bufs; i++)
		xfs_buf_lock(dfc->dfc_held.dr_bp[i]);

	/* Join the captured resources to the new transaction. */
	xfs_defer_restore_resources(tp, &dfc->dfc_held);
	memcpy(dres, &dfc->dfc_held, sizeof(struct xfs_defer_resources));
	dres->dr_bufs = 0;

	/* Move captured dfops chain and state to the transaction. */
	list_splice_init(&dfc->dfc_dfops, &tp->t_dfops);
	tp->t_flags |= dfc->dfc_tpflags;

	kmem_free(dfc);
}
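
/*
 * For illustration: log recovery uses the capture/continue pair roughly
 * as follows (error handling elided; the reservation setup is sketched
 * from the captured dfc_blkres/dfc_rtxres fields above):
 *
 *	// While replaying one intent item:
 *	error = xfs_defer_ops_capture_and_commit(tp, capture_list);
 *
 *	// Later, for each capture structure on capture_list:
 *	struct xfs_defer_resources	dres;
 *
 *	error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres,
 *			dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp);
 *	xfs_defer_ops_continue(dfc, tp, &dres);
 *	error = xfs_trans_commit(tp);	// commit finishes the dfops
 *	xfs_defer_resources_rele(&dres);
 */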

/* Release the resources captured and continued during recovery. */
void
xfs_defer_resources_rele(
	struct xfs_defer_resources	*dres)
{
	unsigned short			i;

	for (i = 0; i < dres->dr_inos; i++) {
		xfs_iunlock(dres->dr_ip[i], XFS_ILOCK_EXCL);
		xfs_irele(dres->dr_ip[i]);
		dres->dr_ip[i] = NULL;
	}

	for (i = 0; i < dres->dr_bufs; i++) {
		xfs_buf_relse(dres->dr_bp[i]);
		dres->dr_bp[i] = NULL;
	}

	dres->dr_inos = 0;
	dres->dr_bufs = 0;
	dres->dr_ordered = 0;
}

static inline int __init
xfs_defer_init_cache(void)
{
	xfs_defer_pending_cache = kmem_cache_create("xfs_defer_pending",
			sizeof(struct xfs_defer_pending),
			0, 0, NULL);

	return xfs_defer_pending_cache != NULL ? 0 : -ENOMEM;
}

static inline void
xfs_defer_destroy_cache(void)
{
	kmem_cache_destroy(xfs_defer_pending_cache);
	xfs_defer_pending_cache = NULL;
}

/* Set up caches for deferred work items. */
int __init
xfs_defer_init_item_caches(void)
{
	int				error;

	error = xfs_defer_init_cache();
	if (error)
		return error;
	error = xfs_rmap_intent_init_cache();
	if (error)
		goto err;
	error = xfs_refcount_intent_init_cache();
	if (error)
		goto err;
	error = xfs_bmap_intent_init_cache();
	if (error)
		goto err;
	error = xfs_extfree_intent_init_cache();
	if (error)
		goto err;
	error = xfs_attr_intent_init_cache();
	if (error)
		goto err;
	return 0;
err:
	xfs_defer_destroy_item_caches();
	return error;
}

/* Destroy all the deferred work item caches, if they've been allocated. */
void
xfs_defer_destroy_item_caches(void)
{
	xfs_attr_intent_destroy_cache();
	xfs_extfree_intent_destroy_cache();
	xfs_bmap_intent_destroy_cache();
	xfs_refcount_intent_destroy_cache();
	xfs_rmap_intent_destroy_cache();
	xfs_defer_destroy_cache();
}