/* xref: /kernel/linux/linux-5.10/fs/xfs/xfs_rmap_item.c (revision 8c2ecf20) */
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_rmap_item.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"

kmem_zone_t	*xfs_rui_zone;
kmem_zone_t	*xfs_rud_zone;

static const struct xfs_item_ops xfs_rui_item_ops;

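/* Convert a generic log item back into its containing RUI. */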
static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rui_log_item, rui_item);
}

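/*
 * Free an RUI.  Items with more extents than fit in the embedded format
 * structure were allocated with kmem_zalloc() in xfs_rui_init() and must be
 * freed the same way; everything else came from the RUI zone.
 */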
STATIC void
xfs_rui_item_free(
	struct xfs_rui_log_item	*ruip)
{
	if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
		kmem_free(ruip);
	else
		kmem_cache_free(xfs_rui_zone, ruip);
}

/*
 * Freeing the RUI requires that we remove it from the AIL if it has already
 * been placed there. However, the RUI may not yet have been placed in the AIL
 * when called by xfs_rui_release() from RUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the RUI.
 */
STATIC void
xfs_rui_release(
	struct xfs_rui_log_item	*ruip)
{
	ASSERT(atomic_read(&ruip->rui_refcount) > 0);
	if (atomic_dec_and_test(&ruip->rui_refcount)) {
		xfs_trans_ail_delete(&ruip->rui_item, SHUTDOWN_LOG_IO_ERROR);
		xfs_rui_item_free(ruip);
	}
}

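/*
 * This returns the number of iovecs needed to log the given rui item.
 * We only use 1 iovec, and we point that at the rui_log_format structure
 * embedded in the rui item.
 */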
STATIC void
xfs_rui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rui log item. We use only 1 iovec, and we point that
 * at the rui_log_format structure embedded in the rui item.
 * It is at this point that we assert that all of the extent
 * slots in the rui item have been filled.
 */
STATIC void
xfs_rui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&ruip->rui_next_extent) ==
			ruip->rui_format.rui_nextents);

	ruip->rui_format.rui_type = XFS_LI_RUI;
	ruip->rui_format.rui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
			xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents));
}

/*
 * The unpin operation is the last place an RUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the RUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the RUI to either construct
 * and commit the RUD or drop the RUD's reference in the event of error. Simply
 * drop the log's RUI reference now that the log is done with it.
 */
STATIC void
xfs_rui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	xfs_rui_release(ruip);
}

/*
 * The RUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, an RUD isn't going to be
 * constructed and thus we free the RUI here directly.
 */
STATIC void
xfs_rui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_rui_release(RUI_ITEM(lip));
}

/*
 * Allocate and initialize an rui item with the given number of extents.
 */
STATIC struct xfs_rui_log_item *
xfs_rui_init(
	struct xfs_mount		*mp,
	uint				nextents)
{
	struct xfs_rui_log_item		*ruip;

	ASSERT(nextents > 0);
	if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
		ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), 0);
	else
		ruip = kmem_cache_zalloc(xfs_rui_zone,
					 GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
	ruip->rui_format.rui_nextents = nextents;
	ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
	atomic_set(&ruip->rui_next_extent, 0);
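	/*
	 * Two references: one for the log, dropped at unpin time (or at
	 * release if the transaction is cancelled), and one dropped when
	 * the matching RUD is released or the intent is aborted.
	 */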
	atomic_set(&ruip->rui_refcount, 2);

	return ruip;
}

/*
 * Copy an RUI format buffer from the given buf, and into the destination
 * RUI format structure.  The RUI/RUD items were designed not to need any
 * special alignment handling.
 */
STATIC int
xfs_rui_copy_format(
	struct xfs_log_iovec		*buf,
	struct xfs_rui_log_format	*dst_rui_fmt)
{
	struct xfs_rui_log_format	*src_rui_fmt;
	uint				len;

	src_rui_fmt = buf->i_addr;
	len = xfs_rui_log_format_sizeof(src_rui_fmt->rui_nextents);

	if (buf->i_len != len) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
		return -EFSCORRUPTED;
	}

	memcpy(dst_rui_fmt, src_rui_fmt, len);
	return 0;
}

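/* Convert a generic log item back into its containing RUD. */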
static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rud_log_item, rud_item);
}

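/*
 * This returns the number of iovecs needed to log the given rud item.
 * We only use 1 iovec, and we point that at the rud_log_format structure
 * embedded in the rud item.
 */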
STATIC void
xfs_rud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_rud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rud log item. We use only 1 iovec, and we point that
 * at the rud_log_format structure embedded in the rud item.
 */
STATIC void
xfs_rud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	rudp->rud_format.rud_type = XFS_LI_RUD;
	rudp->rud_format.rud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
			sizeof(struct xfs_rud_log_format));
}

/*
 * The RUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the RUI and free the
 * RUD.
 */
STATIC void
xfs_rud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);

	xfs_rui_release(rudp->rud_ruip);
	kmem_cache_free(xfs_rud_zone, rudp);
}

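/*
 * The RUD exists only to mark the RUI done, so it is released as soon as
 * the transaction commits rather than being tracked in the AIL.
 */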
static const struct xfs_item_ops xfs_rud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED,
	.iop_size	= xfs_rud_item_size,
	.iop_format	= xfs_rud_item_format,
	.iop_release	= xfs_rud_item_release,
};

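/* Allocate an RUD, point it at the RUI it will finish, and add it to the
 * transaction. */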
static struct xfs_rud_log_item *
xfs_trans_get_rud(
	struct xfs_trans		*tp,
	struct xfs_rui_log_item		*ruip)
{
	struct xfs_rud_log_item		*rudp;

	rudp = kmem_cache_zalloc(xfs_rud_zone, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD,
			  &xfs_rud_item_ops);
	rudp->rud_ruip = ruip;
	rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;

	xfs_trans_add_item(tp, &rudp->rud_item);
	return rudp;
}

/* Set the map extent flags for this reverse mapping. */
static void
xfs_trans_set_rmap_flags(
	struct xfs_map_extent		*rmap,
	enum xfs_rmap_intent_type	type,
	int				whichfork,
	xfs_exntst_t			state)
{
	rmap->me_flags = 0;
	if (state == XFS_EXT_UNWRITTEN)
		rmap->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
	if (whichfork == XFS_ATTR_FORK)
		rmap->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
	switch (type) {
	case XFS_RMAP_MAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP;
		break;
	case XFS_RMAP_MAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
		break;
	case XFS_RMAP_UNMAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP;
		break;
	case XFS_RMAP_UNMAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
		break;
	case XFS_RMAP_CONVERT:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT;
		break;
	case XFS_RMAP_CONVERT_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
		break;
	case XFS_RMAP_ALLOC:
		rmap->me_flags |= XFS_RMAP_EXTENT_ALLOC;
		break;
	case XFS_RMAP_FREE:
		rmap->me_flags |= XFS_RMAP_EXTENT_FREE;
		break;
	default:
		ASSERT(0);
	}
}

/*
 * Finish an rmap update and log it to the RUD. Note that the transaction is
 * marked dirty regardless of whether the rmap update succeeds or fails to
 * support the RUI/RUD lifecycle rules.
 */
static int
xfs_trans_log_finish_rmap_update(
	struct xfs_trans		*tp,
	struct xfs_rud_log_item		*rudp,
	enum xfs_rmap_intent_type	type,
	uint64_t			owner,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			blockcount,
	xfs_exntst_t			state,
	struct xfs_btree_cur		**pcur)
{
	int				error;

	error = xfs_rmap_finish_one(tp, type, owner, whichfork, startoff,
			startblock, blockcount, state, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the RUI and frees the RUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

	return error;
}

/*
 * Sort rmap intents by AG.  Processing the updates in ascending AG order
 * keeps AG header lock acquisition in a consistent order and so avoids
 * deadlocks when a transaction touches more than one AG.
 */
static int
xfs_rmap_update_diff_items(
	void				*priv,
	const struct list_head		*a,
	const struct list_head		*b)
{
	struct xfs_mount		*mp = priv;
	struct xfs_rmap_intent		*ra;
	struct xfs_rmap_intent		*rb;

	ra = container_of(a, struct xfs_rmap_intent, ri_list);
	rb = container_of(b, struct xfs_rmap_intent, ri_list);
	return  XFS_FSB_TO_AGNO(mp, ra->ri_bmap.br_startblock) -
		XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
}

/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_rui_log_item		*ruip,
	struct xfs_rmap_intent		*rmap)
{
	uint				next_extent;
	struct xfs_map_extent		*map;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
	ASSERT(next_extent < ruip->rui_format.rui_nextents);
	map = &ruip->rui_format.rui_extents[next_extent];
	map->me_owner = rmap->ri_owner;
	map->me_startblock = rmap->ri_bmap.br_startblock;
	map->me_startoff = rmap->ri_bmap.br_startoff;
	map->me_len = rmap->ri_bmap.br_blockcount;
	xfs_trans_set_rmap_flags(map, rmap->ri_type, rmap->ri_whichfork,
			rmap->ri_bmap.br_state);
}

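/* Create an RUI covering all the deferred rmap updates, optionally sorting
 * them by AG first, and log each one into the intent. */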
static struct xfs_log_item *
xfs_rmap_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_rui_log_item		*ruip = xfs_rui_init(mp, count);
	struct xfs_rmap_intent		*rmap;

	ASSERT(count > 0);

	xfs_trans_add_item(tp, &ruip->rui_item);
	if (sort)
		list_sort(mp, items, xfs_rmap_update_diff_items);
	list_for_each_entry(rmap, items, ri_list)
		xfs_rmap_update_log_item(tp, ruip, rmap);
	return &ruip->rui_item;
}

/* Get an RUD so we can process all the deferred rmap updates. */
static struct xfs_log_item *
xfs_rmap_update_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	return &xfs_trans_get_rud(tp, RUI_ITEM(intent))->rud_item;
}

/* Process a deferred rmap update. */
STATIC int
xfs_rmap_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_rmap_intent		*rmap;
	int				error;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	error = xfs_trans_log_finish_rmap_update(tp, RUD_ITEM(done),
			rmap->ri_type, rmap->ri_owner, rmap->ri_whichfork,
			rmap->ri_bmap.br_startoff, rmap->ri_bmap.br_startblock,
			rmap->ri_bmap.br_blockcount, rmap->ri_bmap.br_state,
			state);
	kmem_free(rmap);
	return error;
}

/* Abort all pending RUIs. */
STATIC void
xfs_rmap_update_abort_intent(
	struct xfs_log_item	*intent)
{
	xfs_rui_release(RUI_ITEM(intent));
}

/* Cancel a deferred rmap update. */
STATIC void
xfs_rmap_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_rmap_intent		*rmap;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	kmem_free(rmap);
}

const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
	.max_items	= XFS_RUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_rmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup = xfs_rmap_finish_one_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
};

/*
 * Process an rmap update intent item that was recovered from the log.
 * We need to update the rmapbt.
 */
STATIC int
xfs_rui_item_recover(
	struct xfs_log_item		*lip,
	struct list_head		*capture_list)
{
	struct xfs_rui_log_item		*ruip = RUI_ITEM(lip);
	struct xfs_map_extent		*rmap;
	struct xfs_rud_log_item		*rudp;
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*rcur = NULL;
	struct xfs_mount		*mp = lip->li_mountp;
	xfs_fsblock_t			startblock_fsb;
	enum xfs_rmap_intent_type	type;
	xfs_exntst_t			state;
	bool				op_ok;
	int				i;
	int				whichfork;
	int				error = 0;

	/*
	 * First check the validity of the extents described by the
	 * RUI.  If any are bad, then assume that all are bad and
	 * just toss the RUI.
	 */
	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		rmap = &ruip->rui_format.rui_extents[i];
		startblock_fsb = XFS_BB_TO_FSB(mp,
				   XFS_FSB_TO_DADDR(mp, rmap->me_startblock));
		switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
		case XFS_RMAP_EXTENT_MAP:
		case XFS_RMAP_EXTENT_MAP_SHARED:
		case XFS_RMAP_EXTENT_UNMAP:
		case XFS_RMAP_EXTENT_UNMAP_SHARED:
		case XFS_RMAP_EXTENT_CONVERT:
		case XFS_RMAP_EXTENT_CONVERT_SHARED:
		case XFS_RMAP_EXTENT_ALLOC:
		case XFS_RMAP_EXTENT_FREE:
			op_ok = true;
			break;
		default:
			op_ok = false;
			break;
		}
		if (!op_ok || startblock_fsb == 0 ||
		    rmap->me_len == 0 ||
		    startblock_fsb >= mp->m_sb.sb_dblocks ||
		    rmap->me_len >= mp->m_sb.sb_agblocks ||
		    (rmap->me_flags & ~XFS_RMAP_EXTENT_FLAGS))
			return -EFSCORRUPTED;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			mp->m_rmap_maxlevels, 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;
	rudp = xfs_trans_get_rud(tp, ruip);

	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		rmap = &ruip->rui_format.rui_extents[i];
		state = (rmap->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
				XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
		whichfork = (rmap->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
				XFS_ATTR_FORK : XFS_DATA_FORK;
		switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
		case XFS_RMAP_EXTENT_MAP:
			type = XFS_RMAP_MAP;
			break;
		case XFS_RMAP_EXTENT_MAP_SHARED:
			type = XFS_RMAP_MAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_UNMAP:
			type = XFS_RMAP_UNMAP;
			break;
		case XFS_RMAP_EXTENT_UNMAP_SHARED:
			type = XFS_RMAP_UNMAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_CONVERT:
			type = XFS_RMAP_CONVERT;
			break;
		case XFS_RMAP_EXTENT_CONVERT_SHARED:
			type = XFS_RMAP_CONVERT_SHARED;
			break;
		case XFS_RMAP_EXTENT_ALLOC:
			type = XFS_RMAP_ALLOC;
			break;
		case XFS_RMAP_EXTENT_FREE:
			type = XFS_RMAP_FREE;
			break;
		default:
			XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
			error = -EFSCORRUPTED;
			goto abort_error;
		}
		error = xfs_trans_log_finish_rmap_update(tp, rudp, type,
				rmap->me_owner, whichfork,
				rmap->me_startoff, rmap->me_startblock,
				rmap->me_len, state, &rcur);
		if (error)
			goto abort_error;
	}

	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	return xfs_defer_ops_capture_and_commit(tp, NULL, capture_list);

abort_error:
	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	xfs_trans_cancel(tp);
	return error;
}

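/* Does this log item carry the given intent id?  Used during recovery to
 * find and cancel an RUI when its RUD is seen in the log. */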
STATIC bool
xfs_rui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return RUI_ITEM(lip)->rui_format.rui_id == intent_id;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_rui_item_relog(
	struct xfs_log_item		*intent,
	struct xfs_trans		*tp)
{
	struct xfs_rud_log_item		*rudp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_map_extent		*extp;
	unsigned int			count;

	count = RUI_ITEM(intent)->rui_format.rui_nextents;
	extp = RUI_ITEM(intent)->rui_format.rui_extents;

	tp->t_flags |= XFS_TRANS_DIRTY;
	rudp = xfs_trans_get_rud(tp, RUI_ITEM(intent));
	set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

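	/*
	 * Mark the old intent done with an RUD (above), then log a new RUI
	 * carrying the same extents so the pending work moves forward past
	 * the old log tail.
	 */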
	ruip = xfs_rui_init(tp->t_mountp, count);
	memcpy(ruip->rui_format.rui_extents, extp, count * sizeof(*extp));
	atomic_set(&ruip->rui_next_extent, count);
	xfs_trans_add_item(tp, &ruip->rui_item);
	set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);
	return &ruip->rui_item;
}

static const struct xfs_item_ops xfs_rui_item_ops = {
	.iop_size	= xfs_rui_item_size,
	.iop_format	= xfs_rui_item_format,
	.iop_unpin	= xfs_rui_item_unpin,
	.iop_release	= xfs_rui_item_release,
	.iop_recover	= xfs_rui_item_recover,
	.iop_match	= xfs_rui_item_match,
	.iop_relog	= xfs_rui_item_relog,
};

/*
 * This routine is called to create an in-core extent rmap update
 * item from the rui format structure which was logged on disk.
 * It allocates an in-core rui, copies the extents from the format
 * structure into it, and adds the rui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_rui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	int				error;
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_rui_log_format	*rui_formatp;

	rui_formatp = item->ri_buf[0].i_addr;

	ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
	error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format);
	if (error) {
		xfs_rui_item_free(ruip);
		return error;
	}
	atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
	/*
	 * Insert the intent into the AIL directly and drop one reference so
	 * that finishing or canceling the work will drop the other.
	 */
	xfs_trans_ail_insert(log->l_ailp, &ruip->rui_item, lsn);
	xfs_rui_release(ruip);
	return 0;
}

const struct xlog_recover_item_ops xlog_rui_item_ops = {
	.item_type		= XFS_LI_RUI,
	.commit_pass2		= xlog_recover_rui_commit_pass2,
};

/*
 * This routine is called when an RUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding RUI if it
 * was still in the log. To do this it searches the AIL for the RUI with an id
 * equal to that in the RUD format structure. If we find it, we drop the RUI's
 * intent reference, which removes the RUI from the AIL and frees it.
 */
STATIC int
xlog_recover_rud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_rud_log_format	*rud_formatp;

	rud_formatp = item->ri_buf[0].i_addr;
	ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format));

	xlog_recover_release_intent(log, XFS_LI_RUI, rud_formatp->rud_rui_id);
	return 0;
}

const struct xlog_recover_item_ops xlog_rud_item_ops = {
	.item_type		= XFS_LI_RUD,
	.commit_pass2		= xlog_recover_rud_commit_pass2,
};