// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_refcount_item.h"
#include "xfs_log.h"
#include "xfs_refcount.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_ag.h"

struct kmem_cache	*xfs_cui_cache;
struct kmem_cache	*xfs_cud_cache;

static const struct xfs_item_ops xfs_cui_item_ops;

static inline struct xfs_cui_log_item *CUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_cui_log_item, cui_item);
}

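/*
 * Free a CUI.  Items with more extents than fit in the base structure were
 * allocated with kmem_zalloc() rather than from the slab cache, so free them
 * accordingly.  The shadow log vector buffer is freed in either case.
 */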
STATIC void
xfs_cui_item_free(
	struct xfs_cui_log_item	*cuip)
{
	kmem_free(cuip->cui_item.li_lv_shadow);
	if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
		kmem_free(cuip);
	else
		kmem_cache_free(xfs_cui_cache, cuip);
}

/*
 * Freeing the CUI requires that we remove it from the AIL if it has already
 * been placed there. However, the CUI may not yet have been placed in the AIL
 * when called by xfs_cui_release() from CUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the CUI.
 */
STATIC void
xfs_cui_release(
	struct xfs_cui_log_item	*cuip)
{
	ASSERT(atomic_read(&cuip->cui_refcount) > 0);
	if (!atomic_dec_and_test(&cuip->cui_refcount))
		return;

	xfs_trans_ail_delete(&cuip->cui_item, 0);
	xfs_cui_item_free(cuip);
}


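/*
 * Report the number of log iovecs and the amount of log space needed to log
 * the given CUI item.
 */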
STATIC void
xfs_cui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given cui log item. We use only 1 iovec, and we point that
 * at the cui_log_format structure embedded in the cui item.
 * It is at this point that we assert that all of the extent
 * slots in the cui item have been filled.
 */
STATIC void
xfs_cui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&cuip->cui_next_extent) ==
			cuip->cui_format.cui_nextents);

	cuip->cui_format.cui_type = XFS_LI_CUI;
	cuip->cui_format.cui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUI_FORMAT, &cuip->cui_format,
			xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents));
}

/*
 * The unpin operation is the last place a CUI is manipulated in the log. It
 * is either inserted in the AIL or aborted in the event of a log I/O error.
 * In either case, the CUI transaction has been successfully committed to make
 * it this far. Therefore, we expect whoever committed the CUI to either
 * construct and commit the CUD or drop the CUD's reference in the event of
 * error. Simply drop the log's CUI reference now that the log is done with
 * it.
 */
STATIC void
xfs_cui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);

	xfs_cui_release(cuip);
}

/*
 * The CUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, a CUD isn't going to be
 * constructed and thus we free the CUI here directly.
 */
STATIC void
xfs_cui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_cui_release(CUI_ITEM(lip));
}

/*
 * Allocate and initialize a CUI item with the given number of extents.
 */
STATIC struct xfs_cui_log_item *
xfs_cui_init(
	struct xfs_mount		*mp,
	uint				nextents)
{
	struct xfs_cui_log_item		*cuip;

	ASSERT(nextents > 0);
	if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
		cuip = kmem_zalloc(xfs_cui_log_item_sizeof(nextents),
				0);
	else
		cuip = kmem_cache_zalloc(xfs_cui_cache,
					 GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &cuip->cui_item, XFS_LI_CUI, &xfs_cui_item_ops);
	cuip->cui_format.cui_nextents = nextents;
	cuip->cui_format.cui_id = (uintptr_t)(void *)cuip;
	atomic_set(&cuip->cui_next_extent, 0);
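	/*
	 * Hold two references: one released by CUD or intent-abort processing
	 * via xfs_cui_release(), the other dropped when the log is done with
	 * the item at unpin time.
	 */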
	atomic_set(&cuip->cui_refcount, 2);

	return cuip;
}

static inline struct xfs_cud_log_item *CUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_cud_log_item, cud_item);
}

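/*
 * Report the number of log iovecs and the amount of log space needed to log
 * the given CUD item.
 */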
STATIC void
xfs_cud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_cud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given cud log item. We use only 1 iovec, and we point that
 * at the cud_log_format structure embedded in the cud item.
 */
STATIC void
xfs_cud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	cudp->cud_format.cud_type = XFS_LI_CUD;
	cudp->cud_format.cud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUD_FORMAT, &cudp->cud_format,
			sizeof(struct xfs_cud_log_format));
}

/*
 * The CUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the CUI and free the
 * CUD.
 */
STATIC void
xfs_cud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);

	xfs_cui_release(cudp->cud_cuip);
	kmem_free(cudp->cud_item.li_lv_shadow);
	kmem_cache_free(xfs_cud_cache, cudp);
}

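/* Return the CUI intent item that this CUD completes. */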
static struct xfs_log_item *
xfs_cud_item_intent(
	struct xfs_log_item	*lip)
{
	return &CUD_ITEM(lip)->cud_cuip->cui_item;
}

static const struct xfs_item_ops xfs_cud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED |
			  XFS_ITEM_INTENT_DONE,
	.iop_size	= xfs_cud_item_size,
	.iop_format	= xfs_cud_item_format,
	.iop_release	= xfs_cud_item_release,
	.iop_intent	= xfs_cud_item_intent,
};

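/*
 * Allocate a CUD log item, point it at the CUI it completes, and add it to
 * the transaction.
 */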
static struct xfs_cud_log_item *
xfs_trans_get_cud(
	struct xfs_trans		*tp,
	struct xfs_cui_log_item		*cuip)
{
	struct xfs_cud_log_item		*cudp;

	cudp = kmem_cache_zalloc(xfs_cud_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &cudp->cud_item, XFS_LI_CUD,
			  &xfs_cud_item_ops);
	cudp->cud_cuip = cuip;
	cudp->cud_format.cud_cui_id = cuip->cui_format.cui_id;

	xfs_trans_add_item(tp, &cudp->cud_item);
	return cudp;
}

/*
 * Finish a refcount update and log it to the CUD. Note that the
 * transaction is marked dirty regardless of whether the refcount
 * update succeeds or fails to support the CUI/CUD lifecycle rules.
 */
static int
xfs_trans_log_finish_refcount_update(
	struct xfs_trans		*tp,
	struct xfs_cud_log_item		*cudp,
	struct xfs_refcount_intent	*ri,
	struct xfs_btree_cur		**pcur)
{
	int				error;

	error = xfs_refcount_finish_one(tp, ri, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the CUI and frees the CUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY | XFS_TRANS_HAS_INTENT_DONE;
	set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);

	return error;
}

/* Sort refcount intents by AG. */
static int
xfs_refcount_update_diff_items(
	void				*priv,
	const struct list_head		*a,
	const struct list_head		*b)
{
	struct xfs_refcount_intent	*ra;
	struct xfs_refcount_intent	*rb;

	ra = container_of(a, struct xfs_refcount_intent, ri_list);
	rb = container_of(b, struct xfs_refcount_intent, ri_list);

	return ra->ri_pag->pag_agno - rb->ri_pag->pag_agno;
}

/* Set the phys extent flags for this refcount update. */
static void
xfs_trans_set_refcount_flags(
	struct xfs_phys_extent		*pmap,
	enum xfs_refcount_intent_type	type)
{
	pmap->pe_flags = 0;
	switch (type) {
	case XFS_REFCOUNT_INCREASE:
	case XFS_REFCOUNT_DECREASE:
	case XFS_REFCOUNT_ALLOC_COW:
	case XFS_REFCOUNT_FREE_COW:
		pmap->pe_flags |= type;
		break;
	default:
		ASSERT(0);
	}
}

/* Log refcount updates in the intent item. */
STATIC void
xfs_refcount_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_cui_log_item		*cuip,
	struct xfs_refcount_intent	*ri)
{
	uint				next_extent;
	struct xfs_phys_extent		*pmap;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&cuip->cui_next_extent) - 1;
	ASSERT(next_extent < cuip->cui_format.cui_nextents);
	pmap = &cuip->cui_format.cui_extents[next_extent];
	pmap->pe_startblock = ri->ri_startblock;
	pmap->pe_len = ri->ri_blockcount;
	xfs_trans_set_refcount_flags(pmap, ri->ri_type);
}

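/*
 * Create a CUI intent item covering every deferred refcount update on the
 * list, sorting the updates by AG first if requested.
 */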
static struct xfs_log_item *
xfs_refcount_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_cui_log_item		*cuip = xfs_cui_init(mp, count);
	struct xfs_refcount_intent	*ri;

	ASSERT(count > 0);

	xfs_trans_add_item(tp, &cuip->cui_item);
	if (sort)
		list_sort(mp, items, xfs_refcount_update_diff_items);
	list_for_each_entry(ri, items, ri_list)
		xfs_refcount_update_log_item(tp, cuip, ri);
	return &cuip->cui_item;
}

/* Get a CUD so we can process all the deferred refcount updates. */
static struct xfs_log_item *
xfs_refcount_update_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	return &xfs_trans_get_cud(tp, CUI_ITEM(intent))->cud_item;
}

/* Take a passive ref to the AG containing the space we're refcounting. */
void
xfs_refcount_update_get_group(
	struct xfs_mount		*mp,
	struct xfs_refcount_intent	*ri)
{
	xfs_agnumber_t			agno;

	agno = XFS_FSB_TO_AGNO(mp, ri->ri_startblock);
	ri->ri_pag = xfs_perag_intent_get(mp, agno);
}

/* Release a passive AG ref after finishing refcounting work. */
static inline void
xfs_refcount_update_put_group(
	struct xfs_refcount_intent	*ri)
{
	xfs_perag_intent_put(ri->ri_pag);
}

/* Process a deferred refcount update. */
STATIC int
xfs_refcount_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_refcount_intent	*ri;
	int				error;

	ri = container_of(item, struct xfs_refcount_intent, ri_list);
	error = xfs_trans_log_finish_refcount_update(tp, CUD_ITEM(done), ri,
			state);

	/* Did we run out of reservation?  Requeue what we didn't finish. */
	if (!error && ri->ri_blockcount > 0) {
		ASSERT(ri->ri_type == XFS_REFCOUNT_INCREASE ||
		       ri->ri_type == XFS_REFCOUNT_DECREASE);
		return -EAGAIN;
	}

	xfs_refcount_update_put_group(ri);
	kmem_cache_free(xfs_refcount_intent_cache, ri);
	return error;
}

/* Abort a pending CUI. */
STATIC void
xfs_refcount_update_abort_intent(
	struct xfs_log_item		*intent)
{
	xfs_cui_release(CUI_ITEM(intent));
}

/* Cancel a deferred refcount update. */
STATIC void
xfs_refcount_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_refcount_intent	*ri;

	ri = container_of(item, struct xfs_refcount_intent, ri_list);

	xfs_refcount_update_put_group(ri);
	kmem_cache_free(xfs_refcount_intent_cache, ri);
}

const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
	.max_items	= XFS_CUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_refcount_update_create_intent,
	.abort_intent	= xfs_refcount_update_abort_intent,
	.create_done	= xfs_refcount_update_create_done,
	.finish_item	= xfs_refcount_update_finish_item,
	.finish_cleanup = xfs_refcount_finish_one_cleanup,
	.cancel_item	= xfs_refcount_update_cancel_item,
};

/* Is this recovered CUI ok? */
static inline bool
xfs_cui_validate_phys(
	struct xfs_mount		*mp,
	struct xfs_phys_extent		*pmap)
{
	if (!xfs_has_reflink(mp))
		return false;

	if (pmap->pe_flags & ~XFS_REFCOUNT_EXTENT_FLAGS)
		return false;

	switch (pmap->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK) {
	case XFS_REFCOUNT_INCREASE:
	case XFS_REFCOUNT_DECREASE:
	case XFS_REFCOUNT_ALLOC_COW:
	case XFS_REFCOUNT_FREE_COW:
		break;
	default:
		return false;
	}

	return xfs_verify_fsbext(mp, pmap->pe_startblock, pmap->pe_len);
}

/*
 * Process a refcount update intent item that was recovered from the log.
 * We need to update the refcountbt.
 */
STATIC int
xfs_cui_item_recover(
	struct xfs_log_item		*lip,
	struct list_head		*capture_list)
{
	struct xfs_trans_res		resv;
	struct xfs_cui_log_item		*cuip = CUI_ITEM(lip);
	struct xfs_cud_log_item		*cudp;
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*rcur = NULL;
	struct xfs_mount		*mp = lip->li_log->l_mp;
	unsigned int			refc_type;
	bool				requeue_only = false;
	int				i;
	int				error = 0;

	/*
	 * First check the validity of the extents described by the
	 * CUI.  If any are bad, then assume that all are bad and
	 * just toss the CUI.
	 */
	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
		if (!xfs_cui_validate_phys(mp,
					&cuip->cui_format.cui_extents[i])) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&cuip->cui_format,
					sizeof(cuip->cui_format));
			return -EFSCORRUPTED;
		}
	}

	/*
	 * Under normal operation, refcount updates are deferred, so we
	 * wouldn't be adding them directly to a transaction.  All
	 * refcount updates manage reservation usage internally and
	 * dynamically by deferring work that won't fit in the
	 * transaction.  Normally, any work that needs to be deferred
	 * gets attached to the same defer_ops that scheduled the
	 * refcount update.  However, we're in log recovery here, so we
	 * use the passed-in defer_ops to finish up any work that
	 * doesn't fit.  We need to reserve enough blocks to handle a
	 * full btree split on either end of the refcount range.
	 */
	resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
	error = xfs_trans_alloc(mp, &resv, mp->m_refc_maxlevels * 2, 0,
			XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	cudp = xfs_trans_get_cud(tp, cuip);

	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
		struct xfs_refcount_intent	fake = { };
		struct xfs_phys_extent		*pmap;

		pmap = &cuip->cui_format.cui_extents[i];
		refc_type = pmap->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
		switch (refc_type) {
		case XFS_REFCOUNT_INCREASE:
		case XFS_REFCOUNT_DECREASE:
		case XFS_REFCOUNT_ALLOC_COW:
		case XFS_REFCOUNT_FREE_COW:
			fake.ri_type = refc_type;
			break;
		default:
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&cuip->cui_format,
					sizeof(cuip->cui_format));
			error = -EFSCORRUPTED;
			goto abort_error;
		}

		fake.ri_startblock = pmap->pe_startblock;
		fake.ri_blockcount = pmap->pe_len;

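		/*
		 * If an earlier update in this CUI ran out of reservation and
		 * left work behind, don't finish any more updates here; they
		 * are requeued as fresh deferred work below.
		 */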
		if (!requeue_only) {
			xfs_refcount_update_get_group(mp, &fake);
			error = xfs_trans_log_finish_refcount_update(tp, cudp,
					&fake, &rcur);
			xfs_refcount_update_put_group(&fake);
		}
		if (error == -EFSCORRUPTED)
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&cuip->cui_format,
					sizeof(cuip->cui_format));
		if (error)
			goto abort_error;

		/* Requeue what we didn't finish. */
		if (fake.ri_blockcount > 0) {
			struct xfs_bmbt_irec	irec = {
				.br_startblock	= fake.ri_startblock,
				.br_blockcount	= fake.ri_blockcount,
			};

			switch (fake.ri_type) {
			case XFS_REFCOUNT_INCREASE:
				xfs_refcount_increase_extent(tp, &irec);
				break;
			case XFS_REFCOUNT_DECREASE:
				xfs_refcount_decrease_extent(tp, &irec);
				break;
			case XFS_REFCOUNT_ALLOC_COW:
				xfs_refcount_alloc_cow_extent(tp,
						irec.br_startblock,
						irec.br_blockcount);
				break;
			case XFS_REFCOUNT_FREE_COW:
				xfs_refcount_free_cow_extent(tp,
						irec.br_startblock,
						irec.br_blockcount);
				break;
			default:
				ASSERT(0);
			}
			requeue_only = true;
		}
	}

	xfs_refcount_finish_one_cleanup(tp, rcur, error);
	return xfs_defer_ops_capture_and_commit(tp, capture_list);

abort_error:
	xfs_refcount_finish_one_cleanup(tp, rcur, error);
	xfs_trans_cancel(tp);
	return error;
}

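/* Match CUI log items by intent id. */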
STATIC bool
xfs_cui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return CUI_ITEM(lip)->cui_format.cui_id == intent_id;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_cui_item_relog(
	struct xfs_log_item		*intent,
	struct xfs_trans		*tp)
{
	struct xfs_cud_log_item		*cudp;
	struct xfs_cui_log_item		*cuip;
	struct xfs_phys_extent		*pmap;
	unsigned int			count;

	count = CUI_ITEM(intent)->cui_format.cui_nextents;
	pmap = CUI_ITEM(intent)->cui_format.cui_extents;

	tp->t_flags |= XFS_TRANS_DIRTY;
	cudp = xfs_trans_get_cud(tp, CUI_ITEM(intent));
	set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);

	cuip = xfs_cui_init(tp->t_mountp, count);
	memcpy(cuip->cui_format.cui_extents, pmap, count * sizeof(*pmap));
	atomic_set(&cuip->cui_next_extent, count);
	xfs_trans_add_item(tp, &cuip->cui_item);
	set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);
	return &cuip->cui_item;
}

static const struct xfs_item_ops xfs_cui_item_ops = {
	.flags		= XFS_ITEM_INTENT,
	.iop_size	= xfs_cui_item_size,
	.iop_format	= xfs_cui_item_format,
	.iop_unpin	= xfs_cui_item_unpin,
	.iop_release	= xfs_cui_item_release,
	.iop_recover	= xfs_cui_item_recover,
	.iop_match	= xfs_cui_item_match,
	.iop_relog	= xfs_cui_item_relog,
};

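/*
 * Copy a recovered CUI log format: the header first, then each physical
 * extent in turn.
 */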
static inline void
xfs_cui_copy_format(
	struct xfs_cui_log_format	*dst,
	const struct xfs_cui_log_format	*src)
{
	unsigned int			i;

	memcpy(dst, src, offsetof(struct xfs_cui_log_format, cui_extents));

	for (i = 0; i < src->cui_nextents; i++)
		memcpy(&dst->cui_extents[i], &src->cui_extents[i],
				sizeof(struct xfs_phys_extent));
}

/*
 * This routine is called to create an in-core extent refcount update
 * item from the cui format structure which was logged on disk.
 * It allocates an in-core cui, copies the extents from the format
 * structure into it, and adds the cui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_cui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_cui_log_item		*cuip;
	struct xfs_cui_log_format	*cui_formatp;
	size_t				len;

	cui_formatp = item->ri_buf[0].i_addr;

	if (item->ri_buf[0].i_len < xfs_cui_log_format_sizeof(0)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	len = xfs_cui_log_format_sizeof(cui_formatp->cui_nextents);
	if (item->ri_buf[0].i_len != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
	xfs_cui_copy_format(&cuip->cui_format, cui_formatp);
	atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
	/*
	 * Insert the intent into the AIL directly and drop one reference so
	 * that finishing or canceling the work will drop the other.
	 */
	xfs_trans_ail_insert(log->l_ailp, &cuip->cui_item, lsn);
	xfs_cui_release(cuip);
	return 0;
}

const struct xlog_recover_item_ops xlog_cui_item_ops = {
	.item_type		= XFS_LI_CUI,
	.commit_pass2		= xlog_recover_cui_commit_pass2,
};

/*
 * This routine is called when a CUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding CUI if it
 * was still in the log. To do this it searches the AIL for the CUI with an id
 * equal to that in the CUD format structure. If we find it we drop the CUD
 * reference, which removes the CUI from the AIL and frees it.
 */
STATIC int
xlog_recover_cud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_cud_log_format	*cud_formatp;

	cud_formatp = item->ri_buf[0].i_addr;
	if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_CUI, cud_formatp->cud_cui_id);
	return 0;
}

const struct xlog_recover_item_ops xlog_cud_item_ops = {
	.item_type		= XFS_LI_CUD,
	.commit_pass2		= xlog_recover_cud_commit_pass2,
};