// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_rmap.h"

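/*
 * Return the minimum records per block; index 0 of the geometry array is
 * for leaf blocks, index 1 for node blocks.
 */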
STATIC int
xfs_inobt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return M_IGEO(cur->bc_mp)->inobt_mnr[level != 0];
}

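/* Duplicate an existing cursor, pointing at the same AGI buffer. */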
STATIC struct xfs_btree_cur *
xfs_inobt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_ag.agbp, cur->bc_ag.agno,
			cur->bc_btnum);
}

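/* Point the AGI at a new inobt root block and log the update. */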
STATIC void
xfs_inobt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*nptr,
	int			inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agi		*agi = agbp->b_addr;

	agi->agi_root = nptr->s;
	be32_add_cpu(&agi->agi_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
}

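/* Point the AGI at a new finobt root block and log the update. */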
STATIC void
xfs_finobt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*nptr,
	int			inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agi		*agi = agbp->b_addr;

	agi->agi_free_root = nptr->s;
	be32_add_cpu(&agi->agi_free_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp,
			   XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL);
}

/* Update the inode btree block counter for this btree. */
static inline void
xfs_inobt_mod_blockcount(
	struct xfs_btree_cur	*cur,
	int			howmuch)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agi		*agi = agbp->b_addr;

	if (!xfs_sb_version_hasinobtcounts(&cur->bc_mp->m_sb))
		return;

	if (cur->bc_btnum == XFS_BTNUM_FINO)
		be32_add_cpu(&agi->agi_fblocks, howmuch);
	else if (cur->bc_btnum == XFS_BTNUM_INO)
		be32_add_cpu(&agi->agi_iblocks, howmuch);
	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_IBLOCKS);
}

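/*
 * Allocate a single block for the btree, as close to @start as possible,
 * charged against the given AG reservation.  On success *stat is 1 and
 * @new holds the new block; *stat is 0 if no space was available.
 */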
STATIC int
__xfs_inobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat,
	enum xfs_ag_resv_type	resv)
{
	xfs_alloc_arg_t		args;		/* block allocation args */
	int			error;		/* error return value */
	xfs_agblock_t		sbno = be32_to_cpu(start->s);

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.oinfo = XFS_RMAP_OINFO_INOBT;
	args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_ag.agno, sbno);
	args.minlen = 1;
	args.maxlen = 1;
	args.prod = 1;
	args.type = XFS_ALLOCTYPE_NEAR_BNO;
	args.resv = resv;

	error = xfs_alloc_vextent(&args);
	if (error)
		return error;

	if (args.fsbno == NULLFSBLOCK) {
		*stat = 0;
		return 0;
	}
	ASSERT(args.len == 1);

	new->s = cpu_to_be32(XFS_FSB_TO_AGBNO(args.mp, args.fsbno));
	*stat = 1;
	xfs_inobt_mod_blockcount(cur, 1);
	return 0;
}

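/* Allocate an inobt block from AG free space, with no per-AG reservation. */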
STATIC int
xfs_inobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
}

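/*
 * Allocate a finobt block from the metadata reservation, unless this
 * filesystem is running without a finobt block reservation.
 */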
STATIC int
xfs_finobt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	if (cur->bc_mp->m_finobt_nores)
		return xfs_inobt_alloc_block(cur, start, new, stat);
	return __xfs_inobt_alloc_block(cur, start, new, stat,
			XFS_AG_RESV_METADATA);
}

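/* Free a btree block and update the per-AG btree block counter. */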
STATIC int
__xfs_inobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	enum xfs_ag_resv_type	resv)
{
	xfs_inobt_mod_blockcount(cur, -1);
	return xfs_free_extent(cur->bc_tp,
			XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1,
			&XFS_RMAP_OINFO_INOBT, resv);
}

STATIC int
xfs_inobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_NONE);
}

STATIC int
xfs_finobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	if (cur->bc_mp->m_finobt_nores)
		return xfs_inobt_free_block(cur, bp);
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA);
}

STATIC int
xfs_inobt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return M_IGEO(cur->bc_mp)->inobt_mxr[level != 0];
}

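/* The low key of a record is its starting inode number. */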
STATIC void
xfs_inobt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->inobt.ir_startino = rec->inobt.ir_startino;
}

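/*
 * The high key of a record is the last inode number it covers, i.e.
 * ir_startino + XFS_INODES_PER_CHUNK - 1.
 */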
STATIC void
xfs_inobt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	__u32			x;

	x = be32_to_cpu(rec->inobt.ir_startino);
	x += XFS_INODES_PER_CHUNK - 1;
	key->inobt.ir_startino = cpu_to_be32(x);
}

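/*
 * Build the on-disk record from the cursor's in-core record, using the
 * sparse format only if the filesystem supports sparse inode chunks.
 */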
STATIC void
xfs_inobt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
	if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
		rec->inobt.ir_u.sp.ir_holemask =
					cpu_to_be16(cur->bc_rec.i.ir_holemask);
		rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count;
		rec->inobt.ir_u.sp.ir_freecount = cur->bc_rec.i.ir_freecount;
	} else {
		/* ir_holemask/ir_count not supported on-disk */
		rec->inobt.ir_u.f.ir_freecount =
					cpu_to_be32(cur->bc_rec.i.ir_freecount);
	}
	rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
}

/*
 * Initial value of ptr for lookup: start at the root recorded in the AGI.
 */
STATIC void
xfs_inobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.agno == be32_to_cpu(agi->agi_seqno));

	ptr->s = agi->agi_root;
}

STATIC void
xfs_finobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.agno == be32_to_cpu(agi->agi_seqno));
	ptr->s = agi->agi_free_root;
}

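/*
 * Compare the search key against the cursor's current record; the result
 * has the sign of key minus record.
 */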
STATIC int64_t
xfs_inobt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	return (int64_t)be32_to_cpu(key->inobt.ir_startino) -
			  cur->bc_rec.i.ir_startino;
}

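/* Compare two keys by startino; the result has the sign of k1 minus k2. */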
STATIC int64_t
xfs_inobt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (int64_t)be32_to_cpu(k1->inobt.ir_startino) -
			  be32_to_cpu(k2->inobt.ir_startino);
}

static xfs_failaddr_t
xfs_inobt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	xfs_failaddr_t		fa;
	unsigned int		level;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	/*
	 * During growfs operations, we can't verify the exact owner as the
	 * perag is not fully initialised and hence not attached to the buffer.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agi information will not yet have been initialised
	 * from the on disk AGI. We don't currently use any of this information,
	 * but beware of the landmine (i.e. need to check pag->pagi_init) if we
	 * ever do.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		fa = xfs_btree_sblock_v5hdr_verify(bp);
		if (fa)
			return fa;
	}

	/* level verification */
	level = be16_to_cpu(block->bb_level);
	if (level >= M_IGEO(mp)->inobt_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp,
			M_IGEO(mp)->inobt_mxr[level != 0]);
}

static void
xfs_inobt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_inobt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_inobt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_inobt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_inobt_buf_ops = {
	.name = "xfs_inobt",
	.magic = { cpu_to_be32(XFS_IBT_MAGIC), cpu_to_be32(XFS_IBT_CRC_MAGIC) },
	.verify_read = xfs_inobt_read_verify,
	.verify_write = xfs_inobt_write_verify,
	.verify_struct = xfs_inobt_verify,
};

const struct xfs_buf_ops xfs_finobt_buf_ops = {
	.name = "xfs_finobt",
	.magic = { cpu_to_be32(XFS_FIBT_MAGIC),
		   cpu_to_be32(XFS_FIBT_CRC_MAGIC) },
	.verify_read = xfs_inobt_read_verify,
	.verify_write = xfs_inobt_write_verify,
	.verify_struct = xfs_inobt_verify,
};

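/* Keys must be in strictly increasing startino order. */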
STATIC int
xfs_inobt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->inobt.ir_startino) <
		be32_to_cpu(k2->inobt.ir_startino);
}

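/*
 * Records must be in increasing startino order and must not overlap;
 * each record covers exactly XFS_INODES_PER_CHUNK inodes.
 */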
STATIC int
xfs_inobt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <=
		be32_to_cpu(r2->inobt.ir_startino);
}

static const struct xfs_btree_ops xfs_inobt_ops = {
	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),

	.dup_cursor		= xfs_inobt_dup_cursor,
	.set_root		= xfs_inobt_set_root,
	.alloc_block		= xfs_inobt_alloc_block,
	.free_block		= xfs_inobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_inobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_inobt_buf_ops,
	.diff_two_keys		= xfs_inobt_diff_two_keys,
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
};

static const struct xfs_btree_ops xfs_finobt_ops = {
	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),

	.dup_cursor		= xfs_inobt_dup_cursor,
	.set_root		= xfs_finobt_set_root,
	.alloc_block		= xfs_finobt_alloc_block,
	.free_block		= xfs_finobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_finobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_finobt_buf_ops,
	.diff_two_keys		= xfs_inobt_diff_two_keys,
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
};

/*
 * Initialize a new inode btree cursor.
 */
static struct xfs_btree_cur *
xfs_inobt_init_common(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	xfs_agnumber_t		agno,		/* allocation group number */
	xfs_btnum_t		btnum)		/* ialloc or free ino btree */
{
	struct xfs_btree_cur	*cur;

	cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL);
	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = btnum;
	if (btnum == XFS_BTNUM_INO) {
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
		cur->bc_ops = &xfs_inobt_ops;
	} else {
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
		cur->bc_ops = &xfs_finobt_ops;
	}

	cur->bc_blocklog = mp->m_sb.sb_blocklog;

	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_ag.agno = agno;
	return cur;
}

/* Create an inode btree cursor. */
struct xfs_btree_cur *
xfs_inobt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;
	struct xfs_agi		*agi = agbp->b_addr;

	cur = xfs_inobt_init_common(mp, tp, agno, btnum);
	if (btnum == XFS_BTNUM_INO)
		cur->bc_nlevels = be32_to_cpu(agi->agi_level);
	else
		cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
	cur->bc_ag.agbp = agbp;
	return cur;
}

/* Create an inode btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_inobt_stage_cursor(
	struct xfs_mount	*mp,
	struct xbtree_afakeroot	*afake,
	xfs_agnumber_t		agno,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;

	cur = xfs_inobt_init_common(mp, NULL, agno, btnum);
	xfs_btree_stage_afakeroot(cur, afake);
	return cur;
}

/*
 * Install a new inobt btree root.  Caller is responsible for invalidating
 * and freeing the old btree blocks.
 */
void
xfs_inobt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agi		*agi = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;
	int			fields;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	if (cur->bc_btnum == XFS_BTNUM_INO) {
		fields = XFS_AGI_ROOT | XFS_AGI_LEVEL;
		agi->agi_root = cpu_to_be32(afake->af_root);
		agi->agi_level = cpu_to_be32(afake->af_levels);
		if (xfs_sb_version_hasinobtcounts(&cur->bc_mp->m_sb)) {
			agi->agi_iblocks = cpu_to_be32(afake->af_blocks);
			fields |= XFS_AGI_IBLOCKS;
		}
		xfs_ialloc_log_agi(tp, agbp, fields);
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_inobt_ops);
	} else {
		fields = XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL;
		agi->agi_free_root = cpu_to_be32(afake->af_root);
		agi->agi_free_level = cpu_to_be32(afake->af_levels);
		if (xfs_sb_version_hasinobtcounts(&cur->bc_mp->m_sb)) {
			agi->agi_fblocks = cpu_to_be32(afake->af_blocks);
			fields |= XFS_AGI_IBLOCKS;
		}
		xfs_ialloc_log_agi(tp, agbp, fields);
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_finobt_ops);
	}
}

/*
 * Calculate number of records in an inobt btree block.
 */
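/*
 * For example (a sketch, assuming 4096-byte blocks and the 56-byte v5
 * short-format block header): a leaf holds (4096 - 56) / 16 = 252
 * 16-byte records, and a node holds (4096 - 56) / (4 + 4) = 505
 * key/pointer pairs.
 */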
int
xfs_inobt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_INOBT_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_inobt_rec_t);
	return blocklen / (sizeof(xfs_inobt_key_t) + sizeof(xfs_inobt_ptr_t));
}

/*
 * Convert the inode record holemask to an inode allocation bitmap. The inode
 * allocation bitmap is inode granularity and specifies whether an inode is
 * physically allocated on disk (not whether the inode is considered allocated
 * or free by the fs).
 *
 * A bit value of 1 means the inode is allocated, a value of 0 means it is free.
 */
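/*
 * For example (a sketch, assuming XFS_INODES_PER_HOLEMASK_BIT == 4, i.e.
 * 64 inodes per chunk and 16 holemask bits): a holemask of 0xff00 marks
 * inodes 32-63 as holes, the inverted mask is 0x00ff, and the resulting
 * allocation bitmap is 0x00000000ffffffff (inodes 0-31 physically
 * allocated).
 */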
uint64_t
xfs_inobt_irec_to_allocmask(
	struct xfs_inobt_rec_incore	*rec)
{
	uint64_t			bitmap = 0;
	uint64_t			inodespbit;
	int				nextbit;
	uint				allocbitmap;

	/*
	 * The holemask has 16 bits for a 64 inode record. Therefore each
	 * holemask bit represents multiple inodes. Create a mask of bits to set
	 * in the allocmask for each holemask bit.
	 */
	inodespbit = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;

	/*
	 * Allocated inodes are represented by 0 bits in holemask. Invert the 0
	 * bits to 1 and convert to a uint so we can use xfs_next_bit(). Mask
	 * anything beyond the 16 holemask bits since this casts to a larger
	 * type.
	 */
	allocbitmap = ~rec->ir_holemask & ((1 << XFS_INOBT_HOLEMASK_BITS) - 1);

	/*
	 * allocbitmap is the inverted holemask so every set bit represents
	 * allocated inodes. To expand from 16-bit holemask granularity to
	 * 64-bit (e.g., bit-per-inode), set inodespbit bits in the target
	 * bitmap for every holemask bit.
	 */
	nextbit = xfs_next_bit(&allocbitmap, 1, 0);
	while (nextbit != -1) {
		ASSERT(nextbit < (sizeof(rec->ir_holemask) * NBBY));

		bitmap |= (inodespbit <<
			   (nextbit * XFS_INODES_PER_HOLEMASK_BIT));

		nextbit = xfs_next_bit(&allocbitmap, 1, nextbit + 1);
	}

	return bitmap;
}

#if defined(DEBUG) || defined(XFS_WARN)
/*
 * Verify that an in-core inode record has a valid inode count.
 */
int
xfs_inobt_rec_check_count(
	struct xfs_mount		*mp,
	struct xfs_inobt_rec_incore	*rec)
{
	int				inocount = 0;
	int				nextbit = 0;
	uint64_t			allocbmap;
	int				wordsz;

	wordsz = sizeof(allocbmap) / sizeof(unsigned int);
	allocbmap = xfs_inobt_irec_to_allocmask(rec);

	nextbit = xfs_next_bit((uint *) &allocbmap, wordsz, nextbit);
	while (nextbit != -1) {
		inocount++;
		nextbit = xfs_next_bit((uint *) &allocbmap, wordsz,
				       nextbit + 1);
	}

	if (inocount != rec->ir_count)
		return -EFSCORRUPTED;

	return 0;
}
#endif	/* DEBUG || XFS_WARN */

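/*
 * Compute the worst-case number of blocks that the inode btree in this AG
 * could occupy, for per-AG reservation purposes.
 */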
static xfs_extlen_t
xfs_inobt_max_size(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	xfs_agblock_t		agblocks = xfs_ag_block_count(mp, agno);

	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (M_IGEO(mp)->inobt_mxr[0] == 0)
		return 0;

	/*
	 * The log is permanently allocated, so the space it occupies will
	 * never be available for the kinds of things that would require btree
	 * expansion.  We therefore can pretend the space isn't there.
	 */
	if (mp->m_sb.sb_logstart &&
	    XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == agno)
		agblocks -= mp->m_sb.sb_logblocks;

	return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr,
				(uint64_t)agblocks * mp->m_sb.sb_inopblock /
					XFS_INODES_PER_CHUNK);
}

/* Read AGI and create inobt cursor. */
int
xfs_inobt_cur(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_btnum_t		which,
	struct xfs_btree_cur	**curpp,
	struct xfs_buf		**agi_bpp)
{
	struct xfs_btree_cur	*cur;
	int			error;

	ASSERT(*agi_bpp == NULL);
	ASSERT(*curpp == NULL);

	error = xfs_ialloc_read_agi(mp, tp, agno, agi_bpp);
	if (error)
		return error;

	cur = xfs_inobt_init_cursor(mp, tp, *agi_bpp, agno, which);
	if (!cur) {
		xfs_trans_brelse(tp, *agi_bpp);
		*agi_bpp = NULL;
		return -ENOMEM;
	}
	*curpp = cur;
	return 0;
}

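/* Count the blocks used by a btree by walking the whole tree. */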
static int
xfs_inobt_count_blocks(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_btnum_t		btnum,
	xfs_extlen_t		*tree_blocks)
{
	struct xfs_buf		*agbp = NULL;
	struct xfs_btree_cur	*cur = NULL;
	int			error;

	error = xfs_inobt_cur(mp, tp, agno, btnum, &cur, &agbp);
	if (error)
		return error;

	error = xfs_btree_count_blocks(cur, tree_blocks);
	xfs_btree_del_cursor(cur, error);
	xfs_trans_brelse(tp, agbp);

	return error;
}

/* Read finobt block count from AGI header. */
static int
xfs_finobt_read_blocks(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_extlen_t		*tree_blocks)
{
	struct xfs_buf		*agbp;
	struct xfs_agi		*agi;
	int			error;

	error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
	if (error)
		return error;

	agi = agbp->b_addr;
	*tree_blocks = be32_to_cpu(agi->agi_fblocks);
	xfs_trans_brelse(tp, agbp);
	return 0;
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_finobt_calc_reserves(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	xfs_extlen_t		tree_len = 0;
	int			error;

	if (!xfs_sb_version_hasfinobt(&mp->m_sb))
		return 0;

	if (xfs_sb_version_hasinobtcounts(&mp->m_sb))
		error = xfs_finobt_read_blocks(mp, tp, agno, &tree_len);
	else
		error = xfs_inobt_count_blocks(mp, tp, agno, XFS_BTNUM_FINO,
				&tree_len);
	if (error)
		return error;

	*ask += xfs_inobt_max_size(mp, agno);
	*used += tree_len;
	return 0;
}

/* Calculate the inobt btree size for some records. */
xfs_extlen_t
xfs_iallocbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr, len);
}