Lines matching refs: cur (the matches below appear to come from fs/xfs/libxfs/xfs_alloc_btree.c, the XFS free-space btree implementation; the function names in the annotations are inferred from that file)

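/* lines 26-30: xfs_allocbt_dup_cursor() (name inferred): build a second
 * cursor over the same mount, transaction, AGF buffer, AG and btree type. */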
26 	struct xfs_btree_cur	*cur)
28 	return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
29 			cur->bc_ag.agbp, cur->bc_ag.agno,
30 			cur->bc_btnum);
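/* lines 35-50: xfs_allocbt_set_root() (inferred): store the new root block
 * in the AGF's per-btree root array and log the root/level fields. */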
35 	struct xfs_btree_cur	*cur,
39 	struct xfs_buf		*agbp = cur->bc_ag.agbp;
41 	int			btnum = cur->bc_btnum;
50 	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
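/* lines 55-76: xfs_allocbt_alloc_block() (inferred): new btree blocks for
 * the free-space btrees come from the AG free list; the busy-extent reuse
 * call lets the block be recycled immediately, and the agbtree delta keeps
 * the transaction's block accounting straight. */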
55 	struct xfs_btree_cur	*cur,
64 	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_ag.agbp,
74 	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.agno, bno, 1, false);
76 	xfs_trans_agbtree_delta(cur->bc_tp, 1);
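/* lines 85-100: xfs_allocbt_free_block() (inferred): the freed block goes
 * back onto the AG free list and is inserted into the busy-extent tree
 * (skipping discard) so it is not handed out again before the freeing
 * transaction commits. */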
85 	struct xfs_btree_cur	*cur,
88 	struct xfs_buf		*agbp = cur->bc_ag.agbp;
93 	bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
94 	error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
98 	xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
100 	xfs_trans_agbtree_delta(cur->bc_tp, -1);
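/* lines 109-162: xfs_allocbt_update_lastrec() (inferred): only the by-size
 * (cnt) btree maintains this hook, hence the XFS_BTNUM_CNT assert; the
 * length of the last (largest) record is mirrored into the AGF's
 * agf_longest field and logged with XFS_AGF_LONGEST. */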
109 	struct xfs_btree_cur	*cur,
115 	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;
120 	ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);
147 		rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
160 	pag = cur->bc_ag.agbp->b_pag;
162 	xfs_alloc_log_agf(cur->bc_tp, cur->bc_ag.agbp, XFS_AGF_LONGEST);
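/* lines 167-178: xfs_allocbt_get_minrecs()/xfs_allocbt_get_maxrecs()
 * (inferred): the per-mount record limits are two-element arrays, index 0
 * for leaf blocks and index 1 for interior nodes. */

A small standalone demo of that [level != 0] idiom (the numbers are
illustrative only, not values the kernel computed):

	#include <stdio.h>

	int main(void)
	{
		unsigned int mxr[2] = { 510, 340 };	/* [0] leaf, [1] node */
		int level;

		for (level = 0; level < 3; level++)
			printf("level %d: maxrecs %u\n", level, mxr[level != 0]);
		return 0;
	}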
167 	struct xfs_btree_cur	*cur,
170 	return cur->bc_mp->m_alloc_mnr[level != 0];
175 	struct xfs_btree_cur	*cur,
178 	return cur->bc_mp->m_alloc_mxr[level != 0];
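/* lines 214-218: xfs_allocbt_init_rec_from_cur() (inferred): the incore
 * record in cur->bc_rec.a is converted to the big-endian on-disk record
 * format with cpu_to_be32(). */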
214 	struct xfs_btree_cur	*cur,
217 	rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
218 	rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
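/* lines 223-230: xfs_allocbt_init_ptr_from_cur() (inferred): the root
 * pointer lives in the AGF; the assert cross-checks the cursor's AG number
 * against the AGF's own sequence number before handing the root out. */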
223 	struct xfs_btree_cur	*cur,
226 	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;
228 	ASSERT(cur->bc_ag.agno == be32_to_cpu(agf->agf_seqno));
230 	ptr->s = agf->agf_roots[cur->bc_btnum];
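/* lines 235-249: xfs_bnobt_key_diff()/xfs_cntbt_key_diff() (inferred): the
 * bnobt sorts free extents by start block, the cntbt by length and then
 * start block, which is why each btree has its own comparison callback. */

A standalone sketch of that assumed two-level cntbt ordering (illustrative,
not the kernel code; int64_t arithmetic avoids unsigned wraparound):

	#include <stdint.h>

	struct demo_rec {
		uint32_t	startblock;
		uint32_t	blockcount;
	};

	int64_t demo_cntbt_cmp(const struct demo_rec *a, const struct demo_rec *b)
	{
		int64_t diff = (int64_t)a->blockcount - (int64_t)b->blockcount;

		if (diff)
			return diff;
		return (int64_t)a->startblock - (int64_t)b->startblock;
	}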
235 	struct xfs_btree_cur	*cur,
238 	xfs_alloc_rec_incore_t	*rec = &cur->bc_rec.a;
246 	struct xfs_btree_cur	*cur,
249 	xfs_alloc_rec_incore_t	*rec = &cur->bc_rec.a;
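/* lines 262-416: declaration-only matches, presumably the diff_two_keys and
 * keys/recs_inorder callbacks for both btrees; cur is the vtable-mandated
 * first parameter and is not otherwise used on the matched lines. */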
262 	struct xfs_btree_cur	*cur,
272 	struct xfs_btree_cur	*cur,
382 	struct xfs_btree_cur	*cur,
392 	struct xfs_btree_cur	*cur,
403 	struct xfs_btree_cur	*cur,
416 	struct xfs_btree_cur	*cur,
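/* lines 479-505: xfs_allocbt_init_common() (inferred): zero-allocate a
 * cursor and fill in the shared fields; the cnt btree additionally sets
 * XFS_BTREE_LASTREC_UPDATE (see update_lastrec above), and CRC-enabled
 * filesystems get XFS_BTREE_CRC_BLOCKS. */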
479 	struct xfs_btree_cur	*cur;
483 	cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL);
485 	cur->bc_tp = tp;
486 	cur->bc_mp = mp;
487 	cur->bc_btnum = btnum;
488 	cur->bc_blocklog = mp->m_sb.sb_blocklog;
491 		cur->bc_ops = &xfs_cntbt_ops;
492 		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
493 		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
495 		cur->bc_ops = &xfs_bnobt_ops;
496 		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
499 	cur->bc_ag.agno = agno;
500 	cur->bc_ag.abt.active = false;
503 		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
505 	return cur;
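/* lines 520-530: xfs_allocbt_init_cursor() (inferred): the normal
 * constructor; the tree height (bc_nlevels) is read from the AGF's
 * per-btree levels array, and the AGF buffer is attached to the cursor. */

A hedged usage sketch (kernel context assumed, error handling trimmed;
xfs_alloc_lookup_ge() and xfs_btree_del_cursor() are existing helpers, but
this exact sequence is illustrative, not taken from the file):

	cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
	error = xfs_alloc_lookup_ge(cur, bno, 1, &i);	/* free extent at/after bno? */
	xfs_btree_del_cursor(cur, error);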
520 	struct xfs_btree_cur	*cur;
522 	cur = xfs_allocbt_init_common(mp, tp, agno, btnum);
524 		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
526 		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
528 	cur->bc_ag.agbp = agbp;
530 	return cur;
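/* lines 541-545: xfs_allocbt_stage_cursor() (inferred): create a cursor
 * with no transaction and a fake root block, for building a replacement
 * btree off to the side (e.g. by online repair) before committing it. */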
541 	struct xfs_btree_cur	*cur;
543 	cur = xfs_allocbt_init_common(mp, NULL, agno, btnum);
544 	xfs_btree_stage_afakeroot(cur, afake);
545 	return cur;
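/* lines 554-571: xfs_allocbt_commit_staged_btree() (inferred): install the
 * staged root and level count into the AGF, log them, then switch the
 * cursor from the staging ops to the real bnobt/cntbt ops (re-adding
 * XFS_BTREE_LASTREC_UPDATE for the cnt btree). */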
554 	struct xfs_btree_cur	*cur,
559 	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;
561 	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
563 	agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
564 	agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
567 	if (cur->bc_btnum == XFS_BTNUM_BNO) {
568 		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_bnobt_ops);
570 		cur->bc_flags |= XFS_BTREE_LASTREC_UPDATE;
571 		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_cntbt_ops);