Lines matching refs: args
317 xfs_alloc_arg_t *args, /* allocation argument structure */
330 busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);
336 if (bno < args->min_agbno && bno + len > args->min_agbno) {
337 diff = args->min_agbno - bno;
344 if (args->alignment > 1 && len >= args->minlen) {
345 xfs_agblock_t aligned_bno = roundup(bno, args->alignment);
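The matches at source lines 317-345 come from xfs_alloc_compute_aligned(), which trims a candidate free extent against busy extents, the caller's min_agbno floor, and the requested alignment. Below is a minimal standalone sketch of that trimming arithmetic, with simplified types and a ROUNDUP helper invented for illustration; the busy-extent trim itself is omitted.

/* Minimal sketch, not the kernel function; types and ROUNDUP are invented. */
typedef unsigned int agblock_t;
typedef unsigned int extlen_t;

#define ROUNDUP(x, a)   ((((x) + (a) - 1) / (a)) * (a))

/*
 * Trim the extent [bno, bno + len) so it starts at or above min_agbno and
 * on an alignment boundary; return the usable aligned length, or 0 if
 * nothing usable remains.
 */
static extlen_t
trim_aligned(agblock_t bno, extlen_t len, agblock_t min_agbno,
             extlen_t alignment, agblock_t *aligned_bno)
{
        if (bno < min_agbno) {
                agblock_t diff = min_agbno - bno;

                if (diff >= len)
                        return 0;
                bno += diff;
                len -= diff;
        }
        *aligned_bno = ROUNDUP(bno, alignment);
        if (*aligned_bno - bno >= len)
                return 0;
        return len - (extlen_t)(*aligned_bno - bno);
}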
436 xfs_alloc_arg_t *args) /* allocation argument structure */
441 ASSERT(args->mod < args->prod);
442 rlen = args->len;
443 ASSERT(rlen >= args->minlen);
444 ASSERT(rlen <= args->maxlen);
445 if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
446 (args->mod == 0 && rlen < args->prod))
448 k = rlen % args->prod;
449 if (k == args->mod)
451 if (k > args->mod)
452 rlen = rlen - (k - args->mod);
454 rlen = rlen - args->prod + (args->mod - k);
456 if ((int)rlen < (int)args->minlen)
458 ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
459 ASSERT(rlen % args->prod == args->mod);
460 ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
461 rlen + args->minleft);
462 args->len = rlen;
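Source lines 436-462 are the body of xfs_alloc_fix_len(), which shortens args->len so the final length is congruent to args->mod modulo args->prod without dropping below args->minlen. A hedged userspace sketch of that adjustment, using plain parameters in place of the args structure:

/* Illustrative sketch of the mod/prod trimming above; not the kernel code. */
static unsigned int
fix_len(unsigned int len, unsigned int minlen, unsigned int maxlen,
        unsigned int prod, unsigned int mod)
{
        unsigned int k, rlen = len;

        if (prod <= 1 || rlen < mod || rlen == maxlen ||
            (mod == 0 && rlen < prod))
                return rlen;                    /* nothing to adjust */
        k = rlen % prod;
        if (k == mod)
                return rlen;                    /* already the right remainder */
        if (k > mod)
                rlen -= k - mod;                /* trim within this stripe unit */
        else
                rlen -= prod - (mod - k);       /* trim back into the previous one */
        if ((int)rlen < (int)minlen)
                return len;                     /* cannot honour both; leave as is */
        return rlen;                            /* now rlen % prod == mod */
}

For example, with prod = 4 and mod = 1, a candidate length of 11 is trimmed to 9 (11 % 4 = 3, so two blocks are dropped), leaving 9 % 4 == 1.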
812 struct xfs_alloc_arg *args,
818 acur->cur_len = args->maxlen;
833 acur->cnt = xfs_allocbt_init_cursor(args->mp, args->tp,
834 args->agbp, args->pag, XFS_BTNUM_CNT);
835 error = xfs_alloc_lookup_ge(acur->cnt, 0, args->maxlen, &i);
843 acur->bnolt = xfs_allocbt_init_cursor(args->mp, args->tp,
844 args->agbp, args->pag, XFS_BTNUM_BNO);
846 acur->bnogt = xfs_allocbt_init_cursor(args->mp, args->tp,
847 args->agbp, args->pag, XFS_BTNUM_BNO);
878 struct xfs_alloc_arg *args,
896 if (XFS_IS_CORRUPT(args->mp, i != 1))
903 if (len < args->minlen) {
908 busy = xfs_alloc_compute_aligned(args, bno, len, &bnoa, &lena,
914 if (bnoa < args->min_agbno || bnoa > args->max_agbno) {
918 if (lena < args->minlen)
921 args->len = XFS_EXTLEN_MIN(lena, args->maxlen);
922 xfs_alloc_fix_len(args);
923 ASSERT(args->len >= args->minlen);
924 if (args->len < acur->len)
931 diff = xfs_alloc_compute_diff(args->agbno, args->len,
932 args->alignment, args->datatype,
945 ASSERT(args->len > acur->len ||
946 (args->len == acur->len && diff <= acur->diff));
950 acur->len = args->len;
959 if (acur->diff == 0 && acur->len == args->maxlen)
964 trace_xfs_alloc_cur_check(args->mp, cur->bc_btnum, bno, len, diff,
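The xfs_alloc_cur_check() matches (source lines 878-964) evaluate one btree record against the best candidate found so far and stop the search early on a perfect hit. The sketch below captures the comparison documented by the ASSERT at source lines 945-946; the real function also applies busy-extent and cursor-deactivation checks that do not appear in this listing.

/* Hypothetical sketch of the best-candidate comparison only. */
struct best {
        unsigned int    len;            /* best extent length so far */
        unsigned int    diff;           /* locality distance of that extent */
};

/*
 * Should (len, diff) replace the current best candidate?  Longer extents
 * win; equal lengths are broken by smaller locality diff, matching the
 * invariant asserted at source lines 945-946.
 */
static int
candidate_is_better(const struct best *best, unsigned int len,
                    unsigned int diff)
{
        if (len < best->len)
                return 0;
        if (len == best->len && diff > best->diff)
                return 0;
        return 1;
}

/* A perfect hit ends the search: maximum requested length at distance 0. */
static int
candidate_is_perfect(const struct best *best, unsigned int maxlen)
{
        return best->diff == 0 && best->len == maxlen;
}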
971 * trees and update the args structure.
975 struct xfs_alloc_arg *args,
978 struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
991 args->agbno = acur->bno;
992 args->len = acur->len;
993 args->wasfromfl = 0;
995 trace_xfs_alloc_cur(args);
1005 struct xfs_alloc_arg *args,
1019 error = xfs_alloc_lookup_ge(cur, args->agbno, cur_len, &i);
1029 error = xfs_alloc_cur_check(args, acur, cur, &i);
1042 if (bno > args->agbno) {
1047 error = xfs_alloc_cur_check(args, acur, cur,
1076 struct xfs_alloc_arg *args, /* allocation argument structure */
1082 struct xfs_agf *agf = args->agbp->b_addr;
1102 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1109 if (args->minlen != 1 || args->alignment != 1 ||
1110 args->resv == XFS_AG_RESV_AGFL ||
1111 be32_to_cpu(agf->agf_flcount) <= args->minleft)
1114 error = xfs_alloc_get_freelist(args->pag, args->tp, args->agbp,
1121 xfs_extent_busy_reuse(args->mp, args->pag, fbno, 1,
1122 (args->datatype & XFS_ALLOC_NOBUSY));
1124 if (args->datatype & XFS_ALLOC_USERDATA) {
1127 error = xfs_trans_get_buf(args->tp, args->mp->m_ddev_targp,
1128 XFS_AGB_TO_DADDR(args->mp, args->agno, fbno),
1129 args->mp->m_bsize, 0, &bp);
1132 xfs_trans_binval(args->tp, bp);
1134 *fbnop = args->agbno = fbno;
1135 *flenp = args->len = 1;
1136 if (XFS_IS_CORRUPT(args->mp, fbno >= be32_to_cpu(agf->agf_length))) {
1140 args->wasfromfl = 1;
1141 trace_xfs_alloc_small_freelist(args);
1147 error = xfs_rmap_free(args->tp, args->agbp, args->pag, fbno, 1,
1159 if (flen < args->minlen) {
1160 args->agbno = NULLAGBLOCK;
1161 trace_xfs_alloc_small_notenough(args);
1167 trace_xfs_alloc_small_done(args);
1171 trace_xfs_alloc_small_error(args);
1183 xfs_alloc_arg_t *args) /* allocation argument structure */
1185 struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
1197 ASSERT(args->alignment == 1);
1202 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1203 args->pag, XFS_BTNUM_BNO);
1210 error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
1222 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1226 ASSERT(fbno <= args->agbno);
1233 xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen);
1239 if (tbno > args->agbno)
1241 if (tlen < args->minlen)
1244 if (tend < args->agbno + args->minlen)
1253 args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
1254 - args->agbno;
1255 xfs_alloc_fix_len(args);
1256 ASSERT(args->agbno + args->len <= tend);
1259 * We are allocating agbno for args->len
1262 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1263 args->pag, XFS_BTNUM_CNT);
1264 ASSERT(args->agbno + args->len <= be32_to_cpu(agf->agf_length));
1265 error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
1266 args->len, XFSA_FIXUP_BNO_OK);
1275 args->wasfromfl = 0;
1276 trace_xfs_alloc_exact_done(args);
1282 args->agbno = NULLAGBLOCK;
1283 trace_xfs_alloc_exact_notfound(args);
1288 trace_xfs_alloc_exact_error(args);
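Source lines 1183-1288 are from xfs_alloc_ag_vextent_exact(), which only succeeds if the busy-trimmed free extent covers a fixed starting block. A small sketch of that fit test, assuming tend is the exclusive end of the trimmed extent:

/* Hypothetical sketch of the exact-bno fit test shown above. */
typedef unsigned int agblock_t;
typedef unsigned int extlen_t;

#define MIN(a, b)       ((a) < (b) ? (a) : (b))

/*
 * Given a busy-trimmed free extent [tbno, tend) and a request that must
 * start exactly at agbno, return how many blocks can be allocated there
 * (0 if the request cannot be satisfied).
 */
static extlen_t
exact_fit(agblock_t tbno, agblock_t tend, agblock_t agbno,
          extlen_t minlen, extlen_t maxlen)
{
        if (tbno > agbno)                       /* free space starts past agbno */
                return 0;
        if (tend - tbno < minlen)               /* trimmed extent too short */
                return 0;
        if (tend < agbno + minlen)              /* not enough room beyond agbno */
                return 0;
        /* allocate from agbno up to maxlen blocks, capped by the extent end */
        return MIN(tend, agbno + maxlen) - agbno;
}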
1298 struct xfs_alloc_arg *args,
1317 error = xfs_alloc_cur_check(args, acur, cur, &i);
1350 struct xfs_alloc_arg *args,
1363 error = xfs_alloc_lookup_ge(acur->cnt, args->agbno, acur->cur_len, &i);
1366 error = xfs_alloc_lookup_le(acur->bnolt, args->agbno, 0, &i);
1369 error = xfs_alloc_lookup_ge(acur->bnogt, args->agbno, 0, &i);
1399 trace_xfs_alloc_cur_lookup(args);
1405 error = xfs_alloc_walk_iter(args, acur, acur->bnolt, false,
1410 trace_xfs_alloc_cur_left(args);
1415 error = xfs_alloc_walk_iter(args, acur, acur->bnogt, true, true,
1420 trace_xfs_alloc_cur_right(args);
1430 error = xfs_alloc_cntbt_iter(args, acur);
1434 trace_xfs_alloc_cur_lookup_done(args);
1460 error = xfs_alloc_walk_iter(args, acur, fbcur, fbinc, true, -1,
1475 struct xfs_alloc_arg *args,
1496 if (*len || args->alignment > 1) {
1502 if (XFS_IS_CORRUPT(args->mp, i != 1))
1504 if (*len >= args->minlen)
1510 ASSERT(*len >= args->minlen);
1515 error = xfs_alloc_walk_iter(args, acur, acur->cnt, true, false, -1, &i);
1526 trace_xfs_alloc_near_first(args);
1539 struct xfs_alloc_arg *args,
1549 if (!args->min_agbno && !args->max_agbno)
1550 args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
1551 ASSERT(args->min_agbno <= args->max_agbno);
1554 if (args->agbno < args->min_agbno)
1555 args->agbno = args->min_agbno;
1556 if (args->agbno > args->max_agbno)
1557 args->agbno = args->max_agbno;
1569 error = xfs_alloc_cur_setup(args, &acur);
1571 error = xfs_alloc_ag_vextent_small(args, acur.cnt, &bno,
1576 trace_xfs_alloc_near_noentry(args);
1595 error = xfs_alloc_ag_vextent_lastblock(args, &acur, &bno, &len,
1607 error = xfs_alloc_ag_vextent_locality(args, &acur, &i);
1623 trace_xfs_alloc_near_busy(args);
1624 error = xfs_extent_busy_flush(args->tp, args->pag,
1632 trace_xfs_alloc_size_neither(args);
1633 args->agbno = NULLAGBLOCK;
1639 error = xfs_alloc_cur_finish(args, &acur);
1654 struct xfs_alloc_arg *args,
1657 struct xfs_agf *agf = args->agbp->b_addr;
1675 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1676 args->pag, XFS_BTNUM_CNT);
1683 args->maxlen + args->alignment - 1, &i)))
1694 error = xfs_alloc_ag_vextent_small(args, cnt_cur,
1700 trace_xfs_alloc_size_noentry(args);
1704 busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno,
1714 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1719 busy = xfs_alloc_compute_aligned(args, fbno, flen,
1722 if (rlen >= args->maxlen)
1738 trace_xfs_alloc_size_busy(args);
1739 error = xfs_extent_busy_flush(args->tp, args->pag,
1756 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1757 if (XFS_IS_CORRUPT(args->mp,
1764 if (rlen < args->maxlen) {
1782 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1788 busy = xfs_alloc_compute_aligned(args, fbno, flen,
1790 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1791 if (XFS_IS_CORRUPT(args->mp,
1803 if (rlen == args->maxlen)
1810 if (XFS_IS_CORRUPT(args->mp, i != 1)) {
1819 args->wasfromfl = 0;
1823 args->len = rlen;
1824 if (rlen < args->minlen) {
1833 trace_xfs_alloc_size_busy(args);
1834 error = xfs_extent_busy_flush(args->tp, args->pag,
1845 xfs_alloc_fix_len(args);
1847 rlen = args->len;
1848 if (XFS_IS_CORRUPT(args->mp, rlen > flen)) {
1855 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1856 args->pag, XFS_BTNUM_BNO);
1863 args->len = rlen;
1864 args->agbno = rbno;
1865 if (XFS_IS_CORRUPT(args->mp,
1866 args->agbno + args->len >
1871 trace_xfs_alloc_size_done(args);
1875 trace_xfs_alloc_size_error(args);
1884 trace_xfs_alloc_size_nominleft(args);
1885 args->agbno = NULLAGBLOCK;
2321 struct xfs_alloc_arg *args,
2325 struct xfs_perag *pag = args->pag;
2334 reservation = xfs_ag_resv_needed(pag, args->resv);
2337 alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
2349 reservation - min_free - args->minleft);
2350 if (available < (int)max(args->total, alloc_len))
2357 if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
2358 args->maxlen = available;
2359 ASSERT(args->maxlen > 0);
2360 ASSERT(args->maxlen >= args->minlen);
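The xfs_alloc_space_available() matches (source lines 2321-2360) decide whether an AG can satisfy a request once reservations, the minimum freelist requirement, and minleft are deducted, and clamp maxlen to what is actually left. A hedged sketch of that accounting; free_and_agfl and min_free are stand-ins for per-AG counters computed in lines not shown in this listing.

/* Hedged sketch of the availability check; not the kernel function. */
static int
space_available(int free_and_agfl, int reservation, int min_free,
                unsigned int minlen, unsigned int maxlen, unsigned int minleft,
                unsigned int alignment, unsigned int minalignslop,
                unsigned int total, unsigned int *clamped_maxlen)
{
        unsigned int alloc_len = minlen + (alignment - 1) + minalignslop;
        unsigned int want = total > alloc_len ? total : alloc_len;
        int available = free_and_agfl - reservation - min_free - (int)minleft;

        if (available < (int)want)
                return 0;                       /* this AG cannot satisfy us */

        /* clamp the longest extent we will even ask the btrees for */
        *clamped_maxlen = available < (int)maxlen ? (unsigned int)available
                                                  : maxlen;
        return 1;
}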
2587 * args->minlen.
2591 struct xfs_alloc_arg *args,
2600 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, agbp,
2601 args->pag, XFS_BTNUM_CNT);
2602 error = xfs_alloc_lookup_ge(cnt_cur, 0, args->minlen, stat);
2615 if (*stat == 1 && flen != args->minlen)
2631 struct xfs_alloc_arg *args, /* allocation argument structure */
2634 struct xfs_mount *mp = args->mp;
2635 struct xfs_perag *pag = args->pag;
2636 struct xfs_trans *tp = args->tp;
2663 (args->datatype & XFS_ALLOC_USERDATA) &&
2670 if (!xfs_alloc_space_available(args, need, alloc_flags |
2694 if (!xfs_alloc_space_available(args, need, alloc_flags))
2698 if (args->alloc_minlen_only) {
2701 error = xfs_exact_minlen_extent_available(args, agbp, &stat);
2743 error = xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo);
2751 targs.agno = args->agno;
2802 args->agbp = agbp;
2811 args->agbp = NULL;
3267 * callers to set up correctly, as well as bounds check the allocation args
3272 struct xfs_alloc_arg *args,
3276 struct xfs_mount *mp = args->mp;
3279 args->fsbno = NULLFSBLOCK;
3282 if (args->tp->t_highest_agno != NULLAGNUMBER)
3283 *minimum_agno = args->tp->t_highest_agno;
3291 if (args->maxlen > agsize)
3292 args->maxlen = agsize;
3293 if (args->alignment == 0)
3294 args->alignment = 1;
3296 ASSERT(args->minlen > 0);
3297 ASSERT(args->maxlen > 0);
3298 ASSERT(args->alignment > 0);
3299 ASSERT(args->resv != XFS_AG_RESV_AGFL);
3303 ASSERT(args->minlen <= args->maxlen);
3304 ASSERT(args->minlen <= agsize);
3305 ASSERT(args->mod < args->prod);
3309 args->minlen > args->maxlen || args->minlen > agsize ||
3310 args->mod >= args->prod) {
3311 trace_xfs_alloc_vextent_badargs(args);
3315 if (args->agno != NULLAGNUMBER && *minimum_agno > args->agno) {
3316 trace_xfs_alloc_vextent_skip_deadlock(args);
3332 struct xfs_alloc_arg *args,
3335 bool need_pag = !args->pag;
3339 args->pag = xfs_perag_get(args->mp, args->agno);
3341 args->agbp = NULL;
3342 error = xfs_alloc_fix_freelist(args, alloc_flags);
3344 trace_xfs_alloc_vextent_nofix(args);
3346 xfs_perag_put(args->pag);
3347 args->agbno = NULLAGBLOCK;
3350 if (!args->agbp) {
3352 trace_xfs_alloc_vextent_noagbp(args);
3353 args->agbno = NULLAGBLOCK;
3356 args->wasfromfl = 0;
3369 struct xfs_alloc_arg *args,
3374 struct xfs_mount *mp = args->mp;
3393 if (args->agbp &&
3394 (args->tp->t_highest_agno == NULLAGNUMBER ||
3395 args->agno > minimum_agno))
3396 args->tp->t_highest_agno = args->agno;
3404 if (alloc_error || args->agbno == NULLAGBLOCK) {
3405 args->fsbno = NULLFSBLOCK;
3410 args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
3412 ASSERT(args->len >= args->minlen);
3413 ASSERT(args->len <= args->maxlen);
3414 ASSERT(args->agbno % args->alignment == 0);
3415 XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno), args->len);
3418 if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
3419 error = xfs_rmap_alloc(args->tp, args->agbp, args->pag,
3420 args->agbno, args->len, &args->oinfo);
3425 if (!args->wasfromfl) {
3426 error = xfs_alloc_update_counters(args->tp, args->agbp,
3427 -((long)(args->len)));
3431 ASSERT(!xfs_extent_busy_search(mp, args->pag, args->agbno,
3432 args->len));
3435 xfs_ag_resv_alloc_extent(args->pag, args->resv, args);
3438 XFS_STATS_ADD(mp, xs_allocb, args->len);
3440 trace_xfs_alloc_vextent_finish(args);
3443 if (drop_perag && args->pag) {
3444 xfs_perag_rele(args->pag);
3445 args->pag = NULL;
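At source line 3410, xfs_alloc_vextent_finish() converts the AG-relative result into a filesystem block number with XFS_AGB_TO_FSB(). A sketch of that encoding, assuming the usual XFS layout in which the AG number sits above sb_agblklog bits of AG-relative block number:

/* Sketch of the agno/agbno -> fsbno conversion; agblklog is assumed to
 * play the role of sb_agblklog. */
typedef unsigned long long fsblock_t;

static fsblock_t
agb_to_fsb(unsigned int agno, unsigned int agbno, unsigned int agblklog)
{
        return ((fsblock_t)agno << agblklog) | agbno;
}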
3455 * Caller is expected to hold a perag reference in args->pag.
3459 struct xfs_alloc_arg *args,
3462 struct xfs_mount *mp = args->mp;
3467 ASSERT(args->pag != NULL);
3468 ASSERT(args->pag->pag_agno == agno);
3470 args->agno = agno;
3471 args->agbno = 0;
3473 trace_xfs_alloc_vextent_this_ag(args);
3475 error = xfs_alloc_vextent_check_args(args, XFS_AGB_TO_FSB(mp, agno, 0),
3483 error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
3484 if (!error && args->agbp)
3485 error = xfs_alloc_ag_vextent_size(args, alloc_flags);
3487 return xfs_alloc_vextent_finish(args, minimum_agno, error, false);
3499 * On return, args->pag may be left referenced if we finish before the "all
3510 struct xfs_alloc_arg *args,
3516 struct xfs_mount *mp = args->mp;
3525 mp->m_sb.sb_agcount, agno, args->pag) {
3526 args->agno = agno;
3527 error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
3530 if (!args->agbp) {
3531 trace_xfs_alloc_vextent_loopfailed(args);
3539 if (args->agno == start_agno && target_agbno) {
3540 args->agbno = target_agbno;
3541 error = xfs_alloc_ag_vextent_near(args, alloc_flags);
3543 args->agbno = 0;
3544 error = xfs_alloc_ag_vextent_size(args, alloc_flags);
3549 xfs_perag_rele(args->pag);
3550 args->pag = NULL;
3553 if (args->agbp)
3567 ASSERT(args->pag == NULL);
3568 trace_xfs_alloc_vextent_allfailed(args);
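Source lines 3499-3568 are from xfs_alloc_vextent_iterate_ags(), which walks every AG starting at start_agno, wrapping around, and takes the locality-aware (near) path only in the starting AG when a target block is given. A hypothetical sketch of that walk, with alloc_near() and alloc_size() as callback stand-ins for the two allocation paths:

/* Hypothetical sketch of the wrap-around AG walk; callbacks are invented. */
static int
iterate_ags(unsigned int agcount, unsigned int start_agno,
            unsigned int target_agbno,
            int (*alloc_near)(unsigned int agno, unsigned int agbno),
            int (*alloc_size)(unsigned int agno))
{
        unsigned int i;

        for (i = 0; i < agcount; i++) {
                unsigned int agno = (start_agno + i) % agcount;
                int found;

                /* locality matters only in the AG holding the target block */
                if (agno == start_agno && target_agbno)
                        found = alloc_near(agno, target_agbno);
                else
                        found = alloc_size(agno);
                if (found)
                        return (int)agno;       /* allocated in this AG */
        }
        return -1;                              /* every AG was tried and failed */
}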
3582 struct xfs_alloc_arg *args,
3585 struct xfs_mount *mp = args->mp;
3593 ASSERT(args->pag == NULL);
3595 args->agno = NULLAGNUMBER;
3596 args->agbno = NULLAGBLOCK;
3598 trace_xfs_alloc_vextent_start_ag(args);
3600 error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3607 if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
3616 error = xfs_alloc_vextent_iterate_ags(args, minimum_agno, start_agno,
3620 if (args->agno == start_agno)
3624 mp->m_agfrotor = (args->agno * rotorstep + 1) %
3628 return xfs_alloc_vextent_finish(args, minimum_agno, error, true);
3638 struct xfs_alloc_arg *args,
3641 struct xfs_mount *mp = args->mp;
3647 ASSERT(args->pag == NULL);
3649 args->agno = NULLAGNUMBER;
3650 args->agbno = NULLAGBLOCK;
3652 trace_xfs_alloc_vextent_first_ag(args);
3654 error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3662 error = xfs_alloc_vextent_iterate_ags(args, minimum_agno, start_agno,
3664 return xfs_alloc_vextent_finish(args, minimum_agno, error, true);
3669 * perag reference in args->pag.
3673 struct xfs_alloc_arg *args,
3676 struct xfs_mount *mp = args->mp;
3680 ASSERT(args->pag != NULL);
3681 ASSERT(args->pag->pag_agno == XFS_FSB_TO_AGNO(mp, target));
3683 args->agno = XFS_FSB_TO_AGNO(mp, target);
3684 args->agbno = XFS_FSB_TO_AGBNO(mp, target);
3686 trace_xfs_alloc_vextent_exact_bno(args);
3688 error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3695 error = xfs_alloc_vextent_prepare_ag(args, 0);
3696 if (!error && args->agbp)
3697 error = xfs_alloc_ag_vextent_exact(args);
3699 return xfs_alloc_vextent_finish(args, minimum_agno, error, false);
3706 * Caller may or may not have a per-ag reference in args->pag.
3710 struct xfs_alloc_arg *args,
3713 struct xfs_mount *mp = args->mp;
3715 bool needs_perag = args->pag == NULL;
3720 ASSERT(args->pag->pag_agno == XFS_FSB_TO_AGNO(mp, target));
3722 args->agno = XFS_FSB_TO_AGNO(mp, target);
3723 args->agbno = XFS_FSB_TO_AGBNO(mp, target);
3725 trace_xfs_alloc_vextent_near_bno(args);
3727 error = xfs_alloc_vextent_check_args(args, target, &minimum_agno);
3735 args->pag = xfs_perag_grab(mp, args->agno);
3737 error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
3738 if (!error && args->agbp)
3739 error = xfs_alloc_ag_vextent_near(args, alloc_flags);
3741 return xfs_alloc_vextent_finish(args, minimum_agno, error, needs_perag);
3751 struct xfs_alloc_arg args;
3754 memset(&args, 0, sizeof(struct xfs_alloc_arg));
3755 args.tp = tp;
3756 args.mp = tp->t_mountp;
3757 args.agno = pag->pag_agno;
3758 args.pag = pag;
3764 if (args.agno >= args.mp->m_sb.sb_agcount)
3767 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
3771 *agbp = args.agbp;