Lines matching defs:refs
221 * Return 0 when both refs are for the same block (and can be merged).
274 * Add @newref to the @root rbtree, merging identical refs.
303 /* Identical refs, merge them and free @newref */
360 * delayed refs
373 * on disk refs (inline or keyed)
421 /* direct refs use root == 0, key == NULL */
431 /* indirect refs use parent == 0 */
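The matches at 221-431 describe the backref walker's preliminary refs: direct refs are identified by a parent block number (root == 0, key == NULL), indirect refs by a root and key (parent == 0), and refs that compare identically are merged on insert. A minimal userspace sketch of that convention, with simplified fields (the kernel's struct prelim_ref carries more state, such as the search key and wanted disk byte):

    #include <stdlib.h>

    /* Simplified stand-in for the kernel's struct prelim_ref. */
    struct prelim_ref {
        unsigned long long root_id;   /* direct refs use root_id == 0 */
        unsigned long long parent;    /* indirect refs use parent == 0 */
        int level;
        int count;
    };

    /* Return 0 when both refs describe the same block and can be merged. */
    static int ref_for_same_block(const struct prelim_ref *a,
                                  const struct prelim_ref *b)
    {
        if (a->root_id != b->root_id || a->parent != b->parent ||
            a->level != b->level)
            return 1;
        return 0;
    }

    /* Identical refs: merge @newref into @ref and free @newref. */
    static void merge_ref(struct prelim_ref *ref, struct prelim_ref *newref)
    {
        ref->count += newref->count;
        free(newref);
    }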
609 * adding new delayed refs. To deal with this we need to look in cache
720 * We maintain three separate rbtrees: one for direct refs, one for
721 * indirect refs which have a key, and one for indirect refs which do not
725 * indirect refs with missing keys. An appropriate key is located and
726 * the ref is moved onto the tree for indirect refs. After all missing
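The matches at 720-726 describe the three-tree layout. A sketch of a container in that shape, using a placeholder rb_root since this is not kernel code; refs in the missing-keys tree migrate to the indirect tree once their key has been looked up:

    /* Userspace stand-in for the kernel's rb_root. */
    struct rb_root_sk { void *rb_node; };

    struct preftree_sk {
        struct rb_root_sk root;
        unsigned int count;   /* number of refs in this tree */
    };

    /*
     * Direct refs, keyed indirect refs, and indirect refs whose key is
     * not yet known each get their own tree, so each tree can merge on
     * insertion independently.
     */
    struct preftrees_sk {
        struct preftree_sk direct;
        struct preftree_sk indirect;
        struct preftree_sk indirect_missing_keys;
    };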
753 * the tree, allocating new refs for each insertion, and then
830 * We may have inode lists attached to refs in the parents ulist, so we
831 * must free them before freeing the ulist and its refs.
888 * add all currently queued delayed refs from this head whose seq nr is
999 * refs have been checked.
1033 * enumerate all inline refs
1375 * refs) for the given bytenr to the refs list, merges duplicates and resolves
1376 * indirect refs to their parent bytenr.
1446 * lock it so we have a consistent view of the refs at the given
1454 refcount_inc(&head->refs);
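The match at 1454 is part of a common pattern for getting a consistent view of a delayed ref head: if the head's mutex cannot be taken while the list lock is held, the head is pinned with a refcount before the list lock is dropped, so it cannot be freed while the caller blocks on the mutex. A simplified pthread sketch of that pattern (the kernel additionally restarts the lookup, since the head may have been processed in the meantime):

    #include <pthread.h>
    #include <stdatomic.h>

    struct ref_head_sk {
        atomic_int refs;          /* mirrors refcount_inc(&head->refs) */
        pthread_mutex_t mutex;
    };

    /* Called with *list_lock held; returns with head->mutex held. */
    static void lock_head(struct ref_head_sk *head, pthread_mutex_t *list_lock)
    {
        if (pthread_mutex_trylock(&head->mutex) == 0)
            return;
        /* Pin the head so it cannot be freed once the list lock drops. */
        atomic_fetch_add(&head->refs, 1);
        pthread_mutex_unlock(list_lock);
        pthread_mutex_lock(&head->mutex);
        pthread_mutex_lock(list_lock);
        atomic_fetch_sub(&head->refs, 1);
    }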
1583 * This walks the tree of merged and resolved refs. Tree blocks are
1595 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
1597 * identical refs to keep the overall count correct.
1598 * prelim_ref_insert() will merge only those refs
1599 * which compare identically. Any refs having
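The matches at 1583-1599 hinge on counts being signed: refs queued with BTRFS_DROP_DELAYED_REF contribute negatively, so when prelim_ref_insert() merges refs that compare identically, an add and a drop for the same ref cancel to a net count of zero. A one-function sketch of that bookkeeping, with hypothetical names:

    /* Mirrors BTRFS_ADD_DELAYED_REF / BTRFS_DROP_DELAYED_REF. */
    enum ref_action_sk { REF_SK_ADD, REF_SK_DROP };

    /*
     * Fold a delayed ref into the merged count with a sign, so merging
     * identical refs keeps the overall count correct.
     */
    static int signed_count(enum ref_action_sk action, int count)
    {
        return action == REF_SK_ADD ? count : -count;
    }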
1646 ret = ulist_add_merge_ptr(ctx->refs, ref->parent,
1672 * this ref to the ref we added to the 'refs' ulist.
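The match at 1646 relies on ulist_add_merge_ptr() semantics: when the value is already present, the existing entry's aux pointer is handed back so the caller can chain its inode list onto the one already recorded (the situation described at 1672). A toy array-backed version showing just those semantics:

    #include <stddef.h>

    struct entry_sk { unsigned long long val; void *aux; };

    /*
     * Returns 1 if @val was inserted, 0 if it already existed, in which
     * case *old_aux is set to the aux pointer stored with the existing
     * entry so the caller can link further data onto it.
     */
    static int add_merge_ptr(struct entry_sk *tab, size_t *n,
                             unsigned long long val, void *aux,
                             void **old_aux)
    {
        for (size_t i = 0; i < *n; i++) {
            if (tab[i].val == val) {
                *old_aux = tab[i].aux;
                return 0;
            }
        }
        tab[*n].val = val;
        tab[*n].aux = aux;
        (*n)++;
        return 1;
    }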
1697 * added to the ulist at @ctx->refs, and that ulist is allocated by this
1702 * Returns 0 on success and < 0 on error. On error @ctx->refs is not allocated.
1708 ASSERT(ctx->refs == NULL);
1710 ctx->refs = ulist_alloc(GFP_NOFS);
1711 if (!ctx->refs)
1717 free_leaf_list(ctx->refs);
1718 ctx->refs = NULL;
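The matches at 1697-1718 spell out an ownership contract: @ctx->refs must be NULL on entry, is allocated by the callee, and is freed and reset on error so the caller never sees a half-built list. A sketch of that contract with stand-in types:

    #include <assert.h>
    #include <stdlib.h>

    struct ref_list_sk { int unused; };   /* stands in for struct ulist */
    struct walk_ctx_sk { struct ref_list_sk *refs; };

    /* On success @ctx->refs is owned by the caller; on error it is NULL. */
    static int find_refs(struct walk_ctx_sk *ctx)
    {
        int ret = 0;

        assert(ctx->refs == NULL);
        ctx->refs = calloc(1, sizeof(*ctx->refs));
        if (!ctx->refs)
            return -1;            /* would be -ENOMEM in the kernel */

        /* ... the actual walk would fill @ctx->refs and set @ret ... */

        if (ret < 0) {
            free(ctx->refs);
            ctx->refs = NULL;
        }
        return ret;
    }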
1739 * This function requires @ctx->refs to be NULL, as it uses it for allocating a
1752 ASSERT(ctx->refs == NULL);
1754 ctx->refs = ulist_alloc(GFP_NOFS);
1755 if (!ctx->refs)
1761 ulist_free(ctx->refs);
1762 ctx->refs = NULL;
1783 node = ulist_next(ctx->refs, &uiter);
1790 ulist_free(ctx->refs);
1791 ctx->refs = NULL;
1819 ulist_init(&ctx->refs);
1829 ulist_release(&ctx->refs);
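Unlike the heap-allocated list above (matches 1710 and 1761), the list at 1819/1829 is embedded in the context, so it is set up with ulist_init() and torn down with ulist_release() rather than allocated and freed. A toy illustration of the two lifetimes:

    #include <stdlib.h>

    /* Toy stand-in for struct ulist. */
    struct list_sk { void *nodes; size_t nnodes; };

    /* Heap pattern (1710/1761): allocate, then free the whole thing. */
    static struct list_sk *list_alloc(void)
    {
        return calloc(1, sizeof(struct list_sk));
    }
    static void list_free(struct list_sk *l)
    {
        if (l) { free(l->nodes); free(l); }
    }

    /* Embedded pattern (1819/1829): only the contents are released. */
    static void list_init(struct list_sk *l)    { l->nodes = NULL; l->nnodes = 0; }
    static void list_release(struct list_sk *l) { free(l->nodes); list_init(l); }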
1849 * delayed refs, but continues on even when no running transaction exists.
1884 ulist_init(&ctx->refs);
1919 walk_ctx.refs = &ctx->refs;
1925 const unsigned long prev_ref_count = ctx->refs.nnodes;
1944 * the ctx->refs ulist, in which case we have to check multiple
1972 if ((ctx->refs.nnodes - prev_ref_count) > 1)
1978 node = ulist_next(&ctx->refs, &uiter);
2043 ulist_release(&ctx->refs);
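The matches at 1925 and 1972 show how sharedness is detected: snapshot the ref count before resolving one tree block, and if the walk added more than one new ref, the extent is reachable along multiple paths. A sketch of that check:

    #include <stdbool.h>
    #include <stddef.h>

    struct ref_list_sk2 { size_t nnodes; };

    /*
     * Snapshot @prev_ref_count before walking one node; more than one
     * new ref afterwards means the extent must be treated as shared.
     */
    static bool added_multiple_refs(const struct ref_list_sk2 *refs,
                                    size_t prev_ref_count)
    {
        return (refs->nnodes - prev_ref_count) > 1;
    }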
2272 * helper function to iterate extent inline refs. ptr must point to a 0 value
2274 * if more refs exist, 0 is returned and the next call to
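The matches at 2272-2274 document an iterator protocol: the caller hands in a cursor initialized to 0, the helper returns 0 while refs remain and updates the cursor, and the caller feeds it back in. A self-contained sketch under that protocol, where iter_next() is a hypothetical stand-in for the kernel helper:

    #include <stdio.h>

    /*
     * Hypothetical stand-in for the inline-ref helper: walks a static
     * array, using *ptr as an opaque cursor that must start at 0.
     */
    static int iter_next(unsigned long *ptr, int *out)
    {
        static const int refs[] = { 10, 20, 30 };
        if (*ptr >= sizeof(refs) / sizeof(refs[0]))
            return 1;             /* no more refs */
        *out = refs[(*ptr)++];
        return 0;                 /* a ref was produced; call again */
    }

    int main(void)
    {
        unsigned long ptr = 0;    /* must be 0 for the first call */
        int ref;

        while (iter_next(&ptr, &ref) == 0)
            printf("ref %d\n", ref);
        return 0;
    }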
2414 struct ulist *refs;
2448 refs = ctx->refs;
2449 ctx->refs = NULL;
2452 while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
2511 free_leaf_list(refs);
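The matches at 2448-2449 show an ownership handoff: the walker detaches ctx->refs into a local pointer and clears the context field before iterating, and finally tears the list down with free_leaf_list() because the refs may still carry inode lists (matches 830-831). A sketch of detach-then-free with stand-in types:

    #include <stdlib.h>

    struct inode_elem_sk { struct inode_elem_sk *next; };
    struct node_sk {
        unsigned long long val;
        struct inode_elem_sk *aux;    /* attached inode list, if any */
        struct node_sk *next;
    };
    struct list_sk3 { struct node_sk *head; };
    struct ctx_sk { struct list_sk3 *refs; };

    /* Free each ref's attached inode list before the list itself. */
    static void free_leaf_list_sk(struct list_sk3 *l)
    {
        for (struct node_sk *n = l->head; n; ) {
            struct node_sk *next = n->next;
            for (struct inode_elem_sk *e = n->aux; e; ) {
                struct inode_elem_sk *en = e->next;
                free(e);
                e = en;
            }
            free(n);
            n = next;
        }
        free(l);
    }

    static void walk(struct ctx_sk *ctx)
    {
        /* Take ownership so nested walks can reuse @ctx. */
        struct list_sk3 *refs = ctx->refs;
        ctx->refs = NULL;

        /* ... iterate @refs ... */

        free_leaf_list_sk(refs);
    }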
2958 /* We're still inside the inline refs */