Lines matching refs: locked_ref (btrfs delayed ref head processing). A condensed sketch of the surrounding control flow follows each group of matches below.
1939 struct btrfs_delayed_ref_head *locked_ref)
1950 lockdep_assert_held(&locked_ref->mutex);
1951 lockdep_assert_held(&locked_ref->lock);
1953 while ((ref = select_delayed_ref(locked_ref))) {
1956 spin_unlock(&locked_ref->lock);
1957 unselect_delayed_ref_head(delayed_refs, locked_ref);
1961 rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree);
1972 locked_ref->ref_mod -= ref->ref_mod;
1975 locked_ref->ref_mod += ref->ref_mod;
1986 must_insert_reserved = locked_ref->must_insert_reserved;
1987 locked_ref->must_insert_reserved = false;
1989 extent_op = locked_ref->extent_op;
1990 locked_ref->extent_op = NULL;
1991 spin_unlock(&locked_ref->lock);
1998 unselect_delayed_ref_head(delayed_refs, locked_ref);
2006 spin_lock(&locked_ref->lock);
2007 btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref);
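
The matches at 1939-2007 above all sit inside the routine that drains a single locked head (btrfs_run_delayed_refs_for_head(), per the call at 2054). The sketch below shows how those lines fit together. Only the matched lines are taken from the listing; the connecting logic — the blocked-ref bail-out (ref_is_blocked() is a hypothetical name), the action dispatch, and the run_one_delayed_ref() helper — is assumed from context and may not match the kernel source exactly.

static int run_refs_for_head_sketch(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_head *locked_ref)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_root *delayed_refs = &trans->transaction->delayed_refs;
	struct btrfs_delayed_extent_op *extent_op;
	struct btrfs_delayed_ref_node *ref;
	bool must_insert_reserved;
	int ret;

	/* The caller owns this head: both its mutex and its spinlock (1950-1951). */
	lockdep_assert_held(&locked_ref->mutex);
	lockdep_assert_held(&locked_ref->lock);

	while ((ref = select_delayed_ref(locked_ref))) {
		/* Hypothetical predicate standing in for the check that the
		 * ref cannot run yet; on that path the head is handed back so
		 * another thread can retry it later (1956-1957). */
		if (ref_is_blocked(ref)) {
			spin_unlock(&locked_ref->lock);
			unselect_delayed_ref_head(delayed_refs, locked_ref);
			return -EAGAIN;
		}

		/* Detach the chosen ref and fold its modification count into
		 * the head while still under locked_ref->lock (1961-1975). */
		rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree);
		if (ref->action == BTRFS_ADD_DELAYED_REF ||
		    ref->action == BTRFS_ADD_DELAYED_EXTENT)
			locked_ref->ref_mod -= ref->ref_mod;
		else
			locked_ref->ref_mod += ref->ref_mod;

		/* Steal the one-shot state off the head, then drop the
		 * spinlock before touching the extent tree (1986-1991). */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = false;
		extent_op = locked_ref->extent_op;
		locked_ref->extent_op = NULL;
		spin_unlock(&locked_ref->lock);

		/* Assumed helper that applies one ref to the extent tree;
		 * ref put and extent_op freeing are elided here. */
		ret = run_one_delayed_ref(trans, ref, extent_op,
					  must_insert_reserved);
		if (ret) {
			unselect_delayed_ref_head(delayed_refs, locked_ref);
			return ret;
		}

		/* Refs queued while the lock was dropped get merged before
		 * the next pick (2006-2007). */
		spin_lock(&locked_ref->lock);
		btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref);
	}
	return 0;
}
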
2022 struct btrfs_delayed_ref_head *locked_ref = NULL;
2028 if (!locked_ref) {
2029 locked_ref = btrfs_obtain_ref_head(trans);
2030 if (IS_ERR_OR_NULL(locked_ref)) {
2031 if (PTR_ERR(locked_ref) == -EAGAIN) {
2051 spin_lock(&locked_ref->lock);
2052 btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref);
2054 ret = btrfs_run_delayed_refs_for_head(trans, locked_ref);
2066 ret = cleanup_ref_head(trans, locked_ref);
2081 locked_ref = NULL;
2083 } while ((nr != -1 && count < nr) || locked_ref);
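
The matches at 2022-2083 come from the caller that drives the loop above: it repeatedly obtains a locked head, merges and drains it, then retires it, until nr heads have been processed (all of them when nr is -1). The sketch below is again stitched together from the matched lines only; the -EAGAIN retry handling, the count bookkeeping, and the case where cleanup_ref_head() asks the caller to revisit the head are simplified assumptions.

static int run_delayed_refs_sketch(struct btrfs_trans_handle *trans,
				   unsigned long nr)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_root *delayed_refs = &trans->transaction->delayed_refs;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	unsigned long count = 0;
	int ret;

	do {
		if (!locked_ref) {
			/* Pick the next ready head and lock it (2028-2031). */
			locked_ref = btrfs_obtain_ref_head(trans);
			if (IS_ERR_OR_NULL(locked_ref)) {
				if (PTR_ERR(locked_ref) == -EAGAIN) {
					/* Lost a race for this head; reset and
					 * try another (assumed handling). */
					locked_ref = NULL;
					continue;
				}
				break;	/* NULL: no more heads to run */
			}
			count++;
		}

		/* Merge add/drop pairs queued on the head, then drain it with
		 * the per-head loop sketched above (2051-2054). */
		spin_lock(&locked_ref->lock);
		btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref);

		ret = btrfs_run_delayed_refs_for_head(trans, locked_ref);
		if (ret < 0 && ret != -EAGAIN)
			return ret;	/* the helper already unselected the head */

		if (ret == 0) {
			/* Every queued ref ran; retire the head itself (2066).
			 * The "revisit this head" return value is glossed over. */
			ret = cleanup_ref_head(trans, locked_ref);
			if (ret < 0)
				return ret;
		}

		/* Done with this head (or it was given back on -EAGAIN); the
		 * next iteration selects a fresh one (2081). */
		locked_ref = NULL;
	} while ((nr != -1 && count < nr) || locked_ref);	/* 2083 */

	return 0;
}
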