Lines Matching defs:cache
391 * Caller must ensure @cache is a RAID56 block group.
393 static u64 get_full_stripe_logical(struct btrfs_block_group *cache, u64 bytenr)
401 WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);
407 ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) *
408 cache->full_stripe_len + cache->start;
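A minimal userspace sketch of the rounding done at lines 407-408 above: the offset (bytenr - cache->start) is rounded down to a multiple of full_stripe_len and then offset back by the block group start, which yields the logical address of the full stripe containing bytenr. The helper name and the sample geometry below are illustrative, not taken from the kernel.

#include <stdint.h>
#include <stdio.h>

/*
 * Round a logical address down to the start of the full stripe that
 * contains it, mirroring the div64_u64() math above. bg_start and
 * full_stripe_len describe a hypothetical RAID56 block group.
 */
static uint64_t full_stripe_start(uint64_t bg_start, uint64_t full_stripe_len,
                                  uint64_t bytenr)
{
        return (bytenr - bg_start) / full_stripe_len * full_stripe_len +
               bg_start;
}

int main(void)
{
        uint64_t bg_start = 1048576;         /* made-up block group start */
        uint64_t full_stripe_len = 131072;   /* made-up full stripe length */
        uint64_t bytenr = bg_start + 300000; /* lands inside the third stripe */

        printf("full stripe starts at %llu\n",
               (unsigned long long)full_stripe_start(bg_start, full_stripe_len,
                                                     bytenr));
        return 0;
}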
1165 * of the page cache.
2979 struct btrfs_block_group *cache)
3223 spin_lock(&cache->lock);
3224 if (cache->removed) {
3225 spin_unlock(&cache->lock);
3229 spin_unlock(&cache->lock);
3368 struct btrfs_block_group *cache)
3386 spin_lock(&cache->lock);
3387 if (!cache->removed)
3389 spin_unlock(&cache->lock);
3405 chunk_offset, length, cache);
3432 struct btrfs_block_group *cache;
3495 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3499 if (!cache)
3510 spin_lock(&cache->lock);
3511 if (cache->removed) {
3512 spin_unlock(&cache->lock);
3513 btrfs_put_block_group(cache);
3516 btrfs_freeze_block_group(cache);
3517 spin_unlock(&cache->lock);
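A userspace mock of the pattern visible at lines 3510-3517 (and, in simpler form, at 3223-3229 and 3386-3389): take the block group lock, bail out and drop the reference if the group has already been removed, otherwise freeze it before unlocking. A pthread mutex stands in for the kernel spinlock, and the struct and helpers are simplified placeholders rather than the btrfs API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct mock_block_group {
        pthread_mutex_t lock;
        bool removed;
        int frozen;     /* stand-in for the freeze counter */
        int refs;       /* stand-in for the block group refcount */
};

static void mock_freeze_block_group(struct mock_block_group *cache)
{
        cache->frozen++;
}

static void mock_put_block_group(struct mock_block_group *cache)
{
        cache->refs--;
}

/* Returns false if the group was already removed and was skipped. */
static bool freeze_unless_removed(struct mock_block_group *cache)
{
        pthread_mutex_lock(&cache->lock);
        if (cache->removed) {
                pthread_mutex_unlock(&cache->lock);
                mock_put_block_group(cache);
                return false;
        }
        mock_freeze_block_group(cache);
        pthread_mutex_unlock(&cache->lock);
        return true;
}

int main(void)
{
        struct mock_block_group bg = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .removed = false,
                .refs = 1,
        };

        printf("froze block group: %s\n",
               freeze_unless_removed(&bg) ? "yes" : "no (removed)");
        return 0;
}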
3559 ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
3563 !(cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) {
3572 * for scrub, as later we would use our own cache
3575 * prevent RMW from screwing up our cache.
3581 cache->start);
3588 btrfs_unfreeze_block_group(cache);
3589 btrfs_put_block_group(cache);
3600 btrfs_wait_nocow_writers(cache);
3601 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
3602 cache->length);
3613 found_key.offset, cache);
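The test at line 3563 gates the non-RAID56 path on the block group's profile bits. A small sketch of that kind of mask test follows; the flag values are made up for illustration and are not the btrfs UAPI constants.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Made-up profile bits; the real BTRFS_BLOCK_GROUP_* values live in the UAPI. */
#define MOCK_BG_RAID5       (1ULL << 0)
#define MOCK_BG_RAID6       (1ULL << 1)
#define MOCK_BG_RAID56_MASK (MOCK_BG_RAID5 | MOCK_BG_RAID6)

static bool is_raid56(uint64_t flags)
{
        return (flags & MOCK_BG_RAID56_MASK) != 0;
}

int main(void)
{
        printf("raid5 block group -> raid56 handling: %d\n",
               is_raid56(MOCK_BG_RAID5));
        printf("non-raid56 block group -> raid56 handling: %d\n",
               is_raid56(0));
        return 0;
}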
3653 btrfs_dec_block_group_ro(cache);
3662 spin_lock(&cache->lock);
3663 if (!cache->removed && !cache->ro && cache->reserved == 0 &&
3664 cache->used == 0) {
3665 spin_unlock(&cache->lock);
3668 cache);
3670 btrfs_mark_bg_unused(cache);
3672 spin_unlock(&cache->lock);
3675 btrfs_unfreeze_block_group(cache);
3676 btrfs_put_block_group(cache);
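The condition at lines 3663-3664 decides whether the just-scrubbed block group is still present, writable, and completely empty (nothing used or reserved), and only then hands it to btrfs_mark_bg_unused() at line 3670. A standalone sketch of that predicate, using simplified stand-in types rather than the kernel structures:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mock_block_group {
        bool removed;
        bool ro;
        uint64_t reserved;
        uint64_t used;
};

/* Mirrors the emptiness check before marking a block group unused. */
static bool should_mark_unused(const struct mock_block_group *cache)
{
        return !cache->removed && !cache->ro &&
               cache->reserved == 0 && cache->used == 0;
}

int main(void)
{
        struct mock_block_group empty = { 0 };
        struct mock_block_group busy  = { .used = 4096 };

        printf("empty group -> mark unused: %d\n", should_mark_unused(&empty));
        printf("busy group  -> mark unused: %d\n", should_mark_unused(&busy));
        return 0;
}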