Lines Matching defs:cached
701 struct extent_state *cached;
733 cached = *cached_state;
740 if (cached && extent_state_in_tree(cached) &&
741 cached->start <= start && cached->end > start) {
743 refcount_dec(&cached->refs);
744 state = cached;
748 free_extent_state(cached);
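
The matches at 701-748 above walk through the cached extent_state fast path: the caller-supplied cached state is only trusted if it is still in the tree and still covers start; in that case the reference held by the cache is consumed and the state reused directly, otherwise the stale reference is freed and a normal tree lookup follows. Below is a minimal user-space sketch of that pattern; struct state, lookup_state() and free_state() are simplified stand-ins, not the btrfs API, and @end is treated as exclusive here.

/*
 * Minimal user-space sketch of the cached-state fast path. The types
 * and helpers are hypothetical stand-ins, not the btrfs API.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

struct state {
        unsigned long long start;
        unsigned long long end;         /* first byte past the range */
        int refs;                       /* stand-in for refcount_t */
        bool in_tree;                   /* stand-in for extent_state_in_tree() */
};

/* Fallback path: a real implementation would walk the rb-tree here. */
static struct state *lookup_state(unsigned long long start)
{
        (void)start;
        return NULL;                    /* stub for the sketch */
}

static void free_state(struct state *s)
{
        if (s && --s->refs == 0)
                free(s);
}

/*
 * Return the state covering @start. A still-valid cached pointer lets
 * us skip the tree search; the reference the cache held is consumed
 * either way.
 */
static struct state *get_state(struct state *cached, unsigned long long start)
{
        if (cached && cached->in_tree &&
            cached->start <= start && cached->end > start) {
                cached->refs--;         /* drop the cache's reference ... */
                return cached;          /* ... and reuse the state directly */
        }
        free_state(cached);             /* stale or missing cache */
        return lookup_state(start);     /* fall back to a full lookup */
}
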
1229 * here for the first iteration. We might have a cached state
1432 struct extent_state **cached)
1435 cached, GFP_NOFS, NULL);
2034 * cached. The total number found is returned.
2158 unsigned bits, int filled, struct extent_state *cached)
2165 if (cached && extent_state_in_tree(cached) && cached->start <= start &&
2166 cached->end > start)
2167 node = &cached->rb_node;
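
The matches at 2158-2167 apply the same idea to a range test: a still-valid cached state becomes the starting node of the walk instead of a fresh search from the root. A sketch under the assumption of a sorted, non-overlapping array standing in for the rb-tree; struct state and range_has_bit() are hypothetical.

/*
 * Sketch: seed a range query with a cached hit. The rb-tree is
 * replaced by a sorted array so the example stays self-contained.
 */
#include <stdbool.h>
#include <stddef.h>

struct state {
        unsigned long long start;
        unsigned long long end;         /* exclusive */
        unsigned bits;
};

/*
 * Return true if every byte of [start, end) lies in a state that has
 * @bit set. @states is sorted by start and non-overlapping; @cached,
 * when non-NULL, must point into @states.
 */
static bool range_has_bit(const struct state *states, size_t nr,
                          unsigned long long start, unsigned long long end,
                          unsigned bit, const struct state *cached)
{
        size_t i = 0;

        if (cached && cached->start <= start && cached->end > start) {
                i = (size_t)(cached - states);  /* start at the cached node */
        } else {
                while (i < nr && states[i].end <= start)
                        i++;                    /* linear stand-in for the search */
        }

        for (; i < nr && start < end; i++) {
                if (states[i].start > start)
                        return false;           /* hole in the range */
                if (!(states[i].bits & bit))
                        return false;           /* covered, but bit clear */
                start = states[i].end;          /* advance past this state */
        }
        return start >= end;
}
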
2780 struct extent_state *cached = NULL;
2784 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
2785 unlock_extent_cached_atomic(tree, start, end, &cached);
3192 struct extent_state *cached = NULL;
3200 &cached, GFP_NOFS);
3202 cur + iosize - 1, &cached);
3284 struct extent_state *cached = NULL;
3292 &cached, GFP_NOFS);
3294 cur + iosize - 1, &cached);
4566 bool cached;
4573  * @len and @flags with the cached one.
4574  * And only when we fail to merge, the cached one will be submitted as
4585 if (!cache->cached)
4590 * fiemap extent won't overlap with cached one.
4620 /* Not mergeable, need to submit cached one */
4623 cache->cached = false;
4627 cache->cached = true;
4636 cache->cached = false;
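
The matches at 4566-4636 are the fiemap extent cache: each new extent is first compared with the cached one, extended onto it when both the file offsets and physical offsets are contiguous and the flags match, and only submitted when the merge fails. A minimal sketch of that merge-or-submit pattern; struct fiemap_cache, emit_one() and the field names are stand-ins, and error handling is omitted.

/*
 * Minimal sketch of a merge-or-submit fiemap extent cache.
 * struct fiemap_cache and emit_one() are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fiemap_cache {
        uint64_t offset;        /* file offset of the cached extent */
        uint64_t phys;          /* physical offset of the cached extent */
        uint64_t len;
        uint32_t flags;
        bool cached;            /* true if an extent is waiting to be emitted */
};

/* Stand-in for fiemap_fill_next_extent(): just print the extent. */
static void emit_one(const struct fiemap_cache *c)
{
        printf("extent: off=%llu phys=%llu len=%llu flags=0x%x\n",
               (unsigned long long)c->offset, (unsigned long long)c->phys,
               (unsigned long long)c->len, (unsigned)c->flags);
}

/*
 * Try to merge the new extent with the cached one; only when that
 * fails is the cached extent submitted and replaced by the new one.
 */
static void emit_fiemap_extent(struct fiemap_cache *cache, uint64_t offset,
                               uint64_t phys, uint64_t len, uint32_t flags)
{
        if (!cache->cached)
                goto assign;

        /* Mergeable only if contiguous in both offsets with equal flags. */
        if (cache->offset + cache->len == offset &&
            cache->phys + cache->len == phys &&
            cache->flags == flags) {
                cache->len += len;
                return;
        }

        /* Not mergeable: submit the cached one, then cache the new one. */
        emit_one(cache);
assign:
        cache->offset = offset;
        cache->phys = phys;
        cache->len = len;
        cache->flags = flags;
        cache->cached = true;
}
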
4644 * The last fiemap cache may still be cached in the following case:
4649 * In this case, the first extent range will be cached but not emitted.
4657 if (!cache->cached)
4662 cache->cached = false;
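
The matches at 4644-4662 handle the case the comment describes: when the requested range ends inside the first extent, the loop finishes with that extent still cached, so it must be emitted once more before returning. A short continuation of the sketch above, reusing its hypothetical struct fiemap_cache, emit_fiemap_extent() and emit_one().

/*
 * Continuation of the sketch above: flush whatever is still cached
 * before the fiemap call returns.
 */
static void emit_last_fiemap_cache(struct fiemap_cache *cache)
{
        if (!cache->cached)
                return;                 /* nothing pending */
        cache->cached = false;          /* the cache is consumed */
        emit_one(cache);                /* submit the final extent */
}

int main(void)
{
        struct fiemap_cache cache = { 0 };

        /*
         * Fiemap range 0..4K, but the first extent covers 0..8K: the
         * loop body only caches it ...
         */
        emit_fiemap_extent(&cache, 0, 1 << 20, 8192, 0);
        /* ... so nothing has been printed yet; the final flush emits it. */
        emit_last_fiemap_cache(&cache);
        return 0;
}
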