Lines Matching defs:cache
185 * to cache the sharedness check result.
609 * adding new delayed refs. To deal with this we need to look in cache
1250 * snapshot field changing while updating or checking the cache.
1269 * Level -1 is used for the data extent, which is not reliable to cache
1271 * realizing. We cache results only for extent buffers that lead from
1278 /* Unused cache entry or being used for some other extent buffer. */
1320 * snapshot field changing while updating or checking the cache.
1340 * Level -1 is used for the data extent, which is not reliable to cache
1342 * realizing. We cache results only for extent buffers that lead from
1358 * If we found an extent buffer is shared, set the cache result for all
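The matches at lines 1250-1358 come from the comments around the per-level sharedness cache used by the data-extent shared check: one slot per tree level, level -1 (the data extent itself) never cached, slots validated against the extent buffer's bytenr, and a positive result propagated to the lower levels. A minimal userspace sketch of that idea follows; the names (shared_cache, lookup_shared_cache, store_shared_cache, MAX_LEVEL) are illustrative rather than the kernel's, and the snapshot-generation validation hinted at by lines 1250/1320 is left out.

#include <stdbool.h>
#include <stdint.h>

#define MAX_LEVEL 8    /* one slot per possible tree level */

struct shared_cache_entry {
	uint64_t bytenr;   /* 0: unused or invalidated slot */
	bool is_shared;
};

struct shared_cache {
	struct shared_cache_entry entries[MAX_LEVEL];
	bool use_cache;    /* cleared once the walk stops being a single path */
};

/* Return true and fill *is_shared only when a valid result is cached. */
static bool lookup_shared_cache(struct shared_cache *cache, uint64_t bytenr,
				int level, bool *is_shared)
{
	struct shared_cache_entry *entry;

	/* Level -1 is the data extent itself and is never cached. */
	if (!cache->use_cache || level < 0 || level >= MAX_LEVEL)
		return false;

	entry = &cache->entries[level];
	/* Unused cache entry or being used for some other extent buffer. */
	if (entry->bytenr != bytenr)
		return false;

	*is_shared = entry->is_shared;
	return true;
}

static void store_shared_cache(struct shared_cache *cache, uint64_t bytenr,
			       int level, bool is_shared)
{
	if (!cache->use_cache || level < 0 || level >= MAX_LEVEL)
		return;

	cache->entries[level].bytenr = bytenr;
	cache->entries[level].is_shared = is_shared;

	/*
	 * A shared node means the whole path below it leads to a shared
	 * extent as well, so flip the lower slots to shared too.
	 */
	if (is_shared) {
		for (int i = 0; i < level; i++)
			cache->entries[i].is_shared = true;
	}
}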
1946 * use the path cache which is made for a single path. Multiple
1960 * cache only works for a single path (by far the most common
2000 * If the path cache is disabled, then it means at some tree level we
2003 * extent. We have to invalidate the cache and cache only the sharedness
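Lines 1946-2003 stress that this cache only describes a single path from the leaf up to the root, and that it has to be invalidated once several paths show up. A short, hedged continuation of the sketch above (same hypothetical types): one helper turns the cache off when a level yields more than one parent, the other wipes it and re-caches only the one result that was determined unambiguously.

/* Disable the cache as soon as some level produced more than one parent. */
static void note_parent_count(struct shared_cache *cache, unsigned int nr_parents)
{
	if (nr_parents > 1)
		cache->use_cache = false;
}

/*
 * If the cache got disabled, its slots may describe nodes from different
 * paths: wipe them and keep only the one result that was determined
 * unambiguously (simplified here to a single level).
 */
static void finalize_shared_cache(struct shared_cache *cache, uint64_t bytenr,
				  int level, bool is_shared)
{
	if (cache->use_cache)
		return;

	for (int i = 0; i < MAX_LEVEL; i++) {
		cache->entries[i].bytenr = 0;
		cache->entries[i].is_shared = false;
	}
	cache->use_cache = true;
	store_shared_cache(cache, bytenr, level, is_shared);
}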
3001 struct btrfs_backref_cache *cache, int is_reloc)
3005 cache->rb_root = RB_ROOT;
3007 INIT_LIST_HEAD(&cache->pending[i]);
3008 INIT_LIST_HEAD(&cache->changed);
3009 INIT_LIST_HEAD(&cache->detached);
3010 INIT_LIST_HEAD(&cache->leaves);
3011 INIT_LIST_HEAD(&cache->pending_edge);
3012 INIT_LIST_HEAD(&cache->useless_node);
3013 cache->fs_info = fs_info;
3014 cache->is_reloc = is_reloc;
3018 struct btrfs_backref_cache *cache, u64 bytenr, int level)
3031 cache->nr_nodes++;
3039 struct btrfs_backref_cache *cache)
3045 cache->nr_edges++;
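The fragments from lines 3001-3045 (btrfs_backref_init_cache plus the node and edge allocators) show the backref cache being set up as an rb-tree of nodes and a set of work lists, with nr_nodes/nr_edges counting live objects. A compact standalone model follows; the names are illustrative, a plain pointer stands in for the rb-tree, and fs_info is omitted.

#include <stdlib.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *head)
{
	head->prev = head->next = head;
}

#define MAX_LEVEL 8

struct backref_cache {
	void *rb_root;                        /* nodes indexed by bytenr */
	struct list_head pending[MAX_LEVEL];  /* per-level work queues */
	struct list_head changed, detached, leaves;
	struct list_head pending_edge, useless_node;
	unsigned long nr_nodes, nr_edges;     /* live-object counters for leak checks */
	int is_reloc;                         /* relocation vs. general-purpose cache */
};

struct backref_node {
	struct list_head list;                /* membership in one of the lists above */
	unsigned long long bytenr;
	int level;
};

struct backref_edge {
	struct backref_node *upper;
	struct backref_node *lower;
};

static void backref_cache_init(struct backref_cache *cache, int is_reloc)
{
	cache->rb_root = NULL;
	for (int i = 0; i < MAX_LEVEL; i++)
		list_init(&cache->pending[i]);
	list_init(&cache->changed);
	list_init(&cache->detached);
	list_init(&cache->leaves);
	list_init(&cache->pending_edge);
	list_init(&cache->useless_node);
	cache->nr_nodes = 0;
	cache->nr_edges = 0;
	cache->is_reloc = is_reloc;
}

/* Every allocation bumps a counter so the release path can assert nothing leaked. */
static struct backref_node *backref_alloc_node(struct backref_cache *cache,
					       unsigned long long bytenr, int level)
{
	struct backref_node *node = calloc(1, sizeof(*node));

	if (node) {
		list_init(&node->list);
		node->bytenr = bytenr;
		node->level = level;
		cache->nr_nodes++;
	}
	return node;
}

static struct backref_edge *backref_alloc_edge(struct backref_cache *cache)
{
	struct backref_edge *edge = calloc(1, sizeof(*edge));

	if (edge)
		cache->nr_edges++;
	return edge;
}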
3050 * Drop the backref node from cache, also cleaning up all its
3054 * be the lowest node in the cache or a detached node.
3056 void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
3072 btrfs_backref_free_edge(cache, edge);
3079 list_add_tail(&upper->lower, &cache->leaves);
3084 btrfs_backref_drop_node(cache, node);
3088 * Release all nodes/edges from current cache
3090 void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
3095 while (!list_empty(&cache->detached)) {
3096 node = list_entry(cache->detached.next,
3098 btrfs_backref_cleanup_node(cache, node);
3101 while (!list_empty(&cache->leaves)) {
3102 node = list_entry(cache->leaves.next,
3104 btrfs_backref_cleanup_node(cache, node);
3107 cache->last_trans = 0;
3110 ASSERT(list_empty(&cache->pending[i]));
3111 ASSERT(list_empty(&cache->pending_edge));
3112 ASSERT(list_empty(&cache->useless_node));
3113 ASSERT(list_empty(&cache->changed));
3114 ASSERT(list_empty(&cache->detached));
3115 ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
3116 ASSERT(!cache->nr_nodes);
3117 ASSERT(!cache->nr_edges);
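btrfs_backref_cleanup_node and btrfs_backref_release_cache (lines 3050-3117) tear the cache down by draining the detached and leaves lists and then asserting that every work list is empty and both counters are back to zero, which is how leaked nodes or edges get caught. A simplified release path for the model above; it reuses struct backref_cache and struct backref_node from the previous sketch and skips the per-node edge cleanup.

#include <assert.h>
#include <stdlib.h>

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->prev = entry->next = entry;
}

static void drop_node(struct backref_cache *cache, struct backref_node *node)
{
	list_del(&node->list);
	free(node);
	cache->nr_nodes--;
}

static void backref_release_cache(struct backref_cache *cache)
{
	/* The cast works because 'list' is the first member of backref_node. */
	while (!list_empty(&cache->detached))
		drop_node(cache, (struct backref_node *)cache->detached.next);

	while (!list_empty(&cache->leaves))
		drop_node(cache, (struct backref_node *)cache->leaves.next);

	/* Everything else must already have been consumed or re-linked. */
	for (int i = 0; i < MAX_LEVEL; i++)
		assert(list_empty(&cache->pending[i]));
	assert(list_empty(&cache->pending_edge));
	assert(list_empty(&cache->useless_node));
	assert(list_empty(&cache->changed));
	assert(!cache->nr_nodes);
	assert(!cache->nr_edges);
}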
3132 static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
3147 /* Only reloc backref cache cares about a specific root */
3148 if (cache->is_reloc) {
3149 root = find_reloc_root(cache->fs_info, cur->bytenr);
3155 * For generic purpose backref cache, reloc root node
3158 list_add(&cur->list, &cache->useless_node);
3163 edge = btrfs_backref_alloc_edge(cache);
3167 rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
3170 upper = btrfs_backref_alloc_node(cache, ref_key->offset,
3173 btrfs_backref_free_edge(cache, edge);
3181 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
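handle_direct_tree_backref (lines 3132-3181) deals with backrefs that carry the parent block's bytenr directly: look the parent up in the cache's rb-tree, allocate a node for it if it is not there yet, link an edge from the current block, and queue that edge on pending_edge so the new parent's own backrefs get resolved later. Lines 3147-3158 add the reloc twist: only the reloc cache maps such a block to a reloc root, while the generic cache marks it useless instead. A hedged sketch of the lookup-or-create step follows, with a linear list standing in for the rb-tree and hypothetical names throughout.

#include <stdint.h>
#include <stdlib.h>

struct node {
	uint64_t bytenr;
	struct node *next;   /* linear index; the kernel keys an rb-tree by bytenr */
	int linked;          /* set once this node's own backrefs have been resolved */
};

struct edge {
	struct node *upper;  /* parent tree block */
	struct node *lower;  /* child tree block */
	struct edge *next_pending;
};

struct graph {
	struct node *nodes;
	struct edge *pending;  /* edges whose upper end still needs resolving */
};

static struct node *find_or_create(struct graph *g, uint64_t bytenr, int *created)
{
	struct node *n;

	for (n = g->nodes; n; n = n->next) {
		if (n->bytenr == bytenr) {
			*created = 0;
			return n;
		}
	}

	n = calloc(1, sizeof(*n));
	if (!n)
		return NULL;
	n->bytenr = bytenr;
	n->next = g->nodes;
	g->nodes = n;
	*created = 1;
	return n;
}

/* One direct backref of @cur: @parent_bytenr comes straight from the key offset. */
static int handle_direct_backref(struct graph *g, struct node *cur,
				 uint64_t parent_bytenr)
{
	struct edge *edge = calloc(1, sizeof(*edge));
	struct node *upper;
	int created;

	if (!edge)
		return -1;
	upper = find_or_create(g, parent_bytenr, &created);
	if (!upper) {
		free(edge);
		return -1;
	}
	edge->lower = cur;
	edge->upper = upper;
	/*
	 * A parent seen for the first time still needs its own backrefs
	 * resolved, so queue the edge (mirroring cache->pending_edge); a
	 * parent that was already cached has been fully processed before.
	 */
	if (created) {
		edge->next_pending = g->pending;
		g->pending = edge;
	}
	return 0;
}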
3206 struct btrfs_backref_cache *cache,
3212 struct btrfs_fs_info *fs_info = cache->fs_info;
3233 * For reloc backref cache, we may ignore reloc root. But for
3234 * general purpose backref cache, we can't rely on
3238 * For general purpose backref cache, reloc root detection is
3240 * bytenr), thus only do such check for reloc cache.
3242 if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
3244 list_add(&cur->list, &cache->useless_node);
3285 cache->is_reloc) {
3287 list_add(&lower->list, &cache->useless_node);
3294 edge = btrfs_backref_alloc_edge(cache);
3302 rb_node = rb_simple_search(&cache->rb_root, eb->start);
3304 upper = btrfs_backref_alloc_node(cache, eb->start,
3308 btrfs_backref_free_edge(cache, edge);
3333 &cache->pending_edge);
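handle_indirect_tree_backref (lines 3206-3333) covers backrefs that only name the owning root: the kernel searches that root down to the current block and links one cached node per level of the path it found, again queueing edges for parents seen for the first time. Per lines 3233-3242, reloc roots are only skipped when filling the reloc cache, since the generic cache cannot rely on that detection. Below is a sketch of the per-level linking loop, continuing the toy graph above and assuming path_bytenr[] already holds the bytenr of each block on the found path; edge ownership outside the pending list is not modelled.

/*
 * Reuses struct graph, struct node, struct edge and find_or_create() from
 * the previous sketch.
 */
static int handle_indirect_backref(struct graph *g, struct node *cur,
				   const uint64_t *path_bytenr,
				   int cur_level, int root_level)
{
	struct node *lower = cur;

	for (int level = cur_level + 1; level <= root_level; level++) {
		struct edge *edge = calloc(1, sizeof(*edge));
		struct node *upper;
		int created;

		if (!edge)
			return -1;
		upper = find_or_create(g, path_bytenr[level], &created);
		if (!upper) {
			free(edge);
			return -1;
		}
		edge->lower = lower;
		edge->upper = upper;

		if (!created) {
			/* Upper was already cached, so the chain above it
			 * exists as well; stop walking up. */
			break;
		}
		/* New upper: queue the edge so its backrefs get resolved too. */
		edge->next_pending = g->pending;
		g->pending = edge;
		lower = upper;
	}
	return 0;
}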
3362 * Add backref node @cur into @cache.
3374 struct btrfs_backref_cache *cache,
3417 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
3465 ret = handle_direct_tree_backref(cache, &key, cur);
3474 ret = handle_indirect_tree_backref(trans, cache, path,
3495 int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
3498 struct list_head *useless_node = &cache->useless_node;
3505 /* Insert this node to cache if it's not COW-only */
3507 rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
3510 btrfs_backref_panic(cache->fs_info, start->bytenr,
3512 list_add_tail(&start->lower, &cache->leaves);
3536 btrfs_backref_free_edge(cache, edge);
3546 * been linked to the cache rb tree.
3547 * So if we have upper->rb_node populated, this means a cache
3573 /* Only cache non-COW-only (subvolume trees) tree blocks */
3575 rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
3578 btrfs_backref_panic(cache->fs_info,
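btrfs_backref_add_tree_node (line 3362 on) dispatches each backref of the current block to one of the two handlers above, and btrfs_backref_finish_upper_links (lines 3495-3578) then drains the queued edges breadth-first: the start node is indexed and put on leaves, an upper whose rb_node is already populated is a cache hit and is skipped, and only non-COW-only (subvolume tree) blocks are inserted into the rb-tree, with a duplicate insertion treated as corruption. The toy version below only models the queue drain and the already-finished short-circuit; the rb-tree insertion and duplicate panic are not modelled.

static void finish_upper_links(struct graph *g, struct node *start)
{
	start->linked = 1;   /* the starting block is indexed first */

	while (g->pending) {
		struct edge *edge = g->pending;
		struct node *upper = edge->upper;

		g->pending = edge->next_pending;
		edge->next_pending = NULL;

		/* An upper that is already linked was reached through a
		 * previously cached chain; nothing more to do for it. */
		if (upper->linked)
			continue;

		upper->linked = 1;
		/* The kernel would re-queue this node's own upper edges here,
		 * so the walk keeps moving towards the root. */
	}
}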
3596 void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
3603 while (!list_empty(&cache->useless_node)) {
3604 lower = list_first_entry(&cache->useless_node,
3608 while (!list_empty(&cache->pending_edge)) {
3609 edge = list_first_entry(&cache->pending_edge,
3615 btrfs_backref_free_edge(cache, edge);
3619 * isn't in the cache, we can free it ourselves.
3623 list_add(&lower->list, &cache->useless_node);
3631 &cache->pending_edge);
3633 list_add(&upper->list, &cache->useless_node);
3636 while (!list_empty(&cache->useless_node)) {
3637 lower = list_first_entry(&cache->useless_node,
3642 btrfs_backref_drop_node(cache, lower);
3645 btrfs_backref_cleanup_node(cache, node);
3646 ASSERT(list_empty(&cache->useless_node) &&
3647 list_empty(&cache->pending_edge));
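btrfs_backref_error_cleanup (lines 3596-3647) is the bail-out path: pending edges are freed, lower nodes not owned by the cache are moved to useless_node, uppers left without lower edges follow them, and useless_node is drained until both lists are empty. The toy model's equivalent is much blunter, since it does not track which nodes the cache already owns; it simply frees whatever the failed build allocated.

/*
 * Error path for the toy graph above: free every queued edge and every node
 * the failed build created. Edges already attached to cached parents are not
 * tracked by this model and are simply forgotten here.
 */
static void backref_error_cleanup(struct graph *g)
{
	while (g->pending) {
		struct edge *edge = g->pending;

		g->pending = edge->next_pending;
		free(edge);
	}

	while (g->nodes) {
		struct node *node = g->nodes;

		g->nodes = node->next;
		free(node);
	}
}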