Lines Matching defs:cache
6 * User extended attribute client side cache functions.
21 * a cache structure attached to NFS inodes. This structure is allocated
22 * when needed, and freed when the cache is zapped.
24 * The cache structure contains a hash table of entries, and a pointer
25 * to a special-cased entry for the listxattr cache.
28 * counting. The cache entries use a similar refcounting scheme.
30 * This makes freeing a cache, both from the shrinker and from the
31 * zap cache path, easy. It also means that, in current use cases,
40 * Two shrinkers deal with the cache entries themselves: one for
45 * The other shrinker frees the cache structures themselves.
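
The design notes above describe a per-inode cache with a hash table of entries, a special-cased listxattr entry, kref-based reference counting, and LRU-driven shrinkers. Collecting the field accesses that appear throughout this listing gives roughly the layout below; this is a reconstructed sketch, not the file's definition, and the bucket count, field order, and the shape of struct nfs4_xattr_entry are assumptions. The usual kernel headers (<linux/kref.h>, <linux/list_lru.h>, <linux/spinlock.h>, <linux/jhash.h>) are assumed to be in scope here and in the later sketches.

struct nfs4_xattr_bucket {
    spinlock_t lock;                        /* protects hlist and draining */
    struct hlist_head hlist;                /* entries hashed to this bucket */
    struct nfs4_xattr_cache *cache;         /* back-pointer, set in hash_init() */
    bool draining;                          /* true while the cache is being torn down */
};

#define NFS4_XATTR_HASH_SIZE 64             /* assumption: a power of two; real value not shown */

struct nfs4_xattr_cache {
    struct kref ref;                        /* cache lifetime */
    struct nfs4_xattr_bucket buckets[NFS4_XATTR_HASH_SIZE];
    struct list_head lru;                   /* on nfs4_xattr_cache_lru while linked to an inode */
    struct list_head dispose;               /* temporary list used by the cache shrinker */
    atomic_long_t nent;                     /* number of hashed entries */
    spinlock_t listxattr_lock;              /* protects listxattr */
    struct inode *inode;                    /* owning inode, NULL once unlinked */
    struct nfs4_xattr_entry *listxattr;     /* special-cased listxattr entry */
};
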
64 struct nfs4_xattr_cache *cache;
106 nfs4_xattr_hash_init(struct nfs4_xattr_cache *cache)
111 INIT_HLIST_HEAD(&cache->buckets[i].hlist);
112 spin_lock_init(&cache->buckets[i].lock);
113 cache->buckets[i].cache = cache;
114 cache->buckets[i].draining = false;
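
The four lines above are the per-bucket initialization; reconstructing the enclosing loop (the counter name and the use of ARRAY_SIZE() are assumptions, but follow from the bucket indexing elsewhere in this listing):

static void
nfs4_xattr_hash_init(struct nfs4_xattr_cache *cache)
{
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(cache->buckets); i++) {
        INIT_HLIST_HEAD(&cache->buckets[i].hlist);
        spin_lock_init(&cache->buckets[i].lock);
        cache->buckets[i].cache = cache;
        cache->buckets[i].draining = false;
    }
}
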
125 * Wrapper functions to add a cache entry to the right LRU.
150 * This function allocates cache entries. They are the normal
152 * cache. Those allocations use the same entry so that they can be
155 * xattr cache entries are allocated together with names. If the
163 * @name: Name of the extended attribute. NULL for listxattr cache
165 * @value: Value of attribute, or listxattr cache. NULL if the
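
Per the comment, an entry and its attribute name share one allocation, and a NULL name marks the listxattr entry. A minimal, hypothetical sketch of that layout; the xattr_name field also appears in the hash-add fragment below, everything else here is assumed:

    struct nfs4_xattr_entry *entry;
    size_t slen = (name != NULL) ? strlen(name) + 1 : 0;

    /* One allocation: entry header followed by the NUL-terminated name. */
    entry = kmalloc(sizeof(*entry) + slen, GFP_KERNEL);
    if (entry == NULL)
        return NULL;

    if (name != NULL) {
        entry->xattr_name = (char *)(entry + 1);
        memcpy(entry->xattr_name, name, slen);
    } else {
        entry->xattr_name = NULL;           /* listxattr cache entry */
    }
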
270 struct nfs4_xattr_cache *cache;
273 cache = container_of(kref, struct nfs4_xattr_cache, ref);
276 if (WARN_ON(!hlist_empty(&cache->buckets[i].hlist)))
278 cache->buckets[i].draining = false;
281 cache->listxattr = NULL;
283 kmem_cache_free(nfs4_xattr_cache_cachep, cache);
290 struct nfs4_xattr_cache *cache;
292 cache = kmem_cache_alloc(nfs4_xattr_cache_cachep, GFP_KERNEL);
293 if (cache == NULL)
296 kref_init(&cache->ref);
297 atomic_long_set(&cache->nent, 0);
299 return cache;
303 * Set the listxattr cache, which is a special-cased cache entry.
305 * the cache is being drained - this prevents a new listxattr
306 * cache from being added to what is now a stale cache.
309 nfs4_xattr_set_listcache(struct nfs4_xattr_cache *cache,
315 spin_lock(&cache->listxattr_lock);
317 old = cache->listxattr;
324 cache->listxattr = new;
333 spin_unlock(&cache->listxattr_lock);
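
The fragments above show the listxattr entry being swapped under listxattr_lock. Pieced together with the draining check described in the comment, the function looks roughly like this; the old entry's refcount field and free callback name are assumptions, not taken from this listing:

static int
nfs4_xattr_set_listcache(struct nfs4_xattr_cache *cache,
                         struct nfs4_xattr_entry *new)
{
    struct nfs4_xattr_entry *old;
    int ret = 1;

    spin_lock(&cache->listxattr_lock);

    old = cache->listxattr;

    if (old == ERR_PTR(-ESTALE)) {
        /* The cache is being drained: refuse to install anything new. */
        ret = 0;
        goto out;
    }

    cache->listxattr = new;
    if (old != NULL)
        kref_put(&old->ref, nfs4_xattr_free_entry_cb);  /* assumed entry free callback */
out:
    spin_unlock(&cache->listxattr_lock);

    return ret;
}
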
339 * Unlink a cache from its parent inode, clearing out an invalid
340 * cache. Must be called with i_lock held.
363 * Discard a cache. Called by get_cache() if there was an old,
364 * invalid cache. Can also be called from a shrinker callback.
366 * The cache is dead, it has already been unlinked from its inode,
367 * and no longer appears on the cache LRU list.
375 * any way to 'find' this cache. Then, remove the entries from the hash
378 * At that point, the cache will remain empty and can be freed when the final
384 nfs4_xattr_discard_cache(struct nfs4_xattr_cache *cache)
391 nfs4_xattr_set_listcache(cache, ERR_PTR(-ESTALE));
394 bucket = &cache->buckets[i];
406 atomic_long_set(&cache->nent, 0);
408 kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
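
Pieced together, the discard path poisons the listxattr slot with the ERR_PTR(-ESTALE) sentinel, then marks every bucket as draining and unhashes its entries before dropping the cache's own reference. A sketch; the entry's hlist link and free callback names are assumptions:

static void
nfs4_xattr_discard_cache(struct nfs4_xattr_cache *cache)
{
    unsigned int i;
    struct nfs4_xattr_entry *entry;
    struct nfs4_xattr_bucket *bucket;
    struct hlist_node *n;

    nfs4_xattr_set_listcache(cache, ERR_PTR(-ESTALE));

    for (i = 0; i < ARRAY_SIZE(cache->buckets); i++) {
        bucket = &cache->buckets[i];

        spin_lock(&bucket->lock);
        bucket->draining = true;
        hlist_for_each_entry_safe(entry, n, &bucket->hlist, hnode) {
            hlist_del_init(&entry->hnode);
            kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
        }
        spin_unlock(&bucket->lock);
    }

    atomic_long_set(&cache->nent, 0);

    kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}
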
412 * Get a referenced copy of the cache structure. Avoid doing allocs
416 * This function only checks the NFS_INO_INVALID_XATTR cache validity bit
417 * and acts accordingly, replacing the cache when needed. For the read case
418 * (!add), this means that the caller must make sure that the cache
420 * revalidate_inode to do this. The attribute cache timeout (for the
429 struct nfs4_xattr_cache *cache, *oldcache, *newcache;
433 cache = oldcache = NULL;
440 cache = nfsi->xattr_cache;
442 if (cache != NULL)
443 kref_get(&cache->ref);
447 if (add && cache == NULL) {
450 cache = nfs4_xattr_alloc_cache();
451 if (cache == NULL)
457 * The cache was invalidated again. Give up,
462 kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
463 cache = NULL;
474 kref_get(&cache->ref);
475 nfsi->xattr_cache = cache;
476 cache->inode = inode;
477 list_lru_add(&nfs4_xattr_cache_lru, &cache->lru);
483 * If there was a race, throw away the cache we just
488 kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
489 cache = newcache;
495 * Discard the now orphaned old cache.
500 return cache;
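
From the entry points later in this listing, the calling convention is: nfs4_xattr_get_cache(inode, add) returns a referenced cache (allocating and linking a fresh one only when add is nonzero), and every caller balances the reference with kref_put(). A minimal usage sketch; the errno returned on a missing cache is an assumption:

    struct nfs4_xattr_cache *cache;

    cache = nfs4_xattr_get_cache(inode, 0);     /* read path: never allocate */
    if (cache == NULL)
        return -ENOENT;                         /* assumption */

    /* ... look up, add, or remove entries ... */

    kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
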
504 nfs4_xattr_hash_bucket(struct nfs4_xattr_cache *cache, const char *name)
506 return &cache->buckets[jhash(name, strlen(name), 0) &
507 (ARRAY_SIZE(cache->buckets) - 1)];
526 nfs4_xattr_hash_add(struct nfs4_xattr_cache *cache,
533 bucket = nfs4_xattr_hash_bucket(cache, entry->xattr_name);
548 atomic_long_inc(&cache->nent);
564 nfs4_xattr_hash_remove(struct nfs4_xattr_cache *cache, const char *name)
569 bucket = nfs4_xattr_hash_bucket(cache, name);
577 atomic_long_dec(&cache->nent);
587 nfs4_xattr_hash_find(struct nfs4_xattr_cache *cache, const char *name)
592 bucket = nfs4_xattr_hash_bucket(cache, name);
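
hash_bucket() maps an attribute name to a bucket with jhash(), and hash_find() then walks that bucket's hlist under the bucket lock. A sketch of the lookup; the hnode link, the strcmp-based match, and taking a reference on the result are assumptions consistent with the rest of the listing:

static struct nfs4_xattr_entry *
nfs4_xattr_hash_find(struct nfs4_xattr_cache *cache, const char *name)
{
    struct nfs4_xattr_bucket *bucket;
    struct nfs4_xattr_entry *entry;

    bucket = nfs4_xattr_hash_bucket(cache, name);

    spin_lock(&bucket->lock);

    hlist_for_each_entry(entry, &bucket->hlist, hnode) {
        if (!strcmp(entry->xattr_name, name))
            break;
    }
    /* entry is NULL here if the loop ran off the end of the bucket. */
    if (entry != NULL)
        kref_get(&entry->ref);

    spin_unlock(&bucket->lock);

    return entry;
}
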
606 * Entry point to retrieve an entry from the cache.
611 struct nfs4_xattr_cache *cache;
615 cache = nfs4_xattr_get_cache(inode, 0);
616 if (cache == NULL)
620 entry = nfs4_xattr_hash_find(cache, name);
623 dprintk("%s: cache hit '%s', len %lu\n", __func__,
636 dprintk("%s: cache miss '%s'\n", __func__, name);
640 kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
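
On a hit the cached value is copied into the caller's buffer and the entry reference is dropped; on a miss the function returns an error so the caller falls back to a GETXATTR to the server. A hedged sketch of what sits between the two dprintk() fragments; buf/buflen are the assumed caller-supplied buffer and size, and the value/length field names, the -ERANGE/-ENOENT returns, and the entry free callback are assumptions:

    ssize_t ret;

    entry = nfs4_xattr_hash_find(cache, name);
    if (entry != NULL) {
        dprintk("%s: cache hit '%s', len %lu\n", __func__,
                entry->xattr_name, (unsigned long)entry->xattr_size);
        if (buflen == 0) {
            ret = entry->xattr_size;            /* size probe */
        } else if (entry->xattr_size > buflen) {
            ret = -ERANGE;                      /* caller's buffer too small */
        } else {
            memcpy(buf, entry->xattr_value, entry->xattr_size);
            ret = entry->xattr_size;
        }
        kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
    } else {
        dprintk("%s: cache miss '%s'\n", __func__, name);
        ret = -ENOENT;
    }
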
646 * Retrieve a cached list of xattrs from the cache.
650 struct nfs4_xattr_cache *cache;
654 cache = nfs4_xattr_get_cache(inode, 0);
655 if (cache == NULL)
658 spin_lock(&cache->listxattr_lock);
660 entry = cache->listxattr;
676 spin_unlock(&cache->listxattr_lock);
678 kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
684 * Add an xattr to the cache.
686 * This also invalidates the xattr list cache.
691 struct nfs4_xattr_cache *cache;
697 cache = nfs4_xattr_get_cache(inode, 1);
698 if (cache == NULL)
705 (void)nfs4_xattr_set_listcache(cache, NULL);
707 if (!nfs4_xattr_hash_add(cache, entry))
711 kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
716 * Remove an xattr from the cache.
718 * This also invalidates the xattr list cache.
722 struct nfs4_xattr_cache *cache;
726 cache = nfs4_xattr_get_cache(inode, 0);
727 if (cache == NULL)
730 (void)nfs4_xattr_set_listcache(cache, NULL);
731 nfs4_xattr_hash_remove(cache, name);
733 kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
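
The add and remove entry points follow the same shape; remove is almost fully visible above and reconstructs to something like the following (add differs mainly in passing add = 1 and calling nfs4_xattr_hash_add() with a freshly allocated entry). The function name, return type, and trace line are inferred rather than shown:

void
nfs4_xattr_cache_remove(struct inode *inode, const char *name)
{
    struct nfs4_xattr_cache *cache;

    dprintk("%s: remove '%s'\n", __func__, name);   /* assumed trace line */

    cache = nfs4_xattr_get_cache(inode, 0);
    if (cache == NULL)
        return;

    /* Any cached listxattr reply is now stale as well. */
    (void)nfs4_xattr_set_listcache(cache, NULL);
    nfs4_xattr_hash_remove(cache, name);

    kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}
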
742 struct nfs4_xattr_cache *cache;
745 cache = nfs4_xattr_get_cache(inode, 1);
746 if (cache == NULL)
754 * This is just there to be able to get to bucket->cache,
758 entry->bucket = &cache->buckets[0];
760 if (!nfs4_xattr_set_listcache(cache, entry))
764 kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
768 * Zap the entire cache. Called when an inode is evicted.
783 * The entry LRU is shrunk more aggressively than the cache LRU,
828 struct nfs4_xattr_cache *cache = container_of(item,
831 if (atomic_long_read(&cache->nent) > 1)
835 * If a cache structure is on the LRU list, we know that
839 inode = cache->inode;
844 kref_get(&cache->ref);
846 cache->inode = NULL;
849 list_lru_isolate(lru, &cache->lru);
853 list_add_tail(&cache->dispose, dispose);
862 struct nfs4_xattr_cache *cache;
867 cache = list_first_entry(&dispose, struct nfs4_xattr_cache,
869 list_del_init(&cache->dispose);
870 nfs4_xattr_discard_cache(cache);
871 kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
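
The cache shrinker works in two phases: an LRU-walk callback that isolates victim caches onto a private dispose list (taking an extra reference and detaching them from their inode, as in the fragments above), and a scan function that then discards each cache outside the LRU lock. A sketch of the scan side; the function name, the isolate callback name, and the shrinker plumbing are assumptions, while the dispose-list handling mirrors the fragments:

static unsigned long
nfs4_xattr_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
    LIST_HEAD(dispose);
    unsigned long freed;
    struct nfs4_xattr_cache *cache;

    freed = list_lru_shrink_walk(&nfs4_xattr_cache_lru, sc,
                                 cache_lru_isolate, &dispose);
    while (!list_empty(&dispose)) {
        cache = list_first_entry(&dispose, struct nfs4_xattr_cache,
                                 dispose);
        list_del_init(&cache->dispose);
        nfs4_xattr_discard_cache(cache);
        kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
    }

    return freed;
}
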
893 struct nfs4_xattr_cache *cache;
898 cache = bucket->cache;
901 * Unhook the entry from its parent (either a cache bucket
902 * or a cache structure if it's a listxattr buf), so that
910 /* Regular cache entry */
917 atomic_long_dec(&cache->nent);
922 /* Listxattr cache entry */
923 if (!spin_trylock(&cache->listxattr_lock))
928 cache->listxattr = NULL;
931 spin_unlock(&cache->listxattr_lock);
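
The entry isolate callback unhooks an entry from its parent before putting it on the dispose list: a regular entry comes off its hash bucket, while the listxattr entry is cleared from cache->listxattr. It uses spin_trylock() and skips the entry if the lock is contended, presumably to avoid lock-ordering problems from shrinker context. A sketch of that callback; the list_lru callback signature has varied across kernel versions, and the entry's lru, dispose, hnode, and ref fields are assumptions consistent with the earlier sketches:

static enum lru_status
entry_lru_isolate(struct list_head *item,
                  struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
    struct list_head *dispose = arg;
    struct nfs4_xattr_bucket *bucket;
    struct nfs4_xattr_cache *cache;
    struct nfs4_xattr_entry *entry = container_of(item,
                                         struct nfs4_xattr_entry, lru);

    bucket = entry->bucket;
    cache = bucket->cache;

    if (entry->xattr_name != NULL) {
        /* Regular cache entry: unhash it from its bucket. */
        if (!spin_trylock(&bucket->lock))
            return LRU_SKIP;

        kref_get(&entry->ref);              /* assumed: hold a ref for the dispose list */
        hlist_del_init(&entry->hnode);
        atomic_long_dec(&cache->nent);

        spin_unlock(&bucket->lock);
    } else {
        /* Listxattr cache entry: clear the special-cased pointer. */
        if (!spin_trylock(&cache->listxattr_lock))
            return LRU_SKIP;

        kref_get(&entry->ref);
        cache->listxattr = NULL;

        spin_unlock(&cache->listxattr_lock);
    }

    list_lru_isolate(lru, &entry->lru);
    list_add_tail(&entry->dispose, dispose);

    return LRU_REMOVED;
}
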
984 struct nfs4_xattr_cache *cache = p;
986 spin_lock_init(&cache->listxattr_lock);
987 atomic_long_set(&cache->nent, 0);
988 nfs4_xattr_hash_init(cache);
989 cache->listxattr = NULL;
990 INIT_LIST_HEAD(&cache->lru);
991 INIT_LIST_HEAD(&cache->dispose);
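
These last fragments are the slab constructor: it runs once per object when the slab is populated and establishes the invariants every fresh cache relies on (locks, empty buckets, empty lists, zero entry count). A reconstruction, plus a hedged example of how such a constructor is typically registered with kmem_cache_create(); the constructor's name, the slab name, and the SLAB flags are assumptions:

static void nfs4_xattr_cache_init_once(void *p)
{
    struct nfs4_xattr_cache *cache = p;

    spin_lock_init(&cache->listxattr_lock);
    atomic_long_set(&cache->nent, 0);
    nfs4_xattr_hash_init(cache);
    cache->listxattr = NULL;
    INIT_LIST_HEAD(&cache->lru);
    INIT_LIST_HEAD(&cache->dispose);
}

/* Usage sketch: hooking the constructor up to the slab cache used by
 * nfs4_xattr_alloc_cache()/nfs4_xattr_free_cache_cb() above. */
nfs4_xattr_cache_cachep = kmem_cache_create("nfs4_xattr_cache",
                                            sizeof(struct nfs4_xattr_cache), 0,
                                            SLAB_RECLAIM_ACCOUNT,
                                            nfs4_xattr_cache_init_once);
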