Lines Matching refs:cache

6  * User extended attribute client side cache functions.
21 * a cache structure attached to NFS inodes. This structure is allocated
22 * when needed, and freed when the cache is zapped.
24 * The cache structure contains a hash table of entries, and a pointer
25 * to a special-cased entry for the listxattr cache.
28 * counting. The cache entries use a similar refcounting scheme.
30 * This makes freeing a cache, both from the shrinker and from the
31 * zap cache path, easy. It also means that, in current use cases,
40 * Two shrinkers deal with the cache entries themselves: one for
45 * The other shrinker frees the cache structures themselves.
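
The fields referenced throughout this listing (ref, buckets[], nent, listxattr,
listxattr_lock, inode, lru, dispose) let the per-inode cache and its hash
buckets be reconstructed roughly as below. This is a sketch only; the exact
field order, types and the NFS4_XATTR_HASH_SIZE constant are assumptions.

struct nfs4_xattr_bucket {
        spinlock_t lock;
        struct hlist_head hlist;
        struct nfs4_xattr_cache *cache; /* back pointer, see buckets[i].cache */
        bool draining;                  /* set while the cache is torn down */
};

struct nfs4_xattr_cache {
        struct kref ref;                /* kref_get()/kref_put() below */
        struct nfs4_xattr_bucket buckets[NFS4_XATTR_HASH_SIZE];
        struct list_head lru;           /* linked on nfs4_xattr_cache_lru */
        struct list_head dispose;       /* shrinker dispose list */
        atomic_long_t nent;             /* number of hashed entries */
        spinlock_t listxattr_lock;
        struct inode *inode;            /* owning inode, NULL once isolated */
        struct nfs4_xattr_entry *listxattr;     /* special listxattr entry */
};
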
64 struct nfs4_xattr_cache *cache;
106 nfs4_xattr_hash_init(struct nfs4_xattr_cache *cache)
111 INIT_HLIST_HEAD(&cache->buckets[i].hlist);
112 spin_lock_init(&cache->buckets[i].lock);
113 cache->buckets[i].cache = cache;
114 cache->buckets[i].draining = false;
125 * Wrapper functions to add a cache entry to the right LRU.
150 * This function allocates cache entries. They are the normal
152 * cache. Those allocations use the same entry so that they can be
155 * xattr cache entries are allocated together with names. If the
163 * @name: Name of the extended attribute. NULL for listxattr cache
165 * @value: Value of attribute, or listxattr cache. NULL if the
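
The allocator comment above is truncated by the matching, but the surviving
lines carry the design: a regular entry and its name share one allocation, and
the same entry type doubles as the listxattr buffer (name == NULL). A minimal
sketch of that idea follows; the xattr_value/xattr_size fields, the helper
name and the simplified error handling are assumptions, not the file's actual
code.

static struct nfs4_xattr_entry *
xattr_alloc_entry_sketch(const char *name, const void *value, size_t len)
{
        struct nfs4_xattr_entry *entry;
        size_t namelen = name ? strlen(name) + 1 : 0;

        /* Entry header and name string come from a single allocation. */
        entry = kzalloc(sizeof(*entry) + namelen, GFP_KERNEL);
        if (entry == NULL)
                return NULL;

        if (name) {
                entry->xattr_name = (char *)(entry + 1);
                memcpy(entry->xattr_name, name, namelen);
        } else {
                entry->xattr_name = NULL;       /* listxattr cache entry */
        }

        /* The value (or listxattr blob) may be large, so allocate it apart. */
        entry->xattr_value = kvmalloc(len, GFP_KERNEL);
        if (entry->xattr_value == NULL) {
                kfree(entry);
                return NULL;
        }
        if (value)
                memcpy(entry->xattr_value, value, len);
        entry->xattr_size = len;

        return entry;
}
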
270 struct nfs4_xattr_cache *cache;
273 cache = container_of(kref, struct nfs4_xattr_cache, ref);
276 if (WARN_ON(!hlist_empty(&cache->buckets[i].hlist)))
278 cache->buckets[i].draining = false;
281 cache->listxattr = NULL;
283 kmem_cache_free(nfs4_xattr_cache_cachep, cache);
290 struct nfs4_xattr_cache *cache;
292 cache = kmem_cache_alloc(nfs4_xattr_cache_cachep,
294 if (cache == NULL)
297 kref_init(&cache->ref);
298 atomic_long_set(&cache->nent, 0);
300 return cache;
304 * Set the listxattr cache, which is a special-cased cache entry.
306 * the cache is being drained - this prevents a new listxattr
307 * cache from being added to what is now a stale cache.
310 nfs4_xattr_set_listcache(struct nfs4_xattr_cache *cache,
316 spin_lock(&cache->listxattr_lock);
318 old = cache->listxattr;
325 cache->listxattr = new;
334 spin_unlock(&cache->listxattr_lock);
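
Only part of lines 304-334 matches, but the mechanism is visible: the
listxattr slot is protected by listxattr_lock, and ERR_PTR(-ESTALE) is stored
there as a sentinel while the cache drains, so a racing lister cannot install
a fresh listxattr entry into a cache that is already dead. A hedged sketch,
with hypothetical entry get/put helpers standing in for the real reference
handling:

static int
nfs4_xattr_set_listcache(struct nfs4_xattr_cache *cache,
                         struct nfs4_xattr_entry *new)
{
        struct nfs4_xattr_entry *old;
        int ret = 1;

        spin_lock(&cache->listxattr_lock);

        old = cache->listxattr;
        if (old == ERR_PTR(-ESTALE)) {
                /* Cache is draining: refuse to install anything new. */
                ret = 0;
                goto out;
        }

        cache->listxattr = new;
        if (new != NULL && new != ERR_PTR(-ESTALE))
                xattr_entry_get_sketch(new);    /* hypothetical helper */
        if (old != NULL)
                xattr_entry_put_sketch(old);    /* hypothetical helper */
out:
        spin_unlock(&cache->listxattr_lock);

        return ret;
}
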
340 * Unlink a cache from its parent inode, clearing out an invalid
341 * cache. Must be called with i_lock held.
364 * Discard a cache. Called by get_cache() if there was an old,
365 * invalid cache. Can also be called from a shrinker callback.
367 * The cache is dead, it has already been unlinked from its inode,
368 * and no longer appears on the cache LRU list.
376 * any way to 'find' this cache. Then, remove the entries from the hash
379 * At that point, the cache will remain empty and can be freed when the final
385 nfs4_xattr_discard_cache(struct nfs4_xattr_cache *cache)
392 nfs4_xattr_set_listcache(cache, ERR_PTR(-ESTALE));
395 bucket = &cache->buckets[i];
407 atomic_long_set(&cache->nent, 0);
409 kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
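
Assembled from the surviving lines 385-409, the discard path marks the
listxattr slot stale, drains every bucket under its lock with draining set so
nothing can be re-added, resets nent, and drops the reference taken at
allocation. Approximately as below; the entry's hlist member and the teardown
helper are hypothetical names.

static void
nfs4_xattr_discard_cache(struct nfs4_xattr_cache *cache)
{
        unsigned int i;
        struct nfs4_xattr_entry *entry;
        struct nfs4_xattr_bucket *bucket;
        struct hlist_node *n;

        nfs4_xattr_set_listcache(cache, ERR_PTR(-ESTALE));

        for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
                bucket = &cache->buckets[i];

                spin_lock(&bucket->lock);
                bucket->draining = true;
                hlist_for_each_entry_safe(entry, n, &bucket->hlist, hnode) {
                        hlist_del_init(&entry->hnode);
                        xattr_entry_put_sketch(entry);  /* hypothetical */
                }
                spin_unlock(&bucket->lock);
        }

        atomic_long_set(&cache->nent, 0);

        kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}
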
413 * Get a referenced copy of the cache structure. Avoid doing allocs
417 * This function only checks the NFS_INO_INVALID_XATTR cache validity bit
418 * and acts accordingly, replacing the cache when needed. For the read case
419 * (!add), this means that the caller must make sure that the cache
421 * revalidate_inode to do this. The attribute cache timeout (for the
430 struct nfs4_xattr_cache *cache, *oldcache, *newcache;
434 cache = oldcache = NULL;
441 cache = nfsi->xattr_cache;
443 if (cache != NULL)
444 kref_get(&cache->ref);
448 if (add && cache == NULL) {
451 cache = nfs4_xattr_alloc_cache();
452 if (cache == NULL)
458 * The cache was invalidated again. Give up,
463 kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
464 cache = NULL;
475 kref_get(&cache->ref);
476 nfsi->xattr_cache = cache;
477 cache->inode = inode;
478 list_lru_add(&nfs4_xattr_cache_lru, &cache->lru);
484 * If there was a race, throw away the cache we just
489 kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
490 cache = newcache;
496 * Discard the now orphaned old cache.
501 return cache;
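
Lines 413-501 are heavily elided, but the surviving references outline the
pattern: look up nfsi->xattr_cache under i_lock and take a reference if it is
still valid; for the add case, allocate a replacement outside the lock,
re-take i_lock, and either publish it or, if another task won the race, drop
it and use theirs; an invalidated old cache is unhooked and discarded once
orphaned. A condensed sketch of the publish-or-lose-the-race step, with error
paths and the unlink of the old cache omitted:

        spin_lock(&inode->i_lock);
        newcache = nfsi->xattr_cache;
        if (newcache == NULL) {
                /* We won: publish our cache and put it on the cache LRU. */
                kref_get(&cache->ref);
                nfsi->xattr_cache = cache;
                cache->inode = inode;
                list_lru_add(&nfs4_xattr_cache_lru, &cache->lru);
        } else {
                /* Lost the race: drop ours, use the one already installed. */
                kref_get(&newcache->ref);
                kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
                cache = newcache;
        }
        spin_unlock(&inode->i_lock);
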
505 nfs4_xattr_hash_bucket(struct nfs4_xattr_cache *cache, const char *name)
507 return &cache->buckets[jhash(name, strlen(name), 0) &
508 (ARRAY_SIZE(cache->buckets) - 1)];
527 nfs4_xattr_hash_add(struct nfs4_xattr_cache *cache,
534 bucket = nfs4_xattr_hash_bucket(cache, entry->xattr_name);
549 atomic_long_inc(&cache->nent);
565 nfs4_xattr_hash_remove(struct nfs4_xattr_cache *cache, const char *name)
570 bucket = nfs4_xattr_hash_bucket(cache, name);
578 atomic_long_dec(&cache->nent);
588 nfs4_xattr_hash_find(struct nfs4_xattr_cache *cache, const char *name)
593 bucket = nfs4_xattr_hash_bucket(cache, name);
607 * Entry point to retrieve an entry from the cache.
612 struct nfs4_xattr_cache *cache;
616 cache = nfs4_xattr_get_cache(inode, 0);
617 if (cache == NULL)
621 entry = nfs4_xattr_hash_find(cache, name);
624 dprintk("%s: cache hit '%s', len %lu\n", __func__,
637 dprintk("%s: cache miss '%s'\n", __func__, name);
641 kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
647 * Retrieve a cached list of xattrs from the cache.
651 struct nfs4_xattr_cache *cache;
655 cache = nfs4_xattr_get_cache(inode, 0);
656 if (cache == NULL)
659 spin_lock(&cache->listxattr_lock);
661 entry = cache->listxattr;
677 spin_unlock(&cache->listxattr_lock);
679 kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
685 * Add an xattr to the cache.
687 * This also invalidates the xattr list cache.
692 struct nfs4_xattr_cache *cache;
698 cache = nfs4_xattr_get_cache(inode, 1);
699 if (cache == NULL)
706 (void)nfs4_xattr_set_listcache(cache, NULL);
708 if (!nfs4_xattr_hash_add(cache, entry))
712 kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
717 * Remove an xattr from the cache.
719 * This also invalidates the xattr list cache.
723 struct nfs4_xattr_cache *cache;
727 cache = nfs4_xattr_get_cache(inode, 0);
728 if (cache == NULL)
731 (void)nfs4_xattr_set_listcache(cache, NULL);
732 nfs4_xattr_hash_remove(cache, name);
734 kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
743 struct nfs4_xattr_cache *cache;
746 cache = nfs4_xattr_get_cache(inode, 1);
747 if (cache == NULL)
755 * This is just there to be able to get to bucket->cache,
759 entry->bucket = &cache->buckets[0];
761 if (!nfs4_xattr_set_listcache(cache, entry))
765 kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
769 * Zap the entire cache. Called when an inode is evicted.
784 * The entry LRU is shrunk more aggressively than the cache LRU,
829 struct nfs4_xattr_cache *cache = container_of(item,
832 if (atomic_long_read(&cache->nent) > 1)
836 * If a cache structure is on the LRU list, we know that
840 inode = cache->inode;
845 kref_get(&cache->ref);
847 cache->inode = NULL;
850 list_lru_isolate(lru, &cache->lru);
854 list_add_tail(&cache->dispose, dispose);
863 struct nfs4_xattr_cache *cache;
868 cache = list_first_entry(&dispose, struct nfs4_xattr_cache,
870 list_del_init(&cache->dispose);
871 nfs4_xattr_discard_cache(cache);
872 kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
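
Lines 829-872 show the two halves of the cache shrinker: an isolate callback
that takes a reference, detaches the cache from its inode and moves it from
the LRU onto a private dispose list, and a scan routine that discards each
isolated cache outside the LRU lock. A sketch of the scan side, assuming the
callback is named nfs4_xattr_cache_lru_isolate and that the scan function has
the usual shrinker signature:

static unsigned long
nfs4_xattr_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        LIST_HEAD(dispose);
        unsigned long freed;
        struct nfs4_xattr_cache *cache;

        freed = list_lru_shrink_walk(&nfs4_xattr_cache_lru, sc,
                                     nfs4_xattr_cache_lru_isolate, &dispose);

        /* Tear the isolated caches down without holding the LRU lock. */
        while (!list_empty(&dispose)) {
                cache = list_first_entry(&dispose, struct nfs4_xattr_cache,
                                         dispose);
                list_del_init(&cache->dispose);
                nfs4_xattr_discard_cache(cache);
                kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
        }

        return freed;
}
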
894 struct nfs4_xattr_cache *cache;
899 cache = bucket->cache;
902 * Unhook the entry from its parent (either a cache bucket
903 * or a cache structure if it's a listxattr buf), so that
911 /* Regular cache entry */
918 atomic_long_dec(&cache->nent);
923 /* Listxattr cache entry */
924 if (!spin_trylock(&cache->listxattr_lock))
929 cache->listxattr = NULL;
932 spin_unlock(&cache->listxattr_lock);
985 struct nfs4_xattr_cache *cache = (struct nfs4_xattr_cache *)p;
987 spin_lock_init(&cache->listxattr_lock);
988 atomic_long_set(&cache->nent, 0);
989 nfs4_xattr_hash_init(cache);
990 cache->listxattr = NULL;
991 INIT_LIST_HEAD(&cache->lru);
992 INIT_LIST_HEAD(&cache->dispose);
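
The constructor at lines 985-992 runs only when a slab object is first
created, so its locks, lists and hash buckets must already be back in this
pristine state whenever an object is freed (which is why free_cache_cb above
re-checks the buckets and clears listxattr before kmem_cache_free).
Registering such an init-once constructor would look roughly like this; the
slab name, the flags and the constructor's symbol name are assumptions.

nfs4_xattr_cache_cachep = kmem_cache_create("nfs4_xattr_cache",
                sizeof(struct nfs4_xattr_cache), 0,
                SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
                nfs4_xattr_cache_init_once);
if (nfs4_xattr_cache_cachep == NULL)
        return -ENOMEM;
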