Lines matching references to nn (the per-net-namespace struct nfsd_net *) in fs/nfsd/nfscache.c, the NFS server's duplicate reply cache (DRC):
88 nfsd_cache_hash(__be32 xid, struct nfsd_net *nn)
90 return hash_32(be32_to_cpu(xid), nn->maskbits);
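The nfsd_cache_hash() lines above turn the RPC XID into a bucket index: hash_32() keeps the top maskbits bits of a multiplicative hash, so the bucket count is always a power of two. A minimal userspace sketch of that mapping, assuming the kernel's usual golden-ratio constant (0x61C88647) and an illustrative 11-bit table; neither value appears in the listing itself.

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's hash_32(): multiply by the 32-bit
 * golden-ratio constant and keep only the top 'bits' bits of the result. */
static uint32_t hash_32_sketch(uint32_t val, unsigned int bits)
{
        return (uint32_t)(val * 0x61C88647u) >> (32 - bits);
}

int main(void)
{
        unsigned int maskbits = 11;     /* 2048 buckets; value is illustrative */
        uint32_t xid = 0x12345678;      /* host-order XID, i.e. after be32_to_cpu() */

        printf("bucket %u of %u\n",
               (unsigned)hash_32_sketch(xid, maskbits), 1u << maskbits);
        return 0;
}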
95 struct nfsd_net *nn)
121 struct nfsd_net *nn)
124 nn->drc_mem_usage -= rp->c_replvec.iov_len;
130 atomic_dec(&nn->num_drc_entries);
131 nn->drc_mem_usage -= sizeof(*rp);
138 struct nfsd_net *nn)
141 nfsd_reply_cache_free_locked(b, rp, nn);
157 int nfsd_reply_cache_init(struct nfsd_net *nn)
163 nn->max_drc_entries = nfsd_cache_size_limit();
164 atomic_set(&nn->num_drc_entries, 0);
165 hashsize = nfsd_hashsize(nn->max_drc_entries);
166 nn->maskbits = ilog2(hashsize);
168 nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
169 nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
170 nn->nfsd_reply_cache_shrinker.seeks = 1;
171 status = register_shrinker(&nn->nfsd_reply_cache_shrinker);
175 nn->drc_hashtbl = kvzalloc(array_size(hashsize,
176 sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
177 if (!nn->drc_hashtbl)
181 INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
182 spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
184 nn->drc_hashsize = hashsize;
188 unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
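The nfsd_reply_cache_init() matches show the setup order: compute the entry limit, size the hash table, derive maskbits with ilog2(), register the shrinker, then kvzalloc() the bucket array and give every bucket its own LRU list head and spinlock, unregistering the shrinker again if the allocation fails. A standalone userspace sketch of that per-bucket layout and initialization; the struct and function names are invented for illustration, and pthread mutexes stand in for the kernel spinlocks.

#include <stdlib.h>
#include <pthread.h>

/* Illustrative per-bucket state mirroring what the init lines above set up:
 * one LRU list and one lock per bucket (kernel: INIT_LIST_HEAD and
 * spin_lock_init on each nfsd_drc_bucket). All names here are invented. */
struct drc_entry_sketch;                        /* entries are filled in later */

struct drc_bucket_sketch {
        struct drc_entry_sketch *lru_head;      /* oldest entry first; NULL when empty */
        pthread_mutex_t cache_lock;
};

struct drc_table_sketch {
        struct drc_bucket_sketch *buckets;
        unsigned int hashsize;                  /* power of two, like the kvzalloc'd array */
        unsigned int maskbits;                  /* ilog2(hashsize); feeds the hash above */
};

static int drc_table_init_sketch(struct drc_table_sketch *t, unsigned int hashsize)
{
        unsigned int i, bits = 0;

        while ((1u << (bits + 1)) <= hashsize)  /* userspace stand-in for ilog2() */
                bits++;

        t->buckets = calloc(hashsize, sizeof(*t->buckets));
        if (!t->buckets)
                return -1;                      /* kernel unregisters the shrinker here */
        for (i = 0; i < hashsize; i++)
                pthread_mutex_init(&t->buckets[i].cache_lock, NULL);

        t->hashsize = hashsize;
        t->maskbits = bits;
        return 0;
}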
194 void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
199 unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
201 for (i = 0; i < nn->drc_hashsize; i++) {
202 struct list_head *head = &nn->drc_hashtbl[i].lru_head;
205 nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i],
206 rp, nn);
210 kvfree(nn->drc_hashtbl);
211 nn->drc_hashtbl = NULL;
212 nn->drc_hashsize = 0;
228 prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn)
240 if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
243 nfsd_reply_cache_free_locked(b, rp, nn);
254 prune_cache_entries(struct nfsd_net *nn)
259 for (i = 0; i < nn->drc_hashsize; i++) {
260 struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];
265 freed += prune_bucket(b, nn);
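prune_bucket() walks one bucket's LRU list from oldest to newest and stops once the cache is back under max_drc_entries and the entry at hand is not yet stale; entries tied to calls still in progress are skipped rather than freed, and prune_cache_entries() repeats that walk over every bucket. A self-contained userspace sketch of that policy, filling in the entry type forward-declared in the earlier sketch; the field names, the seconds-based expiry window, and the list handling are assumptions modeled on the listing, not copied from the kernel.

#include <stdbool.h>
#include <stdlib.h>
#include <time.h>

/* Illustrative cache entry: the oldest entries sit at the front of a bucket's
 * LRU list. Field names are assumptions, not the kernel's. */
struct drc_entry_sketch {
        struct drc_entry_sketch *prev, *next;
        time_t timestamp;               /* when the entry was last touched */
        bool in_progress;               /* kernel: c_state == RC_INPROG */
};

#define DRC_EXPIRE_SECS 120             /* kernel uses a jiffies window (RC_EXPIRE); value assumed */

static void unlink_entry(struct drc_entry_sketch **head, struct drc_entry_sketch *e)
{
        if (e->prev)
                e->prev->next = e->next;
        else
                *head = e->next;
        if (e->next)
                e->next->prev = e->prev;
}

/* Walk one bucket oldest-first: skip entries still in progress, stop once the
 * cache is back under its limit and the entry at hand has not yet expired,
 * and free everything encountered before that point. */
static long prune_bucket_sketch(struct drc_entry_sketch **lru_head,
                                unsigned long *num_entries,
                                unsigned long max_entries)
{
        struct drc_entry_sketch *e = *lru_head, *next;
        time_t now = time(NULL);
        long freed = 0;

        while (e) {
                next = e->next;
                if (e->in_progress) {
                        e = next;
                        continue;
                }
                if (*num_entries <= max_entries &&
                    now < e->timestamp + DRC_EXPIRE_SECS)
                        break;
                unlink_entry(lru_head, e);
                free(e);
                (*num_entries)--;
                freed++;
                e = next;
        }
        return freed;
}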
274 struct nfsd_net *nn = container_of(shrink,
277 return atomic_read(&nn->num_drc_entries);
283 struct nfsd_net *nn = container_of(shrink,
286 return prune_cache_entries(nn);
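The two groups of matches above are the shrinker callbacks that plug the DRC into memory reclaim: count_objects reports how many entries are cached, scan_objects frees whatever prune_cache_entries() can reclaim. Filling in the lines around the matches gives the shape below; this is a kernel-style reconstruction that only builds in the context of the original file, and the exact surrounding lines may differ by kernel version.

static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
        struct nfsd_net *nn = container_of(shrink,
                                struct nfsd_net, nfsd_reply_cache_shrinker);

        return atomic_read(&nn->num_drc_entries);
}

static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        struct nfsd_net *nn = container_of(shrink,
                                struct nfsd_net, nfsd_reply_cache_shrinker);

        return prune_cache_entries(nn);
}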
323 const struct svc_cacherep *rp, struct nfsd_net *nn)
327 ++nn->payload_misses;
328 trace_nfsd_drc_mismatch(nn, key, rp);
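nfsd_cache_key_cmp() treats one kind of mismatch specially: when a cached entry has the same XID as the incoming request but a different payload checksum, payload_misses is bumped and the nfsd_drc_mismatch tracepoint fires, since that usually means the XID was reused for a different request. A tiny standalone sketch of that two-part comparison; the key fields shown are assumptions standing in for the kernel's c_key.

#include <stdint.h>
#include <string.h>

/* Illustrative two-part key: the fields stand in for the kernel's c_key,
 * which pairs the XID with a checksum over the request payload. */
struct drc_key_sketch {
        uint32_t xid;
        uint32_t csum;
};

static unsigned long payload_misses;    /* kernel: nn->payload_misses */

/* Compare a lookup key against a cached entry's key; a matching XID with a
 * different checksum is counted separately. Returns 0 only on a full match. */
static int drc_key_cmp_sketch(const struct drc_key_sketch *key,
                              const struct drc_key_sketch *cached)
{
        if (key->xid == cached->xid && key->csum != cached->csum)
                ++payload_misses;       /* kernel also fires trace_nfsd_drc_mismatch() */

        return memcmp(key, cached, sizeof(*key));
}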
341 struct nfsd_net *nn)
354 cmp = nfsd_cache_key_cmp(key, rp, nn);
368 if (entries > nn->longest_chain) {
369 nn->longest_chain = entries;
370 nn->longest_chain_cachesize = atomic_read(&nn->num_drc_entries);
371 } else if (entries == nn->longest_chain) {
373 nn->longest_chain_cachesize = min_t(unsigned int,
374 nn->longest_chain_cachesize,
375 atomic_read(&nn->num_drc_entries));
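While nfsd_cache_insert() walks a hash chain it also maintains the longest-chain statistics: a new record stores the cache size at that moment, and a chain that merely ties the record keeps the smaller of the two cache sizes (the min_t() above). A short standalone sketch of just that bookkeeping, with the chain length passed in as a plain argument and the counters simplified to globals.

#include <stdatomic.h>

/* Counters mirroring the fields updated at the end of the chain walk above;
 * storage is simplified for the sketch. */
static unsigned int longest_chain;
static unsigned int longest_chain_cachesize;
static atomic_uint num_drc_entries;

/* Record chain-length statistics after one lookup walked 'chain_len' entries:
 * a new record stores the current cache size, a tie keeps the smaller size. */
static void record_chain_stats_sketch(unsigned int chain_len)
{
        unsigned int cachesize = atomic_load(&num_drc_entries);

        if (chain_len > longest_chain) {
                longest_chain = chain_len;
                longest_chain_cachesize = cachesize;
        } else if (chain_len == longest_chain &&
                   cachesize < longest_chain_cachesize) {
                longest_chain_cachesize = cachesize;
        }
}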
399 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
403 u32 hash = nfsd_cache_hash(xid, nn);
404 struct nfsd_drc_bucket *b = &nn->drc_hashtbl[hash];
420 rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
425 found = nfsd_cache_insert(b, rp, nn);
427 nfsd_reply_cache_free_locked(NULL, rp, nn);
436 atomic_inc(&nn->num_drc_entries);
437 nn->drc_mem_usage += sizeof(*rp);
440 prune_bucket(b, nn);
480 trace_nfsd_drc_found(nn, rqstp, rtn);
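The nfsd_cache_lookup() matches outline the lookup flow: hash the XID to pick a bucket, pre-allocate a candidate entry, then do a single insert-or-find under the bucket lock. If an equivalent entry already exists the candidate is freed and the cached entry is used; otherwise the new entry is counted against num_drc_entries and drc_mem_usage and the bucket is pruned opportunistically. A compact userspace sketch of that allocate-first, insert-or-reuse shape; every name in it is invented for illustration and locking is only indicated in comments.

#include <stdint.h>
#include <stdlib.h>

/* Minimal entry and bucket shapes for showing the lookup control flow. */
struct entry_sketch {
        struct entry_sketch *next;
        uint32_t xid;
        uint32_t csum;
        /* the cached reply would hang off here */
};

struct bucket_sketch {
        struct entry_sketch *chain;
        unsigned long num_entries;
};

/* Allocate a candidate entry up front, then insert-or-find in one pass:
 * if a matching entry already exists the candidate is freed and the cached
 * one returned; otherwise the candidate becomes the new entry. */
static struct entry_sketch *
cache_lookup_sketch(struct bucket_sketch *b, uint32_t xid, uint32_t csum,
                    int *is_hit)
{
        struct entry_sketch *rp, *found;

        *is_hit = 0;
        rp = calloc(1, sizeof(*rp));
        if (!rp)
                return NULL;            /* kernel just handles the request uncached */
        rp->xid = xid;
        rp->csum = csum;

        /* ... take the bucket lock here ... */
        for (found = b->chain; found; found = found->next)
                if (found->xid == xid && found->csum == csum)
                        break;

        if (found) {
                free(rp);               /* kernel: nfsd_reply_cache_free_locked(NULL, rp, nn) */
                *is_hit = 1;
                /* ... replay the cached reply, or drop a duplicate whose
                 * original call is still in progress ... */
                return found;
        }

        rp->next = b->chain;            /* miss: keep the candidate as the new entry */
        b->chain = rp;
        b->num_entries++;               /* kernel: atomic_inc + drc_mem_usage += sizeof(*rp) */
        /* ... opportunistically prune this bucket, then drop the lock ... */
        return rp;
}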
506 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
517 hash = nfsd_cache_hash(rp->c_key.k_xid, nn);
518 b = &nn->drc_hashtbl[hash];
525 nfsd_reply_cache_free(b, rp, nn);
540 nfsd_reply_cache_free(b, rp, nn);
547 nfsd_reply_cache_free(b, rp, nn);
551 nn->drc_mem_usage += bufsize;
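nfsd_cache_update() rehashes the entry's stored XID (rp->c_key.k_xid) to find its bucket and then either stores the reply or gives up: the repeated nfsd_reply_cache_free() calls in the listing are the bail-out paths for replies that cannot or should not be cached, while the success path charges the copied bytes to drc_mem_usage. A simplified standalone sketch of that store-or-drop decision, with the cached reply reduced to a single malloc'd buffer; that reduction is an assumption, not the kernel's c_replvec handling.

#include <stdlib.h>
#include <string.h>

/* Simplified cached-reply slot: one malloc'd blob plus its length, standing
 * in for the kernel's c_replvec. */
struct reply_slot_sketch {
        void   *data;
        size_t  len;
};

static size_t drc_mem_usage;            /* kernel: nn->drc_mem_usage, under the bucket lock */

/* Try to stash a reply in the cache entry. On failure the caller frees the
 * entry instead (the nfsd_reply_cache_free() calls in the listing); on
 * success the copied bytes are charged to the memory-usage counter,
 * mirroring "nn->drc_mem_usage += bufsize". */
static int cache_update_sketch(struct reply_slot_sketch *slot,
                               const void *reply, size_t bufsize)
{
        if (bufsize == 0)
                return -1;              /* nothing worth caching */

        slot->data = malloc(bufsize);
        if (!slot->data)
                return -1;              /* copy failed: drop the entry */

        memcpy(slot->data, reply, bufsize);
        slot->len = bufsize;
        drc_mem_usage += bufsize;
        return 0;
}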
587 struct nfsd_net *nn = m->private;
589 seq_printf(m, "max entries: %u\n", nn->max_drc_entries);
591 atomic_read(&nn->num_drc_entries));
592 seq_printf(m, "hash buckets: %u\n", 1 << nn->maskbits);
593 seq_printf(m, "mem usage: %u\n", nn->drc_mem_usage);
597 seq_printf(m, "payload misses: %u\n", nn->payload_misses);
598 seq_printf(m, "longest chain len: %u\n", nn->longest_chain);
599 seq_printf(m, "cachesize at longest: %u\n", nn->longest_chain_cachesize);
605 struct nfsd_net *nn = net_generic(file_inode(file)->i_sb->s_fs_info,
608 return single_open(file, nfsd_reply_cache_stats_show, nn);