Lines matching refs:rp (evidently fs/nfsd/nfscache.c, the Linux NFS server's duplicate reply cache). Only lines that reference rp, the local struct svc_cacherep pointer, are shown; the left-hand numbers are line numbers in that file.
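
In nfsd_reply_cache_alloc(): allocate an entry from the drc_slab and fill in its lookup key (XID, procedure, client address and port, transport protocol, version, argument length, payload checksum) from the incoming request:
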
97 struct svc_cacherep *rp;
99 rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
100 if (rp) {
101 rp->c_state = RC_UNUSED;
102 rp->c_type = RC_NOCACHE;
103 RB_CLEAR_NODE(&rp->c_node);
104 INIT_LIST_HEAD(&rp->c_lru);
106 memset(&rp->c_key, 0, sizeof(rp->c_key));
107 rp->c_key.k_xid = rqstp->rq_xid;
108 rp->c_key.k_proc = rqstp->rq_proc;
109 rpc_copy_addr((struct sockaddr *)&rp->c_key.k_addr, svc_addr(rqstp));
110 rpc_set_port((struct sockaddr *)&rp->c_key.k_addr, rpc_get_port(svc_addr(rqstp)));
111 rp->c_key.k_prot = rqstp->rq_prot;
112 rp->c_key.k_vers = rqstp->rq_vers;
113 rp->c_key.k_len = rqstp->rq_arg.len;
114 rp->c_key.k_csum = csum;
116 return rp;
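
In nfsd_reply_cache_free_locked(): free any cached reply buffer, unhash the entry from the bucket's rbtree and LRU list unless it was never inserted (c_state still RC_UNUSED), and return it to the slab:
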
120 nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
123 if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
124 nn->drc_mem_usage -= rp->c_replvec.iov_len;
125 kfree(rp->c_replvec.iov_base);
127 if (rp->c_state != RC_UNUSED) {
128 rb_erase(&rp->c_node, &b->rb_head);
129 list_del(&rp->c_lru);
131 nn->drc_mem_usage -= sizeof(*rp);
133 kmem_cache_free(drc_slab, rp);
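
In nfsd_reply_cache_free(): the locking wrapper around the function above:
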
137 nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
141 nfsd_reply_cache_free_locked(b, rp, nn);
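
In nfsd_reply_cache_shutdown(): drain each bucket's LRU list when the cache is torn down:
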
196 struct svc_cacherep *rp;
204 rp = list_first_entry(head, struct svc_cacherep, c_lru);
206 rp, nn);
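
In lru_put_end(): refresh the entry's timestamp and move it to the tail of the bucket's LRU list, so the list stays ordered oldest-first:
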
221 lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
223 rp->c_timestamp = jiffies;
224 list_move_tail(&rp->c_lru, &b->lru_head);
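
In prune_bucket(): walk the LRU list, skipping entries whose requests are still in progress and freeing expired ones; because lru_put_end() keeps the list time-ordered, the walk can stop at the first unexpired entry:
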
230 struct svc_cacherep *rp, *tmp;
233 list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
238 if (rp->c_state == RC_INPROG)
241 time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
243 nfsd_reply_cache_free_locked(b, rp, nn);
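
A minimal userspace sketch of that pruning walk (my model, not kernel code; it omits the cache-occupancy bound that the kernel checks alongside the time_before() test at line 241):

#include <stdio.h>
#include <stdlib.h>

enum state { UNUSED, INPROG, DONE };	/* stand-ins for RC_UNUSED etc. */

struct entry {
	struct entry *next;
	long timestamp;		/* stand-in for rp->c_timestamp (jiffies) */
	enum state state;
};

#define EXPIRE 120		/* stand-in for RC_EXPIRE */

/* Oldest-first walk: skip busy entries, free expired ones, and stop at
 * the first entry that is still fresh (everything after it is fresher). */
static long prune(struct entry **head, long now)
{
	struct entry **pp = head, *e;
	long freed = 0;

	while ((e = *pp) != NULL) {
		if (e->state == INPROG) {
			pp = &e->next;	/* busy: keep scanning */
			continue;
		}
		if (now < e->timestamp + EXPIRE)
			break;
		*pp = e->next;		/* unlink and free the expired entry */
		free(e);
		freed++;
	}
	return freed;
}

int main(void)
{
	struct entry *head = NULL, **pp = &head;
	long stamp[] = { 0, 10, 500 };		/* oldest first */
	enum state st[] = { DONE, INPROG, DONE };

	for (int i = 0; i < 3; i++) {
		struct entry *e = calloc(1, sizeof(*e));
		e->timestamp = stamp[i];
		e->state = st[i];
		*pp = e;
		pp = &e->next;
	}
	/* At "time" 550 the first entry is expired, the second is busy and
	 * the third is fresh, so exactly one entry is freed. */
	printf("freed %ld\n", prune(&head, 550));
	return 0;
}

In nfsd_cache_key_cmp(): a matching XID with a differing payload checksum is traced as a DRC mismatch; otherwise the whole key is compared byte-wise:
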
323 const struct svc_cacherep *rp, struct nfsd_net *nn)
325 if (key->c_key.k_xid == rp->c_key.k_xid &&
326 key->c_key.k_csum != rp->c_key.k_csum) {
328 trace_nfsd_drc_mismatch(nn, key, rp);
331 return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
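
The byte-wise memcmp() above is also why line 106 zeroes the whole key before its fields are filled in: struct padding takes part in the comparison. A small standalone illustration (mine, not kernel code):

#include <stdio.h>
#include <string.h>

struct key {
	unsigned char proto;	/* 1-byte field ... */
	unsigned int xid;	/* ... usually preceded by 3 padding bytes */
};

int main(void)
{
	struct key a, b;

	memset(&a, 0x00, sizeof(a));	/* zeroed, like the kernel's memset */
	memset(&b, 0xff, sizeof(b));	/* garbage where the padding lives */
	a.proto = b.proto = 6;
	a.xid = b.xid = 42;

	/* Identical field values, yet memcmp() may report a difference,
	 * because it compares the padding bytes too. */
	printf("memcmp = %d\n", memcmp(&a, &b, sizeof(a)));
	return 0;
}

In nfsd_cache_insert(): descend the bucket's red-black tree; on a key match return the existing entry, otherwise link the new one:
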
343 struct svc_cacherep *rp, *ret = key;
352 rp = rb_entry(parent, struct svc_cacherep, c_node);
354 cmp = nfsd_cache_key_cmp(key, rp, nn);
360 ret = rp;
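
The same insert-or-return-existing contract in a self-contained userspace sketch (a plain unbalanced binary tree stands in for the kernel rbtree; all names are mine):

#include <stdio.h>

struct node {
	struct node *left, *right;
	int key;
};

/* Descend while remembering the link to patch; on a key match hand back
 * the existing node, otherwise link the caller's node and return it. */
static struct node *insert_or_find(struct node **root, struct node *new)
{
	struct node **p = root;

	while (*p != NULL) {
		if (new->key < (*p)->key)
			p = &(*p)->left;
		else if (new->key > (*p)->key)
			p = &(*p)->right;
		else
			return *p;	/* duplicate key: reuse existing entry */
	}
	*p = new;			/* reached a leaf link: insert */
	return new;
}

int main(void)
{
	struct node *root = NULL;
	struct node n1 = { .key = 7 }, n2 = { .key = 7 };

	/* Mirrors lines 425-428: the caller frees its candidate and adopts
	 * the returned entry whenever the two differ. */
	printf("%s\n", insert_or_find(&root, &n1) == &n1 ? "inserted" : "found");
	printf("%s\n", insert_or_find(&root, &n2) == &n2 ? "inserted" : "found");
	return 0;
}

In nfsd_cache_lookup(): preallocate a candidate entry, try to insert it, and on a collision free the candidate (bucket NULL, since it was never hashed) and reuse the entry already in the tree; a fresh entry is marked RC_INPROG while the request is processed:
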
400 struct svc_cacherep *rp, *found;
420 rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
421 if (!rp)
425 found = nfsd_cache_insert(b, rp, nn);
426 if (found != rp) {
427 nfsd_reply_cache_free_locked(NULL, rp, nn);
428 rp = found;
433 rqstp->rq_cacherep = rp;
434 rp->c_state = RC_INPROG;
437 nn->drc_mem_usage += sizeof(*rp);
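
Still in nfsd_cache_lookup(), on a cache hit (typically a client retransmission): an entry still marked RC_INPROG means the original request is in flight and the duplicate is dropped, while a completed entry replays the cached status or reply buffer:
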
453 if (rp->c_state == RC_INPROG)
459 if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
463 switch (rp->c_type) {
467 svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
471 if (!nfsd_cache_append(rqstp, &rp->c_replvec))
476 WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type);
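
In nfsd_cache_update(): once the reply has been built, store the status word (RC_REPLSTAT) or a copy of the reply (RC_REPLBUFF), or free the entry when it cannot or should not be cached, then mark it RC_DONE:
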
507 struct svc_cacherep *rp = rqstp->rq_cacherep;
514 if (!rp)
517 hash = nfsd_cache_hash(rp->c_key.k_xid, nn);
525 nfsd_reply_cache_free(b, rp, nn);
533 rp->c_replstat = *statp;
536 cachv = &rp->c_replvec;
540 nfsd_reply_cache_free(b, rp, nn);
547 nfsd_reply_cache_free(b, rp, nn);
552 lru_put_end(b, rp);
553 rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
554 rp->c_type = cachetype;
555 rp->c_state = RC_DONE;
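
Taken together, an entry moves RC_UNUSED (line 101, just allocated) → RC_INPROG (line 434, request being processed) → RC_DONE (line 555, reply cached and replayable), with prune_bucket() and the free helpers retiring entries along the way.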