Lines Matching defs:rqst

131 				struct rpc_rqst *rqst)
133 struct xdr_buf *xdr = &rqst->rq_snd_buf;
163 struct rpc_rqst *rqst)
165 return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep->re_max_inline_recv;
174 const struct rpc_rqst *rqst)
176 const struct xdr_buf *buf = &rqst->rq_rcv_buf;
364 struct rpc_rqst *rqst,
376 pos = rqst->rq_snd_buf.head[0].iov_len;
380 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
393 trace_xprtrdma_chunk_read(rqst->rq_task, pos, mr, nsegs);
421 struct rpc_rqst *rqst,
434 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
435 rqst->rq_rcv_buf.head[0].iov_len,
456 trace_xprtrdma_chunk_write(rqst->rq_task, mr, nsegs);
486 struct rpc_rqst *rqst,
502 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
522 trace_xprtrdma_chunk_reply(rqst->rq_task, mr, nsegs);
859 * @rqst: RPC request to be marshaled
861 * For the RPC in "rqst", this function:
876 rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
878 struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
881 struct xdr_buf *buf = &rqst->rq_snd_buf;
886 if (unlikely(rqst->rq_rcv_buf.flags & XDRBUF_SPARSE_PAGES)) {
887 ret = rpcrdma_alloc_sparse_pages(&rqst->rq_rcv_buf);
894 rqst);
901 *p++ = rqst->rq_xid;
910 &rqst->rq_cred->cr_auth->au_flags);
921 if (rpcrdma_results_inline(r_xprt, rqst))
923 else if ((ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ) &&
924 rpcrdma_nonpayload_inline(r_xprt, rqst))
943 if (rpcrdma_args_inline(r_xprt, rqst)) {
978 ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
981 ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
984 ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
997 trace_xprtrdma_marshal_failed(rqst, ret);
1038 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
1039 * @rqst: controlling RPC request
1056 rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
1067 rqst->rq_rcv_buf.head[0].iov_base = srcp;
1068 rqst->rq_private_buf.head[0].iov_base = srcp;
1073 curlen = rqst->rq_rcv_buf.head[0].iov_len;
1079 ppages = rqst->rq_rcv_buf.pages +
1080 (rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
1081 page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
1083 if (copy_len && rqst->rq_rcv_buf.page_len) {
1086 pagelist_len = rqst->rq_rcv_buf.page_len;
1122 rqst->rq_rcv_buf.tail[0].iov_base = srcp;
1123 rqst->rq_private_buf.tail[0].iov_base = srcp;
1127 trace_xprtrdma_fixup(rqst, fixup_copy_count);
1283 struct rpc_rqst *rqst)
1301 /* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
1305 rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);
1338 struct rpc_rqst *rqst)
1373 * the rep, rqst, and rq_task pointers remain stable.
1379 struct rpc_rqst *rqst = rep->rr_rqst;
1384 status = rpcrdma_decode_msg(r_xprt, rep, rqst);
1390 status = rpcrdma_decode_error(r_xprt, rep, rqst);
1400 xprt_complete_rqst(rqst->rq_task, status);
1401 xprt_unpin_rqst(rqst);
1408 rqst->rq_task->tk_status = status;
1434 struct rpc_rqst *rqst;
1465 rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
1466 if (!rqst)
1468 xprt_pin_rqst(rqst);
1480 req = rpcr_to_rdmar(rqst);
1482 trace_xprtrdma_leaked_rep(rqst, req->rl_reply);
1486 rep->rr_rqst = rqst;
1488 trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);
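
The final lines above (1379-1488) come from the reply path, where an incoming rep is matched back to its rpc_rqst by XID, pinned while its payload is decoded into rq_rcv_buf, and then completed. A condensed sketch of that flow is below; it is not the verbatim kernel code, and any helper or field not shown in the listing (for example the local function name) is assumed for illustration only.

/* Sketch of the xprtrdma reply path's use of rqst, based on the
 * listing above.  Locking and error handling are simplified.
 */
static void reply_path_sketch(struct rpcrdma_xprt *r_xprt,
			      struct rpcrdma_rep *rep)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpc_rqst *rqst;
	int status;

	/* Match the reply to its request by XID, then pin the rqst so
	 * it stays stable while the payload is decoded. */
	spin_lock(&xprt->queue_lock);
	rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
	if (!rqst) {
		spin_unlock(&xprt->queue_lock);
		return;		/* no matching request; drop the reply */
	}
	xprt_pin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);

	rep->rr_rqst = rqst;

	/* Decode directly into rqst->rq_rcv_buf (see rpcrdma_decode_msg
	 * and rpcrdma_inline_fixup in the listing), then complete and
	 * unpin the request. */
	status = rpcrdma_decode_msg(r_xprt, rep, rqst);

	spin_lock(&xprt->queue_lock);
	xprt_complete_rqst(rqst->rq_task, status);
	xprt_unpin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);
}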