Lines Matching defs:r_xprt

126 static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
130 struct rpcrdma_ep *ep = r_xprt->rx_ep;
158 static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
161 return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep->re_max_inline_recv;
169 rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
175 r_xprt->rx_ep->re_max_inline_recv;
228 rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
295 static struct rpcrdma_mr_seg *rpcrdma_mr_prepare(struct rpcrdma_xprt *r_xprt,
303 *mr = rpcrdma_mr_get(r_xprt);
310 return frwr_map(r_xprt, seg, nsegs, writing, req->rl_slot.rq_xid, *mr);
313 trace_xprtrdma_nomrs_err(r_xprt, req);
314 xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
315 rpcrdma_mrs_refresh(r_xprt);
333 static int rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
351 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
357 seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, false, &mr);
365 r_xprt->rx_stats.read_chunk_count++;
390 static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt,
396 struct rpcrdma_ep *ep = r_xprt->rx_ep;
406 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
421 seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
429 r_xprt->rx_stats.write_chunk_count++;
430 r_xprt->rx_stats.total_rdma_request += mr->mr_length;
441 r_xprt->rx_stats.write_chunk_count++;
442 r_xprt->rx_stats.total_rdma_request += mr->mr_length;
468 static int rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
486 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
499 seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
507 r_xprt->rx_stats.reply_chunk_count++;
508 r_xprt->rx_stats.total_rdma_request += mr->mr_length;
556 static void rpcrdma_prepare_hdr_sge(struct rpcrdma_xprt *r_xprt,
574 static bool rpcrdma_prepare_head_iov(struct rpcrdma_xprt *r_xprt,
581 if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
662 static void rpcrdma_pullup_tail_iov(struct rpcrdma_xprt *r_xprt,
671 r_xprt->rx_stats.pullup_copy_count += xdr->tail[0].iov_len;
676 static void rpcrdma_pullup_pagelist(struct rpcrdma_xprt *r_xprt,
694 r_xprt->rx_stats.pullup_copy_count += len;
712 static bool rpcrdma_prepare_noch_pullup(struct rpcrdma_xprt *r_xprt,
717 rpcrdma_pullup_tail_iov(r_xprt, req, xdr);
720 rpcrdma_pullup_pagelist(r_xprt, req, xdr);
723 return rpcrdma_prepare_head_iov(r_xprt, req, xdr->len);
726 static bool rpcrdma_prepare_noch_mapped(struct rpcrdma_xprt *r_xprt,
732 if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
748 static bool rpcrdma_prepare_readch(struct rpcrdma_xprt *r_xprt,
752 if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
782 * @r_xprt: controlling transport
790 inline int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
798 req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt);
809 rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen);
814 if (!rpcrdma_prepare_noch_pullup(r_xprt, req, xdr))
818 if (!rpcrdma_prepare_noch_mapped(r_xprt, req, xdr))
822 if (!rpcrdma_prepare_readch(r_xprt, req, xdr))
842 * @r_xprt: controlling transport
860 rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
887 *p++ = r_xprt->rx_buf.rb_max_requests;
905 if (rpcrdma_results_inline(r_xprt, rqst))
908 rpcrdma_nonpayload_inline(r_xprt, rqst))
927 if (rpcrdma_args_inline(r_xprt, rqst)) {
935 r_xprt->rx_stats.nomsg_call_count++;
962 ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
965 ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
968 ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
972 ret = rpcrdma_prepare_send_sges(r_xprt, req, req->rl_hdrbuf.len,
982 r_xprt->rx_stats.failed_marshal_count++;
995 static void rpcrdma_update_cwnd(struct rpcrdma_xprt *r_xprt, u32 grant)
997 struct rpc_xprt *xprt = &r_xprt->rx_xprt;
1000 __rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, grant);
1006 * @r_xprt: controlling transport instance
1008 * Prepare @r_xprt for the next connection by reinitializing
1011 void rpcrdma_reset_cwnd(struct rpcrdma_xprt *r_xprt)
1013 struct rpc_xprt *xprt = &r_xprt->rx_xprt;
1017 __rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, 1);
1121 rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
1124 struct rpc_xprt *xprt = &r_xprt->rx_xprt;
1159 rpcrdma_bc_receive_call(r_xprt, rep);
1262 rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
1284 r_xprt->rx_stats.fixup_copy_count +=
1287 r_xprt->rx_stats.total_rdma_reply += writelist;
1292 rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
1312 r_xprt->rx_stats.total_rdma_reply += replychunk;
1317 rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
1376 struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
1377 struct rpc_xprt *xprt = &r_xprt->rx_xprt;
1383 status = rpcrdma_decode_msg(r_xprt, rep, rqst);
1386 status = rpcrdma_decode_nomsg(r_xprt, rep);
1389 status = rpcrdma_decode_error(r_xprt, rep, rqst);
1406 r_xprt->rx_stats.bad_reply_count++;
1429 struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
1430 struct rpc_xprt *xprt = &r_xprt->rx_xprt;
1431 struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
1457 if (rpcrdma_is_bcall(r_xprt, rep))
1472 else if (credits > r_xprt->rx_ep->re_max_requests)
1473 credits = r_xprt->rx_ep->re_max_requests;
1474 rpcrdma_post_recvs(r_xprt, credits + (buf->rb_bc_srv_max_requests << 1),
1477 rpcrdma_update_cwnd(r_xprt, credits);
1490 frwr_unmap_async(r_xprt, req);
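
The matched lines above mostly trace the call-side marshaling path (these helpers live in the kernel's xprtrdma code, net/sunrpc/xprtrdma/rpc_rdma.c): rpcrdma_marshal_req() uses rpcrdma_args_inline(), rpcrdma_results_inline() and rpcrdma_nonpayload_inline() to decide whether the Call and Reply travel inline or need Read/Write/Reply chunks, and then invokes rpcrdma_encode_read_list(), rpcrdma_encode_write_list(), rpcrdma_encode_reply_chunk() and rpcrdma_prepare_send_sges(). The standalone sketch below illustrates only that decision order; the toy_* types, field names and thresholds are invented stand-ins for illustration, not kernel structures.

/*
 * Simplified, self-contained sketch (not the kernel source) of the
 * chunk-type decision made in rpcrdma_marshal_req(), using toy
 * equivalents of the inline-eligibility helpers listed above.
 */
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the transport's negotiated inline limits. */
struct toy_ep {
	unsigned int max_inline_send;
	unsigned int max_inline_recv;
};

/* Hypothetical stand-in for the relevant parts of an RPC request. */
struct toy_rqst {
	unsigned int snd_len;        /* total RPC Call size */
	unsigned int rcv_buflen;     /* total Reply buffer size */
	unsigned int rcv_nonpayload; /* head + tail, excluding bulk payload */
	bool has_bulk_payload;       /* e.g. an NFS READ data payload */
};

/* Roughly mirrors rpcrdma_args_inline(): does the Call fit in a Send? */
static bool args_inline(const struct toy_ep *ep, const struct toy_rqst *r)
{
	return r->snd_len <= ep->max_inline_send;
}

/* Roughly mirrors rpcrdma_results_inline(): does the whole Reply fit inline? */
static bool results_inline(const struct toy_ep *ep, const struct toy_rqst *r)
{
	return r->rcv_buflen <= ep->max_inline_recv;
}

/* Roughly mirrors rpcrdma_nonpayload_inline(): everything except the
 * bulk payload fits inline, so only the payload needs a Write chunk. */
static bool nonpayload_inline(const struct toy_ep *ep, const struct toy_rqst *r)
{
	return r->rcv_nonpayload <= ep->max_inline_recv;
}

int main(void)
{
	struct toy_ep ep = { .max_inline_send = 4096, .max_inline_recv = 4096 };
	struct toy_rqst rqst = {
		.snd_len = 512, .rcv_buflen = 128 * 1024,
		.rcv_nonpayload = 200, .has_bulk_payload = true,
	};

	/* Reply direction: no chunk, a Write chunk, or a Reply chunk. */
	if (results_inline(&ep, &rqst))
		puts("wtype: noch (reply fits inline)");
	else if (rqst.has_bulk_payload && nonpayload_inline(&ep, &rqst))
		puts("wtype: writech (only the payload via a Write chunk)");
	else
		puts("wtype: replych (whole reply via a Reply chunk)");

	/* Call direction: inline Send or a Read chunk list. */
	if (args_inline(&ep, &rqst))
		puts("rtype: noch (call fits inline)");
	else
		puts("rtype: readch (call arguments via Read chunks)");

	return 0;
}

With the sample sizes above, the Call is sent inline while the large receive buffer forces the bulk payload into a Write chunk; the remaining matched lines (rpcrdma_reply_handler and the decode helpers) cover the corresponding reply-side processing and credit accounting.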