Lines matching defs:r_xprt (cross-reference listing; the fragments below are from the Linux kernel's RPC-over-RDMA client marshaling code, net/sunrpc/xprtrdma/rpc_rdma.c, and the leading number on each line is that file's line number)
130 static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
134 struct rpcrdma_ep *ep = r_xprt->rx_ep;
162 static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
165 return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep->re_max_inline_recv;
173 rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
179 r_xprt->rx_ep->re_max_inline_recv;
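The three predicates above gate every marshaling decision that follows: they compare the request's send and receive buffer sizes against the connection's negotiated inline thresholds. A minimal userspace sketch with simplified stand-in types (the real rpcrdma_xprt and rpcrdma_ep structures carry many more fields):

#include <stdbool.h>
#include <stddef.h>

struct ep   { size_t re_max_inline_send, re_max_inline_recv; };
struct xbuf { size_t len, buflen; };
struct rqst { struct xbuf rq_snd_buf, rq_rcv_buf; };
struct xprt { struct ep *rx_ep; };

/* Whole RPC Call fits in the Send buffer: no Read chunks needed. */
static bool args_inline(const struct xprt *x, const struct rqst *r)
{
        return r->rq_snd_buf.len <= x->rx_ep->re_max_inline_send;
}

/* Whole RPC Reply fits in a Receive buffer: no Reply chunk needed
 * (cf. line 165 above, which is this comparison verbatim). */
static bool results_inline(const struct xprt *x, const struct rqst *r)
{
        return r->rq_rcv_buf.buflen <= x->rx_ep->re_max_inline_recv;
}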
245 rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
274 if (type == rpcrdma_readch && r_xprt->rx_ep->re_implicit_roundup)
282 if (type == rpcrdma_writech && r_xprt->rx_ep->re_implicit_roundup)
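rpcrdma_convert_iovs() walks an xdr_buf into an array of rpcrdma_mr_seg entries; the two re_implicit_roundup tests at 274 and 282 decide whether XDR pad bytes must be carried as an extra segment or can be left to the receiver. A hedged sketch of just that padding decision (maybe_add_pad and its parameters are hypothetical names, not the kernel's):

#include <stdbool.h>
#include <stddef.h>

struct seg { void *base; size_t len; };

static char xdr_pad[4];         /* zero pad bytes for XDR alignment */

/* Hypothetical helper: append the XDR pad as its own segment only
 * when the peer does NOT round up implicitly (re_implicit_roundup);
 * otherwise the receiver is trusted to align the chunk itself. */
static int maybe_add_pad(struct seg *seg, int n, size_t payload_len,
                         bool implicit_roundup)
{
        size_t pad = (4 - (payload_len & 3)) & 3;

        if (pad == 0 || implicit_roundup)
                return n;
        seg[n].base = xdr_pad;
        seg[n].len = pad;
        return n + 1;
}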
323 static struct rpcrdma_mr_seg *rpcrdma_mr_prepare(struct rpcrdma_xprt *r_xprt,
331 *mr = rpcrdma_mr_get(r_xprt);
339 return frwr_map(r_xprt, seg, nsegs, writing, req->rl_slot.rq_xid, *mr);
343 xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
344 rpcrdma_mrs_refresh(r_xprt);
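rpcrdma_mr_prepare() (331-344) pairs MR allocation with a back-pressure fallback: when the free list is empty, the generic RPC layer is told to wait for buffer space and an asynchronous MR refresh is kicked off. A compilable condensation, with stubs standing in for rpcrdma_mr_get(), frwr_map(), xprt_wait_for_buffer_space(), and rpcrdma_mrs_refresh():

#include <stdbool.h>
#include <stddef.h>

struct mr   { int id; };
struct seg  { void *base; size_t len; };
struct xprt { int unused; };

static struct mr *mr_get(struct xprt *x) { (void)x; return NULL; }
static void wait_for_buffer_space(struct xprt *x) { (void)x; }
static void mrs_refresh(struct xprt *x) { (void)x; }
static struct seg *map_segs(struct xprt *x, struct seg *s, int n,
                            bool writing, struct mr *mr)
{ (void)x; (void)n; (void)writing; (void)mr; return s; }

static struct seg *mr_prepare(struct xprt *x, struct seg *seg,
                              int nsegs, bool writing, struct mr **mrp)
{
        *mrp = mr_get(x);
        if (!*mrp) {
                /* Free list empty: pause the sender and refill MRs
                 * asynchronously; the caller retries the marshal. */
                wait_for_buffer_space(x);
                mrs_refresh(x);
                return NULL;
        }
        /* Register the next run of segments through this MR. */
        return map_segs(x, seg, nsegs, writing, *mrp);
}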
362 static int rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
380 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
386 seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, false, &mr);
394 r_xprt->rx_stats.read_chunk_count++;
419 static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt,
434 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
449 seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
457 r_xprt->rx_stats.write_chunk_count++;
458 r_xprt->rx_stats.total_rdma_request += mr->mr_length;
484 static int rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
502 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
515 seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
523 r_xprt->rx_stats.reply_chunk_count++;
524 r_xprt->rx_stats.total_rdma_request += mr->mr_length;
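The three chunk encoders at 362, 419, and 484 share one shape: convert the relevant region of the xdr_buf into segments, then register one MR per loop pass and XDR-encode it as an RDMA segment, updating the per-transport statistics seen at 394, 457-458, and 523-524. A condensed sketch of that loop (all helpers here are stubs; the real loop advances by mr->mr_nents segments per pass):

#include <stdbool.h>
#include <stddef.h>

struct mr    { size_t mr_length; int mr_nents; };
struct seg   { size_t len; };
struct stats { unsigned int chunk_count; size_t total_rdma_request; };

static struct mr *the_mr(void) { static struct mr m = { 0, 1 }; return &m; }
static struct seg *prepare_mr(struct seg *s, int n, bool writing,
                              struct mr **mr)
{ (void)n; (void)writing; *mr = the_mr(); return s; }
static bool encode_rdma_segment(struct mr *mr) { (void)mr; return true; }

/* Shared shape of encode_read_list/encode_write_list/encode_reply_chunk:
 * one registered MR per pass, stats bumped as each chunk is encoded. */
static int encode_chunk(struct seg *seg, int nsegs, struct stats *st,
                        bool writing)
{
        while (nsegs > 0) {
                struct mr *mr;

                seg = prepare_mr(seg, nsegs, writing, &mr);
                if (!seg)
                        return -1;      /* kernel returns -EAGAIN here */
                if (!encode_rdma_segment(mr))
                        return -1;      /* kernel returns -EMSGSIZE here */

                st->chunk_count++;
                st->total_rdma_request += mr->mr_length;
                nsegs -= mr->mr_nents;
        }
        return 0;
}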
572 static void rpcrdma_prepare_hdr_sge(struct rpcrdma_xprt *r_xprt,
590 static bool rpcrdma_prepare_head_iov(struct rpcrdma_xprt *r_xprt,
597 if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
678 static void rpcrdma_pullup_tail_iov(struct rpcrdma_xprt *r_xprt,
687 r_xprt->rx_stats.pullup_copy_count += xdr->tail[0].iov_len;
692 static void rpcrdma_pullup_pagelist(struct rpcrdma_xprt *r_xprt,
710 r_xprt->rx_stats.pullup_copy_count += len;
728 static bool rpcrdma_prepare_noch_pullup(struct rpcrdma_xprt *r_xprt,
733 rpcrdma_pullup_tail_iov(r_xprt, req, xdr);
736 rpcrdma_pullup_pagelist(r_xprt, req, xdr);
739 return rpcrdma_prepare_head_iov(r_xprt, req, xdr->len);
742 static bool rpcrdma_prepare_noch_mapped(struct rpcrdma_xprt *r_xprt,
748 if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
764 static bool rpcrdma_prepare_readch(struct rpcrdma_xprt *r_xprt,
768 if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
798 * @r_xprt: controlling transport
806 inline int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
814 req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt);
825 rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen);
830 if (!rpcrdma_prepare_noch_pullup(r_xprt, req, xdr))
834 if (!rpcrdma_prepare_noch_mapped(r_xprt, req, xdr))
838 if (!rpcrdma_prepare_readch(r_xprt, req, xdr))
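rpcrdma_prepare_send_sges() (806-838) always posts the transport header SGE first (825), then chooses how the RPC message body rides the Send. A sketch of that dispatch, using a subset of the kernel's enum rpcrdma_chunktype values; the prep_* helpers are stubs standing in for the three preparation routines listed above:

#include <stdbool.h>

enum rpcrdma_chunktype {
        rpcrdma_noch_pullup, rpcrdma_noch_mapped, rpcrdma_readch
};

struct req; struct xdr_buf;

static bool prep_pullup(struct req *r, struct xdr_buf *x) { (void)r; (void)x; return true; }
static bool prep_mapped(struct req *r, struct xdr_buf *x) { (void)r; (void)x; return true; }
static bool prep_readch(struct req *r, struct xdr_buf *x) { (void)r; (void)x; return true; }

static bool prepare_payload(struct req *req, struct xdr_buf *xdr,
                            enum rpcrdma_chunktype rtype)
{
        switch (rtype) {
        case rpcrdma_noch_pullup:
                /* Copy pagelist and tail into the head buffer so a
                 * single DMA-mapped SGE covers the whole message. */
                return prep_pullup(req, xdr);
        case rpcrdma_noch_mapped:
                /* DMA-map head, page list, and tail as separate SGEs. */
                return prep_mapped(req, xdr);
        case rpcrdma_readch:
                /* Bulk payload travels in Read chunks; only the head
                 * (and any tail) goes in the Send itself. */
                return prep_readch(req, xdr);
        }
        return false;
}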
858 * @r_xprt: controlling transport
876 rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
903 *p++ = r_xprt->rx_buf.rb_max_requests;
921 if (rpcrdma_results_inline(r_xprt, rqst))
924 rpcrdma_nonpayload_inline(r_xprt, rqst))
943 if (rpcrdma_args_inline(r_xprt, rqst)) {
951 r_xprt->rx_stats.nomsg_call_count++;
978 ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
981 ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
984 ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
988 ret = rpcrdma_prepare_send_sges(r_xprt, req, req->rl_hdrbuf.len,
998 r_xprt->rx_stats.failed_marshal_count++;
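rpcrdma_marshal_req() (876-998) opens with a decision tree: the inline predicates pick the Reply direction's chunk type first, then the Call direction's. A condensed sketch with stub predicates standing in for the tests at 921-943 (enum names here are simplified):

#include <stdbool.h>

enum chunktype { ch_noch, ch_readch, ch_writech, ch_replych };

static bool results_inline(void)    { return true; }
static bool nonpayload_inline(void) { return false; }
static bool args_inline(void)       { return true; }

static void choose_chunk_types(enum chunktype *wtype, enum chunktype *rtype)
{
        if (results_inline())
                *wtype = ch_noch;       /* whole Reply fits inline */
        else if (nonpayload_inline())
                *wtype = ch_writech;    /* data payload via Write chunk */
        else
                *wtype = ch_replych;    /* whole Reply via Reply chunk */

        if (args_inline())
                *rtype = ch_noch;       /* whole Call sent inline */
        else
                *rtype = ch_readch;     /* payload via Read chunks; a Call
                                           whose body cannot be sent at all
                                           goes as RDMA_NOMSG, counted by
                                           nomsg_call_count at 951 */
}

The encoders then run in the order fixed by the protocol, visible at 978-984: Read list, then Write list, then Reply chunk, followed by rpcrdma_prepare_send_sges() at 988; any failure lands at the failed_marshal_count accounting at 998.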
1011 static void rpcrdma_update_cwnd(struct rpcrdma_xprt *r_xprt, u32 grant)
1013 struct rpc_xprt *xprt = &r_xprt->rx_xprt;
1016 __rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, grant);
1022 * @r_xprt: controlling transport instance
1024 * Prepare @r_xprt for the next connection by reinitializing
1027 void rpcrdma_reset_cwnd(struct rpcrdma_xprt *r_xprt)
1029 struct rpc_xprt *xprt = &r_xprt->rx_xprt;
1033 __rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, 1);
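rpcrdma_update_cwnd() and rpcrdma_reset_cwnd() translate the server's credit grant into the generic RPC layer's congestion window; reset re-arms a fresh connection with a single credit (1033) so the first Call can discover the server's real grant. A sketch of the translation, assuming the RPC layer's RPC_CWNDSHIFT scaling:

#define RPC_CWNDSHIFT 8U        /* one RPC slot per credit, scaled */

struct rpc_xprt { unsigned long cwnd; };

/* Credit grant -> congestion window, as in
 * __rpcrdma_update_cwnd_locked(); callers hold the transport lock. */
static void update_cwnd(struct rpc_xprt *xprt, unsigned int grant)
{
        xprt->cwnd = (unsigned long)grant << RPC_CWNDSHIFT;
}

/* On reconnect, start over with exactly one credit. */
static void reset_cwnd(struct rpc_xprt *xprt)
{
        update_cwnd(xprt, 1);
}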
1137 rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
1140 struct rpc_xprt *xprt = &r_xprt->rx_xprt;
1175 rpcrdma_bc_receive_call(r_xprt, rep);
1282 rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
1304 r_xprt->rx_stats.fixup_copy_count +=
1307 r_xprt->rx_stats.total_rdma_reply += writelist;
1312 rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
1332 r_xprt->rx_stats.total_rdma_reply += replychunk;
1337 rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
1377 struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
1378 struct rpc_xprt *xprt = &r_xprt->rx_xprt;
1384 status = rpcrdma_decode_msg(r_xprt, rep, rqst);
1387 status = rpcrdma_decode_nomsg(r_xprt, rep);
1390 status = rpcrdma_decode_error(r_xprt, rep, rqst);
1407 r_xprt->rx_stats.bad_reply_count++;
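The reply parser dispatches on the transport header's proc field (1384-1390), and any decode failure is charged to bad_reply_count (1407). A sketch of that dispatch; the proc values follow the RPC-over-RDMA v1 wire protocol (RFC 8166), and the decode_* helpers are stubs:

#include <stdint.h>

enum { rdma_msg = 0, rdma_nomsg = 1, rdma_error = 4 };

struct rep; struct rqst;

static int decode_msg(struct rep *r, struct rqst *q) { (void)r; (void)q; return 0; }
static int decode_nomsg(struct rep *r) { (void)r; return 0; }
static int decode_error(struct rep *r, struct rqst *q) { (void)r; (void)q; return -1; }

static int complete_rqst(uint32_t proc, struct rep *rep, struct rqst *rqst,
                         unsigned long *bad_reply_count)
{
        int status;

        switch (proc) {
        case rdma_msg:          /* payload inline or via Write chunks */
                status = decode_msg(rep, rqst);
                break;
        case rdma_nomsg:        /* whole RPC message in the Reply chunk */
                status = decode_nomsg(rep);
                break;
        case rdma_error:        /* explicit error from the responder */
                status = decode_error(rep, rqst);
                break;
        default:
                status = -1;
        }
        if (status < 0)
                (*bad_reply_count)++;
        return status;
}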
1430 struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
1431 struct rpc_xprt *xprt = &r_xprt->rx_xprt;
1432 struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
1458 if (rpcrdma_is_bcall(r_xprt, rep))
1473 else if (credits > r_xprt->rx_ep->re_max_requests)
1474 credits = r_xprt->rx_ep->re_max_requests;
1475 rpcrdma_post_recvs(r_xprt, credits + (buf->rb_bc_srv_max_requests << 1),
1478 rpcrdma_update_cwnd(r_xprt, credits);
1493 frwr_unmap_async(r_xprt, req);
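The receive path at 1473-1478 clamps the advertised credits to what the endpoint was provisioned for, posts enough Receives to cover the new window plus two per provisioned backchannel request, and propagates the grant into the congestion window. A sketch of the arithmetic; the zero-credit guard is inferred from the "else if" at 1473 (the kernel never lets the window collapse to zero):

static unsigned int next_recv_count(unsigned int credits,
                                    unsigned int re_max_requests,
                                    unsigned int bc_srv_max_requests)
{
        if (credits == 0)
                credits = 1;    /* never let the window collapse */
        else if (credits > re_max_requests)
                credits = re_max_requests;
        /* Two extra Receives per provisioned backchannel request,
         * matching rb_bc_srv_max_requests << 1 at 1475. */
        return credits + (bc_srv_max_requests << 1);
}

After the reply is dispatched, registered MRs are torn down without blocking the completion handler via frwr_unmap_async() (1493).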