Lines Matching defs:sctxt

346  * @sctxt: Send context for the RPC Reply
353 static ssize_t svc_rdma_encode_read_list(struct svc_rdma_send_ctxt *sctxt)
356 return xdr_stream_encode_item_absent(&sctxt->sc_stream);
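
These matches line up with the server-side RPC-over-RDMA send path in the Linux kernel (net/sunrpc/xprtrdma/svc_rdma_sendto.c). The first group, lines 346-356, is svc_rdma_encode_read_list(): a version 1 Reply never carries a Read list, so the encoder emits only the XDR "item absent" discriminator, a single zero word, via xdr_stream_encode_item_absent() on line 356. A minimal userspace sketch of that discriminator encoding follows; struct xdr_sketch and encode_item_absent() are stand-ins, not kernel names.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* Stand-in for the kernel's xdr_stream: a flat buffer plus a cursor. */
struct xdr_sketch {
        uint8_t buf[128];
        size_t pos;
};

/* XDR optional-data discriminator: a zero word means "no item follows".
 * Models xdr_stream_encode_item_absent() from line 356. */
static int encode_item_absent(struct xdr_sketch *xdr)
{
        uint32_t zero = htonl(0);

        if (xdr->pos + sizeof(zero) > sizeof(xdr->buf))
                return -1;
        memcpy(xdr->buf + xdr->pos, &zero, sizeof(zero));
        xdr->pos += sizeof(zero);
        return 0;
}

int main(void)
{
        struct xdr_sketch xdr = { .pos = 0 };

        /* A Reply's Read list is always empty: one zero word ends it. */
        if (encode_item_absent(&xdr))
                return 1;
        printf("Read list encoded in %zu bytes\n", xdr.pos);
        return 0;
}
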
362 * @sctxt: Send context for the RPC Reply
371 struct svc_rdma_send_ctxt *sctxt,
379 p = xdr_reserve_space(&sctxt->sc_stream, len);
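
Lines 362-379 cover svc_rdma_encode_write_segment(): line 379 reserves stream space, and the segment written there is, per RFC 8166, a 32-bit handle, a 32-bit length, and a 64-bit offset, all big-endian. A hedged sketch of that wire layout plus the remaining-bytes accounting done around line 437 (the handle, length, and offset values below are made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* One RDMA segment on the wire: four big-endian XDR words. */
static size_t encode_rdma_segment(uint8_t *p, uint32_t handle,
                                  uint32_t length, uint64_t offset)
{
        uint32_t words[4] = {
                htonl(handle),
                htonl(length),
                htonl((uint32_t)(offset >> 32)),  /* offset, high word */
                htonl((uint32_t)offset),          /* offset, low word */
        };

        memcpy(p, words, sizeof(words));
        return sizeof(words);
}

int main(void)
{
        uint8_t seg[16];
        uint32_t remaining = 8192;      /* reply payload not yet described */
        uint32_t this_len = remaining < 4096 ? remaining : 4096;

        /* Clamp the segment to what is left, then account for it. */
        encode_rdma_segment(seg, 0xabc1, this_len, 0x10000);
        remaining -= this_len;
        printf("segment=%zu bytes on the wire, %u payload bytes left\n",
               sizeof(seg), remaining);
        return 0;
}
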
402 * @sctxt: Send context for the RPC Reply
415 struct svc_rdma_send_ctxt *sctxt,
425 ret = xdr_stream_encode_item_present(&sctxt->sc_stream);
431 ret = xdr_stream_encode_u32(&sctxt->sc_stream, nsegs);
437 ret = svc_rdma_encode_write_segment(src, sctxt, &remaining);
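
Lines 402-437 are svc_rdma_encode_write_chunk(): an "item present" discriminator (line 425), a segment count (line 431), then one write segment per element (line 437). A self-contained sketch of that framing (the chunk contents are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

static uint8_t buf[256];
static size_t pos;

static void put_be32(uint32_t v)
{
        uint32_t be = htonl(v);

        memcpy(buf + pos, &be, sizeof(be));
        pos += sizeof(be);
}

int main(void)
{
        uint32_t nsegs = 2;
        uint32_t remaining = 8192;

        put_be32(1);            /* item present (line 425) */
        put_be32(nsegs);        /* segment count (line 431) */
        for (uint32_t i = 0; i < nsegs; i++) {
                uint32_t len = remaining < 4096 ? remaining : 4096;

                put_be32(0xabc1 + i);   /* handle */
                put_be32(len);          /* length */
                put_be32(0);            /* offset, high word */
                put_be32(i * 4096);     /* offset, low word */
                remaining -= len;       /* per-segment accounting */
        }
        printf("Write chunk: %zu bytes, %u payload bytes undescribed\n",
               pos, remaining);
        return 0;
}
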
450 * @sctxt: Send context for the RPC Reply
468 struct svc_rdma_send_ctxt *sctxt,
473 ret = svc_rdma_encode_write_chunk(rctxt->rc_write_list, sctxt, length);
479 ret = xdr_stream_encode_item_absent(&sctxt->sc_stream);
489 * @sctxt: Send context for the RPC Reply
502 struct svc_rdma_send_ctxt *sctxt,
505 return svc_rdma_encode_write_chunk(rctxt->rc_reply_chunk, sctxt,
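
Lines 450-505 are the two wrappers, svc_rdma_encode_write_list() and svc_rdma_encode_reply_chunk(). The Write list encodes the supplied chunk (line 473) and is then terminated by a zero discriminator (line 479); the Reply chunk is a single optional item, so it gets no terminator. A sketch of that list-versus-optional framing, with the chunk bodies elided:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

static uint8_t buf[64];
static size_t pos;

static void put_be32(uint32_t v)
{
        uint32_t be = htonl(v);

        memcpy(buf + pos, &be, sizeof(be));
        pos += sizeof(be);
}

int main(void)
{
        int have_write_chunk = 1;       /* rctxt->rc_write_list != NULL */
        int have_reply_chunk = 0;       /* rctxt->rc_reply_chunk == NULL */

        /* Write list: the chunk (its body elided here), then a
         * terminating zero word (line 479). */
        if (have_write_chunk)
                put_be32(1);
        put_be32(0);

        /* Reply chunk: a single optional item, so no terminator. */
        put_be32(have_reply_chunk ? 1 : 0);

        printf("chunk lists framed in %zu bytes\n", pos);
        return 0;
}
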
547 * @sctxt: send_ctxt for the Send WR
556 struct svc_rdma_send_ctxt *sctxt,
564 if (sctxt->sc_hdrbuf.len + xdr->len < RPCRDMA_PULLUP_THRESH)
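
Lines 547-564 are svc_rdma_pull_up_needed(): when the transport header plus the whole RPC message fits under RPCRDMA_PULLUP_THRESH (line 564), copying the message into the already-mapped header buffer beats DMA-mapping several sges. A sketch of the decision; the 2048-byte threshold below is an assumption for illustration only, the kernel derives its value from the inline buffer size:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Assumed value for illustration; not the kernel's definition. */
#define PULLUP_THRESH 2048

/* Mirrors the test on line 564: small replies are copied into the
 * already-mapped header buffer instead of being mapped piecemeal. */
static bool pull_up_needed(size_t hdr_len, size_t msg_len)
{
        return hdr_len + msg_len < PULLUP_THRESH;
}

int main(void)
{
        printf("104-byte reply: pull up? %d\n", pull_up_needed(48, 104));
        printf("8 KB reply:     pull up? %d\n", pull_up_needed(48, 8192));
        return 0;
}
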
599 * @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared
609 struct svc_rdma_send_ctxt *sctxt,
616 dst = sctxt->sc_xprt_buf + sctxt->sc_hdrbuf.len;
652 sctxt->sc_sges[0].length += xdr->len;
653 trace_svcrdma_send_pullup(sctxt->sc_sges[0].length);
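
Lines 599-653 are svc_rdma_pull_up_reply_msg(): the copy destination starts just past the encoded transport header (line 616), and after the message pieces are copied in, the single sge grows by the message length (line 652) before the tracepoint on line 653 fires. A sketch of that coalescing; struct sge_sketch stands in for struct ib_sge, and the head/tail contents are made up:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sge_sketch {             /* stand-in for struct ib_sge */
        void *addr;
        uint32_t length;
};

int main(void)
{
        static uint8_t xprt_buf[4096];  /* pre-mapped send buffer */
        uint32_t hdr_len = 48;          /* transport header already built */
        const char head[] = "rpc-head";
        const char tail[] = "rpc-tail";
        struct sge_sketch sge = { .addr = xprt_buf, .length = hdr_len };
        uint8_t *dst = xprt_buf + hdr_len;      /* line 616 */

        /* Copy each xdr_buf piece in behind the header; the real
         * function copies the page array here as well. */
        memcpy(dst, head, sizeof(head) - 1);
        dst += sizeof(head) - 1;
        memcpy(dst, tail, sizeof(tail) - 1);
        dst += sizeof(tail) - 1;

        /* One sge now covers header + message (line 652). */
        sge.length += (uint32_t)(dst - (xprt_buf + hdr_len));
        printf("single sge of %u bytes\n", sge.length);
        return 0;
}
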
659 * @sctxt: send_ctxt for the Send WR
669 struct svc_rdma_send_ctxt *sctxt,
681 sctxt->sc_send_wr.num_sge = 1;
682 sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
693 if (svc_rdma_pull_up_needed(rdma, sctxt, rctxt, xdr))
694 return svc_rdma_pull_up_reply_msg(rdma, sctxt, rctxt, xdr);
696 ++sctxt->sc_cur_sge_no;
697 ret = svc_rdma_dma_map_buf(rdma, sctxt,
727 ++sctxt->sc_cur_sge_no;
728 ret = svc_rdma_dma_map_page(rdma, sctxt, *ppages++,
741 ++sctxt->sc_cur_sge_no;
742 ret = svc_rdma_dma_map_buf(rdma, sctxt, base, len);
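
Lines 659-742 are svc_rdma_map_reply_msg(): sge[0] always carries the transport header (lines 681-682); unless the message is pulled up instead (lines 693-694), the xdr_buf's head (line 697), each page (line 728), and the tail (line 742) are each DMA-mapped into their own sge, advancing sc_cur_sge_no before every mapping. A sketch of that gather-list construction, with plain pointers standing in for DMA mappings:

#include <stdint.h>
#include <stdio.h>

#define MAX_SGES 8

struct sge_sketch {
        void *addr;
        uint32_t length;
};

struct send_sketch {            /* stand-in for svc_rdma_send_ctxt */
        struct sge_sketch sges[MAX_SGES];
        unsigned int cur_sge_no;
        unsigned int num_sge;
};

static int map_piece(struct send_sketch *ctxt, void *addr, uint32_t len)
{
        if (++ctxt->cur_sge_no >= MAX_SGES)
                return -1;      /* ran out of sge slots */
        ctxt->sges[ctxt->cur_sge_no] = (struct sge_sketch){ addr, len };
        ctxt->num_sge++;
        return 0;
}

int main(void)
{
        static uint8_t hdr[48], head[100], page0[4096], tail[4];
        struct send_sketch ctxt = {
                .sges[0] = { hdr, sizeof(hdr) },  /* lines 681-682 */
                .num_sge = 1,
        };

        /* head, then every page, then tail (lines 697, 728, 742) */
        map_piece(&ctxt, head, sizeof(head));
        map_piece(&ctxt, page0, sizeof(page0));
        map_piece(&ctxt, tail, sizeof(tail));
        printf("Send WR uses %u sges\n", ctxt.num_sge);
        return 0;
}
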
780 * of the rqstp and into the sctxt's page array. These pages are
788 struct svc_rdma_send_ctxt *sctxt,
794 ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqstp->rq_res);
798 svc_rdma_save_io_pages(rqstp, sctxt);
801 sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
802 sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
804 sctxt->sc_send_wr.opcode = IB_WR_SEND;
806 return svc_rdma_send(rdma, sctxt);
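
Lines 780-806 are svc_rdma_send_reply_msg(): after mapping the reply (line 794) and saving the response pages (line 798), the Send is posted as IB_WR_SEND_WITH_INV carrying the client's rc_inv_rkey when remote invalidation was negotiated (lines 801-802), otherwise as a plain IB_WR_SEND (line 804). A sketch of that opcode selection; the enum and struct below are stand-ins for the ib_verbs types:

#include <stdint.h>
#include <stdio.h>

enum wr_opcode { WR_SEND, WR_SEND_WITH_INV };   /* stand-ins */

struct send_wr_sketch {
        enum wr_opcode opcode;
        uint32_t invalidate_rkey;
};

/* If the client offered an rkey it wants invalidated, piggyback the
 * invalidation on the Send (lines 801-804). */
static void choose_send_opcode(struct send_wr_sketch *wr, uint32_t inv_rkey)
{
        if (inv_rkey) {
                wr->opcode = WR_SEND_WITH_INV;
                wr->invalidate_rkey = inv_rkey;
        } else {
                wr->opcode = WR_SEND;
        }
}

int main(void)
{
        struct send_wr_sketch wr = { 0 };

        choose_send_opcode(&wr, 0xbeef);
        printf("opcode=%d rkey=0x%x\n", wr.opcode, wr.invalidate_rkey);
        return 0;
}
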
812 * @sctxt: Send context for the response
821 * The caller does not have to release @sctxt. It is released by
825 struct svc_rdma_send_ctxt *sctxt,
832 rpcrdma_set_xdrlen(&sctxt->sc_hdrbuf, 0);
833 xdr_init_encode(&sctxt->sc_stream, &sctxt->sc_hdrbuf,
834 sctxt->sc_xprt_buf, NULL);
836 p = xdr_reserve_space(&sctxt->sc_stream,
848 p = xdr_reserve_space(&sctxt->sc_stream, 3 * sizeof(*p));
858 p = xdr_reserve_space(&sctxt->sc_stream, sizeof(*p));
867 sctxt->sc_send_wr.num_sge = 1;
868 sctxt->sc_send_wr.opcode = IB_WR_SEND;
869 sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
870 if (svc_rdma_send(rdma, sctxt))
875 svc_rdma_send_ctxt_put(rdma, sctxt);
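
Lines 812-875 are svc_rdma_send_error_msg(): the partly built reply header is discarded by zeroing sc_hdrbuf and re-initializing the XDR stream over the same buffer (lines 832-834), then a fresh header is encoded with the RDMA_ERROR procedure. ERR_VERS needs three words for the error code plus the supported version range (line 848), while ERR_CHUNK is a single word (line 858). The context is always released on the way out (line 875), which is why the kernel-doc at line 821 says the caller need not release @sctxt. A sketch of the rewind-and-reencode; the xid and credit values are made up:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

struct xdr_sketch {
        uint8_t *buf;
        size_t len;     /* bytes encoded so far */
};

static void put_be32(struct xdr_sketch *xdr, uint32_t v)
{
        uint32_t be = htonl(v);

        memcpy(xdr->buf + xdr->len, &be, sizeof(be));
        xdr->len += sizeof(be);
}

int main(void)
{
        static uint8_t xprt_buf[128];
        /* Pretend 60 bytes of a reply header were already encoded. */
        struct xdr_sketch xdr = { .buf = xprt_buf, .len = 60 };

        /* Throw away the partial reply header (lines 832-834). */
        xdr.len = 0;

        /* Fixed header words, then the error procedure and code. */
        put_be32(&xdr, 0x11223344);     /* xid, echoed from the call */
        put_be32(&xdr, 1);              /* rdma_vers */
        put_be32(&xdr, 64);             /* credits granted */
        put_be32(&xdr, 4);              /* RDMA_ERROR procedure */
        put_be32(&xdr, 2);              /* ERR_CHUNK error code */

        printf("error reply header: %zu bytes, one sge\n", xdr.len);
        return 0;
}
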
900 struct svc_rdma_send_ctxt *sctxt;
909 sctxt = svc_rdma_send_ctxt_get(rdma);
910 if (!sctxt)
913 p = xdr_reserve_space(&sctxt->sc_stream,
922 if (svc_rdma_encode_read_list(sctxt) < 0)
940 if (svc_rdma_encode_write_list(rctxt, sctxt, length) < 0)
943 if (xdr_stream_encode_item_absent(&sctxt->sc_stream) < 0)
950 if (svc_rdma_encode_reply_chunk(rctxt, sctxt, ret) < 0)
953 if (xdr_stream_encode_item_absent(&sctxt->sc_stream) < 0)
957 ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
969 svc_rdma_save_io_pages(rqstp, sctxt);
970 svc_rdma_send_error_msg(rdma, sctxt, rctxt, ret);
974 svc_rdma_send_ctxt_put(rdma, sctxt);
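
Lines 900-975 are svc_rdma_sendto(), the top of the reply path: acquire a send context (line 909), encode the fixed header words (line 913), then the Read list, Write list, and Reply chunk in wire order (lines 922-953), and post the reply (line 957). On failure after the context is acquired, the response pages are saved, an RDMA_ERROR reply is attempted, and the context is put back (lines 969-975). A sketch of just that ordering and unwinding; every helper below is a stand-in stub:

#include <stdio.h>

/* All stand-ins: each returns 0 on success, -1 on failure. */
static int encode_fixed_words(void) { return 0; }
static int encode_read_list(void)   { return 0; }
static int encode_write_list(void)  { return 0; }
static int encode_reply_chunk(void) { return 0; }
static int post_send(void)          { return 0; }
static void release_ctxt(void)      { puts("ctxt released"); }

int main(void)
{
        /* Wire order mirrors lines 913-957: fixed words, Read list,
         * Write list, Reply chunk, then the Send itself. */
        if (encode_fixed_words() < 0)
                goto put_ctxt;
        if (encode_read_list() < 0)
                goto put_ctxt;
        if (encode_write_list() < 0)
                goto put_ctxt;
        if (encode_reply_chunk() < 0)
                goto put_ctxt;
        if (post_send() < 0)
                goto put_ctxt;
        puts("reply posted");
        return 0;

put_ctxt:
        /* The real function also saves the I/O pages and tries to
         * send an RDMA_ERROR reply before releasing the context. */
        release_ctxt();
        return 1;
}
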