Lines Matching refs:rdma

8 #include <rdma/rw.h>
53 svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
57 spin_lock(&rdma->sc_rw_ctxt_lock);
59 ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
62 spin_unlock(&rdma->sc_rw_ctxt_lock);
64 spin_unlock(&rdma->sc_rw_ctxt_lock);
82 trace_svcrdma_no_rwctx_err(rdma, sges);
86 static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
91 spin_lock(&rdma->sc_rw_ctxt_lock);
92 list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
93 spin_unlock(&rdma->sc_rw_ctxt_lock);
98 * @rdma: transport about to be destroyed
101 void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
105 while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
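The matches above (lines 53-105) are from the R/W context pool in net/sunrpc/xprtrdma/svc_rdma_rw.c: contexts are recycled on rdma->sc_rw_ctxts under sc_rw_ctxt_lock, and svc_rdma_destroy_rw_ctxts() drains that list at transport teardown. Below is a minimal sketch of the get/put pair; the struct svc_rdma_rw_ctxt layout, the kmalloc/sg_alloc_table_chained details, and the SG_CHUNK_SIZE sizing are assumptions reconstructed around the matched lines, not quoted from the file.

/* Assumed shape of the pooled per-I/O context (the real definition is
 * local to this source file; field names follow the matched lines).
 */
struct svc_rdma_rw_ctxt {
	struct list_head	rw_list;
	struct rdma_rw_ctx	rw_ctx;
	unsigned int		rw_nents;
	struct sg_table		rw_sg_table;
	struct scatterlist	rw_first_sgl[];
};

static struct svc_rdma_rw_ctxt *
svc_rdma_next_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt,
					rw_list);
}

static struct svc_rdma_rw_ctxt *
svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
{
	struct svc_rdma_rw_ctxt *ctxt;

	spin_lock(&rdma->sc_rw_ctxt_lock);
	ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
	if (ctxt) {
		/* Fast path: reuse a pooled context. */
		list_del(&ctxt->rw_list);
		spin_unlock(&rdma->sc_rw_ctxt_lock);
	} else {
		/* Pool empty: drop the lock and allocate a fresh one. */
		spin_unlock(&rdma->sc_rw_ctxt_lock);
		ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
			       GFP_KERNEL);
		if (!ctxt)
			goto out_noctx;
		INIT_LIST_HEAD(&ctxt->rw_list);
	}

	/* Attach a scatterlist large enough for @sges segments. */
	ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
	if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
				   ctxt->rw_sg_table.sgl, SG_CHUNK_SIZE))
		goto out_free;
	return ctxt;

out_free:
	kfree(ctxt);
out_noctx:
	trace_svcrdma_no_rwctx_err(rdma, sges);
	return NULL;
}

static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
				 struct svc_rdma_rw_ctxt *ctxt)
{
	sg_free_table_chained(&ctxt->rw_sg_table, SG_CHUNK_SIZE);

	/* Return the context to the per-transport pool. */
	spin_lock(&rdma->sc_rw_ctxt_lock);
	list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
	spin_unlock(&rdma->sc_rw_ctxt_lock);
}

svc_rdma_destroy_rw_ctxts() (line 101) walks the same sc_rw_ctxts list with svc_rdma_next_ctxt(), but frees each context instead of reusing it.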
113 * @rdma: controlling transport instance
122 static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma,
129 ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num,
133 svc_rdma_put_rw_ctxt(rdma, ctxt);
134 trace_svcrdma_dma_map_rw_err(rdma, ctxt->rw_nents, ret);
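Lines 113-134 wrap rdma_rw_ctx_init(), which DMA-maps the context's scatterlist and builds the RDMA Read/Write work requests for one chunk segment; on failure the context goes back to the pool and a trace event records why. A sketch, assuming the full parameter list and the zero segment offset that the matches do not show:

static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma,
				struct svc_rdma_rw_ctxt *ctxt,
				u64 offset, u32 handle,
				enum dma_data_direction direction)
{
	int ret;

	/* Map ctxt->rw_sg_table and build the WRs that target the
	 * remote segment <handle, offset>. On success the return
	 * value is the number of WRs needed to move this segment.
	 */
	ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num,
			       ctxt->rw_sg_table.sgl, ctxt->rw_nents,
			       0, offset, handle, direction);
	if (ret < 0) {
		/* Mapping failed: recycle the context and record why. */
		svc_rdma_put_rw_ctxt(rdma, ctxt);
		trace_svcrdma_dma_map_rw_err(rdma, ctxt->rw_nents, ret);
	}
	return ret;
}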
155 static void svc_rdma_cc_cid_init(struct svcxprt_rdma *rdma,
158 cid->ci_queue_id = rdma->sc_sq_cq->res.id;
159 cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
162 static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
165 svc_rdma_cc_cid_init(rdma, &cc->cc_cid);
166 cc->cc_rdma = rdma;
175 struct svcxprt_rdma *rdma = cc->cc_rdma;
181 rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
182 rdma->sc_port_num, ctxt->rw_sg_table.sgl,
184 svc_rdma_put_rw_ctxt(rdma, ctxt);
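Lines 155-184 set up and tear down the chunk context (cc) that groups the rw contexts for one Write or Read chunk: each cc gets a completion ID built from the send CQ's resource id plus a per-transport counter, and release destroys every rw context's mapping before handing it back to the pool. The cc fields beyond cc_cid and cc_rdma (cc_rwctxts, cc_sqecount) are assumptions in this sketch:

static void svc_rdma_cc_cid_init(struct svcxprt_rdma *rdma,
				 struct rpc_rdma_cid *cid)
{
	/* Completion IDs pair the send CQ's resource id with a
	 * monotonically increasing per-transport counter.
	 */
	cid->ci_queue_id = rdma->sc_sq_cq->res.id;
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
			     struct svc_rdma_chunk_ctxt *cc)
{
	svc_rdma_cc_cid_init(rdma, &cc->cc_cid);
	cc->cc_rdma = rdma;

	/* Assumed: the list of rw contexts and the SQE count that
	 * svc_rdma_post_chunk_ctxt() consumes start out empty.
	 */
	INIT_LIST_HEAD(&cc->cc_rwctxts);
	cc->cc_sqecount = 0;
}

static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
				enum dma_data_direction dir)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
		list_del(&ctxt->rw_list);
		/* Unmap this context's pages, then recycle it. */
		rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
				    rdma->sc_port_num, ctxt->rw_sg_table.sgl,
				    ctxt->rw_nents, dir);
		svc_rdma_put_rw_ctxt(rdma, ctxt);
	}
}

svc_rdma_write_info_alloc() (line 208) and svc_rdma_read_info_alloc() (line 271) each embed one of these chunk contexts and initialize it through svc_rdma_cc_init().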
208 svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk)
220 svc_rdma_cc_init(rdma, &info->wi_cc);
243 struct svcxprt_rdma *rdma = cc->cc_rdma;
249 atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
250 wake_up(&rdma->sc_send_wait);
253 set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
271 svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
279 svc_rdma_cc_init(rdma, &info->ri_cc);
301 struct svcxprt_rdma *rdma = cc->cc_rdma;
307 atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
308 wake_up(&rdma->sc_send_wait);
311 set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
312 svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt);
314 spin_lock(&rdma->sc_rq_dto_lock);
316 &rdma->sc_read_complete_q);
318 set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
319 spin_unlock(&rdma->sc_rq_dto_lock);
321 svc_xprt_enqueue(&rdma->sc_xprt);
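Lines 243-253 and 301-321 are the Send Queue completion handlers for posted Write and Read chunks. Both return the chunk's SQE credits to sc_sq_avail and wake any sender parked in svc_rdma_post_chunk_ctxt(); a failed completion marks the transport for close. The Read handler additionally queues the receive context on sc_read_complete_q so the now-complete RPC can be dispatched. A sketch of the Read side; the container_of() derivations, the rc_list member, and the final free helper are assumptions:

static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
		container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_read_info *info =
		container_of(cc, struct svc_rdma_read_info, ri_cc);

	/* Give this chunk's SQE credits back and wake a blocked sender. */
	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		/* A flushed or failed Read tears down the connection. */
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt);
	} else {
		/* Hand the completed RPC message to the server threads. */
		spin_lock(&rdma->sc_rq_dto_lock);
		list_add_tail(&info->ri_readctxt->rc_list,
			      &rdma->sc_read_complete_q);
		set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
		spin_unlock(&rdma->sc_rq_dto_lock);

		svc_xprt_enqueue(&rdma->sc_xprt);
	}

	svc_rdma_read_info_free(info);	/* assumed cleanup helper */
}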
336 struct svcxprt_rdma *rdma = cc->cc_rdma;
337 struct svc_xprt *xprt = &rdma->sc_xprt;
344 if (cc->cc_sqecount > rdma->sc_sq_depth)
353 first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
354 rdma->sc_port_num, cqe, first_wr);
360 &rdma->sc_sq_avail) > 0) {
362 ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
368 trace_svcrdma_sq_full(rdma);
369 atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
370 wait_event(rdma->sc_send_wait,
371 atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
372 trace_svcrdma_sq_retry(rdma);
375 trace_svcrdma_sq_post_err(rdma, ret);
382 atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
383 wake_up(&rdma->sc_send_wait);
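Lines 336-383 are the posting path shared by Writes and Reads: the WRs from every rw context are chained with rdma_rw_ctx_wrs(), SQE credits are reserved by atomically decrementing sc_sq_avail, and a sender that would overrun the Send Queue gives the credits back and sleeps on sc_send_wait until completions return enough room. A sketch of that flow-control loop; the exact error unwinding after a failed ib_post_send() is simplified here:

static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_xprt *xprt = &rdma->sc_xprt;
	struct ib_send_wr *first_wr = NULL;
	const struct ib_send_wr *bad_wr;
	struct ib_cqe *cqe = &cc->cc_cqe;
	struct svc_rdma_rw_ctxt *ctxt;
	int ret;

	/* A chain that can never fit in the Send Queue is a caller bug. */
	if (cc->cc_sqecount > rdma->sc_sq_depth)
		return -EINVAL;

	/* Chain every rw context's WRs behind a single completion. */
	list_for_each_entry(ctxt, &cc->cc_rwctxts, rw_list)
		first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
					   rdma->sc_port_num, cqe, first_wr);

	do {
		/* Reserve SQE credits; post only if they all fit. */
		if (atomic_sub_return(cc->cc_sqecount,
				      &rdma->sc_sq_avail) > 0) {
			ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
			if (ret)
				break;
			return 0;
		}

		/* SQ full: return the credits and wait for completions. */
		trace_svcrdma_sq_full(rdma);
		atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
		wait_event(rdma->sc_send_wait,
			   atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
		trace_svcrdma_sq_retry(rdma);
	} while (1);

	/* Posting failed: close the transport and release the credits
	 * that no completion will ever return.
	 */
	trace_svcrdma_sq_post_err(rdma, ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);
	return ret;
}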
445 struct svcxprt_rdma *rdma = cc->cc_rdma;
463 ctxt = svc_rdma_get_rw_ctxt(rdma,
469 ret = svc_rdma_rw_ctx_init(rdma, ctxt, offset, handle,
491 trace_svcrdma_small_wrch_err(rdma, remaining, info->wi_seg_no,
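Lines 445-491 sit inside the loop that turns one Write chunk into RDMA Write WRs: for each chunk segment the builder takes an rw context sized to the pages it will touch, maps it with svc_rdma_rw_ctx_init(), and adds the returned WR count to the chunk context's SQE budget; if the chunk's segments run out before the payload is fully consumed, trace_svcrdma_small_wrch_err() records the short Write chunk. The helper below is a hypothetical, condensed per-segment step (the real loop also decodes the XDR segment and fills the scatterlist from the xdr_buf):

/* Hypothetical helper condensing one iteration of the Write builder. */
static int svc_rdma_build_one_write(struct svc_rdma_write_info *info,
				    u64 offset, u32 handle,
				    unsigned int write_len)
{
	struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;
	int ret;

	/* Size the scatterlist for the pages this segment spans. */
	ctxt = svc_rdma_get_rw_ctxt(rdma, (write_len >> PAGE_SHIFT) + 2);
	if (!ctxt)
		return -ENOMEM;

	/* (The real code fills ctxt->rw_sg_table from the xdr_buf here.) */

	/* DMA-map the pages and build the Writes for this segment. */
	ret = svc_rdma_rw_ctx_init(rdma, ctxt, offset, handle,
				   DMA_TO_DEVICE);
	if (ret < 0)
		return -EIO;

	/* Queue the context and budget the WRs it will post. */
	list_add(&ctxt->rw_list, &cc->cc_rwctxts);
	cc->cc_sqecount += ret;
	return 0;
}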
526 * @rdma: controlling RDMA transport
539 int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
549 info = svc_rdma_write_info_alloc(rdma, wr_ch);
571 * @rdma: controlling RDMA transport
582 int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
589 info = svc_rdma_write_info_alloc(rdma, rctxt->rc_reply_chunk);
828 * @rdma: controlling RDMA transport
843 int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
861 info = svc_rdma_read_info_alloc(rdma);
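Lines 526-589 and 828-861 are the exported entry points: svc_rdma_send_write_chunk() and svc_rdma_send_reply_chunk() each allocate a write_info for the chunk named in the RPC-over-RDMA header and push the payload with RDMA Writes, while svc_rdma_recv_read_chunk() allocates a read_info and pulls Read chunk payloads from the client. A sketch of the Write chunk entry point; the page-list builder and the write_info free helper are assumptions here:

int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
			      struct xdr_buf *xdr,
			      unsigned int offset, unsigned long length)
{
	struct svc_rdma_write_info *info;
	int ret;

	if (!length)
		return 0;

	/* One write_info per Write chunk; it embeds the chunk context. */
	info = svc_rdma_write_info_alloc(rdma, wr_ch);
	if (!info)
		return -ENOMEM;

	/* Build one or more RDMA Writes per chunk segment (hypothetical
	 * helper name; the real builder walks the xdr_buf page list).
	 */
	ret = svc_rdma_build_pagelist_writes(info, xdr, offset, length);
	if (ret < 0)
		goto out_err;

	/* Post the whole chain; the Write completion handler releases
	 * the SQE credits and frees the write_info.
	 */
	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;
	return length;

out_err:
	svc_rdma_write_info_free(info);	/* assumed cleanup helper */
	return ret;
}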