Lines matching refs:rdma
(each match below is prefixed with its line number in the source file)

8 #include <rdma/rw.h>
54 svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
59 spin_lock(&rdma->sc_rw_ctxt_lock);
60 node = llist_del_first(&rdma->sc_rw_ctxts);
61 spin_unlock(&rdma->sc_rw_ctxt_lock);
66 GFP_KERNEL, ibdev_to_node(rdma->sc_cm_id->device));
83 trace_svcrdma_no_rwctx_err(rdma, sges);
94 static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
97 __svc_rdma_put_rw_ctxt(ctxt, &rdma->sc_rw_ctxts);
102 * @rdma: transport about to be destroyed
105 void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
110 while ((node = llist_del_first(&rdma->sc_rw_ctxts)) != NULL) {
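
The matches from source lines 54 through 110 cover the R/W context pool: svc_rdma_get_rw_ctxt() pops a cached context from an llist under sc_rw_ctxt_lock and falls back to a NUMA-local allocation, svc_rdma_put_rw_ctxt() pushes a context back, and svc_rdma_destroy_rw_ctxts() drains the pool at transport teardown. The kernel-style sketch below shows the same llist free-list pattern; struct demo_rw_ctxt and the demo_* helpers are hypothetical stand-ins, not the svc_rdma types.

#include <linux/llist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>

/* Hypothetical, trimmed-down stand-in for struct svc_rdma_rw_ctxt. */
struct demo_rw_ctxt {
        struct llist_node       rw_node;
        unsigned int            rw_sges;
};

/*
 * Pop a cached context if one is available; otherwise allocate a new
 * one close to the RDMA device's NUMA node.  Callers of
 * llist_del_first() must serialize against each other, hence the lock.
 */
static struct demo_rw_ctxt *demo_get_rw_ctxt(struct llist_head *pool,
                                             spinlock_t *lock,
                                             struct ib_device *device,
                                             unsigned int sges)
{
        struct llist_node *node;
        struct demo_rw_ctxt *ctxt;

        spin_lock(lock);
        node = llist_del_first(pool);
        spin_unlock(lock);
        if (node) {
                ctxt = llist_entry(node, struct demo_rw_ctxt, rw_node);
        } else {
                /* The real code sizes an inline SGE array with @sges. */
                ctxt = kmalloc_node(sizeof(*ctxt), GFP_KERNEL,
                                    ibdev_to_node(device));
                if (!ctxt)
                        return NULL;    /* caller traces and backs off */
        }
        ctxt->rw_sges = sges;
        return ctxt;
}

static void demo_put_rw_ctxt(struct demo_rw_ctxt *ctxt,
                             struct llist_head *pool)
{
        llist_add(&ctxt->rw_node, pool);
}

/* Teardown runs single-threaded, so the pops need no lock here. */
static void demo_destroy_rw_ctxts(struct llist_head *pool)
{
        struct llist_node *node;

        while ((node = llist_del_first(pool)) != NULL)
                kfree(llist_entry(node, struct demo_rw_ctxt, rw_node));
}
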
118 * @rdma: controlling transport instance
127 static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma,
134 ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num,
138 svc_rdma_put_rw_ctxt(rdma, ctxt);
139 trace_svcrdma_dma_map_rw_err(rdma, ctxt->rw_nents, ret);
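
Source lines 118 through 139 match a thin wrapper around rdma_rw_ctx_init() from <rdma/rw.h>: it DMA-maps the context's scatterlist for a transfer against the client-provided handle and offset, and on failure recycles the context and fires trace_svcrdma_dma_map_rw_err(). A hedged sketch of that call shape follows; demo_rw_ctx_init() is illustrative, and the rdma_rw_ctx_init() argument order is the one declared in <rdma/rw.h>.

#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <rdma/rw.h>

/*
 * Illustrative wrapper: map @sgl for an RDMA transfer described by the
 * peer's {handle, offset} pair.  On success rdma_rw_ctx_init() returns
 * the number of WQEs the transfer will need.
 */
static int demo_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                            u32 port_num, struct scatterlist *sgl,
                            u32 nents, u64 remote_offset, u32 remote_handle,
                            enum dma_data_direction dir)
{
        int ret;

        ret = rdma_rw_ctx_init(ctx, qp, port_num, sgl, nents,
                               0 /* sg_offset */,
                               remote_offset, remote_handle, dir);
        if (ret < 0) {
                /* the svc_rdma code puts the ctxt back and traces here */
        }
        return ret;
}
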
163 static void svc_rdma_cc_cid_init(struct svcxprt_rdma *rdma,
166 cid->ci_queue_id = rdma->sc_sq_cq->res.id;
167 cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
170 static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
173 svc_rdma_cc_cid_init(rdma, &cc->cc_cid);
174 cc->cc_rdma = rdma;
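
Source lines 163 through 174 match the per-I/O chunk-context setup: each context is stamped with a completion ID built from the send CQ's restrack queue ID plus a per-transport atomic counter, and keeps a back pointer to the owning transport. A minimal sketch of that ID scheme, with a hypothetical demo_cid standing in for struct rpc_rdma_cid:

#include <linux/atomic.h>
#include <rdma/ib_verbs.h>

/* Hypothetical stand-in for struct rpc_rdma_cid. */
struct demo_cid {
        u32 ci_queue_id;        /* which CQ the completion arrives on */
        u32 ci_completion_id;   /* per-transport, monotonically rising */
};

static void demo_cc_cid_init(struct ib_cq *sq_cq, atomic_t *completion_ids,
                             struct demo_cid *cid)
{
        cid->ci_queue_id = sq_cq->res.id;       /* restrack ID of the CQ */
        cid->ci_completion_id = atomic_inc_return(completion_ids);
}
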
188 struct svcxprt_rdma *rdma = cc->cc_rdma;
199 rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
200 rdma->sc_port_num, ctxt->rw_sg_table.sgl,
210 llist_add_batch(first, last, &rdma->sc_rw_ctxts);
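
Source lines 188 through 210 match the release path for a chunk context: each R/W context it accumulated is DMA-unmapped with rdma_rw_ctx_destroy(), and the whole chain is then spliced back onto the free pool with a single llist_add_batch(). A sketch of that batch-return idiom, with a hypothetical demo_rw_chain_ctxt carrying just the fields this step touches:

#include <linux/dma-direction.h>
#include <linux/llist.h>
#include <linux/scatterlist.h>
#include <rdma/rw.h>

/* Hypothetical context type for this sketch. */
struct demo_rw_chain_ctxt {
        struct llist_node       rw_node;
        struct rdma_rw_ctx      rw_ctx;
        struct scatterlist      *rw_sgl;
        unsigned int            rw_nents;
};

/*
 * Unmap every context on @chain (a singly linked chain private to the
 * caller at this point), then return the whole chain to @pool with one
 * lockless splice.
 */
static void demo_release_chain(struct ib_qp *qp, u32 port_num,
                               struct llist_node *chain,
                               enum dma_data_direction dir,
                               struct llist_head *pool)
{
        struct llist_node *last = NULL, *node;

        for (node = chain; node; node = node->next) {
                struct demo_rw_chain_ctxt *ctxt =
                        llist_entry(node, struct demo_rw_chain_ctxt, rw_node);

                rdma_rw_ctx_destroy(&ctxt->rw_ctx, qp, port_num,
                                    ctxt->rw_sgl, ctxt->rw_nents, dir);
                last = node;
        }
        if (last)
                llist_add_batch(chain, last, pool);
}
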
233 svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma,
239 ibdev_to_node(rdma->sc_cm_id->device));
246 svc_rdma_cc_init(rdma, &info->wi_cc);
269 struct svcxprt_rdma *rdma = cc->cc_rdma;
284 svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);
287 svc_xprt_deferred_close(&rdma->sc_xprt);
305 svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
310 ibdev_to_node(rdma->sc_cm_id->device));
314 svc_rdma_cc_init(rdma, &info->ri_cc);
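
Source lines 233 through 314 match the allocation of the per-chunk write and read state (again placed NUMA-local to the RDMA device) and the Write completion handler: when the Send Queue elements a chunk consumed complete, their count is handed back to the transport's SQ accounting and any senders blocked on a full SQ are woken, while a failed or flushed completion marks the transport for deferred close. A sketch of that handler shape, with hypothetical demo types and the transport fields passed in explicitly:

#include <linux/atomic.h>
#include <linux/wait.h>
#include <rdma/ib_verbs.h>

/* Hypothetical per-chunk context wrapped around an ib_cqe. */
struct demo_chunk_ctxt {
        struct ib_cqe           cc_cqe;
        unsigned int            cc_sqecount;    /* SQEs this chunk used */
        atomic_t                *cc_sq_avail;   /* transport SQ credits */
        wait_queue_head_t       *cc_send_wait;  /* senders waiting on SQ */
};

/* Completion callback, wired up via cc_cqe.done before posting. */
static void demo_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct demo_chunk_ctxt *cc =
                container_of(wc->wr_cqe, struct demo_chunk_ctxt, cc_cqe);

        /* Return the SQ credits and wake anyone waiting for them. */
        atomic_add(cc->cc_sqecount, cc->cc_sq_avail);
        wake_up(cc->cc_send_wait);

        if (wc->status != IB_WC_SUCCESS) {
                /* real code: svc_xprt_deferred_close(&rdma->sc_xprt) */
        }
}
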
365 struct svcxprt_rdma *rdma = cc->cc_rdma;
374 if (cc->cc_sqecount > rdma->sc_sq_depth)
383 first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
384 rdma->sc_port_num, cqe, first_wr);
390 &rdma->sc_sq_avail) > 0) {
392 ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
399 trace_svcrdma_sq_full(rdma);
400 atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
401 wait_event(rdma->sc_send_wait,
402 atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
403 trace_svcrdma_sq_retry(rdma);
406 trace_svcrdma_sq_post_err(rdma, ret);
407 svc_xprt_deferred_close(&rdma->sc_xprt);
413 atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
414 wake_up(&rdma->sc_send_wait);
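
Source lines 365 through 414 match the core posting loop: the chunk's SQE count is checked against the SQ depth, each R/W context contributes its work requests to a single chain via rdma_rw_ctx_wrs(), and the chain is posted only after reserving that many entries from sc_sq_avail. If the reservation fails, the credits are returned and the caller sleeps on sc_send_wait until completions free enough space; a posting error closes the transport. The sketch below shows that credit-and-retry idiom with hypothetical demo names and the WR chain already built by the caller:

#include <linux/atomic.h>
#include <linux/wait.h>
#include <rdma/ib_verbs.h>

/*
 * The caller builds @first_wr by repeating, for each R/W context:
 *      first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, qp, port_num,
 *                                 cqe, first_wr);
 *
 * Post the chain once @sqecount Send Queue entries can be reserved
 * from @sq_avail.  Returns 0 on success or a negative errno.
 */
static int demo_post_chunk(struct ib_qp *qp, struct ib_send_wr *first_wr,
                           unsigned int sqecount, unsigned int sq_depth,
                           atomic_t *sq_avail, wait_queue_head_t *send_wait)
{
        const struct ib_send_wr *bad_wr;
        int ret;

        /* A chunk needing more SQEs than the SQ holds can never post. */
        if (sqecount > sq_depth)
                return -EINVAL;

        do {
                if (atomic_sub_return(sqecount, sq_avail) > 0) {
                        ret = ib_post_send(qp, first_wr, &bad_wr);
                        if (ret)
                                break;  /* QP failure: close the transport */
                        return 0;
                }

                /* SQ full: undo the reservation, wait for completions. */
                atomic_add(sqecount, sq_avail);
                wait_event(*send_wait,
                           atomic_read(sq_avail) > sqecount);
        } while (1);

        /* real code also traces the error and defers a transport close */
        atomic_add(sqecount, sq_avail);
        wake_up(send_wait);
        return ret;
}
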
476 struct svcxprt_rdma *rdma = cc->cc_rdma;
492 ctxt = svc_rdma_get_rw_ctxt(rdma,
499 ret = svc_rdma_rw_ctx_init(rdma, ctxt, offset, seg->rs_handle,
519 trace_svcrdma_small_wrch_err(rdma, remaining, info->wi_seg_no,
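
Source lines 476 through 519 match the per-segment construction loop: for each segment of the client-provided Write chunk, an R/W context sized to the pages the segment spans is taken from the pool, loaded with the next slice of payload, and mapped against the segment's {handle, offset}; payload remaining after the last segment is the "small write chunk" error traced at line 519. A compressed sketch of that loop; demo_segment, demo_fill_sgl() and demo_map_segment() are hypothetical (the latter follows the wrapper sketched after the line-139 matches):

#include <linux/dma-direction.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <rdma/rw.h>

/* Hypothetical description of one segment of a Write chunk. */
struct demo_segment {
        u32 rs_handle;  /* peer's R_key for this segment */
        u32 rs_length;  /* bytes of remote buffer in this segment */
        u64 rs_offset;  /* remote virtual address */
};

/* Hypothetical helpers: build a scatterlist for the next @len payload
 * bytes, and map it against one segment with rdma_rw_ctx_init().
 */
u32 demo_fill_sgl(struct scatterlist *sgl, unsigned int len);
int demo_map_segment(struct ib_qp *qp, u32 port_num, struct rdma_rw_ctx *ctx,
                     struct scatterlist *sgl, u32 nents,
                     const struct demo_segment *seg,
                     enum dma_data_direction dir);

/*
 * Spread @remaining payload bytes over the chunk's segments, consuming
 * one rdma_rw_ctx per segment.
 */
static int demo_build_writes(struct ib_qp *qp, u32 port_num,
                             struct demo_segment *segs, unsigned int nsegs,
                             struct rdma_rw_ctx *ctxs,
                             struct scatterlist *sgl, unsigned int remaining)
{
        unsigned int i;

        for (i = 0; i < nsegs && remaining; i++) {
                unsigned int len = min_t(unsigned int, remaining,
                                         segs[i].rs_length);
                u32 nents = demo_fill_sgl(sgl, len);
                int ret;

                ret = demo_map_segment(qp, port_num, &ctxs[i], sgl, nents,
                                       &segs[i], DMA_TO_DEVICE);
                if (ret < 0)
                        return ret;
                remaining -= len;
        }

        /* Payload left but no segments left: the chunk was too small. */
        return remaining ? -E2BIG : 0;
}
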
533 * %-EIO if an rdma-rw error occurred
554 * %-EIO if an rdma-rw error occurred
576 * %-EIO if an rdma-rw error occurred
607 * @rdma: controlling RDMA transport
618 int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
626 info = svc_rdma_write_info_alloc(rdma, chunk);
648 * @rdma: controlling RDMA transport
659 int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
672 info = svc_rdma_write_info_alloc(rdma, chunk);
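
Source lines 607 through 672 match the two Write-side entry points: svc_rdma_send_write_chunk() pushes RPC Reply payload through the client's Write chunk, and svc_rdma_send_reply_chunk() does the same for the Reply chunk; both begin by allocating per-chunk write state and end by posting the accumulated WR chain. The sketch below only illustrates that alloc/build/post shape; demo_write_info and the two helpers it calls are hypothetical stand-ins for the svc_rdma internals shown in the earlier sketches:

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

/* Hypothetical per-chunk state, allocated NUMA-local to the device. */
struct demo_write_info {
        unsigned int    wi_seg_no;      /* next chunk segment to consume */
        unsigned int    wi_seg_off;     /* offset into that segment */
        /* chunk segments, R/W contexts, completion state ... elided */
};

/* Hypothetical stages mirroring the structure of the matches above. */
int demo_build_chunk(struct demo_write_info *info, const void *payload,
                     unsigned int len);
int demo_post_chunk_wrs(struct demo_write_info *info);

static int demo_send_write_chunk(struct ib_device *device,
                                 const void *payload, unsigned int len)
{
        struct demo_write_info *info;
        int ret;

        info = kmalloc_node(sizeof(*info), GFP_KERNEL,
                            ibdev_to_node(device));
        if (!info)
                return -ENOMEM;

        ret = demo_build_chunk(info, payload, len);
        if (ret < 0)
                goto out_err;
        ret = demo_post_chunk_wrs(info);
        if (ret < 0)
                goto out_err;
        return len;     /* on success, state is freed at completion time */

out_err:
        kfree(info);
        return ret;
}
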
1099 * @rdma: controlling RDMA transport
1120 int svc_rdma_process_read_list(struct svcxprt_rdma *rdma,
1128 info = svc_rdma_read_info_alloc(rdma);
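
The final matches (source lines 1099 through 1128) are the Read-side entry point: svc_rdma_process_read_list() allocates read state NUMA-local to the device and pulls the RPC Call's Read-list payload from the client with RDMA Reads, reusing the same rdma_rw machinery in the opposite direction. The closing sketch is a generic "pull one chunk with an RDMA Read" example built on the <rdma/rw.h> API; it is not the svc_rdma code, and the cqe wiring follows the completion pattern sketched after the line-314 matches:

#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rw.h>

/*
 * Generic example: read the peer buffer {rkey, remote_addr} into the
 * local pages described by @sgl/@nents, signalling @cqe->done when the
 * Read completes.  On a posting failure the mapping is torn down again
 * before returning.
 */
static int demo_pull_chunk(struct ib_qp *qp, u32 port_num,
                           struct rdma_rw_ctx *ctx,
                           struct scatterlist *sgl, u32 nents,
                           u64 remote_addr, u32 rkey, struct ib_cqe *cqe)
{
        const struct ib_send_wr *bad_wr;
        struct ib_send_wr *first_wr;
        int ret;

        ret = rdma_rw_ctx_init(ctx, qp, port_num, sgl, nents, 0,
                               remote_addr, rkey, DMA_FROM_DEVICE);
        if (ret < 0)
                return ret;

        /* Chain the Read WRs and attach the completion handler. */
        first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, NULL);
        ret = ib_post_send(qp, first_wr, &bad_wr);
        if (ret)
                rdma_rw_ctx_destroy(ctx, qp, port_num, sgl, nents,
                                    DMA_FROM_DEVICE);
        return ret;
}
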