Lines matching refs: rdma

These matches appear to come from the Linux server-side RPC-over-RDMA receive path (apparently net/sunrpc/xprtrdma/svc_rdma_recvfrom.c); the leading numbers are the source line numbers reported by the search. A short, reconstructed sketch of the surrounding function follows each group of matches.
98 #include <rdma/ib_verbs.h>
99 #include <rdma/rdma_cm.h>
120 static void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma,
123 cid->ci_queue_id = rdma->sc_rq_cq->res.id;
124 cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
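The two matches at 123-124 are the whole body of a small helper that stamps each receive context with a completion ID. A plausible reconstruction; the struct rpc_rdma_cid type name is an assumption inferred from the ci_* fields:

    static void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma,
                                       struct rpc_rdma_cid *cid)
    {
            /* Record which CQ this context will complete on ... */
            cid->ci_queue_id = rdma->sc_rq_cq->res.id;
            /* ... and assign a per-transport serial so each completion
             * can be identified unambiguously in traces.
             */
            cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
    }
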
128 svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
137 buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
140 addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
141 rdma->sc_max_req_size, DMA_FROM_DEVICE);
142 if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
145 svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid);
153 ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
154 ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
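Lines 128-154 together sketch the allocation path for one receive context: allocate an sc_max_req_size buffer, DMA-map it once for the lifetime of the context, stamp it with a completion ID, and describe it to the HCA with a single SGE. A hedged reconstruction; the kmalloc of the context itself, the error labels, and the rc_recv_buf/rc_recv_wr/rc_cqe wiring are assumptions not shown in the listing:

    static struct svc_rdma_recv_ctxt *
    svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
    {
            struct svc_rdma_recv_ctxt *ctxt;
            dma_addr_t addr;
            void *buffer;

            ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);      /* assumed */
            if (!ctxt)
                    goto fail0;
            buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
            if (!buffer)
                    goto fail1;
            /* Map once; the mapping lives as long as the context does */
            addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
                                     rdma->sc_max_req_size, DMA_FROM_DEVICE);
            if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
                    goto fail2;

            svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid);

            /* Wire the Recv WR (posted at line 261) to its single SGE
             * and its completion handler; rc_cqe and rc_recv_buf are
             * assumed field names.
             */
            ctxt->rc_recv_wr.next = NULL;
            ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
            ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
            ctxt->rc_recv_wr.num_sge = 1;
            ctxt->rc_cqe.done = svc_rdma_wc_receive;
            ctxt->rc_recv_sge.addr = addr;
            ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
            ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
            ctxt->rc_recv_buf = buffer;
            return ctxt;

    fail2:
            kfree(buffer);
    fail1:
            kfree(ctxt);
    fail0:
            return NULL;
    }

Mapping the buffer once per context, rather than per receive, is the point of this design: posting a receive later is then a cheap ib_post_recv() with no DMA-mapping work on the hot path.
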
167 static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
170 ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
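The destroy path at lines 167-170 undoes the mapping made at allocation time. A sketch; the kfree() calls and the rc_recv_buf field are assumptions:

    static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
                                           struct svc_rdma_recv_ctxt *ctxt)
    {
            /* Reverse the ib_dma_map_single() from allocation time */
            ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
                                ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
            kfree(ctxt->rc_recv_buf);       /* assumed field name */
            kfree(ctxt);
    }
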
178 * @rdma: svcxprt_rdma being torn down
181 void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
186 while ((node = llist_del_first(&rdma->sc_recv_ctxts))) {
188 svc_rdma_recv_ctxt_destroy(rdma, ctxt);
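svc_rdma_recv_ctxts_destroy() (lines 178-188, with its kernel-doc "@rdma:" line at 178) drains the transport's free list at teardown. A sketch assuming the conventional llist_entry() idiom; rc_node is grounded by the match at line 230:

    void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
    {
            struct svc_rdma_recv_ctxt *ctxt;
            struct llist_node *node;

            /* Contexts still posted to the QP are not on this list;
             * they are recycled through completion handling instead.
             */
            while ((node = llist_del_first(&rdma->sc_recv_ctxts))) {
                    ctxt = llist_entry(node, struct svc_rdma_recv_ctxt,
                                       rc_node);
                    svc_rdma_recv_ctxt_destroy(rdma, ctxt);
            }
    }
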
193 svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
198 node = llist_del_first(&rdma->sc_recv_ctxts);
209 ctxt = svc_rdma_recv_ctxt_alloc(rdma);
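Lines 193-209 show both halves of svc_rdma_recv_ctxt_get(): a lock-free fast path that pops a recycled context off sc_recv_ctxts, and a slow path (line 209) that allocates a fresh one. A hedged sketch:

    static struct svc_rdma_recv_ctxt *
    svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
    {
            struct svc_rdma_recv_ctxt *ctxt;
            struct llist_node *node;

            /* Fast path: reuse a context without taking any lock */
            node = llist_del_first(&rdma->sc_recv_ctxts);
            if (node)
                    return llist_entry(node, struct svc_rdma_recv_ctxt,
                                       rc_node);

            /* Slow path: allocate and DMA-map a new context */
            ctxt = svc_rdma_recv_ctxt_alloc(rdma);
            return ctxt;    /* may be NULL on allocation failure */
    }
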
217 * @rdma: controlling svcxprt_rdma
221 void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
230 llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
232 svc_rdma_recv_ctxt_destroy(rdma, ctxt);
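svc_rdma_recv_ctxt_put() (lines 217-232) either returns a context to the free list or destroys it outright; the listing shows both branches. A sketch in which the rc_temp discriminator is an assumed field name:

    void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
                                struct svc_rdma_recv_ctxt *ctxt)
    {
            /* Ordinary contexts go back on the lock-free free list;
             * "temporary" ones (assumed rc_temp flag) are freed
             * because they were only posted to prime the RQ.
             */
            if (!ctxt->rc_temp)
                    llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
            else
                    svc_rdma_recv_ctxt_destroy(rdma, ctxt);
    }
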
247 struct svcxprt_rdma *rdma =
252 svc_rdma_recv_ctxt_put(rdma, ctxt);
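Lines 247-252 are from a release callback that recovers the svcxprt_rdma from its embedded svc_xprt via container_of() and puts the receive context. The callback's name and exact signature vary across kernel versions, so this sketch is illustrative only:

    static void svc_rdma_release_ctxt(struct svc_xprt *xprt, void *vctxt)
    {
            struct svc_rdma_recv_ctxt *ctxt = vctxt;
            struct svcxprt_rdma *rdma =
                    container_of(xprt, struct svcxprt_rdma, sc_xprt);

            if (ctxt)
                    svc_rdma_recv_ctxt_put(rdma, ctxt);
    }
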
255 static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
261 ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
267 trace_svcrdma_rq_post_err(rdma, ret);
268 svc_rdma_recv_ctxt_put(rdma, ctxt);
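__svc_rdma_post_recv() (lines 255-268) hands one pre-mapped buffer to the hardware; on failure it traces the error and recycles the context. A sketch close to what the matches imply (the goto structure is an assumption):

    static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
                                    struct svc_rdma_recv_ctxt *ctxt)
    {
            int ret;

            ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
            if (ret)
                    goto err_post;
            return 0;

    err_post:
            trace_svcrdma_rq_post_err(rdma, ret);
            svc_rdma_recv_ctxt_put(rdma, ctxt);
            return ret;
    }
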
272 static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
276 if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
278 ctxt = svc_rdma_recv_ctxt_get(rdma);
281 return __svc_rdma_post_recv(rdma, ctxt);
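The non-underscore wrapper (lines 272-281) refuses to replenish the receive queue on a transport that is already closing. A sketch; the return values for the closing and allocation-failure cases are assumptions:

    static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
    {
            struct svc_rdma_recv_ctxt *ctxt;

            /* No point posting receives on a dying transport */
            if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
                    return 0;               /* assumed */
            ctxt = svc_rdma_recv_ctxt_get(rdma);
            if (!ctxt)
                    return -ENOMEM;         /* assumed error code */
            return __svc_rdma_post_recv(rdma, ctxt);
    }
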
286 * @rdma: fresh svcxprt_rdma
290 bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
296 for (i = 0; i < rdma->sc_max_requests; i++) {
297 ctxt = svc_rdma_recv_ctxt_get(rdma);
301 ret = __svc_rdma_post_recv(rdma, ctxt);
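svc_rdma_post_recvs() (lines 286-301) primes a fresh transport's receive queue with sc_max_requests buffers before the first message can arrive, so the peer can never send into an empty RQ. A sketch; marking these initial contexts with the assumed rc_temp flag mirrors the recycle logic above:

    bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
    {
            struct svc_rdma_recv_ctxt *ctxt;
            unsigned int i;
            int ret;

            for (i = 0; i < rdma->sc_max_requests; i++) {
                    ctxt = svc_rdma_recv_ctxt_get(rdma);
                    if (!ctxt)
                            return false;
                    ctxt->rc_temp = true;   /* assumed flag */
                    ret = __svc_rdma_post_recv(rdma, ctxt);
                    if (ret)
                            return false;
            }
            return true;
    }
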
318 struct svcxprt_rdma *rdma = cq->cq_context;
329 if (svc_rdma_post_recv(rdma))
334 ib_dma_sync_single_for_cpu(rdma->sc_pd->device,
338 spin_lock(&rdma->sc_rq_dto_lock);
339 list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
341 set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
342 spin_unlock(&rdma->sc_rq_dto_lock);
343 if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
344 svc_xprt_enqueue(&rdma->sc_xprt);
349 svc_rdma_recv_ctxt_put(rdma, ctxt);
350 set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
351 svc_xprt_enqueue(&rdma->sc_xprt);
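Lines 318-351 trace the Receive completion handler end to end: recover the transport from the CQ context, immediately repost a receive so the RQ never runs dry, sync the DMA buffer for the CPU, queue the message on the dto list under sc_rq_dto_lock, and kick the transport unless connection establishment is still pending; the tail (349-351) is the error path. A reconstruction in which the container_of() recovery of ctxt and the goto labels are assumptions:

    static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
    {
            struct svcxprt_rdma *rdma = cq->cq_context;
            struct ib_cqe *cqe = wc->wr_cqe;
            struct svc_rdma_recv_ctxt *ctxt;

            /* Assumed: the context embeds its CQE, recover it here */
            ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);

            if (wc->status != IB_WC_SUCCESS)
                    goto flushed;

            /* Keep the receive queue primed before any other work */
            if (svc_rdma_post_recv(rdma))
                    goto dropped;

            /* Make the freshly DMA'd bytes visible to the CPU */
            ib_dma_sync_single_for_cpu(rdma->sc_pd->device,
                                       ctxt->rc_recv_sge.addr,
                                       wc->byte_len, DMA_FROM_DEVICE);

            /* Publish the message and wake the transport */
            spin_lock(&rdma->sc_rq_dto_lock);
            list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
            set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
            spin_unlock(&rdma->sc_rq_dto_lock);
            if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
                    svc_xprt_enqueue(&rdma->sc_xprt);
            return;

    flushed:
    dropped:
            /* Flushed or failed: recycle the context and close down */
            svc_rdma_recv_ctxt_put(rdma, ctxt);
            set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
            svc_xprt_enqueue(&rdma->sc_xprt);
    }
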
356 * @rdma: svcxprt_rdma being shut down
359 void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
363 while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
365 svc_rdma_recv_ctxt_put(rdma, ctxt);
367 while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
369 svc_rdma_recv_ctxt_put(rdma, ctxt);
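Both loops in svc_rdma_flush_recv_queues() (lines 356-369) drain a queue of messages that arrived but were never processed: first the read-completion queue, then the receive dto queue. A sketch assuming svc_rdma_next_recv_ctxt() peeks at the head of a list and that the caller unlinks the entry:

    void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
    {
            struct svc_rdma_recv_ctxt *ctxt;

            while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
                    list_del(&ctxt->rc_list);       /* assumed */
                    svc_rdma_recv_ctxt_put(rdma, ctxt);
            }
            while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
                    list_del(&ctxt->rc_list);       /* assumed */
                    svc_rdma_recv_ctxt_put(rdma, ctxt);
            }
    }
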
559 static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma,
567 if (!rdma->sc_snd_w_inv)
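The sc_snd_w_inv test at line 567 gates Send With Invalidate: unless the client advertised support for remote invalidation, no rkey is selected and the reply uses a plain Send. A minimal sketch of the guard; the rc_inv_rkey result field and the chunk-scanning body are assumptions:

    static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma,
                                      struct svc_rdma_recv_ctxt *ctxt)
    {
            ctxt->rc_inv_rkey = 0;          /* assumed result field */

            if (!rdma->sc_snd_w_inv)
                    return;

            /* ... otherwise scan the message's chunk lists and select
             * an rkey only if every segment shares one (details
             * omitted; they do not appear in the listing).
             */
    }
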
721 static void svc_rdma_send_error(struct svcxprt_rdma *rdma,
727 sctxt = svc_rdma_send_ctxt_get(rdma);
730 svc_rdma_send_error_msg(rdma, sctxt, rctxt, status);
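Finally, svc_rdma_send_error() (lines 721-730) is a thin shim: grab a send context and delegate to svc_rdma_send_error_msg(). A sketch; the rctxt/status parameters and the silent return when no send context is available are assumptions:

    static void svc_rdma_send_error(struct svcxprt_rdma *rdma,
                                    struct svc_rdma_recv_ctxt *rctxt,
                                    int status)
    {
            struct svc_rdma_send_ctxt *sctxt;

            sctxt = svc_rdma_send_ctxt_get(rdma);
            if (!sctxt)
                    return;
            svc_rdma_send_error_msg(rdma, sctxt, rctxt, status);
    }
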