// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/log2.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * internal functions
 */
static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_sendctxs_destroy(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt,
				       struct rpcrdma_sendctx *sc);
static int rpcrdma_reqs_setup(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep);
static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_ep_get(struct rpcrdma_ep *ep);
static int rpcrdma_ep_put(struct rpcrdma_ep *ep);
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
		     gfp_t flags);
static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb);
static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);

/* Wait for outstanding transport work to finish. ib_drain_qp
 * handles the drains in the wrong order for us, so open code
 * them here.
 */
static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct rdma_cm_id *id = ep->re_id;

	/* Flush Receives, then wait for deferred Reply work
	 * to complete.
	 */
	ib_drain_rq(id->qp);

	/* Deferred Reply processing might have scheduled
	 * local invalidations.
	 */
	ib_drain_sq(id->qp);

	rpcrdma_ep_put(ep);
}

/**
 * rpcrdma_qp_event_handler - Handle one QP event (error notification)
 * @event: details of the event
 * @context: ep that owns QP where event occurred
 *
 * Called from the RDMA provider (device driver) possibly in an interrupt
 * context. The QP is always destroyed before the ID, so the ID will be
 * reliably available when this handler is invoked.
 */
static void rpcrdma_qp_event_handler(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;

	trace_xprtrdma_qp_event(ep, event);
}

/* Ensure xprt_force_disconnect() is invoked exactly once when a
 * connection is closed or lost. (The important thing is that it
 * is invoked at least once.)
 */
static void rpcrdma_force_disconnect(struct rpcrdma_ep *ep)
{
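	/* atomic_add_unless() increments re_force_disconnect only when
	 * it is not already 1, and returns true only when the increment
	 * happens. Each rpcrdma_ep starts zeroed (kzalloc in
	 * rpcrdma_ep_create), so only the first caller per connection
	 * reaches xprt_force_disconnect().
	 */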
	if (atomic_add_unless(&ep->re_force_disconnect, 1, 1))
		xprt_force_disconnect(ep->re_xprt);
}

/**
 * rpcrdma_flush_disconnect - Disconnect on flushed completion
 * @r_xprt: transport to disconnect
 * @wc: work completion entry
 *
 * Must be called in process context.
 */
void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc)
{
	if (wc->status != IB_WC_SUCCESS)
		rpcrdma_force_disconnect(r_xprt->rx_ep);
}

/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq:	completion queue
 * @wc:	WCE for a completed Send WR
 *
 */
static void rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_sendctx *sc =
		container_of(cqe, struct rpcrdma_sendctx, sc_cqe);
	struct rpcrdma_xprt *r_xprt = cq->cq_context;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_send(sc, wc);
	rpcrdma_sendctx_put_locked(r_xprt, sc);
	rpcrdma_flush_disconnect(r_xprt, wc);
}

/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq:	completion queue
 * @wc:	WCE for a completed Receive WR
 *
 */
static void rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
					       rr_cqe);
	struct rpcrdma_xprt *r_xprt = cq->cq_context;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_receive(wc);
	--r_xprt->rx_ep->re_receive_count;
	if (wc->status != IB_WC_SUCCESS)
		goto out_flushed;

	/* status == SUCCESS means all fields in wc are trustworthy */
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
	rep->rr_wc_flags = wc->wc_flags;
	rep->rr_inv_rkey = wc->ex.invalidate_rkey;

	ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
				   rdmab_addr(rep->rr_rdmabuf),
				   wc->byte_len, DMA_FROM_DEVICE);

	rpcrdma_reply_handler(rep);
	return;

out_flushed:
	rpcrdma_flush_disconnect(r_xprt, wc);
	rpcrdma_rep_destroy(rep);
}

static void rpcrdma_update_cm_private(struct rpcrdma_ep *ep,
				      struct rdma_conn_param *param)
{
	const struct rpcrdma_connect_private *pmsg = param->private_data;
	unsigned int rsize, wsize;

	/* Default settings for RPC-over-RDMA Version One */
	ep->re_implicit_roundup = xprt_rdma_pad_optimize;
	rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
	wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		ep->re_implicit_roundup = true;
		rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
		wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
	}

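	/* The peer's advertised buffer sizes can only lower, never
	 * raise, the locally configured inline thresholds.
	 */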
	if (rsize < ep->re_inline_recv)
		ep->re_inline_recv = rsize;
	if (wsize < ep->re_inline_send)
		ep->re_inline_send = wsize;

	rpcrdma_set_max_header_sizes(ep);
}

/**
 * rpcrdma_cm_event_handler - Handle RDMA CM events
 * @id: rdma_cm_id on which an event has occurred
 * @event: details of the event
 *
 * Called with @id's mutex held. Returns 1 if caller should
 * destroy @id, otherwise 0.
 */
static int
rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct sockaddr *sap = (struct sockaddr *)&id->route.addr.dst_addr;
	struct rpcrdma_ep *ep = id->context;

	might_sleep();

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ep->re_async_rc = 0;
		complete(&ep->re_done);
		return 0;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ep->re_async_rc = -EPROTO;
		complete(&ep->re_done);
		return 0;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ep->re_async_rc = -ENETUNREACH;
		complete(&ep->re_done);
		return 0;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		pr_info("rpcrdma: removing device %s for %pISpc\n",
			ep->re_id->device->name, sap);
		fallthrough;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		ep->re_connect_status = -ENODEV;
		goto disconnected;
	case RDMA_CM_EVENT_ESTABLISHED:
		rpcrdma_ep_get(ep);
		ep->re_connect_status = 1;
		rpcrdma_update_cm_private(ep, &event->param.conn);
		trace_xprtrdma_inline_thresh(ep);
		wake_up_all(&ep->re_connect_wait);
		break;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ep->re_connect_status = -ENOTCONN;
		goto wake_connect_worker;
	case RDMA_CM_EVENT_UNREACHABLE:
		ep->re_connect_status = -ENETUNREACH;
		goto wake_connect_worker;
	case RDMA_CM_EVENT_REJECTED:
		dprintk("rpcrdma: connection to %pISpc rejected: %s\n",
			sap, rdma_reject_msg(id, event->status));
		ep->re_connect_status = -ECONNREFUSED;
		if (event->status == IB_CM_REJ_STALE_CONN)
			ep->re_connect_status = -ENOTCONN;
wake_connect_worker:
		wake_up_all(&ep->re_connect_wait);
		return 0;
	case RDMA_CM_EVENT_DISCONNECTED:
		ep->re_connect_status = -ECONNABORTED;
disconnected:
		rpcrdma_force_disconnect(ep);
		return rpcrdma_ep_put(ep);
	default:
		break;
	}

	dprintk("RPC:       %s: %pISpc on %s/frwr: %s\n", __func__, sap,
		ep->re_id->device->name, rdma_event_msg(event->event));
	return 0;
}

static struct rdma_cm_id *rpcrdma_create_id(struct rpcrdma_xprt *r_xprt,
					    struct rpcrdma_ep *ep)
{
	unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rdma_cm_id *id;
	int rc;

	init_completion(&ep->re_done);

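	/* Address and route resolution are asynchronous: each step arms
	 * re_done, which rpcrdma_cm_event_handler() completes, and
	 * re_async_rc carries the outcome. The wait timeout below is a
	 * jiffy longer than RDMA_RESOLVE_TIMEOUT so that the CM's own
	 * timeout normally fires first.
	 */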
	id = rdma_create_id(xprt->xprt_net, rpcrdma_cm_event_handler, ep,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return id;

	ep->re_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)&xprt->addr,
			       RDMA_RESOLVE_TIMEOUT);
	if (rc)
		goto out;
	rc = wait_for_completion_interruptible_timeout(&ep->re_done, wtimeout);
	if (rc < 0)
		goto out;

	rc = ep->re_async_rc;
	if (rc)
		goto out;

	ep->re_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc)
		goto out;
	rc = wait_for_completion_interruptible_timeout(&ep->re_done, wtimeout);
	if (rc < 0)
		goto out;
	rc = ep->re_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

static void rpcrdma_ep_destroy(struct kref *kref)
{
	struct rpcrdma_ep *ep = container_of(kref, struct rpcrdma_ep, re_kref);

	if (ep->re_id->qp) {
		rdma_destroy_qp(ep->re_id);
		ep->re_id->qp = NULL;
	}

	if (ep->re_attr.recv_cq)
		ib_free_cq(ep->re_attr.recv_cq);
	ep->re_attr.recv_cq = NULL;
	if (ep->re_attr.send_cq)
		ib_free_cq(ep->re_attr.send_cq);
	ep->re_attr.send_cq = NULL;

	if (ep->re_pd)
		ib_dealloc_pd(ep->re_pd);
	ep->re_pd = NULL;

	kfree(ep);
	module_put(THIS_MODULE);
}

static noinline void rpcrdma_ep_get(struct rpcrdma_ep *ep)
{
	kref_get(&ep->re_kref);
}

/* Returns:
 *     %0 if @ep still has a positive kref count, or
 *     %1 if @ep was destroyed successfully.
 */
static noinline int rpcrdma_ep_put(struct rpcrdma_ep *ep)
{
	return kref_put(&ep->re_kref, rpcrdma_ep_destroy);
}

static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_connect_private *pmsg;
	struct ib_device *device;
	struct rdma_cm_id *id;
	struct rpcrdma_ep *ep;
	int rc;

	ep = kzalloc(sizeof(*ep), GFP_NOFS);
	if (!ep)
		return -ENOTCONN;
	ep->re_xprt = &r_xprt->rx_xprt;
	kref_init(&ep->re_kref);

	id = rpcrdma_create_id(r_xprt, ep);
	if (IS_ERR(id)) {
		kfree(ep);
		return PTR_ERR(id);
	}
	__module_get(THIS_MODULE);
	device = id->device;
	ep->re_id = id;

	ep->re_max_requests = r_xprt->rx_xprt.max_reqs;
	ep->re_inline_send = xprt_rdma_max_inline_write;
	ep->re_inline_recv = xprt_rdma_max_inline_read;
	rc = frwr_query_device(ep, device);
	if (rc)
		goto out_destroy;

	r_xprt->rx_buf.rb_max_requests = cpu_to_be32(ep->re_max_requests);

	ep->re_attr.event_handler = rpcrdma_qp_event_handler;
	ep->re_attr.qp_context = ep;
	ep->re_attr.srq = NULL;
	ep->re_attr.cap.max_inline_data = 0;
	ep->re_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->re_attr.qp_type = IB_QPT_RC;
	ep->re_attr.port_num = ~0;

	dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->re_attr.cap.max_send_wr,
		ep->re_attr.cap.max_recv_wr,
		ep->re_attr.cap.max_send_sge,
		ep->re_attr.cap.max_recv_sge);

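	/* Only one Send in every re_send_batch (an eighth of the
	 * request limit) is signaled; see rpcrdma_post_sends(). This
	 * amortizes the cost of handling Send completions.
	 */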
	ep->re_send_batch = ep->re_max_requests >> 3;
	ep->re_send_count = ep->re_send_batch;
	init_waitqueue_head(&ep->re_connect_wait);

	ep->re_attr.send_cq = ib_alloc_cq_any(device, r_xprt,
					      ep->re_attr.cap.max_send_wr,
					      IB_POLL_WORKQUEUE);
	if (IS_ERR(ep->re_attr.send_cq)) {
		rc = PTR_ERR(ep->re_attr.send_cq);
		ep->re_attr.send_cq = NULL;
		goto out_destroy;
	}

	ep->re_attr.recv_cq = ib_alloc_cq_any(device, r_xprt,
					      ep->re_attr.cap.max_recv_wr,
					      IB_POLL_WORKQUEUE);
	if (IS_ERR(ep->re_attr.recv_cq)) {
		rc = PTR_ERR(ep->re_attr.recv_cq);
		ep->re_attr.recv_cq = NULL;
		goto out_destroy;
	}
	ep->re_receive_count = 0;

	/* Initialize cma parameters */
	memset(&ep->re_remote_cma, 0, sizeof(ep->re_remote_cma));

	/* Prepare RDMA-CM private message */
	pmsg = &ep->re_cm_private;
	pmsg->cp_magic = rpcrdma_cmp_magic;
	pmsg->cp_version = RPCRDMA_CMP_VERSION;
	pmsg->cp_flags |= RPCRDMA_CMP_F_SND_W_INV_OK;
	pmsg->cp_send_size = rpcrdma_encode_buffer_size(ep->re_inline_send);
	pmsg->cp_recv_size = rpcrdma_encode_buffer_size(ep->re_inline_recv);
	ep->re_remote_cma.private_data = pmsg;
	ep->re_remote_cma.private_data_len = sizeof(*pmsg);

	/* Client offers RDMA Read but does not initiate */
	ep->re_remote_cma.initiator_depth = 0;
	ep->re_remote_cma.responder_resources =
		min_t(int, U8_MAX, device->attrs.max_qp_rd_atom);

	/* Limit transport retries so client can detect server
	 * GID changes quickly. RPC layer handles re-establishing
	 * transport connection and retransmission.
	 */
	ep->re_remote_cma.retry_count = 6;

	/* RPC-over-RDMA handles its own flow control. In addition,
	 * make all RNR NAKs visible so we know that RPC-over-RDMA
	 * flow control is working correctly (no NAKs should be seen).
	 */
	ep->re_remote_cma.flow_control = 0;
	ep->re_remote_cma.rnr_retry_count = 0;

	ep->re_pd = ib_alloc_pd(device, 0);
	if (IS_ERR(ep->re_pd)) {
		rc = PTR_ERR(ep->re_pd);
		ep->re_pd = NULL;
		goto out_destroy;
	}

	rc = rdma_create_qp(id, ep->re_pd, &ep->re_attr);
	if (rc)
		goto out_destroy;

	r_xprt->rx_ep = ep;
	return 0;

out_destroy:
	rpcrdma_ep_put(ep);
	rdma_destroy_id(id);
	return rc;
}

/**
 * rpcrdma_xprt_connect - Connect an unconnected transport
 * @r_xprt: controlling transport instance
 *
 * Returns 0 on success or a negative errno.
 */
int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_ep *ep;
	int rc;

	rc = rpcrdma_ep_create(r_xprt);
	if (rc)
		return rc;
	ep = r_xprt->rx_ep;

	xprt_clear_connected(xprt);
	rpcrdma_reset_cwnd(r_xprt);

	/* Bump the ep's reference count while there are
	 * outstanding Receives.
	 */
	rpcrdma_ep_get(ep);
	rpcrdma_post_recvs(r_xprt, 1, true);

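	/* Receives were posted above, before connecting, so the Receive
	 * Queue is ready if the server transmits immediately after
	 * accepting the connection.
	 */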
	rc = rdma_connect(ep->re_id, &ep->re_remote_cma);
	if (rc)
		goto out;

	if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO)
		xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
	wait_event_interruptible(ep->re_connect_wait,
				 ep->re_connect_status != 0);
	if (ep->re_connect_status <= 0) {
		rc = ep->re_connect_status;
		goto out;
	}

	rc = rpcrdma_sendctxs_create(r_xprt);
	if (rc) {
		rc = -ENOTCONN;
		goto out;
	}

	rc = rpcrdma_reqs_setup(r_xprt);
	if (rc) {
		rc = -ENOTCONN;
		goto out;
	}
	rpcrdma_mrs_create(r_xprt);

out:
	trace_xprtrdma_connect(r_xprt, rc);
	return rc;
}

/**
 * rpcrdma_xprt_disconnect - Disconnect underlying transport
 * @r_xprt: controlling transport instance
 *
 * Caller serializes. Either the transport send lock is held,
 * or we're being called to destroy the transport.
 *
 * On return, @r_xprt is completely divested of all hardware
 * resources and prepared for the next ->connect operation.
 */
void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct rdma_cm_id *id;
	int rc;

	if (!ep)
		return;

	id = ep->re_id;
	rc = rdma_disconnect(id);
	trace_xprtrdma_disconnect(r_xprt, rc);

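	/* Drain the QP before tearing anything down: once the drain
	 * returns, no further Send or Receive completions can run
	 * against the resources released below.
	 */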
	rpcrdma_xprt_drain(r_xprt);
	rpcrdma_reps_unmap(r_xprt);
	rpcrdma_reqs_reset(r_xprt);
	rpcrdma_mrs_destroy(r_xprt);
	rpcrdma_sendctxs_destroy(r_xprt);

	if (rpcrdma_ep_put(ep))
		rdma_destroy_id(id);

	r_xprt->rx_ep = NULL;
}

/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * enqueues a sendctx that has been completed. Multiple producer
 * threads are serialized by the ib_poll_cq() function.
 */

/* rpcrdma_sendctxs_destroy() assumes caller has already quiesced
 * queue activity, and rpcrdma_xprt_drain has flushed all remaining
 * Send requests.
 */
static void rpcrdma_sendctxs_destroy(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	unsigned long i;

	if (!buf->rb_sc_ctxs)
		return;
	for (i = 0; i <= buf->rb_sc_last; i++)
		kfree(buf->rb_sc_ctxs[i]);
	kfree(buf->rb_sc_ctxs);
	buf->rb_sc_ctxs = NULL;
}

static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ep *ep)
{
	struct rpcrdma_sendctx *sc;

	sc = kzalloc(struct_size(sc, sc_sges, ep->re_attr.cap.max_send_sge),
		     GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->sc_cqe.done = rpcrdma_wc_send;
	return sc;
}

static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_sendctx *sc;
	unsigned long i;

	/* Maximum number of concurrent outstanding Send WRs. Capping
	 * the circular queue size stops Send Queue overflow by causing
	 * the ->send_request call to fail temporarily before too many
	 * Sends are posted.
	 */
	i = r_xprt->rx_ep->re_max_requests + RPCRDMA_MAX_BC_REQUESTS;
	buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
	if (!buf->rb_sc_ctxs)
		return -ENOMEM;

	buf->rb_sc_last = i - 1;
	for (i = 0; i <= buf->rb_sc_last; i++) {
		sc = rpcrdma_sendctx_create(r_xprt->rx_ep);
		if (!sc)
			return -ENOMEM;

		buf->rb_sc_ctxs[i] = sc;
	}

	buf->rb_sc_head = 0;
	buf->rb_sc_tail = 0;
	return 0;
}

/* The sendctx queue is not guaranteed to have a size that is a
 * power of two, thus the helpers in circ_buf.h cannot be used.
 * The other option is to use modulus (%), which can be expensive.
 */
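/* For example, with a queue of 80 contexts rb_sc_last is 79:
 * rpcrdma_sendctx_next(buf, 78) returns 79, and
 * rpcrdma_sendctx_next(buf, 79) wraps around to 0. (Illustrative
 * size only; the actual queue size is computed in
 * rpcrdma_sendctxs_create().)
 */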
static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf,
					  unsigned long item)
{
	return likely(item < buf->rb_sc_last) ? item + 1 : 0;
}

/**
 * rpcrdma_sendctx_get_locked - Acquire a send context
 * @r_xprt: controlling transport instance
 *
 * Returns pointer to a free send completion context; or NULL if
 * the queue is empty.
 *
 * Usage: Called to acquire an SGE array before preparing a Send WR.
 *
 * The caller serializes calls to this function (per transport), and
 * provides an effective memory barrier that flushes the new value
 * of rb_sc_head.
 */
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_sendctx *sc;
	unsigned long next_head;

	next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);

	if (next_head == READ_ONCE(buf->rb_sc_tail))
		goto out_emptyq;

	/* ORDER: item must be accessed _before_ head is updated */
	sc = buf->rb_sc_ctxs[next_head];

	/* Releasing the lock in the caller acts as a memory
	 * barrier that flushes rb_sc_head.
	 */
	buf->rb_sc_head = next_head;

	return sc;

out_emptyq:
	/* The queue is "empty" if there have not been enough Send
	 * completions recently. This is a sign the Send Queue is
	 * backing up. Cause the caller to pause and try again.
	 */
	xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
	r_xprt->rx_stats.empty_sendctx_q++;
	return NULL;
}

/**
 * rpcrdma_sendctx_put_locked - Release a send context
 * @r_xprt: controlling transport instance
 * @sc: send context to release
 *
 * Usage: Called from Send completion to return a sendctx
 * to the queue.
 *
 * The caller serializes calls to this function (per transport).
 */
static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt,
				       struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	unsigned long next_tail;

	/* Unmap SGEs of previously completed but unsignaled
	 * Sends by walking up the queue until @sc is found.
	 */
	next_tail = buf->rb_sc_tail;
	do {
		next_tail = rpcrdma_sendctx_next(buf, next_tail);

		/* ORDER: item must be accessed _before_ tail is updated */
		rpcrdma_sendctx_unmap(buf->rb_sc_ctxs[next_tail]);

	} while (buf->rb_sc_ctxs[next_tail] != sc);

	/* Paired with the READ_ONCE() of rb_sc_tail in
	 * rpcrdma_sendctx_get_locked().
	 */
	smp_store_release(&buf->rb_sc_tail, next_tail);

	xprt_write_space(&r_xprt->rx_xprt);
}

static void
rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	unsigned int count;

	for (count = 0; count < ep->re_max_rdma_segs; count++) {
		struct rpcrdma_mr *mr;
		int rc;

		mr = kzalloc(sizeof(*mr), GFP_NOFS);
		if (!mr)
			break;

		rc = frwr_mr_init(r_xprt, mr);
		if (rc) {
			kfree(mr);
			break;
		}

		spin_lock(&buf->rb_lock);
		rpcrdma_mr_push(mr, &buf->rb_mrs);
		list_add(&mr->mr_all, &buf->rb_all_mrs);
		spin_unlock(&buf->rb_lock);
	}

	r_xprt->rx_stats.mrs_allocated += count;
	trace_xprtrdma_createmrs(r_xprt, count);
}

static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_refresh_worker);
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);

	rpcrdma_mrs_create(r_xprt);
	xprt_write_space(&r_xprt->rx_xprt);
}

/**
 * rpcrdma_mrs_refresh - Wake the MR refresh worker
 * @r_xprt: controlling transport instance
 *
 */
void rpcrdma_mrs_refresh(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;

	/* If there is no underlying connection, it's no use
	 * to wake the refresh worker.
	 */
	if (ep->re_connect_status == 1) {
		/* The work is scheduled on a WQ_MEM_RECLAIM
		 * workqueue in order to prevent MR allocation
		 * from recursing into NFS during direct reclaim.
		 */
		queue_work(xprtiod_workqueue, &buf->rb_refresh_worker);
	}
}

/**
 * rpcrdma_req_create - Allocate an rpcrdma_req object
 * @r_xprt: controlling r_xprt
 * @size: initial size, in bytes, of send and receive buffers
 * @flags: GFP flags passed to memory allocators
 *
 * Returns an allocated and fully initialized rpcrdma_req or NULL.
 */
struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
				       gfp_t flags)
{
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), flags);
	if (req == NULL)
		goto out1;

	req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE, flags);
	if (!req->rl_sendbuf)
		goto out2;

	req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE, flags);
	if (!req->rl_recvbuf)
		goto out3;

	INIT_LIST_HEAD(&req->rl_free_mrs);
	INIT_LIST_HEAD(&req->rl_registered);
	spin_lock(&buffer->rb_lock);
	list_add(&req->rl_all, &buffer->rb_allreqs);
	spin_unlock(&buffer->rb_lock);
	return req;

out3:
	rpcrdma_regbuf_free(req->rl_sendbuf);
out2:
	kfree(req);
out1:
	return NULL;
}

/**
 * rpcrdma_req_setup - Per-connection instance setup of an rpcrdma_req object
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req object to set up
 *
 * Returns zero on success, and a negative errno on failure.
 */
int rpcrdma_req_setup(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct rpcrdma_regbuf *rb;
	size_t maxhdrsize;

	/* Compute maximum header buffer size in bytes */
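	/* Note: rpcrdma_fixed_maxsz covers the fixed transport header
	 * words, the extra three words leave room for the chunk list
	 * discriminators, and the final term covers a maximally deep
	 * Read chunk list. All are in XDR words until the multiply by
	 * sizeof(__be32) below.
	 */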
	maxhdrsize = rpcrdma_fixed_maxsz + 3 +
		     r_xprt->rx_ep->re_max_rdma_segs * rpcrdma_readchunk_maxsz;
	maxhdrsize *= sizeof(__be32);
	rb = rpcrdma_regbuf_alloc(__roundup_pow_of_two(maxhdrsize),
				  DMA_TO_DEVICE, GFP_KERNEL);
	if (!rb)
		goto out;

	if (!__rpcrdma_regbuf_dma_map(r_xprt, rb))
		goto out_free;

	req->rl_rdmabuf = rb;
	xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb));
	return 0;

out_free:
	rpcrdma_regbuf_free(rb);
out:
	return -ENOMEM;
}

/* ASSUMPTION: the rb_allreqs list is stable for the duration,
 * and thus can be walked without holding rb_lock; e.g., the
 * caller is holding the transport send lock to exclude
 * device removal or disconnection.
 */
static int rpcrdma_reqs_setup(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	int rc;

	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
		rc = rpcrdma_req_setup(r_xprt, req);
		if (rc)
			return rc;
	}
	return 0;
}

static void rpcrdma_req_reset(struct rpcrdma_req *req)
{
	/* Credits are valid for only one connection */
	req->rl_slot.rq_cong = 0;

	rpcrdma_regbuf_free(req->rl_rdmabuf);
	req->rl_rdmabuf = NULL;

	rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
	rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);

	frwr_reset(req);
}

/* ASSUMPTION: the rb_allreqs list is stable for the duration,
 * and thus can be walked without holding rb_lock; e.g., the
 * caller is holding the transport send lock to exclude
 * device removal or disconnection.
 */
static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;

	list_for_each_entry(req, &buf->rb_allreqs, rl_all)
		rpcrdma_req_reset(req);
}

/* No locking needed here. This function is called only by the
 * Receive completion handler.
 */
static noinline
struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
				       bool temp)
{
	struct rpcrdma_rep *rep;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_regbuf_alloc(r_xprt->rx_ep->re_inline_recv,
					       DMA_FROM_DEVICE, GFP_KERNEL);
	if (!rep->rr_rdmabuf)
		goto out_free;

	xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf),
		     rdmab_length(rep->rr_rdmabuf));
	rep->rr_cqe.done = rpcrdma_wc_receive;
	rep->rr_rxprt = r_xprt;
	rep->rr_recv_wr.next = NULL;
	rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
	rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	rep->rr_recv_wr.num_sge = 1;
	rep->rr_temp = temp;
	list_add(&rep->rr_all, &r_xprt->rx_buf.rb_all_reps);
	return rep;

out_free:
	kfree(rep);
out:
	return NULL;
}

/* No locking needed here. This function is invoked only by the
 * Receive completion handler, or during transport shutdown.
 */
static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep)
{
	list_del(&rep->rr_all);
	rpcrdma_regbuf_free(rep->rr_rdmabuf);
	kfree(rep);
}

static struct rpcrdma_rep *rpcrdma_rep_get_locked(struct rpcrdma_buffer *buf)
{
	struct llist_node *node;

	/* Calls to llist_del_first are required to be serialized */
	node = llist_del_first(&buf->rb_free_reps);
	if (!node)
		return NULL;
	return llist_entry(node, struct rpcrdma_rep, rr_node);
}

static void rpcrdma_rep_put(struct rpcrdma_buffer *buf,
			    struct rpcrdma_rep *rep)
{
	llist_add(&rep->rr_node, &buf->rb_free_reps);
}

static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_rep *rep;

	list_for_each_entry(rep, &buf->rb_all_reps, rr_all) {
		rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf);
		rep->rr_temp = true;
	}
}

static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_rep *rep;

	while ((rep = rpcrdma_rep_get_locked(buf)) != NULL)
		rpcrdma_rep_destroy(rep);
}

/**
 * rpcrdma_buffer_create - Create initial set of req/rep objects
 * @r_xprt: transport instance to (re)initialize
 *
 * Returns zero on success, otherwise a negative errno.
 */
int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	int i, rc;

	buf->rb_bc_srv_max_requests = 0;
	spin_lock_init(&buf->rb_lock);
	INIT_LIST_HEAD(&buf->rb_mrs);
	INIT_LIST_HEAD(&buf->rb_all_mrs);
	INIT_WORK(&buf->rb_refresh_worker, rpcrdma_mr_refresh_worker);

	INIT_LIST_HEAD(&buf->rb_send_bufs);
	INIT_LIST_HEAD(&buf->rb_allreqs);
	INIT_LIST_HEAD(&buf->rb_all_reps);

	rc = -ENOMEM;
	for (i = 0; i < r_xprt->rx_xprt.max_reqs; i++) {
		struct rpcrdma_req *req;

		req = rpcrdma_req_create(r_xprt, RPCRDMA_V1_DEF_INLINE_SIZE * 2,
					 GFP_KERNEL);
		if (!req)
			goto out;
		list_add(&req->rl_list, &buf->rb_send_bufs);
	}

	init_llist_head(&buf->rb_free_reps);

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}

/**
 * rpcrdma_req_destroy - Destroy an rpcrdma_req object
 * @req: unused object to be destroyed
 *
 * Relies on caller holding the transport send lock to protect
 * removing req->rl_all from buf->rb_all_reqs safely.
 */
void rpcrdma_req_destroy(struct rpcrdma_req *req)
{
	struct rpcrdma_mr *mr;

	list_del(&req->rl_all);

	while ((mr = rpcrdma_mr_pop(&req->rl_free_mrs))) {
		struct rpcrdma_buffer *buf = &mr->mr_xprt->rx_buf;

		spin_lock(&buf->rb_lock);
		list_del(&mr->mr_all);
		spin_unlock(&buf->rb_lock);

		frwr_release_mr(mr);
	}

	rpcrdma_regbuf_free(req->rl_recvbuf);
	rpcrdma_regbuf_free(req->rl_sendbuf);
	rpcrdma_regbuf_free(req->rl_rdmabuf);
	kfree(req);
}

/**
 * rpcrdma_mrs_destroy - Release all of a transport's MRs
 * @r_xprt: controlling transport instance
 *
 * Relies on caller holding the transport send lock to protect
 * removing mr->mr_list from req->rl_free_mrs safely.
 */
static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mr *mr;

	cancel_work_sync(&buf->rb_refresh_worker);

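	/* rb_lock is dropped around frwr_release_mr() below because
	 * deregistering an MR can sleep.
	 */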
	spin_lock(&buf->rb_lock);
	while ((mr = list_first_entry_or_null(&buf->rb_all_mrs,
					      struct rpcrdma_mr,
					      mr_all)) != NULL) {
		list_del(&mr->mr_list);
		list_del(&mr->mr_all);
		spin_unlock(&buf->rb_lock);

		frwr_release_mr(mr);

		spin_lock(&buf->rb_lock);
	}
	spin_unlock(&buf->rb_lock);
}

/**
 * rpcrdma_buffer_destroy - Release all hw resources
 * @buf: root control block for resources
 *
 * ORDERING: relies on a prior rpcrdma_xprt_drain:
 * - No more Send or Receive completions can occur
 * - All MRs, reps, and reqs are returned to their free lists
 */
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	rpcrdma_reps_destroy(buf);

	while (!list_empty(&buf->rb_send_bufs)) {
		struct rpcrdma_req *req;

		req = list_first_entry(&buf->rb_send_bufs,
				       struct rpcrdma_req, rl_list);
		list_del(&req->rl_list);
		rpcrdma_req_destroy(req);
	}
}

/**
 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
 * @r_xprt: controlling transport
 *
 * Returns an initialized rpcrdma_mr or NULL if no free
 * rpcrdma_mr objects are available.
 */
struct rpcrdma_mr *
rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mr *mr;

	spin_lock(&buf->rb_lock);
	mr = rpcrdma_mr_pop(&buf->rb_mrs);
	spin_unlock(&buf->rb_lock);
	return mr;
}

/**
 * rpcrdma_mr_put - DMA unmap an MR and release it
 * @mr: MR to release
 *
 */
void rpcrdma_mr_put(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	if (mr->mr_dir != DMA_NONE) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(r_xprt->rx_ep->re_id->device,
				mr->mr_sg, mr->mr_nents, mr->mr_dir);
		mr->mr_dir = DMA_NONE;
	}

	rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
}

/**
 * rpcrdma_reply_put - Put reply buffers back into pool
 * @buffers: buffer pool
 * @req: object to return
 *
 */
void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
{
	if (req->rl_reply) {
		rpcrdma_rep_put(buffers, req->rl_reply);
		req->rl_reply = NULL;
	}
}

/**
 * rpcrdma_buffer_get - Get a request buffer
 * @buffers: Buffer pool from which to obtain a buffer
 *
 * Returns a fresh rpcrdma_req, or NULL if none are available.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_req *req;

	spin_lock(&buffers->rb_lock);
	req = list_first_entry_or_null(&buffers->rb_send_bufs,
				       struct rpcrdma_req, rl_list);
	if (req)
		list_del_init(&req->rl_list);
	spin_unlock(&buffers->rb_lock);
	return req;
}

/**
 * rpcrdma_buffer_put - Put request/reply buffers back into pool
 * @buffers: buffer pool
 * @req: object to return
 *
 */
void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
{
	rpcrdma_reply_put(buffers, req);

	spin_lock(&buffers->rb_lock);
	list_add(&req->rl_list, &buffers->rb_send_bufs);
	spin_unlock(&buffers->rb_lock);
}

/**
 * rpcrdma_recv_buffer_put - Release rpcrdma_rep back to free list
 * @rep: rep to release
 *
 * Used after error conditions.
 */
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	rpcrdma_rep_put(&rep->rr_rxprt->rx_buf, rep);
}

/* Returns a pointer to a rpcrdma_regbuf object, or NULL.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. During Long Calls
 * or Replies they may be registered externally via frwr_map.
 */
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
		     gfp_t flags)
{
	struct rpcrdma_regbuf *rb;

	rb = kmalloc(sizeof(*rb), flags);
	if (!rb)
		return NULL;
	rb->rg_data = kmalloc(size, flags);
	if (!rb->rg_data) {
		kfree(rb);
		return NULL;
	}

	rb->rg_device = NULL;
	rb->rg_direction = direction;
	rb->rg_iov.length = size;
	return rb;
}

/**
 * rpcrdma_regbuf_realloc - re-allocate a SEND/RECV buffer
 * @rb: regbuf to reallocate
 * @size: size of buffer to be allocated, in bytes
 * @flags: GFP flags
 *
 * Returns true if reallocation was successful. If false is
 * returned, @rb is left untouched.
 */
bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size, gfp_t flags)
{
	void *buf;

	buf = kmalloc(size, flags);
	if (!buf)
		return false;

	rpcrdma_regbuf_dma_unmap(rb);
	kfree(rb->rg_data);

	rb->rg_data = buf;
	rb->rg_iov.length = size;
	return true;
}

/**
 * __rpcrdma_regbuf_dma_map - DMA-map a regbuf
 * @r_xprt: controlling transport instance
 * @rb: regbuf to be mapped
 *
 * Returns true if the buffer is now DMA mapped to @r_xprt's device
 */
bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
			      struct rpcrdma_regbuf *rb)
{
	struct ib_device *device = r_xprt->rx_ep->re_id->device;

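	/* Regbufs created with DMA_NONE (for instance rl_recvbuf, which
	 * is filled by the CPU during reply processing) are never DMA
	 * mapped.
	 */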
	if (rb->rg_direction == DMA_NONE)
		return false;

	rb->rg_iov.addr = ib_dma_map_single(device, rdmab_data(rb),
					    rdmab_length(rb), rb->rg_direction);
	if (ib_dma_mapping_error(device, rdmab_addr(rb))) {
		trace_xprtrdma_dma_maperr(rdmab_addr(rb));
		return false;
	}

	rb->rg_device = device;
	rb->rg_iov.lkey = r_xprt->rx_ep->re_pd->local_dma_lkey;
	return true;
}

static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb)
{
	if (!rb)
		return;

	if (!rpcrdma_regbuf_is_mapped(rb))
		return;

	ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb), rdmab_length(rb),
			    rb->rg_direction);
	rb->rg_device = NULL;
}

static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb)
{
	rpcrdma_regbuf_dma_unmap(rb);
	if (rb)
		kfree(rb->rg_data);
	kfree(rb);
}

/**
 * rpcrdma_post_sends - Post WRs to a transport's Send Queue
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req containing the Send WR to post
 *
 * Returns 0 if the post was successful, otherwise -ENOTCONN
 * is returned.
 */
int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *send_wr = &req->rl_wr;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	int rc;

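	/* Most Sends are posted unsignaled. A Send is signaled when the
	 * current batch of unsignaled Sends is exhausted, or when some
	 * other code path still holds a reference on @req and must be
	 * notified when this Send completes.
	 */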
	if (!ep->re_send_count || kref_read(&req->rl_kref) > 1) {
		send_wr->send_flags |= IB_SEND_SIGNALED;
		ep->re_send_count = ep->re_send_batch;
	} else {
		send_wr->send_flags &= ~IB_SEND_SIGNALED;
		--ep->re_send_count;
	}

	trace_xprtrdma_post_send(req);
	rc = frwr_send(r_xprt, req);
	if (rc)
		return -ENOTCONN;
	return 0;
}

/**
 * rpcrdma_post_recvs - Refill the Receive Queue
 * @r_xprt: controlling transport instance
 * @needed: current credit grant
 * @temp: mark Receive buffers to be deleted after one use
 *
 */
void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct ib_recv_wr *wr, *bad_wr;
	struct rpcrdma_rep *rep;
	int count, rc;

	rc = 0;
	count = 0;

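	/* Post only the shortfall between the credit grant and the
	 * Receives already outstanding, plus an extra
	 * RPCRDMA_MAX_RECV_BATCH (unless these are single-use buffers)
	 * so the Receive Queue does not run dry between calls.
	 */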
	if (likely(ep->re_receive_count > needed))
		goto out;
	needed -= ep->re_receive_count;
	if (!temp)
		needed += RPCRDMA_MAX_RECV_BATCH;

	/* fast path: all needed reps can be found on the free list */
	wr = NULL;
	while (needed) {
		rep = rpcrdma_rep_get_locked(buf);
		if (rep && rep->rr_temp) {
			rpcrdma_rep_destroy(rep);
			continue;
		}
		if (!rep)
			rep = rpcrdma_rep_create(r_xprt, temp);
		if (!rep)
			break;
		if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) {
			rpcrdma_rep_put(buf, rep);
			break;
		}

		trace_xprtrdma_post_recv(rep);
		rep->rr_recv_wr.next = wr;
		wr = &rep->rr_recv_wr;
		--needed;
		++count;
	}
	if (!wr)
		goto out;

	rc = ib_post_recv(ep->re_id->qp, wr,
			  (const struct ib_recv_wr **)&bad_wr);
out:
	trace_xprtrdma_post_recvs(r_xprt, count, rc);
	if (rc) {
		for (wr = bad_wr; wr;) {
			struct rpcrdma_rep *rep;

			rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr);
			wr = wr->next;
			rpcrdma_recv_buffer_put(rep);
			--count;
		}
	}
	ep->re_receive_count += count;
	return;
}