// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * transport.c
 *
 * This file contains the top-level implementation of an RPC RDMA
 * transport.
 *
 * Naming convention: functions beginning with xprt_ are part of the
 * transport switch. All others are RPC RDMA internal.
 */
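
/* For illustration only (not part of the original file): an NFS client
 * typically reaches this transport via a mount option, for example
 *
 *      mount -t nfs -o proto=rdma,port=20049 server:/export /mnt
 *
 * where 20049 is the IANA-assigned port for NFS/RDMA.
 */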
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/smp.h>

#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_TRANS
#endif

/*
 * tunables
 */

static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE;
unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE;
unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE;
unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRWR;
int xprt_rdma_pad_optimize;

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)

static unsigned int min_slot_table_size = RPCRDMA_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPCRDMA_MAX_SLOT_TABLE;
static unsigned int min_inline_size = RPCRDMA_MIN_INLINE;
static unsigned int max_inline_size = RPCRDMA_MAX_INLINE;
static unsigned int max_padding = PAGE_SIZE;
static unsigned int min_memreg = RPCRDMA_BOUNCEBUFFERS;
static unsigned int max_memreg = RPCRDMA_LAST - 1;
static unsigned int dummy;

static struct ctl_table_header *sunrpc_table_header;

static struct ctl_table xr_tunables_table[] = {
        {
                .procname = "rdma_slot_table_entries",
                .data = &xprt_rdma_slot_table_entries,
                .maxlen = sizeof(unsigned int),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = &min_slot_table_size,
                .extra2 = &max_slot_table_size
        },
        {
                .procname = "rdma_max_inline_read",
                .data = &xprt_rdma_max_inline_read,
                .maxlen = sizeof(unsigned int),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = &min_inline_size,
                .extra2 = &max_inline_size,
        },
        {
                .procname = "rdma_max_inline_write",
                .data = &xprt_rdma_max_inline_write,
                .maxlen = sizeof(unsigned int),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = &min_inline_size,
                .extra2 = &max_inline_size,
        },
        {
                .procname = "rdma_inline_write_padding",
                .data = &dummy,
                .maxlen = sizeof(unsigned int),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = SYSCTL_ZERO,
                .extra2 = &max_padding,
        },
        {
                .procname = "rdma_memreg_strategy",
                .data = &xprt_rdma_memreg_strategy,
                .maxlen = sizeof(unsigned int),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = &min_memreg,
                .extra2 = &max_memreg,
        },
        {
                .procname = "rdma_pad_optimize",
                .data = &xprt_rdma_pad_optimize,
                .maxlen = sizeof(unsigned int),
                .mode = 0644,
                .proc_handler = proc_dointvec,
        },
        { },
};

static struct ctl_table sunrpc_table[] = {
        {
                .procname = "sunrpc",
                .mode = 0555,
                .child = xr_tunables_table
        },
        { },
};

#endif
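
/* A minimal sketch of adjusting the tunables above, assuming
 * CONFIG_SUNRPC_DEBUG is enabled so the sysctl table is registered
 * (the value shown is illustrative):
 *
 *      # cat /proc/sys/sunrpc/rdma_slot_table_entries
 *      128
 *      # echo 256 > /proc/sys/sunrpc/rdma_slot_table_entries
 *
 * proc_dointvec_minmax rejects writes outside the extra1/extra2
 * bounds declared in xr_tunables_table.
 */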
static const struct rpc_xprt_ops xprt_rdma_procs;

static void
xprt_rdma_format_addresses4(struct rpc_xprt *xprt, struct sockaddr *sap)
{
        struct sockaddr_in *sin = (struct sockaddr_in *)sap;
        char buf[20];

        snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
        xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

        xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA;
}

static void
xprt_rdma_format_addresses6(struct rpc_xprt *xprt, struct sockaddr *sap)
{
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
        char buf[40];

        snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
        xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

        xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA6;
}

void
xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap)
{
        char buf[128];

        switch (sap->sa_family) {
        case AF_INET:
                xprt_rdma_format_addresses4(xprt, sap);
                break;
        case AF_INET6:
                xprt_rdma_format_addresses6(xprt, sap);
                break;
        default:
                pr_err("rpcrdma: Unrecognized address family\n");
                return;
        }

        (void)rpc_ntop(sap, buf, sizeof(buf));
        xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);

        snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
        xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

        snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
        xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);

        xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";
}

void
xprt_rdma_free_addresses(struct rpc_xprt *xprt)
{
        unsigned int i;

        for (i = 0; i < RPC_DISPLAY_MAX; i++)
                switch (i) {
                case RPC_DISPLAY_PROTO:
                case RPC_DISPLAY_NETID:
                        continue;
                default:
                        kfree(xprt->address_strings[i]);
                }
}

/**
 * xprt_rdma_connect_worker - establish connection in the background
 * @work: worker thread context
 *
 * Requester holds the xprt's send lock to prevent activity on this
 * transport while a fresh connection is being established. RPC tasks
 * sleep on the xprt's pending queue waiting for connect to complete.
 */
static void
xprt_rdma_connect_worker(struct work_struct *work)
{
        struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
                                                   rx_connect_worker.work);
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        int rc;

        rc = rpcrdma_xprt_connect(r_xprt);
        xprt_clear_connecting(xprt);
        if (!rc) {
                xprt->connect_cookie++;
                xprt->stat.connect_count++;
                xprt->stat.connect_time += (long)jiffies -
                                           xprt->stat.connect_start;
                xprt_set_connected(xprt);
                rc = -EAGAIN;
        } else
                rpcrdma_xprt_disconnect(r_xprt);
        xprt_unlock_connect(xprt, r_xprt);
        xprt_wake_pending_tasks(xprt, rc);
}

/**
 * xprt_rdma_inject_disconnect - inject a connection fault
 * @xprt: transport context
 *
 * If @xprt is connected, disconnect it to simulate spurious
 * connection loss. Caller must hold @xprt's send lock to
 * ensure that data structures and hardware resources are
 * stable during the rdma_disconnect() call.
 */
static void
xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

        trace_xprtrdma_op_inject_dsc(r_xprt);
        rdma_disconnect(r_xprt->rx_ep->re_id);
}
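
/* Note on tear-down ordering in xprt_rdma_destroy() below: the
 * connect worker is cancelled first so that it cannot re-establish
 * a connection while the endpoint and buffer pool underneath it are
 * being released.
 */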
/**
 * xprt_rdma_destroy - Full tear down of transport
 * @xprt: doomed transport context
 *
 * Caller guarantees there will be no more calls to us with
 * this @xprt.
 */
static void
xprt_rdma_destroy(struct rpc_xprt *xprt)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

        cancel_delayed_work_sync(&r_xprt->rx_connect_worker);

        rpcrdma_xprt_disconnect(r_xprt);
        rpcrdma_buffer_destroy(&r_xprt->rx_buf);

        xprt_rdma_free_addresses(xprt);
        xprt_free(xprt);

        module_put(THIS_MODULE);
}

/* 60 second timeout, no retries */
static const struct rpc_timeout xprt_rdma_default_timeout = {
        .to_initval = 60 * HZ,
        .to_maxval = 60 * HZ,
};

/**
 * xprt_setup_rdma - Set up transport to use RDMA
 *
 * @args: rpc transport arguments
 */
static struct rpc_xprt *
xprt_setup_rdma(struct xprt_create *args)
{
        struct rpc_xprt *xprt;
        struct rpcrdma_xprt *new_xprt;
        struct sockaddr *sap;
        int rc;

        if (args->addrlen > sizeof(xprt->addr))
                return ERR_PTR(-EBADF);

        if (!try_module_get(THIS_MODULE))
                return ERR_PTR(-EIO);

        xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt), 0,
                          xprt_rdma_slot_table_entries);
        if (!xprt) {
                module_put(THIS_MODULE);
                return ERR_PTR(-ENOMEM);
        }

        xprt->timeout = &xprt_rdma_default_timeout;
        xprt->connect_timeout = xprt->timeout->to_initval;
        xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
        xprt->bind_timeout = RPCRDMA_BIND_TO;
        xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
        xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;

        xprt->resvport = 0;     /* privileged port not needed */
        xprt->ops = &xprt_rdma_procs;

        /*
         * Set up RDMA-specific connect data.
         */
        sap = args->dstaddr;

        /* Ensure xprt->addr holds valid server TCP (not RDMA)
         * address, for any side protocols which peek at it */
        xprt->prot = IPPROTO_TCP;
        xprt->addrlen = args->addrlen;
        memcpy(&xprt->addr, sap, xprt->addrlen);

        if (rpc_get_port(sap))
                xprt_set_bound(xprt);
        xprt_rdma_format_addresses(xprt, sap);

        new_xprt = rpcx_to_rdmax(xprt);
        rc = rpcrdma_buffer_create(new_xprt);
        if (rc) {
                xprt_rdma_free_addresses(xprt);
                xprt_free(xprt);
                module_put(THIS_MODULE);
                return ERR_PTR(rc);
        }

        INIT_DELAYED_WORK(&new_xprt->rx_connect_worker,
                          xprt_rdma_connect_worker);

        xprt->max_payload = RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT;

        return xprt;
}

/**
 * xprt_rdma_close - close a transport connection
 * @xprt: transport context
 *
 * Called during autoclose or device removal.
 *
 * Caller holds @xprt's send lock to prevent activity on this
 * transport while the connection is torn down.
 */
void xprt_rdma_close(struct rpc_xprt *xprt)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

        rpcrdma_xprt_disconnect(r_xprt);

        xprt->reestablish_timeout = 0;
        ++xprt->connect_cookie;
        xprt_disconnect_done(xprt);
}
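
/* In xprt_rdma_set_port() below, buf[8] is large enough for either
 * rendering of a u16 port: at most five decimal digits ("65535") or
 * four hex digits, plus the terminating NUL.
 */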
/**
 * xprt_rdma_set_port - update server port with rpcbind result
 * @xprt: controlling RPC transport
 * @port: new port value
 *
 * Transport connect status is unchanged.
 */
static void
xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
{
        struct sockaddr *sap = (struct sockaddr *)&xprt->addr;
        char buf[8];

        rpc_set_port(sap, port);

        kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
        snprintf(buf, sizeof(buf), "%u", port);
        xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

        kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
        snprintf(buf, sizeof(buf), "%4hx", port);
        xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
}

/**
 * xprt_rdma_timer - invoked when an RPC times out
 * @xprt: controlling RPC transport
 * @task: RPC task that timed out
 *
 * Invoked when the transport is still connected, but an RPC
 * retransmit timeout occurs.
 *
 * Since RDMA connections don't have a keep-alive, forcibly
 * disconnect and retry to connect. This drives full
 * detection of the network path, and retransmissions of
 * all pending RPCs.
 */
static void
xprt_rdma_timer(struct rpc_xprt *xprt, struct rpc_task *task)
{
        xprt_force_disconnect(xprt);
}

/**
 * xprt_rdma_set_connect_timeout - set timeouts for establishing a connection
 * @xprt: controlling transport instance
 * @connect_timeout: reconnect timeout after client disconnects
 * @reconnect_timeout: reconnect timeout after server disconnects
 *
 */
static void xprt_rdma_set_connect_timeout(struct rpc_xprt *xprt,
                                          unsigned long connect_timeout,
                                          unsigned long reconnect_timeout)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

        trace_xprtrdma_op_set_cto(r_xprt, connect_timeout, reconnect_timeout);

        spin_lock(&xprt->transport_lock);

        if (connect_timeout < xprt->connect_timeout) {
                struct rpc_timeout to;
                unsigned long initval;

                to = *xprt->timeout;
                initval = connect_timeout;
                if (initval < RPCRDMA_INIT_REEST_TO << 1)
                        initval = RPCRDMA_INIT_REEST_TO << 1;
                to.to_initval = initval;
                to.to_maxval = initval;
                r_xprt->rx_timeout = to;
                xprt->timeout = &r_xprt->rx_timeout;
                xprt->connect_timeout = connect_timeout;
        }

        if (reconnect_timeout < xprt->max_reconnect_timeout)
                xprt->max_reconnect_timeout = reconnect_timeout;

        spin_unlock(&xprt->transport_lock);
}

/**
 * xprt_rdma_connect - schedule an attempt to reconnect
 * @xprt: transport state
 * @task: RPC scheduler context (unused)
 *
 */
static void
xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        unsigned long delay;

        WARN_ON_ONCE(!xprt_lock_connect(xprt, task, r_xprt));

        delay = 0;
        if (ep && ep->re_connect_status != 0) {
                delay = xprt_reconnect_delay(xprt);
                xprt_reconnect_backoff(xprt, RPCRDMA_INIT_REEST_TO);
        }
        trace_xprtrdma_op_connect(r_xprt, delay);
        queue_delayed_work(xprtiod_workqueue, &r_xprt->rx_connect_worker,
                           delay);
}
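
/* Scheduling note for xprt_rdma_connect() above: the work is queued
 * immediately for a fresh transport; if an endpoint already exists
 * with a non-zero connect status, xprt_reconnect_delay() defers the
 * attempt and xprt_reconnect_backoff() grows the delay for the next
 * one.
 */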
/**
 * xprt_rdma_alloc_slot - allocate an rpc_rqst
 * @xprt: controlling RPC transport
 * @task: RPC task requesting a fresh rpc_rqst
 *
 * tk_status values:
 *      %0 if task->tk_rqstp points to a fresh rpc_rqst
 *      %-ENOMEM if no rpc_rqst is available; queued on backlog
 */
static void
xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        struct rpcrdma_req *req;

        req = rpcrdma_buffer_get(&r_xprt->rx_buf);
        if (!req)
                goto out_sleep;
        task->tk_rqstp = &req->rl_slot;
        task->tk_status = 0;
        return;

out_sleep:
        task->tk_status = -ENOMEM;
        xprt_add_backlog(xprt, task);
}

/**
 * xprt_rdma_free_slot - release an rpc_rqst
 * @xprt: controlling RPC transport
 * @rqst: rpc_rqst to release
 *
 */
static void
xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
{
        struct rpcrdma_xprt *r_xprt =
                container_of(xprt, struct rpcrdma_xprt, rx_xprt);

        rpcrdma_reply_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
        if (!xprt_wake_up_backlog(xprt, rqst)) {
                memset(rqst, 0, sizeof(*rqst));
                rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
        }
}

static bool rpcrdma_check_regbuf(struct rpcrdma_xprt *r_xprt,
                                 struct rpcrdma_regbuf *rb, size_t size,
                                 gfp_t flags)
{
        if (unlikely(rdmab_length(rb) < size)) {
                if (!rpcrdma_regbuf_realloc(rb, size, flags))
                        return false;
                r_xprt->rx_stats.hardway_register_count += size;
        }
        return true;
}

/**
 * xprt_rdma_allocate - allocate transport resources for an RPC
 * @task: RPC task
 *
 * Return values:
 *      0:      Success; rq_buffer points to RPC buffer to use
 *      ENOMEM: Out of memory, call again later
 *      EIO:    A permanent error occurred, do not retry
 */
static int
xprt_rdma_allocate(struct rpc_task *task)
{
        struct rpc_rqst *rqst = task->tk_rqstp;
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        gfp_t flags;

        flags = RPCRDMA_DEF_GFP;
        if (RPC_IS_ASYNC(task))
                flags = GFP_NOWAIT | __GFP_NOWARN;
        if (RPC_IS_SWAPPER(task))
                flags |= __GFP_MEMALLOC;

        if (!rpcrdma_check_regbuf(r_xprt, req->rl_sendbuf, rqst->rq_callsize,
                                  flags))
                goto out_fail;
        if (!rpcrdma_check_regbuf(r_xprt, req->rl_recvbuf, rqst->rq_rcvsize,
                                  flags))
                goto out_fail;

        rqst->rq_buffer = rdmab_data(req->rl_sendbuf);
        rqst->rq_rbuffer = rdmab_data(req->rl_recvbuf);
        return 0;

out_fail:
        return -ENOMEM;
}

/**
 * xprt_rdma_free - release resources allocated by xprt_rdma_allocate
 * @task: RPC task
 *
 * Caller guarantees rqst->rq_buffer is non-NULL.
 */
static void
xprt_rdma_free(struct rpc_task *task)
{
        struct rpc_rqst *rqst = task->tk_rqstp;
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);

        if (!list_empty(&req->rl_registered))
                frwr_unmap_sync(r_xprt, req);

        /* XXX: If the RPC is completing because of a signal and
         * not because a reply was received, we ought to ensure
         * that the Send completion has fired, so that memory
         * involved with the Send is not still visible to the NIC.
         */
}
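
/* Buffer lifetime note: rq_buffer and rq_rbuffer point into the
 * rpcrdma_req's pre-registered send and receive regbufs, which stay
 * attached to the req. xprt_rdma_free() above therefore releases
 * only the MRs still registered for the RPC; the buffers themselves
 * are recycled when the slot is released.
 */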
/**
 * xprt_rdma_send_request - marshal and send an RPC request
 * @rqst: RPC message in rq_snd_buf
 *
 * Caller holds the transport's write lock.
 *
 * Returns:
 *      %0 if the RPC message has been sent
 *      %-ENOTCONN if the caller should reconnect and call again
 *      %-EAGAIN if the caller should call again
 *      %-ENOBUFS if the caller should call again after a delay
 *      %-EMSGSIZE if encoding ran out of buffer space. The request
 *              was not sent. Do not try to send this message again.
 *      %-EIO if an I/O error occurred. The request was not sent.
 *              Do not try to send this message again.
 */
static int
xprt_rdma_send_request(struct rpc_rqst *rqst)
{
        struct rpc_xprt *xprt = rqst->rq_xprt;
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        int rc = 0;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
        if (unlikely(!rqst->rq_buffer))
                return xprt_rdma_bc_send_reply(rqst);
#endif  /* CONFIG_SUNRPC_BACKCHANNEL */

        if (!xprt_connected(xprt))
                return -ENOTCONN;

        if (!xprt_request_get_cong(xprt, rqst))
                return -EBADSLT;

        rc = rpcrdma_marshal_req(r_xprt, rqst);
        if (rc < 0)
                goto failed_marshal;

        /* Must suppress retransmit to maintain credits */
        if (rqst->rq_connect_cookie == xprt->connect_cookie)
                goto drop_connection;
        rqst->rq_xtime = ktime_get();

        if (rpcrdma_post_sends(r_xprt, req))
                goto drop_connection;

        rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len;

        /* An RPC with no reply will throw off credit accounting,
         * so drop the connection to reset the credit grant.
         */
        if (!rpc_reply_expected(rqst->rq_task))
                goto drop_connection;
        return 0;

failed_marshal:
        if (rc != -ENOTCONN)
                return rc;
drop_connection:
        xprt_rdma_close(xprt);
        return -ENOTCONN;
}

void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        long idle_time = 0;

        if (xprt_connected(xprt))
                idle_time = (long)(jiffies - xprt->last_used) / HZ;

        seq_puts(seq, "\txprt:\trdma ");
        seq_printf(seq, "%u %lu %lu %lu %ld %lu %lu %lu %llu %llu ",
                   0,   /* need a local port? */
                   xprt->stat.bind_count,
                   xprt->stat.connect_count,
                   xprt->stat.connect_time / HZ,
                   idle_time,
                   xprt->stat.sends,
                   xprt->stat.recvs,
                   xprt->stat.bad_xids,
                   xprt->stat.req_u,
                   xprt->stat.bklog_u);
        seq_printf(seq, "%lu %lu %lu %llu %llu %llu %llu %lu %lu %lu %lu ",
                   r_xprt->rx_stats.read_chunk_count,
                   r_xprt->rx_stats.write_chunk_count,
                   r_xprt->rx_stats.reply_chunk_count,
                   r_xprt->rx_stats.total_rdma_request,
                   r_xprt->rx_stats.total_rdma_reply,
                   r_xprt->rx_stats.pullup_copy_count,
                   r_xprt->rx_stats.fixup_copy_count,
                   r_xprt->rx_stats.hardway_register_count,
                   r_xprt->rx_stats.failed_marshal_count,
                   r_xprt->rx_stats.bad_reply_count,
                   r_xprt->rx_stats.nomsg_call_count);
        seq_printf(seq, "%lu %lu %lu %lu %lu %lu\n",
                   r_xprt->rx_stats.mrs_recycled,
                   r_xprt->rx_stats.mrs_orphaned,
                   r_xprt->rx_stats.mrs_allocated,
                   r_xprt->rx_stats.local_inv_needed,
                   r_xprt->rx_stats.empty_sendctx_q,
                   r_xprt->rx_stats.reply_waits_for_send);
}

static int
xprt_rdma_enable_swap(struct rpc_xprt *xprt)
{
        return 0;
}

static void
xprt_rdma_disable_swap(struct rpc_xprt *xprt)
{
}
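
/* Illustrative only: the line emitted by xprt_rdma_print_stats()
 * above appears as the "xprt:" entry for a mount point in
 * /proc/self/mountstats, e.g. (field values invented):
 *
 *      xprt:   rdma 0 1 1 0 0 42 42 0 42 0 ...
 *
 * The first three fields after "rdma" are the local port placeholder,
 * bind_count, and connect_count.
 */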
/*
 * Plumbing for rpc transport switch and kernel module
 */

static const struct rpc_xprt_ops xprt_rdma_procs = {
        .reserve_xprt = xprt_reserve_xprt_cong,
        .release_xprt = xprt_release_xprt_cong, /* sunrpc/xprt.c */
        .alloc_slot = xprt_rdma_alloc_slot,
        .free_slot = xprt_rdma_free_slot,
        .release_request = xprt_release_rqst_cong, /* ditto */
        .wait_for_reply_request = xprt_wait_for_reply_request_def, /* ditto */
        .timer = xprt_rdma_timer,
        .rpcbind = rpcb_getport_async, /* sunrpc/rpcb_clnt.c */
        .set_port = xprt_rdma_set_port,
        .connect = xprt_rdma_connect,
        .buf_alloc = xprt_rdma_allocate,
        .buf_free = xprt_rdma_free,
        .send_request = xprt_rdma_send_request,
        .close = xprt_rdma_close,
        .destroy = xprt_rdma_destroy,
        .set_connect_timeout = xprt_rdma_set_connect_timeout,
        .print_stats = xprt_rdma_print_stats,
        .enable_swap = xprt_rdma_enable_swap,
        .disable_swap = xprt_rdma_disable_swap,
        .inject_disconnect = xprt_rdma_inject_disconnect,
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
        .bc_setup = xprt_rdma_bc_setup,
        .bc_maxpayload = xprt_rdma_bc_maxpayload,
        .bc_num_slots = xprt_rdma_bc_max_slots,
        .bc_free_rqst = xprt_rdma_bc_free_rqst,
        .bc_destroy = xprt_rdma_bc_destroy,
#endif
};

static struct xprt_class xprt_rdma = {
        .list = LIST_HEAD_INIT(xprt_rdma.list),
        .name = "rdma",
        .owner = THIS_MODULE,
        .ident = XPRT_TRANSPORT_RDMA,
        .setup = xprt_setup_rdma,
        .netid = { "rdma", "rdma6", "" },
};

void xprt_rdma_cleanup(void)
{
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
        if (sunrpc_table_header) {
                unregister_sysctl_table(sunrpc_table_header);
                sunrpc_table_header = NULL;
        }
#endif

        xprt_unregister_transport(&xprt_rdma);
        xprt_unregister_transport(&xprt_rdma_bc);
}

int xprt_rdma_init(void)
{
        int rc;

        rc = xprt_register_transport(&xprt_rdma);
        if (rc)
                return rc;

        rc = xprt_register_transport(&xprt_rdma_bc);
        if (rc) {
                xprt_unregister_transport(&xprt_rdma);
                return rc;
        }

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
        if (!sunrpc_table_header)
                sunrpc_table_header = register_sysctl_table(sunrpc_table);
#endif
        return 0;
}
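
/* xprt_rdma_init() and xprt_rdma_cleanup() are not themselves the
 * module init/exit hooks; they are expected to be invoked from the
 * module plumbing elsewhere in this directory (module.c), alongside
 * the server-side svc_rdma setup.
 */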