1// SPDX-License-Identifier: GPL-2.0 2/* 3 * Copyright (c) 2016 Avago Technologies. All rights reserved. 4 */ 5#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 6#include <linux/module.h> 7#include <linux/parser.h> 8#include <uapi/scsi/fc/fc_fs.h> 9#include <uapi/scsi/fc/fc_els.h> 10#include <linux/delay.h> 11#include <linux/overflow.h> 12 13#include "nvme.h" 14#include "fabrics.h" 15#include <linux/nvme-fc-driver.h> 16#include <linux/nvme-fc.h> 17#include "fc.h" 18#include <scsi/scsi_transport_fc.h> 19 20/* *************************** Data Structures/Defines ****************** */ 21 22 23enum nvme_fc_queue_flags { 24 NVME_FC_Q_CONNECTED = 0, 25 NVME_FC_Q_LIVE, 26}; 27 28#define NVME_FC_DEFAULT_DEV_LOSS_TMO 60 /* seconds */ 29#define NVME_FC_DEFAULT_RECONNECT_TMO 2 /* delay between reconnects 30 * when connected and a 31 * connection failure. 32 */ 33 34struct nvme_fc_queue { 35 struct nvme_fc_ctrl *ctrl; 36 struct device *dev; 37 struct blk_mq_hw_ctx *hctx; 38 void *lldd_handle; 39 size_t cmnd_capsule_len; 40 u32 qnum; 41 u32 rqcnt; 42 u32 seqno; 43 44 u64 connection_id; 45 atomic_t csn; 46 47 unsigned long flags; 48} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ 49 50enum nvme_fcop_flags { 51 FCOP_FLAGS_TERMIO = (1 << 0), 52 FCOP_FLAGS_AEN = (1 << 1), 53}; 54 55struct nvmefc_ls_req_op { 56 struct nvmefc_ls_req ls_req; 57 58 struct nvme_fc_rport *rport; 59 struct nvme_fc_queue *queue; 60 struct request *rq; 61 u32 flags; 62 63 int ls_error; 64 struct completion ls_done; 65 struct list_head lsreq_list; /* rport->ls_req_list */ 66 bool req_queued; 67}; 68 69struct nvmefc_ls_rcv_op { 70 struct nvme_fc_rport *rport; 71 struct nvmefc_ls_rsp *lsrsp; 72 union nvmefc_ls_requests *rqstbuf; 73 union nvmefc_ls_responses *rspbuf; 74 u16 rqstdatalen; 75 bool handled; 76 dma_addr_t rspdma; 77 struct list_head lsrcv_list; /* rport->ls_rcv_list */ 78} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ 79 80enum nvme_fcpop_state { 81 FCPOP_STATE_UNINIT = 0, 82 FCPOP_STATE_IDLE = 1, 83 FCPOP_STATE_ACTIVE = 2, 84 FCPOP_STATE_ABORTED = 3, 85 FCPOP_STATE_COMPLETE = 4, 86}; 87 88struct nvme_fc_fcp_op { 89 struct nvme_request nreq; /* 90 * nvme/host/core.c 91 * requires this to be 92 * the 1st element in the 93 * private structure 94 * associated with the 95 * request. 96 */ 97 struct nvmefc_fcp_req fcp_req; 98 99 struct nvme_fc_ctrl *ctrl; 100 struct nvme_fc_queue *queue; 101 struct request *rq; 102 103 atomic_t state; 104 u32 flags; 105 u32 rqno; 106 u32 nents; 107 108 struct nvme_fc_cmd_iu cmd_iu; 109 struct nvme_fc_ersp_iu rsp_iu; 110}; 111 112struct nvme_fcp_op_w_sgl { 113 struct nvme_fc_fcp_op op; 114 struct scatterlist sgl[NVME_INLINE_SG_CNT]; 115 uint8_t priv[]; 116}; 117 118struct nvme_fc_lport { 119 struct nvme_fc_local_port localport; 120 121 struct ida endp_cnt; 122 struct list_head port_list; /* nvme_fc_port_list */ 123 struct list_head endp_list; 124 struct device *dev; /* physical device for dma */ 125 struct nvme_fc_port_template *ops; 126 struct kref ref; 127 atomic_t act_rport_cnt; 128} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ 129 130struct nvme_fc_rport { 131 struct nvme_fc_remote_port remoteport; 132 133 struct list_head endp_list; /* for lport->endp_list */ 134 struct list_head ctrl_list; 135 struct list_head ls_req_list; 136 struct list_head ls_rcv_list; 137 struct list_head disc_list; 138 struct device *dev; /* physical device for dma */ 139 struct nvme_fc_lport *lport; 140 spinlock_t lock; 141 struct kref ref; 142 atomic_t act_ctrl_cnt; 143 unsigned long dev_loss_end; 144 struct work_struct lsrcv_work; 145} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ 146 147/* fc_ctrl flags values - specified as bit positions */ 148#define ASSOC_ACTIVE 0 149#define ASSOC_FAILED 1 150#define FCCTRL_TERMIO 2 151 152struct nvme_fc_ctrl { 153 spinlock_t lock; 154 struct nvme_fc_queue *queues; 155 struct device *dev; 156 struct nvme_fc_lport *lport; 157 struct nvme_fc_rport *rport; 158 u32 cnum; 159 160 bool ioq_live; 161 u64 association_id; 162 struct nvmefc_ls_rcv_op *rcv_disconn; 163 164 struct list_head ctrl_list; /* rport->ctrl_list */ 165 166 struct blk_mq_tag_set admin_tag_set; 167 struct blk_mq_tag_set tag_set; 168 169 struct work_struct ioerr_work; 170 struct delayed_work connect_work; 171 172 struct kref ref; 173 unsigned long flags; 174 u32 iocnt; 175 wait_queue_head_t ioabort_wait; 176 177 struct nvme_fc_fcp_op aen_ops[NVME_NR_AEN_COMMANDS]; 178 179 struct nvme_ctrl ctrl; 180}; 181 182static inline struct nvme_fc_ctrl * 183to_fc_ctrl(struct nvme_ctrl *ctrl) 184{ 185 return container_of(ctrl, struct nvme_fc_ctrl, ctrl); 186} 187 188static inline struct nvme_fc_lport * 189localport_to_lport(struct nvme_fc_local_port *portptr) 190{ 191 return container_of(portptr, struct nvme_fc_lport, localport); 192} 193 194static inline struct nvme_fc_rport * 195remoteport_to_rport(struct nvme_fc_remote_port *portptr) 196{ 197 return container_of(portptr, struct nvme_fc_rport, remoteport); 198} 199 200static inline struct nvmefc_ls_req_op * 201ls_req_to_lsop(struct nvmefc_ls_req *lsreq) 202{ 203 return container_of(lsreq, struct nvmefc_ls_req_op, ls_req); 204} 205 206static inline struct nvme_fc_fcp_op * 207fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq) 208{ 209 return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req); 210} 211 212 213 214/* *************************** Globals **************************** */ 215 216 217static DEFINE_SPINLOCK(nvme_fc_lock); 218 219static LIST_HEAD(nvme_fc_lport_list); 220static DEFINE_IDA(nvme_fc_local_port_cnt); 221static DEFINE_IDA(nvme_fc_ctrl_cnt); 222 223static struct workqueue_struct *nvme_fc_wq; 224 225static bool nvme_fc_waiting_to_unload; 226static DECLARE_COMPLETION(nvme_fc_unload_proceed); 227 228/* 229 * These items are short-term. They will eventually be moved into 230 * a generic FC class. See comments in module init. 231 */ 232static struct device *fc_udev_device; 233 234static void nvme_fc_complete_rq(struct request *rq); 235 236/* *********************** FC-NVME Port Management ************************ */ 237 238static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *, 239 struct nvme_fc_queue *, unsigned int); 240 241static void nvme_fc_handle_ls_rqst_work(struct work_struct *work); 242 243 244static void 245nvme_fc_free_lport(struct kref *ref) 246{ 247 struct nvme_fc_lport *lport = 248 container_of(ref, struct nvme_fc_lport, ref); 249 unsigned long flags; 250 251 WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED); 252 WARN_ON(!list_empty(&lport->endp_list)); 253 254 /* remove from transport list */ 255 spin_lock_irqsave(&nvme_fc_lock, flags); 256 list_del(&lport->port_list); 257 if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list)) 258 complete(&nvme_fc_unload_proceed); 259 spin_unlock_irqrestore(&nvme_fc_lock, flags); 260 261 ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num); 262 ida_destroy(&lport->endp_cnt); 263 264 put_device(lport->dev); 265 266 kfree(lport); 267} 268 269static void 270nvme_fc_lport_put(struct nvme_fc_lport *lport) 271{ 272 kref_put(&lport->ref, nvme_fc_free_lport); 273} 274 275static int 276nvme_fc_lport_get(struct nvme_fc_lport *lport) 277{ 278 return kref_get_unless_zero(&lport->ref); 279} 280 281 282static struct nvme_fc_lport * 283nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo, 284 struct nvme_fc_port_template *ops, 285 struct device *dev) 286{ 287 struct nvme_fc_lport *lport; 288 unsigned long flags; 289 290 spin_lock_irqsave(&nvme_fc_lock, flags); 291 292 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { 293 if (lport->localport.node_name != pinfo->node_name || 294 lport->localport.port_name != pinfo->port_name) 295 continue; 296 297 if (lport->dev != dev) { 298 lport = ERR_PTR(-EXDEV); 299 goto out_done; 300 } 301 302 if (lport->localport.port_state != FC_OBJSTATE_DELETED) { 303 lport = ERR_PTR(-EEXIST); 304 goto out_done; 305 } 306 307 if (!nvme_fc_lport_get(lport)) { 308 /* 309 * fails if ref cnt already 0. If so, 310 * act as if lport already deleted 311 */ 312 lport = NULL; 313 goto out_done; 314 } 315 316 /* resume the lport */ 317 318 lport->ops = ops; 319 lport->localport.port_role = pinfo->port_role; 320 lport->localport.port_id = pinfo->port_id; 321 lport->localport.port_state = FC_OBJSTATE_ONLINE; 322 323 spin_unlock_irqrestore(&nvme_fc_lock, flags); 324 325 return lport; 326 } 327 328 lport = NULL; 329 330out_done: 331 spin_unlock_irqrestore(&nvme_fc_lock, flags); 332 333 return lport; 334} 335 336/** 337 * nvme_fc_register_localport - transport entry point called by an 338 * LLDD to register the existence of a NVME 339 * host FC port. 340 * @pinfo: pointer to information about the port to be registered 341 * @template: LLDD entrypoints and operational parameters for the port 342 * @dev: physical hardware device node port corresponds to. Will be 343 * used for DMA mappings 344 * @portptr: pointer to a local port pointer. Upon success, the routine 345 * will allocate a nvme_fc_local_port structure and place its 346 * address in the local port pointer. Upon failure, local port 347 * pointer will be set to 0. 348 * 349 * Returns: 350 * a completion status. Must be 0 upon success; a negative errno 351 * (ex: -ENXIO) upon failure. 352 */ 353int 354nvme_fc_register_localport(struct nvme_fc_port_info *pinfo, 355 struct nvme_fc_port_template *template, 356 struct device *dev, 357 struct nvme_fc_local_port **portptr) 358{ 359 struct nvme_fc_lport *newrec; 360 unsigned long flags; 361 int ret, idx; 362 363 if (!template->localport_delete || !template->remoteport_delete || 364 !template->ls_req || !template->fcp_io || 365 !template->ls_abort || !template->fcp_abort || 366 !template->max_hw_queues || !template->max_sgl_segments || 367 !template->max_dif_sgl_segments || !template->dma_boundary) { 368 ret = -EINVAL; 369 goto out_reghost_failed; 370 } 371 372 /* 373 * look to see if there is already a localport that had been 374 * deregistered and in the process of waiting for all the 375 * references to fully be removed. If the references haven't 376 * expired, we can simply re-enable the localport. Remoteports 377 * and controller reconnections should resume naturally. 378 */ 379 newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev); 380 381 /* found an lport, but something about its state is bad */ 382 if (IS_ERR(newrec)) { 383 ret = PTR_ERR(newrec); 384 goto out_reghost_failed; 385 386 /* found existing lport, which was resumed */ 387 } else if (newrec) { 388 *portptr = &newrec->localport; 389 return 0; 390 } 391 392 /* nothing found - allocate a new localport struct */ 393 394 newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz), 395 GFP_KERNEL); 396 if (!newrec) { 397 ret = -ENOMEM; 398 goto out_reghost_failed; 399 } 400 401 idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL); 402 if (idx < 0) { 403 ret = -ENOSPC; 404 goto out_fail_kfree; 405 } 406 407 if (!get_device(dev) && dev) { 408 ret = -ENODEV; 409 goto out_ida_put; 410 } 411 412 INIT_LIST_HEAD(&newrec->port_list); 413 INIT_LIST_HEAD(&newrec->endp_list); 414 kref_init(&newrec->ref); 415 atomic_set(&newrec->act_rport_cnt, 0); 416 newrec->ops = template; 417 newrec->dev = dev; 418 ida_init(&newrec->endp_cnt); 419 if (template->local_priv_sz) 420 newrec->localport.private = &newrec[1]; 421 else 422 newrec->localport.private = NULL; 423 newrec->localport.node_name = pinfo->node_name; 424 newrec->localport.port_name = pinfo->port_name; 425 newrec->localport.port_role = pinfo->port_role; 426 newrec->localport.port_id = pinfo->port_id; 427 newrec->localport.port_state = FC_OBJSTATE_ONLINE; 428 newrec->localport.port_num = idx; 429 430 spin_lock_irqsave(&nvme_fc_lock, flags); 431 list_add_tail(&newrec->port_list, &nvme_fc_lport_list); 432 spin_unlock_irqrestore(&nvme_fc_lock, flags); 433 434 if (dev) 435 dma_set_seg_boundary(dev, template->dma_boundary); 436 437 *portptr = &newrec->localport; 438 return 0; 439 440out_ida_put: 441 ida_simple_remove(&nvme_fc_local_port_cnt, idx); 442out_fail_kfree: 443 kfree(newrec); 444out_reghost_failed: 445 *portptr = NULL; 446 447 return ret; 448} 449EXPORT_SYMBOL_GPL(nvme_fc_register_localport); 450 451/** 452 * nvme_fc_unregister_localport - transport entry point called by an 453 * LLDD to deregister/remove a previously 454 * registered a NVME host FC port. 455 * @portptr: pointer to the (registered) local port that is to be deregistered. 456 * 457 * Returns: 458 * a completion status. Must be 0 upon success; a negative errno 459 * (ex: -ENXIO) upon failure. 460 */ 461int 462nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr) 463{ 464 struct nvme_fc_lport *lport = localport_to_lport(portptr); 465 unsigned long flags; 466 467 if (!portptr) 468 return -EINVAL; 469 470 spin_lock_irqsave(&nvme_fc_lock, flags); 471 472 if (portptr->port_state != FC_OBJSTATE_ONLINE) { 473 spin_unlock_irqrestore(&nvme_fc_lock, flags); 474 return -EINVAL; 475 } 476 portptr->port_state = FC_OBJSTATE_DELETED; 477 478 spin_unlock_irqrestore(&nvme_fc_lock, flags); 479 480 if (atomic_read(&lport->act_rport_cnt) == 0) 481 lport->ops->localport_delete(&lport->localport); 482 483 nvme_fc_lport_put(lport); 484 485 return 0; 486} 487EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport); 488 489/* 490 * TRADDR strings, per FC-NVME are fixed format: 491 * "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters 492 * udev event will only differ by prefix of what field is 493 * being specified: 494 * "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters 495 * 19 + 43 + null_fudge = 64 characters 496 */ 497#define FCNVME_TRADDR_LENGTH 64 498 499static void 500nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport, 501 struct nvme_fc_rport *rport) 502{ 503 char hostaddr[FCNVME_TRADDR_LENGTH]; /* NVMEFC_HOST_TRADDR=...*/ 504 char tgtaddr[FCNVME_TRADDR_LENGTH]; /* NVMEFC_TRADDR=...*/ 505 char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL }; 506 507 if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY)) 508 return; 509 510 snprintf(hostaddr, sizeof(hostaddr), 511 "NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx", 512 lport->localport.node_name, lport->localport.port_name); 513 snprintf(tgtaddr, sizeof(tgtaddr), 514 "NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx", 515 rport->remoteport.node_name, rport->remoteport.port_name); 516 kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp); 517} 518 519static void 520nvme_fc_free_rport(struct kref *ref) 521{ 522 struct nvme_fc_rport *rport = 523 container_of(ref, struct nvme_fc_rport, ref); 524 struct nvme_fc_lport *lport = 525 localport_to_lport(rport->remoteport.localport); 526 unsigned long flags; 527 528 WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED); 529 WARN_ON(!list_empty(&rport->ctrl_list)); 530 531 /* remove from lport list */ 532 spin_lock_irqsave(&nvme_fc_lock, flags); 533 list_del(&rport->endp_list); 534 spin_unlock_irqrestore(&nvme_fc_lock, flags); 535 536 WARN_ON(!list_empty(&rport->disc_list)); 537 ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num); 538 539 kfree(rport); 540 541 nvme_fc_lport_put(lport); 542} 543 544static void 545nvme_fc_rport_put(struct nvme_fc_rport *rport) 546{ 547 kref_put(&rport->ref, nvme_fc_free_rport); 548} 549 550static int 551nvme_fc_rport_get(struct nvme_fc_rport *rport) 552{ 553 return kref_get_unless_zero(&rport->ref); 554} 555 556static void 557nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl) 558{ 559 switch (ctrl->ctrl.state) { 560 case NVME_CTRL_NEW: 561 case NVME_CTRL_CONNECTING: 562 /* 563 * As all reconnects were suppressed, schedule a 564 * connect. 565 */ 566 dev_info(ctrl->ctrl.device, 567 "NVME-FC{%d}: connectivity re-established. " 568 "Attempting reconnect\n", ctrl->cnum); 569 570 queue_delayed_work(nvme_wq, &ctrl->connect_work, 0); 571 break; 572 573 case NVME_CTRL_RESETTING: 574 /* 575 * Controller is already in the process of terminating the 576 * association. No need to do anything further. The reconnect 577 * step will naturally occur after the reset completes. 578 */ 579 break; 580 581 default: 582 /* no action to take - let it delete */ 583 break; 584 } 585} 586 587static struct nvme_fc_rport * 588nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport, 589 struct nvme_fc_port_info *pinfo) 590{ 591 struct nvme_fc_rport *rport; 592 struct nvme_fc_ctrl *ctrl; 593 unsigned long flags; 594 595 spin_lock_irqsave(&nvme_fc_lock, flags); 596 597 list_for_each_entry(rport, &lport->endp_list, endp_list) { 598 if (rport->remoteport.node_name != pinfo->node_name || 599 rport->remoteport.port_name != pinfo->port_name) 600 continue; 601 602 if (!nvme_fc_rport_get(rport)) { 603 rport = ERR_PTR(-ENOLCK); 604 goto out_done; 605 } 606 607 spin_unlock_irqrestore(&nvme_fc_lock, flags); 608 609 spin_lock_irqsave(&rport->lock, flags); 610 611 /* has it been unregistered */ 612 if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) { 613 /* means lldd called us twice */ 614 spin_unlock_irqrestore(&rport->lock, flags); 615 nvme_fc_rport_put(rport); 616 return ERR_PTR(-ESTALE); 617 } 618 619 rport->remoteport.port_role = pinfo->port_role; 620 rport->remoteport.port_id = pinfo->port_id; 621 rport->remoteport.port_state = FC_OBJSTATE_ONLINE; 622 rport->dev_loss_end = 0; 623 624 /* 625 * kick off a reconnect attempt on all associations to the 626 * remote port. A successful reconnects will resume i/o. 627 */ 628 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) 629 nvme_fc_resume_controller(ctrl); 630 631 spin_unlock_irqrestore(&rport->lock, flags); 632 633 return rport; 634 } 635 636 rport = NULL; 637 638out_done: 639 spin_unlock_irqrestore(&nvme_fc_lock, flags); 640 641 return rport; 642} 643 644static inline void 645__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport, 646 struct nvme_fc_port_info *pinfo) 647{ 648 if (pinfo->dev_loss_tmo) 649 rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo; 650 else 651 rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO; 652} 653 654/** 655 * nvme_fc_register_remoteport - transport entry point called by an 656 * LLDD to register the existence of a NVME 657 * subsystem FC port on its fabric. 658 * @localport: pointer to the (registered) local port that the remote 659 * subsystem port is connected to. 660 * @pinfo: pointer to information about the port to be registered 661 * @portptr: pointer to a remote port pointer. Upon success, the routine 662 * will allocate a nvme_fc_remote_port structure and place its 663 * address in the remote port pointer. Upon failure, remote port 664 * pointer will be set to 0. 665 * 666 * Returns: 667 * a completion status. Must be 0 upon success; a negative errno 668 * (ex: -ENXIO) upon failure. 669 */ 670int 671nvme_fc_register_remoteport(struct nvme_fc_local_port *localport, 672 struct nvme_fc_port_info *pinfo, 673 struct nvme_fc_remote_port **portptr) 674{ 675 struct nvme_fc_lport *lport = localport_to_lport(localport); 676 struct nvme_fc_rport *newrec; 677 unsigned long flags; 678 int ret, idx; 679 680 if (!nvme_fc_lport_get(lport)) { 681 ret = -ESHUTDOWN; 682 goto out_reghost_failed; 683 } 684 685 /* 686 * look to see if there is already a remoteport that is waiting 687 * for a reconnect (within dev_loss_tmo) with the same WWN's. 688 * If so, transition to it and reconnect. 689 */ 690 newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo); 691 692 /* found an rport, but something about its state is bad */ 693 if (IS_ERR(newrec)) { 694 ret = PTR_ERR(newrec); 695 goto out_lport_put; 696 697 /* found existing rport, which was resumed */ 698 } else if (newrec) { 699 nvme_fc_lport_put(lport); 700 __nvme_fc_set_dev_loss_tmo(newrec, pinfo); 701 nvme_fc_signal_discovery_scan(lport, newrec); 702 *portptr = &newrec->remoteport; 703 return 0; 704 } 705 706 /* nothing found - allocate a new remoteport struct */ 707 708 newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz), 709 GFP_KERNEL); 710 if (!newrec) { 711 ret = -ENOMEM; 712 goto out_lport_put; 713 } 714 715 idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL); 716 if (idx < 0) { 717 ret = -ENOSPC; 718 goto out_kfree_rport; 719 } 720 721 INIT_LIST_HEAD(&newrec->endp_list); 722 INIT_LIST_HEAD(&newrec->ctrl_list); 723 INIT_LIST_HEAD(&newrec->ls_req_list); 724 INIT_LIST_HEAD(&newrec->disc_list); 725 kref_init(&newrec->ref); 726 atomic_set(&newrec->act_ctrl_cnt, 0); 727 spin_lock_init(&newrec->lock); 728 newrec->remoteport.localport = &lport->localport; 729 INIT_LIST_HEAD(&newrec->ls_rcv_list); 730 newrec->dev = lport->dev; 731 newrec->lport = lport; 732 if (lport->ops->remote_priv_sz) 733 newrec->remoteport.private = &newrec[1]; 734 else 735 newrec->remoteport.private = NULL; 736 newrec->remoteport.port_role = pinfo->port_role; 737 newrec->remoteport.node_name = pinfo->node_name; 738 newrec->remoteport.port_name = pinfo->port_name; 739 newrec->remoteport.port_id = pinfo->port_id; 740 newrec->remoteport.port_state = FC_OBJSTATE_ONLINE; 741 newrec->remoteport.port_num = idx; 742 __nvme_fc_set_dev_loss_tmo(newrec, pinfo); 743 INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work); 744 745 spin_lock_irqsave(&nvme_fc_lock, flags); 746 list_add_tail(&newrec->endp_list, &lport->endp_list); 747 spin_unlock_irqrestore(&nvme_fc_lock, flags); 748 749 nvme_fc_signal_discovery_scan(lport, newrec); 750 751 *portptr = &newrec->remoteport; 752 return 0; 753 754out_kfree_rport: 755 kfree(newrec); 756out_lport_put: 757 nvme_fc_lport_put(lport); 758out_reghost_failed: 759 *portptr = NULL; 760 return ret; 761} 762EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport); 763 764static int 765nvme_fc_abort_lsops(struct nvme_fc_rport *rport) 766{ 767 struct nvmefc_ls_req_op *lsop; 768 unsigned long flags; 769 770restart: 771 spin_lock_irqsave(&rport->lock, flags); 772 773 list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) { 774 if (!(lsop->flags & FCOP_FLAGS_TERMIO)) { 775 lsop->flags |= FCOP_FLAGS_TERMIO; 776 spin_unlock_irqrestore(&rport->lock, flags); 777 rport->lport->ops->ls_abort(&rport->lport->localport, 778 &rport->remoteport, 779 &lsop->ls_req); 780 goto restart; 781 } 782 } 783 spin_unlock_irqrestore(&rport->lock, flags); 784 785 return 0; 786} 787 788static void 789nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl) 790{ 791 dev_info(ctrl->ctrl.device, 792 "NVME-FC{%d}: controller connectivity lost. Awaiting " 793 "Reconnect", ctrl->cnum); 794 795 switch (ctrl->ctrl.state) { 796 case NVME_CTRL_NEW: 797 case NVME_CTRL_LIVE: 798 /* 799 * Schedule a controller reset. The reset will terminate the 800 * association and schedule the reconnect timer. Reconnects 801 * will be attempted until either the ctlr_loss_tmo 802 * (max_retries * connect_delay) expires or the remoteport's 803 * dev_loss_tmo expires. 804 */ 805 if (nvme_reset_ctrl(&ctrl->ctrl)) { 806 dev_warn(ctrl->ctrl.device, 807 "NVME-FC{%d}: Couldn't schedule reset.\n", 808 ctrl->cnum); 809 nvme_delete_ctrl(&ctrl->ctrl); 810 } 811 break; 812 813 case NVME_CTRL_CONNECTING: 814 /* 815 * The association has already been terminated and the 816 * controller is attempting reconnects. No need to do anything 817 * futher. Reconnects will be attempted until either the 818 * ctlr_loss_tmo (max_retries * connect_delay) expires or the 819 * remoteport's dev_loss_tmo expires. 820 */ 821 break; 822 823 case NVME_CTRL_RESETTING: 824 /* 825 * Controller is already in the process of terminating the 826 * association. No need to do anything further. The reconnect 827 * step will kick in naturally after the association is 828 * terminated. 829 */ 830 break; 831 832 case NVME_CTRL_DELETING: 833 case NVME_CTRL_DELETING_NOIO: 834 default: 835 /* no action to take - let it delete */ 836 break; 837 } 838} 839 840/** 841 * nvme_fc_unregister_remoteport - transport entry point called by an 842 * LLDD to deregister/remove a previously 843 * registered a NVME subsystem FC port. 844 * @portptr: pointer to the (registered) remote port that is to be 845 * deregistered. 846 * 847 * Returns: 848 * a completion status. Must be 0 upon success; a negative errno 849 * (ex: -ENXIO) upon failure. 850 */ 851int 852nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr) 853{ 854 struct nvme_fc_rport *rport = remoteport_to_rport(portptr); 855 struct nvme_fc_ctrl *ctrl; 856 unsigned long flags; 857 858 if (!portptr) 859 return -EINVAL; 860 861 spin_lock_irqsave(&rport->lock, flags); 862 863 if (portptr->port_state != FC_OBJSTATE_ONLINE) { 864 spin_unlock_irqrestore(&rport->lock, flags); 865 return -EINVAL; 866 } 867 portptr->port_state = FC_OBJSTATE_DELETED; 868 869 rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ); 870 871 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { 872 /* if dev_loss_tmo==0, dev loss is immediate */ 873 if (!portptr->dev_loss_tmo) { 874 dev_warn(ctrl->ctrl.device, 875 "NVME-FC{%d}: controller connectivity lost.\n", 876 ctrl->cnum); 877 nvme_delete_ctrl(&ctrl->ctrl); 878 } else 879 nvme_fc_ctrl_connectivity_loss(ctrl); 880 } 881 882 spin_unlock_irqrestore(&rport->lock, flags); 883 884 nvme_fc_abort_lsops(rport); 885 886 if (atomic_read(&rport->act_ctrl_cnt) == 0) 887 rport->lport->ops->remoteport_delete(portptr); 888 889 /* 890 * release the reference, which will allow, if all controllers 891 * go away, which should only occur after dev_loss_tmo occurs, 892 * for the rport to be torn down. 893 */ 894 nvme_fc_rport_put(rport); 895 896 return 0; 897} 898EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport); 899 900/** 901 * nvme_fc_rescan_remoteport - transport entry point called by an 902 * LLDD to request a nvme device rescan. 903 * @remoteport: pointer to the (registered) remote port that is to be 904 * rescanned. 905 * 906 * Returns: N/A 907 */ 908void 909nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport) 910{ 911 struct nvme_fc_rport *rport = remoteport_to_rport(remoteport); 912 913 nvme_fc_signal_discovery_scan(rport->lport, rport); 914} 915EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport); 916 917int 918nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr, 919 u32 dev_loss_tmo) 920{ 921 struct nvme_fc_rport *rport = remoteport_to_rport(portptr); 922 unsigned long flags; 923 924 spin_lock_irqsave(&rport->lock, flags); 925 926 if (portptr->port_state != FC_OBJSTATE_ONLINE) { 927 spin_unlock_irqrestore(&rport->lock, flags); 928 return -EINVAL; 929 } 930 931 /* a dev_loss_tmo of 0 (immediate) is allowed to be set */ 932 rport->remoteport.dev_loss_tmo = dev_loss_tmo; 933 934 spin_unlock_irqrestore(&rport->lock, flags); 935 936 return 0; 937} 938EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss); 939 940 941/* *********************** FC-NVME DMA Handling **************************** */ 942 943/* 944 * The fcloop device passes in a NULL device pointer. Real LLD's will 945 * pass in a valid device pointer. If NULL is passed to the dma mapping 946 * routines, depending on the platform, it may or may not succeed, and 947 * may crash. 948 * 949 * As such: 950 * Wrapper all the dma routines and check the dev pointer. 951 * 952 * If simple mappings (return just a dma address, we'll noop them, 953 * returning a dma address of 0. 954 * 955 * On more complex mappings (dma_map_sg), a pseudo routine fills 956 * in the scatter list, setting all dma addresses to 0. 957 */ 958 959static inline dma_addr_t 960fc_dma_map_single(struct device *dev, void *ptr, size_t size, 961 enum dma_data_direction dir) 962{ 963 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L; 964} 965 966static inline int 967fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 968{ 969 return dev ? dma_mapping_error(dev, dma_addr) : 0; 970} 971 972static inline void 973fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, 974 enum dma_data_direction dir) 975{ 976 if (dev) 977 dma_unmap_single(dev, addr, size, dir); 978} 979 980static inline void 981fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, 982 enum dma_data_direction dir) 983{ 984 if (dev) 985 dma_sync_single_for_cpu(dev, addr, size, dir); 986} 987 988static inline void 989fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, 990 enum dma_data_direction dir) 991{ 992 if (dev) 993 dma_sync_single_for_device(dev, addr, size, dir); 994} 995 996/* pseudo dma_map_sg call */ 997static int 998fc_map_sg(struct scatterlist *sg, int nents) 999{ 1000 struct scatterlist *s; 1001 int i; 1002 1003 WARN_ON(nents == 0 || sg[0].length == 0); 1004 1005 for_each_sg(sg, s, nents, i) { 1006 s->dma_address = 0L; 1007#ifdef CONFIG_NEED_SG_DMA_LENGTH 1008 s->dma_length = s->length; 1009#endif 1010 } 1011 return nents; 1012} 1013 1014static inline int 1015fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, 1016 enum dma_data_direction dir) 1017{ 1018 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents); 1019} 1020 1021static inline void 1022fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 1023 enum dma_data_direction dir) 1024{ 1025 if (dev) 1026 dma_unmap_sg(dev, sg, nents, dir); 1027} 1028 1029/* *********************** FC-NVME LS Handling **************************** */ 1030 1031static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *); 1032static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *); 1033 1034static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg); 1035 1036static void 1037__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop) 1038{ 1039 struct nvme_fc_rport *rport = lsop->rport; 1040 struct nvmefc_ls_req *lsreq = &lsop->ls_req; 1041 unsigned long flags; 1042 1043 spin_lock_irqsave(&rport->lock, flags); 1044 1045 if (!lsop->req_queued) { 1046 spin_unlock_irqrestore(&rport->lock, flags); 1047 return; 1048 } 1049 1050 list_del(&lsop->lsreq_list); 1051 1052 lsop->req_queued = false; 1053 1054 spin_unlock_irqrestore(&rport->lock, flags); 1055 1056 fc_dma_unmap_single(rport->dev, lsreq->rqstdma, 1057 (lsreq->rqstlen + lsreq->rsplen), 1058 DMA_BIDIRECTIONAL); 1059 1060 nvme_fc_rport_put(rport); 1061} 1062 1063static int 1064__nvme_fc_send_ls_req(struct nvme_fc_rport *rport, 1065 struct nvmefc_ls_req_op *lsop, 1066 void (*done)(struct nvmefc_ls_req *req, int status)) 1067{ 1068 struct nvmefc_ls_req *lsreq = &lsop->ls_req; 1069 unsigned long flags; 1070 int ret = 0; 1071 1072 if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) 1073 return -ECONNREFUSED; 1074 1075 if (!nvme_fc_rport_get(rport)) 1076 return -ESHUTDOWN; 1077 1078 lsreq->done = done; 1079 lsop->rport = rport; 1080 lsop->req_queued = false; 1081 INIT_LIST_HEAD(&lsop->lsreq_list); 1082 init_completion(&lsop->ls_done); 1083 1084 lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr, 1085 lsreq->rqstlen + lsreq->rsplen, 1086 DMA_BIDIRECTIONAL); 1087 if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) { 1088 ret = -EFAULT; 1089 goto out_putrport; 1090 } 1091 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; 1092 1093 spin_lock_irqsave(&rport->lock, flags); 1094 1095 list_add_tail(&lsop->lsreq_list, &rport->ls_req_list); 1096 1097 lsop->req_queued = true; 1098 1099 spin_unlock_irqrestore(&rport->lock, flags); 1100 1101 ret = rport->lport->ops->ls_req(&rport->lport->localport, 1102 &rport->remoteport, lsreq); 1103 if (ret) 1104 goto out_unlink; 1105 1106 return 0; 1107 1108out_unlink: 1109 lsop->ls_error = ret; 1110 spin_lock_irqsave(&rport->lock, flags); 1111 lsop->req_queued = false; 1112 list_del(&lsop->lsreq_list); 1113 spin_unlock_irqrestore(&rport->lock, flags); 1114 fc_dma_unmap_single(rport->dev, lsreq->rqstdma, 1115 (lsreq->rqstlen + lsreq->rsplen), 1116 DMA_BIDIRECTIONAL); 1117out_putrport: 1118 nvme_fc_rport_put(rport); 1119 1120 return ret; 1121} 1122 1123static void 1124nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status) 1125{ 1126 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq); 1127 1128 lsop->ls_error = status; 1129 complete(&lsop->ls_done); 1130} 1131 1132static int 1133nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop) 1134{ 1135 struct nvmefc_ls_req *lsreq = &lsop->ls_req; 1136 struct fcnvme_ls_rjt *rjt = lsreq->rspaddr; 1137 int ret; 1138 1139 ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done); 1140 1141 if (!ret) { 1142 /* 1143 * No timeout/not interruptible as we need the struct 1144 * to exist until the lldd calls us back. Thus mandate 1145 * wait until driver calls back. lldd responsible for 1146 * the timeout action 1147 */ 1148 wait_for_completion(&lsop->ls_done); 1149 1150 __nvme_fc_finish_ls_req(lsop); 1151 1152 ret = lsop->ls_error; 1153 } 1154 1155 if (ret) 1156 return ret; 1157 1158 /* ACC or RJT payload ? */ 1159 if (rjt->w0.ls_cmd == FCNVME_LS_RJT) 1160 return -ENXIO; 1161 1162 return 0; 1163} 1164 1165static int 1166nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport, 1167 struct nvmefc_ls_req_op *lsop, 1168 void (*done)(struct nvmefc_ls_req *req, int status)) 1169{ 1170 /* don't wait for completion */ 1171 1172 return __nvme_fc_send_ls_req(rport, lsop, done); 1173} 1174 1175static int 1176nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl, 1177 struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio) 1178{ 1179 struct nvmefc_ls_req_op *lsop; 1180 struct nvmefc_ls_req *lsreq; 1181 struct fcnvme_ls_cr_assoc_rqst *assoc_rqst; 1182 struct fcnvme_ls_cr_assoc_acc *assoc_acc; 1183 unsigned long flags; 1184 int ret, fcret = 0; 1185 1186 lsop = kzalloc((sizeof(*lsop) + 1187 sizeof(*assoc_rqst) + sizeof(*assoc_acc) + 1188 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); 1189 if (!lsop) { 1190 dev_info(ctrl->ctrl.device, 1191 "NVME-FC{%d}: send Create Association failed: ENOMEM\n", 1192 ctrl->cnum); 1193 ret = -ENOMEM; 1194 goto out_no_memory; 1195 } 1196 1197 assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)&lsop[1]; 1198 assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1]; 1199 lsreq = &lsop->ls_req; 1200 if (ctrl->lport->ops->lsrqst_priv_sz) 1201 lsreq->private = &assoc_acc[1]; 1202 else 1203 lsreq->private = NULL; 1204 1205 assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION; 1206 assoc_rqst->desc_list_len = 1207 cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)); 1208 1209 assoc_rqst->assoc_cmd.desc_tag = 1210 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD); 1211 assoc_rqst->assoc_cmd.desc_len = 1212 fcnvme_lsdesc_len( 1213 sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)); 1214 1215 assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); 1216 assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1); 1217 /* Linux supports only Dynamic controllers */ 1218 assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff); 1219 uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id); 1220 strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn, 1221 min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE)); 1222 strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn, 1223 min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE)); 1224 1225 lsop->queue = queue; 1226 lsreq->rqstaddr = assoc_rqst; 1227 lsreq->rqstlen = sizeof(*assoc_rqst); 1228 lsreq->rspaddr = assoc_acc; 1229 lsreq->rsplen = sizeof(*assoc_acc); 1230 lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC; 1231 1232 ret = nvme_fc_send_ls_req(ctrl->rport, lsop); 1233 if (ret) 1234 goto out_free_buffer; 1235 1236 /* process connect LS completion */ 1237 1238 /* validate the ACC response */ 1239 if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC) 1240 fcret = VERR_LSACC; 1241 else if (assoc_acc->hdr.desc_list_len != 1242 fcnvme_lsdesc_len( 1243 sizeof(struct fcnvme_ls_cr_assoc_acc))) 1244 fcret = VERR_CR_ASSOC_ACC_LEN; 1245 else if (assoc_acc->hdr.rqst.desc_tag != 1246 cpu_to_be32(FCNVME_LSDESC_RQST)) 1247 fcret = VERR_LSDESC_RQST; 1248 else if (assoc_acc->hdr.rqst.desc_len != 1249 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst))) 1250 fcret = VERR_LSDESC_RQST_LEN; 1251 else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION) 1252 fcret = VERR_CR_ASSOC; 1253 else if (assoc_acc->associd.desc_tag != 1254 cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) 1255 fcret = VERR_ASSOC_ID; 1256 else if (assoc_acc->associd.desc_len != 1257 fcnvme_lsdesc_len( 1258 sizeof(struct fcnvme_lsdesc_assoc_id))) 1259 fcret = VERR_ASSOC_ID_LEN; 1260 else if (assoc_acc->connectid.desc_tag != 1261 cpu_to_be32(FCNVME_LSDESC_CONN_ID)) 1262 fcret = VERR_CONN_ID; 1263 else if (assoc_acc->connectid.desc_len != 1264 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id))) 1265 fcret = VERR_CONN_ID_LEN; 1266 1267 if (fcret) { 1268 ret = -EBADF; 1269 dev_err(ctrl->dev, 1270 "q %d Create Association LS failed: %s\n", 1271 queue->qnum, validation_errors[fcret]); 1272 } else { 1273 spin_lock_irqsave(&ctrl->lock, flags); 1274 ctrl->association_id = 1275 be64_to_cpu(assoc_acc->associd.association_id); 1276 queue->connection_id = 1277 be64_to_cpu(assoc_acc->connectid.connection_id); 1278 set_bit(NVME_FC_Q_CONNECTED, &queue->flags); 1279 spin_unlock_irqrestore(&ctrl->lock, flags); 1280 } 1281 1282out_free_buffer: 1283 kfree(lsop); 1284out_no_memory: 1285 if (ret) 1286 dev_err(ctrl->dev, 1287 "queue %d connect admin queue failed (%d).\n", 1288 queue->qnum, ret); 1289 return ret; 1290} 1291 1292static int 1293nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, 1294 u16 qsize, u16 ersp_ratio) 1295{ 1296 struct nvmefc_ls_req_op *lsop; 1297 struct nvmefc_ls_req *lsreq; 1298 struct fcnvme_ls_cr_conn_rqst *conn_rqst; 1299 struct fcnvme_ls_cr_conn_acc *conn_acc; 1300 int ret, fcret = 0; 1301 1302 lsop = kzalloc((sizeof(*lsop) + 1303 sizeof(*conn_rqst) + sizeof(*conn_acc) + 1304 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); 1305 if (!lsop) { 1306 dev_info(ctrl->ctrl.device, 1307 "NVME-FC{%d}: send Create Connection failed: ENOMEM\n", 1308 ctrl->cnum); 1309 ret = -ENOMEM; 1310 goto out_no_memory; 1311 } 1312 1313 conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)&lsop[1]; 1314 conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1]; 1315 lsreq = &lsop->ls_req; 1316 if (ctrl->lport->ops->lsrqst_priv_sz) 1317 lsreq->private = (void *)&conn_acc[1]; 1318 else 1319 lsreq->private = NULL; 1320 1321 conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION; 1322 conn_rqst->desc_list_len = cpu_to_be32( 1323 sizeof(struct fcnvme_lsdesc_assoc_id) + 1324 sizeof(struct fcnvme_lsdesc_cr_conn_cmd)); 1325 1326 conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); 1327 conn_rqst->associd.desc_len = 1328 fcnvme_lsdesc_len( 1329 sizeof(struct fcnvme_lsdesc_assoc_id)); 1330 conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id); 1331 conn_rqst->connect_cmd.desc_tag = 1332 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD); 1333 conn_rqst->connect_cmd.desc_len = 1334 fcnvme_lsdesc_len( 1335 sizeof(struct fcnvme_lsdesc_cr_conn_cmd)); 1336 conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); 1337 conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum); 1338 conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1); 1339 1340 lsop->queue = queue; 1341 lsreq->rqstaddr = conn_rqst; 1342 lsreq->rqstlen = sizeof(*conn_rqst); 1343 lsreq->rspaddr = conn_acc; 1344 lsreq->rsplen = sizeof(*conn_acc); 1345 lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC; 1346 1347 ret = nvme_fc_send_ls_req(ctrl->rport, lsop); 1348 if (ret) 1349 goto out_free_buffer; 1350 1351 /* process connect LS completion */ 1352 1353 /* validate the ACC response */ 1354 if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC) 1355 fcret = VERR_LSACC; 1356 else if (conn_acc->hdr.desc_list_len != 1357 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc))) 1358 fcret = VERR_CR_CONN_ACC_LEN; 1359 else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST)) 1360 fcret = VERR_LSDESC_RQST; 1361 else if (conn_acc->hdr.rqst.desc_len != 1362 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst))) 1363 fcret = VERR_LSDESC_RQST_LEN; 1364 else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION) 1365 fcret = VERR_CR_CONN; 1366 else if (conn_acc->connectid.desc_tag != 1367 cpu_to_be32(FCNVME_LSDESC_CONN_ID)) 1368 fcret = VERR_CONN_ID; 1369 else if (conn_acc->connectid.desc_len != 1370 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id))) 1371 fcret = VERR_CONN_ID_LEN; 1372 1373 if (fcret) { 1374 ret = -EBADF; 1375 dev_err(ctrl->dev, 1376 "q %d Create I/O Connection LS failed: %s\n", 1377 queue->qnum, validation_errors[fcret]); 1378 } else { 1379 queue->connection_id = 1380 be64_to_cpu(conn_acc->connectid.connection_id); 1381 set_bit(NVME_FC_Q_CONNECTED, &queue->flags); 1382 } 1383 1384out_free_buffer: 1385 kfree(lsop); 1386out_no_memory: 1387 if (ret) 1388 dev_err(ctrl->dev, 1389 "queue %d connect I/O queue failed (%d).\n", 1390 queue->qnum, ret); 1391 return ret; 1392} 1393 1394static void 1395nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) 1396{ 1397 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq); 1398 1399 __nvme_fc_finish_ls_req(lsop); 1400 1401 /* fc-nvme initiator doesn't care about success or failure of cmd */ 1402 1403 kfree(lsop); 1404} 1405 1406/* 1407 * This routine sends a FC-NVME LS to disconnect (aka terminate) 1408 * the FC-NVME Association. Terminating the association also 1409 * terminates the FC-NVME connections (per queue, both admin and io 1410 * queues) that are part of the association. E.g. things are torn 1411 * down, and the related FC-NVME Association ID and Connection IDs 1412 * become invalid. 1413 * 1414 * The behavior of the fc-nvme initiator is such that it's 1415 * understanding of the association and connections will implicitly 1416 * be torn down. The action is implicit as it may be due to a loss of 1417 * connectivity with the fc-nvme target, so you may never get a 1418 * response even if you tried. As such, the action of this routine 1419 * is to asynchronously send the LS, ignore any results of the LS, and 1420 * continue on with terminating the association. If the fc-nvme target 1421 * is present and receives the LS, it too can tear down. 1422 */ 1423static void 1424nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl) 1425{ 1426 struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst; 1427 struct fcnvme_ls_disconnect_assoc_acc *discon_acc; 1428 struct nvmefc_ls_req_op *lsop; 1429 struct nvmefc_ls_req *lsreq; 1430 int ret; 1431 1432 lsop = kzalloc((sizeof(*lsop) + 1433 sizeof(*discon_rqst) + sizeof(*discon_acc) + 1434 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); 1435 if (!lsop) { 1436 dev_info(ctrl->ctrl.device, 1437 "NVME-FC{%d}: send Disconnect Association " 1438 "failed: ENOMEM\n", 1439 ctrl->cnum); 1440 return; 1441 } 1442 1443 discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1]; 1444 discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1]; 1445 lsreq = &lsop->ls_req; 1446 if (ctrl->lport->ops->lsrqst_priv_sz) 1447 lsreq->private = (void *)&discon_acc[1]; 1448 else 1449 lsreq->private = NULL; 1450 1451 nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc, 1452 ctrl->association_id); 1453 1454 ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop, 1455 nvme_fc_disconnect_assoc_done); 1456 if (ret) 1457 kfree(lsop); 1458} 1459 1460static void 1461nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) 1462{ 1463 struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private; 1464 struct nvme_fc_rport *rport = lsop->rport; 1465 struct nvme_fc_lport *lport = rport->lport; 1466 unsigned long flags; 1467 1468 spin_lock_irqsave(&rport->lock, flags); 1469 list_del(&lsop->lsrcv_list); 1470 spin_unlock_irqrestore(&rport->lock, flags); 1471 1472 fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma, 1473 sizeof(*lsop->rspbuf), DMA_TO_DEVICE); 1474 fc_dma_unmap_single(lport->dev, lsop->rspdma, 1475 sizeof(*lsop->rspbuf), DMA_TO_DEVICE); 1476 1477 kfree(lsop); 1478 1479 nvme_fc_rport_put(rport); 1480} 1481 1482static void 1483nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop) 1484{ 1485 struct nvme_fc_rport *rport = lsop->rport; 1486 struct nvme_fc_lport *lport = rport->lport; 1487 struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0; 1488 int ret; 1489 1490 fc_dma_sync_single_for_device(lport->dev, lsop->rspdma, 1491 sizeof(*lsop->rspbuf), DMA_TO_DEVICE); 1492 1493 ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport, 1494 lsop->lsrsp); 1495 if (ret) { 1496 dev_warn(lport->dev, 1497 "LLDD rejected LS RSP xmt: LS %d status %d\n", 1498 w0->ls_cmd, ret); 1499 nvme_fc_xmt_ls_rsp_done(lsop->lsrsp); 1500 return; 1501 } 1502} 1503 1504static struct nvme_fc_ctrl * 1505nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport, 1506 struct nvmefc_ls_rcv_op *lsop) 1507{ 1508 struct fcnvme_ls_disconnect_assoc_rqst *rqst = 1509 &lsop->rqstbuf->rq_dis_assoc; 1510 struct nvme_fc_ctrl *ctrl, *ret = NULL; 1511 struct nvmefc_ls_rcv_op *oldls = NULL; 1512 u64 association_id = be64_to_cpu(rqst->associd.association_id); 1513 unsigned long flags; 1514 1515 spin_lock_irqsave(&rport->lock, flags); 1516 1517 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { 1518 if (!nvme_fc_ctrl_get(ctrl)) 1519 continue; 1520 spin_lock(&ctrl->lock); 1521 if (association_id == ctrl->association_id) { 1522 oldls = ctrl->rcv_disconn; 1523 ctrl->rcv_disconn = lsop; 1524 ret = ctrl; 1525 } 1526 spin_unlock(&ctrl->lock); 1527 if (ret) 1528 /* leave the ctrl get reference */ 1529 break; 1530 nvme_fc_ctrl_put(ctrl); 1531 } 1532 1533 spin_unlock_irqrestore(&rport->lock, flags); 1534 1535 /* transmit a response for anything that was pending */ 1536 if (oldls) { 1537 dev_info(rport->lport->dev, 1538 "NVME-FC{%d}: Multiple Disconnect Association " 1539 "LS's received\n", ctrl->cnum); 1540 /* overwrite good response with bogus failure */ 1541 oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, 1542 sizeof(*oldls->rspbuf), 1543 rqst->w0.ls_cmd, 1544 FCNVME_RJT_RC_UNAB, 1545 FCNVME_RJT_EXP_NONE, 0); 1546 nvme_fc_xmt_ls_rsp(oldls); 1547 } 1548 1549 return ret; 1550} 1551 1552/* 1553 * returns true to mean LS handled and ls_rsp can be sent 1554 * returns false to defer ls_rsp xmt (will be done as part of 1555 * association termination) 1556 */ 1557static bool 1558nvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop) 1559{ 1560 struct nvme_fc_rport *rport = lsop->rport; 1561 struct fcnvme_ls_disconnect_assoc_rqst *rqst = 1562 &lsop->rqstbuf->rq_dis_assoc; 1563 struct fcnvme_ls_disconnect_assoc_acc *acc = 1564 &lsop->rspbuf->rsp_dis_assoc; 1565 struct nvme_fc_ctrl *ctrl = NULL; 1566 int ret = 0; 1567 1568 memset(acc, 0, sizeof(*acc)); 1569 1570 ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst); 1571 if (!ret) { 1572 /* match an active association */ 1573 ctrl = nvme_fc_match_disconn_ls(rport, lsop); 1574 if (!ctrl) 1575 ret = VERR_NO_ASSOC; 1576 } 1577 1578 if (ret) { 1579 dev_info(rport->lport->dev, 1580 "Disconnect LS failed: %s\n", 1581 validation_errors[ret]); 1582 lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1583 sizeof(*acc), rqst->w0.ls_cmd, 1584 (ret == VERR_NO_ASSOC) ? 1585 FCNVME_RJT_RC_INV_ASSOC : 1586 FCNVME_RJT_RC_LOGIC, 1587 FCNVME_RJT_EXP_NONE, 0); 1588 return true; 1589 } 1590 1591 /* format an ACCept response */ 1592 1593 lsop->lsrsp->rsplen = sizeof(*acc); 1594 1595 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, 1596 fcnvme_lsdesc_len( 1597 sizeof(struct fcnvme_ls_disconnect_assoc_acc)), 1598 FCNVME_LS_DISCONNECT_ASSOC); 1599 1600 /* 1601 * the transmit of the response will occur after the exchanges 1602 * for the association have been ABTS'd by 1603 * nvme_fc_delete_association(). 1604 */ 1605 1606 /* fail the association */ 1607 nvme_fc_error_recovery(ctrl, "Disconnect Association LS received"); 1608 1609 /* release the reference taken by nvme_fc_match_disconn_ls() */ 1610 nvme_fc_ctrl_put(ctrl); 1611 1612 return false; 1613} 1614 1615/* 1616 * Actual Processing routine for received FC-NVME LS Requests from the LLD 1617 * returns true if a response should be sent afterward, false if rsp will 1618 * be sent asynchronously. 1619 */ 1620static bool 1621nvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop) 1622{ 1623 struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0; 1624 bool ret = true; 1625 1626 lsop->lsrsp->nvme_fc_private = lsop; 1627 lsop->lsrsp->rspbuf = lsop->rspbuf; 1628 lsop->lsrsp->rspdma = lsop->rspdma; 1629 lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done; 1630 /* Be preventative. handlers will later set to valid length */ 1631 lsop->lsrsp->rsplen = 0; 1632 1633 /* 1634 * handlers: 1635 * parse request input, execute the request, and format the 1636 * LS response 1637 */ 1638 switch (w0->ls_cmd) { 1639 case FCNVME_LS_DISCONNECT_ASSOC: 1640 ret = nvme_fc_ls_disconnect_assoc(lsop); 1641 break; 1642 case FCNVME_LS_DISCONNECT_CONN: 1643 lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, 1644 sizeof(*lsop->rspbuf), w0->ls_cmd, 1645 FCNVME_RJT_RC_UNSUP, FCNVME_RJT_EXP_NONE, 0); 1646 break; 1647 case FCNVME_LS_CREATE_ASSOCIATION: 1648 case FCNVME_LS_CREATE_CONNECTION: 1649 lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, 1650 sizeof(*lsop->rspbuf), w0->ls_cmd, 1651 FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0); 1652 break; 1653 default: 1654 lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, 1655 sizeof(*lsop->rspbuf), w0->ls_cmd, 1656 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0); 1657 break; 1658 } 1659 1660 return(ret); 1661} 1662 1663static void 1664nvme_fc_handle_ls_rqst_work(struct work_struct *work) 1665{ 1666 struct nvme_fc_rport *rport = 1667 container_of(work, struct nvme_fc_rport, lsrcv_work); 1668 struct fcnvme_ls_rqst_w0 *w0; 1669 struct nvmefc_ls_rcv_op *lsop; 1670 unsigned long flags; 1671 bool sendrsp; 1672 1673restart: 1674 sendrsp = true; 1675 spin_lock_irqsave(&rport->lock, flags); 1676 list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) { 1677 if (lsop->handled) 1678 continue; 1679 1680 lsop->handled = true; 1681 if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) { 1682 spin_unlock_irqrestore(&rport->lock, flags); 1683 sendrsp = nvme_fc_handle_ls_rqst(lsop); 1684 } else { 1685 spin_unlock_irqrestore(&rport->lock, flags); 1686 w0 = &lsop->rqstbuf->w0; 1687 lsop->lsrsp->rsplen = nvme_fc_format_rjt( 1688 lsop->rspbuf, 1689 sizeof(*lsop->rspbuf), 1690 w0->ls_cmd, 1691 FCNVME_RJT_RC_UNAB, 1692 FCNVME_RJT_EXP_NONE, 0); 1693 } 1694 if (sendrsp) 1695 nvme_fc_xmt_ls_rsp(lsop); 1696 goto restart; 1697 } 1698 spin_unlock_irqrestore(&rport->lock, flags); 1699} 1700 1701/** 1702 * nvme_fc_rcv_ls_req - transport entry point called by an LLDD 1703 * upon the reception of a NVME LS request. 1704 * 1705 * The nvme-fc layer will copy payload to an internal structure for 1706 * processing. As such, upon completion of the routine, the LLDD may 1707 * immediately free/reuse the LS request buffer passed in the call. 1708 * 1709 * If this routine returns error, the LLDD should abort the exchange. 1710 * 1711 * @remoteport: pointer to the (registered) remote port that the LS 1712 * was received from. The remoteport is associated with 1713 * a specific localport. 1714 * @lsrsp: pointer to a nvmefc_ls_rsp response structure to be 1715 * used to reference the exchange corresponding to the LS 1716 * when issuing an ls response. 1717 * @lsreqbuf: pointer to the buffer containing the LS Request 1718 * @lsreqbuf_len: length, in bytes, of the received LS request 1719 */ 1720int 1721nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr, 1722 struct nvmefc_ls_rsp *lsrsp, 1723 void *lsreqbuf, u32 lsreqbuf_len) 1724{ 1725 struct nvme_fc_rport *rport = remoteport_to_rport(portptr); 1726 struct nvme_fc_lport *lport = rport->lport; 1727 struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; 1728 struct nvmefc_ls_rcv_op *lsop; 1729 unsigned long flags; 1730 int ret; 1731 1732 nvme_fc_rport_get(rport); 1733 1734 /* validate there's a routine to transmit a response */ 1735 if (!lport->ops->xmt_ls_rsp) { 1736 dev_info(lport->dev, 1737 "RCV %s LS failed: no LLDD xmt_ls_rsp\n", 1738 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 1739 nvmefc_ls_names[w0->ls_cmd] : ""); 1740 ret = -EINVAL; 1741 goto out_put; 1742 } 1743 1744 if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { 1745 dev_info(lport->dev, 1746 "RCV %s LS failed: payload too large\n", 1747 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 1748 nvmefc_ls_names[w0->ls_cmd] : ""); 1749 ret = -E2BIG; 1750 goto out_put; 1751 } 1752 1753 lsop = kzalloc(sizeof(*lsop) + 1754 sizeof(union nvmefc_ls_requests) + 1755 sizeof(union nvmefc_ls_responses), 1756 GFP_KERNEL); 1757 if (!lsop) { 1758 dev_info(lport->dev, 1759 "RCV %s LS failed: No memory\n", 1760 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 1761 nvmefc_ls_names[w0->ls_cmd] : ""); 1762 ret = -ENOMEM; 1763 goto out_put; 1764 } 1765 lsop->rqstbuf = (union nvmefc_ls_requests *)&lsop[1]; 1766 lsop->rspbuf = (union nvmefc_ls_responses *)&lsop->rqstbuf[1]; 1767 1768 lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf, 1769 sizeof(*lsop->rspbuf), 1770 DMA_TO_DEVICE); 1771 if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) { 1772 dev_info(lport->dev, 1773 "RCV %s LS failed: DMA mapping failure\n", 1774 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 1775 nvmefc_ls_names[w0->ls_cmd] : ""); 1776 ret = -EFAULT; 1777 goto out_free; 1778 } 1779 1780 lsop->rport = rport; 1781 lsop->lsrsp = lsrsp; 1782 1783 memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len); 1784 lsop->rqstdatalen = lsreqbuf_len; 1785 1786 spin_lock_irqsave(&rport->lock, flags); 1787 if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) { 1788 spin_unlock_irqrestore(&rport->lock, flags); 1789 ret = -ENOTCONN; 1790 goto out_unmap; 1791 } 1792 list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list); 1793 spin_unlock_irqrestore(&rport->lock, flags); 1794 1795 schedule_work(&rport->lsrcv_work); 1796 1797 return 0; 1798 1799out_unmap: 1800 fc_dma_unmap_single(lport->dev, lsop->rspdma, 1801 sizeof(*lsop->rspbuf), DMA_TO_DEVICE); 1802out_free: 1803 kfree(lsop); 1804out_put: 1805 nvme_fc_rport_put(rport); 1806 return ret; 1807} 1808EXPORT_SYMBOL_GPL(nvme_fc_rcv_ls_req); 1809 1810 1811/* *********************** NVME Ctrl Routines **************************** */ 1812 1813static void 1814__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl, 1815 struct nvme_fc_fcp_op *op) 1816{ 1817 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma, 1818 sizeof(op->rsp_iu), DMA_FROM_DEVICE); 1819 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma, 1820 sizeof(op->cmd_iu), DMA_TO_DEVICE); 1821 1822 atomic_set(&op->state, FCPOP_STATE_UNINIT); 1823} 1824 1825static void 1826nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq, 1827 unsigned int hctx_idx) 1828{ 1829 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); 1830 1831 return __nvme_fc_exit_request(set->driver_data, op); 1832} 1833 1834static int 1835__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op) 1836{ 1837 unsigned long flags; 1838 int opstate; 1839 1840 spin_lock_irqsave(&ctrl->lock, flags); 1841 opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED); 1842 if (opstate != FCPOP_STATE_ACTIVE) 1843 atomic_set(&op->state, opstate); 1844 else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) { 1845 op->flags |= FCOP_FLAGS_TERMIO; 1846 ctrl->iocnt++; 1847 } 1848 spin_unlock_irqrestore(&ctrl->lock, flags); 1849 1850 if (opstate != FCPOP_STATE_ACTIVE) 1851 return -ECANCELED; 1852 1853 ctrl->lport->ops->fcp_abort(&ctrl->lport->localport, 1854 &ctrl->rport->remoteport, 1855 op->queue->lldd_handle, 1856 &op->fcp_req); 1857 1858 return 0; 1859} 1860 1861static void 1862nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl) 1863{ 1864 struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops; 1865 int i; 1866 1867 /* ensure we've initialized the ops once */ 1868 if (!(aen_op->flags & FCOP_FLAGS_AEN)) 1869 return; 1870 1871 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) 1872 __nvme_fc_abort_op(ctrl, aen_op); 1873} 1874 1875static inline void 1876__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl, 1877 struct nvme_fc_fcp_op *op, int opstate) 1878{ 1879 unsigned long flags; 1880 1881 if (opstate == FCPOP_STATE_ABORTED) { 1882 spin_lock_irqsave(&ctrl->lock, flags); 1883 if (test_bit(FCCTRL_TERMIO, &ctrl->flags) && 1884 op->flags & FCOP_FLAGS_TERMIO) { 1885 if (!--ctrl->iocnt) 1886 wake_up(&ctrl->ioabort_wait); 1887 } 1888 spin_unlock_irqrestore(&ctrl->lock, flags); 1889 } 1890} 1891 1892static void 1893nvme_fc_ctrl_ioerr_work(struct work_struct *work) 1894{ 1895 struct nvme_fc_ctrl *ctrl = 1896 container_of(work, struct nvme_fc_ctrl, ioerr_work); 1897 1898 nvme_fc_error_recovery(ctrl, "transport detected io error"); 1899} 1900 1901static void 1902nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) 1903{ 1904 struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req); 1905 struct request *rq = op->rq; 1906 struct nvmefc_fcp_req *freq = &op->fcp_req; 1907 struct nvme_fc_ctrl *ctrl = op->ctrl; 1908 struct nvme_fc_queue *queue = op->queue; 1909 struct nvme_completion *cqe = &op->rsp_iu.cqe; 1910 struct nvme_command *sqe = &op->cmd_iu.sqe; 1911 __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1); 1912 union nvme_result result; 1913 bool terminate_assoc = true; 1914 int opstate; 1915 1916 /* 1917 * WARNING: 1918 * The current linux implementation of a nvme controller 1919 * allocates a single tag set for all io queues and sizes 1920 * the io queues to fully hold all possible tags. Thus, the 1921 * implementation does not reference or care about the sqhd 1922 * value as it never needs to use the sqhd/sqtail pointers 1923 * for submission pacing. 1924 * 1925 * This affects the FC-NVME implementation in two ways: 1926 * 1) As the value doesn't matter, we don't need to waste 1927 * cycles extracting it from ERSPs and stamping it in the 1928 * cases where the transport fabricates CQEs on successful 1929 * completions. 1930 * 2) The FC-NVME implementation requires that delivery of 1931 * ERSP completions are to go back to the nvme layer in order 1932 * relative to the rsn, such that the sqhd value will always 1933 * be "in order" for the nvme layer. As the nvme layer in 1934 * linux doesn't care about sqhd, there's no need to return 1935 * them in order. 1936 * 1937 * Additionally: 1938 * As the core nvme layer in linux currently does not look at 1939 * every field in the cqe - in cases where the FC transport must 1940 * fabricate a CQE, the following fields will not be set as they 1941 * are not referenced: 1942 * cqe.sqid, cqe.sqhd, cqe.command_id 1943 * 1944 * Failure or error of an individual i/o, in a transport 1945 * detected fashion unrelated to the nvme completion status, 1946 * potentially cause the initiator and target sides to get out 1947 * of sync on SQ head/tail (aka outstanding io count allowed). 1948 * Per FC-NVME spec, failure of an individual command requires 1949 * the connection to be terminated, which in turn requires the 1950 * association to be terminated. 1951 */ 1952 1953 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); 1954 1955 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma, 1956 sizeof(op->rsp_iu), DMA_FROM_DEVICE); 1957 1958 if (opstate == FCPOP_STATE_ABORTED) 1959 status = cpu_to_le16(NVME_SC_HOST_ABORTED_CMD << 1); 1960 else if (freq->status) { 1961 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); 1962 dev_info(ctrl->ctrl.device, 1963 "NVME-FC{%d}: io failed due to lldd error %d\n", 1964 ctrl->cnum, freq->status); 1965 } 1966 1967 /* 1968 * For the linux implementation, if we have an unsuccesful 1969 * status, they blk-mq layer can typically be called with the 1970 * non-zero status and the content of the cqe isn't important. 1971 */ 1972 if (status) 1973 goto done; 1974 1975 /* 1976 * command completed successfully relative to the wire 1977 * protocol. However, validate anything received and 1978 * extract the status and result from the cqe (create it 1979 * where necessary). 1980 */ 1981 1982 switch (freq->rcv_rsplen) { 1983 1984 case 0: 1985 case NVME_FC_SIZEOF_ZEROS_RSP: 1986 /* 1987 * No response payload or 12 bytes of payload (which 1988 * should all be zeros) are considered successful and 1989 * no payload in the CQE by the transport. 1990 */ 1991 if (freq->transferred_length != 1992 be32_to_cpu(op->cmd_iu.data_len)) { 1993 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); 1994 dev_info(ctrl->ctrl.device, 1995 "NVME-FC{%d}: io failed due to bad transfer " 1996 "length: %d vs expected %d\n", 1997 ctrl->cnum, freq->transferred_length, 1998 be32_to_cpu(op->cmd_iu.data_len)); 1999 goto done; 2000 } 2001 result.u64 = 0; 2002 break; 2003 2004 case sizeof(struct nvme_fc_ersp_iu): 2005 /* 2006 * The ERSP IU contains a full completion with CQE. 2007 * Validate ERSP IU and look at cqe. 2008 */ 2009 if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) != 2010 (freq->rcv_rsplen / 4) || 2011 be32_to_cpu(op->rsp_iu.xfrd_len) != 2012 freq->transferred_length || 2013 op->rsp_iu.ersp_result || 2014 sqe->common.command_id != cqe->command_id)) { 2015 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); 2016 dev_info(ctrl->ctrl.device, 2017 "NVME-FC{%d}: io failed due to bad NVMe_ERSP: " 2018 "iu len %d, xfr len %d vs %d, status code " 2019 "%d, cmdid %d vs %d\n", 2020 ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len), 2021 be32_to_cpu(op->rsp_iu.xfrd_len), 2022 freq->transferred_length, 2023 op->rsp_iu.ersp_result, 2024 sqe->common.command_id, 2025 cqe->command_id); 2026 goto done; 2027 } 2028 result = cqe->result; 2029 status = cqe->status; 2030 break; 2031 2032 default: 2033 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); 2034 dev_info(ctrl->ctrl.device, 2035 "NVME-FC{%d}: io failed due to odd NVMe_xRSP iu " 2036 "len %d\n", 2037 ctrl->cnum, freq->rcv_rsplen); 2038 goto done; 2039 } 2040 2041 terminate_assoc = false; 2042 2043done: 2044 if (op->flags & FCOP_FLAGS_AEN) { 2045 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); 2046 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); 2047 atomic_set(&op->state, FCPOP_STATE_IDLE); 2048 op->flags = FCOP_FLAGS_AEN; /* clear other flags */ 2049 nvme_fc_ctrl_put(ctrl); 2050 goto check_error; 2051 } 2052 2053 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); 2054 if (!nvme_try_complete_req(rq, status, result)) 2055 nvme_fc_complete_rq(rq); 2056 2057check_error: 2058 if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING) 2059 queue_work(nvme_reset_wq, &ctrl->ioerr_work); 2060} 2061 2062static int 2063__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, 2064 struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op, 2065 struct request *rq, u32 rqno) 2066{ 2067 struct nvme_fcp_op_w_sgl *op_w_sgl = 2068 container_of(op, typeof(*op_w_sgl), op); 2069 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; 2070 int ret = 0; 2071 2072 memset(op, 0, sizeof(*op)); 2073 op->fcp_req.cmdaddr = &op->cmd_iu; 2074 op->fcp_req.cmdlen = sizeof(op->cmd_iu); 2075 op->fcp_req.rspaddr = &op->rsp_iu; 2076 op->fcp_req.rsplen = sizeof(op->rsp_iu); 2077 op->fcp_req.done = nvme_fc_fcpio_done; 2078 op->ctrl = ctrl; 2079 op->queue = queue; 2080 op->rq = rq; 2081 op->rqno = rqno; 2082 2083 cmdiu->format_id = NVME_CMD_FORMAT_ID; 2084 cmdiu->fc_id = NVME_CMD_FC_ID; 2085 cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32)); 2086 if (queue->qnum) 2087 cmdiu->rsv_cat = fccmnd_set_cat_css(0, 2088 (NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT)); 2089 else 2090 cmdiu->rsv_cat = fccmnd_set_cat_admin(0); 2091 2092 op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev, 2093 &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE); 2094 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) { 2095 dev_err(ctrl->dev, 2096 "FCP Op failed - cmdiu dma mapping failed.\n"); 2097 ret = -EFAULT; 2098 goto out_on_error; 2099 } 2100 2101 op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev, 2102 &op->rsp_iu, sizeof(op->rsp_iu), 2103 DMA_FROM_DEVICE); 2104 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) { 2105 dev_err(ctrl->dev, 2106 "FCP Op failed - rspiu dma mapping failed.\n"); 2107 ret = -EFAULT; 2108 } 2109 2110 atomic_set(&op->state, FCPOP_STATE_IDLE); 2111out_on_error: 2112 return ret; 2113} 2114 2115static int 2116nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq, 2117 unsigned int hctx_idx, unsigned int numa_node) 2118{ 2119 struct nvme_fc_ctrl *ctrl = set->driver_data; 2120 struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq); 2121 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; 2122 struct nvme_fc_queue *queue = &ctrl->queues[queue_idx]; 2123 int res; 2124 2125 res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++); 2126 if (res) 2127 return res; 2128 op->op.fcp_req.first_sgl = op->sgl; 2129 op->op.fcp_req.private = &op->priv[0]; 2130 nvme_req(rq)->ctrl = &ctrl->ctrl; 2131 return res; 2132} 2133 2134static int 2135nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl) 2136{ 2137 struct nvme_fc_fcp_op *aen_op; 2138 struct nvme_fc_cmd_iu *cmdiu; 2139 struct nvme_command *sqe; 2140 void *private = NULL; 2141 int i, ret; 2142 2143 aen_op = ctrl->aen_ops; 2144 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { 2145 if (ctrl->lport->ops->fcprqst_priv_sz) { 2146 private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz, 2147 GFP_KERNEL); 2148 if (!private) 2149 return -ENOMEM; 2150 } 2151 2152 cmdiu = &aen_op->cmd_iu; 2153 sqe = &cmdiu->sqe; 2154 ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0], 2155 aen_op, (struct request *)NULL, 2156 (NVME_AQ_BLK_MQ_DEPTH + i)); 2157 if (ret) { 2158 kfree(private); 2159 return ret; 2160 } 2161 2162 aen_op->flags = FCOP_FLAGS_AEN; 2163 aen_op->fcp_req.private = private; 2164 2165 memset(sqe, 0, sizeof(*sqe)); 2166 sqe->common.opcode = nvme_admin_async_event; 2167 /* Note: core layer may overwrite the sqe.command_id value */ 2168 sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i; 2169 } 2170 return 0; 2171} 2172 2173static void 2174nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl) 2175{ 2176 struct nvme_fc_fcp_op *aen_op; 2177 int i; 2178 2179 cancel_work_sync(&ctrl->ctrl.async_event_work); 2180 aen_op = ctrl->aen_ops; 2181 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { 2182 __nvme_fc_exit_request(ctrl, aen_op); 2183 2184 kfree(aen_op->fcp_req.private); 2185 aen_op->fcp_req.private = NULL; 2186 } 2187} 2188 2189static inline void 2190__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl, 2191 unsigned int qidx) 2192{ 2193 struct nvme_fc_queue *queue = &ctrl->queues[qidx]; 2194 2195 hctx->driver_data = queue; 2196 queue->hctx = hctx; 2197} 2198 2199static int 2200nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 2201 unsigned int hctx_idx) 2202{ 2203 struct nvme_fc_ctrl *ctrl = data; 2204 2205 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1); 2206 2207 return 0; 2208} 2209 2210static int 2211nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, 2212 unsigned int hctx_idx) 2213{ 2214 struct nvme_fc_ctrl *ctrl = data; 2215 2216 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx); 2217 2218 return 0; 2219} 2220 2221static void 2222nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx) 2223{ 2224 struct nvme_fc_queue *queue; 2225 2226 queue = &ctrl->queues[idx]; 2227 memset(queue, 0, sizeof(*queue)); 2228 queue->ctrl = ctrl; 2229 queue->qnum = idx; 2230 atomic_set(&queue->csn, 0); 2231 queue->dev = ctrl->dev; 2232 2233 if (idx > 0) 2234 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; 2235 else 2236 queue->cmnd_capsule_len = sizeof(struct nvme_command); 2237 2238 /* 2239 * Considered whether we should allocate buffers for all SQEs 2240 * and CQEs and dma map them - mapping their respective entries 2241 * into the request structures (kernel vm addr and dma address) 2242 * thus the driver could use the buffers/mappings directly. 2243 * It only makes sense if the LLDD would use them for its 2244 * messaging api. It's very unlikely most adapter api's would use 2245 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload 2246 * structures were used instead. 2247 */ 2248} 2249 2250/* 2251 * This routine terminates a queue at the transport level. 2252 * The transport has already ensured that all outstanding ios on 2253 * the queue have been terminated. 2254 * The transport will send a Disconnect LS request to terminate 2255 * the queue's connection. Termination of the admin queue will also 2256 * terminate the association at the target. 2257 */ 2258static void 2259nvme_fc_free_queue(struct nvme_fc_queue *queue) 2260{ 2261 if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags)) 2262 return; 2263 2264 clear_bit(NVME_FC_Q_LIVE, &queue->flags); 2265 /* 2266 * Current implementation never disconnects a single queue. 2267 * It always terminates a whole association. So there is never 2268 * a disconnect(queue) LS sent to the target. 2269 */ 2270 2271 queue->connection_id = 0; 2272 atomic_set(&queue->csn, 0); 2273} 2274 2275static void 2276__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl, 2277 struct nvme_fc_queue *queue, unsigned int qidx) 2278{ 2279 if (ctrl->lport->ops->delete_queue) 2280 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx, 2281 queue->lldd_handle); 2282 queue->lldd_handle = NULL; 2283} 2284 2285static void 2286nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl) 2287{ 2288 int i; 2289 2290 for (i = 1; i < ctrl->ctrl.queue_count; i++) 2291 nvme_fc_free_queue(&ctrl->queues[i]); 2292} 2293 2294static int 2295__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl, 2296 struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize) 2297{ 2298 int ret = 0; 2299 2300 queue->lldd_handle = NULL; 2301 if (ctrl->lport->ops->create_queue) 2302 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport, 2303 qidx, qsize, &queue->lldd_handle); 2304 2305 return ret; 2306} 2307 2308static void 2309nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl) 2310{ 2311 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1]; 2312 int i; 2313 2314 for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--) 2315 __nvme_fc_delete_hw_queue(ctrl, queue, i); 2316} 2317 2318static int 2319nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) 2320{ 2321 struct nvme_fc_queue *queue = &ctrl->queues[1]; 2322 int i, ret; 2323 2324 for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) { 2325 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize); 2326 if (ret) 2327 goto delete_queues; 2328 } 2329 2330 return 0; 2331 2332delete_queues: 2333 for (; i > 0; i--) 2334 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i); 2335 return ret; 2336} 2337 2338static int 2339nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) 2340{ 2341 int i, ret = 0; 2342 2343 for (i = 1; i < ctrl->ctrl.queue_count; i++) { 2344 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize, 2345 (qsize / 5)); 2346 if (ret) 2347 break; 2348 ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false); 2349 if (ret) 2350 break; 2351 2352 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags); 2353 } 2354 2355 return ret; 2356} 2357 2358static void 2359nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl) 2360{ 2361 int i; 2362 2363 for (i = 1; i < ctrl->ctrl.queue_count; i++) 2364 nvme_fc_init_queue(ctrl, i); 2365} 2366 2367static void 2368nvme_fc_ctrl_free(struct kref *ref) 2369{ 2370 struct nvme_fc_ctrl *ctrl = 2371 container_of(ref, struct nvme_fc_ctrl, ref); 2372 unsigned long flags; 2373 2374 if (ctrl->ctrl.tagset) { 2375 blk_cleanup_queue(ctrl->ctrl.connect_q); 2376 blk_mq_free_tag_set(&ctrl->tag_set); 2377 } 2378 2379 /* remove from rport list */ 2380 spin_lock_irqsave(&ctrl->rport->lock, flags); 2381 list_del(&ctrl->ctrl_list); 2382 spin_unlock_irqrestore(&ctrl->rport->lock, flags); 2383 2384 nvme_start_admin_queue(&ctrl->ctrl); 2385 blk_cleanup_queue(ctrl->ctrl.admin_q); 2386 blk_cleanup_queue(ctrl->ctrl.fabrics_q); 2387 blk_mq_free_tag_set(&ctrl->admin_tag_set); 2388 2389 kfree(ctrl->queues); 2390 2391 put_device(ctrl->dev); 2392 nvme_fc_rport_put(ctrl->rport); 2393 2394 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); 2395 if (ctrl->ctrl.opts) 2396 nvmf_free_options(ctrl->ctrl.opts); 2397 kfree(ctrl); 2398} 2399 2400static void 2401nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl) 2402{ 2403 kref_put(&ctrl->ref, nvme_fc_ctrl_free); 2404} 2405 2406static int 2407nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl) 2408{ 2409 return kref_get_unless_zero(&ctrl->ref); 2410} 2411 2412/* 2413 * All accesses from nvme core layer done - can now free the 2414 * controller. Called after last nvme_put_ctrl() call 2415 */ 2416static void 2417nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl) 2418{ 2419 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); 2420 2421 WARN_ON(nctrl != &ctrl->ctrl); 2422 2423 nvme_fc_ctrl_put(ctrl); 2424} 2425 2426/* 2427 * This routine is used by the transport when it needs to find active 2428 * io on a queue that is to be terminated. The transport uses 2429 * blk_mq_tagset_busy_itr() to find the busy requests, which then invoke 2430 * this routine to kill them on a 1 by 1 basis. 2431 * 2432 * As FC allocates FC exchange for each io, the transport must contact 2433 * the LLDD to terminate the exchange, thus releasing the FC exchange. 2434 * After terminating the exchange the LLDD will call the transport's 2435 * normal io done path for the request, but it will have an aborted 2436 * status. The done path will return the io request back to the block 2437 * layer with an error status. 2438 */ 2439static bool 2440nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved) 2441{ 2442 struct nvme_ctrl *nctrl = data; 2443 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); 2444 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req); 2445 2446 op->nreq.flags |= NVME_REQ_CANCELLED; 2447 __nvme_fc_abort_op(ctrl, op); 2448 return true; 2449} 2450 2451/* 2452 * This routine runs through all outstanding commands on the association 2453 * and aborts them. This routine is typically be called by the 2454 * delete_association routine. It is also called due to an error during 2455 * reconnect. In that scenario, it is most likely a command that initializes 2456 * the controller, including fabric Connect commands on io queues, that 2457 * may have timed out or failed thus the io must be killed for the connect 2458 * thread to see the error. 2459 */ 2460static void 2461__nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues) 2462{ 2463 int q; 2464 2465 /* 2466 * if aborting io, the queues are no longer good, mark them 2467 * all as not live. 2468 */ 2469 if (ctrl->ctrl.queue_count > 1) { 2470 for (q = 1; q < ctrl->ctrl.queue_count; q++) 2471 clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags); 2472 } 2473 clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); 2474 2475 /* 2476 * If io queues are present, stop them and terminate all outstanding 2477 * ios on them. As FC allocates FC exchange for each io, the 2478 * transport must contact the LLDD to terminate the exchange, 2479 * thus releasing the FC exchange. We use blk_mq_tagset_busy_itr() 2480 * to tell us what io's are busy and invoke a transport routine 2481 * to kill them with the LLDD. After terminating the exchange 2482 * the LLDD will call the transport's normal io done path, but it 2483 * will have an aborted status. The done path will return the 2484 * io requests back to the block layer as part of normal completions 2485 * (but with error status). 2486 */ 2487 if (ctrl->ctrl.queue_count > 1) { 2488 nvme_stop_queues(&ctrl->ctrl); 2489 nvme_sync_io_queues(&ctrl->ctrl); 2490 blk_mq_tagset_busy_iter(&ctrl->tag_set, 2491 nvme_fc_terminate_exchange, &ctrl->ctrl); 2492 blk_mq_tagset_wait_completed_request(&ctrl->tag_set); 2493 if (start_queues) 2494 nvme_start_queues(&ctrl->ctrl); 2495 } 2496 2497 /* 2498 * Other transports, which don't have link-level contexts bound 2499 * to sqe's, would try to gracefully shutdown the controller by 2500 * writing the registers for shutdown and polling (call 2501 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially 2502 * just aborted and we will wait on those contexts, and given 2503 * there was no indication of how live the controlelr is on the 2504 * link, don't send more io to create more contexts for the 2505 * shutdown. Let the controller fail via keepalive failure if 2506 * its still present. 2507 */ 2508 2509 /* 2510 * clean up the admin queue. Same thing as above. 2511 */ 2512 nvme_stop_admin_queue(&ctrl->ctrl); 2513 blk_sync_queue(ctrl->ctrl.admin_q); 2514 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, 2515 nvme_fc_terminate_exchange, &ctrl->ctrl); 2516 blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set); 2517} 2518 2519static void 2520nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) 2521{ 2522 /* 2523 * if an error (io timeout, etc) while (re)connecting, the remote 2524 * port requested terminating of the association (disconnect_ls) 2525 * or an error (timeout or abort) occurred on an io while creating 2526 * the controller. Abort any ios on the association and let the 2527 * create_association error path resolve things. 2528 */ 2529 if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) { 2530 __nvme_fc_abort_outstanding_ios(ctrl, true); 2531 set_bit(ASSOC_FAILED, &ctrl->flags); 2532 return; 2533 } 2534 2535 /* Otherwise, only proceed if in LIVE state - e.g. on first error */ 2536 if (ctrl->ctrl.state != NVME_CTRL_LIVE) 2537 return; 2538 2539 dev_warn(ctrl->ctrl.device, 2540 "NVME-FC{%d}: transport association event: %s\n", 2541 ctrl->cnum, errmsg); 2542 dev_warn(ctrl->ctrl.device, 2543 "NVME-FC{%d}: resetting controller\n", ctrl->cnum); 2544 2545 nvme_reset_ctrl(&ctrl->ctrl); 2546} 2547 2548static enum blk_eh_timer_return 2549nvme_fc_timeout(struct request *rq, bool reserved) 2550{ 2551 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); 2552 struct nvme_fc_ctrl *ctrl = op->ctrl; 2553 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; 2554 struct nvme_command *sqe = &cmdiu->sqe; 2555 2556 /* 2557 * Attempt to abort the offending command. Command completion 2558 * will detect the aborted io and will fail the connection. 2559 */ 2560 dev_info(ctrl->ctrl.device, 2561 "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: " 2562 "x%08x/x%08x\n", 2563 ctrl->cnum, op->queue->qnum, sqe->common.opcode, 2564 sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11); 2565 if (__nvme_fc_abort_op(ctrl, op)) 2566 nvme_fc_error_recovery(ctrl, "io timeout abort failed"); 2567 2568 /* 2569 * the io abort has been initiated. Have the reset timer 2570 * restarted and the abort completion will complete the io 2571 * shortly. Avoids a synchronous wait while the abort finishes. 2572 */ 2573 return BLK_EH_RESET_TIMER; 2574} 2575 2576static int 2577nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq, 2578 struct nvme_fc_fcp_op *op) 2579{ 2580 struct nvmefc_fcp_req *freq = &op->fcp_req; 2581 int ret; 2582 2583 freq->sg_cnt = 0; 2584 2585 if (!blk_rq_nr_phys_segments(rq)) 2586 return 0; 2587 2588 freq->sg_table.sgl = freq->first_sgl; 2589 ret = sg_alloc_table_chained(&freq->sg_table, 2590 blk_rq_nr_phys_segments(rq), freq->sg_table.sgl, 2591 NVME_INLINE_SG_CNT); 2592 if (ret) 2593 return -ENOMEM; 2594 2595 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl); 2596 WARN_ON(op->nents > blk_rq_nr_phys_segments(rq)); 2597 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl, 2598 op->nents, rq_dma_dir(rq)); 2599 if (unlikely(freq->sg_cnt <= 0)) { 2600 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT); 2601 freq->sg_cnt = 0; 2602 return -EFAULT; 2603 } 2604 2605 /* 2606 * TODO: blk_integrity_rq(rq) for DIF 2607 */ 2608 return 0; 2609} 2610 2611static void 2612nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq, 2613 struct nvme_fc_fcp_op *op) 2614{ 2615 struct nvmefc_fcp_req *freq = &op->fcp_req; 2616 2617 if (!freq->sg_cnt) 2618 return; 2619 2620 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents, 2621 rq_dma_dir(rq)); 2622 2623 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT); 2624 2625 freq->sg_cnt = 0; 2626} 2627 2628/* 2629 * In FC, the queue is a logical thing. At transport connect, the target 2630 * creates its "queue" and returns a handle that is to be given to the 2631 * target whenever it posts something to the corresponding SQ. When an 2632 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the 2633 * command contained within the SQE, an io, and assigns a FC exchange 2634 * to it. The SQE and the associated SQ handle are sent in the initial 2635 * CMD IU sents on the exchange. All transfers relative to the io occur 2636 * as part of the exchange. The CQE is the last thing for the io, 2637 * which is transferred (explicitly or implicitly) with the RSP IU 2638 * sent on the exchange. After the CQE is received, the FC exchange is 2639 * terminaed and the Exchange may be used on a different io. 2640 * 2641 * The transport to LLDD api has the transport making a request for a 2642 * new fcp io request to the LLDD. The LLDD then allocates a FC exchange 2643 * resource and transfers the command. The LLDD will then process all 2644 * steps to complete the io. Upon completion, the transport done routine 2645 * is called. 2646 * 2647 * So - while the operation is outstanding to the LLDD, there is a link 2648 * level FC exchange resource that is also outstanding. This must be 2649 * considered in all cleanup operations. 2650 */ 2651static blk_status_t 2652nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, 2653 struct nvme_fc_fcp_op *op, u32 data_len, 2654 enum nvmefc_fcp_datadir io_dir) 2655{ 2656 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; 2657 struct nvme_command *sqe = &cmdiu->sqe; 2658 int ret, opstate; 2659 2660 /* 2661 * before attempting to send the io, check to see if we believe 2662 * the target device is present 2663 */ 2664 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) 2665 return BLK_STS_RESOURCE; 2666 2667 if (!nvme_fc_ctrl_get(ctrl)) 2668 return BLK_STS_IOERR; 2669 2670 /* format the FC-NVME CMD IU and fcp_req */ 2671 cmdiu->connection_id = cpu_to_be64(queue->connection_id); 2672 cmdiu->data_len = cpu_to_be32(data_len); 2673 switch (io_dir) { 2674 case NVMEFC_FCP_WRITE: 2675 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE; 2676 break; 2677 case NVMEFC_FCP_READ: 2678 cmdiu->flags = FCNVME_CMD_FLAGS_READ; 2679 break; 2680 case NVMEFC_FCP_NODATA: 2681 cmdiu->flags = 0; 2682 break; 2683 } 2684 op->fcp_req.payload_length = data_len; 2685 op->fcp_req.io_dir = io_dir; 2686 op->fcp_req.transferred_length = 0; 2687 op->fcp_req.rcv_rsplen = 0; 2688 op->fcp_req.status = NVME_SC_SUCCESS; 2689 op->fcp_req.sqid = cpu_to_le16(queue->qnum); 2690 2691 /* 2692 * validate per fabric rules, set fields mandated by fabric spec 2693 * as well as those by FC-NVME spec. 2694 */ 2695 WARN_ON_ONCE(sqe->common.metadata); 2696 sqe->common.flags |= NVME_CMD_SGL_METABUF; 2697 2698 /* 2699 * format SQE DPTR field per FC-NVME rules: 2700 * type=0x5 Transport SGL Data Block Descriptor 2701 * subtype=0xA Transport-specific value 2702 * address=0 2703 * length=length of the data series 2704 */ 2705 sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | 2706 NVME_SGL_FMT_TRANSPORT_A; 2707 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len); 2708 sqe->rw.dptr.sgl.addr = 0; 2709 2710 if (!(op->flags & FCOP_FLAGS_AEN)) { 2711 ret = nvme_fc_map_data(ctrl, op->rq, op); 2712 if (ret < 0) { 2713 nvme_cleanup_cmd(op->rq); 2714 nvme_fc_ctrl_put(ctrl); 2715 if (ret == -ENOMEM || ret == -EAGAIN) 2716 return BLK_STS_RESOURCE; 2717 return BLK_STS_IOERR; 2718 } 2719 } 2720 2721 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma, 2722 sizeof(op->cmd_iu), DMA_TO_DEVICE); 2723 2724 atomic_set(&op->state, FCPOP_STATE_ACTIVE); 2725 2726 if (!(op->flags & FCOP_FLAGS_AEN)) 2727 blk_mq_start_request(op->rq); 2728 2729 cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn)); 2730 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport, 2731 &ctrl->rport->remoteport, 2732 queue->lldd_handle, &op->fcp_req); 2733 2734 if (ret) { 2735 /* 2736 * If the lld fails to send the command is there an issue with 2737 * the csn value? If the command that fails is the Connect, 2738 * no - as the connection won't be live. If it is a command 2739 * post-connect, it's possible a gap in csn may be created. 2740 * Does this matter? As Linux initiators don't send fused 2741 * commands, no. The gap would exist, but as there's nothing 2742 * that depends on csn order to be delivered on the target 2743 * side, it shouldn't hurt. It would be difficult for a 2744 * target to even detect the csn gap as it has no idea when the 2745 * cmd with the csn was supposed to arrive. 2746 */ 2747 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); 2748 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); 2749 2750 if (!(op->flags & FCOP_FLAGS_AEN)) { 2751 nvme_fc_unmap_data(ctrl, op->rq, op); 2752 nvme_cleanup_cmd(op->rq); 2753 } 2754 2755 nvme_fc_ctrl_put(ctrl); 2756 2757 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE && 2758 ret != -EBUSY) 2759 return BLK_STS_IOERR; 2760 2761 return BLK_STS_RESOURCE; 2762 } 2763 2764 return BLK_STS_OK; 2765} 2766 2767static blk_status_t 2768nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, 2769 const struct blk_mq_queue_data *bd) 2770{ 2771 struct nvme_ns *ns = hctx->queue->queuedata; 2772 struct nvme_fc_queue *queue = hctx->driver_data; 2773 struct nvme_fc_ctrl *ctrl = queue->ctrl; 2774 struct request *rq = bd->rq; 2775 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); 2776 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; 2777 struct nvme_command *sqe = &cmdiu->sqe; 2778 enum nvmefc_fcp_datadir io_dir; 2779 bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags); 2780 u32 data_len; 2781 blk_status_t ret; 2782 2783 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE || 2784 !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) 2785 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq); 2786 2787 ret = nvme_setup_cmd(ns, rq, sqe); 2788 if (ret) 2789 return ret; 2790 2791 /* 2792 * nvme core doesn't quite treat the rq opaquely. Commands such 2793 * as WRITE ZEROES will return a non-zero rq payload_bytes yet 2794 * there is no actual payload to be transferred. 2795 * To get it right, key data transmission on there being 1 or 2796 * more physical segments in the sg list. If there is no 2797 * physical segments, there is no payload. 2798 */ 2799 if (blk_rq_nr_phys_segments(rq)) { 2800 data_len = blk_rq_payload_bytes(rq); 2801 io_dir = ((rq_data_dir(rq) == WRITE) ? 2802 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ); 2803 } else { 2804 data_len = 0; 2805 io_dir = NVMEFC_FCP_NODATA; 2806 } 2807 2808 2809 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir); 2810} 2811 2812static void 2813nvme_fc_submit_async_event(struct nvme_ctrl *arg) 2814{ 2815 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg); 2816 struct nvme_fc_fcp_op *aen_op; 2817 blk_status_t ret; 2818 2819 if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) 2820 return; 2821 2822 aen_op = &ctrl->aen_ops[0]; 2823 2824 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0, 2825 NVMEFC_FCP_NODATA); 2826 if (ret) 2827 dev_err(ctrl->ctrl.device, 2828 "failed async event work\n"); 2829} 2830 2831static void 2832nvme_fc_complete_rq(struct request *rq) 2833{ 2834 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); 2835 struct nvme_fc_ctrl *ctrl = op->ctrl; 2836 2837 atomic_set(&op->state, FCPOP_STATE_IDLE); 2838 op->flags &= ~FCOP_FLAGS_TERMIO; 2839 2840 nvme_fc_unmap_data(ctrl, rq, op); 2841 nvme_complete_rq(rq); 2842 nvme_fc_ctrl_put(ctrl); 2843} 2844 2845 2846static const struct blk_mq_ops nvme_fc_mq_ops = { 2847 .queue_rq = nvme_fc_queue_rq, 2848 .complete = nvme_fc_complete_rq, 2849 .init_request = nvme_fc_init_request, 2850 .exit_request = nvme_fc_exit_request, 2851 .init_hctx = nvme_fc_init_hctx, 2852 .timeout = nvme_fc_timeout, 2853}; 2854 2855static int 2856nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) 2857{ 2858 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; 2859 unsigned int nr_io_queues; 2860 int ret; 2861 2862 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), 2863 ctrl->lport->ops->max_hw_queues); 2864 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); 2865 if (ret) { 2866 dev_info(ctrl->ctrl.device, 2867 "set_queue_count failed: %d\n", ret); 2868 return ret; 2869 } 2870 2871 ctrl->ctrl.queue_count = nr_io_queues + 1; 2872 if (!nr_io_queues) 2873 return 0; 2874 2875 nvme_fc_init_io_queues(ctrl); 2876 2877 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); 2878 ctrl->tag_set.ops = &nvme_fc_mq_ops; 2879 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; 2880 ctrl->tag_set.reserved_tags = 1; /* fabric connect */ 2881 ctrl->tag_set.numa_node = ctrl->ctrl.numa_node; 2882 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 2883 ctrl->tag_set.cmd_size = 2884 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv, 2885 ctrl->lport->ops->fcprqst_priv_sz); 2886 ctrl->tag_set.driver_data = ctrl; 2887 ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1; 2888 ctrl->tag_set.timeout = NVME_IO_TIMEOUT; 2889 2890 ret = blk_mq_alloc_tag_set(&ctrl->tag_set); 2891 if (ret) 2892 return ret; 2893 2894 ctrl->ctrl.tagset = &ctrl->tag_set; 2895 2896 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); 2897 if (IS_ERR(ctrl->ctrl.connect_q)) { 2898 ret = PTR_ERR(ctrl->ctrl.connect_q); 2899 goto out_free_tag_set; 2900 } 2901 2902 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); 2903 if (ret) 2904 goto out_cleanup_blk_queue; 2905 2906 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); 2907 if (ret) 2908 goto out_delete_hw_queues; 2909 2910 ctrl->ioq_live = true; 2911 2912 return 0; 2913 2914out_delete_hw_queues: 2915 nvme_fc_delete_hw_io_queues(ctrl); 2916out_cleanup_blk_queue: 2917 blk_cleanup_queue(ctrl->ctrl.connect_q); 2918out_free_tag_set: 2919 blk_mq_free_tag_set(&ctrl->tag_set); 2920 nvme_fc_free_io_queues(ctrl); 2921 2922 /* force put free routine to ignore io queues */ 2923 ctrl->ctrl.tagset = NULL; 2924 2925 return ret; 2926} 2927 2928static int 2929nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl) 2930{ 2931 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; 2932 u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1; 2933 unsigned int nr_io_queues; 2934 int ret; 2935 2936 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), 2937 ctrl->lport->ops->max_hw_queues); 2938 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); 2939 if (ret) { 2940 dev_info(ctrl->ctrl.device, 2941 "set_queue_count failed: %d\n", ret); 2942 return ret; 2943 } 2944 2945 if (!nr_io_queues && prior_ioq_cnt) { 2946 dev_info(ctrl->ctrl.device, 2947 "Fail Reconnect: At least 1 io queue " 2948 "required (was %d)\n", prior_ioq_cnt); 2949 return -ENOSPC; 2950 } 2951 2952 ctrl->ctrl.queue_count = nr_io_queues + 1; 2953 /* check for io queues existing */ 2954 if (ctrl->ctrl.queue_count == 1) 2955 return 0; 2956 2957 if (prior_ioq_cnt != nr_io_queues) { 2958 dev_info(ctrl->ctrl.device, 2959 "reconnect: revising io queue count from %d to %d\n", 2960 prior_ioq_cnt, nr_io_queues); 2961 nvme_wait_freeze(&ctrl->ctrl); 2962 blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues); 2963 nvme_unfreeze(&ctrl->ctrl); 2964 } 2965 2966 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); 2967 if (ret) 2968 goto out_free_io_queues; 2969 2970 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); 2971 if (ret) 2972 goto out_delete_hw_queues; 2973 2974 return 0; 2975 2976out_delete_hw_queues: 2977 nvme_fc_delete_hw_io_queues(ctrl); 2978out_free_io_queues: 2979 nvme_fc_free_io_queues(ctrl); 2980 return ret; 2981} 2982 2983static void 2984nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport) 2985{ 2986 struct nvme_fc_lport *lport = rport->lport; 2987 2988 atomic_inc(&lport->act_rport_cnt); 2989} 2990 2991static void 2992nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport) 2993{ 2994 struct nvme_fc_lport *lport = rport->lport; 2995 u32 cnt; 2996 2997 cnt = atomic_dec_return(&lport->act_rport_cnt); 2998 if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED) 2999 lport->ops->localport_delete(&lport->localport); 3000} 3001 3002static int 3003nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl) 3004{ 3005 struct nvme_fc_rport *rport = ctrl->rport; 3006 u32 cnt; 3007 3008 if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags)) 3009 return 1; 3010 3011 cnt = atomic_inc_return(&rport->act_ctrl_cnt); 3012 if (cnt == 1) 3013 nvme_fc_rport_active_on_lport(rport); 3014 3015 return 0; 3016} 3017 3018static int 3019nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl) 3020{ 3021 struct nvme_fc_rport *rport = ctrl->rport; 3022 struct nvme_fc_lport *lport = rport->lport; 3023 u32 cnt; 3024 3025 /* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */ 3026 3027 cnt = atomic_dec_return(&rport->act_ctrl_cnt); 3028 if (cnt == 0) { 3029 if (rport->remoteport.port_state == FC_OBJSTATE_DELETED) 3030 lport->ops->remoteport_delete(&rport->remoteport); 3031 nvme_fc_rport_inactive_on_lport(rport); 3032 } 3033 3034 return 0; 3035} 3036 3037/* 3038 * This routine restarts the controller on the host side, and 3039 * on the link side, recreates the controller association. 3040 */ 3041static int 3042nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) 3043{ 3044 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; 3045 struct nvmefc_ls_rcv_op *disls = NULL; 3046 unsigned long flags; 3047 int ret; 3048 bool changed; 3049 3050 ++ctrl->ctrl.nr_reconnects; 3051 3052 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) 3053 return -ENODEV; 3054 3055 if (nvme_fc_ctlr_active_on_rport(ctrl)) 3056 return -ENOTUNIQ; 3057 3058 dev_info(ctrl->ctrl.device, 3059 "NVME-FC{%d}: create association : host wwpn 0x%016llx " 3060 " rport wwpn 0x%016llx: NQN \"%s\"\n", 3061 ctrl->cnum, ctrl->lport->localport.port_name, 3062 ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn); 3063 3064 clear_bit(ASSOC_FAILED, &ctrl->flags); 3065 3066 /* 3067 * Create the admin queue 3068 */ 3069 3070 ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0, 3071 NVME_AQ_DEPTH); 3072 if (ret) 3073 goto out_free_queue; 3074 3075 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0], 3076 NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4)); 3077 if (ret) 3078 goto out_delete_hw_queue; 3079 3080 ret = nvmf_connect_admin_queue(&ctrl->ctrl); 3081 if (ret) 3082 goto out_disconnect_admin_queue; 3083 3084 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); 3085 3086 /* 3087 * Check controller capabilities 3088 * 3089 * todo:- add code to check if ctrl attributes changed from 3090 * prior connection values 3091 */ 3092 3093 ret = nvme_enable_ctrl(&ctrl->ctrl); 3094 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags)) 3095 goto out_disconnect_admin_queue; 3096 3097 ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments; 3098 ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments << 3099 (ilog2(SZ_4K) - 9); 3100 3101 nvme_start_admin_queue(&ctrl->ctrl); 3102 3103 ret = nvme_init_identify(&ctrl->ctrl); 3104 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags)) 3105 goto out_disconnect_admin_queue; 3106 3107 /* sanity checks */ 3108 3109 /* FC-NVME does not have other data in the capsule */ 3110 if (ctrl->ctrl.icdoff) { 3111 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", 3112 ctrl->ctrl.icdoff); 3113 goto out_disconnect_admin_queue; 3114 } 3115 3116 /* FC-NVME supports normal SGL Data Block Descriptors */ 3117 3118 if (opts->queue_size > ctrl->ctrl.maxcmd) { 3119 /* warn if maxcmd is lower than queue_size */ 3120 dev_warn(ctrl->ctrl.device, 3121 "queue_size %zu > ctrl maxcmd %u, reducing " 3122 "to maxcmd\n", 3123 opts->queue_size, ctrl->ctrl.maxcmd); 3124 opts->queue_size = ctrl->ctrl.maxcmd; 3125 } 3126 3127 if (opts->queue_size > ctrl->ctrl.sqsize + 1) { 3128 /* warn if sqsize is lower than queue_size */ 3129 dev_warn(ctrl->ctrl.device, 3130 "queue_size %zu > ctrl sqsize %u, reducing " 3131 "to sqsize\n", 3132 opts->queue_size, ctrl->ctrl.sqsize + 1); 3133 opts->queue_size = ctrl->ctrl.sqsize + 1; 3134 } 3135 3136 ret = nvme_fc_init_aen_ops(ctrl); 3137 if (ret) 3138 goto out_term_aen_ops; 3139 3140 /* 3141 * Create the io queues 3142 */ 3143 3144 if (ctrl->ctrl.queue_count > 1) { 3145 if (!ctrl->ioq_live) 3146 ret = nvme_fc_create_io_queues(ctrl); 3147 else 3148 ret = nvme_fc_recreate_io_queues(ctrl); 3149 } 3150 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags)) 3151 goto out_term_aen_ops; 3152 3153 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); 3154 3155 ctrl->ctrl.nr_reconnects = 0; 3156 3157 if (changed) 3158 nvme_start_ctrl(&ctrl->ctrl); 3159 3160 return 0; /* Success */ 3161 3162out_term_aen_ops: 3163 nvme_fc_term_aen_ops(ctrl); 3164out_disconnect_admin_queue: 3165 /* send a Disconnect(association) LS to fc-nvme target */ 3166 nvme_fc_xmt_disconnect_assoc(ctrl); 3167 spin_lock_irqsave(&ctrl->lock, flags); 3168 ctrl->association_id = 0; 3169 disls = ctrl->rcv_disconn; 3170 ctrl->rcv_disconn = NULL; 3171 spin_unlock_irqrestore(&ctrl->lock, flags); 3172 if (disls) 3173 nvme_fc_xmt_ls_rsp(disls); 3174out_delete_hw_queue: 3175 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); 3176out_free_queue: 3177 nvme_fc_free_queue(&ctrl->queues[0]); 3178 clear_bit(ASSOC_ACTIVE, &ctrl->flags); 3179 nvme_fc_ctlr_inactive_on_rport(ctrl); 3180 3181 return ret; 3182} 3183 3184 3185/* 3186 * This routine stops operation of the controller on the host side. 3187 * On the host os stack side: Admin and IO queues are stopped, 3188 * outstanding ios on them terminated via FC ABTS. 3189 * On the link side: the association is terminated. 3190 */ 3191static void 3192nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl) 3193{ 3194 struct nvmefc_ls_rcv_op *disls = NULL; 3195 unsigned long flags; 3196 3197 if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags)) 3198 return; 3199 3200 spin_lock_irqsave(&ctrl->lock, flags); 3201 set_bit(FCCTRL_TERMIO, &ctrl->flags); 3202 ctrl->iocnt = 0; 3203 spin_unlock_irqrestore(&ctrl->lock, flags); 3204 3205 __nvme_fc_abort_outstanding_ios(ctrl, false); 3206 3207 /* kill the aens as they are a separate path */ 3208 nvme_fc_abort_aen_ops(ctrl); 3209 3210 /* wait for all io that had to be aborted */ 3211 spin_lock_irq(&ctrl->lock); 3212 wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock); 3213 clear_bit(FCCTRL_TERMIO, &ctrl->flags); 3214 spin_unlock_irq(&ctrl->lock); 3215 3216 nvme_fc_term_aen_ops(ctrl); 3217 3218 /* 3219 * send a Disconnect(association) LS to fc-nvme target 3220 * Note: could have been sent at top of process, but 3221 * cleaner on link traffic if after the aborts complete. 3222 * Note: if association doesn't exist, association_id will be 0 3223 */ 3224 if (ctrl->association_id) 3225 nvme_fc_xmt_disconnect_assoc(ctrl); 3226 3227 spin_lock_irqsave(&ctrl->lock, flags); 3228 ctrl->association_id = 0; 3229 disls = ctrl->rcv_disconn; 3230 ctrl->rcv_disconn = NULL; 3231 spin_unlock_irqrestore(&ctrl->lock, flags); 3232 if (disls) 3233 /* 3234 * if a Disconnect Request was waiting for a response, send 3235 * now that all ABTS's have been issued (and are complete). 3236 */ 3237 nvme_fc_xmt_ls_rsp(disls); 3238 3239 if (ctrl->ctrl.tagset) { 3240 nvme_fc_delete_hw_io_queues(ctrl); 3241 nvme_fc_free_io_queues(ctrl); 3242 } 3243 3244 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); 3245 nvme_fc_free_queue(&ctrl->queues[0]); 3246 3247 /* re-enable the admin_q so anything new can fast fail */ 3248 nvme_start_admin_queue(&ctrl->ctrl); 3249 3250 /* resume the io queues so that things will fast fail */ 3251 nvme_start_queues(&ctrl->ctrl); 3252 3253 nvme_fc_ctlr_inactive_on_rport(ctrl); 3254} 3255 3256static void 3257nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl) 3258{ 3259 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); 3260 3261 cancel_work_sync(&ctrl->ioerr_work); 3262 cancel_delayed_work_sync(&ctrl->connect_work); 3263 /* 3264 * kill the association on the link side. this will block 3265 * waiting for io to terminate 3266 */ 3267 nvme_fc_delete_association(ctrl); 3268} 3269 3270static void 3271nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) 3272{ 3273 struct nvme_fc_rport *rport = ctrl->rport; 3274 struct nvme_fc_remote_port *portptr = &rport->remoteport; 3275 unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ; 3276 bool recon = true; 3277 3278 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) 3279 return; 3280 3281 if (portptr->port_state == FC_OBJSTATE_ONLINE) 3282 dev_info(ctrl->ctrl.device, 3283 "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n", 3284 ctrl->cnum, status); 3285 else if (time_after_eq(jiffies, rport->dev_loss_end)) 3286 recon = false; 3287 3288 if (recon && nvmf_should_reconnect(&ctrl->ctrl)) { 3289 if (portptr->port_state == FC_OBJSTATE_ONLINE) 3290 dev_info(ctrl->ctrl.device, 3291 "NVME-FC{%d}: Reconnect attempt in %ld " 3292 "seconds\n", 3293 ctrl->cnum, recon_delay / HZ); 3294 else if (time_after(jiffies + recon_delay, rport->dev_loss_end)) 3295 recon_delay = rport->dev_loss_end - jiffies; 3296 3297 queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay); 3298 } else { 3299 if (portptr->port_state == FC_OBJSTATE_ONLINE) 3300 dev_warn(ctrl->ctrl.device, 3301 "NVME-FC{%d}: Max reconnect attempts (%d) " 3302 "reached.\n", 3303 ctrl->cnum, ctrl->ctrl.nr_reconnects); 3304 else 3305 dev_warn(ctrl->ctrl.device, 3306 "NVME-FC{%d}: dev_loss_tmo (%d) expired " 3307 "while waiting for remoteport connectivity.\n", 3308 ctrl->cnum, min_t(int, portptr->dev_loss_tmo, 3309 (ctrl->ctrl.opts->max_reconnects * 3310 ctrl->ctrl.opts->reconnect_delay))); 3311 WARN_ON(nvme_delete_ctrl(&ctrl->ctrl)); 3312 } 3313} 3314 3315static void 3316nvme_fc_reset_ctrl_work(struct work_struct *work) 3317{ 3318 struct nvme_fc_ctrl *ctrl = 3319 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work); 3320 3321 nvme_stop_ctrl(&ctrl->ctrl); 3322 3323 /* will block will waiting for io to terminate */ 3324 nvme_fc_delete_association(ctrl); 3325 3326 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) 3327 dev_err(ctrl->ctrl.device, 3328 "NVME-FC{%d}: error_recovery: Couldn't change state " 3329 "to CONNECTING\n", ctrl->cnum); 3330 3331 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) { 3332 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { 3333 dev_err(ctrl->ctrl.device, 3334 "NVME-FC{%d}: failed to schedule connect " 3335 "after reset\n", ctrl->cnum); 3336 } else { 3337 flush_delayed_work(&ctrl->connect_work); 3338 } 3339 } else { 3340 nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN); 3341 } 3342} 3343 3344 3345static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { 3346 .name = "fc", 3347 .module = THIS_MODULE, 3348 .flags = NVME_F_FABRICS, 3349 .reg_read32 = nvmf_reg_read32, 3350 .reg_read64 = nvmf_reg_read64, 3351 .reg_write32 = nvmf_reg_write32, 3352 .free_ctrl = nvme_fc_nvme_ctrl_freed, 3353 .submit_async_event = nvme_fc_submit_async_event, 3354 .delete_ctrl = nvme_fc_delete_ctrl, 3355 .get_address = nvmf_get_address, 3356}; 3357 3358static void 3359nvme_fc_connect_ctrl_work(struct work_struct *work) 3360{ 3361 int ret; 3362 3363 struct nvme_fc_ctrl *ctrl = 3364 container_of(to_delayed_work(work), 3365 struct nvme_fc_ctrl, connect_work); 3366 3367 ret = nvme_fc_create_association(ctrl); 3368 if (ret) 3369 nvme_fc_reconnect_or_delete(ctrl, ret); 3370 else 3371 dev_info(ctrl->ctrl.device, 3372 "NVME-FC{%d}: controller connect complete\n", 3373 ctrl->cnum); 3374} 3375 3376 3377static const struct blk_mq_ops nvme_fc_admin_mq_ops = { 3378 .queue_rq = nvme_fc_queue_rq, 3379 .complete = nvme_fc_complete_rq, 3380 .init_request = nvme_fc_init_request, 3381 .exit_request = nvme_fc_exit_request, 3382 .init_hctx = nvme_fc_init_admin_hctx, 3383 .timeout = nvme_fc_timeout, 3384}; 3385 3386 3387/* 3388 * Fails a controller request if it matches an existing controller 3389 * (association) with the same tuple: 3390 * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN> 3391 * 3392 * The ports don't need to be compared as they are intrinsically 3393 * already matched by the port pointers supplied. 3394 */ 3395static bool 3396nvme_fc_existing_controller(struct nvme_fc_rport *rport, 3397 struct nvmf_ctrl_options *opts) 3398{ 3399 struct nvme_fc_ctrl *ctrl; 3400 unsigned long flags; 3401 bool found = false; 3402 3403 spin_lock_irqsave(&rport->lock, flags); 3404 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { 3405 found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts); 3406 if (found) 3407 break; 3408 } 3409 spin_unlock_irqrestore(&rport->lock, flags); 3410 3411 return found; 3412} 3413 3414static struct nvme_ctrl * 3415nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, 3416 struct nvme_fc_lport *lport, struct nvme_fc_rport *rport) 3417{ 3418 struct nvme_fc_ctrl *ctrl; 3419 unsigned long flags; 3420 int ret, idx, ctrl_loss_tmo; 3421 3422 if (!(rport->remoteport.port_role & 3423 (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) { 3424 ret = -EBADR; 3425 goto out_fail; 3426 } 3427 3428 if (!opts->duplicate_connect && 3429 nvme_fc_existing_controller(rport, opts)) { 3430 ret = -EALREADY; 3431 goto out_fail; 3432 } 3433 3434 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 3435 if (!ctrl) { 3436 ret = -ENOMEM; 3437 goto out_fail; 3438 } 3439 3440 idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL); 3441 if (idx < 0) { 3442 ret = -ENOSPC; 3443 goto out_free_ctrl; 3444 } 3445 3446 /* 3447 * if ctrl_loss_tmo is being enforced and the default reconnect delay 3448 * is being used, change to a shorter reconnect delay for FC. 3449 */ 3450 if (opts->max_reconnects != -1 && 3451 opts->reconnect_delay == NVMF_DEF_RECONNECT_DELAY && 3452 opts->reconnect_delay > NVME_FC_DEFAULT_RECONNECT_TMO) { 3453 ctrl_loss_tmo = opts->max_reconnects * opts->reconnect_delay; 3454 opts->reconnect_delay = NVME_FC_DEFAULT_RECONNECT_TMO; 3455 opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, 3456 opts->reconnect_delay); 3457 } 3458 3459 ctrl->ctrl.opts = opts; 3460 ctrl->ctrl.nr_reconnects = 0; 3461 if (lport->dev) 3462 ctrl->ctrl.numa_node = dev_to_node(lport->dev); 3463 else 3464 ctrl->ctrl.numa_node = NUMA_NO_NODE; 3465 INIT_LIST_HEAD(&ctrl->ctrl_list); 3466 ctrl->lport = lport; 3467 ctrl->rport = rport; 3468 ctrl->dev = lport->dev; 3469 ctrl->cnum = idx; 3470 ctrl->ioq_live = false; 3471 init_waitqueue_head(&ctrl->ioabort_wait); 3472 3473 get_device(ctrl->dev); 3474 kref_init(&ctrl->ref); 3475 3476 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work); 3477 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); 3478 INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work); 3479 spin_lock_init(&ctrl->lock); 3480 3481 /* io queue count */ 3482 ctrl->ctrl.queue_count = min_t(unsigned int, 3483 opts->nr_io_queues, 3484 lport->ops->max_hw_queues); 3485 ctrl->ctrl.queue_count++; /* +1 for admin queue */ 3486 3487 ctrl->ctrl.sqsize = opts->queue_size - 1; 3488 ctrl->ctrl.kato = opts->kato; 3489 ctrl->ctrl.cntlid = 0xffff; 3490 3491 ret = -ENOMEM; 3492 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, 3493 sizeof(struct nvme_fc_queue), GFP_KERNEL); 3494 if (!ctrl->queues) 3495 goto out_free_ida; 3496 3497 nvme_fc_init_queue(ctrl, 0); 3498 3499 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set)); 3500 ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops; 3501 ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH; 3502 ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */ 3503 ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node; 3504 ctrl->admin_tag_set.cmd_size = 3505 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv, 3506 ctrl->lport->ops->fcprqst_priv_sz); 3507 ctrl->admin_tag_set.driver_data = ctrl; 3508 ctrl->admin_tag_set.nr_hw_queues = 1; 3509 ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT; 3510 ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED; 3511 3512 ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set); 3513 if (ret) 3514 goto out_free_queues; 3515 ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set; 3516 3517 ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set); 3518 if (IS_ERR(ctrl->ctrl.fabrics_q)) { 3519 ret = PTR_ERR(ctrl->ctrl.fabrics_q); 3520 goto out_free_admin_tag_set; 3521 } 3522 3523 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); 3524 if (IS_ERR(ctrl->ctrl.admin_q)) { 3525 ret = PTR_ERR(ctrl->ctrl.admin_q); 3526 goto out_cleanup_fabrics_q; 3527 } 3528 3529 /* 3530 * Would have been nice to init io queues tag set as well. 3531 * However, we require interaction from the controller 3532 * for max io queue count before we can do so. 3533 * Defer this to the connect path. 3534 */ 3535 3536 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0); 3537 if (ret) 3538 goto out_cleanup_admin_q; 3539 3540 /* at this point, teardown path changes to ref counting on nvme ctrl */ 3541 3542 spin_lock_irqsave(&rport->lock, flags); 3543 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); 3544 spin_unlock_irqrestore(&rport->lock, flags); 3545 3546 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) || 3547 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { 3548 dev_err(ctrl->ctrl.device, 3549 "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum); 3550 goto fail_ctrl; 3551 } 3552 3553 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { 3554 dev_err(ctrl->ctrl.device, 3555 "NVME-FC{%d}: failed to schedule initial connect\n", 3556 ctrl->cnum); 3557 goto fail_ctrl; 3558 } 3559 3560 flush_delayed_work(&ctrl->connect_work); 3561 3562 dev_info(ctrl->ctrl.device, 3563 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n", 3564 ctrl->cnum, ctrl->ctrl.opts->subsysnqn); 3565 3566 return &ctrl->ctrl; 3567 3568fail_ctrl: 3569 nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING); 3570 cancel_work_sync(&ctrl->ioerr_work); 3571 cancel_work_sync(&ctrl->ctrl.reset_work); 3572 cancel_delayed_work_sync(&ctrl->connect_work); 3573 3574 ctrl->ctrl.opts = NULL; 3575 3576 /* initiate nvme ctrl ref counting teardown */ 3577 nvme_uninit_ctrl(&ctrl->ctrl); 3578 3579 /* Remove core ctrl ref. */ 3580 nvme_put_ctrl(&ctrl->ctrl); 3581 3582 /* as we're past the point where we transition to the ref 3583 * counting teardown path, if we return a bad pointer here, 3584 * the calling routine, thinking it's prior to the 3585 * transition, will do an rport put. Since the teardown 3586 * path also does a rport put, we do an extra get here to 3587 * so proper order/teardown happens. 3588 */ 3589 nvme_fc_rport_get(rport); 3590 3591 return ERR_PTR(-EIO); 3592 3593out_cleanup_admin_q: 3594 blk_cleanup_queue(ctrl->ctrl.admin_q); 3595out_cleanup_fabrics_q: 3596 blk_cleanup_queue(ctrl->ctrl.fabrics_q); 3597out_free_admin_tag_set: 3598 blk_mq_free_tag_set(&ctrl->admin_tag_set); 3599out_free_queues: 3600 kfree(ctrl->queues); 3601out_free_ida: 3602 put_device(ctrl->dev); 3603 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); 3604out_free_ctrl: 3605 kfree(ctrl); 3606out_fail: 3607 /* exit via here doesn't follow ctlr ref points */ 3608 return ERR_PTR(ret); 3609} 3610 3611 3612struct nvmet_fc_traddr { 3613 u64 nn; 3614 u64 pn; 3615}; 3616 3617static int 3618__nvme_fc_parse_u64(substring_t *sstr, u64 *val) 3619{ 3620 u64 token64; 3621 3622 if (match_u64(sstr, &token64)) 3623 return -EINVAL; 3624 *val = token64; 3625 3626 return 0; 3627} 3628 3629/* 3630 * This routine validates and extracts the WWN's from the TRADDR string. 3631 * As kernel parsers need the 0x to determine number base, universally 3632 * build string to parse with 0x prefix before parsing name strings. 3633 */ 3634static int 3635nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) 3636{ 3637 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; 3638 substring_t wwn = { name, &name[sizeof(name)-1] }; 3639 int nnoffset, pnoffset; 3640 3641 /* validate if string is one of the 2 allowed formats */ 3642 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && 3643 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && 3644 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], 3645 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { 3646 nnoffset = NVME_FC_TRADDR_OXNNLEN; 3647 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET + 3648 NVME_FC_TRADDR_OXNNLEN; 3649 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH && 3650 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && 3651 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET], 3652 "pn-", NVME_FC_TRADDR_NNLEN))) { 3653 nnoffset = NVME_FC_TRADDR_NNLEN; 3654 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; 3655 } else 3656 goto out_einval; 3657 3658 name[0] = '0'; 3659 name[1] = 'x'; 3660 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; 3661 3662 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); 3663 if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) 3664 goto out_einval; 3665 3666 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); 3667 if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) 3668 goto out_einval; 3669 3670 return 0; 3671 3672out_einval: 3673 pr_warn("%s: bad traddr string\n", __func__); 3674 return -EINVAL; 3675} 3676 3677static struct nvme_ctrl * 3678nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) 3679{ 3680 struct nvme_fc_lport *lport; 3681 struct nvme_fc_rport *rport; 3682 struct nvme_ctrl *ctrl; 3683 struct nvmet_fc_traddr laddr = { 0L, 0L }; 3684 struct nvmet_fc_traddr raddr = { 0L, 0L }; 3685 unsigned long flags; 3686 int ret; 3687 3688 ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE); 3689 if (ret || !raddr.nn || !raddr.pn) 3690 return ERR_PTR(-EINVAL); 3691 3692 ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE); 3693 if (ret || !laddr.nn || !laddr.pn) 3694 return ERR_PTR(-EINVAL); 3695 3696 /* find the host and remote ports to connect together */ 3697 spin_lock_irqsave(&nvme_fc_lock, flags); 3698 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { 3699 if (lport->localport.node_name != laddr.nn || 3700 lport->localport.port_name != laddr.pn || 3701 lport->localport.port_state != FC_OBJSTATE_ONLINE) 3702 continue; 3703 3704 list_for_each_entry(rport, &lport->endp_list, endp_list) { 3705 if (rport->remoteport.node_name != raddr.nn || 3706 rport->remoteport.port_name != raddr.pn || 3707 rport->remoteport.port_state != FC_OBJSTATE_ONLINE) 3708 continue; 3709 3710 /* if fail to get reference fall through. Will error */ 3711 if (!nvme_fc_rport_get(rport)) 3712 break; 3713 3714 spin_unlock_irqrestore(&nvme_fc_lock, flags); 3715 3716 ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport); 3717 if (IS_ERR(ctrl)) 3718 nvme_fc_rport_put(rport); 3719 return ctrl; 3720 } 3721 } 3722 spin_unlock_irqrestore(&nvme_fc_lock, flags); 3723 3724 pr_warn("%s: %s - %s combination not found\n", 3725 __func__, opts->traddr, opts->host_traddr); 3726 return ERR_PTR(-ENOENT); 3727} 3728 3729 3730static struct nvmf_transport_ops nvme_fc_transport = { 3731 .name = "fc", 3732 .module = THIS_MODULE, 3733 .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR, 3734 .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO, 3735 .create_ctrl = nvme_fc_create_ctrl, 3736}; 3737 3738/* Arbitrary successive failures max. With lots of subsystems could be high */ 3739#define DISCOVERY_MAX_FAIL 20 3740 3741static ssize_t nvme_fc_nvme_discovery_store(struct device *dev, 3742 struct device_attribute *attr, const char *buf, size_t count) 3743{ 3744 unsigned long flags; 3745 LIST_HEAD(local_disc_list); 3746 struct nvme_fc_lport *lport; 3747 struct nvme_fc_rport *rport; 3748 int failcnt = 0; 3749 3750 spin_lock_irqsave(&nvme_fc_lock, flags); 3751restart: 3752 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { 3753 list_for_each_entry(rport, &lport->endp_list, endp_list) { 3754 if (!nvme_fc_lport_get(lport)) 3755 continue; 3756 if (!nvme_fc_rport_get(rport)) { 3757 /* 3758 * This is a temporary condition. Upon restart 3759 * this rport will be gone from the list. 3760 * 3761 * Revert the lport put and retry. Anything 3762 * added to the list already will be skipped (as 3763 * they are no longer list_empty). Loops should 3764 * resume at rports that were not yet seen. 3765 */ 3766 nvme_fc_lport_put(lport); 3767 3768 if (failcnt++ < DISCOVERY_MAX_FAIL) 3769 goto restart; 3770 3771 pr_err("nvme_discovery: too many reference " 3772 "failures\n"); 3773 goto process_local_list; 3774 } 3775 if (list_empty(&rport->disc_list)) 3776 list_add_tail(&rport->disc_list, 3777 &local_disc_list); 3778 } 3779 } 3780 3781process_local_list: 3782 while (!list_empty(&local_disc_list)) { 3783 rport = list_first_entry(&local_disc_list, 3784 struct nvme_fc_rport, disc_list); 3785 list_del_init(&rport->disc_list); 3786 spin_unlock_irqrestore(&nvme_fc_lock, flags); 3787 3788 lport = rport->lport; 3789 /* signal discovery. Won't hurt if it repeats */ 3790 nvme_fc_signal_discovery_scan(lport, rport); 3791 nvme_fc_rport_put(rport); 3792 nvme_fc_lport_put(lport); 3793 3794 spin_lock_irqsave(&nvme_fc_lock, flags); 3795 } 3796 spin_unlock_irqrestore(&nvme_fc_lock, flags); 3797 3798 return count; 3799} 3800static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store); 3801 3802static struct attribute *nvme_fc_attrs[] = { 3803 &dev_attr_nvme_discovery.attr, 3804 NULL 3805}; 3806 3807static struct attribute_group nvme_fc_attr_group = { 3808 .attrs = nvme_fc_attrs, 3809}; 3810 3811static const struct attribute_group *nvme_fc_attr_groups[] = { 3812 &nvme_fc_attr_group, 3813 NULL 3814}; 3815 3816static struct class fc_class = { 3817 .name = "fc", 3818 .dev_groups = nvme_fc_attr_groups, 3819 .owner = THIS_MODULE, 3820}; 3821 3822static int __init nvme_fc_init_module(void) 3823{ 3824 int ret; 3825 3826 nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0); 3827 if (!nvme_fc_wq) 3828 return -ENOMEM; 3829 3830 /* 3831 * NOTE: 3832 * It is expected that in the future the kernel will combine 3833 * the FC-isms that are currently under scsi and now being 3834 * added to by NVME into a new standalone FC class. The SCSI 3835 * and NVME protocols and their devices would be under this 3836 * new FC class. 3837 * 3838 * As we need something to post FC-specific udev events to, 3839 * specifically for nvme probe events, start by creating the 3840 * new device class. When the new standalone FC class is 3841 * put in place, this code will move to a more generic 3842 * location for the class. 3843 */ 3844 ret = class_register(&fc_class); 3845 if (ret) { 3846 pr_err("couldn't register class fc\n"); 3847 goto out_destroy_wq; 3848 } 3849 3850 /* 3851 * Create a device for the FC-centric udev events 3852 */ 3853 fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL, 3854 "fc_udev_device"); 3855 if (IS_ERR(fc_udev_device)) { 3856 pr_err("couldn't create fc_udev device!\n"); 3857 ret = PTR_ERR(fc_udev_device); 3858 goto out_destroy_class; 3859 } 3860 3861 ret = nvmf_register_transport(&nvme_fc_transport); 3862 if (ret) 3863 goto out_destroy_device; 3864 3865 return 0; 3866 3867out_destroy_device: 3868 device_destroy(&fc_class, MKDEV(0, 0)); 3869out_destroy_class: 3870 class_unregister(&fc_class); 3871out_destroy_wq: 3872 destroy_workqueue(nvme_fc_wq); 3873 3874 return ret; 3875} 3876 3877static void 3878nvme_fc_delete_controllers(struct nvme_fc_rport *rport) 3879{ 3880 struct nvme_fc_ctrl *ctrl; 3881 3882 spin_lock(&rport->lock); 3883 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { 3884 dev_warn(ctrl->ctrl.device, 3885 "NVME-FC{%d}: transport unloading: deleting ctrl\n", 3886 ctrl->cnum); 3887 nvme_delete_ctrl(&ctrl->ctrl); 3888 } 3889 spin_unlock(&rport->lock); 3890} 3891 3892static void 3893nvme_fc_cleanup_for_unload(void) 3894{ 3895 struct nvme_fc_lport *lport; 3896 struct nvme_fc_rport *rport; 3897 3898 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { 3899 list_for_each_entry(rport, &lport->endp_list, endp_list) { 3900 nvme_fc_delete_controllers(rport); 3901 } 3902 } 3903} 3904 3905static void __exit nvme_fc_exit_module(void) 3906{ 3907 unsigned long flags; 3908 bool need_cleanup = false; 3909 3910 spin_lock_irqsave(&nvme_fc_lock, flags); 3911 nvme_fc_waiting_to_unload = true; 3912 if (!list_empty(&nvme_fc_lport_list)) { 3913 need_cleanup = true; 3914 nvme_fc_cleanup_for_unload(); 3915 } 3916 spin_unlock_irqrestore(&nvme_fc_lock, flags); 3917 if (need_cleanup) { 3918 pr_info("%s: waiting for ctlr deletes\n", __func__); 3919 wait_for_completion(&nvme_fc_unload_proceed); 3920 pr_info("%s: ctrl deletes complete\n", __func__); 3921 } 3922 3923 nvmf_unregister_transport(&nvme_fc_transport); 3924 3925 ida_destroy(&nvme_fc_local_port_cnt); 3926 ida_destroy(&nvme_fc_ctrl_cnt); 3927 3928 device_destroy(&fc_class, MKDEV(0, 0)); 3929 class_unregister(&fc_class); 3930 destroy_workqueue(nvme_fc_wq); 3931} 3932 3933module_init(nvme_fc_init_module); 3934module_exit(nvme_fc_exit_module); 3935 3936MODULE_LICENSE("GPL v2"); 3937