1// SPDX-License-Identifier: GPL-2.0 2/* 3 * Copyright (c) 2016 Avago Technologies. All rights reserved. 4 */ 5#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 6#include <linux/module.h> 7#include <linux/parser.h> 8#include <uapi/scsi/fc/fc_fs.h> 9 10#include "../host/nvme.h" 11#include "../target/nvmet.h" 12#include <linux/nvme-fc-driver.h> 13#include <linux/nvme-fc.h> 14 15 16enum { 17 NVMF_OPT_ERR = 0, 18 NVMF_OPT_WWNN = 1 << 0, 19 NVMF_OPT_WWPN = 1 << 1, 20 NVMF_OPT_ROLES = 1 << 2, 21 NVMF_OPT_FCADDR = 1 << 3, 22 NVMF_OPT_LPWWNN = 1 << 4, 23 NVMF_OPT_LPWWPN = 1 << 5, 24}; 25 26struct fcloop_ctrl_options { 27 int mask; 28 u64 wwnn; 29 u64 wwpn; 30 u32 roles; 31 u32 fcaddr; 32 u64 lpwwnn; 33 u64 lpwwpn; 34}; 35 36static const match_table_t opt_tokens = { 37 { NVMF_OPT_WWNN, "wwnn=%s" }, 38 { NVMF_OPT_WWPN, "wwpn=%s" }, 39 { NVMF_OPT_ROLES, "roles=%d" }, 40 { NVMF_OPT_FCADDR, "fcaddr=%x" }, 41 { NVMF_OPT_LPWWNN, "lpwwnn=%s" }, 42 { NVMF_OPT_LPWWPN, "lpwwpn=%s" }, 43 { NVMF_OPT_ERR, NULL } 44}; 45 46static int fcloop_verify_addr(substring_t *s) 47{ 48 size_t blen = s->to - s->from + 1; 49 50 if (strnlen(s->from, blen) != NVME_FC_TRADDR_HEXNAMELEN + 2 || 51 strncmp(s->from, "0x", 2)) 52 return -EINVAL; 53 54 return 0; 55} 56 57static int 58fcloop_parse_options(struct fcloop_ctrl_options *opts, 59 const char *buf) 60{ 61 substring_t args[MAX_OPT_ARGS]; 62 char *options, *o, *p; 63 int token, ret = 0; 64 u64 token64; 65 66 options = o = kstrdup(buf, GFP_KERNEL); 67 if (!options) 68 return -ENOMEM; 69 70 while ((p = strsep(&o, ",\n")) != NULL) { 71 if (!*p) 72 continue; 73 74 token = match_token(p, opt_tokens, args); 75 opts->mask |= token; 76 switch (token) { 77 case NVMF_OPT_WWNN: 78 if (fcloop_verify_addr(args) || 79 match_u64(args, &token64)) { 80 ret = -EINVAL; 81 goto out_free_options; 82 } 83 opts->wwnn = token64; 84 break; 85 case NVMF_OPT_WWPN: 86 if (fcloop_verify_addr(args) || 87 match_u64(args, &token64)) { 88 ret = -EINVAL; 89 goto out_free_options; 90 } 91 opts->wwpn = token64; 92 break; 93 case NVMF_OPT_ROLES: 94 if (match_int(args, &token)) { 95 ret = -EINVAL; 96 goto out_free_options; 97 } 98 opts->roles = token; 99 break; 100 case NVMF_OPT_FCADDR: 101 if (match_hex(args, &token)) { 102 ret = -EINVAL; 103 goto out_free_options; 104 } 105 opts->fcaddr = token; 106 break; 107 case NVMF_OPT_LPWWNN: 108 if (fcloop_verify_addr(args) || 109 match_u64(args, &token64)) { 110 ret = -EINVAL; 111 goto out_free_options; 112 } 113 opts->lpwwnn = token64; 114 break; 115 case NVMF_OPT_LPWWPN: 116 if (fcloop_verify_addr(args) || 117 match_u64(args, &token64)) { 118 ret = -EINVAL; 119 goto out_free_options; 120 } 121 opts->lpwwpn = token64; 122 break; 123 default: 124 pr_warn("unknown parameter or missing value '%s'\n", p); 125 ret = -EINVAL; 126 goto out_free_options; 127 } 128 } 129 130out_free_options: 131 kfree(options); 132 return ret; 133} 134 135 136static int 137fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname, 138 const char *buf) 139{ 140 substring_t args[MAX_OPT_ARGS]; 141 char *options, *o, *p; 142 int token, ret = 0; 143 u64 token64; 144 145 *nname = -1; 146 *pname = -1; 147 148 options = o = kstrdup(buf, GFP_KERNEL); 149 if (!options) 150 return -ENOMEM; 151 152 while ((p = strsep(&o, ",\n")) != NULL) { 153 if (!*p) 154 continue; 155 156 token = match_token(p, opt_tokens, args); 157 switch (token) { 158 case NVMF_OPT_WWNN: 159 if (fcloop_verify_addr(args) || 160 match_u64(args, &token64)) { 161 ret = -EINVAL; 162 goto out_free_options; 163 } 164 *nname = token64; 165 break; 166 case NVMF_OPT_WWPN: 167 if (fcloop_verify_addr(args) || 168 match_u64(args, &token64)) { 169 ret = -EINVAL; 170 goto out_free_options; 171 } 172 *pname = token64; 173 break; 174 default: 175 pr_warn("unknown parameter or missing value '%s'\n", p); 176 ret = -EINVAL; 177 goto out_free_options; 178 } 179 } 180 181out_free_options: 182 kfree(options); 183 184 if (!ret) { 185 if (*nname == -1) 186 return -EINVAL; 187 if (*pname == -1) 188 return -EINVAL; 189 } 190 191 return ret; 192} 193 194 195#define LPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN) 196 197#define RPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN | \ 198 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN) 199 200#define TGTPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN) 201 202 203static DEFINE_SPINLOCK(fcloop_lock); 204static LIST_HEAD(fcloop_lports); 205static LIST_HEAD(fcloop_nports); 206 207struct fcloop_lport { 208 struct nvme_fc_local_port *localport; 209 struct list_head lport_list; 210 struct completion unreg_done; 211}; 212 213struct fcloop_lport_priv { 214 struct fcloop_lport *lport; 215}; 216 217struct fcloop_rport { 218 struct nvme_fc_remote_port *remoteport; 219 struct nvmet_fc_target_port *targetport; 220 struct fcloop_nport *nport; 221 struct fcloop_lport *lport; 222 spinlock_t lock; 223 struct list_head ls_list; 224 struct work_struct ls_work; 225}; 226 227struct fcloop_tport { 228 struct nvmet_fc_target_port *targetport; 229 struct nvme_fc_remote_port *remoteport; 230 struct fcloop_nport *nport; 231 struct fcloop_lport *lport; 232 spinlock_t lock; 233 struct list_head ls_list; 234 struct work_struct ls_work; 235}; 236 237struct fcloop_nport { 238 struct fcloop_rport *rport; 239 struct fcloop_tport *tport; 240 struct fcloop_lport *lport; 241 struct list_head nport_list; 242 struct kref ref; 243 u64 node_name; 244 u64 port_name; 245 u32 port_role; 246 u32 port_id; 247}; 248 249struct fcloop_lsreq { 250 struct nvmefc_ls_req *lsreq; 251 struct nvmefc_ls_rsp ls_rsp; 252 int lsdir; /* H2T or T2H */ 253 int status; 254 struct list_head ls_list; /* fcloop_rport->ls_list */ 255}; 256 257struct fcloop_rscn { 258 struct fcloop_tport *tport; 259 struct work_struct work; 260}; 261 262enum { 263 INI_IO_START = 0, 264 INI_IO_ACTIVE = 1, 265 INI_IO_ABORTED = 2, 266 INI_IO_COMPLETED = 3, 267}; 268 269struct fcloop_fcpreq { 270 struct fcloop_tport *tport; 271 struct nvmefc_fcp_req *fcpreq; 272 spinlock_t reqlock; 273 u16 status; 274 u32 inistate; 275 bool active; 276 bool aborted; 277 struct kref ref; 278 struct work_struct fcp_rcv_work; 279 struct work_struct abort_rcv_work; 280 struct work_struct tio_done_work; 281 struct nvmefc_tgt_fcp_req tgt_fcp_req; 282}; 283 284struct fcloop_ini_fcpreq { 285 struct nvmefc_fcp_req *fcpreq; 286 struct fcloop_fcpreq *tfcp_req; 287 spinlock_t inilock; 288}; 289 290static inline struct fcloop_lsreq * 291ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp) 292{ 293 return container_of(lsrsp, struct fcloop_lsreq, ls_rsp); 294} 295 296static inline struct fcloop_fcpreq * 297tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq) 298{ 299 return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req); 300} 301 302 303static int 304fcloop_create_queue(struct nvme_fc_local_port *localport, 305 unsigned int qidx, u16 qsize, 306 void **handle) 307{ 308 *handle = localport; 309 return 0; 310} 311 312static void 313fcloop_delete_queue(struct nvme_fc_local_port *localport, 314 unsigned int idx, void *handle) 315{ 316} 317 318static void 319fcloop_rport_lsrqst_work(struct work_struct *work) 320{ 321 struct fcloop_rport *rport = 322 container_of(work, struct fcloop_rport, ls_work); 323 struct fcloop_lsreq *tls_req; 324 325 spin_lock(&rport->lock); 326 for (;;) { 327 tls_req = list_first_entry_or_null(&rport->ls_list, 328 struct fcloop_lsreq, ls_list); 329 if (!tls_req) 330 break; 331 332 list_del(&tls_req->ls_list); 333 spin_unlock(&rport->lock); 334 335 tls_req->lsreq->done(tls_req->lsreq, tls_req->status); 336 /* 337 * callee may free memory containing tls_req. 338 * do not reference lsreq after this. 339 */ 340 341 spin_lock(&rport->lock); 342 } 343 spin_unlock(&rport->lock); 344} 345 346static int 347fcloop_h2t_ls_req(struct nvme_fc_local_port *localport, 348 struct nvme_fc_remote_port *remoteport, 349 struct nvmefc_ls_req *lsreq) 350{ 351 struct fcloop_lsreq *tls_req = lsreq->private; 352 struct fcloop_rport *rport = remoteport->private; 353 int ret = 0; 354 355 tls_req->lsreq = lsreq; 356 INIT_LIST_HEAD(&tls_req->ls_list); 357 358 if (!rport->targetport) { 359 tls_req->status = -ECONNREFUSED; 360 spin_lock(&rport->lock); 361 list_add_tail(&rport->ls_list, &tls_req->ls_list); 362 spin_unlock(&rport->lock); 363 schedule_work(&rport->ls_work); 364 return ret; 365 } 366 367 tls_req->status = 0; 368 ret = nvmet_fc_rcv_ls_req(rport->targetport, rport, 369 &tls_req->ls_rsp, 370 lsreq->rqstaddr, lsreq->rqstlen); 371 372 return ret; 373} 374 375static int 376fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport, 377 struct nvmefc_ls_rsp *lsrsp) 378{ 379 struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp); 380 struct nvmefc_ls_req *lsreq = tls_req->lsreq; 381 struct fcloop_tport *tport = targetport->private; 382 struct nvme_fc_remote_port *remoteport = tport->remoteport; 383 struct fcloop_rport *rport; 384 385 memcpy(lsreq->rspaddr, lsrsp->rspbuf, 386 ((lsreq->rsplen < lsrsp->rsplen) ? 387 lsreq->rsplen : lsrsp->rsplen)); 388 389 lsrsp->done(lsrsp); 390 391 if (remoteport) { 392 rport = remoteport->private; 393 spin_lock(&rport->lock); 394 list_add_tail(&rport->ls_list, &tls_req->ls_list); 395 spin_unlock(&rport->lock); 396 schedule_work(&rport->ls_work); 397 } 398 399 return 0; 400} 401 402static void 403fcloop_tport_lsrqst_work(struct work_struct *work) 404{ 405 struct fcloop_tport *tport = 406 container_of(work, struct fcloop_tport, ls_work); 407 struct fcloop_lsreq *tls_req; 408 409 spin_lock(&tport->lock); 410 for (;;) { 411 tls_req = list_first_entry_or_null(&tport->ls_list, 412 struct fcloop_lsreq, ls_list); 413 if (!tls_req) 414 break; 415 416 list_del(&tls_req->ls_list); 417 spin_unlock(&tport->lock); 418 419 tls_req->lsreq->done(tls_req->lsreq, tls_req->status); 420 /* 421 * callee may free memory containing tls_req. 422 * do not reference lsreq after this. 423 */ 424 425 spin_lock(&tport->lock); 426 } 427 spin_unlock(&tport->lock); 428} 429 430static int 431fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle, 432 struct nvmefc_ls_req *lsreq) 433{ 434 struct fcloop_lsreq *tls_req = lsreq->private; 435 struct fcloop_tport *tport = targetport->private; 436 int ret = 0; 437 438 /* 439 * hosthandle should be the dst.rport value. 440 * hosthandle ignored as fcloop currently is 441 * 1:1 tgtport vs remoteport 442 */ 443 tls_req->lsreq = lsreq; 444 INIT_LIST_HEAD(&tls_req->ls_list); 445 446 if (!tport->remoteport) { 447 tls_req->status = -ECONNREFUSED; 448 spin_lock(&tport->lock); 449 list_add_tail(&tport->ls_list, &tls_req->ls_list); 450 spin_unlock(&tport->lock); 451 schedule_work(&tport->ls_work); 452 return ret; 453 } 454 455 tls_req->status = 0; 456 ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp, 457 lsreq->rqstaddr, lsreq->rqstlen); 458 459 return ret; 460} 461 462static int 463fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport, 464 struct nvme_fc_remote_port *remoteport, 465 struct nvmefc_ls_rsp *lsrsp) 466{ 467 struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp); 468 struct nvmefc_ls_req *lsreq = tls_req->lsreq; 469 struct fcloop_rport *rport = remoteport->private; 470 struct nvmet_fc_target_port *targetport = rport->targetport; 471 struct fcloop_tport *tport; 472 473 memcpy(lsreq->rspaddr, lsrsp->rspbuf, 474 ((lsreq->rsplen < lsrsp->rsplen) ? 475 lsreq->rsplen : lsrsp->rsplen)); 476 lsrsp->done(lsrsp); 477 478 if (targetport) { 479 tport = targetport->private; 480 spin_lock(&tport->lock); 481 list_add_tail(&tport->ls_list, &tls_req->ls_list); 482 spin_unlock(&tport->lock); 483 schedule_work(&tport->ls_work); 484 } 485 486 return 0; 487} 488 489static void 490fcloop_t2h_host_release(void *hosthandle) 491{ 492 /* host handle ignored for now */ 493} 494 495/* 496 * Simulate reception of RSCN and converting it to a initiator transport 497 * call to rescan a remote port. 498 */ 499static void 500fcloop_tgt_rscn_work(struct work_struct *work) 501{ 502 struct fcloop_rscn *tgt_rscn = 503 container_of(work, struct fcloop_rscn, work); 504 struct fcloop_tport *tport = tgt_rscn->tport; 505 506 if (tport->remoteport) 507 nvme_fc_rescan_remoteport(tport->remoteport); 508 kfree(tgt_rscn); 509} 510 511static void 512fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport) 513{ 514 struct fcloop_rscn *tgt_rscn; 515 516 tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL); 517 if (!tgt_rscn) 518 return; 519 520 tgt_rscn->tport = tgtport->private; 521 INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work); 522 523 schedule_work(&tgt_rscn->work); 524} 525 526static void 527fcloop_tfcp_req_free(struct kref *ref) 528{ 529 struct fcloop_fcpreq *tfcp_req = 530 container_of(ref, struct fcloop_fcpreq, ref); 531 532 kfree(tfcp_req); 533} 534 535static void 536fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req) 537{ 538 kref_put(&tfcp_req->ref, fcloop_tfcp_req_free); 539} 540 541static int 542fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req) 543{ 544 return kref_get_unless_zero(&tfcp_req->ref); 545} 546 547static void 548fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq, 549 struct fcloop_fcpreq *tfcp_req, int status) 550{ 551 struct fcloop_ini_fcpreq *inireq = NULL; 552 553 if (fcpreq) { 554 inireq = fcpreq->private; 555 spin_lock(&inireq->inilock); 556 inireq->tfcp_req = NULL; 557 spin_unlock(&inireq->inilock); 558 559 fcpreq->status = status; 560 fcpreq->done(fcpreq); 561 } 562 563 /* release original io reference on tgt struct */ 564 fcloop_tfcp_req_put(tfcp_req); 565} 566 567static void 568fcloop_fcp_recv_work(struct work_struct *work) 569{ 570 struct fcloop_fcpreq *tfcp_req = 571 container_of(work, struct fcloop_fcpreq, fcp_rcv_work); 572 struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq; 573 unsigned long flags; 574 int ret = 0; 575 bool aborted = false; 576 577 spin_lock_irqsave(&tfcp_req->reqlock, flags); 578 switch (tfcp_req->inistate) { 579 case INI_IO_START: 580 tfcp_req->inistate = INI_IO_ACTIVE; 581 break; 582 case INI_IO_ABORTED: 583 aborted = true; 584 break; 585 default: 586 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); 587 WARN_ON(1); 588 return; 589 } 590 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); 591 592 if (unlikely(aborted)) 593 ret = -ECANCELED; 594 else 595 ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport, 596 &tfcp_req->tgt_fcp_req, 597 fcpreq->cmdaddr, fcpreq->cmdlen); 598 if (ret) 599 fcloop_call_host_done(fcpreq, tfcp_req, ret); 600 601 return; 602} 603 604static void 605fcloop_fcp_abort_recv_work(struct work_struct *work) 606{ 607 struct fcloop_fcpreq *tfcp_req = 608 container_of(work, struct fcloop_fcpreq, abort_rcv_work); 609 struct nvmefc_fcp_req *fcpreq; 610 bool completed = false; 611 unsigned long flags; 612 613 spin_lock_irqsave(&tfcp_req->reqlock, flags); 614 fcpreq = tfcp_req->fcpreq; 615 switch (tfcp_req->inistate) { 616 case INI_IO_ABORTED: 617 break; 618 case INI_IO_COMPLETED: 619 completed = true; 620 break; 621 default: 622 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); 623 WARN_ON(1); 624 return; 625 } 626 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); 627 628 if (unlikely(completed)) { 629 /* remove reference taken in original abort downcall */ 630 fcloop_tfcp_req_put(tfcp_req); 631 return; 632 } 633 634 if (tfcp_req->tport->targetport) 635 nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport, 636 &tfcp_req->tgt_fcp_req); 637 638 spin_lock_irqsave(&tfcp_req->reqlock, flags); 639 tfcp_req->fcpreq = NULL; 640 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); 641 642 fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED); 643 /* call_host_done releases reference for abort downcall */ 644} 645 646/* 647 * FCP IO operation done by target completion. 648 * call back up initiator "done" flows. 649 */ 650static void 651fcloop_tgt_fcprqst_done_work(struct work_struct *work) 652{ 653 struct fcloop_fcpreq *tfcp_req = 654 container_of(work, struct fcloop_fcpreq, tio_done_work); 655 struct nvmefc_fcp_req *fcpreq; 656 unsigned long flags; 657 658 spin_lock_irqsave(&tfcp_req->reqlock, flags); 659 fcpreq = tfcp_req->fcpreq; 660 tfcp_req->inistate = INI_IO_COMPLETED; 661 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); 662 663 fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status); 664} 665 666 667static int 668fcloop_fcp_req(struct nvme_fc_local_port *localport, 669 struct nvme_fc_remote_port *remoteport, 670 void *hw_queue_handle, 671 struct nvmefc_fcp_req *fcpreq) 672{ 673 struct fcloop_rport *rport = remoteport->private; 674 struct fcloop_ini_fcpreq *inireq = fcpreq->private; 675 struct fcloop_fcpreq *tfcp_req; 676 677 if (!rport->targetport) 678 return -ECONNREFUSED; 679 680 tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC); 681 if (!tfcp_req) 682 return -ENOMEM; 683 684 inireq->fcpreq = fcpreq; 685 inireq->tfcp_req = tfcp_req; 686 spin_lock_init(&inireq->inilock); 687 688 tfcp_req->fcpreq = fcpreq; 689 tfcp_req->tport = rport->targetport->private; 690 tfcp_req->inistate = INI_IO_START; 691 spin_lock_init(&tfcp_req->reqlock); 692 INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work); 693 INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work); 694 INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work); 695 kref_init(&tfcp_req->ref); 696 697 schedule_work(&tfcp_req->fcp_rcv_work); 698 699 return 0; 700} 701 702static void 703fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg, 704 struct scatterlist *io_sg, u32 offset, u32 length) 705{ 706 void *data_p, *io_p; 707 u32 data_len, io_len, tlen; 708 709 io_p = sg_virt(io_sg); 710 io_len = io_sg->length; 711 712 for ( ; offset; ) { 713 tlen = min_t(u32, offset, io_len); 714 offset -= tlen; 715 io_len -= tlen; 716 if (!io_len) { 717 io_sg = sg_next(io_sg); 718 io_p = sg_virt(io_sg); 719 io_len = io_sg->length; 720 } else 721 io_p += tlen; 722 } 723 724 data_p = sg_virt(data_sg); 725 data_len = data_sg->length; 726 727 for ( ; length; ) { 728 tlen = min_t(u32, io_len, data_len); 729 tlen = min_t(u32, tlen, length); 730 731 if (op == NVMET_FCOP_WRITEDATA) 732 memcpy(data_p, io_p, tlen); 733 else 734 memcpy(io_p, data_p, tlen); 735 736 length -= tlen; 737 738 io_len -= tlen; 739 if ((!io_len) && (length)) { 740 io_sg = sg_next(io_sg); 741 io_p = sg_virt(io_sg); 742 io_len = io_sg->length; 743 } else 744 io_p += tlen; 745 746 data_len -= tlen; 747 if ((!data_len) && (length)) { 748 data_sg = sg_next(data_sg); 749 data_p = sg_virt(data_sg); 750 data_len = data_sg->length; 751 } else 752 data_p += tlen; 753 } 754} 755 756static int 757fcloop_fcp_op(struct nvmet_fc_target_port *tgtport, 758 struct nvmefc_tgt_fcp_req *tgt_fcpreq) 759{ 760 struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq); 761 struct nvmefc_fcp_req *fcpreq; 762 u32 rsplen = 0, xfrlen = 0; 763 int fcp_err = 0, active, aborted; 764 u8 op = tgt_fcpreq->op; 765 unsigned long flags; 766 767 spin_lock_irqsave(&tfcp_req->reqlock, flags); 768 fcpreq = tfcp_req->fcpreq; 769 active = tfcp_req->active; 770 aborted = tfcp_req->aborted; 771 tfcp_req->active = true; 772 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); 773 774 if (unlikely(active)) 775 /* illegal - call while i/o active */ 776 return -EALREADY; 777 778 if (unlikely(aborted)) { 779 /* target transport has aborted i/o prior */ 780 spin_lock_irqsave(&tfcp_req->reqlock, flags); 781 tfcp_req->active = false; 782 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); 783 tgt_fcpreq->transferred_length = 0; 784 tgt_fcpreq->fcp_error = -ECANCELED; 785 tgt_fcpreq->done(tgt_fcpreq); 786 return 0; 787 } 788 789 /* 790 * if fcpreq is NULL, the I/O has been aborted (from 791 * initiator side). For the target side, act as if all is well 792 * but don't actually move data. 793 */ 794 795 switch (op) { 796 case NVMET_FCOP_WRITEDATA: 797 xfrlen = tgt_fcpreq->transfer_length; 798 if (fcpreq) { 799 fcloop_fcp_copy_data(op, tgt_fcpreq->sg, 800 fcpreq->first_sgl, tgt_fcpreq->offset, 801 xfrlen); 802 fcpreq->transferred_length += xfrlen; 803 } 804 break; 805 806 case NVMET_FCOP_READDATA: 807 case NVMET_FCOP_READDATA_RSP: 808 xfrlen = tgt_fcpreq->transfer_length; 809 if (fcpreq) { 810 fcloop_fcp_copy_data(op, tgt_fcpreq->sg, 811 fcpreq->first_sgl, tgt_fcpreq->offset, 812 xfrlen); 813 fcpreq->transferred_length += xfrlen; 814 } 815 if (op == NVMET_FCOP_READDATA) 816 break; 817 818 /* Fall-Thru to RSP handling */ 819 fallthrough; 820 821 case NVMET_FCOP_RSP: 822 if (fcpreq) { 823 rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ? 824 fcpreq->rsplen : tgt_fcpreq->rsplen); 825 memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen); 826 if (rsplen < tgt_fcpreq->rsplen) 827 fcp_err = -E2BIG; 828 fcpreq->rcv_rsplen = rsplen; 829 fcpreq->status = 0; 830 } 831 tfcp_req->status = 0; 832 break; 833 834 default: 835 fcp_err = -EINVAL; 836 break; 837 } 838 839 spin_lock_irqsave(&tfcp_req->reqlock, flags); 840 tfcp_req->active = false; 841 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); 842 843 tgt_fcpreq->transferred_length = xfrlen; 844 tgt_fcpreq->fcp_error = fcp_err; 845 tgt_fcpreq->done(tgt_fcpreq); 846 847 return 0; 848} 849 850static void 851fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport, 852 struct nvmefc_tgt_fcp_req *tgt_fcpreq) 853{ 854 struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq); 855 unsigned long flags; 856 857 /* 858 * mark aborted only in case there were 2 threads in transport 859 * (one doing io, other doing abort) and only kills ops posted 860 * after the abort request 861 */ 862 spin_lock_irqsave(&tfcp_req->reqlock, flags); 863 tfcp_req->aborted = true; 864 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); 865 866 tfcp_req->status = NVME_SC_INTERNAL; 867 868 /* 869 * nothing more to do. If io wasn't active, the transport should 870 * immediately call the req_release. If it was active, the op 871 * will complete, and the lldd should call req_release. 872 */ 873} 874 875static void 876fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport, 877 struct nvmefc_tgt_fcp_req *tgt_fcpreq) 878{ 879 struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq); 880 881 schedule_work(&tfcp_req->tio_done_work); 882} 883 884static void 885fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport, 886 struct nvme_fc_remote_port *remoteport, 887 struct nvmefc_ls_req *lsreq) 888{ 889} 890 891static void 892fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport, 893 void *hosthandle, struct nvmefc_ls_req *lsreq) 894{ 895} 896 897static void 898fcloop_fcp_abort(struct nvme_fc_local_port *localport, 899 struct nvme_fc_remote_port *remoteport, 900 void *hw_queue_handle, 901 struct nvmefc_fcp_req *fcpreq) 902{ 903 struct fcloop_ini_fcpreq *inireq = fcpreq->private; 904 struct fcloop_fcpreq *tfcp_req; 905 bool abortio = true; 906 unsigned long flags; 907 908 spin_lock(&inireq->inilock); 909 tfcp_req = inireq->tfcp_req; 910 if (tfcp_req) 911 fcloop_tfcp_req_get(tfcp_req); 912 spin_unlock(&inireq->inilock); 913 914 if (!tfcp_req) 915 /* abort has already been called */ 916 return; 917 918 /* break initiator/target relationship for io */ 919 spin_lock_irqsave(&tfcp_req->reqlock, flags); 920 switch (tfcp_req->inistate) { 921 case INI_IO_START: 922 case INI_IO_ACTIVE: 923 tfcp_req->inistate = INI_IO_ABORTED; 924 break; 925 case INI_IO_COMPLETED: 926 abortio = false; 927 break; 928 default: 929 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); 930 WARN_ON(1); 931 return; 932 } 933 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); 934 935 if (abortio) 936 /* leave the reference while the work item is scheduled */ 937 WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work)); 938 else { 939 /* 940 * as the io has already had the done callback made, 941 * nothing more to do. So release the reference taken above 942 */ 943 fcloop_tfcp_req_put(tfcp_req); 944 } 945} 946 947static void 948fcloop_nport_free(struct kref *ref) 949{ 950 struct fcloop_nport *nport = 951 container_of(ref, struct fcloop_nport, ref); 952 unsigned long flags; 953 954 spin_lock_irqsave(&fcloop_lock, flags); 955 list_del(&nport->nport_list); 956 spin_unlock_irqrestore(&fcloop_lock, flags); 957 958 kfree(nport); 959} 960 961static void 962fcloop_nport_put(struct fcloop_nport *nport) 963{ 964 kref_put(&nport->ref, fcloop_nport_free); 965} 966 967static int 968fcloop_nport_get(struct fcloop_nport *nport) 969{ 970 return kref_get_unless_zero(&nport->ref); 971} 972 973static void 974fcloop_localport_delete(struct nvme_fc_local_port *localport) 975{ 976 struct fcloop_lport_priv *lport_priv = localport->private; 977 struct fcloop_lport *lport = lport_priv->lport; 978 979 /* release any threads waiting for the unreg to complete */ 980 complete(&lport->unreg_done); 981} 982 983static void 984fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport) 985{ 986 struct fcloop_rport *rport = remoteport->private; 987 988 flush_work(&rport->ls_work); 989 fcloop_nport_put(rport->nport); 990} 991 992static void 993fcloop_targetport_delete(struct nvmet_fc_target_port *targetport) 994{ 995 struct fcloop_tport *tport = targetport->private; 996 997 flush_work(&tport->ls_work); 998 fcloop_nport_put(tport->nport); 999} 1000 1001#define FCLOOP_HW_QUEUES 4 1002#define FCLOOP_SGL_SEGS 256 1003#define FCLOOP_DMABOUND_4G 0xFFFFFFFF 1004 1005static struct nvme_fc_port_template fctemplate = { 1006 .localport_delete = fcloop_localport_delete, 1007 .remoteport_delete = fcloop_remoteport_delete, 1008 .create_queue = fcloop_create_queue, 1009 .delete_queue = fcloop_delete_queue, 1010 .ls_req = fcloop_h2t_ls_req, 1011 .fcp_io = fcloop_fcp_req, 1012 .ls_abort = fcloop_h2t_ls_abort, 1013 .fcp_abort = fcloop_fcp_abort, 1014 .xmt_ls_rsp = fcloop_t2h_xmt_ls_rsp, 1015 .max_hw_queues = FCLOOP_HW_QUEUES, 1016 .max_sgl_segments = FCLOOP_SGL_SEGS, 1017 .max_dif_sgl_segments = FCLOOP_SGL_SEGS, 1018 .dma_boundary = FCLOOP_DMABOUND_4G, 1019 /* sizes of additional private data for data structures */ 1020 .local_priv_sz = sizeof(struct fcloop_lport_priv), 1021 .remote_priv_sz = sizeof(struct fcloop_rport), 1022 .lsrqst_priv_sz = sizeof(struct fcloop_lsreq), 1023 .fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq), 1024}; 1025 1026static struct nvmet_fc_target_template tgttemplate = { 1027 .targetport_delete = fcloop_targetport_delete, 1028 .xmt_ls_rsp = fcloop_h2t_xmt_ls_rsp, 1029 .fcp_op = fcloop_fcp_op, 1030 .fcp_abort = fcloop_tgt_fcp_abort, 1031 .fcp_req_release = fcloop_fcp_req_release, 1032 .discovery_event = fcloop_tgt_discovery_evt, 1033 .ls_req = fcloop_t2h_ls_req, 1034 .ls_abort = fcloop_t2h_ls_abort, 1035 .host_release = fcloop_t2h_host_release, 1036 .max_hw_queues = FCLOOP_HW_QUEUES, 1037 .max_sgl_segments = FCLOOP_SGL_SEGS, 1038 .max_dif_sgl_segments = FCLOOP_SGL_SEGS, 1039 .dma_boundary = FCLOOP_DMABOUND_4G, 1040 /* optional features */ 1041 .target_features = 0, 1042 /* sizes of additional private data for data structures */ 1043 .target_priv_sz = sizeof(struct fcloop_tport), 1044 .lsrqst_priv_sz = sizeof(struct fcloop_lsreq), 1045}; 1046 1047static ssize_t 1048fcloop_create_local_port(struct device *dev, struct device_attribute *attr, 1049 const char *buf, size_t count) 1050{ 1051 struct nvme_fc_port_info pinfo; 1052 struct fcloop_ctrl_options *opts; 1053 struct nvme_fc_local_port *localport; 1054 struct fcloop_lport *lport; 1055 struct fcloop_lport_priv *lport_priv; 1056 unsigned long flags; 1057 int ret = -ENOMEM; 1058 1059 lport = kzalloc(sizeof(*lport), GFP_KERNEL); 1060 if (!lport) 1061 return -ENOMEM; 1062 1063 opts = kzalloc(sizeof(*opts), GFP_KERNEL); 1064 if (!opts) 1065 goto out_free_lport; 1066 1067 ret = fcloop_parse_options(opts, buf); 1068 if (ret) 1069 goto out_free_opts; 1070 1071 /* everything there ? */ 1072 if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) { 1073 ret = -EINVAL; 1074 goto out_free_opts; 1075 } 1076 1077 memset(&pinfo, 0, sizeof(pinfo)); 1078 pinfo.node_name = opts->wwnn; 1079 pinfo.port_name = opts->wwpn; 1080 pinfo.port_role = opts->roles; 1081 pinfo.port_id = opts->fcaddr; 1082 1083 ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport); 1084 if (!ret) { 1085 /* success */ 1086 lport_priv = localport->private; 1087 lport_priv->lport = lport; 1088 1089 lport->localport = localport; 1090 INIT_LIST_HEAD(&lport->lport_list); 1091 1092 spin_lock_irqsave(&fcloop_lock, flags); 1093 list_add_tail(&lport->lport_list, &fcloop_lports); 1094 spin_unlock_irqrestore(&fcloop_lock, flags); 1095 } 1096 1097out_free_opts: 1098 kfree(opts); 1099out_free_lport: 1100 /* free only if we're going to fail */ 1101 if (ret) 1102 kfree(lport); 1103 1104 return ret ? ret : count; 1105} 1106 1107 1108static void 1109__unlink_local_port(struct fcloop_lport *lport) 1110{ 1111 list_del(&lport->lport_list); 1112} 1113 1114static int 1115__wait_localport_unreg(struct fcloop_lport *lport) 1116{ 1117 int ret; 1118 1119 init_completion(&lport->unreg_done); 1120 1121 ret = nvme_fc_unregister_localport(lport->localport); 1122 1123 wait_for_completion(&lport->unreg_done); 1124 1125 kfree(lport); 1126 1127 return ret; 1128} 1129 1130 1131static ssize_t 1132fcloop_delete_local_port(struct device *dev, struct device_attribute *attr, 1133 const char *buf, size_t count) 1134{ 1135 struct fcloop_lport *tlport, *lport = NULL; 1136 u64 nodename, portname; 1137 unsigned long flags; 1138 int ret; 1139 1140 ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf); 1141 if (ret) 1142 return ret; 1143 1144 spin_lock_irqsave(&fcloop_lock, flags); 1145 1146 list_for_each_entry(tlport, &fcloop_lports, lport_list) { 1147 if (tlport->localport->node_name == nodename && 1148 tlport->localport->port_name == portname) { 1149 lport = tlport; 1150 __unlink_local_port(lport); 1151 break; 1152 } 1153 } 1154 spin_unlock_irqrestore(&fcloop_lock, flags); 1155 1156 if (!lport) 1157 return -ENOENT; 1158 1159 ret = __wait_localport_unreg(lport); 1160 1161 return ret ? ret : count; 1162} 1163 1164static struct fcloop_nport * 1165fcloop_alloc_nport(const char *buf, size_t count, bool remoteport) 1166{ 1167 struct fcloop_nport *newnport, *nport = NULL; 1168 struct fcloop_lport *tmplport, *lport = NULL; 1169 struct fcloop_ctrl_options *opts; 1170 unsigned long flags; 1171 u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS; 1172 int ret; 1173 1174 opts = kzalloc(sizeof(*opts), GFP_KERNEL); 1175 if (!opts) 1176 return NULL; 1177 1178 ret = fcloop_parse_options(opts, buf); 1179 if (ret) 1180 goto out_free_opts; 1181 1182 /* everything there ? */ 1183 if ((opts->mask & opts_mask) != opts_mask) { 1184 ret = -EINVAL; 1185 goto out_free_opts; 1186 } 1187 1188 newnport = kzalloc(sizeof(*newnport), GFP_KERNEL); 1189 if (!newnport) 1190 goto out_free_opts; 1191 1192 INIT_LIST_HEAD(&newnport->nport_list); 1193 newnport->node_name = opts->wwnn; 1194 newnport->port_name = opts->wwpn; 1195 if (opts->mask & NVMF_OPT_ROLES) 1196 newnport->port_role = opts->roles; 1197 if (opts->mask & NVMF_OPT_FCADDR) 1198 newnport->port_id = opts->fcaddr; 1199 kref_init(&newnport->ref); 1200 1201 spin_lock_irqsave(&fcloop_lock, flags); 1202 1203 list_for_each_entry(tmplport, &fcloop_lports, lport_list) { 1204 if (tmplport->localport->node_name == opts->wwnn && 1205 tmplport->localport->port_name == opts->wwpn) 1206 goto out_invalid_opts; 1207 1208 if (tmplport->localport->node_name == opts->lpwwnn && 1209 tmplport->localport->port_name == opts->lpwwpn) 1210 lport = tmplport; 1211 } 1212 1213 if (remoteport) { 1214 if (!lport) 1215 goto out_invalid_opts; 1216 newnport->lport = lport; 1217 } 1218 1219 list_for_each_entry(nport, &fcloop_nports, nport_list) { 1220 if (nport->node_name == opts->wwnn && 1221 nport->port_name == opts->wwpn) { 1222 if ((remoteport && nport->rport) || 1223 (!remoteport && nport->tport)) { 1224 nport = NULL; 1225 goto out_invalid_opts; 1226 } 1227 1228 fcloop_nport_get(nport); 1229 1230 spin_unlock_irqrestore(&fcloop_lock, flags); 1231 1232 if (remoteport) 1233 nport->lport = lport; 1234 if (opts->mask & NVMF_OPT_ROLES) 1235 nport->port_role = opts->roles; 1236 if (opts->mask & NVMF_OPT_FCADDR) 1237 nport->port_id = opts->fcaddr; 1238 goto out_free_newnport; 1239 } 1240 } 1241 1242 list_add_tail(&newnport->nport_list, &fcloop_nports); 1243 1244 spin_unlock_irqrestore(&fcloop_lock, flags); 1245 1246 kfree(opts); 1247 return newnport; 1248 1249out_invalid_opts: 1250 spin_unlock_irqrestore(&fcloop_lock, flags); 1251out_free_newnport: 1252 kfree(newnport); 1253out_free_opts: 1254 kfree(opts); 1255 return nport; 1256} 1257 1258static ssize_t 1259fcloop_create_remote_port(struct device *dev, struct device_attribute *attr, 1260 const char *buf, size_t count) 1261{ 1262 struct nvme_fc_remote_port *remoteport; 1263 struct fcloop_nport *nport; 1264 struct fcloop_rport *rport; 1265 struct nvme_fc_port_info pinfo; 1266 int ret; 1267 1268 nport = fcloop_alloc_nport(buf, count, true); 1269 if (!nport) 1270 return -EIO; 1271 1272 memset(&pinfo, 0, sizeof(pinfo)); 1273 pinfo.node_name = nport->node_name; 1274 pinfo.port_name = nport->port_name; 1275 pinfo.port_role = nport->port_role; 1276 pinfo.port_id = nport->port_id; 1277 1278 ret = nvme_fc_register_remoteport(nport->lport->localport, 1279 &pinfo, &remoteport); 1280 if (ret || !remoteport) { 1281 fcloop_nport_put(nport); 1282 return ret; 1283 } 1284 1285 /* success */ 1286 rport = remoteport->private; 1287 rport->remoteport = remoteport; 1288 rport->targetport = (nport->tport) ? nport->tport->targetport : NULL; 1289 if (nport->tport) { 1290 nport->tport->remoteport = remoteport; 1291 nport->tport->lport = nport->lport; 1292 } 1293 rport->nport = nport; 1294 rport->lport = nport->lport; 1295 nport->rport = rport; 1296 spin_lock_init(&rport->lock); 1297 INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work); 1298 INIT_LIST_HEAD(&rport->ls_list); 1299 1300 return count; 1301} 1302 1303 1304static struct fcloop_rport * 1305__unlink_remote_port(struct fcloop_nport *nport) 1306{ 1307 struct fcloop_rport *rport = nport->rport; 1308 1309 if (rport && nport->tport) 1310 nport->tport->remoteport = NULL; 1311 nport->rport = NULL; 1312 1313 return rport; 1314} 1315 1316static int 1317__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport) 1318{ 1319 if (!rport) 1320 return -EALREADY; 1321 1322 return nvme_fc_unregister_remoteport(rport->remoteport); 1323} 1324 1325static ssize_t 1326fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr, 1327 const char *buf, size_t count) 1328{ 1329 struct fcloop_nport *nport = NULL, *tmpport; 1330 static struct fcloop_rport *rport; 1331 u64 nodename, portname; 1332 unsigned long flags; 1333 int ret; 1334 1335 ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf); 1336 if (ret) 1337 return ret; 1338 1339 spin_lock_irqsave(&fcloop_lock, flags); 1340 1341 list_for_each_entry(tmpport, &fcloop_nports, nport_list) { 1342 if (tmpport->node_name == nodename && 1343 tmpport->port_name == portname && tmpport->rport) { 1344 nport = tmpport; 1345 rport = __unlink_remote_port(nport); 1346 break; 1347 } 1348 } 1349 1350 spin_unlock_irqrestore(&fcloop_lock, flags); 1351 1352 if (!nport) 1353 return -ENOENT; 1354 1355 ret = __remoteport_unreg(nport, rport); 1356 1357 return ret ? ret : count; 1358} 1359 1360static ssize_t 1361fcloop_create_target_port(struct device *dev, struct device_attribute *attr, 1362 const char *buf, size_t count) 1363{ 1364 struct nvmet_fc_target_port *targetport; 1365 struct fcloop_nport *nport; 1366 struct fcloop_tport *tport; 1367 struct nvmet_fc_port_info tinfo; 1368 int ret; 1369 1370 nport = fcloop_alloc_nport(buf, count, false); 1371 if (!nport) 1372 return -EIO; 1373 1374 tinfo.node_name = nport->node_name; 1375 tinfo.port_name = nport->port_name; 1376 tinfo.port_id = nport->port_id; 1377 1378 ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL, 1379 &targetport); 1380 if (ret) { 1381 fcloop_nport_put(nport); 1382 return ret; 1383 } 1384 1385 /* success */ 1386 tport = targetport->private; 1387 tport->targetport = targetport; 1388 tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL; 1389 if (nport->rport) 1390 nport->rport->targetport = targetport; 1391 tport->nport = nport; 1392 tport->lport = nport->lport; 1393 nport->tport = tport; 1394 spin_lock_init(&tport->lock); 1395 INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work); 1396 INIT_LIST_HEAD(&tport->ls_list); 1397 1398 return count; 1399} 1400 1401 1402static struct fcloop_tport * 1403__unlink_target_port(struct fcloop_nport *nport) 1404{ 1405 struct fcloop_tport *tport = nport->tport; 1406 1407 if (tport && nport->rport) 1408 nport->rport->targetport = NULL; 1409 nport->tport = NULL; 1410 1411 return tport; 1412} 1413 1414static int 1415__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport) 1416{ 1417 if (!tport) 1418 return -EALREADY; 1419 1420 return nvmet_fc_unregister_targetport(tport->targetport); 1421} 1422 1423static ssize_t 1424fcloop_delete_target_port(struct device *dev, struct device_attribute *attr, 1425 const char *buf, size_t count) 1426{ 1427 struct fcloop_nport *nport = NULL, *tmpport; 1428 struct fcloop_tport *tport = NULL; 1429 u64 nodename, portname; 1430 unsigned long flags; 1431 int ret; 1432 1433 ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf); 1434 if (ret) 1435 return ret; 1436 1437 spin_lock_irqsave(&fcloop_lock, flags); 1438 1439 list_for_each_entry(tmpport, &fcloop_nports, nport_list) { 1440 if (tmpport->node_name == nodename && 1441 tmpport->port_name == portname && tmpport->tport) { 1442 nport = tmpport; 1443 tport = __unlink_target_port(nport); 1444 break; 1445 } 1446 } 1447 1448 spin_unlock_irqrestore(&fcloop_lock, flags); 1449 1450 if (!nport) 1451 return -ENOENT; 1452 1453 ret = __targetport_unreg(nport, tport); 1454 1455 return ret ? ret : count; 1456} 1457 1458 1459static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port); 1460static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port); 1461static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port); 1462static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port); 1463static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port); 1464static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port); 1465 1466static struct attribute *fcloop_dev_attrs[] = { 1467 &dev_attr_add_local_port.attr, 1468 &dev_attr_del_local_port.attr, 1469 &dev_attr_add_remote_port.attr, 1470 &dev_attr_del_remote_port.attr, 1471 &dev_attr_add_target_port.attr, 1472 &dev_attr_del_target_port.attr, 1473 NULL 1474}; 1475 1476static struct attribute_group fclopp_dev_attrs_group = { 1477 .attrs = fcloop_dev_attrs, 1478}; 1479 1480static const struct attribute_group *fcloop_dev_attr_groups[] = { 1481 &fclopp_dev_attrs_group, 1482 NULL, 1483}; 1484 1485static struct class *fcloop_class; 1486static struct device *fcloop_device; 1487 1488 1489static int __init fcloop_init(void) 1490{ 1491 int ret; 1492 1493 fcloop_class = class_create(THIS_MODULE, "fcloop"); 1494 if (IS_ERR(fcloop_class)) { 1495 pr_err("couldn't register class fcloop\n"); 1496 ret = PTR_ERR(fcloop_class); 1497 return ret; 1498 } 1499 1500 fcloop_device = device_create_with_groups( 1501 fcloop_class, NULL, MKDEV(0, 0), NULL, 1502 fcloop_dev_attr_groups, "ctl"); 1503 if (IS_ERR(fcloop_device)) { 1504 pr_err("couldn't create ctl device!\n"); 1505 ret = PTR_ERR(fcloop_device); 1506 goto out_destroy_class; 1507 } 1508 1509 get_device(fcloop_device); 1510 1511 return 0; 1512 1513out_destroy_class: 1514 class_destroy(fcloop_class); 1515 return ret; 1516} 1517 1518static void __exit fcloop_exit(void) 1519{ 1520 struct fcloop_lport *lport; 1521 struct fcloop_nport *nport; 1522 struct fcloop_tport *tport; 1523 struct fcloop_rport *rport; 1524 unsigned long flags; 1525 int ret; 1526 1527 spin_lock_irqsave(&fcloop_lock, flags); 1528 1529 for (;;) { 1530 nport = list_first_entry_or_null(&fcloop_nports, 1531 typeof(*nport), nport_list); 1532 if (!nport) 1533 break; 1534 1535 tport = __unlink_target_port(nport); 1536 rport = __unlink_remote_port(nport); 1537 1538 spin_unlock_irqrestore(&fcloop_lock, flags); 1539 1540 ret = __targetport_unreg(nport, tport); 1541 if (ret) 1542 pr_warn("%s: Failed deleting target port\n", __func__); 1543 1544 ret = __remoteport_unreg(nport, rport); 1545 if (ret) 1546 pr_warn("%s: Failed deleting remote port\n", __func__); 1547 1548 spin_lock_irqsave(&fcloop_lock, flags); 1549 } 1550 1551 for (;;) { 1552 lport = list_first_entry_or_null(&fcloop_lports, 1553 typeof(*lport), lport_list); 1554 if (!lport) 1555 break; 1556 1557 __unlink_local_port(lport); 1558 1559 spin_unlock_irqrestore(&fcloop_lock, flags); 1560 1561 ret = __wait_localport_unreg(lport); 1562 if (ret) 1563 pr_warn("%s: Failed deleting local port\n", __func__); 1564 1565 spin_lock_irqsave(&fcloop_lock, flags); 1566 } 1567 1568 spin_unlock_irqrestore(&fcloop_lock, flags); 1569 1570 put_device(fcloop_device); 1571 1572 device_destroy(fcloop_class, MKDEV(0, 0)); 1573 class_destroy(fcloop_class); 1574} 1575 1576module_init(fcloop_init); 1577module_exit(fcloop_exit); 1578 1579MODULE_LICENSE("GPL v2"); 1580