// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * This file contains the TCM implementation using the v4 configfs fabric
 * infrastructure for QLogic target mode HBAs.
 *
 * (c) Copyright 2010-2013 Datera, Inc.
 *
 * Author: Nicholas A. Bellinger <nab@daterainc.com>
 *
 * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contain code from
 * the TCM_FC / Open-FCoE.org fabric module.
 *
 * Copyright (c) 2010 Cisco Systems, Inc
 *
 ****************************************************************************/

#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/vmalloc.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <asm/unaligned.h>
#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"
#include "tcm_qla2xxx.h"

static struct workqueue_struct *tcm_qla2xxx_free_wq;

/*
 * Parse WWN.
 * If strict, we require lower-case hex and colon separators to be sure
 * the name is the same as what would be generated by ft_format_wwn()
 * so the name and wwn are mapped one-to-one.
 */
static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict)
{
	const char *cp;
	char c;
	u32 nibble;
	u32 byte = 0;
	u32 pos = 0;
	u32 err;

	*wwn = 0;
	for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) {
		c = *cp;
		if (c == '\n' && cp[1] == '\0')
			continue;
		if (strict && pos++ == 2 && byte++ < 7) {
			pos = 0;
			if (c == ':')
				continue;
			err = 1;
			goto fail;
		}
		if (c == '\0') {
			err = 2;
			if (strict && byte != 8)
				goto fail;
			return cp - name;
		}
		err = 3;
		if (isdigit(c))
			nibble = c - '0';
		else if (isxdigit(c) && (islower(c) || !strict))
			nibble = tolower(c) - 'a' + 10;
		else
			goto fail;
		*wwn = (*wwn << 4) | nibble;
	}
	err = 4;
fail:
	pr_debug("err %u len %zu pos %u byte %u\n",
		 err, cp - name, pos, byte);
	return -1;
}

static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn)
{
	u8 b[8];

	put_unaligned_be64(wwn, b);
	return snprintf(buf, len,
		"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
		b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
}

/*
 * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
 */
static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm)
{
	unsigned int i, j;
	u8 wwn[8];

	memset(wwn, 0, sizeof(wwn));

	/* Validate and store the new name */
	for (i = 0, j = 0; i < 16; i++) {
		int value;

		value = hex_to_bin(*ns++);
		if (value >= 0)
			j = (j << 4) | value;
		else
			return -EINVAL;

		if (i % 2) {
			wwn[i/2] = j & 0xff;
			j = 0;
		}
	}

	*nm = wwn_to_u64(wwn);
	return 0;
}

/*
 * This parsing logic follows drivers/scsi/scsi_transport_fc.c:
 * store_fc_host_vport_create()
 */
static int tcm_qla2xxx_npiv_parse_wwn(
	const char *name,
	size_t count,
	u64 *wwpn,
	u64 *wwnn)
{
	unsigned int cnt = count;
	int rc;

	*wwpn = 0;
	*wwnn = 0;

	/* count may include a LF at end of string */
	if (name[cnt-1] == '\n' || name[cnt-1] == 0)
		cnt--;

	/* validate we have enough characters for WWPN */
	if ((cnt != (16+1+16)) || (name[16] != ':'))
		return -EINVAL;

	rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn);
	if (rc != 0)
		return rc;

	rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn);
	if (rc != 0)
		return rc;

	return 0;
}

static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;

	return lport->lport_naa_name;
}

static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	return tpg->lport_tpgt;
}

static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->tpg_attrib.generate_node_acls;
}

static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->tpg_attrib.cache_dynamic_acls;
}

static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->tpg_attrib.demo_mode_write_protect;
}

static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->tpg_attrib.prod_mode_write_protect;
}

static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->tpg_attrib.demo_mode_login_only;
}

static int tcm_qla2xxx_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->tpg_attrib.fabric_prot_type;
}

static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->lport_tpgt;
}

static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
{
	struct qla_tgt_mgmt_cmd *mcmd = container_of(work,
			struct qla_tgt_mgmt_cmd, free_work);

	transport_generic_free_cmd(&mcmd->se_cmd, 0);
}

/*
 * Called from qla_target_template->free_mcmd(), and will call
 * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops
 * release callback. qla_hw_data->hardware_lock is expected to be held
 */
static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	if (!mcmd)
		return;
	INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
	queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
}

static void tcm_qla2xxx_complete_free(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);

	cmd->cmd_in_wq = 0;

	WARN_ON(cmd->trc_flags & TRC_CMD_FREE);

	/* To do: protect all tgt_counters manipulations with proper locking. */
	cmd->qpair->tgt_counters.qla_core_ret_sta_ctio++;
	cmd->trc_flags |= TRC_CMD_FREE;
	cmd->cmd_sent_to_fw = 0;

	transport_generic_free_cmd(&cmd->se_cmd, 0);
}

static struct qla_tgt_cmd *tcm_qla2xxx_get_cmd(struct fc_port *sess)
{
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_cmd *cmd;
	int tag, cpu;

	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
	if (tag < 0)
		return NULL;

	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
	memset(cmd, 0, sizeof(struct qla_tgt_cmd));
	cmd->se_cmd.map_tag = tag;
	cmd->se_cmd.map_cpu = cpu;

	return cmd;
}

static void tcm_qla2xxx_rel_cmd(struct qla_tgt_cmd *cmd)
{
	target_free_tag(cmd->sess->se_sess, &cmd->se_cmd);
}

/*
 * Called from qla_target_template->free_cmd(), and will call
 * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
 * release callback. qla_hw_data->hardware_lock is expected to be held
 */
static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
{
	cmd->qpair->tgt_counters.core_qla_free_cmd++;
	cmd->cmd_in_wq = 1;

	WARN_ON(cmd->trc_flags & TRC_CMD_DONE);
	cmd->trc_flags |= TRC_CMD_DONE;

	INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
	queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
}

/*
 * Called from struct target_core_fabric_ops->check_stop_free() context
 */
static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd;

	if ((se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) == 0) {
		cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
		cmd->trc_flags |= TRC_CMD_CHK_STOP;
	}

	return target_put_sess_cmd(se_cmd);
}

/* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying
 * fabric descriptor @se_cmd command to release
 */
static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd;

	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
		struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
				struct qla_tgt_mgmt_cmd, se_cmd);
		qlt_free_mcmd(mcmd);
		return;
	}
	cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);

	if (WARN_ON(cmd->cmd_sent_to_fw))
		return;

	qlt_free_cmd(cmd);
}

static void tcm_qla2xxx_release_session(struct kref *kref)
{
	struct fc_port *sess = container_of(kref,
			struct fc_port, sess_kref);

	qlt_unreg_sess(sess);
}

static void tcm_qla2xxx_put_sess(struct fc_port *sess)
{
	if (!sess)
		return;

	kref_put(&sess->sess_kref, tcm_qla2xxx_release_session);
}

static void tcm_qla2xxx_close_session(struct se_session *se_sess)
{
	struct fc_port *sess = se_sess->fabric_sess_ptr;
	struct scsi_qla_host *vha;
	unsigned long flags;

	BUG_ON(!sess);
	vha = sess->vha;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	target_sess_cmd_list_set_waiting(se_sess);
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sess->explicit_logout = 1;
	tcm_qla2xxx_put_sess(sess);
}

static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);

	if (cmd->aborted) {
		/* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
		 * can get ahead of this cmd. tcm_qla2xxx_aborted_task
		 * has already kicked off the free.
		 */
		pr_debug("write_pending aborted cmd[%p] refcount %d "
			"transport_state %x, t_state %x, se_cmd_flags %x\n",
			cmd, kref_read(&cmd->se_cmd.cmd_kref),
			cmd->se_cmd.transport_state,
			cmd->se_cmd.t_state,
			cmd->se_cmd.se_cmd_flags);
		transport_generic_request_failure(&cmd->se_cmd,
			TCM_CHECK_CONDITION_ABORT_CMD);
		return 0;
	}
	cmd->trc_flags |= TRC_XFR_RDY;
	cmd->bufflen = se_cmd->data_length;
	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);

	cmd->sg_cnt = se_cmd->t_data_nents;
	cmd->sg = se_cmd->t_data_sg;

	cmd->prot_sg_cnt = se_cmd->t_prot_nents;
	cmd->prot_sg = se_cmd->t_prot_sg;
	cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size;
	se_cmd->pi_err = 0;

	/*
	 * qla_target.c:qlt_rdy_to_xfer() will call dma_map_sg() to setup
	 * the SGL mappings into PCIe memory for incoming FCP WRITE data.
	 */
	return qlt_rdy_to_xfer(cmd);
}

static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
{
	if (!(se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);
		return cmd->state;
	}

	return 0;
}

/*
 * Called from process context in qla_target.c:qlt_do_work() code
 */
static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
	unsigned char *cdb, uint32_t data_length, int fcp_task_attr,
	int data_dir, int bidi)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct se_session *se_sess;
	struct fc_port *sess;
#ifdef CONFIG_TCM_QLA2XXX_DEBUG
	struct se_portal_group *se_tpg;
	struct tcm_qla2xxx_tpg *tpg;
#endif
	int flags = TARGET_SCF_ACK_KREF;

	if (bidi)
		flags |= TARGET_SCF_BIDI_OP;

	if (se_cmd->cpuid != WORK_CPU_UNBOUND)
		flags |= TARGET_SCF_USE_CPUID;

	sess = cmd->sess;
	if (!sess) {
		pr_err("Unable to locate struct fc_port from qla_tgt_cmd\n");
		return -EINVAL;
	}

	se_sess = sess->se_sess;
	if (!se_sess) {
		pr_err("Unable to locate active struct se_session\n");
		return -EINVAL;
	}

#ifdef CONFIG_TCM_QLA2XXX_DEBUG
	se_tpg = se_sess->se_tpg;
	tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg);
	if (unlikely(tpg->tpg_attrib.jam_host)) {
		/* return without running target_submit_cmd, discarding the command */
		return 0;
	}
#endif

	cmd->qpair->tgt_counters.qla_core_sbt_cmd++;
	return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
				 cmd->unpacked_lun, data_length, fcp_task_attr,
				 data_dir, flags);
}

static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);

	/*
	 * Ensure that the complete FCP WRITE payload has been received.
	 * Otherwise return an exception via CHECK_CONDITION status.
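	 * If cmd->write_data_transferred is not set, any dif_err_code is
	 * translated below into the matching TCM protection sense key and
	 * the command is failed rather than executed.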
	 */
	cmd->cmd_in_wq = 0;
	cmd->cmd_sent_to_fw = 0;
	if (cmd->aborted) {
		transport_generic_request_failure(&cmd->se_cmd,
			TCM_CHECK_CONDITION_ABORT_CMD);
		return;
	}

	cmd->qpair->tgt_counters.qla_core_ret_ctio++;
	if (!cmd->write_data_transferred) {
		switch (cmd->dif_err_code) {
		case DIF_ERR_GRD:
			cmd->se_cmd.pi_err =
			    TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
			break;
		case DIF_ERR_REF:
			cmd->se_cmd.pi_err =
			    TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
			break;
		case DIF_ERR_APP:
			cmd->se_cmd.pi_err =
			    TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
			break;
		case DIF_ERR_NONE:
		default:
			break;
		}

		if (cmd->se_cmd.pi_err)
			transport_generic_request_failure(&cmd->se_cmd,
				cmd->se_cmd.pi_err);
		else
			transport_generic_request_failure(&cmd->se_cmd,
				TCM_CHECK_CONDITION_ABORT_CMD);

		return;
	}

	return target_execute_cmd(&cmd->se_cmd);
}

/*
 * Called from qla_target.c:qlt_do_ctio_completion()
 */
static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
{
	cmd->trc_flags |= TRC_DATA_IN;
	cmd->cmd_in_wq = 1;
	INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
	queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
}

static int tcm_qla2xxx_chk_dif_tags(uint32_t tag)
{
	return 0;
}

static int tcm_qla2xxx_dif_tags(struct qla_tgt_cmd *cmd,
				uint16_t *pfw_prot_opts)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;

	if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
		*pfw_prot_opts |= PO_DISABLE_GUARD_CHECK;

	if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG))
		*pfw_prot_opts |= PO_DIS_APP_TAG_VALD;

	return 0;
}

/*
 * Called from qla_target.c:qlt_issue_task_mgmt()
 */
static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun,
	uint16_t tmr_func, uint32_t tag)
{
	struct fc_port *sess = mcmd->sess;
	struct se_cmd *se_cmd = &mcmd->se_cmd;
	int transl_tmr_func = 0;
	int flags = TARGET_SCF_ACK_KREF;

	switch (tmr_func) {
	case QLA_TGT_ABTS:
		pr_debug("%ld: ABTS received\n", sess->vha->host_no);
		transl_tmr_func = TMR_ABORT_TASK;
		flags |= TARGET_SCF_LOOKUP_LUN_FROM_TAG;
		break;
	case QLA_TGT_2G_ABORT_TASK:
		pr_debug("%ld: 2G Abort Task received\n", sess->vha->host_no);
		transl_tmr_func = TMR_ABORT_TASK;
		break;
	case QLA_TGT_CLEAR_ACA:
		pr_debug("%ld: CLEAR_ACA received\n", sess->vha->host_no);
		transl_tmr_func = TMR_CLEAR_ACA;
		break;
	case QLA_TGT_TARGET_RESET:
		pr_debug("%ld: TARGET_RESET received\n", sess->vha->host_no);
		transl_tmr_func = TMR_TARGET_WARM_RESET;
		break;
	case QLA_TGT_LUN_RESET:
		pr_debug("%ld: LUN_RESET received\n", sess->vha->host_no);
		transl_tmr_func = TMR_LUN_RESET;
		break;
	case QLA_TGT_CLEAR_TS:
		pr_debug("%ld: CLEAR_TS received\n", sess->vha->host_no);
		transl_tmr_func = TMR_CLEAR_TASK_SET;
		break;
	case QLA_TGT_ABORT_TS:
		pr_debug("%ld: ABORT_TS received\n", sess->vha->host_no);
		transl_tmr_func = TMR_ABORT_TASK_SET;
		break;
	default:
		pr_debug("%ld: Unknown task mgmt fn 0x%x\n",
			sess->vha->host_no, tmr_func);
		return -ENOSYS;
	}

	return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
	    transl_tmr_func, GFP_ATOMIC, tag, flags);
}

static struct qla_tgt_cmd *tcm_qla2xxx_find_cmd_by_tag(struct fc_port *sess,
	uint64_t tag)
{
	struct qla_tgt_cmd *cmd = NULL;
	struct se_cmd *secmd;
	unsigned long flags;

	if (!sess->se_sess)
		return NULL;

	spin_lock_irqsave(&sess->se_sess->sess_cmd_lock, flags);
	list_for_each_entry(secmd, &sess->se_sess->sess_cmd_list, se_cmd_list) {
		/* skip task management functions, including tmr->task_cmd */
		if (secmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
			continue;

		if (secmd->tag == tag) {
			cmd = container_of(secmd, struct qla_tgt_cmd, se_cmd);
			break;
		}
	}
	spin_unlock_irqrestore(&sess->se_sess->sess_cmd_lock, flags);

	return cmd;
}

static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);

	if (cmd->aborted) {
		/* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
		 * can get ahead of this cmd; tcm_qla2xxx_aborted_task
		 * has already kicked off the free.
		 */
		pr_debug("queue_data_in aborted cmd[%p] refcount %d "
			"transport_state %x, t_state %x, se_cmd_flags %x\n",
			cmd, kref_read(&cmd->se_cmd.cmd_kref),
			cmd->se_cmd.transport_state,
			cmd->se_cmd.t_state,
			cmd->se_cmd.se_cmd_flags);
		return 0;
	}

	cmd->trc_flags |= TRC_XMIT_DATA;
	cmd->bufflen = se_cmd->data_length;
	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);

	cmd->sg_cnt = se_cmd->t_data_nents;
	cmd->sg = se_cmd->t_data_sg;
	cmd->offset = 0;

	cmd->prot_sg_cnt = se_cmd->t_prot_nents;
	cmd->prot_sg = se_cmd->t_prot_sg;
	cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size;
	se_cmd->pi_err = 0;

	/*
	 * Now queue completed DATA_IN to the qla2xxx LLD and response ring
	 */
	return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS,
				se_cmd->scsi_status);
}

static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);
	int xmit_type = QLA_TGT_XMIT_STATUS;

	if (cmd->aborted) {
		/*
		 * Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
		 * can get ahead of this cmd; tcm_qla2xxx_aborted_task
		 * has already kicked off the free.
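		 * Returning 0 below simply drops this status attempt; the
		 * abort path owns freeing the command.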
		 */
		pr_debug(
		    "queue_status aborted cmd[%p] refcount %d transport_state %x, t_state %x, se_cmd_flags %x\n",
		    cmd, kref_read(&cmd->se_cmd.cmd_kref),
		    cmd->se_cmd.transport_state, cmd->se_cmd.t_state,
		    cmd->se_cmd.se_cmd_flags);
		return 0;
	}
	cmd->bufflen = se_cmd->data_length;
	cmd->sg = NULL;
	cmd->sg_cnt = 0;
	cmd->offset = 0;
	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
	cmd->trc_flags |= TRC_XMIT_STATUS;

	if (se_cmd->data_direction == DMA_FROM_DEVICE) {
		/*
		 * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
		 * for the qlt_xmit_response() LLD code
		 */
		if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT;
			se_cmd->residual_count = 0;
		}
		se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
		se_cmd->residual_count += se_cmd->data_length;

		cmd->bufflen = 0;
	}
	/*
	 * Now queue status response to qla2xxx LLD code and response ring
	 */
	return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
}

static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
				struct qla_tgt_mgmt_cmd, se_cmd);

	pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n",
			mcmd, se_tmr->function, se_tmr->response);
	/*
	 * Do translation between TCM TM response codes and
	 * QLA2xxx FC TM response codes.
	 */
	switch (se_tmr->response) {
	case TMR_FUNCTION_COMPLETE:
		mcmd->fc_tm_rsp = FC_TM_SUCCESS;
		break;
	case TMR_TASK_DOES_NOT_EXIST:
		mcmd->fc_tm_rsp = FC_TM_BAD_CMD;
		break;
	case TMR_FUNCTION_REJECTED:
		mcmd->fc_tm_rsp = FC_TM_REJECT;
		break;
	case TMR_LUN_DOES_NOT_EXIST:
	default:
		mcmd->fc_tm_rsp = FC_TM_FAILED;
		break;
	}
	/*
	 * Queue the TM response to QLA2xxx LLD to build a
	 * CTIO response packet.
	 */
	qlt_xmit_tm_rsp(mcmd);
}

static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);

	if (qlt_abort_cmd(cmd))
		return;
}

static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
			struct tcm_qla2xxx_nacl *, struct fc_port *);
/*
 * Expected to be called with struct qla_hw_data->tgt.sess_lock held
 */
static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct fc_port *sess)
{
	struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
	struct se_portal_group *se_tpg = se_nacl->se_tpg;
	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
				struct tcm_qla2xxx_lport, lport_wwn);
	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
			struct tcm_qla2xxx_nacl, se_node_acl);
	void *node;

	pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);

	node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
	if (WARN_ON(node && (node != se_nacl))) {
		/*
		 * The nacl no longer matches what we think it should be.
		 * Most likely a new dynamic acl has been added while
		 * someone dropped the hardware lock.  It clearly is a
		 * bug elsewhere, but this bit can't make things worse.
		 * Put the unexpected node back so the fcport_map stays
		 * consistent for whoever owns it now.
		 */
		btree_insert32(&lport->lport_fcport_map, nacl->nport_id,
			       node, GFP_ATOMIC);
	}

	pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
	    se_nacl, nacl->nport_wwnn, nacl->nport_id);
	/*
	 * Now clear the se_nacl and session pointers from our HW lport lookup
	 * table mapping for this initiator's fabric S_ID and LOOP_ID entries.
	 *
	 * This is done ahead of callbacks into tcm_qla2xxx_free_session() ->
	 * target_wait_for_sess_cmds() before the session waits for outstanding
	 * I/O to complete, to avoid a race between session shutdown execution
	 * and incoming ATIOs or TMRs picking up a stale se_node_acl reference.
	 */
	tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
}

static void tcm_qla2xxx_shutdown_sess(struct fc_port *sess)
{
	target_sess_cmd_list_set_waiting(sess->se_sess);
}

static int tcm_qla2xxx_init_nodeacl(struct se_node_acl *se_nacl,
		const char *name)
{
	struct tcm_qla2xxx_nacl *nacl =
		container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
	u64 wwnn;

	if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
		return -EINVAL;

	nacl->nport_wwnn = wwnn;
	tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);

	return 0;
}

/* Start items for tcm_qla2xxx_tpg_attrib_cit */

#define DEF_QLA_TPG_ATTRIB(name)					\
									\
static ssize_t tcm_qla2xxx_tpg_attrib_##name##_show(			\
		struct config_item *item, char *page)			\
{									\
	struct se_portal_group *se_tpg = attrib_to_tpg(item);		\
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,		\
			struct tcm_qla2xxx_tpg, se_tpg);		\
									\
	return sprintf(page, "%d\n", tpg->tpg_attrib.name);		\
}									\
									\
static ssize_t tcm_qla2xxx_tpg_attrib_##name##_store(			\
		struct config_item *item, const char *page, size_t count) \
{									\
	struct se_portal_group *se_tpg = attrib_to_tpg(item);		\
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,		\
			struct tcm_qla2xxx_tpg, se_tpg);		\
	struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib;		\
	unsigned long val;						\
	int ret;							\
									\
	ret = kstrtoul(page, 0, &val);					\
	if (ret < 0) {							\
		pr_err("kstrtoul() failed with ret: %d\n", ret);	\
		return -EINVAL;						\
	}								\
									\
	if ((val != 0) && (val != 1)) {					\
		pr_err("Illegal boolean value %lu\n", val);		\
		return -EINVAL;						\
	}								\
									\
	a->name = val;							\
									\
	return count;							\
}									\
CONFIGFS_ATTR(tcm_qla2xxx_tpg_attrib_, name)

DEF_QLA_TPG_ATTRIB(generate_node_acls);
DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
DEF_QLA_TPG_ATTRIB(demo_mode_login_only);
#ifdef CONFIG_TCM_QLA2XXX_DEBUG
DEF_QLA_TPG_ATTRIB(jam_host);
#endif

static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
	&tcm_qla2xxx_tpg_attrib_attr_generate_node_acls,
	&tcm_qla2xxx_tpg_attrib_attr_cache_dynamic_acls,
	&tcm_qla2xxx_tpg_attrib_attr_demo_mode_write_protect,
	&tcm_qla2xxx_tpg_attrib_attr_prod_mode_write_protect,
	&tcm_qla2xxx_tpg_attrib_attr_demo_mode_login_only,
#ifdef CONFIG_TCM_QLA2XXX_DEBUG
	&tcm_qla2xxx_tpg_attrib_attr_jam_host,
#endif
	NULL,
};

/* End items for tcm_qla2xxx_tpg_attrib_cit */

static ssize_t tcm_qla2xxx_tpg_enable_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
			struct tcm_qla2xxx_tpg, se_tpg);

	return snprintf(page, PAGE_SIZE, "%d\n",
			atomic_read(&tpg->lport_tpg_enabled));
}

static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct scsi_qla_host *vha = lport->qla_vha;
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
			struct tcm_qla2xxx_tpg, se_tpg);
	unsigned long op;
	int rc;

	rc = kstrtoul(page, 0, &op);
	if (rc < 0) {
		pr_err("kstrtoul() returned %d\n", rc);
		return -EINVAL;
	}
	if ((op != 1) && (op != 0)) {
		pr_err("Illegal value for tpg_enable: %lu\n", op);
		return -EINVAL;
	}
	if (op) {
		if (atomic_read(&tpg->lport_tpg_enabled))
			return -EEXIST;

		atomic_set(&tpg->lport_tpg_enabled, 1);
		qlt_enable_vha(vha);
	} else {
		if (!atomic_read(&tpg->lport_tpg_enabled))
			return count;

		atomic_set(&tpg->lport_tpg_enabled, 0);
		qlt_stop_phase1(vha->vha_tgt.qla_tgt);
		qlt_stop_phase2(vha->vha_tgt.qla_tgt);
	}

	return count;
}

static ssize_t tcm_qla2xxx_tpg_dynamic_sessions_show(struct config_item *item,
		char *page)
{
	return target_show_dynamic_sessions(to_tpg(item), page);
}

static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}
	if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}
	tpg->tpg_attrib.fabric_prot_type = val;

	return count;
}

static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type);
}

CONFIGFS_ATTR(tcm_qla2xxx_tpg_, enable);
CONFIGFS_ATTR_RO(tcm_qla2xxx_tpg_, dynamic_sessions);
CONFIGFS_ATTR(tcm_qla2xxx_tpg_, fabric_prot_type);

static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
	&tcm_qla2xxx_tpg_attr_enable,
	&tcm_qla2xxx_tpg_attr_dynamic_sessions,
	&tcm_qla2xxx_tpg_attr_fabric_prot_type,
	NULL,
};

static struct se_portal_group *tcm_qla2xxx_make_tpg(struct se_wwn *wwn,
						    const char *name)
{
	struct tcm_qla2xxx_lport *lport = container_of(wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct tcm_qla2xxx_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
		return ERR_PTR(-EINVAL);

	if (tpgt != 1) {
		pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");
		return ERR_PTR(-ENOSYS);
	}

	tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
		return ERR_PTR(-ENOMEM);
	}
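	/*
	 * Non-NPIV mode uses a single TPG (tpgt_1) bound to the physical HW
	 * port; the tpgt check above enforces this.
	 */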
	tpg->lport = lport;
	tpg->lport_tpgt = tpgt;
	/*
	 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
	 * NodeACLs
	 */
	tpg->tpg_attrib.generate_node_acls = 1;
	tpg->tpg_attrib.demo_mode_write_protect = 1;
	tpg->tpg_attrib.cache_dynamic_acls = 1;
	tpg->tpg_attrib.demo_mode_login_only = 1;
	tpg->tpg_attrib.jam_host = 0;

	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}

	lport->tpg_1 = tpg;

	return &tpg->se_tpg;
}

static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;
	struct scsi_qla_host *vha = lport->qla_vha;
	/*
	 * Call into qla_target.c LLD logic to shut down the active
	 * FC Nexuses and disable target mode operation for this qla_hw_data
	 */
	if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stop)
		qlt_stop_phase1(vha->vha_tgt.qla_tgt);

	core_tpg_deregister(se_tpg);
	/*
	 * Clear local TPG=1 pointer for non NPIV mode.
	 */
	lport->tpg_1 = NULL;
	kfree(tpg);
}

static ssize_t tcm_qla2xxx_npiv_tpg_enable_show(struct config_item *item,
		char *page)
{
	return tcm_qla2xxx_tpg_enable_show(item, page);
}

static ssize_t tcm_qla2xxx_npiv_tpg_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct scsi_qla_host *vha = lport->qla_vha;
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
			struct tcm_qla2xxx_tpg, se_tpg);
	unsigned long op;
	int rc;

	rc = kstrtoul(page, 0, &op);
	if (rc < 0) {
		pr_err("kstrtoul() returned %d\n", rc);
		return -EINVAL;
	}
	if ((op != 1) && (op != 0)) {
		pr_err("Illegal value for tpg_enable: %lu\n", op);
		return -EINVAL;
	}
	if (op) {
		if (atomic_read(&tpg->lport_tpg_enabled))
			return -EEXIST;

		atomic_set(&tpg->lport_tpg_enabled, 1);
		qlt_enable_vha(vha);
	} else {
		if (!atomic_read(&tpg->lport_tpg_enabled))
			return count;

		atomic_set(&tpg->lport_tpg_enabled, 0);
		qlt_stop_phase1(vha->vha_tgt.qla_tgt);
		qlt_stop_phase2(vha->vha_tgt.qla_tgt);
	}

	return count;
}

CONFIGFS_ATTR(tcm_qla2xxx_npiv_tpg_, enable);

static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = {
	&tcm_qla2xxx_npiv_tpg_attr_enable,
	NULL,
};

static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(struct se_wwn *wwn,
							 const char *name)
{
	struct tcm_qla2xxx_lport *lport = container_of(wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct tcm_qla2xxx_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
		return ERR_PTR(-EINVAL);

	tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
		return ERR_PTR(-ENOMEM);
	}
	tpg->lport = lport;
	tpg->lport_tpgt = tpgt;

	/*
	 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
	 * NodeACLs
	 */
	tpg->tpg_attrib.generate_node_acls = 1;
	tpg->tpg_attrib.demo_mode_write_protect = 1;
	tpg->tpg_attrib.cache_dynamic_acls = 1;
	tpg->tpg_attrib.demo_mode_login_only = 1;

	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}
	lport->tpg_1 = tpg;
	return &tpg->se_tpg;
}

/*
 * Expected to be called with struct qla_hw_data->tgt.sess_lock held
 */
static struct fc_port *tcm_qla2xxx_find_sess_by_s_id(scsi_qla_host_t *vha,
						     const be_id_t s_id)
{
	struct tcm_qla2xxx_lport *lport;
	struct se_node_acl *se_nacl;
	struct tcm_qla2xxx_nacl *nacl;
	u32 key;

	lport = vha->vha_tgt.target_lport_ptr;
	if (!lport) {
		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
		dump_stack();
		return NULL;
	}

	key = sid_to_key(s_id);
	pr_debug("find_sess_by_s_id: 0x%06x\n", key);

	se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
	if (!se_nacl) {
		pr_debug("Unable to locate s_id: 0x%06x\n", key);
		return NULL;
	}
	pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n",
	    se_nacl, se_nacl->initiatorname);

	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
	if (!nacl->fc_port) {
		pr_err("Unable to locate struct fc_port\n");
		return NULL;
	}

	return nacl->fc_port;
}

/*
 * Expected to be called with struct qla_hw_data->tgt.sess_lock held
 */
static void tcm_qla2xxx_set_sess_by_s_id(
	struct tcm_qla2xxx_lport *lport,
	struct se_node_acl *new_se_nacl,
	struct tcm_qla2xxx_nacl *nacl,
	struct se_session *se_sess,
	struct fc_port *fc_port,
	be_id_t s_id)
{
	u32 key;
	void *slot;
	int rc;

	key = sid_to_key(s_id);
	pr_debug("set_sess_by_s_id: %06x\n", key);

	slot = btree_lookup32(&lport->lport_fcport_map, key);
	if (!slot) {
		if (new_se_nacl) {
			pr_debug("Setting up new fc_port entry to new_se_nacl\n");
			nacl->nport_id = key;
			rc = btree_insert32(&lport->lport_fcport_map, key,
					new_se_nacl, GFP_ATOMIC);
			if (rc)
				printk(KERN_ERR "Unable to insert s_id into fcport_map: %06x\n",
				    (int)key);
		} else {
			pr_debug("Wiping nonexistent fc_port entry\n");
		}

		fc_port->se_sess = se_sess;
		nacl->fc_port = fc_port;
		return;
	}

	if (nacl->fc_port) {
		if (new_se_nacl == NULL) {
			pr_debug("Clearing existing nacl->fc_port and fc_port entry\n");
			btree_remove32(&lport->lport_fcport_map, key);
			nacl->fc_port = NULL;
			return;
		}
		pr_debug("Replacing existing nacl->fc_port and fc_port entry\n");
		btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
		fc_port->se_sess = se_sess;
		nacl->fc_port = fc_port;
		return;
	}

	if (new_se_nacl == NULL) {
		pr_debug("Clearing existing fc_port entry\n");
		btree_remove32(&lport->lport_fcport_map, key);
		return;
	}

	pr_debug("Replacing existing fc_port entry w/o active nacl->fc_port\n");
	btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
	fc_port->se_sess = se_sess;
	nacl->fc_port = fc_port;

	pr_debug("Setup nacl->fc_port %p by s_id for se_nacl: %p, initiatorname: %s\n",
	    nacl->fc_port, new_se_nacl, new_se_nacl->initiatorname);
}

/*
 * Expected to be called with struct qla_hw_data->tgt.sess_lock held
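 * Unlike the S_ID lookup above, this path indexes the flat
 * lport_loopid_map array directly by the 16-bit loop_id.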
 */
static struct fc_port *tcm_qla2xxx_find_sess_by_loop_id(
	scsi_qla_host_t *vha,
	const uint16_t loop_id)
{
	struct tcm_qla2xxx_lport *lport;
	struct se_node_acl *se_nacl;
	struct tcm_qla2xxx_nacl *nacl;
	struct tcm_qla2xxx_fc_loopid *fc_loopid;

	lport = vha->vha_tgt.target_lport_ptr;
	if (!lport) {
		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
		dump_stack();
		return NULL;
	}

	pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);

	fc_loopid = lport->lport_loopid_map + loop_id;
	se_nacl = fc_loopid->se_nacl;
	if (!se_nacl) {
		pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n",
		    loop_id);
		return NULL;
	}

	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);

	if (!nacl->fc_port) {
		pr_err("Unable to locate struct fc_port\n");
		return NULL;
	}

	return nacl->fc_port;
}

/*
 * Expected to be called with struct qla_hw_data->tgt.sess_lock held
 */
static void tcm_qla2xxx_set_sess_by_loop_id(
	struct tcm_qla2xxx_lport *lport,
	struct se_node_acl *new_se_nacl,
	struct tcm_qla2xxx_nacl *nacl,
	struct se_session *se_sess,
	struct fc_port *fc_port,
	uint16_t loop_id)
{
	struct se_node_acl *saved_nacl;
	struct tcm_qla2xxx_fc_loopid *fc_loopid;

	pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);

	fc_loopid = &((struct tcm_qla2xxx_fc_loopid *)
			lport->lport_loopid_map)[loop_id];

	saved_nacl = fc_loopid->se_nacl;
	if (!saved_nacl) {
		pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n");
		fc_loopid->se_nacl = new_se_nacl;
		if (fc_port->se_sess != se_sess)
			fc_port->se_sess = se_sess;
		if (nacl->fc_port != fc_port)
			nacl->fc_port = fc_port;
		return;
	}

	if (nacl->fc_port) {
		if (new_se_nacl == NULL) {
			pr_debug("Clearing nacl->fc_port and fc_loopid->se_nacl\n");
			fc_loopid->se_nacl = NULL;
			nacl->fc_port = NULL;
			return;
		}

		pr_debug("Replacing existing nacl->fc_port and fc_loopid->se_nacl\n");
		fc_loopid->se_nacl = new_se_nacl;
		if (fc_port->se_sess != se_sess)
			fc_port->se_sess = se_sess;
		if (nacl->fc_port != fc_port)
			nacl->fc_port = fc_port;
		return;
	}

	if (new_se_nacl == NULL) {
		pr_debug("Clearing fc_loopid->se_nacl\n");
		fc_loopid->se_nacl = NULL;
		return;
	}

	pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->fc_port\n");
	fc_loopid->se_nacl = new_se_nacl;
	if (fc_port->se_sess != se_sess)
		fc_port->se_sess = se_sess;
	if (nacl->fc_port != fc_port)
		nacl->fc_port = fc_port;

	pr_debug("Setup nacl->fc_port %p by loop_id for se_nacl: %p, initiatorname: %s\n",
	    nacl->fc_port, new_se_nacl, new_se_nacl->initiatorname);
}

/*
 * Should always be called with qla_hw_data->tgt.sess_lock held.
 * Clearing both the S_ID btree entry and the loop_id map entry keeps
 * stale lookups from resolving a session that is being torn down.
 */
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
		struct tcm_qla2xxx_nacl *nacl, struct fc_port *sess)
{
	struct se_session *se_sess = sess->se_sess;

	tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
				     sess, port_id_to_be_id(sess->d_id));
	tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
					sess, sess->loop_id);
}

static void tcm_qla2xxx_free_session(struct fc_port *sess)
{
	struct qla_tgt *tgt = sess->tgt;
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	struct se_session *se_sess;
	struct tcm_qla2xxx_lport *lport;

	BUG_ON(in_interrupt());

	se_sess = sess->se_sess;
	if (!se_sess) {
		pr_err("struct fc_port->se_sess is NULL\n");
		dump_stack();
		return;
	}

	lport = vha->vha_tgt.target_lport_ptr;
	if (!lport) {
		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
		dump_stack();
		return;
	}
	target_wait_for_sess_cmds(se_sess);

	target_remove_session(se_sess);
}

static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg,
				  struct se_session *se_sess, void *p)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;
	struct qla_hw_data *ha = lport->qla_vha->hw;
	struct se_node_acl *se_nacl = se_sess->se_node_acl;
	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
				struct tcm_qla2xxx_nacl, se_node_acl);
	struct fc_port *qlat_sess = p;
	uint16_t loop_id = qlat_sess->loop_id;
	unsigned long flags;

	/*
	 * And now setup se_nacl and session pointers into HW lport internal
	 * mappings for fabric S_ID and LOOP_ID.
	 */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess, qlat_sess,
				     port_id_to_be_id(qlat_sess->d_id));
	tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl,
					se_sess, qlat_sess, loop_id);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return 0;
}

/*
 * Called via qlt_create_sess():ha->tgt.tgt_ops->check_initiator_node_acl()
 * to locate struct se_node_acl
 */
static int tcm_qla2xxx_check_initiator_node_acl(
	scsi_qla_host_t *vha,
	unsigned char *fc_wwpn,
	struct fc_port *qlat_sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct tcm_qla2xxx_lport *lport;
	struct tcm_qla2xxx_tpg *tpg;
	struct se_session *se_sess;
	unsigned char port_name[36];
	int num_tags = (ha->cur_fw_xcb_count) ? ha->cur_fw_xcb_count :
		       TCM_QLA2XXX_DEFAULT_TAGS;

	lport = vha->vha_tgt.target_lport_ptr;
	if (!lport) {
		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
		dump_stack();
		return -EINVAL;
	}
	/*
	 * Locate the TPG=1 reference.
	 */
	tpg = lport->tpg_1;
	if (!tpg) {
		pr_err("Unable to locate struct tcm_qla2xxx_lport->tpg_1\n");
		return -EINVAL;
	}
	/*
	 * Format the FCP Initiator port_name into colon-separated values to
	 * match the format used by tcm_qla2xxx explicit ConfigFS NodeACLs.
	 */
	memset(&port_name, 0, 36);
	snprintf(port_name, sizeof(port_name), "%8phC", fc_wwpn);
	/*
	 * Locate our struct se_node_acl either from an explicit NodeACL
	 * created via ConfigFS, or via running in TPG demo mode.
	 *
	 * target_setup_session() below allocates the per-session tag pool
	 * (num_tags entries of struct qla_tgt_cmd) and calls
	 * tcm_qla2xxx_session_cb() to wire up the S_ID and loop_id lookups.
	 */
	se_sess = target_setup_session(&tpg->se_tpg, num_tags,
				       sizeof(struct qla_tgt_cmd),
				       TARGET_PROT_ALL, port_name,
				       qlat_sess, tcm_qla2xxx_session_cb);
	if (IS_ERR(se_sess))
		return PTR_ERR(se_sess);

	return 0;
}

static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id,
				    uint16_t loop_id, bool conf_compl_supported)
{
	struct qla_tgt *tgt = sess->tgt;
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	struct tcm_qla2xxx_lport *lport = vha->vha_tgt.target_lport_ptr;
	struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
			struct tcm_qla2xxx_nacl, se_node_acl);
	u32 key;

	if (sess->loop_id != loop_id || sess->d_id.b24 != s_id.b24)
		pr_info("Updating session %p from port %8phC loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n",
		    sess, sess->port_name,
		    sess->loop_id, loop_id, sess->d_id.b.domain,
		    sess->d_id.b.area, sess->d_id.b.al_pa, s_id.b.domain,
		    s_id.b.area, s_id.b.al_pa);

	if (sess->loop_id != loop_id) {
		/*
		 * Because we can shuffle loop IDs around and we
		 * update different sessions non-atomically, we might
		 * have overwritten this session's old loop ID
		 * already, and we might end up overwriting some other
		 * session that will be updated later.  So we have to
		 * be extra careful and we can't warn about those things...
		 */
		if (lport->lport_loopid_map[sess->loop_id].se_nacl == se_nacl)
			lport->lport_loopid_map[sess->loop_id].se_nacl = NULL;

		lport->lport_loopid_map[loop_id].se_nacl = se_nacl;

		sess->loop_id = loop_id;
	}

	if (sess->d_id.b24 != s_id.b24) {
		key = (((u32) sess->d_id.b.domain << 16) |
		       ((u32) sess->d_id.b.area   <<  8) |
		       ((u32) sess->d_id.b.al_pa));

		if (btree_lookup32(&lport->lport_fcport_map, key))
			WARN(btree_remove32(&lport->lport_fcport_map, key) !=
			    se_nacl, "Found wrong se_nacl when updating s_id %x:%x:%x\n",
			    sess->d_id.b.domain, sess->d_id.b.area,
			    sess->d_id.b.al_pa);
		else
			WARN(1, "No lport_fcport_map entry for s_id %x:%x:%x\n",
			    sess->d_id.b.domain, sess->d_id.b.area,
			    sess->d_id.b.al_pa);

		key = (((u32) s_id.b.domain << 16) |
		       ((u32) s_id.b.area   <<  8) |
		       ((u32) s_id.b.al_pa));

		if (btree_lookup32(&lport->lport_fcport_map, key)) {
			WARN(1, "Already have lport_fcport_map entry for s_id %x:%x:%x\n",
			    s_id.b.domain, s_id.b.area, s_id.b.al_pa);
			btree_update32(&lport->lport_fcport_map, key, se_nacl);
		} else {
			btree_insert32(&lport->lport_fcport_map, key, se_nacl,
			    GFP_ATOMIC);
		}

		sess->d_id = s_id;
		nacl->nport_id = key;
	}

	sess->conf_compl_supported = conf_compl_supported;
}

/*
 * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
 * These callbacks run from qla_target.c in both interrupt and process
 * context; see the comments on the individual handlers above for the
 * locking each one expects.
 */
static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
	.find_cmd_by_tag	= tcm_qla2xxx_find_cmd_by_tag,
	.handle_cmd		= tcm_qla2xxx_handle_cmd,
	.handle_data		= tcm_qla2xxx_handle_data,
	.handle_tmr		= tcm_qla2xxx_handle_tmr,
	.get_cmd		= tcm_qla2xxx_get_cmd,
	.rel_cmd		= tcm_qla2xxx_rel_cmd,
	.free_cmd		= tcm_qla2xxx_free_cmd,
	.free_mcmd		= tcm_qla2xxx_free_mcmd,
	.free_session		= tcm_qla2xxx_free_session,
	.update_sess		= tcm_qla2xxx_update_sess,
	.check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
	.find_sess_by_s_id	= tcm_qla2xxx_find_sess_by_s_id,
	.find_sess_by_loop_id	= tcm_qla2xxx_find_sess_by_loop_id,
	.clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
	.put_sess		= tcm_qla2xxx_put_sess,
	.shutdown_sess		= tcm_qla2xxx_shutdown_sess,
	.get_dif_tags		= tcm_qla2xxx_dif_tags,
	.chk_dif_tags		= tcm_qla2xxx_chk_dif_tags,
};

static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
{
	int rc;

	rc = btree_init32(&lport->lport_fcport_map);
	if (rc) {
		pr_err("Unable to initialize lport->lport_fcport_map btree\n");
		return rc;
	}

	lport->lport_loopid_map =
		vzalloc(array_size(65536,
				   sizeof(struct tcm_qla2xxx_fc_loopid)));
	if (!lport->lport_loopid_map) {
		pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
		    sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
		btree_destroy32(&lport->lport_fcport_map);
		return -ENOMEM;
	}
	pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
	       sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
	return 0;
}

static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha,
					 void *target_lport_ptr,
					 u64 npiv_wwpn, u64 npiv_wwnn)
{
	struct qla_hw_data *ha = vha->hw;
	struct tcm_qla2xxx_lport *lport =
			(struct tcm_qla2xxx_lport *)target_lport_ptr;
	/*
	 * Setup tgt_ops, local pointer to vha and target_lport_ptr
	 */
	ha->tgt.tgt_ops = &tcm_qla2xxx_template;
	vha->vha_tgt.target_lport_ptr = target_lport_ptr;
	lport->qla_vha = vha;

	return 0;
}

static struct se_wwn *tcm_qla2xxx_make_lport(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_qla2xxx_lport *lport;
	u64 wwpn;
	int ret = -ENODEV;

	if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL);

	lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
	if (!lport) {
		pr_err("Unable to allocate struct tcm_qla2xxx_lport\n");
		return ERR_PTR(-ENOMEM);
	}
	lport->lport_wwpn = wwpn;
	tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN,
				wwpn);
	sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) wwpn);

	ret = tcm_qla2xxx_init_lport(lport);
	if (ret != 0)
		goto out;

	ret = qlt_lport_register(lport, wwpn, 0, 0,
				 tcm_qla2xxx_lport_register_cb);
	if (ret != 0)
		goto out_lport;

	return &lport->lport_wwn;
out_lport:
	vfree(lport->lport_loopid_map);
	btree_destroy32(&lport->lport_fcport_map);
out:
	kfree(lport);
	return ERR_PTR(ret);
}

static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
{
	struct tcm_qla2xxx_lport *lport = container_of(wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct scsi_qla_host *vha = lport->qla_vha;
	struct se_node_acl *node;
	u32 key = 0;

	/*
	 * Call into qla_target.c LLD logic to complete the
	 * shutdown of struct qla_tgt after the call to
	 * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above.
	 */
	if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stopped)
		qlt_stop_phase2(vha->vha_tgt.qla_tgt);

	qlt_lport_deregister(vha);

	vfree(lport->lport_loopid_map);
	btree_for_each_safe32(&lport->lport_fcport_map, key, node)
		btree_remove32(&lport->lport_fcport_map, key);
	btree_destroy32(&lport->lport_fcport_map);
	kfree(lport);
}

static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
					      void *target_lport_ptr,
					      u64 npiv_wwpn, u64 npiv_wwnn)
{
	struct fc_vport *vport;
	struct Scsi_Host *sh = base_vha->host;
	struct scsi_qla_host *npiv_vha;
	struct tcm_qla2xxx_lport *lport =
			(struct tcm_qla2xxx_lport *)target_lport_ptr;
	struct tcm_qla2xxx_lport *base_lport =
			(struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr;
	struct fc_vport_identifiers vport_id;

	if (qla_ini_mode_enabled(base_vha)) {
		pr_err("qla2xxx base_vha not enabled for target mode\n");
		return -EPERM;
	}

	if (!base_lport || !base_lport->tpg_1 ||
	    !atomic_read(&base_lport->tpg_1->lport_tpg_enabled)) {
		pr_err("qla2xxx base_lport or tpg_1 not available\n");
		return -EPERM;
	}

	memset(&vport_id, 0, sizeof(vport_id));
	vport_id.port_name = npiv_wwpn;
	vport_id.node_name = npiv_wwnn;
	vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
	vport_id.vport_type = FC_PORTTYPE_NPIV;
	vport_id.disable = false;

	vport = fc_vport_create(sh, 0, &vport_id);
	if (!vport) {
		pr_err("fc_vport_create failed for qla2xxx_npiv\n");
		return -ENODEV;
	}
	/*
	 * Setup local pointer to NPIV vhba + target_lport_ptr
	 */
	npiv_vha = (struct scsi_qla_host *)vport->dd_data;
	npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr;
	lport->qla_vha = npiv_vha;
	scsi_host_get(npiv_vha->host);
	return 0;
}


static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_qla2xxx_lport *lport;
	u64 phys_wwpn, npiv_wwpn, npiv_wwnn;
	char *p, tmp[128];
	int ret;

	snprintf(tmp, 128, "%s", name);

	p = strchr(tmp, '@');
	if (!p) {
		pr_err("Unable to locate NPIV '@' separator\n");
		return ERR_PTR(-EINVAL);
	}
	*p++ = '\0';

	if (tcm_qla2xxx_parse_wwn(tmp, &phys_wwpn, 1) < 0)
		return ERR_PTR(-EINVAL);

	if (tcm_qla2xxx_npiv_parse_wwn(p, strlen(p)+1,
				       &npiv_wwpn, &npiv_wwnn) < 0)
		return ERR_PTR(-EINVAL);

	lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
	if (!lport) {
		pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n");
		return ERR_PTR(-ENOMEM);
	}
	lport->lport_npiv_wwpn = npiv_wwpn;
	lport->lport_npiv_wwnn = npiv_wwnn;
	sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn);

	ret = tcm_qla2xxx_init_lport(lport);
	if (ret != 0)
		goto out;

	ret = qlt_lport_register(lport, phys_wwpn, npiv_wwpn, npiv_wwnn,
				 tcm_qla2xxx_lport_register_npiv_cb);
	if (ret != 0)
		goto out_lport;

	return &lport->lport_wwn;
out_lport:
	vfree(lport->lport_loopid_map);
	btree_destroy32(&lport->lport_fcport_map);
out:
	kfree(lport);
	return ERR_PTR(ret);
}

static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
{
	struct tcm_qla2xxx_lport *lport = container_of(wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct scsi_qla_host *npiv_vha = lport->qla_vha;
	struct qla_hw_data *ha = npiv_vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	scsi_host_put(npiv_vha->host);
	/*
	 * Notify the FC transport that we want to release the vha->fc_vport
	 */
	fc_vport_terminate(npiv_vha->fc_vport);
	scsi_host_put(base_vha->host);
	kfree(lport);
}


static ssize_t tcm_qla2xxx_wwn_version_show(struct config_item *item,
		char *page)
{
	return sprintf(page,
	    "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on %s\n",
	    QLA2XXX_VERSION, utsname()->sysname,
	    utsname()->machine, utsname()->release);
}

CONFIGFS_ATTR_RO(tcm_qla2xxx_wwn_, version);

static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
	&tcm_qla2xxx_wwn_attr_version,
	NULL,
};

static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
	.module				= THIS_MODULE,
	.fabric_name			= "qla2xxx",
	.node_acl_size			= sizeof(struct tcm_qla2xxx_nacl),
	/*
	 * XXX: Limit assumes single page per scatter-gather-list entry.
	 * Current maximum is ~4.9 MB per se_cmd->t_data_sg with PAGE_SIZE=4096
	 */
	.max_data_sg_nents		= 1200,
	.tpg_get_wwn			= tcm_qla2xxx_get_fabric_wwn,
	.tpg_get_tag			= tcm_qla2xxx_get_tag,
	.tpg_check_demo_mode		= tcm_qla2xxx_check_demo_mode,
	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect =
					tcm_qla2xxx_check_demo_write_protect,
	.tpg_check_prod_mode_write_protect =
					tcm_qla2xxx_check_prod_write_protect,
	.tpg_check_prot_fabric_only	= tcm_qla2xxx_check_prot_fabric_only,
	.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
	.check_stop_free		= tcm_qla2xxx_check_stop_free,
	.release_cmd			= tcm_qla2xxx_release_cmd,
	.close_session			= tcm_qla2xxx_close_session,
	.sess_get_index			= tcm_qla2xxx_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= tcm_qla2xxx_write_pending,
	.set_default_node_attributes	= tcm_qla2xxx_set_default_node_attrs,
	.get_cmd_state			= tcm_qla2xxx_get_cmd_state,
	.queue_data_in			= tcm_qla2xxx_queue_data_in,
	.queue_status			= tcm_qla2xxx_queue_status,
	.queue_tm_rsp			= tcm_qla2xxx_queue_tm_rsp,
	.aborted_task			= tcm_qla2xxx_aborted_task,
	/*
	 * Setup function pointers for generic logic in
	 * target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= tcm_qla2xxx_make_lport,
	.fabric_drop_wwn		= tcm_qla2xxx_drop_lport,
	.fabric_make_tpg		= tcm_qla2xxx_make_tpg,
	.fabric_drop_tpg		= tcm_qla2xxx_drop_tpg,
	.fabric_init_nodeacl		= tcm_qla2xxx_init_nodeacl,

	.tfc_wwn_attrs			= tcm_qla2xxx_wwn_attrs,
	.tfc_tpg_base_attrs		= tcm_qla2xxx_tpg_attrs,
	.tfc_tpg_attrib_attrs		= tcm_qla2xxx_tpg_attrib_attrs,
};

static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
	.module				= THIS_MODULE,
	.fabric_name			= "qla2xxx_npiv",
	.node_acl_size			= sizeof(struct tcm_qla2xxx_nacl),
	.tpg_get_wwn			= tcm_qla2xxx_get_fabric_wwn,
	.tpg_get_tag			= tcm_qla2xxx_get_tag,
	.tpg_check_demo_mode		= tcm_qla2xxx_check_demo_mode,
	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode,
	.tpg_check_prod_mode_write_protect =
	    tcm_qla2xxx_check_prod_write_protect,
	.tpg_check_demo_mode_login_only	= tcm_qla2xxx_check_demo_mode_login_only,
	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
	.check_stop_free		= tcm_qla2xxx_check_stop_free,
	.release_cmd			= tcm_qla2xxx_release_cmd,
	.close_session			= tcm_qla2xxx_close_session,
	.sess_get_index			= tcm_qla2xxx_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= tcm_qla2xxx_write_pending,
	.set_default_node_attributes	= tcm_qla2xxx_set_default_node_attrs,
	.get_cmd_state			= tcm_qla2xxx_get_cmd_state,
	.queue_data_in			= tcm_qla2xxx_queue_data_in,
	.queue_status			= tcm_qla2xxx_queue_status,
	.queue_tm_rsp			= tcm_qla2xxx_queue_tm_rsp,
	.aborted_task			= tcm_qla2xxx_aborted_task,
	/*
	 * Setup function pointers for generic logic in
	 * target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= tcm_qla2xxx_npiv_make_lport,
	.fabric_drop_wwn		= tcm_qla2xxx_npiv_drop_lport,
	.fabric_make_tpg		= tcm_qla2xxx_npiv_make_tpg,
	.fabric_drop_tpg		= tcm_qla2xxx_drop_tpg,
	.fabric_init_nodeacl		= tcm_qla2xxx_init_nodeacl,

	.tfc_wwn_attrs			= tcm_qla2xxx_wwn_attrs,
	.tfc_tpg_base_attrs		= tcm_qla2xxx_npiv_tpg_attrs,
};

static int tcm_qla2xxx_register_configfs(void)
{
	int ret;

	pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on %s\n",
	    QLA2XXX_VERSION, utsname()->sysname,
	    utsname()->machine, utsname()->release);

	ret = target_register_template(&tcm_qla2xxx_ops);
	if (ret)
		return ret;

	ret = target_register_template(&tcm_qla2xxx_npiv_ops);
	if (ret)
		goto out_fabric;

	tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free",
						WQ_MEM_RECLAIM, 0);
	if (!tcm_qla2xxx_free_wq) {
		ret = -ENOMEM;
		goto out_fabric_npiv;
	}

	return 0;

out_fabric_npiv:
	target_unregister_template(&tcm_qla2xxx_npiv_ops);
out_fabric:
	target_unregister_template(&tcm_qla2xxx_ops);
	return ret;
}

static void tcm_qla2xxx_deregister_configfs(void)
{
	destroy_workqueue(tcm_qla2xxx_free_wq);

	target_unregister_template(&tcm_qla2xxx_ops);
	target_unregister_template(&tcm_qla2xxx_npiv_ops);
}

static int __init tcm_qla2xxx_init(void)
{
	int ret;

	BUILD_BUG_ON(sizeof(struct abts_recv_from_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct abts_resp_from_24xx_fw) != 64);
	BUILD_BUG_ON(sizeof(struct atio7_fcp_cmnd) != 32);
	BUILD_BUG_ON(sizeof(struct atio_from_isp) != 64);
	BUILD_BUG_ON(sizeof(struct ba_acc_le) != 12);
	BUILD_BUG_ON(sizeof(struct ba_rjt_le) != 4);
	BUILD_BUG_ON(sizeof(struct ctio7_from_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64);
	BUILD_BUG_ON(sizeof(struct ctio_crc_from_fw) != 64);
	BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64);
	BUILD_BUG_ON(sizeof(struct fcp_hdr) != 24);
	BUILD_BUG_ON(sizeof(struct fcp_hdr_le) != 24);
	BUILD_BUG_ON(sizeof(struct nack_to_isp) != 64);

	ret = tcm_qla2xxx_register_configfs();
	if (ret < 0)
		return ret;

	return 0;
}

static void __exit tcm_qla2xxx_exit(void)
{
	tcm_qla2xxx_deregister_configfs();
}

MODULE_DESCRIPTION("TCM QLA24XX+ series NPIV enabled fabric driver");
MODULE_LICENSE("GPL");
module_init(tcm_qla2xxx_init);
module_exit(tcm_qla2xxx_exit);