// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/gfp.h>

#ifdef CONFIG_PPC
#define IS_PPCARCH	true
#else
#define IS_PPCARCH	false
#endif

static struct mb_cmd_name {
	uint16_t cmd;
	const char *str;
} mb_str[] = {
	{MBC_GET_PORT_DATABASE,		"GPDB"},
	{MBC_GET_ID_LIST,		"GIDList"},
	{MBC_GET_LINK_PRIV_STATS,	"Stats"},
	{MBC_GET_RESOURCE_COUNTS,	"ResCnt"},
};

static const char *mb_to_str(uint16_t cmd)
{
	int i;
	struct mb_cmd_name *e;

	for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
		e = mb_str + i;
		if (cmd == e->cmd)
			return e->str;
	}
	return "unknown";
}

static struct rom_cmd {
	uint16_t cmd;
} rom_cmds[] = {
	{ MBC_LOAD_RAM },
	{ MBC_EXECUTE_FIRMWARE },
	{ MBC_READ_RAM_WORD },
	{ MBC_MAILBOX_REGISTER_TEST },
	{ MBC_VERIFY_CHECKSUM },
	{ MBC_GET_FIRMWARE_VERSION },
	{ MBC_LOAD_RISC_RAM },
	{ MBC_DUMP_RISC_RAM },
	{ MBC_LOAD_RISC_RAM_EXTENDED },
	{ MBC_DUMP_RISC_RAM_EXTENDED },
	{ MBC_WRITE_RAM_WORD_EXTENDED },
	{ MBC_READ_RAM_EXTENDED },
	{ MBC_GET_RESOURCE_COUNTS },
	{ MBC_SET_FIRMWARE_OPTION },
	{ MBC_MID_INITIALIZE_FIRMWARE },
	{ MBC_GET_FIRMWARE_STATE },
	{ MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
	{ MBC_GET_RETRY_COUNT },
	{ MBC_TRACE_CONTROL },
	{ MBC_INITIALIZE_MULTIQ },
	{ MBC_IOCB_COMMAND_A64 },
	{ MBC_GET_ADAPTER_LOOP_ID },
	{ MBC_READ_SFP },
	{ MBC_SET_RNID_PARAMS },
	{ MBC_GET_RNID_PARAMS },
	{ MBC_GET_SET_ZIO_THRESHOLD },
};

static int is_rom_cmd(uint16_t cmd)
{
	int i;
	struct rom_cmd *wc;

	for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
		wc = rom_cmds + i;
		if (wc->cmd == cmd)
			return 1;
	}

	return 0;
}

/*
 * qla2x00_mailbox_command
 *	Issues a mailbox command and waits for completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS = cmd performed successfully
 *	1 : QLA_FUNCTION_FAILED (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 *
 * Context:
 *	Kernel context.
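 *
 * Note: while an ISP abort is pending, only ROM-level commands (see
 * is_rom_cmd()) are passed through; any other command fails early with
 * QLA_FUNCTION_TIMEOUT. Callers are also serialized on ha->mbx_cmd_comp
 * and wait at most mcp->tov seconds for access to the mailbox registers.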
104 */ 105static int 106qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) 107{ 108 int rval, i; 109 unsigned long flags = 0; 110 device_reg_t *reg; 111 uint8_t abort_active; 112 uint8_t io_lock_on; 113 uint16_t command = 0; 114 uint16_t *iptr; 115 __le16 __iomem *optr; 116 uint32_t cnt; 117 uint32_t mboxes; 118 unsigned long wait_time; 119 struct qla_hw_data *ha = vha->hw; 120 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 121 u32 chip_reset; 122 123 124 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__); 125 126 if (ha->pdev->error_state == pci_channel_io_perm_failure) { 127 ql_log(ql_log_warn, vha, 0x1001, 128 "PCI channel failed permanently, exiting.\n"); 129 return QLA_FUNCTION_TIMEOUT; 130 } 131 132 if (vha->device_flags & DFLG_DEV_FAILED) { 133 ql_log(ql_log_warn, vha, 0x1002, 134 "Device in failed state, exiting.\n"); 135 return QLA_FUNCTION_TIMEOUT; 136 } 137 138 /* if PCI error, then avoid mbx processing.*/ 139 if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) && 140 test_bit(UNLOADING, &base_vha->dpc_flags)) { 141 ql_log(ql_log_warn, vha, 0xd04e, 142 "PCI error, exiting.\n"); 143 return QLA_FUNCTION_TIMEOUT; 144 } 145 146 reg = ha->iobase; 147 io_lock_on = base_vha->flags.init_done; 148 149 rval = QLA_SUCCESS; 150 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); 151 chip_reset = ha->chip_reset; 152 153 if (ha->flags.pci_channel_io_perm_failure) { 154 ql_log(ql_log_warn, vha, 0x1003, 155 "Perm failure on EEH timeout MBX, exiting.\n"); 156 return QLA_FUNCTION_TIMEOUT; 157 } 158 159 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) { 160 /* Setting Link-Down error */ 161 mcp->mb[0] = MBS_LINK_DOWN_ERROR; 162 ql_log(ql_log_warn, vha, 0x1004, 163 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); 164 return QLA_FUNCTION_TIMEOUT; 165 } 166 167 /* check if ISP abort is active and return cmd with timeout */ 168 if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || 169 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || 170 test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) || 171 ha->flags.eeh_busy) && 172 !is_rom_cmd(mcp->mb[0])) { 173 ql_log(ql_log_info, vha, 0x1005, 174 "Cmd 0x%x aborted with timeout since ISP Abort is pending\n", 175 mcp->mb[0]); 176 return QLA_FUNCTION_TIMEOUT; 177 } 178 179 atomic_inc(&ha->num_pend_mbx_stage1); 180 /* 181 * Wait for active mailbox commands to finish by waiting at most tov 182 * seconds. This is to serialize actual issuing of mailbox cmds during 183 * non ISP abort time. 184 */ 185 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) { 186 /* Timeout occurred. Return error. */ 187 ql_log(ql_log_warn, vha, 0xd035, 188 "Cmd access timeout, cmd=0x%x, Exiting.\n", 189 mcp->mb[0]); 190 atomic_dec(&ha->num_pend_mbx_stage1); 191 return QLA_FUNCTION_TIMEOUT; 192 } 193 atomic_dec(&ha->num_pend_mbx_stage1); 194 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) { 195 rval = QLA_ABORTED; 196 goto premature_exit; 197 } 198 199 200 /* Save mailbox command for debug */ 201 ha->mcp = mcp; 202 203 ql_dbg(ql_dbg_mbx, vha, 0x1006, 204 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]); 205 206 spin_lock_irqsave(&ha->hardware_lock, flags); 207 208 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset || 209 ha->flags.mbox_busy) { 210 rval = QLA_ABORTED; 211 spin_unlock_irqrestore(&ha->hardware_lock, flags); 212 goto premature_exit; 213 } 214 ha->flags.mbox_busy = 1; 215 216 /* Load mailbox registers. 
*/ 217 if (IS_P3P_TYPE(ha)) 218 optr = ®->isp82.mailbox_in[0]; 219 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) 220 optr = ®->isp24.mailbox0; 221 else 222 optr = MAILBOX_REG(ha, ®->isp, 0); 223 224 iptr = mcp->mb; 225 command = mcp->mb[0]; 226 mboxes = mcp->out_mb; 227 228 ql_dbg(ql_dbg_mbx, vha, 0x1111, 229 "Mailbox registers (OUT):\n"); 230 for (cnt = 0; cnt < ha->mbx_count; cnt++) { 231 if (IS_QLA2200(ha) && cnt == 8) 232 optr = MAILBOX_REG(ha, ®->isp, 8); 233 if (mboxes & BIT_0) { 234 ql_dbg(ql_dbg_mbx, vha, 0x1112, 235 "mbox[%d]<-0x%04x\n", cnt, *iptr); 236 wrt_reg_word(optr, *iptr); 237 } else { 238 wrt_reg_word(optr, 0); 239 } 240 241 mboxes >>= 1; 242 optr++; 243 iptr++; 244 } 245 246 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117, 247 "I/O Address = %p.\n", optr); 248 249 /* Issue set host interrupt command to send cmd out. */ 250 ha->flags.mbox_int = 0; 251 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 252 253 /* Unlock mbx registers and wait for interrupt */ 254 ql_dbg(ql_dbg_mbx, vha, 0x100f, 255 "Going to unlock irq & waiting for interrupts. " 256 "jiffies=%lx.\n", jiffies); 257 258 /* Wait for mbx cmd completion until timeout */ 259 atomic_inc(&ha->num_pend_mbx_stage2); 260 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) { 261 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 262 263 if (IS_P3P_TYPE(ha)) 264 wrt_reg_dword(®->isp82.hint, HINT_MBX_INT_PENDING); 265 else if (IS_FWI2_CAPABLE(ha)) 266 wrt_reg_dword(®->isp24.hccr, HCCRX_SET_HOST_INT); 267 else 268 wrt_reg_word(®->isp.hccr, HCCR_SET_HOST_INT); 269 spin_unlock_irqrestore(&ha->hardware_lock, flags); 270 271 wait_time = jiffies; 272 if (!wait_for_completion_timeout(&ha->mbx_intr_comp, 273 mcp->tov * HZ)) { 274 ql_dbg(ql_dbg_mbx, vha, 0x117a, 275 "cmd=%x Timeout.\n", command); 276 spin_lock_irqsave(&ha->hardware_lock, flags); 277 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 278 spin_unlock_irqrestore(&ha->hardware_lock, flags); 279 280 if (chip_reset != ha->chip_reset) { 281 spin_lock_irqsave(&ha->hardware_lock, flags); 282 ha->flags.mbox_busy = 0; 283 spin_unlock_irqrestore(&ha->hardware_lock, 284 flags); 285 atomic_dec(&ha->num_pend_mbx_stage2); 286 rval = QLA_ABORTED; 287 goto premature_exit; 288 } 289 } else if (ha->flags.purge_mbox || 290 chip_reset != ha->chip_reset) { 291 spin_lock_irqsave(&ha->hardware_lock, flags); 292 ha->flags.mbox_busy = 0; 293 spin_unlock_irqrestore(&ha->hardware_lock, flags); 294 atomic_dec(&ha->num_pend_mbx_stage2); 295 rval = QLA_ABORTED; 296 goto premature_exit; 297 } 298 299 if (time_after(jiffies, wait_time + 5 * HZ)) 300 ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n", 301 command, jiffies_to_msecs(jiffies - wait_time)); 302 } else { 303 ql_dbg(ql_dbg_mbx, vha, 0x1011, 304 "Cmd=%x Polling Mode.\n", command); 305 306 if (IS_P3P_TYPE(ha)) { 307 if (rd_reg_dword(®->isp82.hint) & 308 HINT_MBX_INT_PENDING) { 309 ha->flags.mbox_busy = 0; 310 spin_unlock_irqrestore(&ha->hardware_lock, 311 flags); 312 atomic_dec(&ha->num_pend_mbx_stage2); 313 ql_dbg(ql_dbg_mbx, vha, 0x1012, 314 "Pending mailbox timeout, exiting.\n"); 315 rval = QLA_FUNCTION_TIMEOUT; 316 goto premature_exit; 317 } 318 wrt_reg_dword(®->isp82.hint, HINT_MBX_INT_PENDING); 319 } else if (IS_FWI2_CAPABLE(ha)) 320 wrt_reg_dword(®->isp24.hccr, HCCRX_SET_HOST_INT); 321 else 322 wrt_reg_word(®->isp.hccr, HCCR_SET_HOST_INT); 323 spin_unlock_irqrestore(&ha->hardware_lock, flags); 324 325 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */ 326 while (!ha->flags.mbox_int) { 327 if 
(ha->flags.purge_mbox || 328 chip_reset != ha->chip_reset) { 329 spin_lock_irqsave(&ha->hardware_lock, flags); 330 ha->flags.mbox_busy = 0; 331 spin_unlock_irqrestore(&ha->hardware_lock, 332 flags); 333 atomic_dec(&ha->num_pend_mbx_stage2); 334 rval = QLA_ABORTED; 335 goto premature_exit; 336 } 337 338 if (time_after(jiffies, wait_time)) 339 break; 340 341 /* Check for pending interrupts. */ 342 qla2x00_poll(ha->rsp_q_map[0]); 343 344 if (!ha->flags.mbox_int && 345 !(IS_QLA2200(ha) && 346 command == MBC_LOAD_RISC_RAM_EXTENDED)) 347 msleep(10); 348 } /* while */ 349 ql_dbg(ql_dbg_mbx, vha, 0x1013, 350 "Waited %d sec.\n", 351 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)); 352 } 353 atomic_dec(&ha->num_pend_mbx_stage2); 354 355 /* Check whether we timed out */ 356 if (ha->flags.mbox_int) { 357 uint16_t *iptr2; 358 359 ql_dbg(ql_dbg_mbx, vha, 0x1014, 360 "Cmd=%x completed.\n", command); 361 362 /* Got interrupt. Clear the flag. */ 363 ha->flags.mbox_int = 0; 364 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 365 366 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) { 367 spin_lock_irqsave(&ha->hardware_lock, flags); 368 ha->flags.mbox_busy = 0; 369 spin_unlock_irqrestore(&ha->hardware_lock, flags); 370 371 /* Setting Link-Down error */ 372 mcp->mb[0] = MBS_LINK_DOWN_ERROR; 373 ha->mcp = NULL; 374 rval = QLA_FUNCTION_FAILED; 375 ql_log(ql_log_warn, vha, 0xd048, 376 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); 377 goto premature_exit; 378 } 379 380 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) { 381 ql_dbg(ql_dbg_mbx, vha, 0x11ff, 382 "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0], 383 MBS_COMMAND_COMPLETE); 384 rval = QLA_FUNCTION_FAILED; 385 } 386 387 /* Load return mailbox registers. */ 388 iptr2 = mcp->mb; 389 iptr = (uint16_t *)&ha->mailbox_out[0]; 390 mboxes = mcp->in_mb; 391 392 ql_dbg(ql_dbg_mbx, vha, 0x1113, 393 "Mailbox registers (IN):\n"); 394 for (cnt = 0; cnt < ha->mbx_count; cnt++) { 395 if (mboxes & BIT_0) { 396 *iptr2 = *iptr; 397 ql_dbg(ql_dbg_mbx, vha, 0x1114, 398 "mbox[%d]->0x%04x\n", cnt, *iptr2); 399 } 400 401 mboxes >>= 1; 402 iptr2++; 403 iptr++; 404 } 405 } else { 406 407 uint16_t mb[8]; 408 uint32_t ictrl, host_status, hccr; 409 uint16_t w; 410 411 if (IS_FWI2_CAPABLE(ha)) { 412 mb[0] = rd_reg_word(®->isp24.mailbox0); 413 mb[1] = rd_reg_word(®->isp24.mailbox1); 414 mb[2] = rd_reg_word(®->isp24.mailbox2); 415 mb[3] = rd_reg_word(®->isp24.mailbox3); 416 mb[7] = rd_reg_word(®->isp24.mailbox7); 417 ictrl = rd_reg_dword(®->isp24.ictrl); 418 host_status = rd_reg_dword(®->isp24.host_status); 419 hccr = rd_reg_dword(®->isp24.hccr); 420 421 ql_log(ql_log_warn, vha, 0xd04c, 422 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " 423 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n", 424 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3], 425 mb[7], host_status, hccr); 426 427 } else { 428 mb[0] = RD_MAILBOX_REG(ha, ®->isp, 0); 429 ictrl = rd_reg_word(®->isp.ictrl); 430 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119, 431 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " 432 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]); 433 } 434 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019); 435 436 /* Capture FW dump only, if PCI device active */ 437 if (!pci_channel_offline(vha->hw->pdev)) { 438 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 439 if (w == 0xffff || ictrl == 0xffffffff || 440 (chip_reset != ha->chip_reset)) { 441 /* This is special case if there is unload 442 * of driver happening and if PCI device go 443 * into bad 
state due to PCI error condition
				 * then only PCI ERR flag would be set.
				 * we will do premature exit for above case.
				 */
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}

			/* Attempt to capture firmware dump for further
			 * analysis of the current firmware state. We do not
			 * need to do this if we are intentionally generating
			 * a dump.
			 */
			if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
				qla2xxx_dump_fw(vha);
			rval = QLA_FUNCTION_TIMEOUT;
		}
	}
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->flags.mbox_busy = 0;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Clean up */
	ha->mcp = NULL;

	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
		ql_dbg(ql_dbg_mbx, vha, 0x101a,
		    "Checking for additional resp interrupt.\n");

		/* polling mode for non isp_abort commands. */
		qla2x00_poll(ha->rsp_q_map[0]);
	}

	if (rval == QLA_FUNCTION_TIMEOUT &&
	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
		    ha->flags.eeh_busy) {
			/* not in dpc. schedule it for dpc to take over. */
			ql_dbg(ql_dbg_mbx, vha, 0x101b,
			    "Timeout, schedule isp_abort_needed.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112a,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101c,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
				    "abort.\n", command, mcp->mb[0],
				    ha->flags.eeh_busy);
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else if (current == ha->dpc_thread) {
			/* call abort directly since we are in the DPC thread */
			ql_dbg(ql_dbg_mbx, vha, 0x101d,
			    "Timeout, calling abort_isp.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112b,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101e,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x. Scheduling ISP abort ",
				    command, mcp->mb[0]);
				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				/* Allow next mbx cmd to come in. */
				complete(&ha->mbx_cmd_comp);
				if (ha->isp_ops->abort_isp(vha)) {
					/* Failed. retry later. */
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				ql_dbg(ql_dbg_mbx, vha, 0x101f,
				    "Finished abort_isp.\n");
				goto mbx_done;
			}
		}
	}

premature_exit:
	/* Allow next mbx cmd to come in. */
	complete(&ha->mbx_cmd_comp);

mbx_done:
	if (rval == QLA_ABORTED) {
		ql_log(ql_log_info, vha, 0xd035,
		    "Chip Reset in progress.
Purging Mbox cmd=0x%x.\n", 554 mcp->mb[0]); 555 } else if (rval) { 556 if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) { 557 pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR, 558 dev_name(&ha->pdev->dev), 0x1020+0x800, 559 vha->host_no, rval); 560 mboxes = mcp->in_mb; 561 cnt = 4; 562 for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1) 563 if (mboxes & BIT_0) { 564 printk(" mb[%u]=%x", i, mcp->mb[i]); 565 cnt--; 566 } 567 pr_warn(" cmd=%x ****\n", command); 568 } 569 if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) { 570 ql_dbg(ql_dbg_mbx, vha, 0x1198, 571 "host_status=%#x intr_ctrl=%#x intr_status=%#x\n", 572 rd_reg_dword(®->isp24.host_status), 573 rd_reg_dword(®->isp24.ictrl), 574 rd_reg_dword(®->isp24.istatus)); 575 } else { 576 ql_dbg(ql_dbg_mbx, vha, 0x1206, 577 "ctrl_status=%#x ictrl=%#x istatus=%#x\n", 578 rd_reg_word(®->isp.ctrl_status), 579 rd_reg_word(®->isp.ictrl), 580 rd_reg_word(®->isp.istatus)); 581 } 582 } else { 583 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__); 584 } 585 586 return rval; 587} 588 589int 590qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr, 591 uint32_t risc_code_size) 592{ 593 int rval; 594 struct qla_hw_data *ha = vha->hw; 595 mbx_cmd_t mc; 596 mbx_cmd_t *mcp = &mc; 597 598 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022, 599 "Entered %s.\n", __func__); 600 601 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) { 602 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED; 603 mcp->mb[8] = MSW(risc_addr); 604 mcp->out_mb = MBX_8|MBX_0; 605 } else { 606 mcp->mb[0] = MBC_LOAD_RISC_RAM; 607 mcp->out_mb = MBX_0; 608 } 609 mcp->mb[1] = LSW(risc_addr); 610 mcp->mb[2] = MSW(req_dma); 611 mcp->mb[3] = LSW(req_dma); 612 mcp->mb[6] = MSW(MSD(req_dma)); 613 mcp->mb[7] = LSW(MSD(req_dma)); 614 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; 615 if (IS_FWI2_CAPABLE(ha)) { 616 mcp->mb[4] = MSW(risc_code_size); 617 mcp->mb[5] = LSW(risc_code_size); 618 mcp->out_mb |= MBX_5|MBX_4; 619 } else { 620 mcp->mb[4] = LSW(risc_code_size); 621 mcp->out_mb |= MBX_4; 622 } 623 624 mcp->in_mb = MBX_1|MBX_0; 625 mcp->tov = MBX_TOV_SECONDS; 626 mcp->flags = 0; 627 rval = qla2x00_mailbox_command(vha, mcp); 628 629 if (rval != QLA_SUCCESS) { 630 ql_dbg(ql_dbg_mbx, vha, 0x1023, 631 "Failed=%x mb[0]=%x mb[1]=%x.\n", 632 rval, mcp->mb[0], mcp->mb[1]); 633 } else { 634 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024, 635 "Done %s.\n", __func__); 636 } 637 638 return rval; 639} 640 641#define NVME_ENABLE_FLAG BIT_3 642 643/* 644 * qla2x00_execute_fw 645 * Start adapter firmware. 646 * 647 * Input: 648 * ha = adapter block pointer. 649 * TARGET_QUEUE_LOCK must be released. 650 * ADAPTER_STATE_LOCK must be released. 651 * 652 * Returns: 653 * qla2x00 local function return status code. 654 * 655 * Context: 656 * Kernel context. 657 */ 658int 659qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) 660{ 661 int rval; 662 struct qla_hw_data *ha = vha->hw; 663 mbx_cmd_t mc; 664 mbx_cmd_t *mcp = &mc; 665 u8 semaphore = 0; 666#define EXE_FW_FORCE_SEMAPHORE BIT_7 667 u8 retry = 3; 668 669 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025, 670 "Entered %s.\n", __func__); 671 672again: 673 mcp->mb[0] = MBC_EXECUTE_FIRMWARE; 674 mcp->out_mb = MBX_0; 675 mcp->in_mb = MBX_0; 676 if (IS_FWI2_CAPABLE(ha)) { 677 mcp->mb[1] = MSW(risc_addr); 678 mcp->mb[2] = LSW(risc_addr); 679 mcp->mb[3] = 0; 680 mcp->mb[4] = 0; 681 mcp->mb[11] = 0; 682 683 /* Enable BPM? 
*/ 684 if (ha->flags.lr_detected) { 685 mcp->mb[4] = BIT_0; 686 if (IS_BPM_RANGE_CAPABLE(ha)) 687 mcp->mb[4] |= 688 ha->lr_distance << LR_DIST_FW_POS; 689 } 690 691 if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha))) 692 mcp->mb[4] |= NVME_ENABLE_FLAG; 693 694 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 695 struct nvram_81xx *nv = ha->nvram; 696 /* set minimum speed if specified in nvram */ 697 if (nv->min_supported_speed >= 2 && 698 nv->min_supported_speed <= 5) { 699 mcp->mb[4] |= BIT_4; 700 mcp->mb[11] |= nv->min_supported_speed & 0xF; 701 mcp->out_mb |= MBX_11; 702 mcp->in_mb |= BIT_5; 703 vha->min_supported_speed = 704 nv->min_supported_speed; 705 } 706 707 if (IS_PPCARCH) 708 mcp->mb[11] |= BIT_4; 709 } 710 711 if (ha->flags.exlogins_enabled) 712 mcp->mb[4] |= ENABLE_EXTENDED_LOGIN; 713 714 if (ha->flags.exchoffld_enabled) 715 mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD; 716 717 if (semaphore) 718 mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE; 719 720 mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11; 721 mcp->in_mb |= MBX_3 | MBX_2 | MBX_1; 722 } else { 723 mcp->mb[1] = LSW(risc_addr); 724 mcp->out_mb |= MBX_1; 725 if (IS_QLA2322(ha) || IS_QLA6322(ha)) { 726 mcp->mb[2] = 0; 727 mcp->out_mb |= MBX_2; 728 } 729 } 730 731 mcp->tov = MBX_TOV_SECONDS; 732 mcp->flags = 0; 733 rval = qla2x00_mailbox_command(vha, mcp); 734 735 if (rval != QLA_SUCCESS) { 736 if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR && 737 mcp->mb[1] == 0x27 && retry) { 738 semaphore = 1; 739 retry--; 740 ql_dbg(ql_dbg_async, vha, 0x1026, 741 "Exe FW: force semaphore.\n"); 742 goto again; 743 } 744 745 ql_dbg(ql_dbg_mbx, vha, 0x1026, 746 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 747 return rval; 748 } 749 750 if (!IS_FWI2_CAPABLE(ha)) 751 goto done; 752 753 ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2]; 754 ql_dbg(ql_dbg_mbx, vha, 0x119a, 755 "fw_ability_mask=%x.\n", ha->fw_ability_mask); 756 ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]); 757 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 758 ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1); 759 ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n", 760 ha->max_supported_speed == 0 ? "16Gps" : 761 ha->max_supported_speed == 1 ? "32Gps" : 762 ha->max_supported_speed == 2 ? "64Gps" : "unknown"); 763 if (vha->min_supported_speed) { 764 ha->min_supported_speed = mcp->mb[5] & 765 (BIT_0 | BIT_1 | BIT_2); 766 ql_dbg(ql_dbg_mbx, vha, 0x119c, 767 "min_supported_speed=%s.\n", 768 ha->min_supported_speed == 6 ? "64Gps" : 769 ha->min_supported_speed == 5 ? "32Gps" : 770 ha->min_supported_speed == 4 ? "16Gps" : 771 ha->min_supported_speed == 3 ? "8Gps" : 772 ha->min_supported_speed == 2 ? "4Gps" : "unknown"); 773 } 774 } 775 776done: 777 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028, 778 "Done %s.\n", __func__); 779 780 return rval; 781} 782 783/* 784 * qla_get_exlogin_status 785 * Get extended login status 786 * uses the memory offload control/status Mailbox 787 * 788 * Input: 789 * ha: adapter state pointer. 790 * fwopt: firmware options 791 * 792 * Returns: 793 * qla2x00 local function status 794 * 795 * Context: 796 * Kernel context. 
 */
#define FETCH_XLOGINS_STAT	0x8
int
qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
    uint16_t *ex_logins_cnt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XLOGINS_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
	} else {
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x1190,
		    "buffer size 0x%x, exchange login count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_set_exlogin_mem_cfg
 *	set extended login memory configuration
 *	Mbx needs to be issued before init_cb is set
 *
 * Input:
 *	ha: adapter state pointer.
 *	buffer: buffer pointer
 *	phys_addr: physical address of buffer
 *	size: size of buffer
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
#define CONFIG_XLOGINS_MEM	0x9
int
qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XLOGINS_MEM;
	mcp->mb[2] = MSW(phys_addr);
	mcp->mb[3] = LSW(phys_addr);
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->mb[8] = MSW(ha->exlogin_size);
	mcp->mb[9] = LSW(ha->exlogin_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x111b,
		    "EXlogin Failed=%x. MB0=%x MB11=%x\n",
		    rval, mcp->mb[0], mcp->mb[11]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_get_exchoffld_status
 *	Get exchange offload status
 *	uses the memory offload control/status Mailbox
 *
 * Input:
 *	ha: adapter state pointer.
 *	fwopt: firmware options
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
 */
#define FETCH_XCHOFFLD_STAT	0x2
int
qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
    uint16_t *ex_logins_cnt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XCHOFFLD_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
	} else {
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x118e,
		    "buffer size 0x%x, exchange offload count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla_set_exchoffld_mem_cfg
 *	Set exchange offload memory configuration
 *	Mbx needs to be issued before init_cb is set
 *
 * Input:
 *	ha: adapter state pointer.
 *	buffer: buffer pointer
 *	phys_addr: physical address of buffer
 *	size: size of buffer
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
#define CONFIG_XCHOFFLD_MEM	0x3
int
qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
	mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
	mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
	mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[8] = MSW(ha->exchoffld_size);
	mcp->mb[9] = LSW(ha->exchoffld_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
		    "Done %s.\n", __func__);
	}

	return rval;
}

/*
 * qla2x00_get_fw_version
 *	Get firmware version.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	major:		pointer for major number.
 *	minor:		pointer for minor number.
 *	subminor:	pointer for subminor number.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
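 *
 * Note: besides the version words in mb[1-3], FWI-2 capable adapters
 * return attribute words in mb[6] and mb[15-17]; the parsing below uses
 * those bits to detect extended login, exchange offload and FC-NVMe
 * support.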
1017 */ 1018int 1019qla2x00_get_fw_version(scsi_qla_host_t *vha) 1020{ 1021 int rval; 1022 mbx_cmd_t mc; 1023 mbx_cmd_t *mcp = &mc; 1024 struct qla_hw_data *ha = vha->hw; 1025 1026 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029, 1027 "Entered %s.\n", __func__); 1028 1029 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; 1030 mcp->out_mb = MBX_0; 1031 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 1032 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha)) 1033 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8; 1034 if (IS_FWI2_CAPABLE(ha)) 1035 mcp->in_mb |= MBX_17|MBX_16|MBX_15; 1036 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) 1037 mcp->in_mb |= 1038 MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18| 1039 MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7; 1040 1041 mcp->flags = 0; 1042 mcp->tov = MBX_TOV_SECONDS; 1043 rval = qla2x00_mailbox_command(vha, mcp); 1044 if (rval != QLA_SUCCESS) 1045 goto failed; 1046 1047 /* Return mailbox data. */ 1048 ha->fw_major_version = mcp->mb[1]; 1049 ha->fw_minor_version = mcp->mb[2]; 1050 ha->fw_subminor_version = mcp->mb[3]; 1051 ha->fw_attributes = mcp->mb[6]; 1052 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw)) 1053 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */ 1054 else 1055 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4]; 1056 1057 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { 1058 ha->mpi_version[0] = mcp->mb[10] & 0xff; 1059 ha->mpi_version[1] = mcp->mb[11] >> 8; 1060 ha->mpi_version[2] = mcp->mb[11] & 0xff; 1061 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13]; 1062 ha->phy_version[0] = mcp->mb[8] & 0xff; 1063 ha->phy_version[1] = mcp->mb[9] >> 8; 1064 ha->phy_version[2] = mcp->mb[9] & 0xff; 1065 } 1066 1067 if (IS_FWI2_CAPABLE(ha)) { 1068 ha->fw_attributes_h = mcp->mb[15]; 1069 ha->fw_attributes_ext[0] = mcp->mb[16]; 1070 ha->fw_attributes_ext[1] = mcp->mb[17]; 1071 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139, 1072 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n", 1073 __func__, mcp->mb[15], mcp->mb[6]); 1074 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f, 1075 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n", 1076 __func__, mcp->mb[17], mcp->mb[16]); 1077 1078 if (ha->fw_attributes_h & 0x4) 1079 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d, 1080 "%s: Firmware supports Extended Login 0x%x\n", 1081 __func__, ha->fw_attributes_h); 1082 1083 if (ha->fw_attributes_h & 0x8) 1084 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191, 1085 "%s: Firmware supports Exchange Offload 0x%x\n", 1086 __func__, ha->fw_attributes_h); 1087 1088 /* 1089 * FW supports nvme and driver load parameter requested nvme. 1090 * BIT 26 of fw_attributes indicates NVMe support. 
		 */
		if ((ha->fw_attributes_h &
		    (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
		    ql2xnvmeenable) {
			if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
				vha->flags.nvme_first_burst = 1;

			vha->flags.nvme_enabled = 1;
			ql_log(ql_log_info, vha, 0xd302,
			    "%s: FC-NVMe is Enabled (0x%x)\n",
			    __func__, ha->fw_attributes_h);
		}

		/* BIT_13 of Extended FW Attributes informs about NVMe2 support */
		if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) {
			ql_log(ql_log_info, vha, 0xd302,
			    "Firmware supports NVMe2 0x%x\n",
			    ha->fw_attributes_ext[0]);
			vha->flags.nvme2_enabled = 1;
		}
	}

	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		ha->serdes_version[0] = mcp->mb[7] & 0xff;
		ha->serdes_version[1] = mcp->mb[8] >> 8;
		ha->serdes_version[2] = mcp->mb[8] & 0xff;
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->pep_version[0] = mcp->mb[13] & 0xff;
		ha->pep_version[1] = mcp->mb[14] >> 8;
		ha->pep_version[2] = mcp->mb[14] & 0xff;
		ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
		ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
		ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
		ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
		if (IS_QLA28XX(ha)) {
			if (mcp->mb[16] & BIT_10)
				ha->flags.secure_fw = 1;

			ql_log(ql_log_info, vha, 0xffff,
			    "Secure Flash Update in FW: %s\n",
			    (ha->flags.secure_fw) ? "Supported" :
			    "Not Supported");
		}

		if (ha->flags.scm_supported_a &&
		    (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) {
			ha->flags.scm_supported_f = 1;
			ha->sf_init_cb->flags |= cpu_to_le16(BIT_13);
		}
		ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n",
		    (ha->flags.scm_supported_f) ? "Supported" :
		    "Not Supported");

		if (vha->flags.nvme2_enabled) {
			/* set BIT_15 of special feature control block for SLER */
			ha->sf_init_cb->flags |= cpu_to_le16(BIT_15);
			/* set BIT_14 of special feature control block for PI CTRL*/
			ha->sf_init_cb->flags |= cpu_to_le16(BIT_14);
		}
	}

failed:
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
		    "Done %s.\n", __func__);
	}
	return rval;
}

/*
 * qla2x00_get_fw_options
 *	Get firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
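 *
 * Note: on success mb[0-3] are copied back verbatim into fwopts[0-3];
 * fwopts[0] holds the mailbox completion status word.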
1179 */ 1180int 1181qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) 1182{ 1183 int rval; 1184 mbx_cmd_t mc; 1185 mbx_cmd_t *mcp = &mc; 1186 1187 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c, 1188 "Entered %s.\n", __func__); 1189 1190 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION; 1191 mcp->out_mb = MBX_0; 1192 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1193 mcp->tov = MBX_TOV_SECONDS; 1194 mcp->flags = 0; 1195 rval = qla2x00_mailbox_command(vha, mcp); 1196 1197 if (rval != QLA_SUCCESS) { 1198 /*EMPTY*/ 1199 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval); 1200 } else { 1201 fwopts[0] = mcp->mb[0]; 1202 fwopts[1] = mcp->mb[1]; 1203 fwopts[2] = mcp->mb[2]; 1204 fwopts[3] = mcp->mb[3]; 1205 1206 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e, 1207 "Done %s.\n", __func__); 1208 } 1209 1210 return rval; 1211} 1212 1213 1214/* 1215 * qla2x00_set_fw_options 1216 * Set firmware options. 1217 * 1218 * Input: 1219 * ha = adapter block pointer. 1220 * fwopt = pointer for firmware options. 1221 * 1222 * Returns: 1223 * qla2x00 local function return status code. 1224 * 1225 * Context: 1226 * Kernel context. 1227 */ 1228int 1229qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts) 1230{ 1231 int rval; 1232 mbx_cmd_t mc; 1233 mbx_cmd_t *mcp = &mc; 1234 1235 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f, 1236 "Entered %s.\n", __func__); 1237 1238 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION; 1239 mcp->mb[1] = fwopts[1]; 1240 mcp->mb[2] = fwopts[2]; 1241 mcp->mb[3] = fwopts[3]; 1242 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1243 mcp->in_mb = MBX_0; 1244 if (IS_FWI2_CAPABLE(vha->hw)) { 1245 mcp->in_mb |= MBX_1; 1246 mcp->mb[10] = fwopts[10]; 1247 mcp->out_mb |= MBX_10; 1248 } else { 1249 mcp->mb[10] = fwopts[10]; 1250 mcp->mb[11] = fwopts[11]; 1251 mcp->mb[12] = 0; /* Undocumented, but used */ 1252 mcp->out_mb |= MBX_12|MBX_11|MBX_10; 1253 } 1254 mcp->tov = MBX_TOV_SECONDS; 1255 mcp->flags = 0; 1256 rval = qla2x00_mailbox_command(vha, mcp); 1257 1258 fwopts[0] = mcp->mb[0]; 1259 1260 if (rval != QLA_SUCCESS) { 1261 /*EMPTY*/ 1262 ql_dbg(ql_dbg_mbx, vha, 0x1030, 1263 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]); 1264 } else { 1265 /*EMPTY*/ 1266 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031, 1267 "Done %s.\n", __func__); 1268 } 1269 1270 return rval; 1271} 1272 1273/* 1274 * qla2x00_mbx_reg_test 1275 * Mailbox register wrap test. 1276 * 1277 * Input: 1278 * ha = adapter block pointer. 1279 * TARGET_QUEUE_LOCK must be released. 1280 * ADAPTER_STATE_LOCK must be released. 1281 * 1282 * Returns: 1283 * qla2x00 local function return status code. 1284 * 1285 * Context: 1286 * Kernel context. 
1287 */ 1288int 1289qla2x00_mbx_reg_test(scsi_qla_host_t *vha) 1290{ 1291 int rval; 1292 mbx_cmd_t mc; 1293 mbx_cmd_t *mcp = &mc; 1294 1295 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032, 1296 "Entered %s.\n", __func__); 1297 1298 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; 1299 mcp->mb[1] = 0xAAAA; 1300 mcp->mb[2] = 0x5555; 1301 mcp->mb[3] = 0xAA55; 1302 mcp->mb[4] = 0x55AA; 1303 mcp->mb[5] = 0xA5A5; 1304 mcp->mb[6] = 0x5A5A; 1305 mcp->mb[7] = 0x2525; 1306 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 1307 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 1308 mcp->tov = MBX_TOV_SECONDS; 1309 mcp->flags = 0; 1310 rval = qla2x00_mailbox_command(vha, mcp); 1311 1312 if (rval == QLA_SUCCESS) { 1313 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 || 1314 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA) 1315 rval = QLA_FUNCTION_FAILED; 1316 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A || 1317 mcp->mb[7] != 0x2525) 1318 rval = QLA_FUNCTION_FAILED; 1319 } 1320 1321 if (rval != QLA_SUCCESS) { 1322 /*EMPTY*/ 1323 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval); 1324 } else { 1325 /*EMPTY*/ 1326 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034, 1327 "Done %s.\n", __func__); 1328 } 1329 1330 return rval; 1331} 1332 1333/* 1334 * qla2x00_verify_checksum 1335 * Verify firmware checksum. 1336 * 1337 * Input: 1338 * ha = adapter block pointer. 1339 * TARGET_QUEUE_LOCK must be released. 1340 * ADAPTER_STATE_LOCK must be released. 1341 * 1342 * Returns: 1343 * qla2x00 local function return status code. 1344 * 1345 * Context: 1346 * Kernel context. 1347 */ 1348int 1349qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr) 1350{ 1351 int rval; 1352 mbx_cmd_t mc; 1353 mbx_cmd_t *mcp = &mc; 1354 1355 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035, 1356 "Entered %s.\n", __func__); 1357 1358 mcp->mb[0] = MBC_VERIFY_CHECKSUM; 1359 mcp->out_mb = MBX_0; 1360 mcp->in_mb = MBX_0; 1361 if (IS_FWI2_CAPABLE(vha->hw)) { 1362 mcp->mb[1] = MSW(risc_addr); 1363 mcp->mb[2] = LSW(risc_addr); 1364 mcp->out_mb |= MBX_2|MBX_1; 1365 mcp->in_mb |= MBX_2|MBX_1; 1366 } else { 1367 mcp->mb[1] = LSW(risc_addr); 1368 mcp->out_mb |= MBX_1; 1369 mcp->in_mb |= MBX_1; 1370 } 1371 1372 mcp->tov = MBX_TOV_SECONDS; 1373 mcp->flags = 0; 1374 rval = qla2x00_mailbox_command(vha, mcp); 1375 1376 if (rval != QLA_SUCCESS) { 1377 ql_dbg(ql_dbg_mbx, vha, 0x1036, 1378 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ? 1379 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]); 1380 } else { 1381 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037, 1382 "Done %s.\n", __func__); 1383 } 1384 1385 return rval; 1386} 1387 1388/* 1389 * qla2x00_issue_iocb 1390 * Issue IOCB using mailbox command 1391 * 1392 * Input: 1393 * ha = adapter state pointer. 1394 * buffer = buffer pointer. 1395 * phys_addr = physical address of buffer. 1396 * size = size of buffer. 1397 * TARGET_QUEUE_LOCK must be released. 1398 * ADAPTER_STATE_LOCK must be released. 1399 * 1400 * Returns: 1401 * qla2x00 local function return status code. 1402 * 1403 * Context: 1404 * Kernel context. 
1405 */ 1406int 1407qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer, 1408 dma_addr_t phys_addr, size_t size, uint32_t tov) 1409{ 1410 int rval; 1411 mbx_cmd_t mc; 1412 mbx_cmd_t *mcp = &mc; 1413 1414 if (!vha->hw->flags.fw_started) 1415 return QLA_INVALID_COMMAND; 1416 1417 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038, 1418 "Entered %s.\n", __func__); 1419 1420 mcp->mb[0] = MBC_IOCB_COMMAND_A64; 1421 mcp->mb[1] = 0; 1422 mcp->mb[2] = MSW(LSD(phys_addr)); 1423 mcp->mb[3] = LSW(LSD(phys_addr)); 1424 mcp->mb[6] = MSW(MSD(phys_addr)); 1425 mcp->mb[7] = LSW(MSD(phys_addr)); 1426 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 1427 mcp->in_mb = MBX_1|MBX_0; 1428 mcp->tov = tov; 1429 mcp->flags = 0; 1430 rval = qla2x00_mailbox_command(vha, mcp); 1431 1432 if (rval != QLA_SUCCESS) { 1433 /*EMPTY*/ 1434 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval); 1435 } else { 1436 sts_entry_t *sts_entry = buffer; 1437 1438 /* Mask reserved bits. */ 1439 sts_entry->entry_status &= 1440 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK; 1441 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a, 1442 "Done %s (status=%x).\n", __func__, 1443 sts_entry->entry_status); 1444 } 1445 1446 return rval; 1447} 1448 1449int 1450qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr, 1451 size_t size) 1452{ 1453 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size, 1454 MBX_TOV_SECONDS); 1455} 1456 1457/* 1458 * qla2x00_abort_command 1459 * Abort command aborts a specified IOCB. 1460 * 1461 * Input: 1462 * ha = adapter block pointer. 1463 * sp = SB structure pointer. 1464 * 1465 * Returns: 1466 * qla2x00 local function return status code. 1467 * 1468 * Context: 1469 * Kernel context. 1470 */ 1471int 1472qla2x00_abort_command(srb_t *sp) 1473{ 1474 unsigned long flags = 0; 1475 int rval; 1476 uint32_t handle = 0; 1477 mbx_cmd_t mc; 1478 mbx_cmd_t *mcp = &mc; 1479 fc_port_t *fcport = sp->fcport; 1480 scsi_qla_host_t *vha = fcport->vha; 1481 struct qla_hw_data *ha = vha->hw; 1482 struct req_que *req; 1483 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1484 1485 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b, 1486 "Entered %s.\n", __func__); 1487 1488 if (sp->qpair) 1489 req = sp->qpair->req; 1490 else 1491 req = vha->req; 1492 1493 spin_lock_irqsave(&ha->hardware_lock, flags); 1494 for (handle = 1; handle < req->num_outstanding_cmds; handle++) { 1495 if (req->outstanding_cmds[handle] == sp) 1496 break; 1497 } 1498 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1499 1500 if (handle == req->num_outstanding_cmds) { 1501 /* command not found */ 1502 return QLA_FUNCTION_FAILED; 1503 } 1504 1505 mcp->mb[0] = MBC_ABORT_COMMAND; 1506 if (HAS_EXTENDED_IDS(ha)) 1507 mcp->mb[1] = fcport->loop_id; 1508 else 1509 mcp->mb[1] = fcport->loop_id << 8; 1510 mcp->mb[2] = (uint16_t)handle; 1511 mcp->mb[3] = (uint16_t)(handle >> 16); 1512 mcp->mb[6] = (uint16_t)cmd->device->lun; 1513 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 1514 mcp->in_mb = MBX_0; 1515 mcp->tov = MBX_TOV_SECONDS; 1516 mcp->flags = 0; 1517 rval = qla2x00_mailbox_command(vha, mcp); 1518 1519 if (rval != QLA_SUCCESS) { 1520 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval); 1521 } else { 1522 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d, 1523 "Done %s.\n", __func__); 1524 } 1525 1526 return rval; 1527} 1528 1529int 1530qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag) 1531{ 1532 int rval, rval2; 1533 mbx_cmd_t mc; 1534 mbx_cmd_t *mcp = &mc; 1535 scsi_qla_host_t *vha; 1536 1537 vha = fcport->vha; 1538 1539 
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e, 1540 "Entered %s.\n", __func__); 1541 1542 mcp->mb[0] = MBC_ABORT_TARGET; 1543 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0; 1544 if (HAS_EXTENDED_IDS(vha->hw)) { 1545 mcp->mb[1] = fcport->loop_id; 1546 mcp->mb[10] = 0; 1547 mcp->out_mb |= MBX_10; 1548 } else { 1549 mcp->mb[1] = fcport->loop_id << 8; 1550 } 1551 mcp->mb[2] = vha->hw->loop_reset_delay; 1552 mcp->mb[9] = vha->vp_idx; 1553 1554 mcp->in_mb = MBX_0; 1555 mcp->tov = MBX_TOV_SECONDS; 1556 mcp->flags = 0; 1557 rval = qla2x00_mailbox_command(vha, mcp); 1558 if (rval != QLA_SUCCESS) { 1559 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f, 1560 "Failed=%x.\n", rval); 1561 } 1562 1563 /* Issue marker IOCB. */ 1564 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0, 1565 MK_SYNC_ID); 1566 if (rval2 != QLA_SUCCESS) { 1567 ql_dbg(ql_dbg_mbx, vha, 0x1040, 1568 "Failed to issue marker IOCB (%x).\n", rval2); 1569 } else { 1570 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041, 1571 "Done %s.\n", __func__); 1572 } 1573 1574 return rval; 1575} 1576 1577int 1578qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag) 1579{ 1580 int rval, rval2; 1581 mbx_cmd_t mc; 1582 mbx_cmd_t *mcp = &mc; 1583 scsi_qla_host_t *vha; 1584 1585 vha = fcport->vha; 1586 1587 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042, 1588 "Entered %s.\n", __func__); 1589 1590 mcp->mb[0] = MBC_LUN_RESET; 1591 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 1592 if (HAS_EXTENDED_IDS(vha->hw)) 1593 mcp->mb[1] = fcport->loop_id; 1594 else 1595 mcp->mb[1] = fcport->loop_id << 8; 1596 mcp->mb[2] = (u32)l; 1597 mcp->mb[3] = 0; 1598 mcp->mb[9] = vha->vp_idx; 1599 1600 mcp->in_mb = MBX_0; 1601 mcp->tov = MBX_TOV_SECONDS; 1602 mcp->flags = 0; 1603 rval = qla2x00_mailbox_command(vha, mcp); 1604 if (rval != QLA_SUCCESS) { 1605 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval); 1606 } 1607 1608 /* Issue marker IOCB. */ 1609 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l, 1610 MK_SYNC_ID_LUN); 1611 if (rval2 != QLA_SUCCESS) { 1612 ql_dbg(ql_dbg_mbx, vha, 0x1044, 1613 "Failed to issue marker IOCB (%x).\n", rval2); 1614 } else { 1615 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045, 1616 "Done %s.\n", __func__); 1617 } 1618 1619 return rval; 1620} 1621 1622/* 1623 * qla2x00_get_adapter_id 1624 * Get adapter ID and topology. 1625 * 1626 * Input: 1627 * ha = adapter block pointer. 1628 * id = pointer for loop ID. 1629 * al_pa = pointer for AL_PA. 1630 * area = pointer for area. 1631 * domain = pointer for domain. 1632 * top = pointer for topology. 1633 * TARGET_QUEUE_LOCK must be released. 1634 * ADAPTER_STATE_LOCK must be released. 1635 * 1636 * Returns: 1637 * qla2x00 local function return status code. 1638 * 1639 * Context: 1640 * Kernel context. 
1641 */ 1642int 1643qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, 1644 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap) 1645{ 1646 int rval; 1647 mbx_cmd_t mc; 1648 mbx_cmd_t *mcp = &mc; 1649 1650 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046, 1651 "Entered %s.\n", __func__); 1652 1653 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID; 1654 mcp->mb[9] = vha->vp_idx; 1655 mcp->out_mb = MBX_9|MBX_0; 1656 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 1657 if (IS_CNA_CAPABLE(vha->hw)) 1658 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10; 1659 if (IS_FWI2_CAPABLE(vha->hw)) 1660 mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16; 1661 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) 1662 mcp->in_mb |= MBX_15|MBX_21|MBX_22|MBX_23; 1663 1664 mcp->tov = MBX_TOV_SECONDS; 1665 mcp->flags = 0; 1666 rval = qla2x00_mailbox_command(vha, mcp); 1667 if (mcp->mb[0] == MBS_COMMAND_ERROR) 1668 rval = QLA_COMMAND_ERROR; 1669 else if (mcp->mb[0] == MBS_INVALID_COMMAND) 1670 rval = QLA_INVALID_COMMAND; 1671 1672 /* Return data. */ 1673 *id = mcp->mb[1]; 1674 *al_pa = LSB(mcp->mb[2]); 1675 *area = MSB(mcp->mb[2]); 1676 *domain = LSB(mcp->mb[3]); 1677 *top = mcp->mb[6]; 1678 *sw_cap = mcp->mb[7]; 1679 1680 if (rval != QLA_SUCCESS) { 1681 /*EMPTY*/ 1682 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval); 1683 } else { 1684 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048, 1685 "Done %s.\n", __func__); 1686 1687 if (IS_CNA_CAPABLE(vha->hw)) { 1688 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff; 1689 vha->fcoe_fcf_idx = mcp->mb[10]; 1690 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8; 1691 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff; 1692 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8; 1693 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff; 1694 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8; 1695 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff; 1696 } 1697 /* If FA-WWN supported */ 1698 if (IS_FAWWN_CAPABLE(vha->hw)) { 1699 if (mcp->mb[7] & BIT_14) { 1700 vha->port_name[0] = MSB(mcp->mb[16]); 1701 vha->port_name[1] = LSB(mcp->mb[16]); 1702 vha->port_name[2] = MSB(mcp->mb[17]); 1703 vha->port_name[3] = LSB(mcp->mb[17]); 1704 vha->port_name[4] = MSB(mcp->mb[18]); 1705 vha->port_name[5] = LSB(mcp->mb[18]); 1706 vha->port_name[6] = MSB(mcp->mb[19]); 1707 vha->port_name[7] = LSB(mcp->mb[19]); 1708 fc_host_port_name(vha->host) = 1709 wwn_to_u64(vha->port_name); 1710 ql_dbg(ql_dbg_mbx, vha, 0x10ca, 1711 "FA-WWN acquired %016llx\n", 1712 wwn_to_u64(vha->port_name)); 1713 } 1714 } 1715 1716 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) { 1717 vha->bbcr = mcp->mb[15]; 1718 if (mcp->mb[7] & SCM_EDC_ACC_RECEIVED) { 1719 ql_log(ql_log_info, vha, 0x11a4, 1720 "SCM: EDC ELS completed, flags 0x%x\n", 1721 mcp->mb[21]); 1722 } 1723 if (mcp->mb[7] & SCM_RDF_ACC_RECEIVED) { 1724 vha->hw->flags.scm_enabled = 1; 1725 vha->scm_fabric_connection_flags |= 1726 SCM_FLAG_RDF_COMPLETED; 1727 ql_log(ql_log_info, vha, 0x11a5, 1728 "SCM: RDF ELS completed, flags 0x%x\n", 1729 mcp->mb[23]); 1730 } 1731 } 1732 } 1733 1734 return rval; 1735} 1736 1737/* 1738 * qla2x00_get_retry_cnt 1739 * Get current firmware login retry count and delay. 1740 * 1741 * Input: 1742 * ha = adapter block pointer. 1743 * retry_cnt = pointer to login retry count. 1744 * tov = pointer to login timeout value. 1745 * 1746 * Returns: 1747 * qla2x00 local function return status code. 1748 * 1749 * Context: 1750 * Kernel context. 
1751 */ 1752int 1753qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov, 1754 uint16_t *r_a_tov) 1755{ 1756 int rval; 1757 uint16_t ratov; 1758 mbx_cmd_t mc; 1759 mbx_cmd_t *mcp = &mc; 1760 1761 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049, 1762 "Entered %s.\n", __func__); 1763 1764 mcp->mb[0] = MBC_GET_RETRY_COUNT; 1765 mcp->out_mb = MBX_0; 1766 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1767 mcp->tov = MBX_TOV_SECONDS; 1768 mcp->flags = 0; 1769 rval = qla2x00_mailbox_command(vha, mcp); 1770 1771 if (rval != QLA_SUCCESS) { 1772 /*EMPTY*/ 1773 ql_dbg(ql_dbg_mbx, vha, 0x104a, 1774 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 1775 } else { 1776 /* Convert returned data and check our values. */ 1777 *r_a_tov = mcp->mb[3] / 2; 1778 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */ 1779 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) { 1780 /* Update to the larger values */ 1781 *retry_cnt = (uint8_t)mcp->mb[1]; 1782 *tov = ratov; 1783 } 1784 1785 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b, 1786 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov); 1787 } 1788 1789 return rval; 1790} 1791 1792/* 1793 * qla2x00_init_firmware 1794 * Initialize adapter firmware. 1795 * 1796 * Input: 1797 * ha = adapter block pointer. 1798 * dptr = Initialization control block pointer. 1799 * size = size of initialization control block. 1800 * TARGET_QUEUE_LOCK must be released. 1801 * ADAPTER_STATE_LOCK must be released. 1802 * 1803 * Returns: 1804 * qla2x00 local function return status code. 1805 * 1806 * Context: 1807 * Kernel context. 1808 */ 1809int 1810qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) 1811{ 1812 int rval; 1813 mbx_cmd_t mc; 1814 mbx_cmd_t *mcp = &mc; 1815 struct qla_hw_data *ha = vha->hw; 1816 1817 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c, 1818 "Entered %s.\n", __func__); 1819 1820 if (IS_P3P_TYPE(ha) && ql2xdbwr) 1821 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, 1822 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16))); 1823 1824 if (ha->flags.npiv_supported) 1825 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE; 1826 else 1827 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE; 1828 1829 mcp->mb[1] = 0; 1830 mcp->mb[2] = MSW(ha->init_cb_dma); 1831 mcp->mb[3] = LSW(ha->init_cb_dma); 1832 mcp->mb[6] = MSW(MSD(ha->init_cb_dma)); 1833 mcp->mb[7] = LSW(MSD(ha->init_cb_dma)); 1834 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 1835 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) { 1836 mcp->mb[1] = BIT_0; 1837 mcp->mb[10] = MSW(ha->ex_init_cb_dma); 1838 mcp->mb[11] = LSW(ha->ex_init_cb_dma); 1839 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma)); 1840 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma)); 1841 mcp->mb[14] = sizeof(*ha->ex_init_cb); 1842 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10; 1843 } 1844 1845 if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) { 1846 mcp->mb[1] |= BIT_1; 1847 mcp->mb[16] = MSW(ha->sf_init_cb_dma); 1848 mcp->mb[17] = LSW(ha->sf_init_cb_dma); 1849 mcp->mb[18] = MSW(MSD(ha->sf_init_cb_dma)); 1850 mcp->mb[19] = LSW(MSD(ha->sf_init_cb_dma)); 1851 mcp->mb[15] = sizeof(*ha->sf_init_cb); 1852 mcp->out_mb |= MBX_19|MBX_18|MBX_17|MBX_16|MBX_15; 1853 } 1854 1855 /* 1 and 2 should normally be captured. */ 1856 mcp->in_mb = MBX_2|MBX_1|MBX_0; 1857 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 1858 /* mb3 is additional info about the installed SFP. 
*/ 1859 mcp->in_mb |= MBX_3; 1860 mcp->buf_size = size; 1861 mcp->flags = MBX_DMA_OUT; 1862 mcp->tov = MBX_TOV_SECONDS; 1863 rval = qla2x00_mailbox_command(vha, mcp); 1864 1865 if (rval != QLA_SUCCESS) { 1866 /*EMPTY*/ 1867 ql_dbg(ql_dbg_mbx, vha, 0x104d, 1868 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n", 1869 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]); 1870 if (ha->init_cb) { 1871 ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n"); 1872 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 1873 0x0104d, ha->init_cb, sizeof(*ha->init_cb)); 1874 } 1875 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) { 1876 ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n"); 1877 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 1878 0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb)); 1879 } 1880 } else { 1881 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1882 if (mcp->mb[2] == 6 || mcp->mb[3] == 2) 1883 ql_dbg(ql_dbg_mbx, vha, 0x119d, 1884 "Invalid SFP/Validation Failed\n"); 1885 } 1886 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e, 1887 "Done %s.\n", __func__); 1888 } 1889 1890 return rval; 1891} 1892 1893 1894/* 1895 * qla2x00_get_port_database 1896 * Issue normal/enhanced get port database mailbox command 1897 * and copy device name as necessary. 1898 * 1899 * Input: 1900 * ha = adapter state pointer. 1901 * dev = structure pointer. 1902 * opt = enhanced cmd option byte. 1903 * 1904 * Returns: 1905 * qla2x00 local function return status code. 1906 * 1907 * Context: 1908 * Kernel context. 1909 */ 1910int 1911qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt) 1912{ 1913 int rval; 1914 mbx_cmd_t mc; 1915 mbx_cmd_t *mcp = &mc; 1916 port_database_t *pd; 1917 struct port_database_24xx *pd24; 1918 dma_addr_t pd_dma; 1919 struct qla_hw_data *ha = vha->hw; 1920 1921 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f, 1922 "Entered %s.\n", __func__); 1923 1924 pd24 = NULL; 1925 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 1926 if (pd == NULL) { 1927 ql_log(ql_log_warn, vha, 0x1050, 1928 "Failed to allocate port database structure.\n"); 1929 fcport->query = 0; 1930 return QLA_MEMORY_ALLOC_FAILED; 1931 } 1932 1933 mcp->mb[0] = MBC_GET_PORT_DATABASE; 1934 if (opt != 0 && !IS_FWI2_CAPABLE(ha)) 1935 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE; 1936 mcp->mb[2] = MSW(pd_dma); 1937 mcp->mb[3] = LSW(pd_dma); 1938 mcp->mb[6] = MSW(MSD(pd_dma)); 1939 mcp->mb[7] = LSW(MSD(pd_dma)); 1940 mcp->mb[9] = vha->vp_idx; 1941 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 1942 mcp->in_mb = MBX_0; 1943 if (IS_FWI2_CAPABLE(ha)) { 1944 mcp->mb[1] = fcport->loop_id; 1945 mcp->mb[10] = opt; 1946 mcp->out_mb |= MBX_10|MBX_1; 1947 mcp->in_mb |= MBX_1; 1948 } else if (HAS_EXTENDED_IDS(ha)) { 1949 mcp->mb[1] = fcport->loop_id; 1950 mcp->mb[10] = opt; 1951 mcp->out_mb |= MBX_10|MBX_1; 1952 } else { 1953 mcp->mb[1] = fcport->loop_id << 8 | opt; 1954 mcp->out_mb |= MBX_1; 1955 } 1956 mcp->buf_size = IS_FWI2_CAPABLE(ha) ? 1957 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE; 1958 mcp->flags = MBX_DMA_IN; 1959 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 1960 rval = qla2x00_mailbox_command(vha, mcp); 1961 if (rval != QLA_SUCCESS) 1962 goto gpd_error_out; 1963 1964 if (IS_FWI2_CAPABLE(ha)) { 1965 uint64_t zero = 0; 1966 u8 current_login_state, last_login_state; 1967 1968 pd24 = (struct port_database_24xx *) pd; 1969 1970 /* Check for logged in state. 
*/ 1971 if (NVME_TARGET(ha, fcport)) { 1972 current_login_state = pd24->current_login_state >> 4; 1973 last_login_state = pd24->last_login_state >> 4; 1974 } else { 1975 current_login_state = pd24->current_login_state & 0xf; 1976 last_login_state = pd24->last_login_state & 0xf; 1977 } 1978 fcport->current_login_state = pd24->current_login_state; 1979 fcport->last_login_state = pd24->last_login_state; 1980 1981 /* Check for logged in state. */ 1982 if (current_login_state != PDS_PRLI_COMPLETE && 1983 last_login_state != PDS_PRLI_COMPLETE) { 1984 ql_dbg(ql_dbg_mbx, vha, 0x119a, 1985 "Unable to verify login-state (%x/%x) for loop_id %x.\n", 1986 current_login_state, last_login_state, 1987 fcport->loop_id); 1988 rval = QLA_FUNCTION_FAILED; 1989 1990 if (!fcport->query) 1991 goto gpd_error_out; 1992 } 1993 1994 if (fcport->loop_id == FC_NO_LOOP_ID || 1995 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 1996 memcmp(fcport->port_name, pd24->port_name, 8))) { 1997 /* We lost the device mid way. */ 1998 rval = QLA_NOT_LOGGED_IN; 1999 goto gpd_error_out; 2000 } 2001 2002 /* Names are little-endian. */ 2003 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE); 2004 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE); 2005 2006 /* Get port_id of device. */ 2007 fcport->d_id.b.domain = pd24->port_id[0]; 2008 fcport->d_id.b.area = pd24->port_id[1]; 2009 fcport->d_id.b.al_pa = pd24->port_id[2]; 2010 fcport->d_id.b.rsvd_1 = 0; 2011 2012 /* If not target must be initiator or unknown type. */ 2013 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0) 2014 fcport->port_type = FCT_INITIATOR; 2015 else 2016 fcport->port_type = FCT_TARGET; 2017 2018 /* Passback COS information. */ 2019 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ? 2020 FC_COS_CLASS2 : FC_COS_CLASS3; 2021 2022 if (pd24->prli_svc_param_word_3[0] & BIT_7) 2023 fcport->flags |= FCF_CONF_COMP_SUPPORTED; 2024 } else { 2025 uint64_t zero = 0; 2026 2027 /* Check for logged in state. */ 2028 if (pd->master_state != PD_STATE_PORT_LOGGED_IN && 2029 pd->slave_state != PD_STATE_PORT_LOGGED_IN) { 2030 ql_dbg(ql_dbg_mbx, vha, 0x100a, 2031 "Unable to verify login-state (%x/%x) - " 2032 "portid=%02x%02x%02x.\n", pd->master_state, 2033 pd->slave_state, fcport->d_id.b.domain, 2034 fcport->d_id.b.area, fcport->d_id.b.al_pa); 2035 rval = QLA_FUNCTION_FAILED; 2036 goto gpd_error_out; 2037 } 2038 2039 if (fcport->loop_id == FC_NO_LOOP_ID || 2040 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 2041 memcmp(fcport->port_name, pd->port_name, 8))) { 2042 /* We lost the device mid way. */ 2043 rval = QLA_NOT_LOGGED_IN; 2044 goto gpd_error_out; 2045 } 2046 2047 /* Names are little-endian. */ 2048 memcpy(fcport->node_name, pd->node_name, WWN_SIZE); 2049 memcpy(fcport->port_name, pd->port_name, WWN_SIZE); 2050 2051 /* Get port_id of device. */ 2052 fcport->d_id.b.domain = pd->port_id[0]; 2053 fcport->d_id.b.area = pd->port_id[3]; 2054 fcport->d_id.b.al_pa = pd->port_id[2]; 2055 fcport->d_id.b.rsvd_1 = 0; 2056 2057 /* If not target must be initiator or unknown type. */ 2058 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) 2059 fcport->port_type = FCT_INITIATOR; 2060 else 2061 fcport->port_type = FCT_TARGET; 2062 2063 /* Passback COS information. */ 2064 fcport->supported_classes = (pd->options & BIT_4) ? 
2065 FC_COS_CLASS2 : FC_COS_CLASS3; 2066 } 2067 2068gpd_error_out: 2069 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 2070 fcport->query = 0; 2071 2072 if (rval != QLA_SUCCESS) { 2073 ql_dbg(ql_dbg_mbx, vha, 0x1052, 2074 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, 2075 mcp->mb[0], mcp->mb[1]); 2076 } else { 2077 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053, 2078 "Done %s.\n", __func__); 2079 } 2080 2081 return rval; 2082} 2083 2084int 2085qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle, 2086 struct port_database_24xx *pdb) 2087{ 2088 mbx_cmd_t mc; 2089 mbx_cmd_t *mcp = &mc; 2090 dma_addr_t pdb_dma; 2091 int rval; 2092 2093 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115, 2094 "Entered %s.\n", __func__); 2095 2096 memset(pdb, 0, sizeof(*pdb)); 2097 2098 pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb, 2099 sizeof(*pdb), DMA_FROM_DEVICE); 2100 if (!pdb_dma) { 2101 ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n"); 2102 return QLA_MEMORY_ALLOC_FAILED; 2103 } 2104 2105 mcp->mb[0] = MBC_GET_PORT_DATABASE; 2106 mcp->mb[1] = nport_handle; 2107 mcp->mb[2] = MSW(LSD(pdb_dma)); 2108 mcp->mb[3] = LSW(LSD(pdb_dma)); 2109 mcp->mb[6] = MSW(MSD(pdb_dma)); 2110 mcp->mb[7] = LSW(MSD(pdb_dma)); 2111 mcp->mb[9] = 0; 2112 mcp->mb[10] = 0; 2113 mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2114 mcp->in_mb = MBX_1|MBX_0; 2115 mcp->buf_size = sizeof(*pdb); 2116 mcp->flags = MBX_DMA_IN; 2117 mcp->tov = vha->hw->login_timeout * 2; 2118 rval = qla2x00_mailbox_command(vha, mcp); 2119 2120 if (rval != QLA_SUCCESS) { 2121 ql_dbg(ql_dbg_mbx, vha, 0x111a, 2122 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2123 rval, mcp->mb[0], mcp->mb[1]); 2124 } else { 2125 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b, 2126 "Done %s.\n", __func__); 2127 } 2128 2129 dma_unmap_single(&vha->hw->pdev->dev, pdb_dma, 2130 sizeof(*pdb), DMA_FROM_DEVICE); 2131 2132 return rval; 2133} 2134 2135/* 2136 * qla2x00_get_firmware_state 2137 * Get adapter firmware state. 2138 * 2139 * Input: 2140 * ha = adapter block pointer. 2141 * dptr = pointer for firmware state. 2142 * TARGET_QUEUE_LOCK must be released. 2143 * ADAPTER_STATE_LOCK must be released. 2144 * 2145 * Returns: 2146 * qla2x00 local function return status code. 2147 * 2148 * Context: 2149 * Kernel context. 2150 */ 2151int 2152qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) 2153{ 2154 int rval; 2155 mbx_cmd_t mc; 2156 mbx_cmd_t *mcp = &mc; 2157 struct qla_hw_data *ha = vha->hw; 2158 2159 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054, 2160 "Entered %s.\n", __func__); 2161 2162 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 2163 mcp->out_mb = MBX_0; 2164 if (IS_FWI2_CAPABLE(vha->hw)) 2165 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 2166 else 2167 mcp->in_mb = MBX_1|MBX_0; 2168 mcp->tov = MBX_TOV_SECONDS; 2169 mcp->flags = 0; 2170 rval = qla2x00_mailbox_command(vha, mcp); 2171 2172 /* Return firmware states. 
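 *
 * mb[1] is the firmware state proper; the extra words copied below
 * (including the SFP and D-Port status noted inline) are only defined
 * on FWI2-capable adapters.  Caller-side sketch (illustrative only,
 * names as used elsewhere in this driver; error handling elided):
 *
 *	uint16_t state[6] = { 0 };
 *
 *	rval = qla2x00_get_firmware_state(vha, state);
 *	if (rval == QLA_SUCCESS && state[0] == FSTATE_READY)
 *		fw_ready = true;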
*/ 2173 states[0] = mcp->mb[1]; 2174 if (IS_FWI2_CAPABLE(vha->hw)) { 2175 states[1] = mcp->mb[2]; 2176 states[2] = mcp->mb[3]; /* SFP info */ 2177 states[3] = mcp->mb[4]; 2178 states[4] = mcp->mb[5]; 2179 states[5] = mcp->mb[6]; /* DPORT status */ 2180 } 2181 2182 if (rval != QLA_SUCCESS) { 2183 /*EMPTY*/ 2184 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval); 2185 } else { 2186 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 2187 if (mcp->mb[2] == 6 || mcp->mb[3] == 2) 2188 ql_dbg(ql_dbg_mbx, vha, 0x119e, 2189 "Invalid SFP/Validation Failed\n"); 2190 } 2191 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056, 2192 "Done %s.\n", __func__); 2193 } 2194 2195 return rval; 2196} 2197 2198/* 2199 * qla2x00_get_port_name 2200 * Issue get port name mailbox command. 2201 * Returned name is in big endian format. 2202 * 2203 * Input: 2204 * ha = adapter block pointer. 2205 * loop_id = loop ID of device. 2206 * name = pointer for name. 2207 * TARGET_QUEUE_LOCK must be released. 2208 * ADAPTER_STATE_LOCK must be released. 2209 * 2210 * Returns: 2211 * qla2x00 local function return status code. 2212 * 2213 * Context: 2214 * Kernel context. 2215 */ 2216int 2217qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name, 2218 uint8_t opt) 2219{ 2220 int rval; 2221 mbx_cmd_t mc; 2222 mbx_cmd_t *mcp = &mc; 2223 2224 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057, 2225 "Entered %s.\n", __func__); 2226 2227 mcp->mb[0] = MBC_GET_PORT_NAME; 2228 mcp->mb[9] = vha->vp_idx; 2229 mcp->out_mb = MBX_9|MBX_1|MBX_0; 2230 if (HAS_EXTENDED_IDS(vha->hw)) { 2231 mcp->mb[1] = loop_id; 2232 mcp->mb[10] = opt; 2233 mcp->out_mb |= MBX_10; 2234 } else { 2235 mcp->mb[1] = loop_id << 8 | opt; 2236 } 2237 2238 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2239 mcp->tov = MBX_TOV_SECONDS; 2240 mcp->flags = 0; 2241 rval = qla2x00_mailbox_command(vha, mcp); 2242 2243 if (rval != QLA_SUCCESS) { 2244 /*EMPTY*/ 2245 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval); 2246 } else { 2247 if (name != NULL) { 2248 /* This function returns name in big endian. */ 2249 name[0] = MSB(mcp->mb[2]); 2250 name[1] = LSB(mcp->mb[2]); 2251 name[2] = MSB(mcp->mb[3]); 2252 name[3] = LSB(mcp->mb[3]); 2253 name[4] = MSB(mcp->mb[6]); 2254 name[5] = LSB(mcp->mb[6]); 2255 name[6] = MSB(mcp->mb[7]); 2256 name[7] = LSB(mcp->mb[7]); 2257 } 2258 2259 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059, 2260 "Done %s.\n", __func__); 2261 } 2262 2263 return rval; 2264} 2265 2266/* 2267 * qla24xx_link_initialization 2268 * Issue link initialization mailbox command. 2269 * 2270 * Input: 2271 * ha = adapter block pointer. 2272 * TARGET_QUEUE_LOCK must be released. 2273 * ADAPTER_STATE_LOCK must be released. 2274 * 2275 * Returns: 2276 * qla2x00 local function return status code. 2277 * 2278 * Context: 2279 * Kernel context. 
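 *
 * Only FWI2-capable, non-CNA adapters accept this command; everything
 * else is rejected below with QLA_FUNCTION_FAILED.  Illustrative use
 * (a sketch only):
 *
 *	rval = qla24xx_link_initialize(vha);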
2280 */ 2281int 2282qla24xx_link_initialize(scsi_qla_host_t *vha) 2283{ 2284 int rval; 2285 mbx_cmd_t mc; 2286 mbx_cmd_t *mcp = &mc; 2287 2288 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152, 2289 "Entered %s.\n", __func__); 2290 2291 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw)) 2292 return QLA_FUNCTION_FAILED; 2293 2294 mcp->mb[0] = MBC_LINK_INITIALIZATION; 2295 mcp->mb[1] = BIT_4; 2296 if (vha->hw->operating_mode == LOOP) 2297 mcp->mb[1] |= BIT_6; 2298 else 2299 mcp->mb[1] |= BIT_5; 2300 mcp->mb[2] = 0; 2301 mcp->mb[3] = 0; 2302 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2303 mcp->in_mb = MBX_0; 2304 mcp->tov = MBX_TOV_SECONDS; 2305 mcp->flags = 0; 2306 rval = qla2x00_mailbox_command(vha, mcp); 2307 2308 if (rval != QLA_SUCCESS) { 2309 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval); 2310 } else { 2311 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154, 2312 "Done %s.\n", __func__); 2313 } 2314 2315 return rval; 2316} 2317 2318/* 2319 * qla2x00_lip_reset 2320 * Issue LIP reset mailbox command. 2321 * 2322 * Input: 2323 * ha = adapter block pointer. 2324 * TARGET_QUEUE_LOCK must be released. 2325 * ADAPTER_STATE_LOCK must be released. 2326 * 2327 * Returns: 2328 * qla2x00 local function return status code. 2329 * 2330 * Context: 2331 * Kernel context. 2332 */ 2333int 2334qla2x00_lip_reset(scsi_qla_host_t *vha) 2335{ 2336 int rval; 2337 mbx_cmd_t mc; 2338 mbx_cmd_t *mcp = &mc; 2339 2340 ql_dbg(ql_dbg_disc, vha, 0x105a, 2341 "Entered %s.\n", __func__); 2342 2343 if (IS_CNA_CAPABLE(vha->hw)) { 2344 /* Logout across all FCFs. */ 2345 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2346 mcp->mb[1] = BIT_1; 2347 mcp->mb[2] = 0; 2348 mcp->out_mb = MBX_2|MBX_1|MBX_0; 2349 } else if (IS_FWI2_CAPABLE(vha->hw)) { 2350 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2351 mcp->mb[1] = BIT_4; 2352 mcp->mb[2] = 0; 2353 mcp->mb[3] = vha->hw->loop_reset_delay; 2354 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2355 } else { 2356 mcp->mb[0] = MBC_LIP_RESET; 2357 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2358 if (HAS_EXTENDED_IDS(vha->hw)) { 2359 mcp->mb[1] = 0x00ff; 2360 mcp->mb[10] = 0; 2361 mcp->out_mb |= MBX_10; 2362 } else { 2363 mcp->mb[1] = 0xff00; 2364 } 2365 mcp->mb[2] = vha->hw->loop_reset_delay; 2366 mcp->mb[3] = 0; 2367 } 2368 mcp->in_mb = MBX_0; 2369 mcp->tov = MBX_TOV_SECONDS; 2370 mcp->flags = 0; 2371 rval = qla2x00_mailbox_command(vha, mcp); 2372 2373 if (rval != QLA_SUCCESS) { 2374 /*EMPTY*/ 2375 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval); 2376 } else { 2377 /*EMPTY*/ 2378 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c, 2379 "Done %s.\n", __func__); 2380 } 2381 2382 return rval; 2383} 2384 2385/* 2386 * qla2x00_send_sns 2387 * Send SNS command. 2388 * 2389 * Input: 2390 * ha = adapter block pointer. 2391 * sns = pointer for command. 2392 * cmd_size = command size. 2393 * buf_size = response/command size. 2394 * TARGET_QUEUE_LOCK must be released. 2395 * ADAPTER_STATE_LOCK must be released. 2396 * 2397 * Returns: 2398 * qla2x00 local function return status code. 2399 * 2400 * Context: 2401 * Kernel context. 
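 *
 * The single DMA buffer is used in both directions (MBX_DMA_OUT and
 * MBX_DMA_IN below): the SNS request is pushed out of it and the
 * response is written back into it, so buf_size describes the larger
 * of the two as noted above.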
2402 */ 2403int 2404qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address, 2405 uint16_t cmd_size, size_t buf_size) 2406{ 2407 int rval; 2408 mbx_cmd_t mc; 2409 mbx_cmd_t *mcp = &mc; 2410 2411 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d, 2412 "Entered %s.\n", __func__); 2413 2414 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e, 2415 "Retry cnt=%d ratov=%d total tov=%d.\n", 2416 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov); 2417 2418 mcp->mb[0] = MBC_SEND_SNS_COMMAND; 2419 mcp->mb[1] = cmd_size; 2420 mcp->mb[2] = MSW(sns_phys_address); 2421 mcp->mb[3] = LSW(sns_phys_address); 2422 mcp->mb[6] = MSW(MSD(sns_phys_address)); 2423 mcp->mb[7] = LSW(MSD(sns_phys_address)); 2424 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2425 mcp->in_mb = MBX_0|MBX_1; 2426 mcp->buf_size = buf_size; 2427 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN; 2428 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2); 2429 rval = qla2x00_mailbox_command(vha, mcp); 2430 2431 if (rval != QLA_SUCCESS) { 2432 /*EMPTY*/ 2433 ql_dbg(ql_dbg_mbx, vha, 0x105f, 2434 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2435 rval, mcp->mb[0], mcp->mb[1]); 2436 } else { 2437 /*EMPTY*/ 2438 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060, 2439 "Done %s.\n", __func__); 2440 } 2441 2442 return rval; 2443} 2444 2445int 2446qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2447 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 2448{ 2449 int rval; 2450 2451 struct logio_entry_24xx *lg; 2452 dma_addr_t lg_dma; 2453 uint32_t iop[2]; 2454 struct qla_hw_data *ha = vha->hw; 2455 struct req_que *req; 2456 2457 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061, 2458 "Entered %s.\n", __func__); 2459 2460 if (vha->vp_idx && vha->qpair) 2461 req = vha->qpair->req; 2462 else 2463 req = ha->req_q_map[0]; 2464 2465 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2466 if (lg == NULL) { 2467 ql_log(ql_log_warn, vha, 0x1062, 2468 "Failed to allocate login IOCB.\n"); 2469 return QLA_MEMORY_ALLOC_FAILED; 2470 } 2471 2472 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2473 lg->entry_count = 1; 2474 lg->handle = make_handle(req->id, lg->handle); 2475 lg->nport_handle = cpu_to_le16(loop_id); 2476 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); 2477 if (opt & BIT_0) 2478 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI); 2479 if (opt & BIT_1) 2480 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); 2481 lg->port_id[0] = al_pa; 2482 lg->port_id[1] = area; 2483 lg->port_id[2] = domain; 2484 lg->vp_index = vha->vp_idx; 2485 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, 2486 (ha->r_a_tov / 10 * 2) + 2); 2487 if (rval != QLA_SUCCESS) { 2488 ql_dbg(ql_dbg_mbx, vha, 0x1063, 2489 "Failed to issue login IOCB (%x).\n", rval); 2490 } else if (lg->entry_status != 0) { 2491 ql_dbg(ql_dbg_mbx, vha, 0x1064, 2492 "Failed to complete IOCB -- error status (%x).\n", 2493 lg->entry_status); 2494 rval = QLA_FUNCTION_FAILED; 2495 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { 2496 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2497 iop[1] = le32_to_cpu(lg->io_parameter[1]); 2498 2499 ql_dbg(ql_dbg_mbx, vha, 0x1065, 2500 "Failed to complete IOCB -- completion status (%x) " 2501 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), 2502 iop[0], iop[1]); 2503 2504 switch (iop[0]) { 2505 case LSC_SCODE_PORTID_USED: 2506 mb[0] = MBS_PORT_ID_USED; 2507 mb[1] = LSW(iop[1]); 2508 break; 2509 case LSC_SCODE_NPORT_USED: 2510 mb[0] = MBS_LOOP_ID_USED; 2511 break; 2512 case LSC_SCODE_NOLINK: 2513 case LSC_SCODE_NOIOCB: 2514 
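		/*
		 * The remaining login sub-codes carry no extra data for the
		 * caller; they are all collapsed to MBS_COMMAND_ERROR below
		 * so callers see the legacy mailbox-style mb[0] semantics.
		 */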
case LSC_SCODE_NOXCB: 2515 case LSC_SCODE_CMD_FAILED: 2516 case LSC_SCODE_NOFABRIC: 2517 case LSC_SCODE_FW_NOT_READY: 2518 case LSC_SCODE_NOT_LOGGED_IN: 2519 case LSC_SCODE_NOPCB: 2520 case LSC_SCODE_ELS_REJECT: 2521 case LSC_SCODE_CMD_PARAM_ERR: 2522 case LSC_SCODE_NONPORT: 2523 case LSC_SCODE_LOGGED_IN: 2524 case LSC_SCODE_NOFLOGI_ACC: 2525 default: 2526 mb[0] = MBS_COMMAND_ERROR; 2527 break; 2528 } 2529 } else { 2530 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066, 2531 "Done %s.\n", __func__); 2532 2533 iop[0] = le32_to_cpu(lg->io_parameter[0]); 2534 2535 mb[0] = MBS_COMMAND_COMPLETE; 2536 mb[1] = 0; 2537 if (iop[0] & BIT_4) { 2538 if (iop[0] & BIT_8) 2539 mb[1] |= BIT_1; 2540 } else 2541 mb[1] = BIT_0; 2542 2543 /* Passback COS information. */ 2544 mb[10] = 0; 2545 if (lg->io_parameter[7] || lg->io_parameter[8]) 2546 mb[10] |= BIT_0; /* Class 2. */ 2547 if (lg->io_parameter[9] || lg->io_parameter[10]) 2548 mb[10] |= BIT_1; /* Class 3. */ 2549 if (lg->io_parameter[0] & cpu_to_le32(BIT_7)) 2550 mb[10] |= BIT_7; /* Confirmed Completion 2551 * Allowed 2552 */ 2553 } 2554 2555 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2556 2557 return rval; 2558} 2559 2560/* 2561 * qla2x00_login_fabric 2562 * Issue login fabric port mailbox command. 2563 * 2564 * Input: 2565 * ha = adapter block pointer. 2566 * loop_id = device loop ID. 2567 * domain = device domain. 2568 * area = device area. 2569 * al_pa = device AL_PA. 2570 * status = pointer for return status. 2571 * opt = command options. 2572 * TARGET_QUEUE_LOCK must be released. 2573 * ADAPTER_STATE_LOCK must be released. 2574 * 2575 * Returns: 2576 * qla2x00 local function return status code. 2577 * 2578 * Context: 2579 * Kernel context. 2580 */ 2581int 2582qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2583 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 2584{ 2585 int rval; 2586 mbx_cmd_t mc; 2587 mbx_cmd_t *mcp = &mc; 2588 struct qla_hw_data *ha = vha->hw; 2589 2590 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067, 2591 "Entered %s.\n", __func__); 2592 2593 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; 2594 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2595 if (HAS_EXTENDED_IDS(ha)) { 2596 mcp->mb[1] = loop_id; 2597 mcp->mb[10] = opt; 2598 mcp->out_mb |= MBX_10; 2599 } else { 2600 mcp->mb[1] = (loop_id << 8) | opt; 2601 } 2602 mcp->mb[2] = domain; 2603 mcp->mb[3] = area << 8 | al_pa; 2604 2605 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0; 2606 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2607 mcp->flags = 0; 2608 rval = qla2x00_mailbox_command(vha, mcp); 2609 2610 /* Return mailbox statuses. */ 2611 if (mb != NULL) { 2612 mb[0] = mcp->mb[0]; 2613 mb[1] = mcp->mb[1]; 2614 mb[2] = mcp->mb[2]; 2615 mb[6] = mcp->mb[6]; 2616 mb[7] = mcp->mb[7]; 2617 /* COS retrieved from Get-Port-Database mailbox command. */ 2618 mb[10] = 0; 2619 } 2620 2621 if (rval != QLA_SUCCESS) { 2622 /* RLU tmp code: need to change main mailbox_command function to 2623 * return ok even when the mailbox completion value is not 2624 * SUCCESS. The caller needs to be responsible to interpret 2625 * the return values of this mailbox command if we're not 2626 * to change too much of the existing code. 
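 *
 * The 0x400x values tested below are raw MBS_* completion codes
 * (invalid command, host interface error, test failed, command error
 * and command parameter error, assuming the usual MBS_* numbering);
 * for those the call is reported as successful and the caller is
 * expected to interpret mb[0] itself.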
2627 */ 2628 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 || 2629 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 || 2630 mcp->mb[0] == 0x4006) 2631 rval = QLA_SUCCESS; 2632 2633 /*EMPTY*/ 2634 ql_dbg(ql_dbg_mbx, vha, 0x1068, 2635 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 2636 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 2637 } else { 2638 /*EMPTY*/ 2639 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069, 2640 "Done %s.\n", __func__); 2641 } 2642 2643 return rval; 2644} 2645 2646/* 2647 * qla2x00_login_local_device 2648 * Issue login loop port mailbox command. 2649 * 2650 * Input: 2651 * ha = adapter block pointer. 2652 * loop_id = device loop ID. 2653 * opt = command options. 2654 * 2655 * Returns: 2656 * Return status code. 2657 * 2658 * Context: 2659 * Kernel context. 2660 * 2661 */ 2662int 2663qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport, 2664 uint16_t *mb_ret, uint8_t opt) 2665{ 2666 int rval; 2667 mbx_cmd_t mc; 2668 mbx_cmd_t *mcp = &mc; 2669 struct qla_hw_data *ha = vha->hw; 2670 2671 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a, 2672 "Entered %s.\n", __func__); 2673 2674 if (IS_FWI2_CAPABLE(ha)) 2675 return qla24xx_login_fabric(vha, fcport->loop_id, 2676 fcport->d_id.b.domain, fcport->d_id.b.area, 2677 fcport->d_id.b.al_pa, mb_ret, opt); 2678 2679 mcp->mb[0] = MBC_LOGIN_LOOP_PORT; 2680 if (HAS_EXTENDED_IDS(ha)) 2681 mcp->mb[1] = fcport->loop_id; 2682 else 2683 mcp->mb[1] = fcport->loop_id << 8; 2684 mcp->mb[2] = opt; 2685 mcp->out_mb = MBX_2|MBX_1|MBX_0; 2686 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0; 2687 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2688 mcp->flags = 0; 2689 rval = qla2x00_mailbox_command(vha, mcp); 2690 2691 /* Return mailbox statuses. */ 2692 if (mb_ret != NULL) { 2693 mb_ret[0] = mcp->mb[0]; 2694 mb_ret[1] = mcp->mb[1]; 2695 mb_ret[6] = mcp->mb[6]; 2696 mb_ret[7] = mcp->mb[7]; 2697 } 2698 2699 if (rval != QLA_SUCCESS) { 2700 /* AV tmp code: need to change main mailbox_command function to 2701 * return ok even when the mailbox completion value is not 2702 * SUCCESS. The caller needs to be responsible to interpret 2703 * the return values of this mailbox command if we're not 2704 * to change too much of the existing code. 
2705 */ 2706 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006) 2707 rval = QLA_SUCCESS; 2708 2709 ql_dbg(ql_dbg_mbx, vha, 0x106b, 2710 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n", 2711 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]); 2712 } else { 2713 /*EMPTY*/ 2714 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c, 2715 "Done %s.\n", __func__); 2716 } 2717 2718 return (rval); 2719} 2720 2721int 2722qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2723 uint8_t area, uint8_t al_pa) 2724{ 2725 int rval; 2726 struct logio_entry_24xx *lg; 2727 dma_addr_t lg_dma; 2728 struct qla_hw_data *ha = vha->hw; 2729 struct req_que *req; 2730 2731 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d, 2732 "Entered %s.\n", __func__); 2733 2734 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2735 if (lg == NULL) { 2736 ql_log(ql_log_warn, vha, 0x106e, 2737 "Failed to allocate logout IOCB.\n"); 2738 return QLA_MEMORY_ALLOC_FAILED; 2739 } 2740 2741 req = vha->req; 2742 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2743 lg->entry_count = 1; 2744 lg->handle = make_handle(req->id, lg->handle); 2745 lg->nport_handle = cpu_to_le16(loop_id); 2746 lg->control_flags = 2747 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO| 2748 LCF_FREE_NPORT); 2749 lg->port_id[0] = al_pa; 2750 lg->port_id[1] = area; 2751 lg->port_id[2] = domain; 2752 lg->vp_index = vha->vp_idx; 2753 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0, 2754 (ha->r_a_tov / 10 * 2) + 2); 2755 if (rval != QLA_SUCCESS) { 2756 ql_dbg(ql_dbg_mbx, vha, 0x106f, 2757 "Failed to issue logout IOCB (%x).\n", rval); 2758 } else if (lg->entry_status != 0) { 2759 ql_dbg(ql_dbg_mbx, vha, 0x1070, 2760 "Failed to complete IOCB -- error status (%x).\n", 2761 lg->entry_status); 2762 rval = QLA_FUNCTION_FAILED; 2763 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) { 2764 ql_dbg(ql_dbg_mbx, vha, 0x1071, 2765 "Failed to complete IOCB -- completion status (%x) " 2766 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), 2767 le32_to_cpu(lg->io_parameter[0]), 2768 le32_to_cpu(lg->io_parameter[1])); 2769 } else { 2770 /*EMPTY*/ 2771 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072, 2772 "Done %s.\n", __func__); 2773 } 2774 2775 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2776 2777 return rval; 2778} 2779 2780/* 2781 * qla2x00_fabric_logout 2782 * Issue logout fabric port mailbox command. 2783 * 2784 * Input: 2785 * ha = adapter block pointer. 2786 * loop_id = device loop ID. 2787 * TARGET_QUEUE_LOCK must be released. 2788 * ADAPTER_STATE_LOCK must be released. 2789 * 2790 * Returns: 2791 * qla2x00 local function return status code. 2792 * 2793 * Context: 2794 * Kernel context. 
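 *
 * Illustrative use, normally paired with an earlier fabric login
 * (a sketch only; error handling elided):
 *
 *	qla2x00_fabric_logout(vha, fcport->loop_id, fcport->d_id.b.domain,
 *	    fcport->d_id.b.area, fcport->d_id.b.al_pa);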
2795 */ 2796int 2797qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, 2798 uint8_t area, uint8_t al_pa) 2799{ 2800 int rval; 2801 mbx_cmd_t mc; 2802 mbx_cmd_t *mcp = &mc; 2803 2804 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073, 2805 "Entered %s.\n", __func__); 2806 2807 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; 2808 mcp->out_mb = MBX_1|MBX_0; 2809 if (HAS_EXTENDED_IDS(vha->hw)) { 2810 mcp->mb[1] = loop_id; 2811 mcp->mb[10] = 0; 2812 mcp->out_mb |= MBX_10; 2813 } else { 2814 mcp->mb[1] = loop_id << 8; 2815 } 2816 2817 mcp->in_mb = MBX_1|MBX_0; 2818 mcp->tov = MBX_TOV_SECONDS; 2819 mcp->flags = 0; 2820 rval = qla2x00_mailbox_command(vha, mcp); 2821 2822 if (rval != QLA_SUCCESS) { 2823 /*EMPTY*/ 2824 ql_dbg(ql_dbg_mbx, vha, 0x1074, 2825 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]); 2826 } else { 2827 /*EMPTY*/ 2828 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075, 2829 "Done %s.\n", __func__); 2830 } 2831 2832 return rval; 2833} 2834 2835/* 2836 * qla2x00_full_login_lip 2837 * Issue full login LIP mailbox command. 2838 * 2839 * Input: 2840 * ha = adapter block pointer. 2841 * TARGET_QUEUE_LOCK must be released. 2842 * ADAPTER_STATE_LOCK must be released. 2843 * 2844 * Returns: 2845 * qla2x00 local function return status code. 2846 * 2847 * Context: 2848 * Kernel context. 2849 */ 2850int 2851qla2x00_full_login_lip(scsi_qla_host_t *vha) 2852{ 2853 int rval; 2854 mbx_cmd_t mc; 2855 mbx_cmd_t *mcp = &mc; 2856 2857 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076, 2858 "Entered %s.\n", __func__); 2859 2860 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2861 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0; 2862 mcp->mb[2] = 0; 2863 mcp->mb[3] = 0; 2864 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 2865 mcp->in_mb = MBX_0; 2866 mcp->tov = MBX_TOV_SECONDS; 2867 mcp->flags = 0; 2868 rval = qla2x00_mailbox_command(vha, mcp); 2869 2870 if (rval != QLA_SUCCESS) { 2871 /*EMPTY*/ 2872 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval); 2873 } else { 2874 /*EMPTY*/ 2875 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078, 2876 "Done %s.\n", __func__); 2877 } 2878 2879 return rval; 2880} 2881 2882/* 2883 * qla2x00_get_id_list 2884 * 2885 * Input: 2886 * ha = adapter block pointer. 2887 * 2888 * Returns: 2889 * qla2x00 local function return status code. 2890 * 2891 * Context: 2892 * Kernel context. 
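 *
 * The firmware writes one entry per logged-in port into the supplied
 * DMA buffer and returns the entry count through *entries (mb[1]).
 * Illustrative use (a sketch; ha->gid_list/ha->gid_list_dma are the
 * buffers this driver normally pre-allocates for this purpose):
 *
 *	uint16_t entries = 0;
 *
 *	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
 *	    &entries);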
2893 */ 2894int 2895qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma, 2896 uint16_t *entries) 2897{ 2898 int rval; 2899 mbx_cmd_t mc; 2900 mbx_cmd_t *mcp = &mc; 2901 2902 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079, 2903 "Entered %s.\n", __func__); 2904 2905 if (id_list == NULL) 2906 return QLA_FUNCTION_FAILED; 2907 2908 mcp->mb[0] = MBC_GET_ID_LIST; 2909 mcp->out_mb = MBX_0; 2910 if (IS_FWI2_CAPABLE(vha->hw)) { 2911 mcp->mb[2] = MSW(id_list_dma); 2912 mcp->mb[3] = LSW(id_list_dma); 2913 mcp->mb[6] = MSW(MSD(id_list_dma)); 2914 mcp->mb[7] = LSW(MSD(id_list_dma)); 2915 mcp->mb[8] = 0; 2916 mcp->mb[9] = vha->vp_idx; 2917 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2; 2918 } else { 2919 mcp->mb[1] = MSW(id_list_dma); 2920 mcp->mb[2] = LSW(id_list_dma); 2921 mcp->mb[3] = MSW(MSD(id_list_dma)); 2922 mcp->mb[6] = LSW(MSD(id_list_dma)); 2923 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1; 2924 } 2925 mcp->in_mb = MBX_1|MBX_0; 2926 mcp->tov = MBX_TOV_SECONDS; 2927 mcp->flags = 0; 2928 rval = qla2x00_mailbox_command(vha, mcp); 2929 2930 if (rval != QLA_SUCCESS) { 2931 /*EMPTY*/ 2932 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval); 2933 } else { 2934 *entries = mcp->mb[1]; 2935 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b, 2936 "Done %s.\n", __func__); 2937 } 2938 2939 return rval; 2940} 2941 2942/* 2943 * qla2x00_get_resource_cnts 2944 * Get current firmware resource counts. 2945 * 2946 * Input: 2947 * ha = adapter block pointer. 2948 * 2949 * Returns: 2950 * qla2x00 local function return status code. 2951 * 2952 * Context: 2953 * Kernel context. 2954 */ 2955int 2956qla2x00_get_resource_cnts(scsi_qla_host_t *vha) 2957{ 2958 struct qla_hw_data *ha = vha->hw; 2959 int rval; 2960 mbx_cmd_t mc; 2961 mbx_cmd_t *mcp = &mc; 2962 2963 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c, 2964 "Entered %s.\n", __func__); 2965 2966 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; 2967 mcp->out_mb = MBX_0; 2968 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2969 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || 2970 IS_QLA27XX(ha) || IS_QLA28XX(ha)) 2971 mcp->in_mb |= MBX_12; 2972 mcp->tov = MBX_TOV_SECONDS; 2973 mcp->flags = 0; 2974 rval = qla2x00_mailbox_command(vha, mcp); 2975 2976 if (rval != QLA_SUCCESS) { 2977 /*EMPTY*/ 2978 ql_dbg(ql_dbg_mbx, vha, 0x107d, 2979 "Failed mb[0]=%x.\n", mcp->mb[0]); 2980 } else { 2981 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e, 2982 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x " 2983 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2], 2984 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10], 2985 mcp->mb[11], mcp->mb[12]); 2986 2987 ha->orig_fw_tgt_xcb_count = mcp->mb[1]; 2988 ha->cur_fw_tgt_xcb_count = mcp->mb[2]; 2989 ha->cur_fw_xcb_count = mcp->mb[3]; 2990 ha->orig_fw_xcb_count = mcp->mb[6]; 2991 ha->cur_fw_iocb_count = mcp->mb[7]; 2992 ha->orig_fw_iocb_count = mcp->mb[10]; 2993 if (ha->flags.npiv_supported) 2994 ha->max_npiv_vports = mcp->mb[11]; 2995 if (IS_QLA81XX(ha) || IS_QLA83XX(ha)) 2996 ha->fw_max_fcf_count = mcp->mb[12]; 2997 } 2998 2999 return (rval); 3000} 3001 3002/* 3003 * qla2x00_get_fcal_position_map 3004 * Get FCAL (LILP) position map using mailbox command 3005 * 3006 * Input: 3007 * ha = adapter state pointer. 3008 * pos_map = buffer pointer (can be NULL). 3009 * 3010 * Returns: 3011 * qla2x00 local function return status code. 3012 * 3013 * Context: 3014 * Kernel context. 
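 *
 * On success the buffer holds the loop (LILP) position map: byte 0 is
 * the entry count and the following bytes are one AL_PA per
 * participating port, which is how *num_entries and pos_map are
 * filled in below.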
3015 */ 3016int 3017qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map, 3018 u8 *num_entries) 3019{ 3020 int rval; 3021 mbx_cmd_t mc; 3022 mbx_cmd_t *mcp = &mc; 3023 char *pmap; 3024 dma_addr_t pmap_dma; 3025 struct qla_hw_data *ha = vha->hw; 3026 3027 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f, 3028 "Entered %s.\n", __func__); 3029 3030 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); 3031 if (pmap == NULL) { 3032 ql_log(ql_log_warn, vha, 0x1080, 3033 "Memory alloc failed.\n"); 3034 return QLA_MEMORY_ALLOC_FAILED; 3035 } 3036 3037 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP; 3038 mcp->mb[2] = MSW(pmap_dma); 3039 mcp->mb[3] = LSW(pmap_dma); 3040 mcp->mb[6] = MSW(MSD(pmap_dma)); 3041 mcp->mb[7] = LSW(MSD(pmap_dma)); 3042 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 3043 mcp->in_mb = MBX_1|MBX_0; 3044 mcp->buf_size = FCAL_MAP_SIZE; 3045 mcp->flags = MBX_DMA_IN; 3046 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 3047 rval = qla2x00_mailbox_command(vha, mcp); 3048 3049 if (rval == QLA_SUCCESS) { 3050 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081, 3051 "mb0/mb1=%x/%X FC/AL position map size (%x).\n", 3052 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]); 3053 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d, 3054 pmap, pmap[0] + 1); 3055 3056 if (pos_map) 3057 memcpy(pos_map, pmap, FCAL_MAP_SIZE); 3058 if (num_entries) 3059 *num_entries = pmap[0]; 3060 } 3061 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma); 3062 3063 if (rval != QLA_SUCCESS) { 3064 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval); 3065 } else { 3066 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083, 3067 "Done %s.\n", __func__); 3068 } 3069 3070 return rval; 3071} 3072 3073/* 3074 * qla2x00_get_link_status 3075 * 3076 * Input: 3077 * ha = adapter block pointer. 3078 * loop_id = device loop ID. 3079 * ret_buf = pointer to link status return buffer. 3080 * 3081 * Returns: 3082 * 0 = success. 3083 * BIT_0 = mem alloc error. 3084 * BIT_1 = mailbox error. 3085 */ 3086int 3087qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, 3088 struct link_statistics *stats, dma_addr_t stats_dma) 3089{ 3090 int rval; 3091 mbx_cmd_t mc; 3092 mbx_cmd_t *mcp = &mc; 3093 uint32_t *iter = (uint32_t *)stats; 3094 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter); 3095 struct qla_hw_data *ha = vha->hw; 3096 3097 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084, 3098 "Entered %s.\n", __func__); 3099 3100 mcp->mb[0] = MBC_GET_LINK_STATUS; 3101 mcp->mb[2] = MSW(LSD(stats_dma)); 3102 mcp->mb[3] = LSW(LSD(stats_dma)); 3103 mcp->mb[6] = MSW(MSD(stats_dma)); 3104 mcp->mb[7] = LSW(MSD(stats_dma)); 3105 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 3106 mcp->in_mb = MBX_0; 3107 if (IS_FWI2_CAPABLE(ha)) { 3108 mcp->mb[1] = loop_id; 3109 mcp->mb[4] = 0; 3110 mcp->mb[10] = 0; 3111 mcp->out_mb |= MBX_10|MBX_4|MBX_1; 3112 mcp->in_mb |= MBX_1; 3113 } else if (HAS_EXTENDED_IDS(ha)) { 3114 mcp->mb[1] = loop_id; 3115 mcp->mb[10] = 0; 3116 mcp->out_mb |= MBX_10|MBX_1; 3117 } else { 3118 mcp->mb[1] = loop_id << 8; 3119 mcp->out_mb |= MBX_1; 3120 } 3121 mcp->tov = MBX_TOV_SECONDS; 3122 mcp->flags = IOCTL_CMD; 3123 rval = qla2x00_mailbox_command(vha, mcp); 3124 3125 if (rval == QLA_SUCCESS) { 3126 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 3127 ql_dbg(ql_dbg_mbx, vha, 0x1085, 3128 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3129 rval = QLA_FUNCTION_FAILED; 3130 } else { 3131 /* Re-endianize - firmware data is le32. 
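 * Only the prefix of struct link_statistics up to link_up_cnt is
 * converted here, matching the dwords count computed above for this
 * legacy command.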
*/ 3132 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086, 3133 "Done %s.\n", __func__); 3134 for ( ; dwords--; iter++) 3135 le32_to_cpus(iter); 3136 } 3137 } else { 3138 /* Failed. */ 3139 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval); 3140 } 3141 3142 return rval; 3143} 3144 3145int 3146qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, 3147 dma_addr_t stats_dma, uint16_t options) 3148{ 3149 int rval; 3150 mbx_cmd_t mc; 3151 mbx_cmd_t *mcp = &mc; 3152 uint32_t *iter = (uint32_t *)stats; 3153 ushort dwords = sizeof(*stats)/sizeof(*iter); 3154 3155 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, 3156 "Entered %s.\n", __func__); 3157 3158 memset(&mc, 0, sizeof(mc)); 3159 mc.mb[0] = MBC_GET_LINK_PRIV_STATS; 3160 mc.mb[2] = MSW(LSD(stats_dma)); 3161 mc.mb[3] = LSW(LSD(stats_dma)); 3162 mc.mb[6] = MSW(MSD(stats_dma)); 3163 mc.mb[7] = LSW(MSD(stats_dma)); 3164 mc.mb[8] = dwords; 3165 mc.mb[9] = vha->vp_idx; 3166 mc.mb[10] = options; 3167 3168 rval = qla24xx_send_mb_cmd(vha, &mc); 3169 3170 if (rval == QLA_SUCCESS) { 3171 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 3172 ql_dbg(ql_dbg_mbx, vha, 0x1089, 3173 "Failed mb[0]=%x.\n", mcp->mb[0]); 3174 rval = QLA_FUNCTION_FAILED; 3175 } else { 3176 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a, 3177 "Done %s.\n", __func__); 3178 /* Re-endianize - firmware data is le32. */ 3179 for ( ; dwords--; iter++) 3180 le32_to_cpus(iter); 3181 } 3182 } else { 3183 /* Failed. */ 3184 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval); 3185 } 3186 3187 return rval; 3188} 3189 3190int 3191qla24xx_abort_command(srb_t *sp) 3192{ 3193 int rval; 3194 unsigned long flags = 0; 3195 3196 struct abort_entry_24xx *abt; 3197 dma_addr_t abt_dma; 3198 uint32_t handle; 3199 fc_port_t *fcport = sp->fcport; 3200 struct scsi_qla_host *vha = fcport->vha; 3201 struct qla_hw_data *ha = vha->hw; 3202 struct req_que *req = vha->req; 3203 struct qla_qpair *qpair = sp->qpair; 3204 3205 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, 3206 "Entered %s.\n", __func__); 3207 3208 if (sp->qpair) 3209 req = sp->qpair->req; 3210 else 3211 return QLA_FUNCTION_FAILED; 3212 3213 if (ql2xasynctmfenable) 3214 return qla24xx_async_abort_command(sp); 3215 3216 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3217 for (handle = 1; handle < req->num_outstanding_cmds; handle++) { 3218 if (req->outstanding_cmds[handle] == sp) 3219 break; 3220 } 3221 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3222 if (handle == req->num_outstanding_cmds) { 3223 /* Command not found. 
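 * The scan above did not find the SRB in req->outstanding_cmds, so the
 * command has already completed and there is nothing left to abort.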
*/ 3224 return QLA_FUNCTION_FAILED; 3225 } 3226 3227 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma); 3228 if (abt == NULL) { 3229 ql_log(ql_log_warn, vha, 0x108d, 3230 "Failed to allocate abort IOCB.\n"); 3231 return QLA_MEMORY_ALLOC_FAILED; 3232 } 3233 3234 abt->entry_type = ABORT_IOCB_TYPE; 3235 abt->entry_count = 1; 3236 abt->handle = make_handle(req->id, abt->handle); 3237 abt->nport_handle = cpu_to_le16(fcport->loop_id); 3238 abt->handle_to_abort = make_handle(req->id, handle); 3239 abt->port_id[0] = fcport->d_id.b.al_pa; 3240 abt->port_id[1] = fcport->d_id.b.area; 3241 abt->port_id[2] = fcport->d_id.b.domain; 3242 abt->vp_index = fcport->vha->vp_idx; 3243 3244 abt->req_que_no = cpu_to_le16(req->id); 3245 3246 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0); 3247 if (rval != QLA_SUCCESS) { 3248 ql_dbg(ql_dbg_mbx, vha, 0x108e, 3249 "Failed to issue IOCB (%x).\n", rval); 3250 } else if (abt->entry_status != 0) { 3251 ql_dbg(ql_dbg_mbx, vha, 0x108f, 3252 "Failed to complete IOCB -- error status (%x).\n", 3253 abt->entry_status); 3254 rval = QLA_FUNCTION_FAILED; 3255 } else if (abt->nport_handle != cpu_to_le16(0)) { 3256 ql_dbg(ql_dbg_mbx, vha, 0x1090, 3257 "Failed to complete IOCB -- completion status (%x).\n", 3258 le16_to_cpu(abt->nport_handle)); 3259 if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR)) 3260 rval = QLA_FUNCTION_PARAMETER_ERROR; 3261 else 3262 rval = QLA_FUNCTION_FAILED; 3263 } else { 3264 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091, 3265 "Done %s.\n", __func__); 3266 } 3267 3268 dma_pool_free(ha->s_dma_pool, abt, abt_dma); 3269 3270 return rval; 3271} 3272 3273struct tsk_mgmt_cmd { 3274 union { 3275 struct tsk_mgmt_entry tsk; 3276 struct sts_entry_24xx sts; 3277 } p; 3278}; 3279 3280static int 3281__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, 3282 uint64_t l, int tag) 3283{ 3284 int rval, rval2; 3285 struct tsk_mgmt_cmd *tsk; 3286 struct sts_entry_24xx *sts; 3287 dma_addr_t tsk_dma; 3288 scsi_qla_host_t *vha; 3289 struct qla_hw_data *ha; 3290 struct req_que *req; 3291 struct qla_qpair *qpair; 3292 3293 vha = fcport->vha; 3294 ha = vha->hw; 3295 req = vha->req; 3296 3297 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092, 3298 "Entered %s.\n", __func__); 3299 3300 if (vha->vp_idx && vha->qpair) { 3301 /* NPIV port */ 3302 qpair = vha->qpair; 3303 req = qpair->req; 3304 } 3305 3306 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 3307 if (tsk == NULL) { 3308 ql_log(ql_log_warn, vha, 0x1093, 3309 "Failed to allocate task management IOCB.\n"); 3310 return QLA_MEMORY_ALLOC_FAILED; 3311 } 3312 3313 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; 3314 tsk->p.tsk.entry_count = 1; 3315 tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle); 3316 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); 3317 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 3318 tsk->p.tsk.control_flags = cpu_to_le32(type); 3319 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; 3320 tsk->p.tsk.port_id[1] = fcport->d_id.b.area; 3321 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; 3322 tsk->p.tsk.vp_index = fcport->vha->vp_idx; 3323 if (type == TCF_LUN_RESET) { 3324 int_to_scsilun(l, &tsk->p.tsk.lun); 3325 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun, 3326 sizeof(tsk->p.tsk.lun)); 3327 } 3328 3329 sts = &tsk->p.sts; 3330 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0); 3331 if (rval != QLA_SUCCESS) { 3332 ql_dbg(ql_dbg_mbx, vha, 0x1094, 3333 "Failed to issue %s reset IOCB (%x).\n", name, rval); 3334 } else if (sts->entry_status != 0) { 3335 
ql_dbg(ql_dbg_mbx, vha, 0x1095, 3336 "Failed to complete IOCB -- error status (%x).\n", 3337 sts->entry_status); 3338 rval = QLA_FUNCTION_FAILED; 3339 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { 3340 ql_dbg(ql_dbg_mbx, vha, 0x1096, 3341 "Failed to complete IOCB -- completion status (%x).\n", 3342 le16_to_cpu(sts->comp_status)); 3343 rval = QLA_FUNCTION_FAILED; 3344 } else if (le16_to_cpu(sts->scsi_status) & 3345 SS_RESPONSE_INFO_LEN_VALID) { 3346 if (le32_to_cpu(sts->rsp_data_len) < 4) { 3347 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097, 3348 "Ignoring inconsistent data length -- not enough " 3349 "response info (%d).\n", 3350 le32_to_cpu(sts->rsp_data_len)); 3351 } else if (sts->data[3]) { 3352 ql_dbg(ql_dbg_mbx, vha, 0x1098, 3353 "Failed to complete IOCB -- response (%x).\n", 3354 sts->data[3]); 3355 rval = QLA_FUNCTION_FAILED; 3356 } 3357 } 3358 3359 /* Issue marker IOCB. */ 3360 rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l, 3361 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); 3362 if (rval2 != QLA_SUCCESS) { 3363 ql_dbg(ql_dbg_mbx, vha, 0x1099, 3364 "Failed to issue marker IOCB (%x).\n", rval2); 3365 } else { 3366 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a, 3367 "Done %s.\n", __func__); 3368 } 3369 3370 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); 3371 3372 return rval; 3373} 3374 3375int 3376qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag) 3377{ 3378 struct qla_hw_data *ha = fcport->vha->hw; 3379 3380 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) 3381 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag); 3382 3383 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag); 3384} 3385 3386int 3387qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag) 3388{ 3389 struct qla_hw_data *ha = fcport->vha->hw; 3390 3391 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha)) 3392 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); 3393 3394 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag); 3395} 3396 3397int 3398qla2x00_system_error(scsi_qla_host_t *vha) 3399{ 3400 int rval; 3401 mbx_cmd_t mc; 3402 mbx_cmd_t *mcp = &mc; 3403 struct qla_hw_data *ha = vha->hw; 3404 3405 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) 3406 return QLA_FUNCTION_FAILED; 3407 3408 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b, 3409 "Entered %s.\n", __func__); 3410 3411 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; 3412 mcp->out_mb = MBX_0; 3413 mcp->in_mb = MBX_0; 3414 mcp->tov = 5; 3415 mcp->flags = 0; 3416 rval = qla2x00_mailbox_command(vha, mcp); 3417 3418 if (rval != QLA_SUCCESS) { 3419 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval); 3420 } else { 3421 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d, 3422 "Done %s.\n", __func__); 3423 } 3424 3425 return rval; 3426} 3427 3428int 3429qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data) 3430{ 3431 int rval; 3432 mbx_cmd_t mc; 3433 mbx_cmd_t *mcp = &mc; 3434 3435 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && 3436 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 3437 return QLA_FUNCTION_FAILED; 3438 3439 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182, 3440 "Entered %s.\n", __func__); 3441 3442 mcp->mb[0] = MBC_WRITE_SERDES; 3443 mcp->mb[1] = addr; 3444 if (IS_QLA2031(vha->hw)) 3445 mcp->mb[2] = data & 0xff; 3446 else 3447 mcp->mb[2] = data; 3448 3449 mcp->mb[3] = 0; 3450 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 3451 mcp->in_mb = MBX_0; 3452 mcp->tov = MBX_TOV_SECONDS; 3453 mcp->flags = 0; 3454 rval = qla2x00_mailbox_command(vha, mcp); 
3455 3456 if (rval != QLA_SUCCESS) { 3457 ql_dbg(ql_dbg_mbx, vha, 0x1183, 3458 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3459 } else { 3460 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184, 3461 "Done %s.\n", __func__); 3462 } 3463 3464 return rval; 3465} 3466 3467int 3468qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data) 3469{ 3470 int rval; 3471 mbx_cmd_t mc; 3472 mbx_cmd_t *mcp = &mc; 3473 3474 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && 3475 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 3476 return QLA_FUNCTION_FAILED; 3477 3478 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185, 3479 "Entered %s.\n", __func__); 3480 3481 mcp->mb[0] = MBC_READ_SERDES; 3482 mcp->mb[1] = addr; 3483 mcp->mb[3] = 0; 3484 mcp->out_mb = MBX_3|MBX_1|MBX_0; 3485 mcp->in_mb = MBX_1|MBX_0; 3486 mcp->tov = MBX_TOV_SECONDS; 3487 mcp->flags = 0; 3488 rval = qla2x00_mailbox_command(vha, mcp); 3489 3490 if (IS_QLA2031(vha->hw)) 3491 *data = mcp->mb[1] & 0xff; 3492 else 3493 *data = mcp->mb[1]; 3494 3495 if (rval != QLA_SUCCESS) { 3496 ql_dbg(ql_dbg_mbx, vha, 0x1186, 3497 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3498 } else { 3499 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187, 3500 "Done %s.\n", __func__); 3501 } 3502 3503 return rval; 3504} 3505 3506int 3507qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data) 3508{ 3509 int rval; 3510 mbx_cmd_t mc; 3511 mbx_cmd_t *mcp = &mc; 3512 3513 if (!IS_QLA8044(vha->hw)) 3514 return QLA_FUNCTION_FAILED; 3515 3516 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0, 3517 "Entered %s.\n", __func__); 3518 3519 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; 3520 mcp->mb[1] = HCS_WRITE_SERDES; 3521 mcp->mb[3] = LSW(addr); 3522 mcp->mb[4] = MSW(addr); 3523 mcp->mb[5] = LSW(data); 3524 mcp->mb[6] = MSW(data); 3525 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; 3526 mcp->in_mb = MBX_0; 3527 mcp->tov = MBX_TOV_SECONDS; 3528 mcp->flags = 0; 3529 rval = qla2x00_mailbox_command(vha, mcp); 3530 3531 if (rval != QLA_SUCCESS) { 3532 ql_dbg(ql_dbg_mbx, vha, 0x11a1, 3533 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3534 } else { 3535 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188, 3536 "Done %s.\n", __func__); 3537 } 3538 3539 return rval; 3540} 3541 3542int 3543qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) 3544{ 3545 int rval; 3546 mbx_cmd_t mc; 3547 mbx_cmd_t *mcp = &mc; 3548 3549 if (!IS_QLA8044(vha->hw)) 3550 return QLA_FUNCTION_FAILED; 3551 3552 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189, 3553 "Entered %s.\n", __func__); 3554 3555 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; 3556 mcp->mb[1] = HCS_READ_SERDES; 3557 mcp->mb[3] = LSW(addr); 3558 mcp->mb[4] = MSW(addr); 3559 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0; 3560 mcp->in_mb = MBX_2|MBX_1|MBX_0; 3561 mcp->tov = MBX_TOV_SECONDS; 3562 mcp->flags = 0; 3563 rval = qla2x00_mailbox_command(vha, mcp); 3564 3565 *data = mcp->mb[2] << 16 | mcp->mb[1]; 3566 3567 if (rval != QLA_SUCCESS) { 3568 ql_dbg(ql_dbg_mbx, vha, 0x118a, 3569 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3570 } else { 3571 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b, 3572 "Done %s.\n", __func__); 3573 } 3574 3575 return rval; 3576} 3577 3578/** 3579 * qla2x00_set_serdes_params() - 3580 * @vha: HA context 3581 * @sw_em_1g: serial link options 3582 * @sw_em_2g: serial link options 3583 * @sw_em_4g: serial link options 3584 * 3585 * Returns 3586 */ 3587int 3588qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g, 3589 uint16_t sw_em_2g, uint16_t sw_em_4g) 3590{ 3591 int rval; 
3592 mbx_cmd_t mc; 3593 mbx_cmd_t *mcp = &mc; 3594 3595 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e, 3596 "Entered %s.\n", __func__); 3597 3598 mcp->mb[0] = MBC_SERDES_PARAMS; 3599 mcp->mb[1] = BIT_0; 3600 mcp->mb[2] = sw_em_1g | BIT_15; 3601 mcp->mb[3] = sw_em_2g | BIT_15; 3602 mcp->mb[4] = sw_em_4g | BIT_15; 3603 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3604 mcp->in_mb = MBX_0; 3605 mcp->tov = MBX_TOV_SECONDS; 3606 mcp->flags = 0; 3607 rval = qla2x00_mailbox_command(vha, mcp); 3608 3609 if (rval != QLA_SUCCESS) { 3610 /*EMPTY*/ 3611 ql_dbg(ql_dbg_mbx, vha, 0x109f, 3612 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3613 } else { 3614 /*EMPTY*/ 3615 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0, 3616 "Done %s.\n", __func__); 3617 } 3618 3619 return rval; 3620} 3621 3622int 3623qla2x00_stop_firmware(scsi_qla_host_t *vha) 3624{ 3625 int rval; 3626 mbx_cmd_t mc; 3627 mbx_cmd_t *mcp = &mc; 3628 3629 if (!IS_FWI2_CAPABLE(vha->hw)) 3630 return QLA_FUNCTION_FAILED; 3631 3632 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1, 3633 "Entered %s.\n", __func__); 3634 3635 mcp->mb[0] = MBC_STOP_FIRMWARE; 3636 mcp->mb[1] = 0; 3637 mcp->out_mb = MBX_1|MBX_0; 3638 mcp->in_mb = MBX_0; 3639 mcp->tov = 5; 3640 mcp->flags = 0; 3641 rval = qla2x00_mailbox_command(vha, mcp); 3642 3643 if (rval != QLA_SUCCESS) { 3644 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval); 3645 if (mcp->mb[0] == MBS_INVALID_COMMAND) 3646 rval = QLA_INVALID_COMMAND; 3647 } else { 3648 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3, 3649 "Done %s.\n", __func__); 3650 } 3651 3652 return rval; 3653} 3654 3655int 3656qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma, 3657 uint16_t buffers) 3658{ 3659 int rval; 3660 mbx_cmd_t mc; 3661 mbx_cmd_t *mcp = &mc; 3662 3663 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4, 3664 "Entered %s.\n", __func__); 3665 3666 if (!IS_FWI2_CAPABLE(vha->hw)) 3667 return QLA_FUNCTION_FAILED; 3668 3669 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3670 return QLA_FUNCTION_FAILED; 3671 3672 mcp->mb[0] = MBC_TRACE_CONTROL; 3673 mcp->mb[1] = TC_EFT_ENABLE; 3674 mcp->mb[2] = LSW(eft_dma); 3675 mcp->mb[3] = MSW(eft_dma); 3676 mcp->mb[4] = LSW(MSD(eft_dma)); 3677 mcp->mb[5] = MSW(MSD(eft_dma)); 3678 mcp->mb[6] = buffers; 3679 mcp->mb[7] = TC_AEN_DISABLE; 3680 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3681 mcp->in_mb = MBX_1|MBX_0; 3682 mcp->tov = MBX_TOV_SECONDS; 3683 mcp->flags = 0; 3684 rval = qla2x00_mailbox_command(vha, mcp); 3685 if (rval != QLA_SUCCESS) { 3686 ql_dbg(ql_dbg_mbx, vha, 0x10a5, 3687 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3688 rval, mcp->mb[0], mcp->mb[1]); 3689 } else { 3690 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6, 3691 "Done %s.\n", __func__); 3692 } 3693 3694 return rval; 3695} 3696 3697int 3698qla2x00_disable_eft_trace(scsi_qla_host_t *vha) 3699{ 3700 int rval; 3701 mbx_cmd_t mc; 3702 mbx_cmd_t *mcp = &mc; 3703 3704 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7, 3705 "Entered %s.\n", __func__); 3706 3707 if (!IS_FWI2_CAPABLE(vha->hw)) 3708 return QLA_FUNCTION_FAILED; 3709 3710 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3711 return QLA_FUNCTION_FAILED; 3712 3713 mcp->mb[0] = MBC_TRACE_CONTROL; 3714 mcp->mb[1] = TC_EFT_DISABLE; 3715 mcp->out_mb = MBX_1|MBX_0; 3716 mcp->in_mb = MBX_1|MBX_0; 3717 mcp->tov = MBX_TOV_SECONDS; 3718 mcp->flags = 0; 3719 rval = qla2x00_mailbox_command(vha, mcp); 3720 if (rval != QLA_SUCCESS) { 3721 ql_dbg(ql_dbg_mbx, vha, 0x10a8, 3722 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3723 rval, mcp->mb[0], mcp->mb[1]); 3724 } 
else { 3725 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9, 3726 "Done %s.\n", __func__); 3727 } 3728 3729 return rval; 3730} 3731 3732int 3733qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, 3734 uint16_t buffers, uint16_t *mb, uint32_t *dwords) 3735{ 3736 int rval; 3737 mbx_cmd_t mc; 3738 mbx_cmd_t *mcp = &mc; 3739 3740 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa, 3741 "Entered %s.\n", __func__); 3742 3743 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && 3744 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && 3745 !IS_QLA28XX(vha->hw)) 3746 return QLA_FUNCTION_FAILED; 3747 3748 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3749 return QLA_FUNCTION_FAILED; 3750 3751 mcp->mb[0] = MBC_TRACE_CONTROL; 3752 mcp->mb[1] = TC_FCE_ENABLE; 3753 mcp->mb[2] = LSW(fce_dma); 3754 mcp->mb[3] = MSW(fce_dma); 3755 mcp->mb[4] = LSW(MSD(fce_dma)); 3756 mcp->mb[5] = MSW(MSD(fce_dma)); 3757 mcp->mb[6] = buffers; 3758 mcp->mb[7] = TC_AEN_DISABLE; 3759 mcp->mb[8] = 0; 3760 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE; 3761 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE; 3762 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3763 MBX_1|MBX_0; 3764 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3765 mcp->tov = MBX_TOV_SECONDS; 3766 mcp->flags = 0; 3767 rval = qla2x00_mailbox_command(vha, mcp); 3768 if (rval != QLA_SUCCESS) { 3769 ql_dbg(ql_dbg_mbx, vha, 0x10ab, 3770 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3771 rval, mcp->mb[0], mcp->mb[1]); 3772 } else { 3773 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac, 3774 "Done %s.\n", __func__); 3775 3776 if (mb) 3777 memcpy(mb, mcp->mb, 8 * sizeof(*mb)); 3778 if (dwords) 3779 *dwords = buffers; 3780 } 3781 3782 return rval; 3783} 3784 3785int 3786qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd) 3787{ 3788 int rval; 3789 mbx_cmd_t mc; 3790 mbx_cmd_t *mcp = &mc; 3791 3792 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad, 3793 "Entered %s.\n", __func__); 3794 3795 if (!IS_FWI2_CAPABLE(vha->hw)) 3796 return QLA_FUNCTION_FAILED; 3797 3798 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3799 return QLA_FUNCTION_FAILED; 3800 3801 mcp->mb[0] = MBC_TRACE_CONTROL; 3802 mcp->mb[1] = TC_FCE_DISABLE; 3803 mcp->mb[2] = TC_FCE_DISABLE_TRACE; 3804 mcp->out_mb = MBX_2|MBX_1|MBX_0; 3805 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2| 3806 MBX_1|MBX_0; 3807 mcp->tov = MBX_TOV_SECONDS; 3808 mcp->flags = 0; 3809 rval = qla2x00_mailbox_command(vha, mcp); 3810 if (rval != QLA_SUCCESS) { 3811 ql_dbg(ql_dbg_mbx, vha, 0x10ae, 3812 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3813 rval, mcp->mb[0], mcp->mb[1]); 3814 } else { 3815 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af, 3816 "Done %s.\n", __func__); 3817 3818 if (wr) 3819 *wr = (uint64_t) mcp->mb[5] << 48 | 3820 (uint64_t) mcp->mb[4] << 32 | 3821 (uint64_t) mcp->mb[3] << 16 | 3822 (uint64_t) mcp->mb[2]; 3823 if (rd) 3824 *rd = (uint64_t) mcp->mb[9] << 48 | 3825 (uint64_t) mcp->mb[8] << 32 | 3826 (uint64_t) mcp->mb[7] << 16 | 3827 (uint64_t) mcp->mb[6]; 3828 } 3829 3830 return rval; 3831} 3832 3833int 3834qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3835 uint16_t *port_speed, uint16_t *mb) 3836{ 3837 int rval; 3838 mbx_cmd_t mc; 3839 mbx_cmd_t *mcp = &mc; 3840 3841 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0, 3842 "Entered %s.\n", __func__); 3843 3844 if (!IS_IIDMA_CAPABLE(vha->hw)) 3845 return QLA_FUNCTION_FAILED; 3846 3847 mcp->mb[0] = MBC_PORT_PARAMS; 3848 mcp->mb[1] = loop_id; 3849 mcp->mb[2] = mcp->mb[3] = 0; 3850 mcp->mb[9] = vha->vp_idx; 3851 
mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3852 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3853 mcp->tov = MBX_TOV_SECONDS; 3854 mcp->flags = 0; 3855 rval = qla2x00_mailbox_command(vha, mcp); 3856 3857 /* Return mailbox statuses. */ 3858 if (mb) { 3859 mb[0] = mcp->mb[0]; 3860 mb[1] = mcp->mb[1]; 3861 mb[3] = mcp->mb[3]; 3862 } 3863 3864 if (rval != QLA_SUCCESS) { 3865 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval); 3866 } else { 3867 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2, 3868 "Done %s.\n", __func__); 3869 if (port_speed) 3870 *port_speed = mcp->mb[3]; 3871 } 3872 3873 return rval; 3874} 3875 3876int 3877qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, 3878 uint16_t port_speed, uint16_t *mb) 3879{ 3880 int rval; 3881 mbx_cmd_t mc; 3882 mbx_cmd_t *mcp = &mc; 3883 3884 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3, 3885 "Entered %s.\n", __func__); 3886 3887 if (!IS_IIDMA_CAPABLE(vha->hw)) 3888 return QLA_FUNCTION_FAILED; 3889 3890 mcp->mb[0] = MBC_PORT_PARAMS; 3891 mcp->mb[1] = loop_id; 3892 mcp->mb[2] = BIT_0; 3893 mcp->mb[3] = port_speed & 0x3F; 3894 mcp->mb[9] = vha->vp_idx; 3895 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 3896 mcp->in_mb = MBX_3|MBX_1|MBX_0; 3897 mcp->tov = MBX_TOV_SECONDS; 3898 mcp->flags = 0; 3899 rval = qla2x00_mailbox_command(vha, mcp); 3900 3901 /* Return mailbox statuses. */ 3902 if (mb) { 3903 mb[0] = mcp->mb[0]; 3904 mb[1] = mcp->mb[1]; 3905 mb[3] = mcp->mb[3]; 3906 } 3907 3908 if (rval != QLA_SUCCESS) { 3909 ql_dbg(ql_dbg_mbx, vha, 0x10b4, 3910 "Failed=%x.\n", rval); 3911 } else { 3912 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5, 3913 "Done %s.\n", __func__); 3914 } 3915 3916 return rval; 3917} 3918 3919void 3920qla24xx_report_id_acquisition(scsi_qla_host_t *vha, 3921 struct vp_rpt_id_entry_24xx *rptid_entry) 3922{ 3923 struct qla_hw_data *ha = vha->hw; 3924 scsi_qla_host_t *vp = NULL; 3925 unsigned long flags; 3926 int found; 3927 port_id_t id; 3928 struct fc_port *fcport; 3929 3930 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, 3931 "Entered %s.\n", __func__); 3932 3933 if (rptid_entry->entry_status != 0) 3934 return; 3935 3936 id.b.domain = rptid_entry->port_id[2]; 3937 id.b.area = rptid_entry->port_id[1]; 3938 id.b.al_pa = rptid_entry->port_id[0]; 3939 id.b.rsvd_1 = 0; 3940 ha->flags.n2n_ae = 0; 3941 3942 if (rptid_entry->format == 0) { 3943 /* loop */ 3944 ql_dbg(ql_dbg_async, vha, 0x10b7, 3945 "Format 0 : Number of VPs setup %d, number of " 3946 "VPs acquired %d.\n", rptid_entry->vp_setup, 3947 rptid_entry->vp_acquired); 3948 ql_dbg(ql_dbg_async, vha, 0x10b8, 3949 "Primary port id %02x%02x%02x.\n", 3950 rptid_entry->port_id[2], rptid_entry->port_id[1], 3951 rptid_entry->port_id[0]); 3952 ha->current_topology = ISP_CFG_NL; 3953 qlt_update_host_map(vha, id); 3954 3955 } else if (rptid_entry->format == 1) { 3956 /* fabric */ 3957 ql_dbg(ql_dbg_async, vha, 0x10b9, 3958 "Format 1: VP[%d] enabled - status %d - with " 3959 "port id %02x%02x%02x.\n", rptid_entry->vp_idx, 3960 rptid_entry->vp_status, 3961 rptid_entry->port_id[2], rptid_entry->port_id[1], 3962 rptid_entry->port_id[0]); 3963 ql_dbg(ql_dbg_async, vha, 0x5075, 3964 "Format 1: Remote WWPN %8phC.\n", 3965 rptid_entry->u.f1.port_name); 3966 3967 ql_dbg(ql_dbg_async, vha, 0x5075, 3968 "Format 1: WWPN %8phC.\n", 3969 vha->port_name); 3970 3971 switch (rptid_entry->u.f1.flags & TOPO_MASK) { 3972 case TOPO_N2N: 3973 ha->current_topology = ISP_CFG_N; 3974 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 3975 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3976 
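				/*
				 * Point-to-point: mark every cached fcport
				 * for rescan and clear its N2N flag; the
				 * single remote peer is re-evaluated below
				 * from the WWPN reported in the RIDA entry.
				 */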
fcport->scan_state = QLA_FCPORT_SCAN; 3977 fcport->n2n_flag = 0; 3978 } 3979 id.b24 = 0; 3980 if (wwn_to_u64(vha->port_name) > 3981 wwn_to_u64(rptid_entry->u.f1.port_name)) { 3982 vha->d_id.b24 = 0; 3983 vha->d_id.b.al_pa = 1; 3984 ha->flags.n2n_bigger = 1; 3985 3986 id.b.al_pa = 2; 3987 ql_dbg(ql_dbg_async, vha, 0x5075, 3988 "Format 1: assign local id %x remote id %x\n", 3989 vha->d_id.b24, id.b24); 3990 } else { 3991 ql_dbg(ql_dbg_async, vha, 0x5075, 3992 "Format 1: Remote login - Waiting for WWPN %8phC.\n", 3993 rptid_entry->u.f1.port_name); 3994 ha->flags.n2n_bigger = 0; 3995 } 3996 3997 fcport = qla2x00_find_fcport_by_wwpn(vha, 3998 rptid_entry->u.f1.port_name, 1); 3999 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 4000 4001 4002 if (fcport) { 4003 fcport->plogi_nack_done_deadline = jiffies + HZ; 4004 fcport->dm_login_expire = jiffies + 4005 QLA_N2N_WAIT_TIME * HZ; 4006 fcport->scan_state = QLA_FCPORT_FOUND; 4007 fcport->n2n_flag = 1; 4008 fcport->keep_nport_handle = 1; 4009 4010 if (wwn_to_u64(vha->port_name) > 4011 wwn_to_u64(fcport->port_name)) { 4012 fcport->d_id = id; 4013 } 4014 4015 switch (fcport->disc_state) { 4016 case DSC_DELETED: 4017 set_bit(RELOGIN_NEEDED, 4018 &vha->dpc_flags); 4019 break; 4020 case DSC_DELETE_PEND: 4021 break; 4022 default: 4023 qlt_schedule_sess_for_deletion(fcport); 4024 break; 4025 } 4026 } else { 4027 qla24xx_post_newsess_work(vha, &id, 4028 rptid_entry->u.f1.port_name, 4029 rptid_entry->u.f1.node_name, 4030 NULL, 4031 FS_FCP_IS_N2N); 4032 } 4033 4034 /* if our portname is higher then initiate N2N login */ 4035 4036 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags); 4037 return; 4038 break; 4039 case TOPO_FL: 4040 ha->current_topology = ISP_CFG_FL; 4041 break; 4042 case TOPO_F: 4043 ha->current_topology = ISP_CFG_F; 4044 break; 4045 default: 4046 break; 4047 } 4048 4049 ha->flags.gpsc_supported = 1; 4050 ha->current_topology = ISP_CFG_F; 4051 /* buffer to buffer credit flag */ 4052 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0; 4053 4054 if (rptid_entry->vp_idx == 0) { 4055 if (rptid_entry->vp_status == VP_STAT_COMPL) { 4056 /* FA-WWN is only for physical port */ 4057 if (qla_ini_mode_enabled(vha) && 4058 ha->flags.fawwpn_enabled && 4059 (rptid_entry->u.f1.flags & 4060 BIT_6)) { 4061 memcpy(vha->port_name, 4062 rptid_entry->u.f1.port_name, 4063 WWN_SIZE); 4064 } 4065 4066 qlt_update_host_map(vha, id); 4067 } 4068 4069 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 4070 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 4071 } else { 4072 if (rptid_entry->vp_status != VP_STAT_COMPL && 4073 rptid_entry->vp_status != VP_STAT_ID_CHG) { 4074 ql_dbg(ql_dbg_mbx, vha, 0x10ba, 4075 "Could not acquire ID for VP[%d].\n", 4076 rptid_entry->vp_idx); 4077 return; 4078 } 4079 4080 found = 0; 4081 spin_lock_irqsave(&ha->vport_slock, flags); 4082 list_for_each_entry(vp, &ha->vp_list, list) { 4083 if (rptid_entry->vp_idx == vp->vp_idx) { 4084 found = 1; 4085 break; 4086 } 4087 } 4088 spin_unlock_irqrestore(&ha->vport_slock, flags); 4089 4090 if (!found) 4091 return; 4092 4093 qlt_update_host_map(vp, id); 4094 4095 /* 4096 * Cannot configure here as we are still sitting on the 4097 * response queue. Handle it in dpc context. 
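 *
 * The bits set below (VP_IDX_ACQUIRED, REGISTER_FC4_NEEDED,
 * REGISTER_FDMI_NEEDED) are serviced by the DPC thread once
 * qla2xxx_wake_dpc() is called at the end of this handler.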
4098 */ 4099 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags); 4100 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags); 4101 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags); 4102 } 4103 set_bit(VP_DPC_NEEDED, &vha->dpc_flags); 4104 qla2xxx_wake_dpc(vha); 4105 } else if (rptid_entry->format == 2) { 4106 ql_dbg(ql_dbg_async, vha, 0x505f, 4107 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n", 4108 rptid_entry->port_id[2], rptid_entry->port_id[1], 4109 rptid_entry->port_id[0]); 4110 4111 ql_dbg(ql_dbg_async, vha, 0x5075, 4112 "N2N: Remote WWPN %8phC.\n", 4113 rptid_entry->u.f2.port_name); 4114 4115 /* N2N. direct connect */ 4116 ha->current_topology = ISP_CFG_N; 4117 ha->flags.rida_fmt2 = 1; 4118 vha->d_id.b.domain = rptid_entry->port_id[2]; 4119 vha->d_id.b.area = rptid_entry->port_id[1]; 4120 vha->d_id.b.al_pa = rptid_entry->port_id[0]; 4121 4122 ha->flags.n2n_ae = 1; 4123 spin_lock_irqsave(&ha->vport_slock, flags); 4124 qlt_update_vp_map(vha, SET_AL_PA); 4125 spin_unlock_irqrestore(&ha->vport_slock, flags); 4126 4127 list_for_each_entry(fcport, &vha->vp_fcports, list) { 4128 fcport->scan_state = QLA_FCPORT_SCAN; 4129 fcport->n2n_flag = 0; 4130 } 4131 4132 fcport = qla2x00_find_fcport_by_wwpn(vha, 4133 rptid_entry->u.f2.port_name, 1); 4134 4135 if (fcport) { 4136 fcport->login_retry = vha->hw->login_retry_count; 4137 fcport->plogi_nack_done_deadline = jiffies + HZ; 4138 fcport->scan_state = QLA_FCPORT_FOUND; 4139 fcport->keep_nport_handle = 1; 4140 fcport->n2n_flag = 1; 4141 fcport->d_id.b.domain = 4142 rptid_entry->u.f2.remote_nport_id[2]; 4143 fcport->d_id.b.area = 4144 rptid_entry->u.f2.remote_nport_id[1]; 4145 fcport->d_id.b.al_pa = 4146 rptid_entry->u.f2.remote_nport_id[0]; 4147 } 4148 } 4149} 4150 4151/* 4152 * qla24xx_modify_vp_config 4153 * Change VP configuration for vha 4154 * 4155 * Input: 4156 * vha = adapter block pointer. 4157 * 4158 * Returns: 4159 * qla2xxx local function return status code. 4160 * 4161 * Context: 4162 * Kernel context. 
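 *
 * Sends a VP Config IOCB that pushes this vport's node and port names
 * to the firmware; on success the FC transport state is moved to
 * FC_VPORT_INITIALIZING below.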
4163 */
4164int
4165qla24xx_modify_vp_config(scsi_qla_host_t *vha)
4166{
4167 int rval;
4168 struct vp_config_entry_24xx *vpmod;
4169 dma_addr_t vpmod_dma;
4170 struct qla_hw_data *ha = vha->hw;
4171 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4172
4173 /* This can be called by the parent */
4174
4175 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
4176 "Entered %s.\n", __func__);
4177
4178 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
4179 if (!vpmod) {
4180 ql_log(ql_log_warn, vha, 0x10bc,
4181 "Failed to allocate modify VP IOCB.\n");
4182 return QLA_MEMORY_ALLOC_FAILED;
4183 }
4184
4185 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
4186 vpmod->entry_count = 1;
4187 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
4188 vpmod->vp_count = 1;
4189 vpmod->vp_index1 = vha->vp_idx;
4190 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
4191
4192 qlt_modify_vp_config(vha, vpmod);
4193
4194 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
4195 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
4197
4198 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
4199 if (rval != QLA_SUCCESS) {
4200 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
4201 "Failed to issue VP config IOCB (%x).\n", rval);
4207 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
4208 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
4209 "Failed to complete IOCB -- completion status (%x).\n",
4210 le16_to_cpu(vpmod->comp_status));
4211 rval = QLA_FUNCTION_FAILED;
4212 } else {
4213 /* EMPTY */
4214 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
4215 "Done %s.\n", __func__);
4216 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
4217 }
4218 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
4219
4220 return rval;
4221}
4222
4223/*
4224 * qla2x00_send_change_request
4225 * Receive or disable RSCN request from fabric controller
4226 *
4227 * Input:
4228 * ha = adapter block pointer
4229 * format = registration format:
4230 * 0 - Reserved
4231 * 1 - Fabric detected registration
4232 * 2 - N_port detected registration
4233 * 3 - Full registration
4234 * FF - clear registration
4235 * vp_idx = Virtual port index
4236 *
4237 * Returns:
4238 * qla2x00 local function return status code.
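 *
 * Example (illustrative only):
 *	full RSCN registration on the physical port would be requested with
 *		rval = qla2x00_send_change_request(vha, 0x3, 0);
 *	any non-zero return (BIT_1 when the mailbox status is not
 *	MBS_COMMAND_COMPLETE) is treated as failure.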
4239 * 4240 * Context: 4241 * Kernel Context 4242 */ 4243 4244int 4245qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format, 4246 uint16_t vp_idx) 4247{ 4248 int rval; 4249 mbx_cmd_t mc; 4250 mbx_cmd_t *mcp = &mc; 4251 4252 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7, 4253 "Entered %s.\n", __func__); 4254 4255 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST; 4256 mcp->mb[1] = format; 4257 mcp->mb[9] = vp_idx; 4258 mcp->out_mb = MBX_9|MBX_1|MBX_0; 4259 mcp->in_mb = MBX_0|MBX_1; 4260 mcp->tov = MBX_TOV_SECONDS; 4261 mcp->flags = 0; 4262 rval = qla2x00_mailbox_command(vha, mcp); 4263 4264 if (rval == QLA_SUCCESS) { 4265 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 4266 rval = BIT_1; 4267 } 4268 } else 4269 rval = BIT_1; 4270 4271 return rval; 4272} 4273 4274int 4275qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, 4276 uint32_t size) 4277{ 4278 int rval; 4279 mbx_cmd_t mc; 4280 mbx_cmd_t *mcp = &mc; 4281 4282 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009, 4283 "Entered %s.\n", __func__); 4284 4285 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { 4286 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 4287 mcp->mb[8] = MSW(addr); 4288 mcp->mb[10] = 0; 4289 mcp->out_mb = MBX_10|MBX_8|MBX_0; 4290 } else { 4291 mcp->mb[0] = MBC_DUMP_RISC_RAM; 4292 mcp->out_mb = MBX_0; 4293 } 4294 mcp->mb[1] = LSW(addr); 4295 mcp->mb[2] = MSW(req_dma); 4296 mcp->mb[3] = LSW(req_dma); 4297 mcp->mb[6] = MSW(MSD(req_dma)); 4298 mcp->mb[7] = LSW(MSD(req_dma)); 4299 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; 4300 if (IS_FWI2_CAPABLE(vha->hw)) { 4301 mcp->mb[4] = MSW(size); 4302 mcp->mb[5] = LSW(size); 4303 mcp->out_mb |= MBX_5|MBX_4; 4304 } else { 4305 mcp->mb[4] = LSW(size); 4306 mcp->out_mb |= MBX_4; 4307 } 4308 4309 mcp->in_mb = MBX_0; 4310 mcp->tov = MBX_TOV_SECONDS; 4311 mcp->flags = 0; 4312 rval = qla2x00_mailbox_command(vha, mcp); 4313 4314 if (rval != QLA_SUCCESS) { 4315 ql_dbg(ql_dbg_mbx, vha, 0x1008, 4316 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4317 } else { 4318 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007, 4319 "Done %s.\n", __func__); 4320 } 4321 4322 return rval; 4323} 4324/* 84XX Support **************************************************************/ 4325 4326struct cs84xx_mgmt_cmd { 4327 union { 4328 struct verify_chip_entry_84xx req; 4329 struct verify_chip_rsp_84xx rsp; 4330 } p; 4331}; 4332 4333int 4334qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) 4335{ 4336 int rval, retry; 4337 struct cs84xx_mgmt_cmd *mn; 4338 dma_addr_t mn_dma; 4339 uint16_t options; 4340 unsigned long flags; 4341 struct qla_hw_data *ha = vha->hw; 4342 4343 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8, 4344 "Entered %s.\n", __func__); 4345 4346 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 4347 if (mn == NULL) { 4348 return QLA_MEMORY_ALLOC_FAILED; 4349 } 4350 4351 /* Force Update? */ 4352 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0; 4353 /* Diagnostic firmware? */ 4354 /* options |= MENLO_DIAG_FW; */ 4355 /* We update the firmware with only one data sequence. 
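	 * (hence VCO_END_OF_DATA below: the first sequence is also the last)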
*/ 4356 options |= VCO_END_OF_DATA; 4357 4358 do { 4359 retry = 0; 4360 memset(mn, 0, sizeof(*mn)); 4361 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE; 4362 mn->p.req.entry_count = 1; 4363 mn->p.req.options = cpu_to_le16(options); 4364 4365 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c, 4366 "Dump of Verify Request.\n"); 4367 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e, 4368 mn, sizeof(*mn)); 4369 4370 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 4371 if (rval != QLA_SUCCESS) { 4372 ql_dbg(ql_dbg_mbx, vha, 0x10cb, 4373 "Failed to issue verify IOCB (%x).\n", rval); 4374 goto verify_done; 4375 } 4376 4377 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110, 4378 "Dump of Verify Response.\n"); 4379 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118, 4380 mn, sizeof(*mn)); 4381 4382 status[0] = le16_to_cpu(mn->p.rsp.comp_status); 4383 status[1] = status[0] == CS_VCS_CHIP_FAILURE ? 4384 le16_to_cpu(mn->p.rsp.failure_code) : 0; 4385 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce, 4386 "cs=%x fc=%x.\n", status[0], status[1]); 4387 4388 if (status[0] != CS_COMPLETE) { 4389 rval = QLA_FUNCTION_FAILED; 4390 if (!(options & VCO_DONT_UPDATE_FW)) { 4391 ql_dbg(ql_dbg_mbx, vha, 0x10cf, 4392 "Firmware update failed. Retrying " 4393 "without update firmware.\n"); 4394 options |= VCO_DONT_UPDATE_FW; 4395 options &= ~VCO_FORCE_UPDATE; 4396 retry = 1; 4397 } 4398 } else { 4399 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0, 4400 "Firmware updated to %x.\n", 4401 le32_to_cpu(mn->p.rsp.fw_ver)); 4402 4403 /* NOTE: we only update OP firmware. */ 4404 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 4405 ha->cs84xx->op_fw_version = 4406 le32_to_cpu(mn->p.rsp.fw_ver); 4407 spin_unlock_irqrestore(&ha->cs84xx->access_lock, 4408 flags); 4409 } 4410 } while (retry); 4411 4412verify_done: 4413 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 4414 4415 if (rval != QLA_SUCCESS) { 4416 ql_dbg(ql_dbg_mbx, vha, 0x10d1, 4417 "Failed=%x.\n", rval); 4418 } else { 4419 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2, 4420 "Done %s.\n", __func__); 4421 } 4422 4423 return rval; 4424} 4425 4426int 4427qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) 4428{ 4429 int rval; 4430 unsigned long flags; 4431 mbx_cmd_t mc; 4432 mbx_cmd_t *mcp = &mc; 4433 struct qla_hw_data *ha = vha->hw; 4434 4435 if (!ha->flags.fw_started) 4436 return QLA_SUCCESS; 4437 4438 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, 4439 "Entered %s.\n", __func__); 4440 4441 if (IS_SHADOW_REG_CAPABLE(ha)) 4442 req->options |= BIT_13; 4443 4444 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4445 mcp->mb[1] = req->options; 4446 mcp->mb[2] = MSW(LSD(req->dma)); 4447 mcp->mb[3] = LSW(LSD(req->dma)); 4448 mcp->mb[6] = MSW(MSD(req->dma)); 4449 mcp->mb[7] = LSW(MSD(req->dma)); 4450 mcp->mb[5] = req->length; 4451 if (req->rsp) 4452 mcp->mb[10] = req->rsp->id; 4453 mcp->mb[12] = req->qos; 4454 mcp->mb[11] = req->vp_idx; 4455 mcp->mb[13] = req->rid; 4456 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 4457 mcp->mb[15] = 0; 4458 4459 mcp->mb[4] = req->id; 4460 /* que in ptr index */ 4461 mcp->mb[8] = 0; 4462 /* que out ptr index */ 4463 mcp->mb[9] = *req->out_ptr = 0; 4464 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7| 4465 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4466 mcp->in_mb = MBX_0; 4467 mcp->flags = MBX_DMA_OUT; 4468 mcp->tov = MBX_TOV_SECONDS * 2; 4469 4470 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 4471 IS_QLA28XX(ha)) 4472 mcp->in_mb |= MBX_1; 4473 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || 
IS_QLA28XX(ha)) { 4474 mcp->out_mb |= MBX_15; 4475 /* debug q create issue in SR-IOV */ 4476 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4477 } 4478 4479 spin_lock_irqsave(&ha->hardware_lock, flags); 4480 if (!(req->options & BIT_0)) { 4481 wrt_reg_dword(req->req_q_in, 0); 4482 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4483 wrt_reg_dword(req->req_q_out, 0); 4484 } 4485 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4486 4487 rval = qla2x00_mailbox_command(vha, mcp); 4488 if (rval != QLA_SUCCESS) { 4489 ql_dbg(ql_dbg_mbx, vha, 0x10d4, 4490 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4491 } else { 4492 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5, 4493 "Done %s.\n", __func__); 4494 } 4495 4496 return rval; 4497} 4498 4499int 4500qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) 4501{ 4502 int rval; 4503 unsigned long flags; 4504 mbx_cmd_t mc; 4505 mbx_cmd_t *mcp = &mc; 4506 struct qla_hw_data *ha = vha->hw; 4507 4508 if (!ha->flags.fw_started) 4509 return QLA_SUCCESS; 4510 4511 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, 4512 "Entered %s.\n", __func__); 4513 4514 if (IS_SHADOW_REG_CAPABLE(ha)) 4515 rsp->options |= BIT_13; 4516 4517 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 4518 mcp->mb[1] = rsp->options; 4519 mcp->mb[2] = MSW(LSD(rsp->dma)); 4520 mcp->mb[3] = LSW(LSD(rsp->dma)); 4521 mcp->mb[6] = MSW(MSD(rsp->dma)); 4522 mcp->mb[7] = LSW(MSD(rsp->dma)); 4523 mcp->mb[5] = rsp->length; 4524 mcp->mb[14] = rsp->msix->entry; 4525 mcp->mb[13] = rsp->rid; 4526 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 4527 mcp->mb[15] = 0; 4528 4529 mcp->mb[4] = rsp->id; 4530 /* que in ptr index */ 4531 mcp->mb[8] = *rsp->in_ptr = 0; 4532 /* que out ptr index */ 4533 mcp->mb[9] = 0; 4534 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 4535 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4536 mcp->in_mb = MBX_0; 4537 mcp->flags = MBX_DMA_OUT; 4538 mcp->tov = MBX_TOV_SECONDS * 2; 4539 4540 if (IS_QLA81XX(ha)) { 4541 mcp->out_mb |= MBX_12|MBX_11|MBX_10; 4542 mcp->in_mb |= MBX_1; 4543 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4544 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10; 4545 mcp->in_mb |= MBX_1; 4546 /* debug q create issue in SR-IOV */ 4547 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 4548 } 4549 4550 spin_lock_irqsave(&ha->hardware_lock, flags); 4551 if (!(rsp->options & BIT_0)) { 4552 wrt_reg_dword(rsp->rsp_q_out, 0); 4553 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4554 wrt_reg_dword(rsp->rsp_q_in, 0); 4555 } 4556 4557 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4558 4559 rval = qla2x00_mailbox_command(vha, mcp); 4560 if (rval != QLA_SUCCESS) { 4561 ql_dbg(ql_dbg_mbx, vha, 0x10d7, 4562 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4563 } else { 4564 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8, 4565 "Done %s.\n", __func__); 4566 } 4567 4568 return rval; 4569} 4570 4571int 4572qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb) 4573{ 4574 int rval; 4575 mbx_cmd_t mc; 4576 mbx_cmd_t *mcp = &mc; 4577 4578 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9, 4579 "Entered %s.\n", __func__); 4580 4581 mcp->mb[0] = MBC_IDC_ACK; 4582 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 4583 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4584 mcp->in_mb = MBX_0; 4585 mcp->tov = MBX_TOV_SECONDS; 4586 mcp->flags = 0; 4587 rval = qla2x00_mailbox_command(vha, mcp); 4588 4589 if (rval != QLA_SUCCESS) { 4590 ql_dbg(ql_dbg_mbx, vha, 0x10da, 4591 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4592 } else { 4593 ql_dbg(ql_dbg_mbx 
+ ql_dbg_verbose, vha, 0x10db, 4594 "Done %s.\n", __func__); 4595 } 4596 4597 return rval; 4598} 4599 4600int 4601qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size) 4602{ 4603 int rval; 4604 mbx_cmd_t mc; 4605 mbx_cmd_t *mcp = &mc; 4606 4607 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc, 4608 "Entered %s.\n", __func__); 4609 4610 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4611 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4612 return QLA_FUNCTION_FAILED; 4613 4614 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4615 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE; 4616 mcp->out_mb = MBX_1|MBX_0; 4617 mcp->in_mb = MBX_1|MBX_0; 4618 mcp->tov = MBX_TOV_SECONDS; 4619 mcp->flags = 0; 4620 rval = qla2x00_mailbox_command(vha, mcp); 4621 4622 if (rval != QLA_SUCCESS) { 4623 ql_dbg(ql_dbg_mbx, vha, 0x10dd, 4624 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4625 rval, mcp->mb[0], mcp->mb[1]); 4626 } else { 4627 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de, 4628 "Done %s.\n", __func__); 4629 *sector_size = mcp->mb[1]; 4630 } 4631 4632 return rval; 4633} 4634 4635int 4636qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable) 4637{ 4638 int rval; 4639 mbx_cmd_t mc; 4640 mbx_cmd_t *mcp = &mc; 4641 4642 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4643 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4644 return QLA_FUNCTION_FAILED; 4645 4646 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df, 4647 "Entered %s.\n", __func__); 4648 4649 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4650 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : 4651 FAC_OPT_CMD_WRITE_PROTECT; 4652 mcp->out_mb = MBX_1|MBX_0; 4653 mcp->in_mb = MBX_1|MBX_0; 4654 mcp->tov = MBX_TOV_SECONDS; 4655 mcp->flags = 0; 4656 rval = qla2x00_mailbox_command(vha, mcp); 4657 4658 if (rval != QLA_SUCCESS) { 4659 ql_dbg(ql_dbg_mbx, vha, 0x10e0, 4660 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4661 rval, mcp->mb[0], mcp->mb[1]); 4662 } else { 4663 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1, 4664 "Done %s.\n", __func__); 4665 } 4666 4667 return rval; 4668} 4669 4670int 4671qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) 4672{ 4673 int rval; 4674 mbx_cmd_t mc; 4675 mbx_cmd_t *mcp = &mc; 4676 4677 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && 4678 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) 4679 return QLA_FUNCTION_FAILED; 4680 4681 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 4682 "Entered %s.\n", __func__); 4683 4684 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4685 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; 4686 mcp->mb[2] = LSW(start); 4687 mcp->mb[3] = MSW(start); 4688 mcp->mb[4] = LSW(finish); 4689 mcp->mb[5] = MSW(finish); 4690 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4691 mcp->in_mb = MBX_2|MBX_1|MBX_0; 4692 mcp->tov = MBX_TOV_SECONDS; 4693 mcp->flags = 0; 4694 rval = qla2x00_mailbox_command(vha, mcp); 4695 4696 if (rval != QLA_SUCCESS) { 4697 ql_dbg(ql_dbg_mbx, vha, 0x10e3, 4698 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4699 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4700 } else { 4701 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, 4702 "Done %s.\n", __func__); 4703 } 4704 4705 return rval; 4706} 4707 4708int 4709qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock) 4710{ 4711 int rval = QLA_SUCCESS; 4712 mbx_cmd_t mc; 4713 mbx_cmd_t *mcp = &mc; 4714 struct qla_hw_data *ha = vha->hw; 4715 4716 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 4717 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4718 return rval; 4719 4720 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 4721 "Entered %s.\n", __func__); 4722 4723 
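 /*
  * Flash Access Control sub-command: mb[1] selects either
  * FAC_OPT_CMD_LOCK_SEMAPHORE or FAC_OPT_CMD_UNLOCK_SEMAPHORE depending
  * on 'lock', so this one helper both acquires and releases the flash
  * semaphore.
  */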
mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 4724 mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE : 4725 FAC_OPT_CMD_UNLOCK_SEMAPHORE); 4726 mcp->out_mb = MBX_1|MBX_0; 4727 mcp->in_mb = MBX_1|MBX_0; 4728 mcp->tov = MBX_TOV_SECONDS; 4729 mcp->flags = 0; 4730 rval = qla2x00_mailbox_command(vha, mcp); 4731 4732 if (rval != QLA_SUCCESS) { 4733 ql_dbg(ql_dbg_mbx, vha, 0x10e3, 4734 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 4735 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 4736 } else { 4737 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, 4738 "Done %s.\n", __func__); 4739 } 4740 4741 return rval; 4742} 4743 4744int 4745qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) 4746{ 4747 int rval = 0; 4748 mbx_cmd_t mc; 4749 mbx_cmd_t *mcp = &mc; 4750 4751 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5, 4752 "Entered %s.\n", __func__); 4753 4754 mcp->mb[0] = MBC_RESTART_MPI_FW; 4755 mcp->out_mb = MBX_0; 4756 mcp->in_mb = MBX_0|MBX_1; 4757 mcp->tov = MBX_TOV_SECONDS; 4758 mcp->flags = 0; 4759 rval = qla2x00_mailbox_command(vha, mcp); 4760 4761 if (rval != QLA_SUCCESS) { 4762 ql_dbg(ql_dbg_mbx, vha, 0x10e6, 4763 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4764 rval, mcp->mb[0], mcp->mb[1]); 4765 } else { 4766 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7, 4767 "Done %s.\n", __func__); 4768 } 4769 4770 return rval; 4771} 4772 4773int 4774qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4775{ 4776 int rval; 4777 mbx_cmd_t mc; 4778 mbx_cmd_t *mcp = &mc; 4779 int i; 4780 int len; 4781 __le16 *str; 4782 struct qla_hw_data *ha = vha->hw; 4783 4784 if (!IS_P3P_TYPE(ha)) 4785 return QLA_FUNCTION_FAILED; 4786 4787 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b, 4788 "Entered %s.\n", __func__); 4789 4790 str = (__force __le16 *)version; 4791 len = strlen(version); 4792 4793 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4794 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8; 4795 mcp->out_mb = MBX_1|MBX_0; 4796 for (i = 4; i < 16 && len; i++, str++, len -= 2) { 4797 mcp->mb[i] = le16_to_cpup(str); 4798 mcp->out_mb |= 1<<i; 4799 } 4800 for (; i < 16; i++) { 4801 mcp->mb[i] = 0; 4802 mcp->out_mb |= 1<<i; 4803 } 4804 mcp->in_mb = MBX_1|MBX_0; 4805 mcp->tov = MBX_TOV_SECONDS; 4806 mcp->flags = 0; 4807 rval = qla2x00_mailbox_command(vha, mcp); 4808 4809 if (rval != QLA_SUCCESS) { 4810 ql_dbg(ql_dbg_mbx, vha, 0x117c, 4811 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4812 } else { 4813 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d, 4814 "Done %s.\n", __func__); 4815 } 4816 4817 return rval; 4818} 4819 4820int 4821qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version) 4822{ 4823 int rval; 4824 mbx_cmd_t mc; 4825 mbx_cmd_t *mcp = &mc; 4826 int len; 4827 uint16_t dwlen; 4828 uint8_t *str; 4829 dma_addr_t str_dma; 4830 struct qla_hw_data *ha = vha->hw; 4831 4832 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) || 4833 IS_P3P_TYPE(ha)) 4834 return QLA_FUNCTION_FAILED; 4835 4836 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e, 4837 "Entered %s.\n", __func__); 4838 4839 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma); 4840 if (!str) { 4841 ql_log(ql_log_warn, vha, 0x117f, 4842 "Failed to allocate driver version param.\n"); 4843 return QLA_MEMORY_ALLOC_FAILED; 4844 } 4845 4846 memcpy(str, "\x7\x3\x11\x0", 4); 4847 dwlen = str[0]; 4848 len = dwlen * 4 - 4; 4849 memset(str + 4, 0, len); 4850 if (len > strlen(version)) 4851 len = strlen(version); 4852 memcpy(str + 4, version, len); 4853 4854 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4855 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen; 4856 mcp->mb[2] = 
MSW(LSD(str_dma)); 4857 mcp->mb[3] = LSW(LSD(str_dma)); 4858 mcp->mb[6] = MSW(MSD(str_dma)); 4859 mcp->mb[7] = LSW(MSD(str_dma)); 4860 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4861 mcp->in_mb = MBX_1|MBX_0; 4862 mcp->tov = MBX_TOV_SECONDS; 4863 mcp->flags = 0; 4864 rval = qla2x00_mailbox_command(vha, mcp); 4865 4866 if (rval != QLA_SUCCESS) { 4867 ql_dbg(ql_dbg_mbx, vha, 0x1180, 4868 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4869 } else { 4870 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181, 4871 "Done %s.\n", __func__); 4872 } 4873 4874 dma_pool_free(ha->s_dma_pool, str, str_dma); 4875 4876 return rval; 4877} 4878 4879int 4880qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma, 4881 void *buf, uint16_t bufsiz) 4882{ 4883 int rval, i; 4884 mbx_cmd_t mc; 4885 mbx_cmd_t *mcp = &mc; 4886 uint32_t *bp; 4887 4888 if (!IS_FWI2_CAPABLE(vha->hw)) 4889 return QLA_FUNCTION_FAILED; 4890 4891 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 4892 "Entered %s.\n", __func__); 4893 4894 mcp->mb[0] = MBC_GET_RNID_PARAMS; 4895 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8; 4896 mcp->mb[2] = MSW(buf_dma); 4897 mcp->mb[3] = LSW(buf_dma); 4898 mcp->mb[6] = MSW(MSD(buf_dma)); 4899 mcp->mb[7] = LSW(MSD(buf_dma)); 4900 mcp->mb[8] = bufsiz/4; 4901 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 4902 mcp->in_mb = MBX_1|MBX_0; 4903 mcp->tov = MBX_TOV_SECONDS; 4904 mcp->flags = 0; 4905 rval = qla2x00_mailbox_command(vha, mcp); 4906 4907 if (rval != QLA_SUCCESS) { 4908 ql_dbg(ql_dbg_mbx, vha, 0x115a, 4909 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 4910 } else { 4911 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, 4912 "Done %s.\n", __func__); 4913 bp = (uint32_t *) buf; 4914 for (i = 0; i < (bufsiz-4)/4; i++, bp++) 4915 *bp = le32_to_cpu((__force __le32)*bp); 4916 } 4917 4918 return rval; 4919} 4920 4921#define PUREX_CMD_COUNT 2 4922int 4923qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha) 4924{ 4925 int rval; 4926 mbx_cmd_t mc; 4927 mbx_cmd_t *mcp = &mc; 4928 uint8_t *els_cmd_map; 4929 dma_addr_t els_cmd_map_dma; 4930 uint8_t cmd_opcode[PUREX_CMD_COUNT]; 4931 uint8_t i, index, purex_bit; 4932 struct qla_hw_data *ha = vha->hw; 4933 4934 if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) && 4935 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4936 return QLA_SUCCESS; 4937 4938 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197, 4939 "Entered %s.\n", __func__); 4940 4941 els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE, 4942 &els_cmd_map_dma, GFP_KERNEL); 4943 if (!els_cmd_map) { 4944 ql_log(ql_log_warn, vha, 0x7101, 4945 "Failed to allocate RDP els command param.\n"); 4946 return QLA_MEMORY_ALLOC_FAILED; 4947 } 4948 4949 /* List of Purex ELS */ 4950 cmd_opcode[0] = ELS_FPIN; 4951 cmd_opcode[1] = ELS_RDP; 4952 4953 for (i = 0; i < PUREX_CMD_COUNT; i++) { 4954 index = cmd_opcode[i] / 8; 4955 purex_bit = cmd_opcode[i] % 8; 4956 els_cmd_map[index] |= 1 << purex_bit; 4957 } 4958 4959 mcp->mb[0] = MBC_SET_RNID_PARAMS; 4960 mcp->mb[1] = RNID_TYPE_ELS_CMD << 8; 4961 mcp->mb[2] = MSW(LSD(els_cmd_map_dma)); 4962 mcp->mb[3] = LSW(LSD(els_cmd_map_dma)); 4963 mcp->mb[6] = MSW(MSD(els_cmd_map_dma)); 4964 mcp->mb[7] = LSW(MSD(els_cmd_map_dma)); 4965 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 4966 mcp->in_mb = MBX_1|MBX_0; 4967 mcp->tov = MBX_TOV_SECONDS; 4968 mcp->flags = MBX_DMA_OUT; 4969 mcp->buf_size = ELS_CMD_MAP_SIZE; 4970 rval = qla2x00_mailbox_command(vha, mcp); 4971 4972 if (rval != QLA_SUCCESS) { 4973 ql_dbg(ql_dbg_mbx, vha, 0x118d, 4974 "Failed=%x 
(%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]); 4975 } else { 4976 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c, 4977 "Done %s.\n", __func__); 4978 } 4979 4980 dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE, 4981 els_cmd_map, els_cmd_map_dma); 4982 4983 return rval; 4984} 4985 4986static int 4987qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) 4988{ 4989 int rval; 4990 mbx_cmd_t mc; 4991 mbx_cmd_t *mcp = &mc; 4992 4993 if (!IS_FWI2_CAPABLE(vha->hw)) 4994 return QLA_FUNCTION_FAILED; 4995 4996 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159, 4997 "Entered %s.\n", __func__); 4998 4999 mcp->mb[0] = MBC_GET_RNID_PARAMS; 5000 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8; 5001 mcp->out_mb = MBX_1|MBX_0; 5002 mcp->in_mb = MBX_1|MBX_0; 5003 mcp->tov = MBX_TOV_SECONDS; 5004 mcp->flags = 0; 5005 rval = qla2x00_mailbox_command(vha, mcp); 5006 *temp = mcp->mb[1]; 5007 5008 if (rval != QLA_SUCCESS) { 5009 ql_dbg(ql_dbg_mbx, vha, 0x115a, 5010 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); 5011 } else { 5012 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b, 5013 "Done %s.\n", __func__); 5014 } 5015 5016 return rval; 5017} 5018 5019int 5020qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, 5021 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) 5022{ 5023 int rval; 5024 mbx_cmd_t mc; 5025 mbx_cmd_t *mcp = &mc; 5026 struct qla_hw_data *ha = vha->hw; 5027 5028 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, 5029 "Entered %s.\n", __func__); 5030 5031 if (!IS_FWI2_CAPABLE(ha)) 5032 return QLA_FUNCTION_FAILED; 5033 5034 if (len == 1) 5035 opt |= BIT_0; 5036 5037 mcp->mb[0] = MBC_READ_SFP; 5038 mcp->mb[1] = dev; 5039 mcp->mb[2] = MSW(LSD(sfp_dma)); 5040 mcp->mb[3] = LSW(LSD(sfp_dma)); 5041 mcp->mb[6] = MSW(MSD(sfp_dma)); 5042 mcp->mb[7] = LSW(MSD(sfp_dma)); 5043 mcp->mb[8] = len; 5044 mcp->mb[9] = off; 5045 mcp->mb[10] = opt; 5046 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5047 mcp->in_mb = MBX_1|MBX_0; 5048 mcp->tov = MBX_TOV_SECONDS; 5049 mcp->flags = 0; 5050 rval = qla2x00_mailbox_command(vha, mcp); 5051 5052 if (opt & BIT_0) 5053 *sfp = mcp->mb[1]; 5054 5055 if (rval != QLA_SUCCESS) { 5056 ql_dbg(ql_dbg_mbx, vha, 0x10e9, 5057 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5058 if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) { 5059 /* sfp is not there */ 5060 rval = QLA_INTERFACE_ERROR; 5061 } 5062 } else { 5063 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, 5064 "Done %s.\n", __func__); 5065 } 5066 5067 return rval; 5068} 5069 5070int 5071qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, 5072 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt) 5073{ 5074 int rval; 5075 mbx_cmd_t mc; 5076 mbx_cmd_t *mcp = &mc; 5077 struct qla_hw_data *ha = vha->hw; 5078 5079 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb, 5080 "Entered %s.\n", __func__); 5081 5082 if (!IS_FWI2_CAPABLE(ha)) 5083 return QLA_FUNCTION_FAILED; 5084 5085 if (len == 1) 5086 opt |= BIT_0; 5087 5088 if (opt & BIT_0) 5089 len = *sfp; 5090 5091 mcp->mb[0] = MBC_WRITE_SFP; 5092 mcp->mb[1] = dev; 5093 mcp->mb[2] = MSW(LSD(sfp_dma)); 5094 mcp->mb[3] = LSW(LSD(sfp_dma)); 5095 mcp->mb[6] = MSW(MSD(sfp_dma)); 5096 mcp->mb[7] = LSW(MSD(sfp_dma)); 5097 mcp->mb[8] = len; 5098 mcp->mb[9] = off; 5099 mcp->mb[10] = opt; 5100 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5101 mcp->in_mb = MBX_1|MBX_0; 5102 mcp->tov = MBX_TOV_SECONDS; 5103 mcp->flags = 0; 5104 rval = qla2x00_mailbox_command(vha, mcp); 5105 5106 if (rval != 
QLA_SUCCESS) { 5107 ql_dbg(ql_dbg_mbx, vha, 0x10ec, 5108 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5109 } else { 5110 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed, 5111 "Done %s.\n", __func__); 5112 } 5113 5114 return rval; 5115} 5116 5117int 5118qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, 5119 uint16_t size_in_bytes, uint16_t *actual_size) 5120{ 5121 int rval; 5122 mbx_cmd_t mc; 5123 mbx_cmd_t *mcp = &mc; 5124 5125 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee, 5126 "Entered %s.\n", __func__); 5127 5128 if (!IS_CNA_CAPABLE(vha->hw)) 5129 return QLA_FUNCTION_FAILED; 5130 5131 mcp->mb[0] = MBC_GET_XGMAC_STATS; 5132 mcp->mb[2] = MSW(stats_dma); 5133 mcp->mb[3] = LSW(stats_dma); 5134 mcp->mb[6] = MSW(MSD(stats_dma)); 5135 mcp->mb[7] = LSW(MSD(stats_dma)); 5136 mcp->mb[8] = size_in_bytes >> 2; 5137 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 5138 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5139 mcp->tov = MBX_TOV_SECONDS; 5140 mcp->flags = 0; 5141 rval = qla2x00_mailbox_command(vha, mcp); 5142 5143 if (rval != QLA_SUCCESS) { 5144 ql_dbg(ql_dbg_mbx, vha, 0x10ef, 5145 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 5146 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 5147 } else { 5148 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0, 5149 "Done %s.\n", __func__); 5150 5151 5152 *actual_size = mcp->mb[2] << 2; 5153 } 5154 5155 return rval; 5156} 5157 5158int 5159qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, 5160 uint16_t size) 5161{ 5162 int rval; 5163 mbx_cmd_t mc; 5164 mbx_cmd_t *mcp = &mc; 5165 5166 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1, 5167 "Entered %s.\n", __func__); 5168 5169 if (!IS_CNA_CAPABLE(vha->hw)) 5170 return QLA_FUNCTION_FAILED; 5171 5172 mcp->mb[0] = MBC_GET_DCBX_PARAMS; 5173 mcp->mb[1] = 0; 5174 mcp->mb[2] = MSW(tlv_dma); 5175 mcp->mb[3] = LSW(tlv_dma); 5176 mcp->mb[6] = MSW(MSD(tlv_dma)); 5177 mcp->mb[7] = LSW(MSD(tlv_dma)); 5178 mcp->mb[8] = size; 5179 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 5180 mcp->in_mb = MBX_2|MBX_1|MBX_0; 5181 mcp->tov = MBX_TOV_SECONDS; 5182 mcp->flags = 0; 5183 rval = qla2x00_mailbox_command(vha, mcp); 5184 5185 if (rval != QLA_SUCCESS) { 5186 ql_dbg(ql_dbg_mbx, vha, 0x10f2, 5187 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 5188 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 5189 } else { 5190 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3, 5191 "Done %s.\n", __func__); 5192 } 5193 5194 return rval; 5195} 5196 5197int 5198qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) 5199{ 5200 int rval; 5201 mbx_cmd_t mc; 5202 mbx_cmd_t *mcp = &mc; 5203 5204 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4, 5205 "Entered %s.\n", __func__); 5206 5207 if (!IS_FWI2_CAPABLE(vha->hw)) 5208 return QLA_FUNCTION_FAILED; 5209 5210 mcp->mb[0] = MBC_READ_RAM_EXTENDED; 5211 mcp->mb[1] = LSW(risc_addr); 5212 mcp->mb[8] = MSW(risc_addr); 5213 mcp->out_mb = MBX_8|MBX_1|MBX_0; 5214 mcp->in_mb = MBX_3|MBX_2|MBX_0; 5215 mcp->tov = MBX_TOV_SECONDS; 5216 mcp->flags = 0; 5217 rval = qla2x00_mailbox_command(vha, mcp); 5218 if (rval != QLA_SUCCESS) { 5219 ql_dbg(ql_dbg_mbx, vha, 0x10f5, 5220 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5221 } else { 5222 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6, 5223 "Done %s.\n", __func__); 5224 *data = mcp->mb[3] << 16 | mcp->mb[2]; 5225 } 5226 5227 return rval; 5228} 5229 5230int 5231qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 5232 uint16_t *mresp) 5233{ 5234 int rval; 5235 mbx_cmd_t mc; 5236 mbx_cmd_t *mcp = &mc; 5237 5238 
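 /*
  * Diagnostic loopback: the caller-supplied msg_echo_lb describes the
  * DMA-able send/receive buffers, the transfer size and the iteration
  * count, which are scattered over the mailbox registers below
  * (mb[10-11] transfer size, mb[14-15]/mb[20-21] send address,
  * mb[16-17]/mb[6-7] receive address, mb[18-19] iteration count).
  */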
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7, 5239 "Entered %s.\n", __func__); 5240 5241 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5242 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; 5243 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing 5244 5245 /* transfer count */ 5246 mcp->mb[10] = LSW(mreq->transfer_size); 5247 mcp->mb[11] = MSW(mreq->transfer_size); 5248 5249 /* send data address */ 5250 mcp->mb[14] = LSW(mreq->send_dma); 5251 mcp->mb[15] = MSW(mreq->send_dma); 5252 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 5253 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 5254 5255 /* receive data address */ 5256 mcp->mb[16] = LSW(mreq->rcv_dma); 5257 mcp->mb[17] = MSW(mreq->rcv_dma); 5258 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 5259 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 5260 5261 /* Iteration count */ 5262 mcp->mb[18] = LSW(mreq->iteration_count); 5263 mcp->mb[19] = MSW(mreq->iteration_count); 5264 5265 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15| 5266 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 5267 if (IS_CNA_CAPABLE(vha->hw)) 5268 mcp->out_mb |= MBX_2; 5269 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0; 5270 5271 mcp->buf_size = mreq->transfer_size; 5272 mcp->tov = MBX_TOV_SECONDS; 5273 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5274 5275 rval = qla2x00_mailbox_command(vha, mcp); 5276 5277 if (rval != QLA_SUCCESS) { 5278 ql_dbg(ql_dbg_mbx, vha, 0x10f8, 5279 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x " 5280 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], 5281 mcp->mb[3], mcp->mb[18], mcp->mb[19]); 5282 } else { 5283 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9, 5284 "Done %s.\n", __func__); 5285 } 5286 5287 /* Copy mailbox information */ 5288 memcpy( mresp, mcp->mb, 64); 5289 return rval; 5290} 5291 5292int 5293qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, 5294 uint16_t *mresp) 5295{ 5296 int rval; 5297 mbx_cmd_t mc; 5298 mbx_cmd_t *mcp = &mc; 5299 struct qla_hw_data *ha = vha->hw; 5300 5301 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa, 5302 "Entered %s.\n", __func__); 5303 5304 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5305 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; 5306 /* BIT_6 specifies 64bit address */ 5307 mcp->mb[1] = mreq->options | BIT_15 | BIT_6; 5308 if (IS_CNA_CAPABLE(ha)) { 5309 mcp->mb[2] = vha->fcoe_fcf_idx; 5310 } 5311 mcp->mb[16] = LSW(mreq->rcv_dma); 5312 mcp->mb[17] = MSW(mreq->rcv_dma); 5313 mcp->mb[6] = LSW(MSD(mreq->rcv_dma)); 5314 mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); 5315 5316 mcp->mb[10] = LSW(mreq->transfer_size); 5317 5318 mcp->mb[14] = LSW(mreq->send_dma); 5319 mcp->mb[15] = MSW(mreq->send_dma); 5320 mcp->mb[20] = LSW(MSD(mreq->send_dma)); 5321 mcp->mb[21] = MSW(MSD(mreq->send_dma)); 5322 5323 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15| 5324 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 5325 if (IS_CNA_CAPABLE(ha)) 5326 mcp->out_mb |= MBX_2; 5327 5328 mcp->in_mb = MBX_0; 5329 if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || 5330 IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) 5331 mcp->in_mb |= MBX_1; 5332 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 5333 IS_QLA28XX(ha)) 5334 mcp->in_mb |= MBX_3; 5335 5336 mcp->tov = MBX_TOV_SECONDS; 5337 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5338 mcp->buf_size = mreq->transfer_size; 5339 5340 rval = qla2x00_mailbox_command(vha, mcp); 5341 5342 if (rval != QLA_SUCCESS) { 5343 ql_dbg(ql_dbg_mbx, vha, 0x10fb, 5344 "Failed=%x mb[0]=%x mb[1]=%x.\n", 5345 rval, mcp->mb[0], mcp->mb[1]); 5346 } else { 5347 
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
5348 "Done %s.\n", __func__);
5349 }
5350
5351 /* Copy mailbox information */
5352 memcpy(mresp, mcp->mb, 64);
5353 return rval;
5354}
5355
5356int
5357qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
5358{
5359 int rval;
5360 mbx_cmd_t mc;
5361 mbx_cmd_t *mcp = &mc;
5362
5363 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
5364 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
5365
5366 mcp->mb[0] = MBC_ISP84XX_RESET;
5367 mcp->mb[1] = enable_diagnostic;
5368 mcp->out_mb = MBX_1|MBX_0;
5369 mcp->in_mb = MBX_1|MBX_0;
5370 mcp->tov = MBX_TOV_SECONDS;
5371 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5372 rval = qla2x00_mailbox_command(vha, mcp);
5373
5374 if (rval != QLA_SUCCESS)
5375 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
5376 else
5377 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
5378 "Done %s.\n", __func__);
5379
5380 return rval;
5381}
5382
5383int
5384qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
5385{
5386 int rval;
5387 mbx_cmd_t mc;
5388 mbx_cmd_t *mcp = &mc;
5389
5390 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
5391 "Entered %s.\n", __func__);
5392
5393 if (!IS_FWI2_CAPABLE(vha->hw))
5394 return QLA_FUNCTION_FAILED;
5395
5396 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
5397 mcp->mb[1] = LSW(risc_addr);
5398 mcp->mb[2] = LSW(data);
5399 mcp->mb[3] = MSW(data);
5400 mcp->mb[8] = MSW(risc_addr);
5401 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
5402 mcp->in_mb = MBX_1|MBX_0;
5403 mcp->tov = MBX_TOV_SECONDS;
5404 mcp->flags = 0;
5405 rval = qla2x00_mailbox_command(vha, mcp);
5406 if (rval != QLA_SUCCESS) {
5407 ql_dbg(ql_dbg_mbx, vha, 0x1101,
5408 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5409 rval, mcp->mb[0], mcp->mb[1]);
5410 } else {
5411 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
5412 "Done %s.\n", __func__);
5413 }
5414
5415 return rval;
5416}
5417
5418int
5419qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
5420{
5421 int rval;
5422 uint32_t stat, timer;
5423 uint16_t mb0 = 0;
5424 struct qla_hw_data *ha = vha->hw;
5425 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5426
5427 rval = QLA_SUCCESS;
5428
5429 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
5430 "Entered %s.\n", __func__);
5431
5432 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
5433
5434 /* Write the MBC data to the registers */
5435 wrt_reg_word(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
5436 wrt_reg_word(&reg->mailbox1, mb[0]);
5437 wrt_reg_word(&reg->mailbox2, mb[1]);
5438 wrt_reg_word(&reg->mailbox3, mb[2]);
5439 wrt_reg_word(&reg->mailbox4, mb[3]);
5440
5441 wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
5442
5443 /* Poll for MBC interrupt */
5444 for (timer = 6000000; timer; timer--) {
5445 /* Check for pending interrupts.
*/
5446 stat = rd_reg_dword(&reg->host_status);
5447 if (stat & HSRX_RISC_INT) {
5448 stat &= 0xff;
5449
5450 if (stat == 0x1 || stat == 0x2 ||
5451 stat == 0x10 || stat == 0x11) {
5452 set_bit(MBX_INTERRUPT,
5453 &ha->mbx_cmd_flags);
5454 mb0 = rd_reg_word(&reg->mailbox0);
5455 wrt_reg_dword(&reg->hccr,
5456 HCCRX_CLR_RISC_INT);
5457 rd_reg_dword(&reg->hccr);
5458 break;
5459 }
5460 }
5461 udelay(5);
5462 }
5463
5464 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
5465 rval = mb0 & MBS_MASK;
5466 else
5467 rval = QLA_FUNCTION_FAILED;
5468
5469 if (rval != QLA_SUCCESS) {
5470 ql_dbg(ql_dbg_mbx, vha, 0x1104,
5471 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5472 } else {
5473 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5474 "Done %s.\n", __func__);
5475 }
5476
5477 return rval;
5478}
5479
5480/* Set the specified data rate */
5481int
5482qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode)
5483{
5484 int rval;
5485 mbx_cmd_t mc;
5486 mbx_cmd_t *mcp = &mc;
5487 struct qla_hw_data *ha = vha->hw;
5488 uint16_t val;
5489
5490 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5491 "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate,
5492 mode);
5493
5494 if (!IS_FWI2_CAPABLE(ha))
5495 return QLA_FUNCTION_FAILED;
5496
5497 memset(mcp, 0, sizeof(*mcp));
5498 switch (ha->set_data_rate) {
5499 case PORT_SPEED_AUTO:
5500 case PORT_SPEED_4GB:
5501 case PORT_SPEED_8GB:
5502 case PORT_SPEED_16GB:
5503 case PORT_SPEED_32GB:
5504 val = ha->set_data_rate;
5505 break;
5506 default:
5507 ql_log(ql_log_warn, vha, 0x1199,
5508 "Unrecognized speed setting:%d. Setting Autoneg\n",
5509 ha->set_data_rate);
5510 val = ha->set_data_rate = PORT_SPEED_AUTO;
5511 break;
5512 }
5513
5514 mcp->mb[0] = MBC_DATA_RATE;
5515 mcp->mb[1] = mode;
5516 mcp->mb[2] = val;
5517
5518 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5519 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5520 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5521 mcp->in_mb |= MBX_4|MBX_3;
5522 mcp->tov = MBX_TOV_SECONDS;
5523 mcp->flags = 0;
5524 rval = qla2x00_mailbox_command(vha, mcp);
5525 if (rval != QLA_SUCCESS) {
5526 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5527 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5528 } else {
5529 if (mcp->mb[1] != 0x7)
5530 ql_dbg(ql_dbg_mbx, vha, 0x1179,
5531 "Speed set:0x%x\n", mcp->mb[1]);
5532
5533 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5534 "Done %s.\n", __func__);
5535 }
5536
5537 return rval;
5538}
5539
5540int
5541qla2x00_get_data_rate(scsi_qla_host_t *vha)
5542{
5543 int rval;
5544 mbx_cmd_t mc;
5545 mbx_cmd_t *mcp = &mc;
5546 struct qla_hw_data *ha = vha->hw;
5547
5548 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5549 "Entered %s.\n", __func__);
5550
5551 if (!IS_FWI2_CAPABLE(ha))
5552 return QLA_FUNCTION_FAILED;
5553
5554 mcp->mb[0] = MBC_DATA_RATE;
5555 mcp->mb[1] = QLA_GET_DATA_RATE;
5556 mcp->out_mb = MBX_1|MBX_0;
5557 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5558 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5559 mcp->in_mb |= MBX_4|MBX_3;
5560 mcp->tov = MBX_TOV_SECONDS;
5561 mcp->flags = 0;
5562 rval = qla2x00_mailbox_command(vha, mcp);
5563 if (rval != QLA_SUCCESS) {
5564 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5565 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5566 } else {
5567 if (mcp->mb[1] != 0x7)
5568 ha->link_data_rate = mcp->mb[1];
5569
5570 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
5571 if (mcp->mb[4] & BIT_0)
5572 ql_log(ql_log_info, vha, 0x11a2,
5573 "FEC=enabled (data rate).\n");
5574 }
5575
5576 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5577 "Done %s.\n", __func__);
5578 if
(mcp->mb[1] != 0x7) 5579 ha->link_data_rate = mcp->mb[1]; 5580 } 5581 5582 return rval; 5583} 5584 5585int 5586qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5587{ 5588 int rval; 5589 mbx_cmd_t mc; 5590 mbx_cmd_t *mcp = &mc; 5591 struct qla_hw_data *ha = vha->hw; 5592 5593 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109, 5594 "Entered %s.\n", __func__); 5595 5596 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) && 5597 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 5598 return QLA_FUNCTION_FAILED; 5599 mcp->mb[0] = MBC_GET_PORT_CONFIG; 5600 mcp->out_mb = MBX_0; 5601 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5602 mcp->tov = MBX_TOV_SECONDS; 5603 mcp->flags = 0; 5604 5605 rval = qla2x00_mailbox_command(vha, mcp); 5606 5607 if (rval != QLA_SUCCESS) { 5608 ql_dbg(ql_dbg_mbx, vha, 0x110a, 5609 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5610 } else { 5611 /* Copy all bits to preserve original value */ 5612 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); 5613 5614 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b, 5615 "Done %s.\n", __func__); 5616 } 5617 return rval; 5618} 5619 5620int 5621qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb) 5622{ 5623 int rval; 5624 mbx_cmd_t mc; 5625 mbx_cmd_t *mcp = &mc; 5626 5627 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c, 5628 "Entered %s.\n", __func__); 5629 5630 mcp->mb[0] = MBC_SET_PORT_CONFIG; 5631 /* Copy all bits to preserve original setting */ 5632 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4); 5633 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5634 mcp->in_mb = MBX_0; 5635 mcp->tov = MBX_TOV_SECONDS; 5636 mcp->flags = 0; 5637 rval = qla2x00_mailbox_command(vha, mcp); 5638 5639 if (rval != QLA_SUCCESS) { 5640 ql_dbg(ql_dbg_mbx, vha, 0x110d, 5641 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5642 } else 5643 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e, 5644 "Done %s.\n", __func__); 5645 5646 return rval; 5647} 5648 5649 5650int 5651qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, 5652 uint16_t *mb) 5653{ 5654 int rval; 5655 mbx_cmd_t mc; 5656 mbx_cmd_t *mcp = &mc; 5657 struct qla_hw_data *ha = vha->hw; 5658 5659 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f, 5660 "Entered %s.\n", __func__); 5661 5662 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 5663 return QLA_FUNCTION_FAILED; 5664 5665 mcp->mb[0] = MBC_PORT_PARAMS; 5666 mcp->mb[1] = loop_id; 5667 if (ha->flags.fcp_prio_enabled) 5668 mcp->mb[2] = BIT_1; 5669 else 5670 mcp->mb[2] = BIT_2; 5671 mcp->mb[4] = priority & 0xf; 5672 mcp->mb[9] = vha->vp_idx; 5673 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5674 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; 5675 mcp->tov = MBX_TOV_SECONDS; 5676 mcp->flags = 0; 5677 rval = qla2x00_mailbox_command(vha, mcp); 5678 if (mb != NULL) { 5679 mb[0] = mcp->mb[0]; 5680 mb[1] = mcp->mb[1]; 5681 mb[3] = mcp->mb[3]; 5682 mb[4] = mcp->mb[4]; 5683 } 5684 5685 if (rval != QLA_SUCCESS) { 5686 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval); 5687 } else { 5688 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc, 5689 "Done %s.\n", __func__); 5690 } 5691 5692 return rval; 5693} 5694 5695int 5696qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp) 5697{ 5698 int rval = QLA_FUNCTION_FAILED; 5699 struct qla_hw_data *ha = vha->hw; 5700 uint8_t byte; 5701 5702 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) { 5703 ql_dbg(ql_dbg_mbx, vha, 0x1150, 5704 "Thermal not supported by this card.\n"); 5705 return rval; 5706 } 5707 5708 if (IS_QLA25XX(ha)) { 5709 if (ha->pdev->subsystem_vendor == 
PCI_VENDOR_ID_QLOGIC && 5710 ha->pdev->subsystem_device == 0x0175) { 5711 rval = qla2x00_read_sfp(vha, 0, &byte, 5712 0x98, 0x1, 1, BIT_13|BIT_0); 5713 *temp = byte; 5714 return rval; 5715 } 5716 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 5717 ha->pdev->subsystem_device == 0x338e) { 5718 rval = qla2x00_read_sfp(vha, 0, &byte, 5719 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0); 5720 *temp = byte; 5721 return rval; 5722 } 5723 ql_dbg(ql_dbg_mbx, vha, 0x10c9, 5724 "Thermal not supported by this card.\n"); 5725 return rval; 5726 } 5727 5728 if (IS_QLA82XX(ha)) { 5729 *temp = qla82xx_read_temperature(vha); 5730 rval = QLA_SUCCESS; 5731 return rval; 5732 } else if (IS_QLA8044(ha)) { 5733 *temp = qla8044_read_temperature(vha); 5734 rval = QLA_SUCCESS; 5735 return rval; 5736 } 5737 5738 rval = qla2x00_read_asic_temperature(vha, temp); 5739 return rval; 5740} 5741 5742int 5743qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) 5744{ 5745 int rval; 5746 struct qla_hw_data *ha = vha->hw; 5747 mbx_cmd_t mc; 5748 mbx_cmd_t *mcp = &mc; 5749 5750 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017, 5751 "Entered %s.\n", __func__); 5752 5753 if (!IS_FWI2_CAPABLE(ha)) 5754 return QLA_FUNCTION_FAILED; 5755 5756 memset(mcp, 0, sizeof(mbx_cmd_t)); 5757 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 5758 mcp->mb[1] = 1; 5759 5760 mcp->out_mb = MBX_1|MBX_0; 5761 mcp->in_mb = MBX_0; 5762 mcp->tov = MBX_TOV_SECONDS; 5763 mcp->flags = 0; 5764 5765 rval = qla2x00_mailbox_command(vha, mcp); 5766 if (rval != QLA_SUCCESS) { 5767 ql_dbg(ql_dbg_mbx, vha, 0x1016, 5768 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5769 } else { 5770 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e, 5771 "Done %s.\n", __func__); 5772 } 5773 5774 return rval; 5775} 5776 5777int 5778qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) 5779{ 5780 int rval; 5781 struct qla_hw_data *ha = vha->hw; 5782 mbx_cmd_t mc; 5783 mbx_cmd_t *mcp = &mc; 5784 5785 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d, 5786 "Entered %s.\n", __func__); 5787 5788 if (!IS_P3P_TYPE(ha)) 5789 return QLA_FUNCTION_FAILED; 5790 5791 memset(mcp, 0, sizeof(mbx_cmd_t)); 5792 mcp->mb[0] = MBC_TOGGLE_INTERRUPT; 5793 mcp->mb[1] = 0; 5794 5795 mcp->out_mb = MBX_1|MBX_0; 5796 mcp->in_mb = MBX_0; 5797 mcp->tov = MBX_TOV_SECONDS; 5798 mcp->flags = 0; 5799 5800 rval = qla2x00_mailbox_command(vha, mcp); 5801 if (rval != QLA_SUCCESS) { 5802 ql_dbg(ql_dbg_mbx, vha, 0x100c, 5803 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5804 } else { 5805 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b, 5806 "Done %s.\n", __func__); 5807 } 5808 5809 return rval; 5810} 5811 5812int 5813qla82xx_md_get_template_size(scsi_qla_host_t *vha) 5814{ 5815 struct qla_hw_data *ha = vha->hw; 5816 mbx_cmd_t mc; 5817 mbx_cmd_t *mcp = &mc; 5818 int rval = QLA_FUNCTION_FAILED; 5819 5820 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f, 5821 "Entered %s.\n", __func__); 5822 5823 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5824 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5825 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5826 mcp->mb[2] = LSW(RQST_TMPLT_SIZE); 5827 mcp->mb[3] = MSW(RQST_TMPLT_SIZE); 5828 5829 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5830 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| 5831 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5832 5833 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5834 mcp->tov = MBX_TOV_SECONDS; 5835 rval = qla2x00_mailbox_command(vha, mcp); 5836 5837 /* Always copy back return mailbox values. 
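	 * On success the template size is assembled from mb[3]/mb[2]; on
	 * failure the same registers are reported as the failing subcode.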
*/ 5838 if (rval != QLA_SUCCESS) { 5839 ql_dbg(ql_dbg_mbx, vha, 0x1120, 5840 "mailbox command FAILED=0x%x, subcode=%x.\n", 5841 (mcp->mb[1] << 16) | mcp->mb[0], 5842 (mcp->mb[3] << 16) | mcp->mb[2]); 5843 } else { 5844 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121, 5845 "Done %s.\n", __func__); 5846 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]); 5847 if (!ha->md_template_size) { 5848 ql_dbg(ql_dbg_mbx, vha, 0x1122, 5849 "Null template size obtained.\n"); 5850 rval = QLA_FUNCTION_FAILED; 5851 } 5852 } 5853 return rval; 5854} 5855 5856int 5857qla82xx_md_get_template(scsi_qla_host_t *vha) 5858{ 5859 struct qla_hw_data *ha = vha->hw; 5860 mbx_cmd_t mc; 5861 mbx_cmd_t *mcp = &mc; 5862 int rval = QLA_FUNCTION_FAILED; 5863 5864 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123, 5865 "Entered %s.\n", __func__); 5866 5867 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 5868 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 5869 if (!ha->md_tmplt_hdr) { 5870 ql_log(ql_log_warn, vha, 0x1124, 5871 "Unable to allocate memory for Minidump template.\n"); 5872 return rval; 5873 } 5874 5875 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5876 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5877 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5878 mcp->mb[2] = LSW(RQST_TMPLT); 5879 mcp->mb[3] = MSW(RQST_TMPLT); 5880 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma)); 5881 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma)); 5882 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma)); 5883 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma)); 5884 mcp->mb[8] = LSW(ha->md_template_size); 5885 mcp->mb[9] = MSW(ha->md_template_size); 5886 5887 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5888 mcp->tov = MBX_TOV_SECONDS; 5889 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 5890 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5891 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5892 rval = qla2x00_mailbox_command(vha, mcp); 5893 5894 if (rval != QLA_SUCCESS) { 5895 ql_dbg(ql_dbg_mbx, vha, 0x1125, 5896 "mailbox command FAILED=0x%x, subcode=%x.\n", 5897 ((mcp->mb[1] << 16) | mcp->mb[0]), 5898 ((mcp->mb[3] << 16) | mcp->mb[2])); 5899 } else 5900 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126, 5901 "Done %s.\n", __func__); 5902 return rval; 5903} 5904 5905int 5906qla8044_md_get_template(scsi_qla_host_t *vha) 5907{ 5908 struct qla_hw_data *ha = vha->hw; 5909 mbx_cmd_t mc; 5910 mbx_cmd_t *mcp = &mc; 5911 int rval = QLA_FUNCTION_FAILED; 5912 int offset = 0, size = MINIDUMP_SIZE_36K; 5913 5914 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f, 5915 "Entered %s.\n", __func__); 5916 5917 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 5918 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 5919 if (!ha->md_tmplt_hdr) { 5920 ql_log(ql_log_warn, vha, 0xb11b, 5921 "Unable to allocate memory for Minidump template.\n"); 5922 return rval; 5923 } 5924 5925 memset(mcp->mb, 0 , sizeof(mcp->mb)); 5926 while (offset < ha->md_template_size) { 5927 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5928 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 5929 mcp->mb[2] = LSW(RQST_TMPLT); 5930 mcp->mb[3] = MSW(RQST_TMPLT); 5931 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset)); 5932 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset)); 5933 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset)); 5934 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset)); 5935 mcp->mb[8] = LSW(size); 5936 mcp->mb[9] = MSW(size); 5937 mcp->mb[10] = offset & 0x0000FFFF; 5938 mcp->mb[11] = offset & 0xFFFF0000; 5939 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; 5940 
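 /*
  * Each pass of the loop fetches the next MINIDUMP_SIZE_36K-byte chunk
  * of the template: mb[4-7] point at md_tmplt_hdr_dma + offset, so the
  * transfer resumes where the previous iteration stopped.
  */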
mcp->tov = MBX_TOV_SECONDS; 5941 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| 5942 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 5943 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 5944 rval = qla2x00_mailbox_command(vha, mcp); 5945 5946 if (rval != QLA_SUCCESS) { 5947 ql_dbg(ql_dbg_mbx, vha, 0xb11c, 5948 "mailbox command FAILED=0x%x, subcode=%x.\n", 5949 ((mcp->mb[1] << 16) | mcp->mb[0]), 5950 ((mcp->mb[3] << 16) | mcp->mb[2])); 5951 return rval; 5952 } else 5953 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d, 5954 "Done %s.\n", __func__); 5955 offset = offset + size; 5956 } 5957 return rval; 5958} 5959 5960int 5961qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 5962{ 5963 int rval; 5964 struct qla_hw_data *ha = vha->hw; 5965 mbx_cmd_t mc; 5966 mbx_cmd_t *mcp = &mc; 5967 5968 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 5969 return QLA_FUNCTION_FAILED; 5970 5971 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133, 5972 "Entered %s.\n", __func__); 5973 5974 memset(mcp, 0, sizeof(mbx_cmd_t)); 5975 mcp->mb[0] = MBC_SET_LED_CONFIG; 5976 mcp->mb[1] = led_cfg[0]; 5977 mcp->mb[2] = led_cfg[1]; 5978 if (IS_QLA8031(ha)) { 5979 mcp->mb[3] = led_cfg[2]; 5980 mcp->mb[4] = led_cfg[3]; 5981 mcp->mb[5] = led_cfg[4]; 5982 mcp->mb[6] = led_cfg[5]; 5983 } 5984 5985 mcp->out_mb = MBX_2|MBX_1|MBX_0; 5986 if (IS_QLA8031(ha)) 5987 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 5988 mcp->in_mb = MBX_0; 5989 mcp->tov = MBX_TOV_SECONDS; 5990 mcp->flags = 0; 5991 5992 rval = qla2x00_mailbox_command(vha, mcp); 5993 if (rval != QLA_SUCCESS) { 5994 ql_dbg(ql_dbg_mbx, vha, 0x1134, 5995 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 5996 } else { 5997 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135, 5998 "Done %s.\n", __func__); 5999 } 6000 6001 return rval; 6002} 6003 6004int 6005qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) 6006{ 6007 int rval; 6008 struct qla_hw_data *ha = vha->hw; 6009 mbx_cmd_t mc; 6010 mbx_cmd_t *mcp = &mc; 6011 6012 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 6013 return QLA_FUNCTION_FAILED; 6014 6015 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136, 6016 "Entered %s.\n", __func__); 6017 6018 memset(mcp, 0, sizeof(mbx_cmd_t)); 6019 mcp->mb[0] = MBC_GET_LED_CONFIG; 6020 6021 mcp->out_mb = MBX_0; 6022 mcp->in_mb = MBX_2|MBX_1|MBX_0; 6023 if (IS_QLA8031(ha)) 6024 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3; 6025 mcp->tov = MBX_TOV_SECONDS; 6026 mcp->flags = 0; 6027 6028 rval = qla2x00_mailbox_command(vha, mcp); 6029 if (rval != QLA_SUCCESS) { 6030 ql_dbg(ql_dbg_mbx, vha, 0x1137, 6031 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6032 } else { 6033 led_cfg[0] = mcp->mb[1]; 6034 led_cfg[1] = mcp->mb[2]; 6035 if (IS_QLA8031(ha)) { 6036 led_cfg[2] = mcp->mb[3]; 6037 led_cfg[3] = mcp->mb[4]; 6038 led_cfg[4] = mcp->mb[5]; 6039 led_cfg[5] = mcp->mb[6]; 6040 } 6041 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138, 6042 "Done %s.\n", __func__); 6043 } 6044 6045 return rval; 6046} 6047 6048int 6049qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable) 6050{ 6051 int rval; 6052 struct qla_hw_data *ha = vha->hw; 6053 mbx_cmd_t mc; 6054 mbx_cmd_t *mcp = &mc; 6055 6056 if (!IS_P3P_TYPE(ha)) 6057 return QLA_FUNCTION_FAILED; 6058 6059 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127, 6060 "Entered %s.\n", __func__); 6061 6062 memset(mcp, 0, sizeof(mbx_cmd_t)); 6063 mcp->mb[0] = MBC_SET_LED_CONFIG; 6064 if (enable) 6065 mcp->mb[7] = 0xE; 6066 else 6067 mcp->mb[7] = 0xD; 6068 6069 mcp->out_mb = MBX_7|MBX_0; 6070 mcp->in_mb = MBX_0; 6071 mcp->tov = MBX_TOV_SECONDS; 6072 mcp->flags = 0; 6073 6074 rval = 
qla2x00_mailbox_command(vha, mcp); 6075 if (rval != QLA_SUCCESS) { 6076 ql_dbg(ql_dbg_mbx, vha, 0x1128, 6077 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6078 } else { 6079 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129, 6080 "Done %s.\n", __func__); 6081 } 6082 6083 return rval; 6084} 6085 6086int 6087qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data) 6088{ 6089 int rval; 6090 struct qla_hw_data *ha = vha->hw; 6091 mbx_cmd_t mc; 6092 mbx_cmd_t *mcp = &mc; 6093 6094 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 6095 return QLA_FUNCTION_FAILED; 6096 6097 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130, 6098 "Entered %s.\n", __func__); 6099 6100 mcp->mb[0] = MBC_WRITE_REMOTE_REG; 6101 mcp->mb[1] = LSW(reg); 6102 mcp->mb[2] = MSW(reg); 6103 mcp->mb[3] = LSW(data); 6104 mcp->mb[4] = MSW(data); 6105 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 6106 6107 mcp->in_mb = MBX_1|MBX_0; 6108 mcp->tov = MBX_TOV_SECONDS; 6109 mcp->flags = 0; 6110 rval = qla2x00_mailbox_command(vha, mcp); 6111 6112 if (rval != QLA_SUCCESS) { 6113 ql_dbg(ql_dbg_mbx, vha, 0x1131, 6114 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6115 } else { 6116 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132, 6117 "Done %s.\n", __func__); 6118 } 6119 6120 return rval; 6121} 6122 6123int 6124qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport) 6125{ 6126 int rval; 6127 struct qla_hw_data *ha = vha->hw; 6128 mbx_cmd_t mc; 6129 mbx_cmd_t *mcp = &mc; 6130 6131 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 6132 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b, 6133 "Implicit LOGO Unsupported.\n"); 6134 return QLA_FUNCTION_FAILED; 6135 } 6136 6137 6138 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c, 6139 "Entering %s.\n", __func__); 6140 6141 /* Perform Implicit LOGO. */ 6142 mcp->mb[0] = MBC_PORT_LOGOUT; 6143 mcp->mb[1] = fcport->loop_id; 6144 mcp->mb[10] = BIT_15; 6145 mcp->out_mb = MBX_10|MBX_1|MBX_0; 6146 mcp->in_mb = MBX_0; 6147 mcp->tov = MBX_TOV_SECONDS; 6148 mcp->flags = 0; 6149 rval = qla2x00_mailbox_command(vha, mcp); 6150 if (rval != QLA_SUCCESS) 6151 ql_dbg(ql_dbg_mbx, vha, 0x113d, 6152 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 6153 else 6154 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e, 6155 "Done %s.\n", __func__); 6156 6157 return rval; 6158} 6159 6160int 6161qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data) 6162{ 6163 int rval; 6164 mbx_cmd_t mc; 6165 mbx_cmd_t *mcp = &mc; 6166 struct qla_hw_data *ha = vha->hw; 6167 unsigned long retry_max_time = jiffies + (2 * HZ); 6168 6169 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 6170 return QLA_FUNCTION_FAILED; 6171 6172 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__); 6173 6174retry_rd_reg: 6175 mcp->mb[0] = MBC_READ_REMOTE_REG; 6176 mcp->mb[1] = LSW(reg); 6177 mcp->mb[2] = MSW(reg); 6178 mcp->out_mb = MBX_2|MBX_1|MBX_0; 6179 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; 6180 mcp->tov = MBX_TOV_SECONDS; 6181 mcp->flags = 0; 6182 rval = qla2x00_mailbox_command(vha, mcp); 6183 6184 if (rval != QLA_SUCCESS) { 6185 ql_dbg(ql_dbg_mbx, vha, 0x114c, 6186 "Failed=%x mb[0]=%x mb[1]=%x.\n", 6187 rval, mcp->mb[0], mcp->mb[1]); 6188 } else { 6189 *data = (mcp->mb[3] | (mcp->mb[4] << 16)); 6190 if (*data == QLA8XXX_BAD_VALUE) { 6191 /* 6192 * During soft-reset CAMRAM register reads might 6193 * return 0xbad0bad0. So retry for MAX of 2 sec 6194 * while reading camram registers. 6195 */ 6196 if (time_after(jiffies, retry_max_time)) { 6197 ql_dbg(ql_dbg_mbx, vha, 0x1141, 6198 "Failure to read CAMRAM register. 
" 6199 "data=0x%x.\n", *data); 6200 return QLA_FUNCTION_FAILED; 6201 } 6202 msleep(100); 6203 goto retry_rd_reg; 6204 } 6205 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__); 6206 } 6207 6208 return rval; 6209} 6210 6211int 6212qla83xx_restart_nic_firmware(scsi_qla_host_t *vha) 6213{ 6214 int rval; 6215 mbx_cmd_t mc; 6216 mbx_cmd_t *mcp = &mc; 6217 struct qla_hw_data *ha = vha->hw; 6218 6219 if (!IS_QLA83XX(ha)) 6220 return QLA_FUNCTION_FAILED; 6221 6222 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__); 6223 6224 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE; 6225 mcp->out_mb = MBX_0; 6226 mcp->in_mb = MBX_1|MBX_0; 6227 mcp->tov = MBX_TOV_SECONDS; 6228 mcp->flags = 0; 6229 rval = qla2x00_mailbox_command(vha, mcp); 6230 6231 if (rval != QLA_SUCCESS) { 6232 ql_dbg(ql_dbg_mbx, vha, 0x1144, 6233 "Failed=%x mb[0]=%x mb[1]=%x.\n", 6234 rval, mcp->mb[0], mcp->mb[1]); 6235 qla2xxx_dump_fw(vha); 6236 } else { 6237 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__); 6238 } 6239 6240 return rval; 6241} 6242 6243int 6244qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options, 6245 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size) 6246{ 6247 int rval; 6248 mbx_cmd_t mc; 6249 mbx_cmd_t *mcp = &mc; 6250 uint8_t subcode = (uint8_t)options; 6251 struct qla_hw_data *ha = vha->hw; 6252 6253 if (!IS_QLA8031(ha)) 6254 return QLA_FUNCTION_FAILED; 6255 6256 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__); 6257 6258 mcp->mb[0] = MBC_SET_ACCESS_CONTROL; 6259 mcp->mb[1] = options; 6260 mcp->out_mb = MBX_1|MBX_0; 6261 if (subcode & BIT_2) { 6262 mcp->mb[2] = LSW(start_addr); 6263 mcp->mb[3] = MSW(start_addr); 6264 mcp->mb[4] = LSW(end_addr); 6265 mcp->mb[5] = MSW(end_addr); 6266 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2; 6267 } 6268 mcp->in_mb = MBX_2|MBX_1|MBX_0; 6269 if (!(subcode & (BIT_2 | BIT_5))) 6270 mcp->in_mb |= MBX_4|MBX_3; 6271 mcp->tov = MBX_TOV_SECONDS; 6272 mcp->flags = 0; 6273 rval = qla2x00_mailbox_command(vha, mcp); 6274 6275 if (rval != QLA_SUCCESS) { 6276 ql_dbg(ql_dbg_mbx, vha, 0x1147, 6277 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n", 6278 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], 6279 mcp->mb[4]); 6280 qla2xxx_dump_fw(vha); 6281 } else { 6282 if (subcode & BIT_5) 6283 *sector_size = mcp->mb[1]; 6284 else if (subcode & (BIT_6 | BIT_7)) { 6285 ql_dbg(ql_dbg_mbx, vha, 0x1148, 6286 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]); 6287 } else if (subcode & (BIT_3 | BIT_4)) { 6288 ql_dbg(ql_dbg_mbx, vha, 0x1149, 6289 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]); 6290 } 6291 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__); 6292 } 6293 6294 return rval; 6295} 6296 6297int 6298qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, 6299 uint32_t size) 6300{ 6301 int rval; 6302 mbx_cmd_t mc; 6303 mbx_cmd_t *mcp = &mc; 6304 6305 if (!IS_MCTP_CAPABLE(vha->hw)) 6306 return QLA_FUNCTION_FAILED; 6307 6308 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f, 6309 "Entered %s.\n", __func__); 6310 6311 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 6312 mcp->mb[1] = LSW(addr); 6313 mcp->mb[2] = MSW(req_dma); 6314 mcp->mb[3] = LSW(req_dma); 6315 mcp->mb[4] = MSW(size); 6316 mcp->mb[5] = LSW(size); 6317 mcp->mb[6] = MSW(MSD(req_dma)); 6318 mcp->mb[7] = LSW(MSD(req_dma)); 6319 mcp->mb[8] = MSW(addr); 6320 /* Setting RAM ID to valid */ 6321 /* For MCTP RAM ID is 0x40 */ 6322 mcp->mb[10] = BIT_7 | 0x40; 6323 6324 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1| 6325 MBX_0; 6326 6327 mcp->in_mb = MBX_0; 6328 

int
qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
	uint32_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_MCTP_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[4] = MSW(size);
	mcp->mb[5] = LSW(size);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->mb[8] = MSW(addr);
	/* Setting RAM ID to valid */
	/* For MCTP RAM ID is 0x40 */
	mcp->mb[10] = BIT_7 | 0x40;

	/* mc is stack-allocated and not zeroed, so assign out_mb directly. */
	mcp->out_mb = MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
	    MBX_0;

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x114e,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
	void *dd_buf, uint size, uint options)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	dma_addr_t dd_dma;

	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
	    !IS_QLA28XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
	    "Entered %s.\n", __func__);

	dd_dma = dma_map_single(&vha->hw->pdev->dev,
	    dd_buf, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
		ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	memset(dd_buf, 0, size);

	mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
	mcp->mb[1] = options;
	mcp->mb[2] = MSW(LSD(dd_dma));
	mcp->mb[3] = LSW(LSD(dd_dma));
	mcp->mb[6] = MSW(MSD(dd_dma));
	mcp->mb[7] = LSW(MSD(dd_dma));
	mcp->mb[8] = size;
	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = MBX_TOV_SECONDS * 4;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
		    "Done %s.\n", __func__);
	}

	dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
	    size, DMA_FROM_DEVICE);

	return rval;
}
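
/*
 * Illustrative sketch, not part of the driver: invoking the D_Port
 * diagnostics command above.  Because qla26xx_dport_diagnostics() maps the
 * caller's buffer with dma_map_single(), the buffer must be DMA-able (for
 * example kmalloc'd), never on the stack.  The result layout, the buffer
 * size, and the 'options' word are firmware-defined and treated as opaque
 * placeholders here.
 */
static int __maybe_unused qla26xx_dport_diag_example(scsi_qla_host_t *vha,
	uint options)
{
	uint size = 1024;	/* hypothetical result-buffer size */
	void *dd_buf;
	int rval;

	dd_buf = kzalloc(size, GFP_KERNEL);
	if (!dd_buf)
		return QLA_MEMORY_ALLOC_FAILED;

	rval = qla26xx_dport_diagnostics(vha, dd_buf, size, options);

	/* Consume the diagnostic data here before freeing the buffer. */
	kfree(dd_buf);
	return rval;
}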
%x.\n", 6452 __func__, sp->name, rval); 6453 break; 6454 case QLA_SUCCESS: 6455 ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n", 6456 __func__, sp->name); 6457 break; 6458 default: 6459 ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n", 6460 __func__, sp->name, rval); 6461 break; 6462 } 6463 6464done_free_sp: 6465 sp->free(sp); 6466done: 6467 return rval; 6468} 6469 6470/* 6471 * qla24xx_gpdb_wait 6472 * NOTE: Do not call this routine from DPC thread 6473 */ 6474int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) 6475{ 6476 int rval = QLA_FUNCTION_FAILED; 6477 dma_addr_t pd_dma; 6478 struct port_database_24xx *pd; 6479 struct qla_hw_data *ha = vha->hw; 6480 mbx_cmd_t mc; 6481 6482 if (!vha->hw->flags.fw_started) 6483 goto done; 6484 6485 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 6486 if (pd == NULL) { 6487 ql_log(ql_log_warn, vha, 0xd047, 6488 "Failed to allocate port database structure.\n"); 6489 goto done_free_sp; 6490 } 6491 6492 memset(&mc, 0, sizeof(mc)); 6493 mc.mb[0] = MBC_GET_PORT_DATABASE; 6494 mc.mb[1] = fcport->loop_id; 6495 mc.mb[2] = MSW(pd_dma); 6496 mc.mb[3] = LSW(pd_dma); 6497 mc.mb[6] = MSW(MSD(pd_dma)); 6498 mc.mb[7] = LSW(MSD(pd_dma)); 6499 mc.mb[9] = vha->vp_idx; 6500 mc.mb[10] = opt; 6501 6502 rval = qla24xx_send_mb_cmd(vha, &mc); 6503 if (rval != QLA_SUCCESS) { 6504 ql_dbg(ql_dbg_mbx, vha, 0x1193, 6505 "%s: %8phC fail\n", __func__, fcport->port_name); 6506 goto done_free_sp; 6507 } 6508 6509 rval = __qla24xx_parse_gpdb(vha, fcport, pd); 6510 6511 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n", 6512 __func__, fcport->port_name); 6513 6514done_free_sp: 6515 if (pd) 6516 dma_pool_free(ha->s_dma_pool, pd, pd_dma); 6517done: 6518 return rval; 6519} 6520 6521int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, 6522 struct port_database_24xx *pd) 6523{ 6524 int rval = QLA_SUCCESS; 6525 uint64_t zero = 0; 6526 u8 current_login_state, last_login_state; 6527 6528 if (NVME_TARGET(vha->hw, fcport)) { 6529 current_login_state = pd->current_login_state >> 4; 6530 last_login_state = pd->last_login_state >> 4; 6531 } else { 6532 current_login_state = pd->current_login_state & 0xf; 6533 last_login_state = pd->last_login_state & 0xf; 6534 } 6535 6536 /* Check for logged in state. */ 6537 if (current_login_state != PDS_PRLI_COMPLETE) { 6538 ql_dbg(ql_dbg_mbx, vha, 0x119a, 6539 "Unable to verify login-state (%x/%x) for loop_id %x.\n", 6540 current_login_state, last_login_state, fcport->loop_id); 6541 rval = QLA_FUNCTION_FAILED; 6542 goto gpd_error_out; 6543 } 6544 6545 if (fcport->loop_id == FC_NO_LOOP_ID || 6546 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && 6547 memcmp(fcport->port_name, pd->port_name, 8))) { 6548 /* We lost the device mid way. */ 6549 rval = QLA_NOT_LOGGED_IN; 6550 goto gpd_error_out; 6551 } 6552 6553 /* Names are little-endian. */ 6554 memcpy(fcport->node_name, pd->node_name, WWN_SIZE); 6555 memcpy(fcport->port_name, pd->port_name, WWN_SIZE); 6556 6557 /* Get port_id of device. 

/*
 * qla24xx_gidlist_wait
 * NOTE: don't call this routine from DPC thread.
 */
int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
	void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
{
	int rval = QLA_FUNCTION_FAILED;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_ID_LIST;
	mc.mb[2] = MSW(id_list_dma);
	mc.mb[3] = LSW(id_list_dma);
	mc.mb[6] = MSW(MSD(id_list_dma));
	mc.mb[7] = LSW(MSD(id_list_dma));
	mc.mb[8] = 0;
	mc.mb[9] = vha->vp_idx;

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x119b,
		    "%s: fail\n", __func__);
	} else {
		*entries = mc.mb[1];
		ql_dbg(ql_dbg_mbx, vha, 0x119c,
		    "%s: done\n", __func__);
	}
done:
	return rval;
}

int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = 1;
	mcp->mb[2] = value;
	mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);

	ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);

	return rval;
}

int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = 0;
	mcp->out_mb = MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval == QLA_SUCCESS)
		*value = mc.mb[2];

	ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);

	return rval;
}
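
/*
 * Illustrative sketch, not part of the driver: reading the current ZIO
 * threshold with qla27xx_get_zio_threshold() and raising it only if it is
 * below a caller-supplied target.  The target value is a placeholder; what
 * constitutes a sensible threshold is adapter/firmware specific.
 */
static int __maybe_unused qla27xx_raise_zio_threshold_example(
	scsi_qla_host_t *vha, uint16_t target)
{
	uint16_t cur = 0;
	int rval;

	rval = qla27xx_get_zio_threshold(vha, &cur);
	if (rval != QLA_SUCCESS)
		return rval;

	if (cur >= target)
		return QLA_SUCCESS;

	return qla27xx_set_zio_threshold(vha, target);
}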
"Failed" : "Done", rval); 6675 6676 return rval; 6677} 6678 6679int 6680qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count) 6681{ 6682 struct qla_hw_data *ha = vha->hw; 6683 uint16_t iter, addr, offset; 6684 dma_addr_t phys_addr; 6685 int rval, c; 6686 u8 *sfp_data; 6687 6688 memset(ha->sfp_data, 0, SFP_DEV_SIZE); 6689 addr = 0xa0; 6690 phys_addr = ha->sfp_data_dma; 6691 sfp_data = ha->sfp_data; 6692 offset = c = 0; 6693 6694 for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) { 6695 if (iter == 4) { 6696 /* Skip to next device address. */ 6697 addr = 0xa2; 6698 offset = 0; 6699 } 6700 6701 rval = qla2x00_read_sfp(vha, phys_addr, sfp_data, 6702 addr, offset, SFP_BLOCK_SIZE, BIT_1); 6703 if (rval != QLA_SUCCESS) { 6704 ql_log(ql_log_warn, vha, 0x706d, 6705 "Unable to read SFP data (%x/%x/%x).\n", rval, 6706 addr, offset); 6707 6708 return rval; 6709 } 6710 6711 if (buf && (c < count)) { 6712 u16 sz; 6713 6714 if ((count - c) >= SFP_BLOCK_SIZE) 6715 sz = SFP_BLOCK_SIZE; 6716 else 6717 sz = count - c; 6718 6719 memcpy(buf, sfp_data, sz); 6720 buf += SFP_BLOCK_SIZE; 6721 c += sz; 6722 } 6723 phys_addr += SFP_BLOCK_SIZE; 6724 sfp_data += SFP_BLOCK_SIZE; 6725 offset += SFP_BLOCK_SIZE; 6726 } 6727 6728 return rval; 6729} 6730 6731int qla24xx_res_count_wait(struct scsi_qla_host *vha, 6732 uint16_t *out_mb, int out_mb_sz) 6733{ 6734 int rval = QLA_FUNCTION_FAILED; 6735 mbx_cmd_t mc; 6736 6737 if (!vha->hw->flags.fw_started) 6738 goto done; 6739 6740 memset(&mc, 0, sizeof(mc)); 6741 mc.mb[0] = MBC_GET_RESOURCE_COUNTS; 6742 6743 rval = qla24xx_send_mb_cmd(vha, &mc); 6744 if (rval != QLA_SUCCESS) { 6745 ql_dbg(ql_dbg_mbx, vha, 0xffff, 6746 "%s: fail\n", __func__); 6747 } else { 6748 if (out_mb_sz <= SIZEOF_IOCB_MB_REG) 6749 memcpy(out_mb, mc.mb, out_mb_sz); 6750 else 6751 memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG); 6752 6753 ql_dbg(ql_dbg_mbx, vha, 0xffff, 6754 "%s: done\n", __func__); 6755 } 6756done: 6757 return rval; 6758} 6759 6760int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts, 6761 uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr, 6762 uint32_t sfub_len) 6763{ 6764 int rval; 6765 mbx_cmd_t mc; 6766 mbx_cmd_t *mcp = &mc; 6767 6768 mcp->mb[0] = MBC_SECURE_FLASH_UPDATE; 6769 mcp->mb[1] = opts; 6770 mcp->mb[2] = region; 6771 mcp->mb[3] = MSW(len); 6772 mcp->mb[4] = LSW(len); 6773 mcp->mb[5] = MSW(sfub_dma_addr); 6774 mcp->mb[6] = LSW(sfub_dma_addr); 6775 mcp->mb[7] = MSW(MSD(sfub_dma_addr)); 6776 mcp->mb[8] = LSW(MSD(sfub_dma_addr)); 6777 mcp->mb[9] = sfub_len; 6778 mcp->out_mb = 6779 MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 6780 mcp->in_mb = MBX_2|MBX_1|MBX_0; 6781 mcp->tov = MBX_TOV_SECONDS; 6782 mcp->flags = 0; 6783 rval = qla2x00_mailbox_command(vha, mcp); 6784 6785 if (rval != QLA_SUCCESS) { 6786 ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x", 6787 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1], 6788 mcp->mb[2]); 6789 } 6790 6791 return rval; 6792} 6793 6794int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr, 6795 uint32_t data) 6796{ 6797 int rval; 6798 mbx_cmd_t mc; 6799 mbx_cmd_t *mcp = &mc; 6800 6801 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, 6802 "Entered %s.\n", __func__); 6803 6804 mcp->mb[0] = MBC_WRITE_REMOTE_REG; 6805 mcp->mb[1] = LSW(addr); 6806 mcp->mb[2] = MSW(addr); 6807 mcp->mb[3] = LSW(data); 6808 mcp->mb[4] = MSW(data); 6809 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 6810 mcp->in_mb = MBX_1|MBX_0; 6811 mcp->tov = MBX_TOV_SECONDS; 6812 mcp->flags = 0; 

int qla24xx_res_count_wait(struct scsi_qla_host *vha,
	uint16_t *out_mb, int out_mb_sz)
{
	int rval = QLA_FUNCTION_FAILED;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_RESOURCE_COUNTS;

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "%s: fail\n", __func__);
	} else {
		if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
			memcpy(out_mb, mc.mb, out_mb_sz);
		else
			memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);

		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "%s: done\n", __func__);
	}
done:
	return rval;
}

int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts,
	uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr,
	uint32_t sfub_len)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	mcp->mb[0] = MBC_SECURE_FLASH_UPDATE;
	mcp->mb[1] = opts;
	mcp->mb[2] = region;
	mcp->mb[3] = MSW(len);
	mcp->mb[4] = LSW(len);
	mcp->mb[5] = MSW(sfub_dma_addr);
	mcp->mb[6] = LSW(sfub_dma_addr);
	mcp->mb[7] = MSW(MSD(sfub_dma_addr));
	mcp->mb[8] = LSW(MSD(sfub_dma_addr));
	mcp->mb[9] = sfub_len;
	mcp->out_mb =
	    MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x",
		    __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
		    mcp->mb[2]);
	}

	return rval;
}

int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
	uint32_t data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(addr);
	mcp->mb[3] = LSW(data);
	mcp->mb[4] = MSW(data);
	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
	uint32_t *data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_READ_REMOTE_REG;
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(addr);
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	*data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led)
{
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int rval;

	if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n",
	    __func__, options);

	mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG;
	mcp->mb[1] = options;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	if (options & BIT_0) {
		if (options & BIT_1) {
			mcp->mb[2] = led[2];
			mcp->out_mb |= MBX_2;
		}
		if (options & BIT_2) {
			mcp->mb[3] = led[0];
			mcp->out_mb |= MBX_3;
		}
		if (options & BIT_3) {
			mcp->mb[4] = led[1];
			mcp->out_mb |= MBX_4;
		}
	} else {
		mcp->in_mb |= MBX_4|MBX_3|MBX_2;
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval) {
		ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n",
		    __func__, rval, mcp->mb[0], mcp->mb[1]);
		return rval;
	}

	if (options & BIT_0) {
		ha->beacon_blink_led = 0;
		ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__);
	} else {
		led[2] = mcp->mb[2];
		led[0] = mcp->mb[3];
		led[1] = mcp->mb[4];
		ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n",
		    __func__, led[0], led[1], led[2]);
	}

	return rval;
}