Lines Matching refs:vha (drivers/scsi/qla2xxx/qla_init.c)

37 static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha,
50 scsi_qla_host_t *vha = sp->vha;
59 if (vha && qla2x00_isp_reg_stat(vha->hw)) {
60 ql_log(ql_log_info, vha, 0x9008,
62 qla_pci_set_eeh_busy(vha);
87 qla2x00_get_async_timeout(struct scsi_qla_host *vha)
90 struct qla_hw_data *ha = vha->hw;
116 ql_dbg(ql_dbg_async, sp->vha, 0x507c,
121 ql_dbg(ql_dbg_async, sp->vha, 0x507c,
175 scsi_qla_host_t *vha = cmd_sp->vha;
181 sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
186 qla_vha_mark_busy(vha);
203 ql_dbg(ql_dbg_async, vha, 0x507c,
235 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
297 struct scsi_qla_host *vha = sp->vha;
301 ql_dbg(ql_dbg_disc, vha, 0x20dd,
306 if (!test_bit(UNLOADING, &vha->dpc_flags)) {
316 qla24xx_handle_plogi_done_event(vha, &ea);
324 qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
331 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
333 ql_log(ql_log_warn, vha, 0xffff,
340 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
352 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
356 if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) {
359 if (vha->hw->flags.edif_enabled &&
360 DBELL_ACTIVE(vha)) {
368 if (NVME_TARGET(vha->hw, fcport))
373 ql_dbg(ql_dbg_disc, vha, 0x2072,
381 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
412 qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
419 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
425 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
428 ql_dbg(ql_dbg_disc, vha, 0x2070,
448 qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
454 qla2x00_mark_device_lost(vha, fcport, 1);
461 struct scsi_qla_host *vha = sp->vha;
464 if (!test_bit(UNLOADING, &vha->dpc_flags))
465 qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
472 qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
479 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
485 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
488 ql_dbg(ql_dbg_disc, vha, 0x2070,
508 void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
513 ql_dbg(ql_dbg_disc, vha, 0x20d2,
523 ql_dbg(ql_dbg_disc, vha, 0x2066,
527 spin_lock_irqsave(&vha->work_lock, flags);
533 spin_unlock_irqrestore(&vha->work_lock, flags);
544 ql_dbg(ql_dbg_disc, vha, 0x20d3,
554 __qla24xx_handle_gpdb_event(vha, ea);
557 static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
561 e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI);
568 return qla2x00_post_work(vha, e);
573 struct scsi_qla_host *vha = sp->vha;
577 ql_dbg(ql_dbg_disc, vha, 0x2066,
594 qla24xx_handle_adisc_event(vha, &ea);
600 qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
608 ql_log(ql_log_warn, vha, 0xffff,
615 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
620 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
628 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
636 ql_dbg(ql_dbg_disc, vha, 0x206f,
651 qla2x00_post_async_adisc_work(vha, fcport, data);
655 static bool qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
657 struct qla_hw_data *ha = vha->hw;
668 * @vha: adapter state pointer.
677 static int qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
680 struct qla_hw_data *ha = vha->hw;
689 qla2x00_is_reserved_id(vha, dev->loop_id)) {
698 ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
702 ql_log(ql_log_warn, dev->vha, 0x2087,
711 struct qla_hw_data *ha = fcport->vha->hw;
714 qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
721 static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
733 ql_dbg(ql_dbg_disc, vha, 0xffff,
738 fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id, fcport->edif.enable);
745 ql_dbg(ql_dbg_disc, vha, 0x20de,
757 ql_dbg(ql_dbg_disc, vha, 0x20e0,
760 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
766 ql_dbg(ql_dbg_disc, vha, 0x20e1,
773 e = &vha->gnl.l[i];
802 ql_dbg(ql_dbg_disc, vha, 0x20e2,
819 ql_dbg(ql_dbg_disc, vha, 0x20e3,
835 qlt_find_sess_invalidate_other(vha, wwn,
848 switch (vha->hw->current_topology) {
853 vha, 0x20e4, "%s %d %8phC post gpdb\n",
861 qla2x00_post_async_adisc_work(vha, fcport,
865 if (vha->hw->flags.edif_enabled) {
867 qla24xx_post_gpdb_work(vha, fcport, 0);
874 qla2x00_find_new_loop_id(vha, fcport);
878 ql_dbg(ql_dbg_disc, vha, 0x20e5,
881 qla24xx_fcport_handle_login(vha, fcport);
898 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
907 qla2x00_post_async_adisc_work(vha, fcport,
911 if (vha->hw->flags.edif_enabled &&
912 DBELL_ACTIVE(vha)) {
914 qla24xx_post_gpdb_work(vha, fcport, 0);
923 qla24xx_fcport_handle_login(vha,
935 qla24xx_fcport_handle_login(vha,
945 switch (vha->hw->current_topology) {
949 e = &vha->gnl.l[i];
958 qla2x00_find_fcport_by_wwpn(vha,
962 vha, 0x20e5,
977 qla24xx_fcport_handle_login(vha, fcport);
990 &vha->dpc_flags);
993 ql_log(ql_log_info, vha, 0x705d,
996 &vha->dpc_flags);
999 ql_log(ql_log_info, vha, 0x705d,
1005 qla2xxx_wake_dpc(vha);
1011 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1015 qla24xx_fcport_handle_login(vha, fcport);
1025 struct scsi_qla_host *vha = sp->vha;
1035 ql_dbg(ql_dbg_disc, vha, 0x20e7,
1054 e = &vha->gnl.l[i];
1058 set_bit(loop_id, vha->hw->loop_id_map);
1061 ql_dbg(ql_dbg_disc, vha, 0x20e8,
1068 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1072 if (!list_empty(&vha->gnl.fcports))
1073 list_splice_init(&vha->gnl.fcports, &h);
1074 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1077 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1080 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1083 qla24xx_handle_gnl_done_event(vha, &ea);
1091 e = &vha->gnl.l[i];
1095 list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
1109 ql_dbg(ql_dbg_disc, vha, 0x2065,
1113 qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
1118 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1119 vha->gnl.sent = 0;
1120 if (!list_empty(&vha->gnl.fcports)) {
1122 list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports,
1126 if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS)
1130 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1136 int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
1143 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
1146 ql_dbg(ql_dbg_disc, vha, 0x20d9,
1149 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1155 list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
1156 if (vha->gnl.sent) {
1157 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1160 vha->gnl.sent = 1;
1161 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1164 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1172 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
1178 mb[2] = MSW(vha->gnl.ldma);
1179 mb[3] = LSW(vha->gnl.ldma);
1180 mb[6] = MSW(MSD(vha->gnl.ldma));
1181 mb[7] = LSW(MSD(vha->gnl.ldma));
1182 mb[8] = vha->gnl.size;
1183 mb[9] = vha->vp_idx;
1185 ql_dbg(ql_dbg_disc, vha, 0x20da,
1204 int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
1208 e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
1214 return qla2x00_post_work(vha, e);
1219 struct scsi_qla_host *vha = sp->vha;
1220 struct qla_hw_data *ha = vha->hw;
1225 ql_dbg(ql_dbg_disc, vha, 0x20db,
1238 qla24xx_handle_gpdb_event(vha, &ea);
1247 int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
1251 if (vha->host->active_mode == MODE_TARGET)
1254 e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
1260 return qla2x00_post_work(vha, e);
1265 struct scsi_qla_host *vha = sp->vha;
1269 ql_dbg(ql_dbg_disc, vha, 0x2129,
1275 if (!test_bit(UNLOADING, &vha->dpc_flags)) {
1288 qla24xx_handle_prli_done_event(vha, &ea);
1295 qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
1301 if (!vha->flags.online) {
1302 ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
1309 qla_dual_mode_enabled(vha)) {
1310 ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
1315 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1324 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
1330 if (NVME_TARGET(vha->hw, fcport))
1333 ql_dbg(ql_dbg_disc, vha, 0x211b,
1336 fcport->login_retry, fcport->fc4_type, vha->hw->fc4_type_priority,
1337 NVME_TARGET(vha->hw, fcport) ? "nvme" : "fcp");
1342 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1355 int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
1359 e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
1366 return qla2x00_post_work(vha, e);
1369 int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
1377 struct qla_hw_data *ha = vha->hw;
1380 ql_log(ql_log_warn, vha, 0xffff,
1387 if (!vha->flags.online || fcport->flags & FCF_ASYNC_SENT) {
1388 ql_log(ql_log_warn, vha, 0xffff,
1390 __func__, fcport->port_name, vha->flags.online, fcport->flags);
1394 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1405 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
1410 ql_log(ql_log_warn, vha, 0xd043,
1422 mb[9] = vha->vp_idx;
1429 ql_dbg(ql_dbg_disc, vha, 0x20dc,
1446 qla24xx_post_gpdb_work(vha, fcport, opt);
1451 void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
1455 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1460 vha->fcport_count++;
1463 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1465 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1472 ql_dbg(ql_dbg_disc, vha, 0x20d6,
1477 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1480 static int qla_chk_secure_login(scsi_qla_host_t *vha, fc_port_t *fcport,
1486 ql_dbg(ql_dbg_disc, vha, 0x104d,
1491 ql_dbg(ql_dbg_disc, vha, 0x104d,
1496 if (vha->hw->flags.edif_enabled) {
1505 qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
1508 if (DBELL_ACTIVE(vha)) {
1509 ql_dbg(ql_dbg_disc, vha, 0x20ef,
1514 qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED,
1519 } else if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
1520 ql_dbg(ql_dbg_disc, vha, 0x2117,
1523 qla24xx_post_prli_work(vha, fcport);
1531 void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
1542 ql_dbg(ql_dbg_disc, vha, 0x20d2,
1548 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC\n",
1553 if (NVME_TARGET(vha->hw, fcport))
1561 ql_dbg(ql_dbg_disc, vha, 0x20d3,
1568 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
1575 __qla24xx_parse_gpdb(vha, fcport, pd);
1578 if (qla_chk_secure_login(vha, fcport, pd)) {
1579 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
1588 if (qla_dual_mode_enabled(vha) ||
1589 qla_ini_mode_enabled(vha)) {
1591 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1593 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
1599 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
1604 __qla24xx_handle_gpdb_event(vha, ea);
1607 static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1612 ql_dbg(ql_dbg_disc, vha, 0x307b,
1617 if (qla_tgt_mode_enabled(vha))
1620 if (qla_dual_mode_enabled(vha)) {
1621 if (N2N_TOPO(vha->hw)) {
1624 mywwn = wwn_to_u64(vha->port_name);
1644 rc = qla2x00_find_new_loop_id(vha, fcport);
1646 ql_dbg(ql_dbg_disc, vha, 0x20e6,
1654 ql_dbg(ql_dbg_disc, vha, 0x20bf,
1657 qla2x00_post_async_login_work(vha, fcport, NULL);
1661 int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1666 ql_dbg(ql_dbg_disc, vha, 0x20d8,
1679 qla_dual_mode_enabled(vha) &&
1685 !N2N_TOPO(vha->hw)) {
1687 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1693 if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw))
1697 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1703 switch (vha->hw->current_topology) {
1709 qla2x00_find_new_loop_id(vha,
1715 qla_post_els_plogi_work(vha, fcport);
1717 ql_log(ql_log_info, vha, 0x705d,
1722 qla24xx_post_gnl_work(vha, fcport);
1727 ql_dbg(ql_dbg_disc, vha, 0x20bd,
1730 qla24xx_post_gnl_work(vha, fcport);
1732 qla_chk_n2n_b4_login(vha, fcport);
1739 switch (vha->hw->current_topology) {
1742 ql_dbg(ql_dbg_disc, vha, 0x2118,
1746 vha->hw->base_qpair->chip_reset;
1747 qla24xx_post_gpdb_work(vha, fcport, 0);
1749 ql_dbg(ql_dbg_disc, vha, 0x2118,
1752 NVME_TARGET(vha->hw, fcport) ? "NVME" :
1754 qla24xx_post_prli_work(vha, fcport);
1759 ql_dbg(ql_dbg_disc, vha, 0x20d8,
1765 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1768 qla_chk_n2n_b4_login(vha, fcport);
1774 if (N2N_TOPO(vha->hw))
1775 qla_chk_n2n_b4_login(vha, fcport);
1783 qla2x00_post_async_adisc_work(vha, fcport, data);
1787 if (vha->hw->flags.edif_enabled)
1791 ql_dbg(ql_dbg_disc, vha, 0x2118,
1794 NVME_TARGET(vha->hw, fcport) ? "NVME" : "FC");
1795 qla24xx_post_prli_work(vha, fcport);
1805 ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
1812 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1822 int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
1827 e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
1838 return qla2x00_post_work(vha, e);
1841 void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
1848 fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
1853 ql_dbg(ql_dbg_disc, vha, 0x2115,
1859 if (vha->hw->flags.edif_enabled && DBELL_ACTIVE(vha)) {
1883 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1895 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1908 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1919 spin_lock_irqsave(&vha->work_lock, flags);
1920 if (vha->scan.scan_flags == 0) {
1921 ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__);
1922 vha->scan.scan_flags |= SF_QUEUED;
1923 schedule_delayed_work(&vha->scan.scan_work, 5);
1925 spin_unlock_irqrestore(&vha->work_lock, flags);
1928 void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
1933 if (test_bit(UNLOADING, &vha->dpc_flags))
1936 ql_dbg(ql_dbg_disc, vha, 0x2102,
1946 ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gnl\n",
1948 qla24xx_post_gnl_work(vha, fcport);
1952 qla24xx_fcport_handle_login(vha, fcport);
1955 void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
1958 if (N2N_TOPO(vha->hw) && fcport_is_smaller(ea->fcport) &&
1959 vha->hw->flags.edif_enabled) {
1961 qla24xx_post_gpdb_work(vha, ea->fcport, 0);
1966 if (vha->host->active_mode == MODE_TARGET)
1969 ql_dbg(ql_dbg_disc, vha, 0x2118,
1972 qla24xx_post_prli_work(vha, ea->fcport);
1994 qla2x00_handle_rscn(fcport->vha, &ea);
2032 ql_dbg(ql_dbg_taskm, sp->vha, 0x8004,
2045 if (_chip_gen != sp->vha->hw->chip_reset || _login_gen != sp->fcport->login_gen) {\
2066 struct scsi_qla_host *vha = arg->vha;
2074 ql_dbg(ql_dbg_taskm, vha, 0x8039,
2081 chip_gen = vha->hw->chip_reset;
2085 sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
2091 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha), qla_marker_sp_done);
2099 tm_iocb->u.tmf.vp_index = vha->vp_idx;
2103 ql_dbg(ql_dbg_taskm, vha, 0x8006,
2109 ql_log(ql_log_warn, vha, 0x8031,
2118 ql_log(ql_log_warn, vha, 0x8019,
2144 return qla2x00_eh_wait_for_pending_commands(arg->vha,
2147 return qla2x00_eh_wait_for_pending_commands(arg->vha,
2154 struct scsi_qla_host *vha = arg->vha;
2163 ql_dbg(ql_dbg_taskm, vha, 0x8032,
2170 chip_gen = vha->hw->chip_reset;
2174 sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
2178 qla_vha_mark_busy(vha);
2181 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha),
2192 ql_dbg(ql_dbg_taskm, vha, 0x802f,
2204 ql_log(ql_log_warn, vha, 0x8030,
2208 if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
2211 ql_log(ql_log_info, vha, 0x803e,
2213 jiffies_to_msecs(jiffies - jif), vha->host_no,
2217 if (chip_gen == vha->hw->chip_reset && login_gen == fcport->login_gen) {
2220 ql_log(ql_log_info, vha, 0x803e,
2222 vha->host_no, fcport->d_id.b24, arg->lun);
2238 struct scsi_qla_host *vha = arg->vha;
2239 struct qla_hw_data *ha = vha->hw;
2251 struct scsi_qla_host *vha = arg->vha;
2252 struct qla_hw_data *ha = vha->hw;
2262 ql_log(ql_log_warn, vha, 0x802c,
2264 vha->host_no, fcport->d_id.b24, arg->lun);
2278 ql_log(ql_log_warn, vha, 0x802c,
2304 struct scsi_qla_host *vha = fcport->vha;
2311 a.vha = fcport->vha;
2326 a.qpair = vha->hw->base_qpair;
2341 struct scsi_qla_host *vha = fcport->vha;
2356 return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
2363 qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
2371 ql_dbg(ql_dbg_disc, vha, 0x2118,
2375 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
2383 qla24xx_post_gpdb_work(vha, ea->fcport, 0);
2387 ql_dbg(ql_dbg_disc, vha, 0x2118,
2390 vha->hw->fc4_type_priority == FC4_PRIORITY_FCP ?
2404 if (N2N_TOPO(vha->hw)) {
2406 vha->hw->login_retry_count &&
2413 vha->hw->login_retry_count) {
2415 vha->relogin_jif = jiffies + 2 * HZ;
2420 set_bit(N2N_LINK_RESET, &vha->dpc_flags);
2421 qla2xxx_wake_dpc(vha);
2423 ql_log(ql_log_warn, vha, 0x2119,
2443 qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
2451 ql_dbg(ql_dbg_disc, vha, 0xffff,
2460 ql_dbg(ql_dbg_disc, vha, 0x20ea,
2468 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2474 ql_dbg(ql_dbg_disc, vha, 0x20d3,
2477 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2480 ql_dbg(ql_dbg_disc, vha, 0x20d3,
2498 if (vha->hw->flags.edif_enabled) {
2499 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
2500 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
2501 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
2505 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
2507 qla24xx_post_gpdb_work(vha, ea->fcport, 0);
2509 if (NVME_TARGET(vha->hw, fcport)) {
2510 ql_dbg(ql_dbg_disc, vha, 0x2117,
2513 qla24xx_post_prli_work(vha, fcport);
2515 ql_dbg(ql_dbg_disc, vha, 0x20ea,
2520 set_bit(fcport->loop_id, vha->hw->loop_id_map);
2521 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
2522 fcport->chip_reset = vha->hw->base_qpair->chip_reset;
2526 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
2528 qla24xx_post_gpdb_work(vha, fcport, 0);
2533 ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
2545 ql_dbg(ql_dbg_disc, vha, 0x20ec,
2550 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
2552 qla24xx_post_gnl_work(vha, ea->fcport);
2556 qlt_find_sess_invalidate_other(vha,
2569 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2574 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2580 set_bit(lid, vha->hw->loop_id_map);
2596 qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
2599 struct qla_hw_data *ha = vha->hw;
2603 qla83xx_idc_lock(vha, 0);
2612 if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
2613 ql_dbg(ql_dbg_p3p, vha, 0xb077,
2620 qla83xx_reset_ownership(vha);
2628 qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
2632 qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);
2635 qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
2641 ql_log(ql_log_warn, vha, 0xb07d,
2644 __qla83xx_clear_drv_presence(vha);
2649 qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
2651 qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);
2655 if (!qla81xx_get_port_config(vha, config))
2656 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
2660 rval = qla83xx_idc_state_handler(vha);
2663 qla83xx_idc_unlock(vha, 0);
2679 qla2x00_initialize_adapter(scsi_qla_host_t *vha)
2682 struct qla_hw_data *ha = vha->hw;
2686 memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
2687 memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
2690 vha->flags.online = 0;
2692 vha->flags.reset_active = 0;
2695 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
2696 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
2697 atomic_set(&vha->loop_state, LOOP_DOWN);
2698 vha->device_flags = DFLG_NO_CABLE;
2699 vha->dpc_flags = 0;
2700 vha->flags.management_server_logged_in = 0;
2701 vha->marker_needed = 0;
2708 ql_dbg(ql_dbg_init, vha, 0x0040,
2710 rval = ha->isp_ops->pci_config(vha);
2712 ql_log(ql_log_warn, vha, 0x0044,
2717 ha->isp_ops->reset_chip(vha);
2723 ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n",
2728 rval = qla2xxx_get_flash_info(vha);
2730 ql_log(ql_log_fatal, vha, 0x004f,
2736 qla8044_read_reset_template(vha);
2743 qla8044_set_idc_dontreset(vha);
2746 ha->isp_ops->get_flash_version(vha, req->ring);
2747 ql_dbg(ql_dbg_init, vha, 0x0061,
2753 ha->isp_ops->nvram_config(vha);
2759 ql_log(ql_log_info, vha, 0xffff, "FC4 priority set to %s\n",
2764 ql_log(ql_log_info, vha, 0x0077,
2765 "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
2769 ql_dbg(ql_dbg_init, vha, 0x0078,
2778 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
2779 rval = ha->isp_ops->chip_diag(vha);
2782 rval = qla2x00_setup_chip(vha);
2788 ha->cs84xx = qla84xx_get_chip(vha);
2790 ql_log(ql_log_warn, vha, 0x00d0,
2796 if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
2797 rval = qla2x00_init_rings(vha);
2807 rval = qla84xx_init_chip(vha);
2809 ql_log(ql_log_warn, vha, 0x00d4,
2811 qla84xx_put_chip(vha);
2817 rval = qla83xx_nic_core_fw_load(vha);
2819 ql_log(ql_log_warn, vha, 0x0124,
2824 qla24xx_read_fcp_prio_cfg(vha);
2827 qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
2829 qla25xx_set_driver_version(vha, QLA2XXX_VERSION);
2836 * @vha: HA context
2841 qla2100_pci_config(scsi_qla_host_t *vha)
2845 struct qla_hw_data *ha = vha->hw;
2867 * @vha: HA context
2872 qla2300_pci_config(scsi_qla_host_t *vha)
2877 struct qla_hw_data *ha = vha->hw;
2949 * @vha: HA context
2954 qla24xx_pci_config(scsi_qla_host_t *vha)
2958 struct qla_hw_data *ha = vha->hw;
2993 * @vha: HA context
2998 qla25xx_pci_config(scsi_qla_host_t *vha)
3001 struct qla_hw_data *ha = vha->hw;
3024 * @vha: HA context
3029 qla2x00_isp_firmware(scsi_qla_host_t *vha)
3034 struct qla_hw_data *ha = vha->hw;
3040 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
3043 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
3046 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
3052 ql_dbg(ql_dbg_init, vha, 0x007a,
3060 * @vha: HA context
3065 qla2x00_reset_chip(scsi_qla_host_t *vha)
3068 struct qla_hw_data *ha = vha->hw;
3207 * @vha: HA context
3212 qla81xx_reset_mpi(scsi_qla_host_t *vha)
3216 if (!IS_QLA81XX(vha->hw))
3219 return qla81xx_write_mpi_register(vha, mb);
3223 qla_chk_risc_recovery(scsi_qla_host_t *vha)
3225 struct qla_hw_data *ha = vha->hw;
3246 ql_log(ql_log_warn, vha, 0x1015,
3249 ql_log(ql_log_warn, vha, 0x1015,
3253 ql_log(ql_log_warn, vha, 0x1015,
3257 ql_log(ql_log_warn, vha, 0x1015,
3267 * @vha: HA context
3272 qla24xx_reset_risc(scsi_qla_host_t *vha)
3275 struct qla_hw_data *ha = vha->hw;
3297 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
3323 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
3341 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
3347 if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
3348 if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
3350 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3351 set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
3358 vha->flags.online = 0;
3378 if (print && qla_chk_risc_recovery(vha))
3385 ql_log(ql_log_warn, vha, 0x015e,
3393 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
3400 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
3411 qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
3413 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
3420 qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
3422 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
3429 qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
3437 if (vha->hw->pdev->subsystem_device != 0x0175 &&
3438 vha->hw->pdev->subsystem_device != 0x0240)
3441 wrt_reg_dword(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
3448 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
3449 qla25xx_read_risc_sema_reg(vha, &wd32);
3464 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
3468 qla25xx_read_risc_sema_reg(vha, &wd32);
3478 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);
3483 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);
3491 * @vha: HA context
3496 qla24xx_reset_chip(scsi_qla_host_t *vha)
3498 struct qla_hw_data *ha = vha->hw;
3508 qla25xx_manipulate_risc_semaphore(vha);
3511 rval = qla24xx_reset_risc(vha);
3518 * @vha: HA context
3523 qla2x00_chip_diag(scsi_qla_host_t *vha)
3526 struct qla_hw_data *ha = vha->hw;
3537 ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n",
3560 ql_dbg(ql_dbg_init, vha, 0x007c,
3582 ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");
3590 ql_log(ql_log_warn, vha, 0x0062,
3611 ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");
3620 ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
3621 rval = qla2x00_mbx_reg_test(vha);
3623 ql_log(ql_log_warn, vha, 0x0080,
3632 ql_log(ql_log_info, vha, 0x0081,
3642 * @vha: HA context
3647 qla24xx_chip_diag(scsi_qla_host_t *vha)
3650 struct qla_hw_data *ha = vha->hw;
3658 rval = qla2x00_mbx_reg_test(vha);
3660 ql_log(ql_log_warn, vha, 0x0082,
3671 qla2x00_init_fce_trace(scsi_qla_host_t *vha)
3676 struct qla_hw_data *ha = vha->hw;
3686 ql_dbg(ql_dbg_init, vha, 0x00bd,
3696 ql_log(ql_log_warn, vha, 0x00be,
3702 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
3705 ql_log(ql_log_warn, vha, 0x00bf,
3711 ql_dbg(ql_dbg_init, vha, 0x00c0,
3720 qla2x00_init_eft_trace(scsi_qla_host_t *vha)
3725 struct qla_hw_data *ha = vha->hw;
3731 ql_dbg(ql_dbg_init, vha, 0x00bd,
3741 ql_log(ql_log_warn, vha, 0x00c1,
3747 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
3749 ql_log(ql_log_warn, vha, 0x00c2,
3755 ql_dbg(ql_dbg_init, vha, 0x00c3,
3763 qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
3765 qla2x00_init_fce_trace(vha);
3766 qla2x00_init_eft_trace(vha);
3770 qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
3774 struct qla_hw_data *ha = vha->hw;
3780 ql_dbg(ql_dbg_init, vha, 0x00bd,
3823 qla2x00_init_fce_trace(vha);
3826 qla2x00_init_eft_trace(vha);
3837 ql_dbg(ql_dbg_init, vha, 0x00ba,
3841 ql_dbg(ql_dbg_init, vha, 0x00fa,
3844 vha, fwdt->template);
3845 ql_dbg(ql_dbg_init, vha, 0x00fa,
3870 ql_dbg(ql_dbg_init, vha, 0x00c5,
3877 ql_log(ql_log_warn, vha, 0x00c4,
3887 ql_dbg(ql_dbg_init, vha, 0x00c5,
3896 ql_dbg(ql_dbg_init, vha, 0x00c5,
3934 qla81xx_mpi_sync(scsi_qla_host_t *vha)
3941 if (!IS_QLA81XX(vha->hw))
3944 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
3946 ql_log(ql_log_warn, vha, 0x0105,
3951 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
3952 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
3954 ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
3964 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
3966 ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
3970 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
3972 ql_log(ql_log_warn, vha, 0x006d,
4036 static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
4039 struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data;
4045 ql_dbg(ql_dbg_init, vha, 0x015a,
4050 ql_dbg(ql_dbg_init, vha, 0x015c,
4065 ql_dbg(ql_dbg_init, vha, 0x0160,
4078 ql_dbg(ql_dbg_init, vha, 0x0196,
4090 ql_dbg(ql_dbg_init, vha, 0x016e,
4094 ql_dbg(ql_dbg_init, vha, 0x016f,
4097 ql_dbg(ql_dbg_init, vha, 0x0170,
4100 ql_dbg(ql_dbg_init, vha, 0x0189,
4103 ql_dbg(ql_dbg_init, vha, 0x018a,
4106 ql_dbg(ql_dbg_init, vha, 0x0194,
4109 ql_dbg(ql_dbg_init, vha, 0x0195,
4117 * @vha: adapter state pointer.
4129 qla24xx_detect_sfp(scsi_qla_host_t *vha)
4133 struct qla_hw_data *ha = vha->hw;
4152 if (!IS_BPM_ENABLED(vha))
4155 rc = qla2x00_read_sfp_dev(vha, NULL, 0);
4160 a = (struct sff_8247_a0 *)vha->hw->sfp_data;
4161 qla2xxx_print_sfp_info(vha);
4176 ql_dbg(ql_dbg_async, vha, 0x507b,
4189 struct qla_hw_data *ha = qpair->vha->hw;
4203 void qla_init_iocb_limit(scsi_qla_host_t *vha)
4206 struct qla_hw_data *ha = vha->hw;
4229 void qla_adjust_iocb_limit(scsi_qla_host_t *vha)
4232 struct qla_hw_data *ha = vha->hw;
4244 * @vha: HA context
4249 qla2x00_setup_chip(scsi_qla_host_t *vha)
4253 struct qla_hw_data *ha = vha->hw;
4260 rval = ha->isp_ops->load_risc(vha, &srisc_address);
4262 qla2x00_stop_firmware(vha);
4276 qla81xx_mpi_sync(vha);
4280 rval = ha->isp_ops->load_risc(vha, &srisc_address);
4282 ql_dbg(ql_dbg_init, vha, 0x00c9,
4285 rval = qla2x00_verify_checksum(vha, srisc_address);
4288 ql_dbg(ql_dbg_init, vha, 0x00ca,
4294 if (qla_is_exch_offld_enabled(vha))
4297 rval = qla2x00_execute_fw(vha, srisc_address);
4301 if (!done_once++ && qla24xx_detect_sfp(vha)) {
4302 ql_dbg(ql_dbg_init, vha, 0x00ca,
4305 ha->isp_ops->reset_chip(vha);
4306 ha->isp_ops->chip_diag(vha);
4311 qla27xx_set_zio_threshold(vha,
4314 rval = qla2x00_set_exlogins_buffer(vha);
4318 rval = qla2x00_set_exchoffld_buffer(vha);
4325 qla82xx_check_md_needed(vha);
4327 rval = qla2x00_get_fw_version(vha);
4340 qla2x00_get_resource_cnts(vha);
4341 qla_init_iocb_limit(vha);
4348 vha->req);
4353 qla2x00_alloc_offload_mem(vha);
4356 qla2x00_alloc_fw_dump(vha);
4362 ql_log(ql_log_fatal, vha, 0x00cd,
4370 qla25xx_set_els_cmds_supported(vha);
4392 rval = qla81xx_fac_get_sector_size(vha, &size);
4397 ql_log(ql_log_warn, vha, 0x00ce,
4410 ql_log(ql_log_fatal, vha, 0x00cf,
4444 * @vha: HA context
4449 qla2x00_update_fw_options(scsi_qla_host_t *vha)
4452 struct qla_hw_data *ha = vha->hw;
4455 qla2x00_get_fw_options(vha, ha->fw_options);
4461 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
4463 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
4522 ql_dbg(ql_dbg_disc, vha, 0x2100,
4528 qla2x00_set_fw_options(vha, ha->fw_options);
4532 qla24xx_update_fw_options(scsi_qla_host_t *vha)
4535 struct qla_hw_data *ha = vha->hw;
4547 ql_dbg(ql_dbg_disc, vha, 0x2101,
4555 if (qla_tgt_mode_enabled(vha) ||
4556 qla_dual_mode_enabled(vha))
4568 if (qla_tgt_mode_enabled(vha) ||
4569 qla_dual_mode_enabled(vha))
4585 DBELL_ACTIVE(vha)) {
4602 ql_dbg(ql_dbg_init, vha, 0x00e8,
4605 ha->fw_options[3], vha->host->active_mode);
4608 qla2x00_set_fw_options(vha, ha->fw_options);
4614 rval = qla2x00_set_serdes_params(vha,
4619 ql_log(ql_log_warn, vha, 0x0104,
4625 qla2x00_config_rings(struct scsi_qla_host *vha)
4627 struct qla_hw_data *ha = vha->hw;
4648 qla24xx_config_rings(struct scsi_qla_host *vha)
4650 struct qla_hw_data *ha = vha->hw;
4682 ql_dbg(ql_dbg_init, vha, 0x0019,
4699 ql_dbg(ql_dbg_init, vha, 0x00fe,
4717 qlt_24xx_config_rings(vha);
4721 ql_dbg(ql_dbg_init, vha, 0x00fd,
4733 * @vha: HA context
4741 qla2x00_init_rings(scsi_qla_host_t *vha)
4746 struct qla_hw_data *ha = vha->hw;
4788 qlt_init_atio_q_entries(vha);
4790 ha->isp_ops->config_rings(vha);
4795 rval = qlafx00_init_firmware(vha, ha->init_cb_size);
4800 ha->isp_ops->update_fw_options(vha);
4802 ql_dbg(ql_dbg_init, vha, 0x00d1,
4821 ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
4827 ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
4838 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
4842 ql_log(ql_log_fatal, vha, 0x00d2,
4845 ql_dbg(ql_dbg_init, vha, 0x00d3,
4847 vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
4855 * @vha: HA context
4860 qla2x00_fw_ready(scsi_qla_host_t *vha)
4867 struct qla_hw_data *ha = vha->hw;
4869 if (IS_QLAFX00(vha->hw))
4870 return qlafx00_fw_ready(vha);
4893 if (!vha->flags.init_done)
4894 ql_log(ql_log_info, vha, 0x801e,
4899 rval = qla2x00_get_firmware_state(vha, state);
4902 vha->device_flags &= ~DFLG_NO_CABLE;
4905 ql_dbg(ql_dbg_taskm, vha, 0x801f,
4910 ql_dbg(ql_dbg_taskm, vha, 0x8028,
4914 rval = qla84xx_init_chip(vha);
4917 vha, 0x8007,
4926 ql_dbg(ql_dbg_taskm, vha, 0x8008,
4932 ql_dbg(ql_dbg_taskm, vha, 0x8037,
4935 qla2x00_get_retry_cnt(vha, &ha->retry_count,
4944 if (atomic_read(&vha->loop_down_timer) &&
4950 ql_log(ql_log_info, vha, 0x8038,
4953 vha->device_flags |= DFLG_NO_CABLE;
4971 ql_dbg(ql_dbg_taskm, vha, 0x803a,
4975 if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
4976 ql_log(ql_log_warn, vha, 0x803b,
4997 qla2x00_configure_hba(scsi_qla_host_t *vha)
5007 struct qla_hw_data *ha = vha->hw;
5013 rval = qla2x00_get_adapter_id(vha,
5016 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
5019 ql_dbg(ql_dbg_disc, vha, 0x2008,
5022 ql_log(ql_log_warn, vha, 0x2009,
5024 if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
5026 ql_log(ql_log_warn, vha, 0x1151,
5028 if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
5031 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
5037 ql_log(ql_log_info, vha, 0x200a,
5042 vha->loop_id = loop_id;
5050 ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
5057 ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
5064 ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
5072 ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
5080 ql_dbg(ql_dbg_disc, vha, 0x200f,
5095 if (vha->hw->flags.edif_enabled) {
5097 qla_update_host_map(vha, id);
5099 qla_update_host_map(vha, id);
5102 if (!vha->flags.init_done)
5103 ql_log(ql_log_info, vha, 0x2010,
5105 connect_type, vha->loop_id);
5111 qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
5117 struct qla_hw_data *ha = vha->hw;
5157 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
5164 static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
5167 struct qla_hw_data *ha = vha->hw;
5197 qla2x00_nvram_config(scsi_qla_host_t *vha)
5203 struct qla_hw_data *ha = vha->hw;
5219 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
5223 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
5225 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
5232 ql_log(ql_log_warn, vha, 0x0064,
5235 ql_log(ql_log_warn, vha, 0x0065,
5273 qla2xxx_nvram_wwn_from_ofw(vha, nv);
5313 qla2x00_set_model_info(vha, nv->model_number,
5394 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
5395 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
5462 vha->flags.process_response_queue = 1;
5465 if (!vha->flags.init_done) {
5473 vha->flags.process_response_queue = 0;
5477 ql_log(ql_log_info, vha, 0x0068,
5483 vha->flags.process_response_queue = 1;
5488 ql_log(ql_log_warn, vha, 0x0069,
5503 ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
5513 * @vha: HA context
5519 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
5527 fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
5531 ql_log(ql_log_warn, vha, 0xd049,
5538 fcport->vha = vha;
5548 fcport->login_retry = vha->hw->login_retry_count;
5549 fcport->chip_reset = vha->hw->base_qpair->chip_reset;
5556 ql_log(ql_log_warn, vha, 0xd049,
5586 dma_free_coherent(&fcport->vha->hw->pdev->dev,
5602 static void qla_get_login_template(scsi_qla_host_t *vha)
5604 struct qla_hw_data *ha = vha->hw;
5611 rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
5614 ql_dbg(ql_dbg_init, vha, 0x00d1,
5638 qla2x00_configure_loop(scsi_qla_host_t *vha)
5642 struct qla_hw_data *ha = vha->hw;
5647 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
5648 rval = qla2x00_configure_hba(vha);
5650 ql_dbg(ql_dbg_disc, vha, 0x2013,
5656 save_flags = flags = vha->dpc_flags;
5657 ql_dbg(ql_dbg_disc, vha, 0x2014,
5664 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5665 clear_bit(RSCN_UPDATE, &vha->dpc_flags);
5667 qla2x00_get_data_rate(vha);
5668 qla_get_login_template(vha);
5682 } else if (!vha->flags.online ||
5689 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
5690 ql_dbg(ql_dbg_disc, vha, 0x2015,
5694 rval = qla2x00_configure_local_loop(vha);
5698 if (LOOP_TRANSITION(vha)) {
5699 ql_dbg(ql_dbg_disc, vha, 0x2099,
5704 rval = qla2x00_configure_fabric(vha);
5708 if (atomic_read(&vha->loop_down_timer) ||
5709 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
5712 atomic_set(&vha->loop_state, LOOP_READY);
5713 ql_dbg(ql_dbg_disc, vha, 0x2069,
5721 if (ha->flags.edif_enabled && DBELL_INACTIVE(vha))
5722 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP,
5729 if (qla_tgt_mode_enabled(vha) ||
5730 qla_dual_mode_enabled(vha)) {
5732 qlt_24xx_process_atio_queue(vha, 0);
5740 ql_dbg(ql_dbg_disc, vha, 0x206a,
5743 ql_dbg(ql_dbg_disc, vha, 0x206b,
5745 __func__, vha->port_name, vha->d_id.b24);
5749 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
5751 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5753 set_bit(RSCN_UPDATE, &vha->dpc_flags);
5760 static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
5765 ql_dbg(ql_dbg_disc, vha, 0x206a, "%s %d.\n", __func__, __LINE__);
5767 if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags))
5768 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
5770 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5772 qla24xx_fcport_handle_login(vha, fcport);
5777 spin_lock_irqsave(&vha->work_lock, flags);
5778 vha->scan.scan_retry++;
5779 spin_unlock_irqrestore(&vha->work_lock, flags);
5781 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
5782 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5783 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5789 qla_reinitialize_link(scsi_qla_host_t *vha)
5793 atomic_set(&vha->loop_state, LOOP_DOWN);
5794 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
5795 rval = qla2x00_full_login_lip(vha);
5797 ql_dbg(ql_dbg_disc, vha, 0xd050, "Link reinitialized\n");
5799 ql_dbg(ql_dbg_disc, vha, 0xd051,
5815 qla2x00_configure_local_loop(scsi_qla_host_t *vha)
5825 struct qla_hw_data *ha = vha->hw;
5830 return qla2x00_configure_n2n_loop(vha);
5837 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
5842 ql_dbg(ql_dbg_disc, vha, 0x2011,
5844 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
5848 spin_lock_irqsave(&vha->work_lock, flags);
5849 vha->scan.scan_retry++;
5850 spin_unlock_irqrestore(&vha->work_lock, flags);
5852 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
5856 rc = qla2x00_get_fcal_position_map(vha, NULL,
5863 qla_reinitialize_link(vha);
5866 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5867 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5870 vha->scan.scan_retry = 0;
5873 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5878 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
5880 ql_log(ql_log_warn, vha, 0x2012,
5904 if (area && domain && ((area != vha->d_id.b.area) ||
5905 (domain != vha->d_id.b.domain)) &&
5923 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
5925 ql_dbg(ql_dbg_disc, vha, 0x2097,
5931 ql_dbg(ql_dbg_disc, vha, 0x2105,
5933 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5938 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5942 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5955 fcport->login_retry = vha->hw->login_retry_count;
5956 ql_dbg(ql_dbg_disc, vha, 0x2135,
5967 list_add_tail(&new_fcport->list, &vha->vp_fcports);
5972 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5974 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
5977 ql_log(ql_log_warn, vha, 0xd031,
5982 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5986 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5992 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5993 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5997 if ((qla_dual_mode_enabled(vha) ||
5998 qla_ini_mode_enabled(vha)) &&
6000 qla2x00_mark_device_lost(vha, fcport,
6006 ql_dbg(ql_dbg_disc, vha, 0x20f0,
6018 qla24xx_fcport_handle_login(vha, fcport);
6026 ql_dbg(ql_dbg_disc, vha, 0x2098,
6032 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
6036 struct qla_hw_data *ha = vha->hw;
6049 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
6052 ql_dbg(ql_dbg_disc, vha, 0x2004,
6056 ql_dbg(ql_dbg_disc, vha, 0x2005,
6063 void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
6065 qla2x00_iidma_fcport(vha, fcport);
6066 qla24xx_update_fcport_fcp_prio(vha, fcport);
6069 int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
6073 e = qla2x00_alloc_work(vha, QLA_EVT_IIDMA);
6078 return qla2x00_post_work(vha, e);
6083 qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
6097 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
6099 ql_log(ql_log_warn, vha, 0x2006,
6104 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
6106 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
6125 ql_dbg(ql_dbg_disc, vha, 0x20ee,
6127 __func__, fcport->port_name, vha->host_no,
6149 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
6156 ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
6160 fcport->login_retry = vha->hw->login_retry_count;
6163 spin_lock_irqsave(&vha->work_lock, flags);
6165 spin_unlock_irqrestore(&vha->work_lock, flags);
6167 if (vha->hw->current_topology == ISP_CFG_NL)
6178 switch (vha->hw->current_topology) {
6187 qla2x00_iidma_fcport(vha, fcport);
6189 qla2x00_dfs_create_rport(vha, fcport);
6191 qla24xx_update_fcport_fcp_prio(vha, fcport);
6193 switch (vha->host->active_mode) {
6195 qla2x00_reg_remote_port(vha, fcport);
6198 if (!vha->vha_tgt.qla_tgt->tgt_stop &&
6199 !vha->vha_tgt.qla_tgt->tgt_stopped)
6200 qlt_fc_port_added(vha, fcport);
6203 qla2x00_reg_remote_port(vha, fcport);
6204 if (!vha->vha_tgt.qla_tgt->tgt_stop &&
6205 !vha->vha_tgt.qla_tgt->tgt_stopped)
6206 qlt_fc_port_added(vha, fcport);
6212 if (NVME_TARGET(vha->hw, fcport))
6213 qla_nvme_register_remote(vha, fcport);
6217 if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
6220 ql_dbg(ql_dbg_disc, vha, 0x20d7,
6223 vha->fcport_count);
6224 qla24xx_post_gfpnid_work(vha, fcport);
6226 ql_dbg(ql_dbg_disc, vha, 0x20d7,
6229 vha->fcport_count);
6230 qla24xx_post_gpsc_work(vha, fcport);
6246 qla2x00_update_fcport(fcport->vha, fcport);
6248 ql_dbg(ql_dbg_disc, fcport->vha, 0x911e,
6260 qla2x00_post_async_adisc_work(fcport->vha, fcport,
6281 qla2x00_configure_fabric(scsi_qla_host_t *vha)
6287 struct qla_hw_data *ha = vha->hw;
6295 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
6297 ql_dbg(ql_dbg_disc, vha, 0x20a0,
6300 vha->device_flags &= ~SWITCH_FOUND;
6303 vha->device_flags |= SWITCH_FOUND;
6305 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_port_name, 0);
6307 ql_dbg(ql_dbg_disc, vha, 0x20ff,
6310 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
6311 rval = qla2x00_send_change_request(vha, 0x3, 0);
6313 ql_log(ql_log_warn, vha, 0x121,
6319 qla2x00_mgmt_svr_login(vha);
6323 rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
6326 ql_dbg(ql_dbg_disc, vha, 0x20a1,
6329 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6335 test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
6336 qla2x00_fdmi_register(vha);
6338 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
6339 if (qla2x00_rft_id(vha)) {
6341 ql_dbg(ql_dbg_disc, vha, 0x20a2,
6344 &vha->dpc_flags))
6347 if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
6349 ql_dbg(ql_dbg_disc, vha, 0x209a,
6352 &vha->dpc_flags))
6355 if (vha->flags.nvme_enabled) {
6356 if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
6357 ql_dbg(ql_dbg_disc, vha, 0x2049,
6361 if (qla2x00_rnn_id(vha)) {
6363 ql_dbg(ql_dbg_disc, vha, 0x2104,
6366 &vha->dpc_flags))
6368 } else if (qla2x00_rsnn_nn(vha)) {
6370 ql_dbg(ql_dbg_disc, vha, 0x209b,
6372 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6384 qlt_do_generation_tick(vha, &discovery_gen);
6387 rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
6390 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6392 list_for_each_entry(fcport, &vha->vp_fcports, list)
6395 rval = qla2x00_find_all_fabric_devs(vha);
6401 if (!vha->nvme_local_port && vha->flags.nvme_enabled)
6402 qla_nvme_register_hba(vha);
6405 ql_dbg(ql_dbg_disc, vha, 0x2068,
6425 qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
6436 struct qla_hw_data *ha = vha->hw;
6449 ql_dbg(ql_dbg_disc, vha, 0x209c,
6453 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
6455 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6457 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
6459 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6461 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
6463 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6465 } else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
6467 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6473 qla2x00_gff_id(vha, swl);
6474 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6481 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
6483 ql_log(ql_log_warn, vha, 0x209d,
6495 if (qla2x00_is_reserved_id(vha, loop_id))
6499 (atomic_read(&vha->loop_down_timer) ||
6500 LOOP_TRANSITION(vha))) {
6501 atomic_set(&vha->loop_down_timer, 0);
6502 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6503 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
6522 if (vha->flags.nvme_enabled &&
6524 ql_log(ql_log_info, vha, 0x2131,
6536 rval = qla2x00_ga_nxt(vha, new_fcport);
6538 ql_log(ql_log_warn, vha, 0x209e,
6551 ql_dbg(ql_dbg_disc, vha, 0x209f,
6564 if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
6569 (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
6583 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
6587 list_for_each_entry(fcport, &vha->vp_fcports, list) {
6607 (vha->host->active_mode == MODE_TARGET))) {
6613 vha->hw->login_retry_count;
6631 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
6651 if (found && NVME_TARGET(vha->hw, fcport)) {
6654 vha->fcport_count--;
6660 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
6665 list_add_tail(&new_fcport->list, &vha->vp_fcports);
6667 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
6672 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
6674 ql_log(ql_log_warn, vha, 0xd032,
6687 list_for_each_entry(fcport, &vha->vp_fcports, list) {
6688 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6695 if ((qla_dual_mode_enabled(vha) ||
6696 qla_ini_mode_enabled(vha)) &&
6698 qla2x00_mark_device_lost(vha, fcport,
6704 ql_dbg(ql_dbg_disc, vha, 0x20f0,
6716 qla24xx_fcport_handle_login(vha, fcport);
6723 qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
6726 int lid = NPH_MGMT_SERVER - vha->vp_idx;
6728 struct qla_hw_data *ha = vha->hw;
6730 if (vha->vp_idx == 0) {
6738 if (!test_bit(lid, vha->hw->loop_id_map)) {
6739 set_bit(lid, vha->hw->loop_id_map);
6764 qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
6771 struct qla_hw_data *ha = vha->hw;
6777 ql_dbg(ql_dbg_disc, vha, 0x2000,
6784 rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
6802 ql_dbg(ql_dbg_disc, vha, 0x2001,
6850 rval = qla2x00_find_new_loop_id(vha, fcport);
6862 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
6865 qla2x00_mark_device_lost(vha, fcport, 1);
6873 ql_dbg(ql_dbg_disc, vha, 0x2002,
6880 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
6908 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
6914 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
6938 qla2x00_loop_resync(scsi_qla_host_t *vha)
6943 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6944 if (vha->flags.online) {
6945 if (!(rval = qla2x00_fw_ready(vha))) {
6949 if (!IS_QLAFX00(vha->hw)) {
6954 qla2x00_marker(vha, vha->hw->base_qpair,
6956 vha->marker_needed = 0;
6960 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6962 if (IS_QLAFX00(vha->hw))
6963 qlafx00_configure_devices(vha);
6965 qla2x00_configure_loop(vha);
6968 } while (!atomic_read(&vha->loop_down_timer) &&
6969 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
6971 &vha->dpc_flags)));
6975 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
6979 ql_dbg(ql_dbg_disc, vha, 0x206c,
7018 qla83xx_reset_ownership(scsi_qla_host_t *vha)
7020 struct qla_hw_data *ha = vha->hw;
7027 drv_presence = qla8044_rd_direct(vha,
7029 dev_part_info1 = qla8044_rd_direct(vha,
7031 dev_part_info2 = qla8044_rd_direct(vha,
7034 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
7035 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
7036 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
7070 ql_dbg(ql_dbg_p3p, vha, 0xb07f,
7077 __qla83xx_set_drv_ack(scsi_qla_host_t *vha)
7080 struct qla_hw_data *ha = vha->hw;
7083 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
7086 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
7093 __qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
7096 struct qla_hw_data *ha = vha->hw;
7099 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
7102 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
7110 qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
7112 struct qla_hw_data *ha = vha->hw;
7120 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
7128 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
7132 ql_log(ql_log_warn, vha, 0xb078,
7140 qla83xx_initiating_reset(scsi_qla_host_t *vha)
7142 struct qla_hw_data *ha = vha->hw;
7145 __qla83xx_get_idc_control(vha, &idc_control);
7147 ql_log(ql_log_info, vha, 0xb080,
7154 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
7156 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
7158 ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
7159 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
7161 ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n",
7167 qla83xx_idc_unlock(vha, 0);
7169 qla83xx_idc_lock(vha, 0);
7170 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
7175 __qla83xx_set_drv_ack(vha);
7181 __qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
7183 return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
7187 __qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
7189 return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
7193 qla83xx_check_driver_presence(scsi_qla_host_t *vha)
7196 struct qla_hw_data *ha = vha->hw;
7198 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
7206 qla83xx_nic_core_reset(scsi_qla_host_t *vha)
7209 struct qla_hw_data *ha = vha->hw;
7211 ql_dbg(ql_dbg_p3p, vha, 0xb058,
7214 if (vha->device_flags & DFLG_DEV_FAILED) {
7215 ql_log(ql_log_warn, vha, 0xb059,
7220 qla83xx_idc_lock(vha, 0);
7222 if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
7223 ql_log(ql_log_warn, vha, 0xb05a,
7230 qla83xx_reset_ownership(vha);
7232 rval = qla83xx_initiating_reset(vha);
7239 rval = qla83xx_idc_state_handler(vha);
7243 __qla83xx_clear_drv_ack(vha);
7247 qla83xx_idc_unlock(vha, 0);
7249 ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);
7255 qla2xxx_mctp_dump(scsi_qla_host_t *vha)
7257 struct qla_hw_data *ha = vha->hw;
7262 ql_log(ql_log_info, vha, 0x506d,
7272 ql_log(ql_log_warn, vha, 0x506e,
7279 rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
7282 ql_log(ql_log_warn, vha, 0x506f,
7285 ql_log(ql_log_info, vha, 0x5070,
7287 vha->host_no, ha->mctp_dump);
7293 rval = qla83xx_restart_nic_firmware(vha);
7296 ql_log(ql_log_warn, vha, 0x5071,
7299 ql_dbg(ql_dbg_p3p, vha, 0xb084,
7317 qla2x00_quiesce_io(scsi_qla_host_t *vha)
7319 struct qla_hw_data *ha = vha->hw;
7323 ql_dbg(ql_dbg_dpc, vha, 0x401d,
7327 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
7328 atomic_set(&vha->loop_state, LOOP_DOWN);
7329 qla2x00_mark_all_devices_lost(vha);
7343 if (!atomic_read(&vha->loop_down_timer))
7344 atomic_set(&vha->loop_down_timer,
7348 WARN_ON_ONCE(qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST)
7353 qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
7355 struct qla_hw_data *ha = vha->hw;
7365 vha->flags.online = 0;
7367 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
7368 vha->qla_stats.total_isp_aborts++;
7370 ql_log(ql_log_info, vha, 0x00af,
7379 ha->isp_ops->reset_chip(vha);
7421 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
7422 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
7423 atomic_set(&vha->loop_state, LOOP_DOWN);
7424 qla2x00_mark_all_devices_lost(vha);
7438 if (!atomic_read(&vha->loop_down_timer))
7439 atomic_set(&vha->loop_down_timer,
7444 list_for_each_entry(fcport, &vha->vp_fcports, list) {
7463 qla82xx_chip_reset_cleanup(vha);
7464 ql_log(ql_log_info, vha, 0x00b4,
7468 vha->flags.online = 0;
7472 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
7488 qla2x00_abort_isp(scsi_qla_host_t *vha)
7492 struct qla_hw_data *ha = vha->hw;
7497 if (vha->flags.online) {
7498 qla2x00_abort_isp_cleanup(vha);
7500 vha->dport_status |= DPORT_DIAG_CHIP_RESET_IN_PROGRESS;
7501 vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS;
7503 if (vha->hw->flags.port_isolated)
7507 ql_log(ql_log_info, vha, 0x803f,
7512 if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) {
7514 vha->flags.online = 1;
7516 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
7521 ql_dbg(ql_dbg_p3p, vha, 0xb05c,
7523 if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
7524 ql_dbg(ql_dbg_p3p, vha, 0xb073,
7530 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
7535 switch (vha->qlini_mode) {
7537 if (!qla_tgt_mode_enabled(vha))
7541 if (!qla_dual_mode_enabled(vha) &&
7542 !qla_ini_mode_enabled(vha))
7550 ha->isp_ops->get_flash_version(vha, req->ring);
7553 ql_log(ql_log_info, vha, 0x803f,
7557 ha->isp_ops->nvram_config(vha);
7560 ql_log(ql_log_info, vha, 0x803f,
7564 if (!qla2x00_restart_isp(vha)) {
7565 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
7567 if (!atomic_read(&vha->loop_down_timer)) {
7572 vha->marker_needed = 1;
7575 vha->flags.online = 1;
7580 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
7583 qla2x00_get_fw_version(vha);
7588 rval = qla2x00_enable_fce_trace(vha,
7592 ql_log(ql_log_warn, vha, 0x8033,
7601 rval = qla2x00_enable_eft_trace(vha,
7604 ql_log(ql_log_warn, vha, 0x8034,
7610 vha->flags.online = 1;
7611 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
7613 ql_log(ql_log_fatal, vha, 0x8035,
7620 qla2x00_abort_isp_cleanup(vha);
7621 vha->flags.online = 0;
7623 &vha->dpc_flags);
7627 ql_dbg(ql_dbg_taskm, vha, 0x8020,
7634 ql_dbg(ql_dbg_taskm, vha, 0x8021,
7637 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
7644 if (vha->hw->flags.port_isolated) {
7645 qla2x00_abort_isp_cleanup(vha);
7650 ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
7651 qla2x00_configure_hba(vha);
7667 ql_dbg(ql_dbg_p3p, vha, 0xb05d,
7669 if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
7670 ql_dbg(ql_dbg_p3p, vha, 0xb074,
7674 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
7692 qla2x00_restart_isp(scsi_qla_host_t *vha)
7695 struct qla_hw_data *ha = vha->hw;
7698 if (qla2x00_isp_firmware(vha)) {
7699 vha->flags.online = 0;
7700 status = ha->isp_ops->chip_diag(vha);
7703 status = qla2x00_setup_chip(vha);
7708 status = qla2x00_init_rings(vha);
7712 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
7718 status = qla2x00_fw_ready(vha);
7721 return vha->device_flags & DFLG_NO_CABLE ? 0 : status;
7725 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
7726 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
7782 qla2x00_reset_adapter(scsi_qla_host_t *vha)
7785 struct qla_hw_data *ha = vha->hw;
7788 vha->flags.online = 0;
7802 qla24xx_reset_adapter(scsi_qla_host_t *vha)
7805 struct qla_hw_data *ha = vha->hw;
7811 vha->flags.online = 0;
7830 static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
7834 struct qla_hw_data *ha = vha->hw;
7851 qla24xx_nvram_config(scsi_qla_host_t *vha)
7860 struct qla_hw_data *ha = vha->hw;
7880 ha->isp_ops->read_nvram(vha, ha->vpd,
7885 ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size);
7889 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
7891 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
7898 ql_log(ql_log_warn, vha, 0x006b,
7901 ql_dump_buffer(ql_dbg_init, vha, 0x006b, nv, sizeof(*nv));
7902 ql_log(ql_log_warn, vha, 0x006c,
7932 qla24xx_nvram_wwn_from_ofw(vha, nv);
7951 if (qla_tgt_mode_enabled(vha)) {
7958 qlt_24xx_config_nvram_stage1(vha, nv);
7984 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
7987 qlt_24xx_config_nvram_stage2(vha, icb);
8025 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
8026 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
8083 if (!vha->flags.init_done) {
8094 ql_log(ql_log_info, vha, 0x006f,
8104 ql_log(ql_log_warn, vha, 0x0070,
8111 qla27xx_print_image(struct scsi_qla_host *vha, char *name,
8114 ql_dbg(ql_dbg_init, vha, 0x018b,
8202 struct scsi_qla_host *vha, struct active_regions *active_regions)
8204 struct qla_hw_data *ha = vha->hw;
8210 ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n");
8214 qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
8217 qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);
8220 ql_dbg(ql_dbg_init, vha, 0x018b,
8227 ql_dbg(ql_dbg_init, vha, 0x018c,
8235 ql_dbg(ql_dbg_init, vha, 0x018d,
8242 ql_dbg(ql_dbg_init, vha, 0x018a,
8247 qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
8250 qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
8253 ql_dbg(ql_dbg_init, vha, 0x018b,
8260 ql_dbg(ql_dbg_init, vha, 0x018c,
8268 ql_dbg(ql_dbg_init, vha, 0x018d,
8290 ql_dbg(ql_dbg_init, vha, 0x018f,
8300 qla27xx_get_active_image(struct scsi_qla_host *vha,
8303 struct qla_hw_data *ha = vha->hw;
8309 ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n");
8313 if (qla24xx_read_flash_data(vha, (uint32_t *)&pri_image_status,
8319 qla27xx_print_image(vha, "Primary image", &pri_image_status);
8322 ql_dbg(ql_dbg_init, vha, 0x018b,
8329 ql_dbg(ql_dbg_init, vha, 0x018c,
8337 ql_dbg(ql_dbg_init, vha, 0x018d,
8344 ql_dbg(ql_dbg_init, vha, 0x018a, "Secondary image not addressed\n");
8348 qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
8350 qla27xx_print_image(vha, "Secondary image", &sec_image_status);
8353 ql_dbg(ql_dbg_init, vha, 0x018b,
8360 ql_dbg(ql_dbg_init, vha, 0x018c,
8368 ql_dbg(ql_dbg_init, vha, 0x018d,
8385 ql_dbg(ql_dbg_init, vha, 0x018f, "active image %s (%u)\n",
8403 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
8413 struct qla_hw_data *ha = vha->hw;
8417 ql_dbg(ql_dbg_init, vha, 0x008b,
8421 qla24xx_read_flash_data(vha, dcode, faddr, 8);
8423 ql_log(ql_log_fatal, vha, 0x008c,
8426 ql_log(ql_log_fatal, vha, 0x008d,
8437 ql_dbg(ql_dbg_init, vha, 0x008d,
8439 qla24xx_read_flash_data(vha, dcode, faddr, 10);
8452 ql_dbg(ql_dbg_init, vha, 0x008e,
8455 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
8459 rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
8461 ql_log(ql_log_fatal, vha, 0x008f,
8477 ql_dbg(ql_dbg_init, vha, 0x0160, "-> templates = %u\n", templates);
8484 qla24xx_read_flash_data(vha, dcode, faddr, 7);
8486 ql_dbg(ql_dbg_init, vha, 0x0161,
8490 ql_dbg(ql_dbg_init, vha, 0x0162,
8499 ql_dbg(ql_dbg_init, vha, 0x0163,
8504 ql_log(ql_log_warn, vha, 0x0164,
8510 qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
8513 ql_log(ql_log_warn, vha, 0x0165,
8519 ql_dbg(ql_dbg_init, vha, 0x0166,
8523 ql_log(ql_log_warn, vha, 0x0167,
8530 ql_dbg(ql_dbg_init, vha, 0x0168,
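qla24xx_load_risc_flash reads each firmware segment from flash in pieces and hands every piece to qla2x00_load_ram, as the 0x008e/0x008f lines show. The sketch below models that chunking loop over a single segment; CHUNK_DWORDS, the helper stubs, and the dword-based units are assumptions, not the driver's actual buffer sizing.

#include <stdint.h>
#include <stdio.h>

#define CHUNK_DWORDS 128  /* assumed staging-buffer capacity, in 32-bit words */

/* Stand-in for reading 'len' dwords of firmware at flash offset 'faddr'. */
static void read_flash_model(uint32_t *dst, uint32_t faddr, uint32_t len)
{
	for (uint32_t i = 0; i < len; i++)
		dst[i] = faddr + i;  /* fake data */
}

/* Stand-in for the load-RAM step that pushes one chunk to the firmware. */
static int load_ram_model(const uint32_t *src, uint32_t risc_addr, uint32_t len)
{
	printf("load %u dwords at risc 0x%08x\n", len, risc_addr);
	return 0;
}

/* Copy one firmware segment of 'risc_size' dwords in CHUNK_DWORDS pieces. */
static int load_segment(uint32_t faddr, uint32_t risc_addr, uint32_t risc_size)
{
	uint32_t buf[CHUNK_DWORDS];

	for (uint32_t done = 0; done < risc_size; ) {
		uint32_t dlen = risc_size - done;

		if (dlen > CHUNK_DWORDS)
			dlen = CHUNK_DWORDS;
		read_flash_model(buf, faddr + done, dlen);
		if (load_ram_model(buf, risc_addr + done, dlen))
			return -1;
		done += dlen;
	}
	return 0;
}

int main(void)
{
	return load_segment(0x00200000, 0x00100000, 300);
}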
8549 qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8557 struct qla_hw_data *ha = vha->hw;
8561 blob = qla2x00_request_firmware(vha);
8563 ql_log(ql_log_info, vha, 0x0083,
8565 ql_log(ql_log_info, vha, 0x0084,
8579 ql_log(ql_log_fatal, vha, 0x0085,
8589 ql_log(ql_log_fatal, vha, 0x0086,
8591 ql_log(ql_log_fatal, vha, 0x0087,
8606 ql_log(ql_log_fatal, vha, 0x0088,
8617 ql_dbg(ql_dbg_init, vha, 0x0089,
8624 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
8627 ql_log(ql_log_fatal, vha, 0x008a,
8649 qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8660 struct qla_hw_data *ha = vha->hw;
8664 ql_dbg(ql_dbg_init, vha, 0x0090,
8667 blob = qla2x00_request_firmware(vha);
8669 ql_log(ql_log_warn, vha, 0x0092,
8678 ql_log(ql_log_fatal, vha, 0x0093,
8681 ql_log(ql_log_fatal, vha, 0x0095,
8691 ql_dbg(ql_dbg_init, vha, 0x0096,
8706 ql_dbg(ql_dbg_init, vha, 0x0097,
8715 rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
8717 ql_log(ql_log_fatal, vha, 0x0098,
8733 ql_dbg(ql_dbg_init, vha, 0x0170, "-> templates = %u\n", templates);
8740 ql_dbg(ql_dbg_init, vha, 0x0171,
8745 ql_dbg(ql_dbg_init, vha, 0x0172,
8754 ql_dbg(ql_dbg_init, vha, 0x0173,
8759 ql_log(ql_log_warn, vha, 0x0174,
8769 ql_log(ql_log_warn, vha, 0x0175,
8775 ql_dbg(ql_dbg_init, vha, 0x0176,
8779 ql_log(ql_log_warn, vha, 0x0177,
8786 ql_dbg(ql_dbg_init, vha, 0x0178,
8803 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8808 return qla81xx_load_risc(vha, srisc_addr);
8815 rval = qla24xx_load_risc_blob(vha, srisc_addr);
8819 return qla24xx_load_risc_flash(vha, srisc_addr,
8820 vha->hw->flt_region_fw);
8824 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8827 struct qla_hw_data *ha = vha->hw;
8842 qla27xx_get_active_image(vha, &active_regions);
8847 ql_dbg(ql_dbg_init, vha, 0x008b,
8849 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw_sec);
8854 ql_dbg(ql_dbg_init, vha, 0x008b,
8856 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
8861 rval = qla24xx_load_risc_blob(vha, srisc_addr);
8865 ql_log(ql_log_info, vha, 0x0099,
8867 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
8871 ql_log(ql_log_info, vha, 0x009a, "Need firmware flash update.\n");
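qla24xx_load_risc prefers the request_firmware blob and falls back to flash, while qla81xx_load_risc starts from the flash region picked by qla27xx_get_active_image and, per the 0x0099/0x009a messages, falls back to the blob and then the golden flash image. A small ordered-fallback sketch follows; the source names and the exact ordering are simplified assumptions.

#include <stdio.h>

#define QLA_SUCCESS 0

static int load_from_flash(void)      { return 1; }           /* pretend it fails */
static int load_from_blob(void)       { return 1; }           /* pretend it fails */
static int load_from_gold_flash(void) { return QLA_SUCCESS; }

/* Try each firmware source in order until one succeeds. */
static int load_risc_model(void)
{
	int rval = load_from_flash();

	if (rval != QLA_SUCCESS)
		rval = load_from_blob();
	if (rval != QLA_SUCCESS) {
		printf("Attempting to fall back to golden firmware.\n");
		rval = load_from_gold_flash();
	}
	if (rval != QLA_SUCCESS)
		printf("Need firmware flash update.\n");
	return rval;
}

int main(void)
{
	return load_risc_model();
}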
8877 qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
8880 struct qla_hw_data *ha = vha->hw;
8891 ret = qla2x00_stop_firmware(vha);
8894 ha->isp_ops->reset_chip(vha);
8895 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
8897 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
8899 ql_log(ql_log_info, vha, 0x8015,
8901 ret = qla2x00_stop_firmware(vha);
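qla2x00_try_to_stop_firmware retries the stop-firmware mailbox command, resetting and re-initializing the chip between attempts (the 0x8015 message announces each retry). A compact self-contained sketch of that bounded retry loop follows; the stubbed helpers and the retry count of 5 are illustrative assumptions.

#include <stdio.h>

#define QLA_SUCCESS 0

/* Succeeds on the third attempt in this model. */
static int stop_firmware(void) { static int n; return ++n >= 3 ? QLA_SUCCESS : 1; }
static int reset_and_reinit_chip(void) { return QLA_SUCCESS; }

/* Retry the stop-firmware command a bounded number of times, re-initializing
 * the chip between attempts. */
static int try_to_stop_firmware_model(void)
{
	int ret = stop_firmware();

	for (int retries = 5; ret != QLA_SUCCESS && retries; retries--) {
		if (reset_and_reinit_chip() != QLA_SUCCESS)
			continue;
		printf("Attempting retry of stop-firmware command.\n");
		ret = stop_firmware();
	}
	return ret;
}

int main(void)
{
	return try_to_stop_firmware_model();
}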
8909 qla24xx_configure_vhba(scsi_qla_host_t *vha)
8914 struct qla_hw_data *ha = vha->hw;
8917 if (!vha->vp_idx)
8923 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
8924 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
8927 vha->flags.management_server_logged_in = 0;
8930 rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
8934 ql_dbg(ql_dbg_init, vha, 0x0120,
8938 ql_dbg(ql_dbg_init, vha, 0x0103,
8945 atomic_set(&vha->loop_down_timer, 0);
8946 atomic_set(&vha->loop_state, LOOP_UP);
8947 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
8948 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
8960 qla84xx_get_chip(struct scsi_qla_host *vha)
8963 struct qla_hw_data *ha = vha->hw;
9003 qla84xx_put_chip(struct scsi_qla_host *vha)
9005 struct qla_hw_data *ha = vha->hw;
9012 qla84xx_init_chip(scsi_qla_host_t *vha)
9016 struct qla_hw_data *ha = vha->hw;
9020 rval = qla84xx_verify_chip(vha, status);
9031 qla81xx_nvram_config(scsi_qla_host_t *vha)
9040 struct qla_hw_data *ha = vha->hw;
9055 qla28xx_get_aux_images(vha, &active_regions);
9064 ql_dbg(ql_dbg_init, vha, 0x0110,
9069 ha->isp_ops->read_optrom(vha, ha->vpd, faddr << 2, ha->vpd_size);
9077 ql_dbg(ql_dbg_init, vha, 0x0110,
9081 ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
9087 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
9089 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
9096 ql_log(ql_log_info, vha, 0x0073,
9099 ql_dump_buffer(ql_dbg_init, vha, 0x0073, nv, sizeof(*nv));
9100 ql_log(ql_log_info, vha, 0x0074,
9156 qlt_81xx_config_nvram_stage1(vha, nv);
9195 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
9198 qlt_81xx_config_nvram_stage2(vha, icb);
9238 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
9239 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
9293 if (!vha->hw->flags.msix_enabled &&
9298 if (!vha->flags.init_done) {
9306 vha->flags.process_response_queue = 0;
9310 ql_log(ql_log_info, vha, 0x0075,
9318 vha->flags.process_response_queue = 1;
9328 ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha);
9331 ql_log(ql_log_warn, vha, 0x0076,
9338 qla82xx_restart_isp(scsi_qla_host_t *vha)
9341 struct qla_hw_data *ha = vha->hw;
9345 status = qla2x00_init_rings(vha);
9347 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
9350 status = qla2x00_fw_ready(vha);
9353 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
9354 vha->flags.online = 1;
9355 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
9359 if ((vha->device_flags & DFLG_NO_CABLE))
9364 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
9366 if (!atomic_read(&vha->loop_down_timer)) {
9371 vha->marker_needed = 1;
9377 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
9380 status = qla82xx_check_md_needed(vha);
9386 rval = qla2x00_enable_fce_trace(vha,
9390 ql_log(ql_log_warn, vha, 0x8001,
9399 rval = qla2x00_enable_eft_trace(vha,
9402 ql_log(ql_log_warn, vha, 0x8010,
9410 ql_dbg(ql_dbg_taskm, vha, 0x8011,
9428 ql_log(ql_log_warn, vha, 0x8016,
9443 * vha = scsi host structure pointer.
9454 qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
9462 struct qla_hw_data *ha = vha->hw;
9482 pid2 = vha->d_id.b24 & INVALID_PORT_ID;
9501 wwn1 = wwn_to_u64(vha->port_name);
9537 * vha = scsi host structure pointer.
9547 qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
9557 priority = qla24xx_get_fcp_prio(vha, fcport);
9561 if (IS_P3P_TYPE(vha->hw)) {
9566 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
9569 ql_dbg(ql_dbg_user, vha, 0x709e,
9576 ql_dbg(ql_dbg_user, vha, 0x704f,
9598 qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
9605 list_for_each_entry(fcport, &vha->vp_fcports, list)
9606 ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
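qla24xx_get_fcp_prio walks the FCP priority table looking for an entry that matches the port, and qla24xx_update_all_fcp_prio then applies the result to every fcport on the vha. The self-contained sketch below models the table match; the prio_entry_model layout and its flag names are assumptions and do not mirror the driver's fcp_prio entry format.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct prio_entry_model {
	bool     match_pid;   /* match on destination port id */
	bool     match_wwn;   /* match on destination port name */
	uint32_t d_id;        /* 24-bit port id */
	uint64_t wwpn;
	uint8_t  priority;
};

/* Return the priority of the first matching entry, or 0 if none match. */
static uint8_t get_fcp_prio_model(uint32_t port_id, uint64_t port_name,
				  const struct prio_entry_model *tbl, int n)
{
	for (int i = 0; i < n; i++) {
		if (tbl[i].match_pid && tbl[i].d_id != port_id)
			continue;
		if (tbl[i].match_wwn && tbl[i].wwpn != port_name)
			continue;
		return tbl[i].priority;
	}
	return 0;
}

int main(void)
{
	struct prio_entry_model tbl[] = {
		{ true,  false, 0x010200, 0,                     3 },
		{ false, true,  0,        0x21000024ff0102aaULL, 5 },
	};

	printf("prio = %u\n", get_fcp_prio_model(0x010200, 0, tbl, 2));
	return 0;
}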
9611 struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
9617 struct qla_hw_data *ha = vha->hw;
9623 ql_log(ql_log_warn, vha, 0x00181,
9631 ql_log(ql_log_warn, vha, 0x0182,
9636 qpair->hw = vha->hw;
9637 qpair->vha = vha;
9647 ql_log(ql_log_warn, vha, 0x0183,
9669 ql_dbg(ql_dbg_multiq, vha, 0xc00f,
9674 ql_log(ql_log_warn, vha, 0x0184,
9680 list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
9690 ql_log(ql_log_warn, vha, 0x0185,
9701 ql_log(ql_log_warn, vha, 0x0186,
9720 ql_log(ql_log_warn, vha, 0xd036,
9726 if (qla_create_buf_pool(vha, qpair)) {
9727 ql_log(ql_log_warn, vha, 0xd036,
9736 if (!vha->flags.qpairs_available)
9737 vha->flags.qpairs_available = 1;
9739 ql_dbg(ql_dbg_multiq, vha, 0xc00d,
9742 ql_dbg(ql_dbg_init, vha, 0x0187,
9751 qla25xx_delete_req_que(vha, qpair->req);
9753 qla25xx_delete_rsp_que(vha, qpair->rsp);
9758 if (list_empty(&vha->qp_list))
9759 vha->flags.qpairs_available = 0;
9770 int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
9779 ret = qla25xx_delete_req_que(vha, qpair->req);
9783 ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
9805 if (list_empty(&vha->qp_list)) {
9806 vha->flags.qpairs_available = 0;
9807 vha->flags.qpairs_req_created = 0;
9808 vha->flags.qpairs_rsp_created = 0;
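qla2xxx_create_qpair allocates the qpair, creates its response and request queues and buffer pool, and links it into vha->qp_list, unwinding everything on failure; qla2xxx_delete_qpair reverses the sequence and clears the qpairs_* flags once qp_list empties. A condensed sketch of that paired create/teardown flow with goto-style unwinding follows; the qpair_model members and helper stubs are simplified assumptions.

#include <stdio.h>
#include <stdlib.h>

struct qpair_model {
	int req_created;
	int rsp_created;
};

static int create_rsp_que(struct qpair_model *qp)  { qp->rsp_created = 1; return 0; }
static int create_req_que(struct qpair_model *qp)  { qp->req_created = 1; return 0; }
static void delete_req_que(struct qpair_model *qp) { qp->req_created = 0; }
static void delete_rsp_que(struct qpair_model *qp) { qp->rsp_created = 0; }

/* Allocate and wire up a queue pair, undoing earlier steps on any failure. */
static struct qpair_model *create_qpair_model(void)
{
	struct qpair_model *qp = calloc(1, sizeof(*qp));

	if (!qp)
		return NULL;
	if (create_rsp_que(qp))
		goto fail_rsp;
	if (create_req_que(qp))
		goto fail_req;
	return qp;

fail_req:
	delete_rsp_que(qp);
fail_rsp:
	free(qp);
	return NULL;
}

/* Tear down both queues before releasing the qpair itself. */
static void delete_qpair_model(struct qpair_model *qp)
{
	delete_req_que(qp);
	delete_rsp_que(qp);
	free(qp);
}

int main(void)
{
	struct qpair_model *qp = create_qpair_model();

	if (!qp)
		return 1;
	delete_qpair_model(qp);
	return 0;
}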
9833 qla2x00_get_num_tgts(scsi_qla_host_t *vha)
9841 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
9851 scsi_qla_host_t *vha = shost_priv(host);
9856 vha->hw_err_cnt = 0;
9858 vha->short_link_down_cnt = 0;
9860 vha->interface_err_cnt = 0;
9862 vha->cmd_timeout_cnt = 0;
9864 vha->reset_cmd_err_cnt = 0;
9866 spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
9867 list_for_each_entry(fcport, &vha->vp_fcports, list) {
9871 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
9873 vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
9890 scsi_qla_host_t *vha = shost_priv(host);
9905 num_tgt = qla2x00_get_num_tgts(vha);
9919 rsp_data->entry[i].cnt = vha->hw_err_cnt;
9926 rsp_data->entry[i].cnt = vha->short_link_down_cnt;
9933 rsp_data->entry[i].cnt = vha->interface_err_cnt;
9940 rsp_data->entry[i].cnt = vha->cmd_timeout_cnt;
9947 rsp_data->entry[i].cnt = vha->reset_cmd_err_cnt;
9955 spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
9956 list_for_each_entry(fcport, &vha->vp_fcports, list) {
9966 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
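The statistics callbacks above either zero the per-host and per-port error counters or copy them into the response entries, taking tgt.sess_lock while walking vp_fcports. A small reset-versus-report sketch over one counters structure follows; the counter names mirror the fields above, but the host_err_counters struct itself is an illustrative stand-in.

#include <stdio.h>
#include <string.h>

struct host_err_counters {
	unsigned long hw_err_cnt;
	unsigned long short_link_down_cnt;
	unsigned long interface_err_cnt;
	unsigned long cmd_timeout_cnt;
	unsigned long reset_cmd_err_cnt;
};

/* Counterpart of the "reset host stats" callback: clear everything. */
static void reset_counters(struct host_err_counters *c)
{
	memset(c, 0, sizeof(*c));
}

/* Counterpart of the "get host stats" callback: report current values. */
static void report_counters(const struct host_err_counters *c)
{
	printf("hw errors:        %lu\n", c->hw_err_cnt);
	printf("short link downs: %lu\n", c->short_link_down_cnt);
	printf("interface errors: %lu\n", c->interface_err_cnt);
	printf("command timeouts: %lu\n", c->cmd_timeout_cnt);
	printf("reset cmd errors: %lu\n", c->reset_cmd_err_cnt);
}

int main(void)
{
	struct host_err_counters c = { 2, 1, 0, 3, 0 };

	report_counters(&c);
	reset_counters(&c);
	report_counters(&c);
	return 0;
}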
9990 scsi_qla_host_t *vha = shost_priv(host);
9992 vha->hw->flags.port_isolated = 1;
9994 if (qla2x00_isp_reg_stat(vha->hw)) {
9995 ql_log(ql_log_info, vha, 0x9006,
9997 qla_pci_set_eeh_busy(vha);
10000 if (qla2x00_chip_is_down(vha))
10003 if (vha->flags.online) {
10004 qla2x00_abort_isp_cleanup(vha);
10005 qla2x00_wait_for_sess_deletion(vha);
10013 scsi_qla_host_t *vha = shost_priv(host);
10015 if (qla2x00_isp_reg_stat(vha->hw)) {
10016 ql_log(ql_log_info, vha, 0x9001,
10018 qla_pci_set_eeh_busy(vha);
10022 vha->hw->flags.port_isolated = 0;
10024 vha->flags.online = 1;
10025 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
10026 qla2xxx_wake_dpc(vha);
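The closing pair of callbacks isolates and de-isolates the port: enabling isolation checks register state, runs qla2x00_abort_isp_cleanup and waits for session deletion, while disabling it clears the flag, marks the host online, sets ISP_ABORT_NEEDED and wakes the DPC thread. A compact sketch of that enable/disable sequencing follows; the port_model fields and step names are simplified assumptions.

#include <stdbool.h>
#include <stdio.h>

struct port_model {
	bool isolated;
	bool online;
	bool isp_abort_needed;  /* stands in for the DPC ISP_ABORT_NEEDED bit */
};

static void isolate_port(struct port_model *p)
{
	p->isolated = true;
	if (p->online) {
		/* abort_isp_cleanup + wait_for_sess_deletion in the driver */
		p->online = false;
		printf("port isolated, sessions torn down\n");
	}
}

static void deisolate_port(struct port_model *p)
{
	p->isolated = false;
	p->online = true;
	p->isp_abort_needed = true;  /* wake the DPC thread to re-init */
	printf("port de-isolated, ISP abort scheduled\n");
}

int main(void)
{
	struct port_model p = { false, true, false };

	isolate_port(&p);
	deisolate_port(&p);
	return 0;
}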