Lines Matching refs:vha (cross-reference hits for the vha symbol in the qla2xxx target-mode code, qla_target.c; the number leading each entry is the source line)

109 static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
111 static void qlt_disable_vha(struct scsi_qla_host *vha);
117 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
119 static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
155 void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
157 scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
164 static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
167 if (unlikely(vha->marker_needed != 0)) {
168 int rc = qla2x00_issue_marker(vha, vha_locked);
171 ql_dbg(ql_dbg_tgt, vha, 0xe03d,
173 vha->vp_idx);
180 struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha,
186 if (vha->d_id.b.area == d_id.area &&
187 vha->d_id.b.domain == d_id.domain &&
188 vha->d_id.b.al_pa == d_id.al_pa)
189 return vha;
193 host = btree_lookup32(&vha->hw->host_map, key);
195 ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
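The qla_find_host_by_d_id() fragments above (source lines 180-195) first compare the base port's own d_id field by field and only then fall back to a btree_lookup32() on hw->host_map keyed by the 24-bit destination ID. A minimal standalone sketch of that key packing and comparison; the exact bit layout is an assumption, not taken from the driver:

```c
#include <stdint.h>

/* FC destination ID as it appears in the fragments above. */
struct fc_d_id {
	uint8_t domain;
	uint8_t area;
	uint8_t al_pa;
};

/* Pack the 24-bit D_ID into the kind of u32 key btree_lookup32() takes;
 * the domain/area/al_pa ordering here is assumed for illustration. */
static uint32_t d_id_to_key(struct fc_d_id id)
{
	return ((uint32_t)id.domain << 16) |
	       ((uint32_t)id.area   <<  8) |
	        (uint32_t)id.al_pa;
}

/* Fast path: the base vha's own D_ID is checked field by field before any
 * map lookup is attempted. */
static int d_id_equal(struct fc_d_id a, struct fc_d_id b)
{
	return a.domain == b.domain && a.area == b.area && a.al_pa == b.al_pa;
}
```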
201 static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
205 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
207 vha->hw->tgt.num_pend_cmds++;
208 if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
209 vha->qla_stats.stat_max_pend_cmds =
210 vha->hw->tgt.num_pend_cmds;
211 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
213 static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
217 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
218 vha->hw->tgt.num_pend_cmds--;
219 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
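qlt_incr_num_pend_cmds()/qlt_decr_num_pend_cmds() above (source lines 201-219) keep a pending-command count under hw->tgt.q_full_lock and record its high-water mark in qla_stats.stat_max_pend_cmds. A standalone user-space sketch of that counter pattern, with a pthread mutex standing in for the irqsave spinlock and hypothetical type names:

```c
#include <pthread.h>

/* Hypothetical stand-in for the per-HBA counters in the fragments above. */
struct tgt_counters {
	pthread_mutex_t q_full_lock;	/* stands in for spin_lock_irqsave() */
	unsigned int num_pend_cmds;
	unsigned int stat_max_pend_cmds;
};

/* Mirrors qlt_incr_num_pend_cmds(): bump the counter and track its peak. */
static void incr_num_pend_cmds(struct tgt_counters *t)
{
	pthread_mutex_lock(&t->q_full_lock);
	t->num_pend_cmds++;
	if (t->num_pend_cmds > t->stat_max_pend_cmds)
		t->stat_max_pend_cmds = t->num_pend_cmds;
	pthread_mutex_unlock(&t->q_full_lock);
}

/* Mirrors qlt_decr_num_pend_cmds(). */
static void decr_num_pend_cmds(struct tgt_counters *t)
{
	pthread_mutex_lock(&t->q_full_lock);
	t->num_pend_cmds--;
	pthread_mutex_unlock(&t->q_full_lock);
}
```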
223 static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
227 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
231 ql_dbg(ql_dbg_async, vha, 0x502c,
233 vha->vp_idx);
241 u->vha = vha;
245 spin_lock_irqsave(&vha->cmd_list_lock, flags);
246 list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
247 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
249 schedule_delayed_work(&vha->unknown_atio_work, 1);
255 qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
259 static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
264 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
268 list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
270 ql_dbg(ql_dbg_async, vha, 0x502e,
273 qlt_send_term_exchange(vha->hw->base_qpair, NULL,
278 host = qla_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
280 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
284 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
287 qlt_send_term_exchange(vha->hw->base_qpair, NULL,
290 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
291 "Reschedule u %p, vha %p, host %p\n", u, vha, host);
294 schedule_delayed_work(&vha->unknown_atio_work,
301 spin_lock_irqsave(&vha->cmd_list_lock, flags);
303 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
310 struct scsi_qla_host *vha = container_of(to_delayed_work(work),
313 qlt_try_to_dequeue_unknown_atios(vha, 0);
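qlt_queue_unknown_atio() and qlt_try_to_dequeue_unknown_atios() above (source lines 223-313) park ATIOs whose destination port cannot be resolved yet on vha->unknown_atio_list under cmd_list_lock, schedule delayed work, and on each pass either dispatch, reschedule, or terminate the exchange. A much-simplified, single-threaded sketch of that park-and-retry idea; the resolve callback and the give-up rule are illustrative, not the driver's actual logic:

```c
#include <stdlib.h>

/* Parked frame whose destination host is not known yet. */
struct unknown_atio {
	struct unknown_atio *next;
	int tries;
};

struct unknown_atio_list {
	struct unknown_atio *head;	/* under cmd_list_lock in the driver */
};

static void queue_unknown_atio(struct unknown_atio_list *l,
			       struct unknown_atio *u)
{
	u->next = l->head;
	l->head = u;			/* then schedule_delayed_work() retries */
}

/* One pass of the retry worker: dispatch what now resolves, keep the rest
 * queued, and drop entries that have been retried too often. */
static void try_to_dequeue(struct unknown_atio_list *l,
			   int (*resolve)(struct unknown_atio *))
{
	struct unknown_atio **pp = &l->head;

	while (*pp) {
		struct unknown_atio *u = *pp;

		if (resolve(u) || ++u->tries > 3) {
			*pp = u->next;		/* handled, or given up on */
			free(u);
		} else {
			pp = &u->next;		/* leave parked for next pass */
		}
	}
}
```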
316 static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
319 ql_dbg(ql_dbg_tgt, vha, 0xe072,
321 __func__, vha->vp_idx, atio->u.raw.entry_type,
327 struct scsi_qla_host *host = qla_find_host_by_d_id(vha,
330 ql_dbg(ql_dbg_tgt, vha, 0xe03e,
332 "with unknown d_id %x:%x:%x\n", vha->vp_idx,
338 qlt_queue_unknown_atio(vha, atio, ha_locked);
341 if (unlikely(!list_empty(&vha->unknown_atio_list)))
342 qlt_try_to_dequeue_unknown_atios(vha, ha_locked);
350 struct scsi_qla_host *host = vha;
354 qlt_issue_marker(vha, ha_locked);
358 host = qla_find_host_by_vp_idx(vha,
361 ql_dbg(ql_dbg_tgt, vha, 0xe03f,
365 vha->vp_idx, entry->u.isp24.vp_index);
374 qla24xx_report_id_acquisition(vha,
382 struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
387 ql_dbg(ql_dbg_tgt, vha, 0xe00a,
390 vha->vp_idx, entry->vp_index);
404 ql_dbg(ql_dbg_tgt, vha, 0xe040,
406 "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
413 void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
418 ql_dbg(ql_dbg_tgt, vha, 0xe073,
420 vha->vp_idx, __func__);
425 struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
428 ql_dbg(ql_dbg_tgt, vha, 0xe041,
431 vha->vp_idx, entry->vp_index);
444 host = qla_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
446 ql_dbg(ql_dbg_tgt, vha, 0xe042,
449 vha->vp_idx, entry->u.isp24.vp_index);
458 struct scsi_qla_host *host = vha;
462 host = qla_find_host_by_vp_idx(vha,
465 ql_dbg(ql_dbg_tgt, vha, 0xe043,
469 "vp_index %d\n", vha->vp_idx,
482 struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
485 ql_dbg(ql_dbg_tgt, vha, 0xe044,
488 "vp_index %d\n", vha->vp_idx, entry->vp_index);
499 struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
502 ql_dbg(ql_dbg_tgt, vha, 0xe045,
505 "vp_index %d\n", vha->vp_idx, entry->vp_index);
512 qlt_response_pkt(vha, rsp, pkt);
521 static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
526 e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
533 return qla2x00_post_work(vha, e);
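qla24xx_post_nack_work() above (source lines 521-533) follows the driver's generic deferred-event path: allocate a typed work event, fill in the arguments, and post it for the worker thread to run. A minimal sketch of that shape, with hypothetical names and the post callback modeling qla2x00_post_work():

```c
#include <stdlib.h>

enum evt_type { EVT_NACK };		/* stands in for QLA_EVT_NACK */

struct work_evt {
	enum evt_type type;
	void *fcport;
	int nack_type;			/* e.g. SRB_NACK_PLOGI / SRB_NACK_PRLI */
};

static struct work_evt *alloc_work(enum evt_type type)
{
	struct work_evt *e = calloc(1, sizeof(*e));

	if (e)
		e->type = type;
	return e;
}

/* Allocate, fill, and hand off; failure to allocate is reported to the
 * caller, as in the fragments above. */
static int post_nack_work(void *fcport, int nack_type,
			  int (*post)(struct work_evt *))
{
	struct work_evt *e = alloc_work(EVT_NACK);

	if (!e)
		return -1;
	e->fcport = fcport;
	e->nack_type = nack_type;
	return post(e);			/* queued for the worker to run later */
}
```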
538 struct scsi_qla_host *vha = sp->vha;
541 ql_dbg(ql_dbg_disc, vha, 0x20f2,
545 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
547 sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
558 ql_dbg(ql_dbg_edif, vha, 0x20ef,
563 qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
565 qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED, sp->fcport->d_id.b24,
579 vha->fcport_count++;
580 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
582 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
598 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
603 int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
615 if (vha->hw->flags.edif_enabled &&
630 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
636 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
641 ql_dbg(ql_dbg_disc, vha, 0x20f4,
658 void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
667 mutex_lock(&vha->vha_tgt.tgt_mutex);
668 t = qlt_create_sess(vha, e->u.nack.fcport, 0);
669 mutex_unlock(&vha->vha_tgt.tgt_mutex);
671 ql_log(ql_log_info, vha, 0xd034,
674 vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
678 qla24xx_async_notify_ack(vha, e->u.nack.fcport,
687 if (!fcport || !fcport->vha || !fcport->vha->hw)
690 ha = fcport->vha->hw;
703 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
705 struct qla_hw_data *ha = vha->hw;
706 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
710 if (!vha->hw->tgt.tgt_ops)
727 mutex_lock(&vha->vha_tgt.tgt_mutex);
728 sess = qlt_create_sess(vha, fcport, false);
729 mutex_unlock(&vha->vha_tgt.tgt_mutex);
739 ql_dbg(ql_dbg_disc, vha, 0x2107,
746 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
748 "(loop ID %d) reappeared\n", vha->vp_idx,
751 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
760 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
762 "port %8phC (loop ID %d) became global\n", vha->vp_idx,
777 qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
782 lockdep_assert_held(&vha->hw->hardware_lock);
784 list_for_each_entry(pla, &vha->plogi_ack_list, list) {
786 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d,
790 qlt_send_term_imm_notif(vha, &pla->iocb, 1);
798 ql_dbg(ql_dbg_async, vha, 0x5088,
800 vha->vp_idx);
806 list_add_tail(&pla->list, &vha->plogi_ack_list);
811 void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
825 ql_dbg(ql_dbg_disc, vha, 0x5089,
843 qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
845 qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);
847 list_for_each_entry(fcport, &vha->vp_fcports, list) {
859 qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
866 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
886 qlt_plogi_ack_unref(vha, sess->plogi_link[link]);
909 qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
914 if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
919 mutex_lock(&vha->vha_tgt.tgt_mutex);
921 list_for_each_entry(tmp, &vha->logo_list, list) {
924 mutex_unlock(&vha->vha_tgt.tgt_mutex);
929 list_add_tail(&logo->list, &vha->logo_list);
931 mutex_unlock(&vha->vha_tgt.tgt_mutex);
933 res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);
935 mutex_lock(&vha->vha_tgt.tgt_mutex);
937 mutex_unlock(&vha->vha_tgt.tgt_mutex);
940 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
951 struct scsi_qla_host *vha = sess->vha;
952 struct qla_hw_data *ha = vha->hw;
959 ql_dbg(ql_dbg_disc, vha, 0xf084,
968 qla2x00_mark_device_lost(vha, sess, 0);
977 qlt_send_first_logo(vha, &logo);
987 rc = qla2x00_post_async_logout_work(vha, sess,
990 ql_log(ql_log_warn, vha, 0xf085,
997 rc = qla2x00_post_async_prlo_work(vha, sess,
1000 ql_log(ql_log_warn, vha, 0xf085,
1018 ql_dbg(ql_dbg_edif, vha, 0x911e,
1021 qla2x00_release_all_sadb(vha, sess);
1023 ql_dbg(ql_dbg_edif, vha, 0x911e,
1028 qla_edif_clear_appdata(vha, sess);
1029 qla_edif_sess_down(vha, sess);
1045 ql_dbg(ql_dbg_disc, vha, 0xf086,
1061 ql_dbg(ql_dbg_disc, vha, 0xf087,
1067 qla24xx_async_notify_ack(vha, sess,
1082 vha->fcport_count--;
1091 if (!test_bit(UNLOADING, &vha->dpc_flags))
1092 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1104 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
1111 qlt_plogi_ack_unref(vha, con);
1114 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
1124 qlt_plogi_ack_unref(vha, own);
1132 qla2x00_dfs_remove_rport(vha, sess);
1134 spin_lock_irqsave(&vha->work_lock, flags);
1138 spin_unlock_irqrestore(&vha->work_lock, flags);
1140 ql_dbg(ql_dbg_disc, vha, 0xf001,
1142 sess, sess->port_name, vha->fcport_count);
1148 !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) &&
1149 (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
1150 switch (vha->host->active_mode) {
1153 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1154 qla2xxx_wake_dpc(vha);
1163 if (vha->fcport_count == 0)
1164 wake_up_all(&vha->fcport_waitQ);
1170 struct scsi_qla_host *vha = sess->vha;
1173 ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
1177 spin_lock_irqsave(&sess->vha->work_lock, flags);
1179 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
1189 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
1192 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
1198 queue_work(sess->vha->hw->wq, &sess->free_work);
1202 static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
1204 struct qla_hw_data *ha = vha->hw;
1214 atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
1216 qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
1220 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
1224 ql_dbg(ql_dbg_tgt, vha, 0xe000,
1231 ql_dbg(ql_dbg_tgt, vha, 0xe047,
1233 "loop_id %d)\n", vha->host_no, sess, sess->port_name,
1241 if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
1263 if (sess->vha->fcport_count == 0)
1264 wake_up_all(&sess->vha->fcport_waitQ);
1278 ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
1287 spin_lock_irqsave(&sess->vha->work_lock, flags);
1289 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
1293 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
1300 ql_dbg(ql_log_warn, sess->vha, 0xe001,
1304 WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
1310 scsi_qla_host_t *vha = tgt->vha;
1312 list_for_each_entry(sess, &vha->vp_fcports, list) {
1320 static int qla24xx_get_loop_id(struct scsi_qla_host *vha, be_id_t s_id,
1323 struct qla_hw_data *ha = vha->hw;
1332 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
1334 vha->vp_idx, qla2x00_gid_list_size(ha));
1339 rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
1341 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
1343 vha->vp_idx, rc);
1372 struct scsi_qla_host *vha,
1376 struct qla_hw_data *ha = vha->hw;
1380 if (vha->vha_tgt.qla_tgt->tgt_stop)
1385 ql_dbg(ql_dbg_disc, vha, 0x20f6,
1392 sess->tgt = vha->vha_tgt.qla_tgt;
1405 if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
1407 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
1409 vha->vp_idx, fcport->port_name);
1418 ql_dbg(ql_dbg_disc, vha, 0x20f7,
1426 vha->vha_tgt.qla_tgt->sess_count++;
1428 qlt_do_generation_tick(vha, &sess->generation);
1432 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
1434 sess, sess->se_sess, vha->vha_tgt.qla_tgt,
1435 vha->vha_tgt.qla_tgt->sess_count);
1437 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
1440 vha->vp_idx, local ? "local " : "", fcport->port_name,
1452 qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
1454 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
1458 if (!vha->hw->tgt.tgt_ops)
1464 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1466 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1470 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1475 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1476 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
1484 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
1487 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1501 ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
1513 struct scsi_qla_host *vha = tgt->vha;
1521 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
1528 ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
1529 vha->host_no, vha);
1534 mutex_lock(&vha->vha_tgt.tgt_mutex);
1537 mutex_unlock(&vha->vha_tgt.tgt_mutex);
1540 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
1550 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
1557 (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
1558 qlt_disable_vha(vha);
1571 scsi_qla_host_t *vha = tgt->vha;
1574 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
1580 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
1587 mutex_lock(&vha->vha_tgt.tgt_mutex);
1590 mutex_unlock(&vha->vha_tgt.tgt_mutex);
1593 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
1596 switch (vha->qlini_mode) {
1598 vha->flags.online = 1;
1599 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1610 scsi_qla_host_t *vha = tgt->vha;
1615 struct qla_hw_data *ha = vha->hw;
1623 for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
1636 list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
1644 if (vha->vp_idx)
1647 vha->vha_tgt.target_lport_ptr)
1648 ha->tgt.tgt_ops->remove_target(vha);
1650 vha->vha_tgt.qla_tgt = NULL;
1652 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
1667 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
1673 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
1698 struct scsi_qla_host *vha = qpair->vha;
1699 struct qla_hw_data *ha = vha->hw;
1706 ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
1710 ql_dbg(ql_dbg_tgt, vha, 0xe049,
1712 "request packet\n", vha->vp_idx, __func__);
1716 if (vha->vha_tgt.qla_tgt != NULL)
1717 vha->vha_tgt.qla_tgt->notify_ack_expected++;
1749 ql_dbg(ql_dbg_tgt, vha, 0xe005,
1751 vha->vp_idx, nack->u.isp24.status);
1755 qla2x00_start_iocbs(vha, qpair->req);
1760 struct scsi_qla_host *vha = mcmd->vha;
1761 struct qla_hw_data *ha = vha->hw;
1770 ql_dbg(ql_dbg_tgt, vha, 0xe006,
1776 ql_dbg(ql_dbg_tgt, vha, 0xe04a,
1778 vha->vp_idx, __func__);
1801 resp->vp_index = vha->vp_idx;
1831 vha->vha_tgt.qla_tgt->abts_resp_expected++;
1838 qla2x00_start_iocbs(vha, qpair->req);
1850 struct scsi_qla_host *vha = qpair->vha;
1851 struct qla_hw_data *ha = vha->hw;
1856 ql_dbg(ql_dbg_tgt, vha, 0xe006,
1863 ql_dbg(ql_dbg_tgt, vha, 0xe04a,
1865 "request packet", vha->vp_idx, __func__);
1873 resp->vp_index = vha->vp_idx;
1906 vha->vha_tgt.qla_tgt->abts_resp_expected++;
1913 qla2x00_start_iocbs(vha, qpair->req);
1919 static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1928 ql_dbg(ql_dbg_tgt, vha, 0xe04b,
1930 "request packet\n", vha->vp_idx, __func__);
1951 ctio->vp_index = vha->vp_idx;
1971 ql_dbg(ql_dbg_tgt, vha, 0xe007,
1982 qla2x00_start_iocbs(vha, qpair->req);
1997 static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id)
2005 spin_lock_irqsave(&vha->cmd_list_lock, flags);
2006 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
2017 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
2027 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
2030 static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha,
2033 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2036 if (vha->flags.qpairs_available) {
2051 struct qla_hw_data *ha = mcmd->vha->hw;
2093 ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052,
2095 mcmd->vha->vp_idx, rc);
2101 static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2104 struct qla_hw_data *ha = vha->hw;
2106 struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
2109 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
2111 vha->vp_idx, abts->exchange_addr_to_abort);
2115 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
2117 vha->vp_idx, __func__);
2127 mcmd->vha = vha;
2159 static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2162 struct qla_hw_data *ha = vha->hw;
2170 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
2172 "supported\n", vha->vp_idx);
2179 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
2181 "Address received\n", vha->vp_idx);
2187 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
2189 "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id.domain,
2196 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
2198 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
2200 vha->vp_idx);
2216 rc = __qlt_24xx_handle_abts(vha, abts, sess);
2218 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
2220 vha->vp_idx, rc);
2233 struct scsi_qla_host *ha = mcmd->vha;
2293 struct scsi_qla_host *vha = cmd->vha;
2295 ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
2296 "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
2298 vha, atio, scsi_status, sense_key, asc, ascq);
2300 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
2302 ql_dbg(ql_dbg_async, vha, 0x3067,
2304 vha->host_no, __func__);
2313 ctio->vp_index = vha->vp_idx;
2345 qla2x00_start_iocbs(vha, qpair->req);
2354 struct scsi_qla_host *vha = mcmd->sess->vha;
2355 struct qla_hw_data *ha = vha->hw;
2360 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
2366 if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) {
2371 ql_dbg(ql_dbg_async, vha, 0xe100,
2373 vha->flags.online, qla2x00_reset_active(vha),
2385 ql_dbg(ql_dbg_disc, vha, 0x2106,
2392 qlt_send_notify_ack(vha->hw->base_qpair,
2480 static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
2500 ha = vha->hw;
2574 ql_dbg(ql_dbg_io, qpair->vha, 0x305b,
2576 qpair->vha->vp_idx);
2723 struct scsi_qla_host *vha;
2728 vha = cmd->vha;
2732 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
2739 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
2746 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
2753 ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
2760 ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
2820 (IS_FWI2_CAPABLE(cmd->vha->hw) &&
2953 scsi_qla_host_t *vha = cmd->tgt->vha;
2954 struct qla_hw_data *ha = vha->hw;
3052 scsi_qla_host_t *vha = cmd->vha;
3054 ha = vha->hw;
3204 tc.vha = vha;
3248 struct scsi_qla_host *vha = cmd->vha;
3289 vha->flags.online, qla2x00_reset_active(vha),
3388 qla2x00_start_iocbs(vha, qpair->req);
3394 qlt_unmap_sg(vha, cmd);
3404 struct scsi_qla_host *vha = cmd->vha;
3426 vha->hw->tgt.tgt_ops->handle_data(cmd);
3429 vha->flags.online, qla2x00_reset_active(vha),
3469 qla2x00_start_iocbs(vha, qpair->req);
3475 qlt_unmap_sg(vha, cmd);
3495 struct scsi_qla_host *vha = cmd->vha;
3507 ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
3514 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
3530 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
3547 ql_dbg(ql_dbg_tgt_dif, vha, 0xe012,
3565 vha->hw->tgt.tgt_ops->handle_data(cmd);
3571 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3581 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3588 static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3592 struct qla_hw_data *ha = vha->hw;
3596 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
3599 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
3601 ql_dbg(ql_dbg_tgt, vha, 0xe080,
3603 "request packet\n", vha->vp_idx, __func__);
3633 qla2x00_start_iocbs(vha, vha->req);
3637 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3643 rc = __qlt_send_term_imm_notif(vha, imm);
3655 struct scsi_qla_host *vha = qpair->vha;
3657 struct qla_hw_data *ha = vha->hw;
3662 ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
3665 vha = cmd->vha;
3669 ql_dbg(ql_dbg_tgt, vha, 0xe050,
3671 "request packet\n", vha->vp_idx, __func__);
3677 ql_dbg(ql_dbg_tgt, vha, 0xe051,
3679 "incorrect state %d\n", vha->vp_idx, cmd,
3693 ctio24->vp_index = vha->vp_idx;
3707 qla2x00_start_iocbs(vha, qpair->req);
3715 struct scsi_qla_host *vha;
3719 /* why use different vha? NPIV */
3721 vha = cmd->vha;
3723 vha = qpair->vha;
3728 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3734 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3739 qlt_unmap_sg(vha, cmd);
3740 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3749 static void qlt_init_term_exchange(struct scsi_qla_host *vha)
3754 vha->hw->tgt.leak_exchg_thresh_hold =
3755 (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
3758 if (!list_empty(&vha->hw->tgt.q_full_list)) {
3760 list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
3768 vha->hw->tgt.num_qfull_cmds_alloc--;
3771 vha->hw->tgt.num_qfull_cmds_dropped = 0;
3774 static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
3778 total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
3780 if (vha->hw->tgt.leak_exchg_thresh_hold &&
3781 (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
3783 ql_dbg(ql_dbg_tgt, vha, 0xe079,
3785 total_leaked, vha->hw->cur_fw_xcb_count);
3787 if (IS_P3P_TYPE(vha->hw))
3788 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
3790 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3791 qla2xxx_wake_dpc(vha);
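qlt_init_term_exchange() and qlt_chk_exch_leak_thresh_hold() above (source lines 3749-3791) derive an exchange-leak threshold as a percentage of cur_fw_xcb_count and trigger an ISP abort once num_qfull_cmds_dropped exceeds it. The arithmetic in isolation; the 75% figure is an assumption made for this sketch:

```c
/* Percentage of firmware exchanges that may leak before forcing a reset;
 * the value used by the driver is assumed here, not verified. */
#define LEAK_EXCHG_THRESH_HOLD_PERCENT	75

/* Mirrors source lines 3754-3755 above. */
static unsigned int leak_exchg_thresh_hold(unsigned int cur_fw_xcb_count)
{
	return (cur_fw_xcb_count / 100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
}

/* Mirrors the check at 3780-3781: a zero threshold disables the test. */
static int exchanges_leaking(unsigned int num_qfull_cmds_dropped,
			     unsigned int thresh_hold)
{
	return thresh_hold && num_qfull_cmds_dropped > thresh_hold;
}
```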
3799 struct scsi_qla_host *vha = tgt->vha;
3803 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
3805 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
3811 qlt_unmap_sg(vha, cmd);
3819 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
3838 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
3846 qlt_decr_num_pend_cmds(cmd->vha);
3856 cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
3867 struct scsi_qla_host *vha = qpair->vha;
3870 ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
3895 static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
3908 } else if (vha->hw->req_q_map[qid]) {
3909 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a,
3911 vha->vp_idx, rsp->id, handle);
3912 req = vha->hw->req_q_map[qid];
3921 ql_dbg(ql_dbg_tgt, vha, 0xe052,
3923 vha->vp_idx, handle);
3929 ql_dbg(ql_dbg_async, vha, 0xe053,
3931 vha->vp_idx, handle, req->id, rsp->id);
3937 ql_dbg(ql_dbg_tgt, vha, 0xe054,
3939 "support NULL handles\n", vha->vp_idx);
3949 static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
3952 struct qla_hw_data *ha = vha->hw;
3960 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
3967 cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
3973 qlt_chk_edif_rx_sa_delete_pending(vha, cmd->sess,
3980 qlt_unmap_sg(vha, cmd);
3986 dev_info(&vha->hw->pdev->dev,
3988 vha->vp_idx, cmd->atio.u.isp24.attr,
3999 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
4003 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
4013 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
4015 "received (state %x, se_cmd %p)\n", vha->vp_idx,
4025 ql_dbg(ql_dbg_disc, vha, 0x20f8,
4036 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
4040 vha->vp_idx, status, cmd->state, se_cmd,
4052 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
4054 vha->vp_idx, status, cmd->state, se_cmd);
4058 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
4060 vha->vp_idx, status, cmd->state, se_cmd);
4091 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
4095 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
4097 "not return a CTIO complete\n", vha->vp_idx, cmd->state);
4102 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
4109 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
4131 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
4146 scsi_qla_host_t *vha = cmd->vha;
4147 struct qla_hw_data *ha = vha->hw;
4160 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
4181 fcp_task_attr = qlt_get_fcp_task_attr(vha,
4185 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
4196 ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
4205 qlt_decr_num_pend_cmds(vha);
4206 cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
4215 scsi_qla_host_t *vha = cmd->vha;
4218 spin_lock_irqsave(&vha->cmd_list_lock, flags);
4220 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4225 void qlt_clr_qp_table(struct scsi_qla_host *vha)
4228 struct qla_hw_data *ha = vha->hw;
4229 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4233 ql_log(ql_log_info, vha, 0x706c,
4250 static void qlt_assign_qpair(struct scsi_qla_host *vha,
4254 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4257 if (vha->flags.qpairs_available) {
4263 pci_get_drvdata(vha->hw->pdev);
4265 qpair = vha->hw->base_qpair;
4274 ql_log(ql_log_info, vha, 0xd037,
4294 ql_log(ql_log_info, vha, 0xd038,
4316 ql_log(ql_log_info, vha, 0xd039,
4329 static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
4335 cmd = vha->hw->tgt.tgt_ops->get_cmd(sess);
4343 cmd->tgt = vha->vha_tgt.qla_tgt;
4344 qlt_incr_num_pend_cmds(vha);
4345 cmd->vha = vha;
4355 qlt_assign_qpair(vha, cmd);
4356 cmd->reset_count = vha->hw->base_qpair->chip_reset;
4357 cmd->vp_idx = vha->vp_idx;
4364 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
4367 struct qla_hw_data *ha = vha->hw;
4368 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4375 ql_dbg(ql_dbg_io, vha, 0x3061,
4384 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
4391 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
4401 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
4408 cmd = qlt_get_tag(vha, sess, atio);
4410 ql_dbg(ql_dbg_io, vha, 0x3062,
4411 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
4419 spin_lock_irqsave(&vha->cmd_list_lock, flags);
4420 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
4421 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4424 if (vha->flags.qpairs_available) {
4443 struct scsi_qla_host *vha = sess->vha;
4444 struct qla_hw_data *ha = vha->hw;
4447 struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
4451 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
4454 "leak\n", vha->vp_idx);
4468 mcmd->vha = vha;
4476 abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
4479 h = qlt_find_qphint(vha, mcmd->unpacked_lun);
4501 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
4504 struct qla_hw_data *ha = vha->hw;
4513 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4527 static int __qlt_abort_task(struct scsi_qla_host *vha,
4531 struct qla_hw_data *ha = vha->hw;
4538 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
4540 vha->vp_idx, __func__);
4558 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
4560 vha->vp_idx, rc);
4569 static int qlt_abort_task(struct scsi_qla_host *vha,
4572 struct qla_hw_data *ha = vha->hw;
4580 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
4584 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
4586 "session\n", vha->vp_idx);
4587 return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
4591 return __qlt_abort_task(vha, iocb, sess);
4597 ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
4620 qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
4628 list_for_each_entry(other_sess, &vha->vp_fcports, list) {
4641 ql_dbg(ql_dbg_disc, vha, 0x1000c,
4657 ql_dbg(ql_dbg_disc, vha, 0xf01b,
4672 ql_dbg(ql_dbg_disc, vha, 0x1000d,
4686 static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
4698 spin_lock_irqsave(&vha->cmd_list_lock, flags);
4699 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
4708 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
4716 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4721 static int qlt_handle_login(struct scsi_qla_host *vha,
4732 lockdep_assert_held(&vha->hw->hardware_lock);
4744 abort_cmds_for_s_id(vha, &port_id);
4747 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4748 sess = qlt_find_sess_invalidate_other(vha, wwn,
4750 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4752 ql_dbg(ql_dbg_disc, vha, 0xffff,
4755 qlt_send_term_imm_notif(vha, iocb, 1);
4764 if (vha->hw->flags.edif_enabled &&
4765 !(vha->e_dbell.db_flags & EDB_ACTIVE) &&
4768 ql_dbg(ql_dbg_disc, vha, 0xffff,
4771 qlt_send_term_imm_notif(vha, iocb, 1);
4775 if (vha->hw->flags.edif_enabled) {
4776 if (DBELL_INACTIVE(vha)) {
4777 ql_dbg(ql_dbg_disc, vha, 0xffff,
4780 qlt_send_term_imm_notif(vha, iocb, 1);
4784 ql_dbg(ql_dbg_disc, vha, 0xffff,
4787 qlt_send_term_imm_notif(vha, iocb, 1);
4792 pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
4794 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
4798 qlt_send_term_imm_notif(vha, iocb, 1);
4804 qlt_plogi_ack_link(vha, pla, conflict_sess,
4810 ql_dbg(ql_dbg_disc, vha, 0xffff,
4814 qla24xx_post_newsess_work(vha, &port_id,
4819 qla24xx_post_newsess_work(vha, &port_id,
4840 ql_dbg(ql_dbg_disc, vha, 0xffff,
4850 qlt_send_term_imm_notif(vha, iocb, 1);
4854 qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
4861 if (N2N_TOPO(vha->hw) && fcport_is_bigger(sess))
4862 vha->d_id = sess->d_id;
4864 ql_dbg(ql_dbg_disc, vha, 0xffff,
4868 qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
4892 ql_dbg(ql_dbg_disc, vha, 0x20f9,
4899 qlt_plogi_ack_unref(vha, pla);
4924 ql_dbg(ql_dbg_disc, vha, 0x20f9,
4939 static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4942 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4943 struct qla_hw_data *ha = vha->hw;
4963 ql_dbg(ql_dbg_disc, vha, 0xf026,
4965 vha->vp_idx, iocb->u.isp24.port_id[2],
4975 res = qlt_handle_login(vha, iocb);
4980 sess = qla2x00_find_fcport_by_wwpn(vha,
4983 if (vha->hw->flags.edif_enabled && sess &&
4986 ql_dbg(ql_dbg_disc, vha, 0xffff,
4989 qlt_send_term_imm_notif(vha, iocb, 1);
4994 ql_dbg(ql_dbg_disc, vha, 0xffff,
4998 qlt_send_term_imm_notif(vha, iocb, 1);
5002 res = qlt_handle_login(vha, iocb);
5015 sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
5026 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
5031 qlt_send_term_imm_notif(vha, iocb, 1);
5041 if (vha->hw->flags.edif_enabled && sess &&
5044 ql_dbg(ql_dbg_disc, vha, 0xffff,
5047 qlt_send_term_imm_notif(vha, iocb, 1);
5072 ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
5076 qlt_send_term_imm_notif(vha, iocb, 1);
5097 ql_log(ql_log_warn, sess->vha, 0xf095,
5100 qlt_send_term_imm_notif(vha, iocb, 1);
5109 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
5133 ql_dbg(ql_dbg_disc, vha, 0x20fa,
5136 qla24xx_post_nack_work(vha, sess, iocb,
5140 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5141 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5142 qla2xxx_wake_dpc(vha);
5146 ql_dbg(ql_dbg_disc, vha, 0x20fb,
5149 qla24xx_post_nack_work(vha, sess, iocb,
5160 qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
5168 sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
5178 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
5180 ql_dbg(ql_dbg_disc, vha, 0x20fc,
5202 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5210 sess = qla2x00_find_fcport_by_wwpn(vha,
5213 ql_dbg(ql_dbg_disc, vha, 0x20fd,
5225 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
5227 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
5228 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
5232 ql_dbg(ql_dbg_disc, vha, 0xf026,
5234 vha->vp_idx, iocb->u.isp24.status_subcode, res);
5243 static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
5246 struct qla_hw_data *ha = vha->hw;
5257 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
5259 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
5262 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
5269 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5271 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
5273 "subcode %x)\n", vha->vp_idx,
5291 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
5293 "%#x, subcode %x)\n", vha->vp_idx,
5297 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
5303 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
5304 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
5305 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5311 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
5312 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
5314 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
5320 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
5322 vha->vp_idx);
5324 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5329 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
5332 "resource count)\n", vha->vp_idx);
5336 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
5338 "L %#x)\n", vha->vp_idx,
5342 if (qlt_abort_task(vha, iocb) == 0)
5347 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
5349 vha->vp_idx, vha->host_no);
5353 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
5355 vha->vp_idx, iocb->u.isp2x.task_flags);
5359 if (qlt_24xx_handle_els(vha, iocb) == 0)
5363 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
5365 "notify status %x\n", vha->vp_idx, status);
5381 struct scsi_qla_host *vha = qpair->vha;
5383 struct qla_hw_data *ha = vha->hw;
5393 sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
5403 ql_dbg(ql_dbg_io, vha, 0x3063,
5405 "request packet", vha->vp_idx, __func__);
5417 ctio24->vp_index = vha->vp_idx;
5442 qla2x00_start_iocbs(vha, qpair->req);
5452 qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
5455 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5456 struct qla_hw_data *ha = vha->hw;
5462 ql_dbg(ql_dbg_io, vha, 0x300a,
5467 if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
5468 vha->hw->tgt.num_qfull_cmds_dropped++;
5469 if (vha->hw->tgt.num_qfull_cmds_dropped >
5470 vha->qla_stats.stat_max_qfull_cmds_dropped)
5471 vha->qla_stats.stat_max_qfull_cmds_dropped =
5472 vha->hw->tgt.num_qfull_cmds_dropped;
5474 ql_dbg(ql_dbg_io, vha, 0x3068,
5476 vha->vp_idx, __func__,
5477 vha->hw->tgt.num_qfull_cmds_dropped);
5479 qlt_chk_exch_leak_thresh_hold(vha);
5484 (vha, atio->u.isp24.fcp_hdr.s_id);
5490 ql_dbg(ql_dbg_io, vha, 0x3009,
5492 vha->vp_idx, __func__);
5494 vha->hw->tgt.num_qfull_cmds_dropped++;
5495 if (vha->hw->tgt.num_qfull_cmds_dropped >
5496 vha->qla_stats.stat_max_qfull_cmds_dropped)
5497 vha->qla_stats.stat_max_qfull_cmds_dropped =
5498 vha->hw->tgt.num_qfull_cmds_dropped;
5500 qlt_chk_exch_leak_thresh_hold(vha);
5504 qlt_incr_num_pend_cmds(vha);
5508 cmd->tgt = vha->vha_tgt.qla_tgt;
5509 cmd->vha = vha;
5521 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5522 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
5524 vha->hw->tgt.num_qfull_cmds_alloc++;
5525 if (vha->hw->tgt.num_qfull_cmds_alloc >
5526 vha->qla_stats.stat_max_qfull_cmds_alloc)
5527 vha->qla_stats.stat_max_qfull_cmds_alloc =
5528 vha->hw->tgt.num_qfull_cmds_alloc;
5529 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5535 struct scsi_qla_host *vha = qpair->vha;
5536 struct qla_hw_data *ha = vha->hw;
5548 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5550 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5554 list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
5555 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5569 ql_dbg(ql_dbg_io, vha, 0x3006,
5573 ql_dbg(ql_dbg_io, vha, 0x3007,
5577 ql_dbg(ql_dbg_io, vha, 0x3008,
5584 vha->hw->tgt.num_qfull_cmds_alloc--;
5599 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5600 list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
5601 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5612 struct scsi_qla_host *vha = qpair->vha;
5616 qlt_alloc_qfull_cmd(vha, atio, status, 1);
5620 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
5623 struct qla_hw_data *ha = vha->hw;
5640 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5643 struct qla_hw_data *ha = vha->hw;
5644 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5649 ql_dbg(ql_dbg_tgt, vha, 0x3064,
5664 ql_dbg(ql_dbg_io, vha, 0x3065,
5667 "sending QUEUE_FULL\n", vha->vp_idx);
5678 rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair,
5684 rc = qlt_handle_cmd_for_atio(vha, atio);
5686 rc = qlt_handle_task_mgmt(vha, atio);
5693 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5697 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5703 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5705 vha->vp_idx);
5710 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5712 vha->vp_idx);
5726 ql_dbg(ql_dbg_tgt, vha, 0xe05b,
5728 "with error status %x\n", vha->vp_idx,
5733 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
5737 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
5744 ql_dbg(ql_dbg_tgt, vha, 0xe05c,
5746 "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
5758 static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
5761 struct qla_hw_data *ha = vha->hw;
5777 ql_log(ql_log_warn, vha, 0xffff,
5780 vha, 0xffff, (uint8_t *)entry, sizeof(*entry));
5783 ha->isp_ops->fw_dump(vha);
5785 qla2xxx_dump_fw(vha);
5787 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
5788 qla2xxx_wake_dpc(vha);
5800 static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
5807 struct qla_hw_data *ha = vha->hw;
5809 mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt);
5811 ql_dbg(ql_dbg_async, vha, 0xe064,
5813 vha->vp_idx);
5818 vha = mcmd->vha;
5819 vha->vha_tgt.qla_tgt->abts_resp_expected--;
5821 ql_dbg(ql_dbg_tgt, vha, 0xe038,
5828 if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
5832 qlt_24xx_retry_term_exchange(vha, rsp->qpair,
5835 ql_dbg(ql_dbg_tgt, vha, 0xe063,
5837 vha->vp_idx, entry->compl_status,
5849 static void qlt_response_pkt(struct scsi_qla_host *vha,
5852 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5855 ql_dbg(ql_dbg_tgt, vha, 0xe05d,
5857 vha->vp_idx, pkt->entry_type, vha->hw);
5872 qlt_do_ctio_completion(vha, rsp, entry->handle,
5885 ql_dbg(ql_dbg_tgt, vha, 0xe05e,
5887 "status %x received\n", vha->vp_idx,
5892 rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1);
5896 rc = qlt_handle_cmd_for_atio(vha, atio);
5900 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5904 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5910 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5912 vha->vp_idx);
5917 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5919 vha->vp_idx);
5932 qlt_do_ctio_completion(vha, rsp, entry->handle,
5942 qlt_do_ctio_completion(vha, rsp, entry->handle,
5949 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
5950 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
5957 ql_dbg(ql_dbg_tgt, vha, 0xe036,
5964 ql_dbg(ql_dbg_tgt, vha, 0xe061,
5966 "failed %x\n", vha->vp_idx,
5970 ql_dbg(ql_dbg_tgt, vha, 0xe062,
5972 vha->vp_idx);
5977 ql_dbg(ql_dbg_tgt, vha, 0xe037,
5978 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
5979 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
5984 qlt_handle_abts_completion(vha, rsp, pkt);
5986 ql_dbg(ql_dbg_tgt, vha, 0xe064,
5988 "received\n", vha->vp_idx);
5993 ql_dbg(ql_dbg_tgt, vha, 0xe065,
5995 "type %x\n", vha->vp_idx, pkt->entry_type);
6004 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
6007 struct qla_hw_data *ha = vha->hw;
6008 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6028 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
6030 "occurred", vha->vp_idx, code);
6033 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
6038 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
6040 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
6055 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
6057 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
6062 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
6064 vha->vp_idx,
6069 vha->hw->exch_starvation++;
6070 if (vha->hw->exch_starvation > 5) {
6071 ql_log(ql_log_warn, vha, 0xd03a,
6074 vha->hw->exch_starvation = 0;
6075 if (IS_P3P_TYPE(vha->hw))
6077 &vha->dpc_flags);
6080 &vha->dpc_flags);
6081 qla2xxx_wake_dpc(vha);
6087 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
6090 "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
6095 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
6097 vha->hw->exch_starvation = 0;
6099 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
6108 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
6116 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
6118 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
6120 vha->vp_idx);
6126 rc = qla24xx_gpdb_wait(vha, fcport, 0);
6128 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
6131 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
6137 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
6138 tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1);
6150 if (vha->hw->current_topology == ISP_CFG_F)
6153 list_add_tail(&fcport->list, &vha->vp_fcports);
6155 vha->fcport_count++;
6163 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
6165 switch (vha->host->active_mode) {
6169 if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
6172 ql_dbg(ql_dbg_disc, vha, 0x20ff,
6174 __func__, __LINE__, fcport->port_name, vha->fcport_count);
6175 qla24xx_post_gpsc_work(vha, fcport);
6191 static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
6204 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
6210 mutex_lock(&vha->vha_tgt.tgt_mutex);
6214 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
6216 rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
6218 mutex_unlock(&vha->vha_tgt.tgt_mutex);
6220 ql_log(ql_log_info, vha, 0xf071,
6223 vha->vp_idx, s_id.domain, s_id.area, s_id.al_pa);
6230 qlt_send_first_logo(vha, &logo);
6236 fcport = qlt_get_port_database(vha, loop_id);
6238 mutex_unlock(&vha->vha_tgt.tgt_mutex);
6243 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
6244 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
6246 "(counter was %d, new %d), retrying", vha->vp_idx,
6248 atomic_read(&vha->vha_tgt.
6253 sess = qlt_create_sess(vha, fcport, true);
6255 mutex_unlock(&vha->vha_tgt.tgt_mutex);
6263 struct scsi_qla_host *vha = tgt->vha;
6264 struct qla_hw_data *ha = vha->hw;
6277 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
6281 sess = qlt_make_local_sess(vha, s_id);
6294 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
6302 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
6324 struct scsi_qla_host *vha = tgt->vha;
6327 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
6430 tgt->vha = base_vha;
6456 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
6458 if (!vha->vha_tgt.qla_tgt)
6461 if (vha->fc_vport) {
6462 qlt_release(vha->vha_tgt.qla_tgt);
6467 qlt_init_term_exchange(vha);
6469 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
6470 vha->host_no, ha);
6471 qlt_release(vha->vha_tgt.qla_tgt);
6487 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
6490 pr_debug("qla2xxx HW vha->node_name: %8phC\n", vha->node_name);
6491 pr_debug("qla2xxx HW vha->port_name: %8phC\n", vha->port_name);
6510 struct scsi_qla_host *vha;
6519 vha = tgt->vha;
6520 ha = vha->hw;
6522 host = vha->host;
6529 if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
6548 ql_dbg(ql_dbg_tgt, vha, 0xe068,
6553 qlt_lport_dump(vha, phys_wwpn, b);
6555 if (memcmp(vha->port_name, b, WWN_SIZE)) {
6559 rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
6575 * @vha: Registered scsi_qla_host pointer
6577 void qlt_lport_deregister(struct scsi_qla_host *vha)
6579 struct qla_hw_data *ha = vha->hw;
6580 struct Scsi_Host *sh = vha->host;
6584 vha->vha_tgt.target_lport_ptr = NULL;
6594 void qlt_set_mode(struct scsi_qla_host *vha)
6596 switch (vha->qlini_mode) {
6599 vha->host->active_mode = MODE_TARGET;
6602 vha->host->active_mode = MODE_INITIATOR;
6605 vha->host->active_mode = MODE_DUAL;
6613 static void qlt_clear_mode(struct scsi_qla_host *vha)
6615 switch (vha->qlini_mode) {
6617 vha->host->active_mode = MODE_UNKNOWN;
6620 vha->host->active_mode = MODE_INITIATOR;
6624 vha->host->active_mode = MODE_INITIATOR;
6637 qlt_enable_vha(struct scsi_qla_host *vha)
6639 struct qla_hw_data *ha = vha->hw;
6640 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6645 ql_dbg(ql_dbg_tgt, vha, 0xe069,
6651 if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
6658 qlt_set_mode(vha);
6662 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
6664 if (vha->vp_idx) {
6665 qla24xx_disable_vp(vha);
6666 qla24xx_enable_vp(vha);
6682 static void qlt_disable_vha(struct scsi_qla_host *vha)
6684 struct qla_hw_data *ha = vha->hw;
6685 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6689 ql_dbg(ql_dbg_tgt, vha, 0xe06a,
6697 qlt_clear_mode(vha);
6700 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
6701 qla2xxx_wake_dpc(vha);
6707 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
6708 ql_dbg(ql_dbg_tgt, vha, 0xe081,
6718 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
6720 vha->vha_tgt.qla_tgt = NULL;
6722 mutex_init(&vha->vha_tgt.tgt_mutex);
6723 mutex_init(&vha->vha_tgt.tgt_host_action_mutex);
6725 INIT_LIST_HEAD(&vha->unknown_atio_list);
6726 INIT_DELAYED_WORK(&vha->unknown_atio_work, qlt_unknown_atio_work_fn);
6728 qlt_clear_mode(vha);
6738 qlt_add_target(ha, vha);
6742 qlt_rff_id(struct scsi_qla_host *vha)
6748 if (qla_tgt_mode_enabled(vha)) {
6750 } else if (qla_ini_mode_enabled(vha)) {
6752 } else if (qla_dual_mode_enabled(vha))
6768 qlt_init_atio_q_entries(struct scsi_qla_host *vha)
6770 struct qla_hw_data *ha = vha->hw;
6774 if (qla_ini_mode_enabled(vha))
6789 qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
6791 struct qla_hw_data *ha = vha->hw;
6809 ql_log(ql_log_warn, vha, 0xd03c,
6819 qlt_24xx_atio_pkt_all_vps(vha,
6838 wrt_reg_dword(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
6842 qlt_24xx_config_rings(struct scsi_qla_host *vha)
6844 struct qla_hw_data *ha = vha->hw;
6851 wrt_reg_dword(ISP_ATIO_Q_IN(vha), 0);
6852 wrt_reg_dword(ISP_ATIO_Q_OUT(vha), 0);
6853 rd_reg_dword(ISP_ATIO_Q_OUT(vha));
6859 ql_dbg(ql_dbg_init, vha, 0xf072,
6868 ql_dbg(ql_dbg_init, vha, 0xf072,
6875 qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
6877 struct qla_hw_data *ha = vha->hw;
6883 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
6896 if (qla_tgt_mode_enabled(vha))
6899 nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
6905 if (qla_tgt_mode_enabled(vha))
6952 if (vha->flags.init_done)
6953 fc_host_supported_classes(vha->host) =
6958 if (vha->flags.init_done)
6959 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
6966 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
6969 struct qla_hw_data *ha = vha->hw;
6981 qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
6983 struct qla_hw_data *ha = vha->hw;
6989 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
7002 if (qla_tgt_mode_enabled(vha))
7005 nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
7011 if (qla_tgt_mode_enabled(vha))
7055 if (vha->flags.init_done)
7056 fc_host_supported_classes(vha->host) =
7061 if (vha->flags.init_done)
7062 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
7069 qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
7072 struct qla_hw_data *ha = vha->hw;
7094 qlt_modify_vp_config(struct scsi_qla_host *vha,
7098 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
7102 if (qla_tgt_mode_enabled(vha))
7136 scsi_qla_host_t *vha;
7142 vha = pci_get_drvdata(ha->pdev);
7146 qlt_24xx_process_atio_queue(vha, 0);
7158 scsi_qla_host_t *vha = op->vha;
7159 struct qla_hw_data *ha = vha->hw;
7162 if (qla2x00_reset_active(vha) ||
7167 qlt_24xx_process_atio_queue(vha, 0);
7171 qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio);
7178 qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp,
7189 qlt_response_pkt_all_vps(vha, rsp, pkt);
7194 op->vha = vha;
7195 op->chip_reset = vha->hw->base_qpair->chip_reset;