Lines Matching refs:vha

116 static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
118 static void qlt_disable_vha(struct scsi_qla_host *vha);
124 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
126 static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
162 void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
164 scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
171 static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
174 if (unlikely(vha->marker_needed != 0)) {
175 int rc = qla2x00_issue_marker(vha, vha_locked);
178 ql_dbg(ql_dbg_tgt, vha, 0xe03d,
180 vha->vp_idx);
188 struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
194 if (vha->d_id.b.area == d_id.area &&
195 vha->d_id.b.domain == d_id.domain &&
196 vha->d_id.b.al_pa == d_id.al_pa)
197 return vha;
201 host = btree_lookup32(&vha->hw->tgt.host_map, key);
203 ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
210 struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
213 struct qla_hw_data *ha = vha->hw;
215 if (vha->vp_idx == vp_idx)
216 return vha;
220 return ha->tgt.tgt_vp_map[vp_idx].vha;
225 static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
229 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
231 vha->hw->tgt.num_pend_cmds++;
232 if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
233 vha->qla_stats.stat_max_pend_cmds =
234 vha->hw->tgt.num_pend_cmds;
235 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
237 static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
241 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
242 vha->hw->tgt.num_pend_cmds--;
243 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
247 static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
251 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
255 ql_dbg(ql_dbg_async, vha, 0x502c,
257 vha->vp_idx);
265 u->vha = vha;
269 spin_lock_irqsave(&vha->cmd_list_lock, flags);
270 list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
271 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
273 schedule_delayed_work(&vha->unknown_atio_work, 1);
279 qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
283 static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
288 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
292 list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
294 ql_dbg(ql_dbg_async, vha, 0x502e,
297 qlt_send_term_exchange(vha->hw->base_qpair, NULL,
302 host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
304 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
308 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
311 qlt_send_term_exchange(vha->hw->base_qpair, NULL,
314 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
315 "Reschedule u %p, vha %p, host %p\n", u, vha, host);
318 schedule_delayed_work(&vha->unknown_atio_work,
325 spin_lock_irqsave(&vha->cmd_list_lock, flags);
327 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
334 struct scsi_qla_host *vha = container_of(to_delayed_work(work),
337 qlt_try_to_dequeue_unknown_atios(vha, 0);
340 static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
343 ql_dbg(ql_dbg_tgt, vha, 0xe072,
345 __func__, vha->vp_idx, atio->u.raw.entry_type,
351 struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
354 ql_dbg(ql_dbg_tgt, vha, 0xe03e,
356 "with unknown d_id %x:%x:%x\n", vha->vp_idx,
362 qlt_queue_unknown_atio(vha, atio, ha_locked);
365 if (unlikely(!list_empty(&vha->unknown_atio_list)))
366 qlt_try_to_dequeue_unknown_atios(vha, ha_locked);
374 struct scsi_qla_host *host = vha;
378 qlt_issue_marker(vha, ha_locked);
382 host = qlt_find_host_by_vp_idx(vha,
385 ql_dbg(ql_dbg_tgt, vha, 0xe03f,
389 vha->vp_idx, entry->u.isp24.vp_index);
398 qla24xx_report_id_acquisition(vha,
406 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
411 ql_dbg(ql_dbg_tgt, vha, 0xe00a,
414 vha->vp_idx, entry->vp_index);
428 ql_dbg(ql_dbg_tgt, vha, 0xe040,
430 "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
437 void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
442 ql_dbg(ql_dbg_tgt, vha, 0xe073,
444 vha->vp_idx, __func__);
449 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
452 ql_dbg(ql_dbg_tgt, vha, 0xe041,
455 vha->vp_idx, entry->vp_index);
468 host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
470 ql_dbg(ql_dbg_tgt, vha, 0xe042,
473 vha->vp_idx, entry->u.isp24.vp_index);
482 struct scsi_qla_host *host = vha;
486 host = qlt_find_host_by_vp_idx(vha,
489 ql_dbg(ql_dbg_tgt, vha, 0xe043,
493 "vp_index %d\n", vha->vp_idx,
506 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
509 ql_dbg(ql_dbg_tgt, vha, 0xe044,
512 "vp_index %d\n", vha->vp_idx, entry->vp_index);
523 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
526 ql_dbg(ql_dbg_tgt, vha, 0xe045,
529 "vp_index %d\n", vha->vp_idx, entry->vp_index);
536 qlt_response_pkt(vha, rsp, pkt);
545 static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
550 e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
557 return qla2x00_post_work(vha, e);
562 struct scsi_qla_host *vha = sp->vha;
565 ql_dbg(ql_dbg_disc, vha, 0x20f2,
569 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
571 sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
591 vha->fcport_count++;
592 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
594 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
610 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
615 int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
639 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
647 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
652 ql_dbg(ql_dbg_disc, vha, 0x20f4,
669 void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
678 mutex_lock(&vha->vha_tgt.tgt_mutex);
679 t = qlt_create_sess(vha, e->u.nack.fcport, 0);
680 mutex_unlock(&vha->vha_tgt.tgt_mutex);
682 ql_log(ql_log_info, vha, 0xd034,
685 vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
689 qla24xx_async_notify_ack(vha, e->u.nack.fcport,
696 struct qla_hw_data *ha = fcport->vha->hw;
709 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
711 struct qla_hw_data *ha = vha->hw;
712 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
716 if (!vha->hw->tgt.tgt_ops)
733 mutex_lock(&vha->vha_tgt.tgt_mutex);
734 sess = qlt_create_sess(vha, fcport, false);
735 mutex_unlock(&vha->vha_tgt.tgt_mutex);
745 ql_dbg(ql_dbg_disc, vha, 0x2107,
752 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
754 "(loop ID %d) reappeared\n", vha->vp_idx,
757 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
766 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
768 "port %8phC (loop ID %d) became global\n", vha->vp_idx,
783 qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
788 lockdep_assert_held(&vha->hw->hardware_lock);
790 list_for_each_entry(pla, &vha->plogi_ack_list, list) {
792 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d,
796 qlt_send_term_imm_notif(vha, &pla->iocb, 1);
804 ql_dbg(ql_dbg_async, vha, 0x5088,
806 vha->vp_idx);
812 list_add_tail(&pla->list, &vha->plogi_ack_list);
817 void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
831 ql_dbg(ql_dbg_disc, vha, 0x5089,
849 qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
851 qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);
853 list_for_each_entry(fcport, &vha->vp_fcports, list) {
865 qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
872 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
892 qlt_plogi_ack_unref(vha, sess->plogi_link[link]);
915 qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
920 mutex_lock(&vha->vha_tgt.tgt_mutex);
922 list_for_each_entry(tmp, &vha->logo_list, list) {
925 mutex_unlock(&vha->vha_tgt.tgt_mutex);
930 list_add_tail(&logo->list, &vha->logo_list);
932 mutex_unlock(&vha->vha_tgt.tgt_mutex);
934 res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);
936 mutex_lock(&vha->vha_tgt.tgt_mutex);
938 mutex_unlock(&vha->vha_tgt.tgt_mutex);
940 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
951 struct scsi_qla_host *vha = sess->vha;
952 struct qla_hw_data *ha = vha->hw;
959 ql_dbg(ql_dbg_disc, vha, 0xf084,
968 qla2x00_mark_device_lost(vha, sess, 0);
976 qlt_send_first_logo(vha, &logo);
986 rc = qla2x00_post_async_logout_work(vha, sess,
989 ql_log(ql_log_warn, vha, 0xf085,
996 rc = qla2x00_post_async_prlo_work(vha, sess,
999 ql_log(ql_log_warn, vha, 0xf085,
1026 ql_dbg(ql_dbg_disc, vha, 0xf086,
1037 ql_dbg(ql_dbg_disc, vha, 0xf087,
1043 qla24xx_async_notify_ack(vha, sess,
1058 vha->fcport_count--;
1067 if (!test_bit(UNLOADING, &vha->dpc_flags))
1068 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1080 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
1087 qlt_plogi_ack_unref(vha, con);
1090 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
1100 qlt_plogi_ack_unref(vha, own);
1108 qla2x00_dfs_remove_rport(vha, sess);
1110 spin_lock_irqsave(&vha->work_lock, flags);
1114 spin_unlock_irqrestore(&vha->work_lock, flags);
1116 ql_dbg(ql_dbg_disc, vha, 0xf001,
1118 sess, sess->port_name, vha->fcport_count);
1124 !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) &&
1125 (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
1126 switch (vha->host->active_mode) {
1129 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1130 qla2xxx_wake_dpc(vha);
1139 if (vha->fcport_count == 0)
1140 wake_up_all(&vha->fcport_waitQ);
1146 struct scsi_qla_host *vha = sess->vha;
1149 ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
1153 spin_lock_irqsave(&sess->vha->work_lock, flags);
1155 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
1165 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
1168 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
1174 queue_work(sess->vha->hw->wq, &sess->free_work);
1178 static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
1180 struct qla_hw_data *ha = vha->hw;
1190 atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
1192 qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
1196 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
1200 ql_dbg(ql_dbg_tgt, vha, 0xe000,
1207 ql_dbg(ql_dbg_tgt, vha, 0xe047,
1209 "loop_id %d)\n", vha->host_no, sess, sess->port_name,
1217 if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
1239 if (sess->vha->fcport_count == 0)
1240 wake_up_all(&sess->vha->fcport_waitQ);
1254 ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
1263 spin_lock_irqsave(&sess->vha->work_lock, flags);
1265 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
1269 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
1276 ql_dbg(ql_dbg_disc, sess->vha, 0xe001,
1280 WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
1286 scsi_qla_host_t *vha = tgt->vha;
1288 list_for_each_entry(sess, &vha->vp_fcports, list) {
1296 static int qla24xx_get_loop_id(struct scsi_qla_host *vha, be_id_t s_id,
1299 struct qla_hw_data *ha = vha->hw;
1308 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
1310 vha->vp_idx, qla2x00_gid_list_size(ha));
1315 rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
1317 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
1319 vha->vp_idx, rc);
1348 struct scsi_qla_host *vha,
1352 struct qla_hw_data *ha = vha->hw;
1356 if (vha->vha_tgt.qla_tgt->tgt_stop)
1361 ql_dbg(ql_dbg_disc, vha, 0x20f6,
1368 sess->tgt = vha->vha_tgt.qla_tgt;
1381 if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
1383 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
1385 vha->vp_idx, fcport->port_name);
1394 ql_dbg(ql_dbg_disc, vha, 0x20f7,
1402 vha->vha_tgt.qla_tgt->sess_count++;
1404 qlt_do_generation_tick(vha, &sess->generation);
1408 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
1410 sess, sess->se_sess, vha->vha_tgt.qla_tgt,
1411 vha->vha_tgt.qla_tgt->sess_count);
1413 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
1416 vha->vp_idx, local ? "local " : "", fcport->port_name,
1428 qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
1430 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
1434 if (!vha->hw->tgt.tgt_ops)
1440 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1442 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1446 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1451 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1452 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
1460 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
1463 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1477 ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
1489 struct scsi_qla_host *vha = tgt->vha;
1497 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
1504 ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
1505 vha->host_no, vha);
1510 mutex_lock(&vha->vha_tgt.tgt_mutex);
1513 mutex_unlock(&vha->vha_tgt.tgt_mutex);
1516 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
1526 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
1533 (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
1534 qlt_disable_vha(vha);
1547 scsi_qla_host_t *vha = tgt->vha;
1550 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
1556 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
1563 mutex_lock(&vha->vha_tgt.tgt_mutex);
1566 mutex_unlock(&vha->vha_tgt.tgt_mutex);
1569 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
1572 switch (vha->qlini_mode) {
1574 vha->flags.online = 1;
1575 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1586 scsi_qla_host_t *vha = tgt->vha;
1591 struct qla_hw_data *ha = vha->hw;
1599 for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
1612 list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
1620 if (vha->vp_idx)
1623 vha->vha_tgt.target_lport_ptr)
1624 ha->tgt.tgt_ops->remove_target(vha);
1626 vha->vha_tgt.qla_tgt = NULL;
1628 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
1643 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
1649 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
1674 struct scsi_qla_host *vha = qpair->vha;
1675 struct qla_hw_data *ha = vha->hw;
1682 ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
1686 ql_dbg(ql_dbg_tgt, vha, 0xe049,
1688 "request packet\n", vha->vp_idx, __func__);
1692 if (vha->vha_tgt.qla_tgt != NULL)
1693 vha->vha_tgt.qla_tgt->notify_ack_expected++;
1719 ql_dbg(ql_dbg_tgt, vha, 0xe005,
1721 vha->vp_idx, nack->u.isp24.status);
1725 qla2x00_start_iocbs(vha, qpair->req);
1730 struct scsi_qla_host *vha = mcmd->vha;
1731 struct qla_hw_data *ha = vha->hw;
1740 ql_dbg(ql_dbg_tgt, vha, 0xe006,
1746 ql_dbg(ql_dbg_tgt, vha, 0xe04a,
1748 vha->vp_idx, __func__);
1771 resp->vp_index = vha->vp_idx;
1801 vha->vha_tgt.qla_tgt->abts_resp_expected++;
1808 qla2x00_start_iocbs(vha, qpair->req);
1820 struct scsi_qla_host *vha = qpair->vha;
1821 struct qla_hw_data *ha = vha->hw;
1826 ql_dbg(ql_dbg_tgt, vha, 0xe006,
1833 ql_dbg(ql_dbg_tgt, vha, 0xe04a,
1835 "request packet", vha->vp_idx, __func__);
1843 resp->vp_index = vha->vp_idx;
1876 vha->vha_tgt.qla_tgt->abts_resp_expected++;
1883 qla2x00_start_iocbs(vha, qpair->req);
1889 static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1898 ql_dbg(ql_dbg_tgt, vha, 0xe04b,
1900 "request packet\n", vha->vp_idx, __func__);
1921 ctio->vp_index = vha->vp_idx;
1941 ql_dbg(ql_dbg_tgt, vha, 0xe007,
1952 qla2x00_start_iocbs(vha, qpair->req);
1967 static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id)
1975 spin_lock_irqsave(&vha->cmd_list_lock, flags);
1976 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1987 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
1998 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
2008 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
2011 static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha,
2014 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2017 if (vha->flags.qpairs_available) {
2032 struct qla_hw_data *ha = mcmd->vha->hw;
2074 ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052,
2076 mcmd->vha->vp_idx, rc);
2082 static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2085 struct qla_hw_data *ha = vha->hw;
2087 struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
2089 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
2091 vha->vp_idx, abts->exchange_addr_to_abort);
2095 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
2097 vha->vp_idx, __func__);
2107 mcmd->vha = vha;
2137 static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2140 struct qla_hw_data *ha = vha->hw;
2148 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
2150 "supported\n", vha->vp_idx);
2157 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
2159 "Address received\n", vha->vp_idx);
2165 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
2167 "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id.domain,
2174 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
2176 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
2178 vha->vp_idx);
2194 rc = __qlt_24xx_handle_abts(vha, abts, sess);
2196 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
2198 vha->vp_idx, rc);
2211 struct scsi_qla_host *ha = mcmd->vha;
2271 struct scsi_qla_host *vha = cmd->vha;
2273 ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
2274 "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
2276 vha, atio, scsi_status, sense_key, asc, ascq);
2278 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
2280 ql_dbg(ql_dbg_async, vha, 0x3067,
2282 vha->host_no, __func__);
2291 ctio->vp_index = vha->vp_idx;
2323 qla2x00_start_iocbs(vha, qpair->req);
2332 struct scsi_qla_host *vha = mcmd->sess->vha;
2333 struct qla_hw_data *ha = vha->hw;
2338 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
2344 if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) {
2349 ql_dbg(ql_dbg_async, vha, 0xe100,
2351 vha->flags.online, qla2x00_reset_active(vha),
2363 ql_dbg(ql_dbg_disc, vha, 0x2106,
2370 qlt_send_notify_ack(vha->hw->base_qpair,
2458 static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
2478 ha = vha->hw;
2552 ql_dbg(ql_dbg_io, qpair->vha, 0x305b,
2554 qpair->vha->vp_idx);
2691 struct scsi_qla_host *vha;
2696 vha = cmd->vha;
2700 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
2707 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
2714 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
2721 ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
2728 ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
2788 (IS_FWI2_CAPABLE(cmd->vha->hw) &&
2921 scsi_qla_host_t *vha = cmd->tgt->vha;
2922 struct qla_hw_data *ha = vha->hw;
3020 scsi_qla_host_t *vha = cmd->vha;
3022 ha = vha->hw;
3172 tc.vha = vha;
3216 struct scsi_qla_host *vha = cmd->vha;
3257 vha->flags.online, qla2x00_reset_active(vha),
3354 qla2x00_start_iocbs(vha, qpair->req);
3360 qlt_unmap_sg(vha, cmd);
3370 struct scsi_qla_host *vha = cmd->vha;
3392 vha->hw->tgt.tgt_ops->handle_data(cmd);
3395 vha->flags.online, qla2x00_reset_active(vha),
3435 qla2x00_start_iocbs(vha, qpair->req);
3441 qlt_unmap_sg(vha, cmd);
3461 struct scsi_qla_host *vha = cmd->vha;
3473 ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
3480 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
3496 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
3513 ql_dbg(ql_dbg_tgt_dif, vha, 0xe012,
3531 vha->hw->tgt.tgt_ops->handle_data(cmd);
3537 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3547 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3554 static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3558 struct qla_hw_data *ha = vha->hw;
3562 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
3565 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
3567 ql_dbg(ql_dbg_tgt, vha, 0xe080,
3569 "request packet\n", vha->vp_idx, __func__);
3599 qla2x00_start_iocbs(vha, vha->req);
3603 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3609 rc = __qlt_send_term_imm_notif(vha, imm);
3621 struct scsi_qla_host *vha = qpair->vha;
3623 struct qla_hw_data *ha = vha->hw;
3628 ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
3631 vha = cmd->vha;
3635 ql_dbg(ql_dbg_tgt, vha, 0xe050,
3637 "request packet\n", vha->vp_idx, __func__);
3643 ql_dbg(ql_dbg_tgt, vha, 0xe051,
3645 "incorrect state %d\n", vha->vp_idx, cmd,
3659 ctio24->vp_index = vha->vp_idx;
3673 qla2x00_start_iocbs(vha, qpair->req);
3681 struct scsi_qla_host *vha;
3685 /* why use different vha? NPIV */
3687 vha = cmd->vha;
3689 vha = qpair->vha;
3694 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3700 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3705 qlt_unmap_sg(vha, cmd);
3706 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3715 static void qlt_init_term_exchange(struct scsi_qla_host *vha)
3720 vha->hw->tgt.leak_exchg_thresh_hold =
3721 (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
3724 if (!list_empty(&vha->hw->tgt.q_full_list)) {
3726 list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
3734 vha->hw->tgt.num_qfull_cmds_alloc--;
3737 vha->hw->tgt.num_qfull_cmds_dropped = 0;
3740 static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
3744 total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
3746 if (vha->hw->tgt.leak_exchg_thresh_hold &&
3747 (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
3749 ql_dbg(ql_dbg_tgt, vha, 0xe079,
3751 total_leaked, vha->hw->cur_fw_xcb_count);
3753 if (IS_P3P_TYPE(vha->hw))
3754 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
3756 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3757 qla2xxx_wake_dpc(vha);
3765 struct scsi_qla_host *vha = tgt->vha;
3769 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
3771 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
3777 qlt_unmap_sg(vha, cmd);
3785 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
3804 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
3812 qlt_decr_num_pend_cmds(cmd->vha);
3824 cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
3835 struct scsi_qla_host *vha = qpair->vha;
3838 ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
3863 static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
3876 } else if (vha->hw->req_q_map[qid]) {
3877 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a,
3879 vha->vp_idx, rsp->id, handle);
3880 req = vha->hw->req_q_map[qid];
3889 ql_dbg(ql_dbg_tgt, vha, 0xe052,
3891 vha->vp_idx, handle);
3897 ql_dbg(ql_dbg_async, vha, 0xe053,
3899 vha->vp_idx, handle, req->id, rsp->id);
3905 ql_dbg(ql_dbg_tgt, vha, 0xe054,
3907 "support NULL handles\n", vha->vp_idx);
3917 static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
3920 struct qla_hw_data *ha = vha->hw;
3928 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
3935 cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
3942 qlt_unmap_sg(vha, cmd);
3948 dev_info(&vha->hw->pdev->dev,
3950 vha->vp_idx, cmd->atio.u.isp24.attr,
3961 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
3965 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
3975 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
3977 "received (state %x, se_cmd %p)\n", vha->vp_idx,
3987 ql_dbg(ql_dbg_disc, vha, 0x20f8,
3998 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
4002 vha->vp_idx, status, cmd->state, se_cmd,
4010 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
4012 vha->vp_idx, status, cmd->state, se_cmd);
4043 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
4047 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
4049 "not return a CTIO complete\n", vha->vp_idx, cmd->state);
4054 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
4061 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
4083 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
4098 scsi_qla_host_t *vha = cmd->vha;
4099 struct qla_hw_data *ha = vha->hw;
4112 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
4133 fcp_task_attr = qlt_get_fcp_task_attr(vha,
4137 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
4148 ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
4157 qlt_decr_num_pend_cmds(vha);
4158 cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
4167 scsi_qla_host_t *vha = cmd->vha;
4170 spin_lock_irqsave(&vha->cmd_list_lock, flags);
4172 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4177 void qlt_clr_qp_table(struct scsi_qla_host *vha)
4180 struct qla_hw_data *ha = vha->hw;
4181 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4185 ql_log(ql_log_info, vha, 0x706c,
4202 static void qlt_assign_qpair(struct scsi_qla_host *vha,
4206 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4209 if (vha->flags.qpairs_available) {
4215 pci_get_drvdata(vha->hw->pdev);
4217 qpair = vha->hw->base_qpair;
4226 ql_log(ql_log_info, vha, 0xd037,
4246 ql_log(ql_log_info, vha, 0xd038,
4268 ql_log(ql_log_info, vha, 0xd039,
4281 static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
4287 cmd = vha->hw->tgt.tgt_ops->get_cmd(sess);
4294 cmd->tgt = vha->vha_tgt.qla_tgt;
4295 qlt_incr_num_pend_cmds(vha);
4296 cmd->vha = vha;
4306 qlt_assign_qpair(vha, cmd);
4307 cmd->reset_count = vha->hw->base_qpair->chip_reset;
4308 cmd->vp_idx = vha->vp_idx;
4314 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
4317 struct qla_hw_data *ha = vha->hw;
4318 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4325 ql_dbg(ql_dbg_io, vha, 0x3061,
4334 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
4341 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
4351 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
4358 cmd = qlt_get_tag(vha, sess, atio);
4360 ql_dbg(ql_dbg_io, vha, 0x3062,
4361 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
4369 spin_lock_irqsave(&vha->cmd_list_lock, flags);
4370 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
4371 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4374 if (vha->flags.qpairs_available) {
4394 struct scsi_qla_host *vha = sess->vha;
4395 struct qla_hw_data *ha = vha->hw;
4398 struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
4402 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
4405 "leak\n", vha->vp_idx);
4419 mcmd->vha = vha;
4427 abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
4430 h = qlt_find_qphint(vha, mcmd->unpacked_lun);
4452 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
4455 struct qla_hw_data *ha = vha->hw;
4464 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4478 static int __qlt_abort_task(struct scsi_qla_host *vha,
4482 struct qla_hw_data *ha = vha->hw;
4489 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
4491 vha->vp_idx, __func__);
4509 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
4511 vha->vp_idx, rc);
4520 static int qlt_abort_task(struct scsi_qla_host *vha,
4523 struct qla_hw_data *ha = vha->hw;
4531 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
4535 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
4537 "session\n", vha->vp_idx);
4538 return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
4542 return __qlt_abort_task(vha, iocb, sess);
4548 ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
4571 qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
4579 list_for_each_entry(other_sess, &vha->vp_fcports, list) {
4592 ql_dbg(ql_dbg_disc, vha, 0x1000c,
4608 ql_dbg(ql_dbg_disc, vha, 0xf01b,
4623 ql_dbg(ql_dbg_disc, vha, 0x1000d,
4637 static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
4649 spin_lock_irqsave(&vha->cmd_list_lock, flags);
4650 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
4659 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
4668 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
4676 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4681 static int qlt_handle_login(struct scsi_qla_host *vha,
4692 lockdep_assert_held(&vha->hw->hardware_lock);
4704 abort_cmds_for_s_id(vha, &port_id);
4707 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4708 sess = qlt_find_sess_invalidate_other(vha, wwn,
4710 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4712 ql_dbg(ql_dbg_disc, vha, 0xffff,
4715 qlt_send_term_imm_notif(vha, iocb, 1);
4724 pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
4726 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
4730 qlt_send_term_imm_notif(vha, iocb, 1);
4736 qlt_plogi_ack_link(vha, pla, conflict_sess,
4742 ql_dbg(ql_dbg_disc, vha, 0xffff,
4746 qla24xx_post_newsess_work(vha, &port_id,
4751 qla24xx_post_newsess_work(vha, &port_id,
4772 ql_dbg(ql_dbg_disc, vha, 0xffff,
4782 qlt_send_term_imm_notif(vha, iocb, 1);
4786 qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
4810 ql_dbg(ql_dbg_disc, vha, 0x20f9,
4817 qlt_plogi_ack_unref(vha, pla);
4842 ql_dbg(ql_dbg_disc, vha, 0x20f9,
4857 static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4860 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4861 struct qla_hw_data *ha = vha->hw;
4881 ql_dbg(ql_dbg_disc, vha, 0xf026,
4883 vha->vp_idx, iocb->u.isp24.port_id[2],
4893 res = qlt_handle_login(vha, iocb);
4898 sess = qla2x00_find_fcport_by_wwpn(vha,
4902 ql_dbg(ql_dbg_disc, vha, 0xffff,
4906 qlt_send_term_imm_notif(vha, iocb, 1);
4910 res = qlt_handle_login(vha, iocb);
4923 sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
4934 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
4939 qlt_send_term_imm_notif(vha, iocb, 1);
4970 ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
4974 qlt_send_term_imm_notif(vha, iocb, 1);
4995 ql_log(ql_log_warn, sess->vha, 0xf095,
4998 qlt_send_term_imm_notif(vha, iocb, 1);
5007 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
5031 ql_dbg(ql_dbg_disc, vha, 0x20fa,
5034 qla24xx_post_nack_work(vha, sess, iocb,
5038 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5039 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5040 qla2xxx_wake_dpc(vha);
5044 ql_dbg(ql_dbg_disc, vha, 0x20fb,
5047 qla24xx_post_nack_work(vha, sess, iocb,
5058 qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
5066 sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
5076 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
5078 ql_dbg(ql_dbg_disc, vha, 0x20fc,
5100 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5108 sess = qla2x00_find_fcport_by_wwpn(vha,
5111 ql_dbg(ql_dbg_disc, vha, 0x20fd,
5123 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
5125 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
5126 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
5130 ql_dbg(ql_dbg_disc, vha, 0xf026,
5132 vha->vp_idx, iocb->u.isp24.status_subcode, res);
5140 static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
5143 struct qla_hw_data *ha = vha->hw;
5154 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
5156 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
5159 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
5166 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5168 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
5170 "subcode %x)\n", vha->vp_idx,
5188 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
5190 "%#x, subcode %x)\n", vha->vp_idx,
5194 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
5200 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
5201 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
5202 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5208 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
5209 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
5211 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
5217 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
5219 vha->vp_idx);
5221 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5226 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
5229 "resource count)\n", vha->vp_idx);
5233 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
5235 "L %#x)\n", vha->vp_idx,
5239 if (qlt_abort_task(vha, iocb) == 0)
5244 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
5246 vha->vp_idx, vha->host_no);
5250 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
5252 vha->vp_idx, iocb->u.isp2x.task_flags);
5256 if (qlt_24xx_handle_els(vha, iocb) == 0)
5260 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
5262 "notify status %x\n", vha->vp_idx, status);
5278 struct scsi_qla_host *vha = qpair->vha;
5280 struct qla_hw_data *ha = vha->hw;
5290 sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
5300 ql_dbg(ql_dbg_io, vha, 0x3063,
5302 "request packet", vha->vp_idx, __func__);
5314 ctio24->vp_index = vha->vp_idx;
5339 qla2x00_start_iocbs(vha, qpair->req);
5349 qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
5352 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5353 struct qla_hw_data *ha = vha->hw;
5359 ql_dbg(ql_dbg_io, vha, 0x300a,
5364 if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
5365 vha->hw->tgt.num_qfull_cmds_dropped++;
5366 if (vha->hw->tgt.num_qfull_cmds_dropped >
5367 vha->qla_stats.stat_max_qfull_cmds_dropped)
5368 vha->qla_stats.stat_max_qfull_cmds_dropped =
5369 vha->hw->tgt.num_qfull_cmds_dropped;
5371 ql_dbg(ql_dbg_io, vha, 0x3068,
5373 vha->vp_idx, __func__,
5374 vha->hw->tgt.num_qfull_cmds_dropped);
5376 qlt_chk_exch_leak_thresh_hold(vha);
5381 (vha, atio->u.isp24.fcp_hdr.s_id);
5387 ql_dbg(ql_dbg_io, vha, 0x3009,
5389 vha->vp_idx, __func__);
5391 vha->hw->tgt.num_qfull_cmds_dropped++;
5392 if (vha->hw->tgt.num_qfull_cmds_dropped >
5393 vha->qla_stats.stat_max_qfull_cmds_dropped)
5394 vha->qla_stats.stat_max_qfull_cmds_dropped =
5395 vha->hw->tgt.num_qfull_cmds_dropped;
5397 qlt_chk_exch_leak_thresh_hold(vha);
5401 qlt_incr_num_pend_cmds(vha);
5405 cmd->tgt = vha->vha_tgt.qla_tgt;
5406 cmd->vha = vha;
5418 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5419 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
5421 vha->hw->tgt.num_qfull_cmds_alloc++;
5422 if (vha->hw->tgt.num_qfull_cmds_alloc >
5423 vha->qla_stats.stat_max_qfull_cmds_alloc)
5424 vha->qla_stats.stat_max_qfull_cmds_alloc =
5425 vha->hw->tgt.num_qfull_cmds_alloc;
5426 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5432 struct scsi_qla_host *vha = qpair->vha;
5433 struct qla_hw_data *ha = vha->hw;
5445 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5447 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5451 list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
5452 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5466 ql_dbg(ql_dbg_io, vha, 0x3006,
5470 ql_dbg(ql_dbg_io, vha, 0x3007,
5474 ql_dbg(ql_dbg_io, vha, 0x3008,
5482 vha->hw->tgt.num_qfull_cmds_alloc--;
5497 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5498 list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
5499 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5510 struct scsi_qla_host *vha = qpair->vha;
5514 qlt_alloc_qfull_cmd(vha, atio, status, 1);
5518 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
5521 struct qla_hw_data *ha = vha->hw;
5538 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5541 struct qla_hw_data *ha = vha->hw;
5542 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5547 ql_dbg(ql_dbg_tgt, vha, 0x3064,
5562 ql_dbg(ql_dbg_io, vha, 0x3065,
5565 "sending QUEUE_FULL\n", vha->vp_idx);
5576 rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair,
5582 rc = qlt_handle_cmd_for_atio(vha, atio);
5584 rc = qlt_handle_task_mgmt(vha, atio);
5591 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5595 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5601 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5603 vha->vp_idx);
5608 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5610 vha->vp_idx);
5624 ql_dbg(ql_dbg_tgt, vha, 0xe05b,
5626 "with error status %x\n", vha->vp_idx,
5631 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
5635 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
5642 ql_dbg(ql_dbg_tgt, vha, 0xe05c,
5644 "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
5656 static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
5659 struct qla_hw_data *ha = vha->hw;
5675 ql_log(ql_log_warn, vha, 0xffff,
5678 vha, 0xffff, (uint8_t *)entry, sizeof(*entry));
5681 ha->isp_ops->fw_dump(vha);
5683 qla2xxx_dump_fw(vha);
5685 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
5686 qla2xxx_wake_dpc(vha);
5698 static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
5705 struct qla_hw_data *ha = vha->hw;
5707 mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt);
5709 ql_dbg(ql_dbg_async, vha, 0xe064,
5711 vha->vp_idx);
5716 vha = mcmd->vha;
5717 vha->vha_tgt.qla_tgt->abts_resp_expected--;
5719 ql_dbg(ql_dbg_tgt, vha, 0xe038,
5726 if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
5730 qlt_24xx_retry_term_exchange(vha, rsp->qpair,
5733 ql_dbg(ql_dbg_tgt, vha, 0xe063,
5735 vha->vp_idx, entry->compl_status,
5747 static void qlt_response_pkt(struct scsi_qla_host *vha,
5750 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5753 ql_dbg(ql_dbg_tgt, vha, 0xe05d,
5755 vha->vp_idx, pkt->entry_type, vha->hw);
5770 qlt_do_ctio_completion(vha, rsp, entry->handle,
5783 ql_dbg(ql_dbg_tgt, vha, 0xe05e,
5785 "status %x received\n", vha->vp_idx,
5790 rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1);
5794 rc = qlt_handle_cmd_for_atio(vha, atio);
5798 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5802 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5808 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5810 vha->vp_idx);
5815 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5817 vha->vp_idx);
5830 qlt_do_ctio_completion(vha, rsp, entry->handle,
5840 qlt_do_ctio_completion(vha, rsp, entry->handle,
5847 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
5848 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
5855 ql_dbg(ql_dbg_tgt, vha, 0xe036,
5862 ql_dbg(ql_dbg_tgt, vha, 0xe061,
5864 "failed %x\n", vha->vp_idx,
5868 ql_dbg(ql_dbg_tgt, vha, 0xe062,
5870 vha->vp_idx);
5875 ql_dbg(ql_dbg_tgt, vha, 0xe037,
5876 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
5877 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
5882 qlt_handle_abts_completion(vha, rsp, pkt);
5884 ql_dbg(ql_dbg_tgt, vha, 0xe064,
5886 "received\n", vha->vp_idx);
5891 ql_dbg(ql_dbg_tgt, vha, 0xe065,
5893 "type %x\n", vha->vp_idx, pkt->entry_type);
5902 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
5905 struct qla_hw_data *ha = vha->hw;
5906 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5926 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
5928 "occurred", vha->vp_idx, code);
5931 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
5936 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
5938 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
5953 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
5955 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
5960 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
5962 vha->vp_idx,
5967 vha->hw->exch_starvation++;
5968 if (vha->hw->exch_starvation > 5) {
5969 ql_log(ql_log_warn, vha, 0xd03a,
5972 vha->hw->exch_starvation = 0;
5973 if (IS_P3P_TYPE(vha->hw))
5975 &vha->dpc_flags);
5978 &vha->dpc_flags);
5979 qla2xxx_wake_dpc(vha);
5985 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
5988 "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
5993 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
5995 vha->hw->exch_starvation = 0;
5997 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
6006 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
6014 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
6016 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
6018 vha->vp_idx);
6024 rc = qla24xx_gpdb_wait(vha, fcport, 0);
6026 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
6029 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
6035 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
6036 tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1);
6048 if (vha->hw->current_topology == ISP_CFG_F)
6051 list_add_tail(&fcport->list, &vha->vp_fcports);
6053 vha->fcport_count++;
6061 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
6063 switch (vha->host->active_mode) {
6067 if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
6070 ql_dbg(ql_dbg_disc, vha, 0x20ff,
6072 __func__, __LINE__, fcport->port_name, vha->fcport_count);
6073 qla24xx_post_gpsc_work(vha, fcport);
6089 static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
6102 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
6108 mutex_lock(&vha->vha_tgt.tgt_mutex);
6112 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
6114 rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
6116 mutex_unlock(&vha->vha_tgt.tgt_mutex);
6118 ql_log(ql_log_info, vha, 0xf071,
6121 vha->vp_idx, s_id.domain, s_id.area, s_id.al_pa);
6128 qlt_send_first_logo(vha, &logo);
6134 fcport = qlt_get_port_database(vha, loop_id);
6136 mutex_unlock(&vha->vha_tgt.tgt_mutex);
6141 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
6142 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
6144 "(counter was %d, new %d), retrying", vha->vp_idx,
6146 atomic_read(&vha->vha_tgt.
6151 sess = qlt_create_sess(vha, fcport, true);
6153 mutex_unlock(&vha->vha_tgt.tgt_mutex);
6161 struct scsi_qla_host *vha = tgt->vha;
6162 struct qla_hw_data *ha = vha->hw;
6175 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
6179 sess = qlt_make_local_sess(vha, s_id);
6192 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
6200 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
6223 struct scsi_qla_host *vha = tgt->vha;
6224 struct qla_hw_data *ha = vha->hw;
6239 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
6243 sess = qlt_make_local_sess(vha, s_id);
6255 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
6285 struct scsi_qla_host *vha = tgt->vha;
6288 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
6395 tgt->vha = base_vha;
6422 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
6424 if (!vha->vha_tgt.qla_tgt)
6427 if (vha->fc_vport) {
6428 qlt_release(vha->vha_tgt.qla_tgt);
6433 qlt_init_term_exchange(vha);
6435 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
6436 vha->host_no, ha);
6437 qlt_release(vha->vha_tgt.qla_tgt);
6453 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
6456 pr_debug("qla2xxx HW vha->node_name: %8phC\n", vha->node_name);
6457 pr_debug("qla2xxx HW vha->port_name: %8phC\n", vha->port_name);
6476 struct scsi_qla_host *vha;
6485 vha = tgt->vha;
6486 ha = vha->hw;
6488 host = vha->host;
6495 if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
6514 ql_dbg(ql_dbg_tgt, vha, 0xe068,
6519 qlt_lport_dump(vha, phys_wwpn, b);
6521 if (memcmp(vha->port_name, b, WWN_SIZE)) {
6525 rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
6541 * @vha: Registered scsi_qla_host pointer
6543 void qlt_lport_deregister(struct scsi_qla_host *vha)
6545 struct qla_hw_data *ha = vha->hw;
6546 struct Scsi_Host *sh = vha->host;
6550 vha->vha_tgt.target_lport_ptr = NULL;
6560 void qlt_set_mode(struct scsi_qla_host *vha)
6562 switch (vha->qlini_mode) {
6565 vha->host->active_mode = MODE_TARGET;
6568 vha->host->active_mode = MODE_INITIATOR;
6571 vha->host->active_mode = MODE_DUAL;
6579 static void qlt_clear_mode(struct scsi_qla_host *vha)
6581 switch (vha->qlini_mode) {
6583 vha->host->active_mode = MODE_UNKNOWN;
6586 vha->host->active_mode = MODE_INITIATOR;
6590 vha->host->active_mode = MODE_INITIATOR;
6603 qlt_enable_vha(struct scsi_qla_host *vha)
6605 struct qla_hw_data *ha = vha->hw;
6606 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6611 ql_dbg(ql_dbg_tgt, vha, 0xe069,
6617 if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
6624 qlt_set_mode(vha);
6628 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
6630 if (vha->vp_idx) {
6631 qla24xx_disable_vp(vha);
6632 qla24xx_enable_vp(vha);
6648 static void qlt_disable_vha(struct scsi_qla_host *vha)
6650 struct qla_hw_data *ha = vha->hw;
6651 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6655 ql_dbg(ql_dbg_tgt, vha, 0xe06a,
6663 qlt_clear_mode(vha);
6666 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
6667 qla2xxx_wake_dpc(vha);
6673 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
6674 ql_dbg(ql_dbg_tgt, vha, 0xe081,
6684 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
6686 vha->vha_tgt.qla_tgt = NULL;
6688 mutex_init(&vha->vha_tgt.tgt_mutex);
6689 mutex_init(&vha->vha_tgt.tgt_host_action_mutex);
6691 qlt_clear_mode(vha);
6701 qlt_add_target(ha, vha);
6705 qlt_rff_id(struct scsi_qla_host *vha)
6711 if (qla_tgt_mode_enabled(vha)) {
6713 } else if (qla_ini_mode_enabled(vha)) {
6715 } else if (qla_dual_mode_enabled(vha))
6731 qlt_init_atio_q_entries(struct scsi_qla_host *vha)
6733 struct qla_hw_data *ha = vha->hw;
6737 if (qla_ini_mode_enabled(vha))
6752 qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
6754 struct qla_hw_data *ha = vha->hw;
6772 ql_log(ql_log_warn, vha, 0xd03c,
6782 qlt_24xx_atio_pkt_all_vps(vha,
6801 wrt_reg_dword(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
6805 qlt_24xx_config_rings(struct scsi_qla_host *vha)
6807 struct qla_hw_data *ha = vha->hw;
6814 wrt_reg_dword(ISP_ATIO_Q_IN(vha), 0);
6815 wrt_reg_dword(ISP_ATIO_Q_OUT(vha), 0);
6816 rd_reg_dword(ISP_ATIO_Q_OUT(vha));
6822 ql_dbg(ql_dbg_init, vha, 0xf072,
6831 ql_dbg(ql_dbg_init, vha, 0xf072,
6838 qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
6840 struct qla_hw_data *ha = vha->hw;
6846 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
6859 if (qla_tgt_mode_enabled(vha))
6862 nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
6868 if (qla_tgt_mode_enabled(vha))
6915 if (vha->flags.init_done)
6916 fc_host_supported_classes(vha->host) =
6921 if (vha->flags.init_done)
6922 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
6929 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
6932 struct qla_hw_data *ha = vha->hw;
6944 qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
6946 struct qla_hw_data *ha = vha->hw;
6952 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
6965 if (qla_tgt_mode_enabled(vha))
6968 nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
6974 if (qla_tgt_mode_enabled(vha))
7018 if (vha->flags.init_done)
7019 fc_host_supported_classes(vha->host) =
7024 if (vha->flags.init_done)
7025 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
7032 qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
7035 struct qla_hw_data *ha = vha->hw;
7057 qlt_modify_vp_config(struct scsi_qla_host *vha,
7061 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
7065 if (qla_tgt_mode_enabled(vha))
7106 scsi_qla_host_t *vha;
7112 vha = pci_get_drvdata(ha->pdev);
7116 qlt_24xx_process_atio_queue(vha, 0);
7128 scsi_qla_host_t *vha = op->vha;
7129 struct qla_hw_data *ha = vha->hw;
7132 if (qla2x00_reset_active(vha) ||
7137 qlt_24xx_process_atio_queue(vha, 0);
7141 qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio);
7148 qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp,
7159 qlt_response_pkt_all_vps(vha, rsp, pkt);
7164 op->vha = vha;
7165 op->chip_reset = vha->hw->base_qpair->chip_reset;
7213 qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
7222 key = vha->d_id.b24;
7226 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
7229 slot = btree_lookup32(&vha->hw->tgt.host_map, key);
7231 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
7232 "Save vha in host_map %p %06x\n", vha, key);
7233 rc = btree_insert32(&vha->hw->tgt.host_map,
7234 key, vha, GFP_ATOMIC);
7236 ql_log(ql_log_info, vha, 0xd03e,
7241 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
7242 "replace existing vha in host_map %p %06x\n", vha, key);
7243 btree_update32(&vha->hw->tgt.host_map, key, vha);
7246 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
7249 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
7250 "clear vha in host_map %p %06x\n", vha, key);
7251 slot = btree_lookup32(&vha->hw->tgt.host_map, key);
7253 btree_remove32(&vha->hw->tgt.host_map, key);
7254 vha->d_id.b24 = 0;
7259 void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
7262 if (!vha->d_id.b24) {
7263 vha->d_id = id;
7264 qlt_update_vp_map(vha, SET_AL_PA);
7265 } else if (vha->d_id.b24 != id.b24) {
7266 qlt_update_vp_map(vha, RESET_AL_PA);
7267 vha->d_id = id;
7268 qlt_update_vp_map(vha, SET_AL_PA);
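
The matches above cluster around one routing pattern: incoming ATIO/response packets are steered to the right scsi_qla_host either by vp_index through ha->tgt.tgt_vp_map[] (qlt_find_host_by_vp_idx, line 210) or by the packed 24-bit d_id key through the host_map btree (qlt_find_host_by_d_id, line 188), and qlt_update_vp_map (line 7213) keeps both structures current as ports appear and disappear. The following is only a minimal standalone sketch of that idea, with simplified hypothetical types and a flat array standing in for the kernel's btree; it is not the qla2xxx code itself.

/*
 * Illustrative sketch only: hypothetical stand-ins for the driver's
 * per-port routing (vp_index -> vha array, d_id -> vha lookup).
 * Type names and sizes here are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define MAX_VPORTS 256

struct sketch_vha {
	uint8_t  vp_idx;        /* virtual-port index                       */
	uint32_t d_id_b24;      /* domain/area/al_pa packed into 24 bits    */
};

struct sketch_hw {
	struct sketch_vha *vp_map[MAX_VPORTS];   /* indexed by vp_idx        */
	struct sketch_vha *host_map[MAX_VPORTS]; /* toy stand-in for the     */
						 /* d_id-keyed host_map btree */
};

/* vp_index routing, in the spirit of qlt_find_host_by_vp_idx() */
static struct sketch_vha *find_by_vp_idx(struct sketch_hw *hw,
					 struct sketch_vha *base, uint8_t vp_idx)
{
	if (base->vp_idx == vp_idx)
		return base;
	return hw->vp_map[vp_idx];
}

/* d_id routing, in the spirit of qlt_find_host_by_d_id();
 * a linear scan replaces the kernel's btree_lookup32() */
static struct sketch_vha *find_by_d_id(struct sketch_hw *hw,
				       struct sketch_vha *base, uint32_t d_id)
{
	size_t i;

	if (base->d_id_b24 == d_id)
		return base;
	for (i = 0; i < MAX_VPORTS; i++)
		if (hw->host_map[i] && hw->host_map[i]->d_id_b24 == d_id)
			return hw->host_map[i];
	return NULL;
}

int main(void)
{
	static struct sketch_hw hw;
	struct sketch_vha base  = { .vp_idx = 0, .d_id_b24 = 0x010203 };
	struct sketch_vha vport = { .vp_idx = 2, .d_id_b24 = 0x010204 };

	/* roughly what qlt_update_vp_map() maintains when a vport
	 * registers its vp_idx and acquires an al_pa */
	hw.vp_map[vport.vp_idx] = &vport;
	hw.host_map[1] = &vport;

	printf("vp_idx 2     -> vha %p\n", (void *)find_by_vp_idx(&hw, &base, 2));
	printf("d_id 010204  -> vha %p\n", (void *)find_by_d_id(&hw, &base, 0x010204));
	return 0;
}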