// SPDX-License-Identifier: GPL-2.0-only
/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "qla_def.h"
#include "qla_target.h"

static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
		"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and enabled again when "
	"target mode is disabled; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"dual\" - initiator mode will be enabled, and target mode "
	"can be activated when ready; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

static int ql_dm_tgt_ex_pct = 0;
module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
	"For Dual Mode (qlini_mode=dual), this parameter determines "
	"the percentage of exchanges/cmds for which the FW will "
	"allocate resources for Target mode.");

int ql2xuctrlirq = 1;
module_param(ql2xuctrlirq, int, 0644);
MODULE_PARM_DESC(ql2xuctrlirq,
    "Allows the user to control IRQ placement via smp_affinity. "
    "Valid with qlini_mode=disabled. "
    "1 (default): enabled");
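
/*
 * Example (hypothetical invocation): the parameters above are qla2xxx
 * module parameters, so a dual-mode setup could be brought up with e.g.
 *
 *   modprobe qla2xxx qlini_mode=dual ql_dm_tgt_ex_pct=50 ql2xuctrlirq=1
 *
 * enabling initiator and target mode together with roughly half of the
 * firmware exchange resources reserved for target mode.
 */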

int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

static int qla_sam_status = SAM_STAT_BUSY;
static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE      0   /* simple task attribute */
#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
#define FCP_PTA_ORDERED     2   /* ordered task attribute */
#define FCP_PTA_ACA         4   /* auto. contingent allegiance */
#define FCP_PTA_MASK        7   /* mask for task attribute field */
#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80        /* reserved bits in priority field */

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under the HW lock and may drop and re-acquire it.
 * This isn't an issue, since in the current implementation, at the time
 * those functions are called:
 *
 *   - Either the context is IRQ and only the IRQ handler can modify HW data,
 *     including rings related fields,
 *
 *   - Or access to target mode variables from struct qla_tgt doesn't
 *     cross those functions' boundaries, except tgt_stop, which is
 *     additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
	response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked);
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
	fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
static void qlt_24xx_handle_abts(struct scsi_qla_host *,
	struct abts_recv_from_24xx *);
static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
    uint16_t);
static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t);
static inline uint32_t qlt_make_handle(struct qla_qpair *);

/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
struct kmem_cache *qla_tgt_plogi_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

static const char *prot_op_str(u32 prot_op)
{
	switch (prot_op) {
	case TARGET_PROT_NORMAL:	return "NORMAL";
	case TARGET_PROT_DIN_INSERT:	return "DIN_INSERT";
	case TARGET_PROT_DOUT_INSERT:	return "DOUT_INSERT";
	case TARGET_PROT_DIN_STRIP:	return "DIN_STRIP";
	case TARGET_PROT_DOUT_STRIP:	return "DOUT_STRIP";
	case TARGET_PROT_DIN_PASS:	return "DIN_PASS";
	case TARGET_PROT_DOUT_PASS:	return "DOUT_PASS";
	default:			return "UNKNOWN";
	}
}

/* This API intentionally takes dest as a parameter, rather than returning
 * an int value, to avoid the caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
	*dest = atomic_inc_return(&base_vha->generation_tick);
	/* memory barrier */
	wmb();
}

/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);

		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

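/*
 * Resolve the destination port ID of an incoming frame to the
 * scsi_qla_host (physical port or NPIV vport) that owns it: the caller's
 * vha is checked first, otherwise the per-HBA btree keyed by the 24-bit
 * port ID is consulted.
 */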
static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
					    be_id_t d_id)
{
	struct scsi_qla_host *host;
	uint32_t key;

	if (vha->d_id.b.area == d_id.area &&
	    vha->d_id.b.domain == d_id.domain &&
	    vha->d_id.b.al_pa == d_id.al_pa)
		return vha;

	key = be_to_port_id(d_id).b24;

	host = btree_lookup32(&vha->hw->tgt.host_map, key);
	if (!host)
		ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
		    "Unable to find host %06x\n", key);

	return host;
}

static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds++;
	if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
		vha->qla_stats.stat_max_pend_cmds =
			vha->hw->tgt.num_pend_cmds;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	vha->hw->tgt.num_pend_cmds--;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

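/*
 * Park an ATIO whose d_id does not (yet) match any known host on
 * vha->unknown_atio_list and schedule delayed work to retry the lookup.
 * The exchange is terminated instead if the target is being stopped or
 * the bookkeeping allocation fails.
 */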
static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (tgt->tgt_stop) {
		ql_dbg(ql_dbg_async, vha, 0x502c,
		    "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
		    vha->vp_idx);
		goto out_term;
	}

	u = kzalloc(sizeof(*u), GFP_ATOMIC);
	if (u == NULL)
		goto out_term;

	u->vha = vha;
	memcpy(&u->atio, atio, sizeof(*atio));
	INIT_LIST_HEAD(&u->cmd_list);

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	schedule_delayed_work(&vha->unknown_atio_work, 1);

out:
	return;

out_term:
	qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
	goto out;
}

static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
	uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u, *t;
	scsi_qla_host_t *host;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	uint8_t queued = 0;

	list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
		if (u->aborted) {
			ql_dbg(ql_dbg_async, vha, 0x502e,
			    "Freeing unknown %s %p, because of Abort\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
			goto abort;
		}

		host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
		if (host != NULL) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
			    "Requeuing unknown ATIO_TYPE7 %p\n", u);
			qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
		} else if (tgt->tgt_stop) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
			    "Freeing unknown %s %p, because tgt is being stopped\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked, 0);
		} else {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
			    "Reschedule u %p, vha %p, host %p\n", u, vha, host);
			if (!queued) {
				queued = 1;
				schedule_delayed_work(&vha->unknown_atio_work,
				    1);
			}
			continue;
		}

abort:
		spin_lock_irqsave(&vha->cmd_list_lock, flags);
		list_del(&u->cmd_list);
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
		kfree(u);
	}
}

void qlt_unknown_atio_work_fn(struct work_struct *work)
{
	struct scsi_qla_host *vha = container_of(to_delayed_work(work),
	    struct scsi_qla_host, unknown_atio_work);

	qlt_try_to_dequeue_unknown_atios(vha, 0);
}

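/*
 * Demultiplex a raw ATIO to the vport that should handle it: ATIO_TYPE7
 * is routed by destination port ID, IMMED_NOTIFY and ABTS by vp_index,
 * and VP_RPT_ID_IOCB is handled inline.  ha_locked tells the callees
 * whether the hardware lock is already held.
 */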
static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id.domain,
			    atio->u.isp24.fcp_hdr.d_id.area,
			    atio->u.isp24.fcp_hdr.d_id.al_pa);

			qlt_queue_unknown_atio(vha, atio, ha_locked);
			break;
		}
		if (unlikely(!list_empty(&vha->unknown_atio_list)))
			qlt_try_to_dequeue_unknown_atios(vha, ha_locked);

		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		qlt_issue_marker(vha, ha_locked);

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != cpu_to_le16(0xFFFF))) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case VP_RPT_ID_IOCB_TYPE:
		qla24xx_report_id_acquisition(vha,
			(struct vp_rpt_id_entry_24xx *)atio);
		break;

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
			(struct abts_recv_from_24xx *)atio;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
			entry->vp_index);
		unsigned long flags;

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe00a,
			    "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		if (!ha_locked)
			spin_lock_irqsave(&host->hw->hardware_lock, flags);
		qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
		break;
	}

	/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received ATIO packet of unknown "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return false;
}

436
437void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
438	struct rsp_que *rsp, response_t *pkt)
439{
440	switch (pkt->entry_type) {
441	case CTIO_CRC2:
442		ql_dbg(ql_dbg_tgt, vha, 0xe073,
443			"qla_target(%d):%s: CRC2 Response pkt\n",
444			vha->vp_idx, __func__);
445		fallthrough;
446	case CTIO_TYPE7:
447	{
448		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
449		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
450		    entry->vp_index);
451		if (unlikely(!host)) {
452			ql_dbg(ql_dbg_tgt, vha, 0xe041,
453			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
454			    "received, with unknown vp_index %d\n",
455			    vha->vp_idx, entry->vp_index);
456			break;
457		}
458		qlt_response_pkt(host, rsp, pkt);
459		break;
460	}
461
462	case IMMED_NOTIFY_TYPE:
463	{
464		struct scsi_qla_host *host;
465		struct imm_ntfy_from_isp *entry =
466		    (struct imm_ntfy_from_isp *)pkt;
467
468		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
469		if (unlikely(!host)) {
470			ql_dbg(ql_dbg_tgt, vha, 0xe042,
471			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
472			    "received, with unknown vp_index %d\n",
473			    vha->vp_idx, entry->u.isp24.vp_index);
474			break;
475		}
476		qlt_response_pkt(host, rsp, pkt);
477		break;
478	}
479
480	case NOTIFY_ACK_TYPE:
481	{
482		struct scsi_qla_host *host = vha;
483		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
484
485		if (0xFF != entry->u.isp24.vp_index) {
486			host = qlt_find_host_by_vp_idx(vha,
487			    entry->u.isp24.vp_index);
488			if (unlikely(!host)) {
489				ql_dbg(ql_dbg_tgt, vha, 0xe043,
490				    "qla_target(%d): Response "
491				    "pkt (NOTIFY_ACK_TYPE) "
492				    "received, with unknown "
493				    "vp_index %d\n", vha->vp_idx,
494				    entry->u.isp24.vp_index);
495				break;
496			}
497		}
498		qlt_response_pkt(host, rsp, pkt);
499		break;
500	}
501
502	case ABTS_RECV_24XX:
503	{
504		struct abts_recv_from_24xx *entry =
505		    (struct abts_recv_from_24xx *)pkt;
506		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
507		    entry->vp_index);
508		if (unlikely(!host)) {
509			ql_dbg(ql_dbg_tgt, vha, 0xe044,
510			    "qla_target(%d): Response pkt "
511			    "(ABTS_RECV_24XX) received, with unknown "
512			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
513			break;
514		}
515		qlt_response_pkt(host, rsp, pkt);
516		break;
517	}
518
519	case ABTS_RESP_24XX:
520	{
521		struct abts_resp_to_24xx *entry =
522		    (struct abts_resp_to_24xx *)pkt;
523		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
524		    entry->vp_index);
525		if (unlikely(!host)) {
526			ql_dbg(ql_dbg_tgt, vha, 0xe045,
527			    "qla_target(%d): Response pkt "
528			    "(ABTS_RECV_24XX) received, with unknown "
529			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
530			break;
531		}
532		qlt_response_pkt(host, rsp, pkt);
533		break;
534	}
535	default:
536		qlt_response_pkt(vha, rsp, pkt);
537		break;
538	}
539
540}
541
542/*
543 * All qlt_plogi_ack_t operations are protected by hardware_lock
544 */
545static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
546	struct imm_ntfy_from_isp *ntfy, int type)
547{
548	struct qla_work_evt *e;
549
550	e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
551	if (!e)
552		return QLA_FUNCTION_FAILED;
553
554	e->u.nack.fcport = fcport;
555	e->u.nack.type = type;
556	memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
557	return qla2x00_post_work(vha, e);
558}
559
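/*
 * Completion callback for NACK (notify acknowledge) SRBs.  Advances the
 * firmware login state of the fcport per ELS type: PLOGI completion bumps
 * login_gen and arms logout_on_delete, PRLI completion may register the
 * port with the upper layers (dropping sess_lock around the update), and
 * LOGO completion invokes the logout completion handler.
 */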
static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0x20f2,
	    "Async done-%s res %x %8phC type %d\n",
	    sp->name, res, sp->fcport->port_name, sp->type);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset;

	switch (sp->type) {
	case SRB_NACK_PLOGI:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
		sp->fcport->logout_on_delete = 1;
		sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
		sp->fcport->send_els_logo = 0;
		break;

	case SRB_NACK_PRLI:
		sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
		sp->fcport->deleted = 0;
		sp->fcport->send_els_logo = 0;

		if (!sp->fcport->login_succ &&
		    !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
			sp->fcport->login_succ = 1;

			vha->fcport_count++;
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			qla24xx_sched_upd_fcport(sp->fcport);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		} else {
			sp->fcport->login_retry = 0;
			qla2x00_set_fcport_disc_state(sp->fcport,
			    DSC_LOGIN_COMPLETE);
			sp->fcport->deleted = 0;
			sp->fcport->logout_on_delete = 1;
		}
		break;

	case SRB_NACK_LOGO:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
		break;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}

int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	char *c = NULL;

	fcport->flags |= FCF_ASYNC_SENT;
	switch (type) {
	case SRB_NACK_PLOGI:
		fcport->fw_login_state = DSC_LS_PLOGI_PEND;
		c = "PLOGI";
		break;
	case SRB_NACK_PRLI:
		fcport->fw_login_state = DSC_LS_PRLI_PEND;
		fcport->deleted = 0;
		c = "PRLI";
		break;
	case SRB_NACK_LOGO:
		fcport->fw_login_state = DSC_LS_LOGO_PEND;
		c = "LOGO";
		break;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	sp->type = type;
	sp->name = "nack";

	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	sp->u.iocb_cmd.u.nack.ntfy = ntfy;
	sp->done = qla2x00_async_nack_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x20f4,
	    "Async-%s %8phC hndl %x %s\n",
	    sp->name, fcport->port_name, sp->handle, c);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
	fc_port_t *t;

	switch (e->u.nack.type) {
	case SRB_NACK_PRLI:
		t = e->u.nack.fcport;
		flush_work(&t->del_work);
		flush_work(&t->free_work);
		mutex_lock(&vha->vha_tgt.tgt_mutex);
		t = qlt_create_sess(vha, e->u.nack.fcport, 0);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		if (t) {
			ql_log(ql_log_info, vha, 0xd034,
			    "%s create sess success %p", __func__, t);
			/* create sess has an extra kref */
			vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
		}
		break;
	}
	qla24xx_async_notify_ack(vha, e->u.nack.fcport,
	    (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type);
}

void qla24xx_delete_sess_fn(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, struct fc_port, del_work);
	struct qla_hw_data *ha = fcport->vha->hw;

	if (fcport->se_sess) {
		ha->tgt.tgt_ops->shutdown_sess(fcport);
		ha->tgt.tgt_ops->put_sess(fcport);
	} else {
		qlt_unreg_sess(fcport);
	}
}

/*
 * Called from qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (!sess->se_sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	} else {
		if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x2107,
			    "%s: kref_get fail sess %8phC\n",
			    __func__, sess->port_name);
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
		    "qla_target(%u): %ssession for port %8phC "
		    "(loop ID %d) reappeared\n", vha->vp_idx,
		    sess->local ? "local " : "", sess->port_name, sess->loop_id);

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
		    "Reappeared sess %p\n", sess);

		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
		    fcport->loop_id,
		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	ha->tgt.tgt_ops->put_sess(sess);
}

/*
 * This is a zero-based ref-counting solution, since hardware_lock
 * guarantees that ref_count is not modified concurrently.
 * Upon successful return the content of iocb is undefined.
 */
static struct qlt_plogi_ack_t *
qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
		       struct imm_ntfy_from_isp *iocb)
{
	struct qlt_plogi_ack_t *pla;

	lockdep_assert_held(&vha->hw->hardware_lock);

	list_for_each_entry(pla, &vha->plogi_ack_list, list) {
		if (pla->id.b24 == id->b24) {
			ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d,
			    "%s %d %8phC Term INOT due to new INOT",
			    __func__, __LINE__,
			    pla->iocb.u.isp24.port_name);
			qlt_send_term_imm_notif(vha, &pla->iocb, 1);
			memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
			return pla;
		}
	}

	pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
	if (!pla) {
		ql_dbg(ql_dbg_async, vha, 0x5088,
		       "qla_target(%d): Allocation of plogi_ack failed\n",
		       vha->vp_idx);
		return NULL;
	}

	memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
	pla->id = *id;
	list_add_tail(&pla->list, &vha->plogi_ack_list);

	return pla;
}

void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
    struct qlt_plogi_ack_t *pla)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	port_id_t port_id;
	uint16_t loop_id;
	fc_port_t *fcport = pla->fcport;

	BUG_ON(!pla->ref_count);
	pla->ref_count--;

	if (pla->ref_count)
		return;

	ql_dbg(ql_dbg_disc, vha, 0x5089,
	    "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
	    " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
	    iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
	    iocb->u.isp24.port_id[0],
	    le16_to_cpu(iocb->u.isp24.nport_handle),
	    iocb->u.isp24.exchange_address, iocb->ox_id);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area   = iocb->u.isp24.port_id[1];
	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	fcport->loop_id = loop_id;
	fcport->d_id = port_id;
	if (iocb->u.isp24.status_subcode == ELS_PLOGI)
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
	else
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
	}

	list_del(&pla->list);
	kmem_cache_free(qla_tgt_plogi_cachep, pla);
}

void
qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
    struct fc_port *sess, enum qlt_plogi_link_t link)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;

	/* Inc ref_count first because link might already be pointing at pla */
	pla->ref_count++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
		"Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
		" s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
		sess, link, sess->port_name,
		iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
		iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
		pla->ref_count, pla, link);

	if (link == QLT_PLOGI_LINK_CONFLICT) {
		switch (sess->disc_state) {
		case DSC_DELETED:
		case DSC_DELETE_PEND:
			pla->ref_count--;
			return;
		default:
			break;
		}
	}

	if (sess->plogi_link[link])
		qlt_plogi_ack_unref(vha, sess->plogi_link[link]);

	if (link == QLT_PLOGI_LINK_SAME_WWN)
		pla->fcport = sess;

	sess->plogi_link[link] = pla;
}

typedef struct {
	/* These fields must be initialized by the caller */
	port_id_t id;
	/*
	 * Number of cmds dropped while we were waiting for the
	 * initiator to ack LOGO. Initialize to 1 if the LOGO is
	 * triggered by a command, otherwise to 0.
	 */
	int cmd_count;

	/* These fields are used by the callee */
	struct list_head list;
} qlt_port_logo_t;

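/*
 * Send an explicit LOGO to the given port ID, coalescing with any LOGO
 * already in flight to the same ID: if an entry for the ID is found on
 * vha->logo_list, only the dropped-command count is folded in; otherwise
 * this caller issues the ELS LOGO and logs the result.
 */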
static void
qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
{
	qlt_port_logo_t *tmp;
	int res;

	mutex_lock(&vha->vha_tgt.tgt_mutex);

	list_for_each_entry(tmp, &vha->logo_list, list) {
		if (tmp->id.b24 == logo->id.b24) {
			tmp->cmd_count += logo->cmd_count;
			mutex_unlock(&vha->vha_tgt.tgt_mutex);
			return;
		}
	}

	list_add_tail(&logo->list, &vha->logo_list);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	list_del(&logo->list);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
	    "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
	    logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
	    logo->cmd_count, res);
}

void qlt_free_session_done(struct work_struct *work)
{
	struct fc_port *sess = container_of(work, struct fc_port,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	bool logout_started = false;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	struct qlt_plogi_ack_t *own =
		sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

	ql_dbg(ql_dbg_disc, vha, 0xf084,
		"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
		" s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
		__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
		sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
		sess->logout_on_delete, sess->keep_nport_handle,
		sess->send_els_logo);

	if (!IS_SW_RESV_ADDR(sess->d_id)) {
		qla2x00_mark_device_lost(vha, sess, 0);

		if (sess->send_els_logo) {
			qlt_port_logo_t logo;

			logo.id = sess->d_id;
			logo.cmd_count = 0;
			if (!own)
				qlt_send_first_logo(vha, &logo);
			sess->send_els_logo = 0;
		}

		if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
			int rc;

			if (!own ||
			    own->iocb.u.isp24.status_subcode == ELS_PLOGI) {
				rc = qla2x00_post_async_logout_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule logo failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			} else if (own && (own->iocb.u.isp24.status_subcode ==
				ELS_PRLI) && ha->flags.rida_fmt2) {
				rc = qla2x00_post_async_prlo_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule PRLO failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			}
		} /* if sess->logout_on_delete */

		if (sess->nvme_flag & NVME_FLAG_REGISTERED &&
		    !(sess->nvme_flag & NVME_FLAG_DELETING)) {
			sess->nvme_flag |= NVME_FLAG_DELETING;
			qla_nvme_unregister_remote_port(sess);
		}
	}

	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	if (logout_started) {
		bool traced = false;
		u16 cnt = 0;

		while (!READ_ONCE(sess->logout_completed)) {
			if (!traced) {
				ql_dbg(ql_dbg_disc, vha, 0xf086,
					"%s: waiting for sess %p logout\n",
					__func__, sess);
				traced = true;
			}
			msleep(100);
			cnt++;
			if (cnt > 200)
				break;
		}

		ql_dbg(ql_dbg_disc, vha, 0xf087,
		    "%s: sess %p logout completed\n", __func__, sess);
	}

	if (sess->logo_ack_needed) {
		sess->logo_ack_needed = 0;
		qla24xx_async_notify_ack(vha, sess,
			(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
	}

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (sess->se_sess) {
		sess->se_sess = NULL;
		if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
			tgt->sess_count--;
	}

	qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
	sess->fw_login_state = DSC_LS_PORT_UNAVAIL;

	if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
		vha->fcport_count--;
		sess->login_succ = 0;
	}

	qla2x00_clear_loop_id(sess);

	if (sess->conflict) {
		sess->conflict->login_pause = 0;
		sess->conflict = NULL;
		if (!test_bit(UNLOADING, &vha->dpc_flags))
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
	}

	{
		struct qlt_plogi_ack_t *con =
		    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
		struct imm_ntfy_from_isp *iocb;

		own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

		if (con) {
			iocb = &con->iocb;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
				 "se_sess %p / sess %p port %8phC is gone,"
				 " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
				 sess->se_sess, sess, sess->port_name,
				 own ? "releasing own PLOGI" : "no own PLOGI pending",
				 own ? own->ref_count : -1,
				 iocb->u.isp24.port_name, con->ref_count);
			qlt_plogi_ack_unref(vha, con);
			sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
			    "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" :
			    "no own PLOGI pending",
			    own ? own->ref_count : -1);
		}

		if (own) {
			sess->fw_login_state = DSC_LS_PLOGI_PEND;
			qlt_plogi_ack_unref(vha, own);
			sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		}
	}

	sess->explicit_logout = 0;
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	qla2x00_dfs_remove_rport(vha, sess);

	spin_lock_irqsave(&vha->work_lock, flags);
	sess->flags &= ~FCF_ASYNC_SENT;
	sess->deleted = QLA_SESS_DELETED;
	sess->free_pending = 0;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	ql_dbg(ql_dbg_disc, vha, 0xf001,
	    "Unregistration of sess %p %8phC finished fcport_count %d\n",
	    sess, sess->port_name, vha->fcport_count);

	if (tgt && (tgt->sess_count == 0))
		wake_up_all(&tgt->waitQ);

	if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) &&
	    !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) &&
	    (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
		switch (vha->host->active_mode) {
		case MODE_INITIATOR:
		case MODE_DUAL:
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			break;
		case MODE_TARGET:
		default:
			/* no-op */
			break;
		}
	}

	if (vha->fcport_count == 0)
		wake_up_all(&vha->fcport_waitQ);
}

/* ha->tgt.sess_lock is supposed to be held on entry */
void qlt_unreg_sess(struct fc_port *sess)
{
	struct scsi_qla_host *vha = sess->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
	    "%s sess %p for deletion %8phC\n",
	    __func__, sess, sess->port_name);

	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->free_pending) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->free_pending = 1;
	/*
	 * Use FCF_ASYNC_SENT flag to block other cmds used in sess
	 * management from being sent.
	 */
	sess->flags |= FCF_ASYNC_SENT;
	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	if (sess->se_sess)
		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
	sess->last_rscn_gen = sess->rscn_gen;
	sess->last_login_gen = sess->login_gen;

	queue_work(sess->vha->hw->wq, &sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);

static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	unsigned long flags;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
		/* Global event */
		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	} else {
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}

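/*
 * If the chip has been reset since this session logged in, the firmware
 * side of the login no longer exists, so skip the explicit logout and
 * LOGO ack on deletion.
 */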
static void qla24xx_chk_fcp_state(struct fc_port *sess)
{
	if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
		sess->logout_on_delete = 0;
		sess->logo_ack_needed = 0;
		sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	}
}

void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
	struct qla_tgt *tgt = sess->tgt;
	unsigned long flags;
	u16 sec;

	switch (sess->disc_state) {
	case DSC_DELETE_PEND:
		return;
	case DSC_DELETED:
		if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
			!sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) {
			if (tgt && tgt->tgt_stop && tgt->sess_count == 0)
				wake_up_all(&tgt->waitQ);

			if (sess->vha->fcport_count == 0)
				wake_up_all(&sess->vha->fcport_waitQ);
			return;
		}
		break;
	case DSC_UPD_FCPORT:
		/*
		 * This port is not done reporting to the upper layer.
		 * Let it finish.
		 */
		sess->next_disc_state = DSC_DELETE_PEND;
		sec = jiffies_to_msecs(jiffies -
		    sess->jiffies_at_registration)/1000;
		if (sess->sec_since_registration < sec && sec && !(sec % 5)) {
			sess->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
			    "%s %8phC : Slow Rport registration (%d Sec)\n",
			    __func__, sess->port_name, sec);
		}
		return;
	default:
		break;
	}

	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	sess->prli_pend_timer = 0;
	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);

	qla24xx_chk_fcp_state(sess);

	ql_dbg(ql_dbg_disc, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion %8phC\n",
	    sess, sess->port_name);

	WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
}

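/*
 * Schedule every session on this vport that still has an se_sess for
 * deletion; used for global resets and target stop.
 */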
static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
	struct fc_port *sess;
	scsi_qla_host_t *vha = tgt->vha;

	list_for_each_entry(sess, &vha->vp_fcports, list) {
		if (sess->se_sess)
			qlt_schedule_sess_for_deletion(sess);
	}

	/* At this point tgt could be already dead */
}

static int qla24xx_get_loop_id(struct scsi_qla_host *vha, be_id_t s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list, *gid;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA alloc of %u bytes failed\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -EBUSY;
		goto out_free_id_list;
	}

	gid = gid_list;
	res = -ENOENT;
	for (i = 0; i < entries; i++) {
		if (gid->al_pa == s_id.al_pa &&
		    gid->area == s_id.area &&
		    gid->domain == s_id.domain) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		gid = (void *)gid + ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}

/*
 * Adds an extra ref to allow dropping the hw lock after adding sess to the
 * list. Caller must put it.
 */
static struct fc_port *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (vha->vha_tgt.qla_tgt->tgt_stop)
		return NULL;

	if (fcport->se_sess) {
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f6,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}
		return fcport;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->local = local;

	/*
	 * Under normal circumstances we want to logout from firmware when
	 * session eventually ends and release corresponding nport handle.
	 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
	 * code will adjust these flags as necessary.
	 */
	sess->logout_on_delete = 1;
	sess->keep_nport_handle = 0;
	sess->logout_completed = 0;

	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess) < 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
		    "(%d) %8phC check_initiator_node_acl failed\n",
		    vha->vp_idx, fcport->port_name);
		return NULL;
	} else {
		kref_init(&fcport->sess_kref);
		/*
		 * Take an extra reference to ->sess_kref here to handle
		 * fc_port access across ->tgt.sess_lock reacquire.
		 */
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f7,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!IS_SW_RESV_ADDR(sess->d_id))
			vha->vha_tgt.qla_tgt->sess_count++;

		qlt_do_generation_tick(vha, &sess->generation);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p se_sess %p to tgt %p sess_count %d\n",
	    sess, sess->se_sess, vha->vha_tgt.qla_tgt,
	    vha->vha_tgt.qla_tgt->sess_count);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
	    sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}

/*
 * max_gen - specifies the maximum session generation
 * at which this deletion request is still valid
 */
void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	if (!tgt)
		return;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}
	if (!sess->se_sess) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return;
	}

	if (max_gen - sess->generation < 0) {
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
		    "Ignoring stale deletion request for se_sess %p / sess %p"
		    " for port %8phC, req_gen %d, sess_gen %d\n",
		    sess->se_sess, sess, sess->port_name, max_gen,
		    sess->generation);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

	sess->local = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
	qlt_schedule_sess_for_deletion(sess);
}

static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;
	/*
	 * We need to protect against a race where tgt is freed before or
	 * inside wake_up().
	 */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, sess_count=%d\n",
	    tgt, tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return res;
}

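/*
 * Target shutdown is split in two phases.  Phase 1 marks the target as
 * stopping, tears down the session database, flushes pending session
 * works and waits (with a timeout) for sess_count to drain; phase 2 then
 * marks the target fully stopped.  qlt_release() drives both as needed.
 */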
/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&ha->optrom_mutex);
	mutex_lock(&qla_tgt_mutex);

	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		mutex_unlock(&ha->optrom_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);

	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);

	/* Big hammer */
	if (!ha->flags.host_shutting_down &&
	    (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
	mutex_unlock(&ha->optrom_mutex);

	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}
	if (!tgt->tgt_stop) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
		    "%s: phase1 stop is not completed\n", __func__);
		dump_stack();
		return;
	}

	mutex_lock(&tgt->ha->optrom_mutex);
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&tgt->ha->optrom_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
	    tgt);

	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->flags.online = 1;
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL(qlt_stop_phase2);

1582
1583/* Called from qlt_remove_target() -> qla2x00_remove_one() */
1584static void qlt_release(struct qla_tgt *tgt)
1585{
1586	scsi_qla_host_t *vha = tgt->vha;
1587	void *node;
1588	u64 key = 0;
1589	u16 i;
1590	struct qla_qpair_hint *h;
1591	struct qla_hw_data *ha = vha->hw;
1592
1593	if (!tgt->tgt_stop && !tgt->tgt_stopped)
1594		qlt_stop_phase1(tgt);
1595
1596	if (!tgt->tgt_stopped)
1597		qlt_stop_phase2(tgt);
1598
1599	for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
1600		unsigned long flags;
1601
1602		h = &tgt->qphints[i];
1603		if (h->qpair) {
1604			spin_lock_irqsave(h->qpair->qp_lock_ptr, flags);
1605			list_del(&h->hint_elem);
1606			spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags);
1607			h->qpair = NULL;
1608		}
1609	}
1610	kfree(tgt->qphints);
1611	mutex_lock(&qla_tgt_mutex);
1612	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
1613	mutex_unlock(&qla_tgt_mutex);
1614
1615	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
1616		btree_remove64(&tgt->lun_qpair_map, key);
1617
1618	btree_destroy64(&tgt->lun_qpair_map);
1619
1620	if (vha->vp_idx)
1621		if (ha->tgt.tgt_ops &&
1622		    ha->tgt.tgt_ops->remove_target &&
1623		    vha->vha_tgt.target_lport_ptr)
1624			ha->tgt.tgt_ops->remove_target(vha);
1625
1626	vha->vha_tgt.qla_tgt = NULL;
1627
1628	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
1629	    "Release of tgt %p finished\n", tgt);
1630
1631	kfree(tgt);
1632}
1633
1634/* ha->hardware_lock supposed to be held on entry */
1635static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
1636	const void *param, unsigned int param_size)
1637{
1638	struct qla_tgt_sess_work_param *prm;
1639	unsigned long flags;
1640
1641	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
1642	if (!prm) {
1643		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
1644		    "qla_target(%d): Unable to create session "
1645		    "work, command will be refused", 0);
1646		return -ENOMEM;
1647	}
1648
1649	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
1650	    "Scheduling work (type %d, prm %p)"
1651	    " to find session for param %p (size %d, tgt %p)\n",
1652	    type, prm, param, param_size, tgt);
1653
1654	prm->type = type;
1655	memcpy(&prm->tm_iocb, param, param_size);
1656
1657	spin_lock_irqsave(&tgt->sess_work_lock, flags);
1658	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
1659	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
1660
1661	schedule_work(&tgt->sess_work);
1662
1663	return 0;
1664}
1665
1666/*
1667 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1668 */
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	if (!ha->flags.fw_started)
		return;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, qpair->req);
}

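/*
 * Build and queue an ABTS response IOCB for a task-management command:
 * FCP_TMF_CMPL is answered with a BA_ACC payload, anything else with a
 * BA_RJT ("unable to perform"), with the s_id/d_id of the received ABTS
 * swapped back toward the initiator.
 */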
static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	__le32 f_ctl;
	uint32_t h;
	uint8_t *p;
	int rc;
	struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
	struct qla_qpair *qpair = mcmd->qpair;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, status=%x)\n",
	    ha, mcmd->fc_tm_rsp);

	rc = qlt_check_reserve_free_req(qpair, 1);
	if (rc) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate request packet\n",
		    vha->vp_idx, __func__);
		return -EAGAIN;
	}

	resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr;
	memset(resp, 0, sizeof(*resp));

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, therefore, the command.
		 */
		return -EAGAIN;
	} else {
		qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
	}

	resp->handle = make_handle(qpair->req->id, h);
	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;

	resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
	resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;

	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

	return rc;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
1816static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
1817	struct abts_recv_from_24xx *abts, uint32_t status,
1818	bool ids_reversed)
1819{
1820	struct scsi_qla_host *vha = qpair->vha;
1821	struct qla_hw_data *ha = vha->hw;
1822	struct abts_resp_to_24xx *resp;
1823	__le32 f_ctl;
1824	uint8_t *p;
1825
1826	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x)\n",
1828	    ha, abts, status);
1829
1830	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair,
1831	    NULL);
1832	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate request packet\n",
		    vha->vp_idx, __func__);
1836		return;
1837	}
1838
1839	resp->entry_type = ABTS_RESP_24XX;
1840	resp->handle = QLA_TGT_SKIP_HANDLE;
1841	resp->entry_count = 1;
1842	resp->nport_handle = abts->nport_handle;
1843	resp->vp_index = vha->vp_idx;
1844	resp->sof_type = abts->sof_type;
1845	resp->exchange_address = abts->exchange_address;
1846	resp->fcp_hdr_le = abts->fcp_hdr_le;
1847	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
1848	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
1849	    F_CTL_SEQ_INITIATIVE);
1850	p = (uint8_t *)&f_ctl;
1851	resp->fcp_hdr_le.f_ctl[0] = *p++;
1852	resp->fcp_hdr_le.f_ctl[1] = *p++;
1853	resp->fcp_hdr_le.f_ctl[2] = *p;
1854	if (ids_reversed) {
1855		resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.d_id;
1856		resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.s_id;
1857	} else {
1858		resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
1859		resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;
1860	}
1861	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
1862	if (status == FCP_TMF_CMPL) {
1863		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
1864		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
1865		resp->payload.ba_acct.low_seq_cnt = 0x0000;
1866		resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
1867		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
1868		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
1869	} else {
1870		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
1871		resp->payload.ba_rjt.reason_code =
1872			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
1873		/* Other bytes are zero */
1874	}
1875
1876	vha->vha_tgt.qla_tgt->abts_resp_expected++;
1877
1878	/* Memory Barrier */
1879	wmb();
1880	if (qpair->reqq_start_iocbs)
1881		qpair->reqq_start_iocbs(qpair);
1882	else
1883		qla2x00_start_iocbs(vha, qpair->req);
1884}
1885
1886/*
 * ha->hardware_lock is supposed to be held on entry. Might drop it, then
 * reacquire.
1888 */
1889static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1890    struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd)
1891{
1892	struct ctio7_to_24xx *ctio;
1893	u16 tmp;
1894	struct abts_recv_from_24xx *entry;
1895
1896	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL);
1897	if (ctio == NULL) {
1898		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
1899		    "qla_target(%d): %s failed: unable to allocate "
1900		    "request packet\n", vha->vp_idx, __func__);
1901		return;
1902	}
1903
	if (mcmd)
		/* ABTS from the remote port */
		entry = &mcmd->orig_iocb.abts;
	else
		/* ABTS from this driver */
		entry = (struct abts_recv_from_24xx *)pkt;

	/*
	 * On entry we have the firmware's response to an ABTS response that
	 * we generated, so the ID fields in it are reversed.
	 */
1915
1916	ctio->entry_type = CTIO_TYPE7;
1917	ctio->entry_count = 1;
1918	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
1920	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
1921	ctio->vp_index = vha->vp_idx;
1922	ctio->exchange_addr = entry->exchange_addr_to_abort;
1923	tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
1924
1925	if (mcmd) {
1926		ctio->initiator_id = entry->fcp_hdr_le.s_id;
1927
1928		if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID)
1929			tmp |= (mcmd->abort_io_attr << 9);
1930		else if (qpair->retry_term_cnt & 1)
1931			tmp |= (0x4 << 9);
1932	} else {
1933		ctio->initiator_id = entry->fcp_hdr_le.d_id;
1934
1935		if (qpair->retry_term_cnt & 1)
1936			tmp |= (0x4 << 9);
1937	}
1938	ctio->u.status1.flags = cpu_to_le16(tmp);
1939	ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;
1940
1941	ql_dbg(ql_dbg_tgt, vha, 0xe007,
1942	    "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n",
1943	    le16_to_cpu(ctio->u.status1.flags),
1944	    le16_to_cpu(ctio->u.status1.ox_id),
1945	    (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ? 1 : 0);
1946
1947	/* Memory Barrier */
1948	wmb();
1949	if (qpair->reqq_start_iocbs)
1950		qpair->reqq_start_iocbs(qpair);
1951	else
1952		qla2x00_start_iocbs(vha, qpair->req);
1953
1954	if (mcmd)
1955		qlt_build_abts_resp_iocb(mcmd);
1956	else
1957		qlt_24xx_send_abts_resp(qpair,
1958		    (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true);
1959
1960}
1961
/* Drop cmds for the given LUN.
 * XXX only looks for cmds on the port through which the LUN reset was received
 * XXX does not go through the lists of other ports (which may have cmds
 *     for the same LUN)
 */
1967static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id)
1968{
1969	struct qla_tgt_sess_op *op;
1970	struct qla_tgt_cmd *cmd;
1971	uint32_t key;
1972	unsigned long flags;
1973
1974	key = sid_to_key(s_id);
1975	spin_lock_irqsave(&vha->cmd_list_lock, flags);
1976	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1977		uint32_t op_key;
1978		u64 op_lun;
1979
1980		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
1981		op_lun = scsilun_to_int(
1982			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
1983		if (op_key == key && op_lun == lun)
1984			op->aborted = true;
1985	}
1986
1987	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
1988		uint32_t op_key;
1989		u64 op_lun;
1990
1991		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
1992		op_lun = scsilun_to_int(
1993			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
1994		if (op_key == key && op_lun == lun)
1995			op->aborted = true;
1996	}
1997
1998	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1999		uint32_t cmd_key;
2000		u64 cmd_lun;
2001
2002		cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
2003		cmd_lun = scsilun_to_int(
2004			(struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
2005		if (cmd_key == key && cmd_lun == lun)
2006			cmd->aborted = 1;
2007	}
2008	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
2009}
2010
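/*
 * Map an unpacked LUN to a qpair hint: look it up in the per-target
 * lun_qpair_map btree when qpairs are available, and fall back to the
 * default hint (qphints[0]) otherwise or on a lookup miss.
 */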
2011static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha,
2012    uint64_t unpacked_lun)
2013{
2014	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2015	struct qla_qpair_hint *h = NULL;
2016
2017	if (vha->flags.qpairs_available) {
2018		h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun);
2019		if (!h)
2020			h = &tgt->qphints[0];
2021	} else {
2022		h = &tgt->qphints[0];
2023	}
2024
2025	return h;
2026}
2027
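/*
 * Work item that passes a deferred task management request to the target
 * core via ->handle_tmr(). On failure, send the appropriate reject, busy
 * or notify-ack response under the qpair lock and free the mcmd.
 */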
2028static void qlt_do_tmr_work(struct work_struct *work)
2029{
2030	struct qla_tgt_mgmt_cmd *mcmd =
2031		container_of(work, struct qla_tgt_mgmt_cmd, work);
2032	struct qla_hw_data *ha = mcmd->vha->hw;
2033	int rc;
2034	uint32_t tag;
2035	unsigned long flags;
2036
2037	switch (mcmd->tmr_func) {
2038	case QLA_TGT_ABTS:
2039		tag = le32_to_cpu(mcmd->orig_iocb.abts.exchange_addr_to_abort);
2040		break;
2041	default:
2042		tag = 0;
2043		break;
2044	}
2045
2046	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun,
2047	    mcmd->tmr_func, tag);
2048
2049	if (rc != 0) {
2050		spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
2051		switch (mcmd->tmr_func) {
2052		case QLA_TGT_ABTS:
2053			mcmd->fc_tm_rsp = FCP_TMF_REJECTED;
2054			qlt_build_abts_resp_iocb(mcmd);
2055			break;
2056		case QLA_TGT_LUN_RESET:
2057		case QLA_TGT_CLEAR_TS:
2058		case QLA_TGT_ABORT_TS:
2059		case QLA_TGT_CLEAR_ACA:
2060		case QLA_TGT_TARGET_RESET:
2061			qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio,
2062			    qla_sam_status);
2063			break;
2064
2065		case QLA_TGT_ABORT_ALL:
2066		case QLA_TGT_NEXUS_LOSS_SESS:
2067		case QLA_TGT_NEXUS_LOSS:
2068			qlt_send_notify_ack(mcmd->qpair,
2069			    &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
2070			break;
2071		}
2072		spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags);
2073
2074		ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052,
2075		    "qla_target(%d):  tgt_ops->handle_tmr() failed: %d\n",
2076		    mcmd->vha->vp_idx, rc);
2077		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
2078	}
2079}
2080
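/*
 * Allocate and initialize an mcmd for an incoming ABTS. When the command
 * being aborted can be found by tag, reuse its qpair and CPU so the abort
 * is handled in the same context; the actual work is deferred to
 * qlt_do_tmr_work() on qla_tgt_wq.
 */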
/* ha->hardware_lock is supposed to be held on entry */
2082static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2083	struct abts_recv_from_24xx *abts, struct fc_port *sess)
2084{
2085	struct qla_hw_data *ha = vha->hw;
2086	struct qla_tgt_mgmt_cmd *mcmd;
2087	struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
2088
2089	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
2090	    "qla_target(%d): task abort (tag=%d)\n",
2091	    vha->vp_idx, abts->exchange_addr_to_abort);
2092
2093	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
2094	if (mcmd == NULL) {
2095		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
2097		    vha->vp_idx, __func__);
2098		return -ENOMEM;
2099	}
2100	memset(mcmd, 0, sizeof(*mcmd));
2101	mcmd->cmd_type = TYPE_TGT_TMCMD;
2102	mcmd->sess = sess;
2103	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
2104	mcmd->reset_count = ha->base_qpair->chip_reset;
2105	mcmd->tmr_func = QLA_TGT_ABTS;
2106	mcmd->qpair = h->qpair;
2107	mcmd->vha = vha;
2108
2109	/*
2110	 * LUN is looked up by target-core internally based on the passed
2111	 * abts->exchange_addr_to_abort tag.
2112	 */
2113	mcmd->se_cmd.cpuid = h->cpuid;
2114
2115	if (ha->tgt.tgt_ops->find_cmd_by_tag) {
2116		struct qla_tgt_cmd *abort_cmd;
2117
2118		abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
2119				le32_to_cpu(abts->exchange_addr_to_abort));
2120		if (abort_cmd && abort_cmd->qpair) {
2121			mcmd->qpair = abort_cmd->qpair;
2122			mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
2123			mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr;
2124			mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID;
2125		}
2126	}
2127
2128	INIT_WORK(&mcmd->work, qlt_do_tmr_work);
2129	queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work);
2130
2131	return 0;
2132}
2133
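/*
 * Entry point for an ABTS received from the firmware: reject Abort Sequence
 * requests (not supported) and unknown exchange addresses, look up the
 * session by the initiator's s_id, and hand the abort to
 * __qlt_24xx_handle_abts().
 */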
2134/*
 * ha->hardware_lock is supposed to be held on entry. Might drop it, then
 * reacquire.
2136 */
2137static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2138	struct abts_recv_from_24xx *abts)
2139{
2140	struct qla_hw_data *ha = vha->hw;
2141	struct fc_port *sess;
2142	uint32_t tag = le32_to_cpu(abts->exchange_addr_to_abort);
2143	be_id_t s_id;
2144	int rc;
2145	unsigned long flags;
2146
2147	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
2148		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
2149		    "qla_target(%d): ABTS: Abort Sequence not "
2150		    "supported\n", vha->vp_idx);
2151		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2152		    false);
2153		return;
2154	}
2155
2156	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
2157		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
2158		    "qla_target(%d): ABTS: Unknown Exchange "
2159		    "Address received\n", vha->vp_idx);
2160		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2161		    false);
2162		return;
2163	}
2164
2165	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
2166	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
2167	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id.domain,
2168	    abts->fcp_hdr_le.s_id.area, abts->fcp_hdr_le.s_id.al_pa, tag,
2169	    le32_to_cpu(abts->fcp_hdr_le.parameter));
2170
2171	s_id = le_id_to_be(abts->fcp_hdr_le.s_id);
2172
2173	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
2174	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
2175	if (!sess) {
2176		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
2177		    "qla_target(%d): task abort for non-existent session\n",
2178		    vha->vp_idx);
2179		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
2180
2181		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2182			    false);
2183		return;
2184	}
2185	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
2186
2188	if (sess->deleted) {
2189		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2190		    false);
2191		return;
2192	}
2193
2194	rc = __qlt_24xx_handle_abts(vha, abts, sess);
2195	if (rc != 0) {
2196		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
2197		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
2198		    vha->vp_idx, rc);
2199		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2200		    false);
2201		return;
2202	}
2203}
2204
2205/*
 * ha->hardware_lock is supposed to be held on entry. Might drop it, then
 * reacquire.
2207 */
2208static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
2209	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
2210{
2211	struct scsi_qla_host *ha = mcmd->vha;
2212	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
2213	struct ctio7_to_24xx *ctio;
2214	uint16_t temp;
2215
2216	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x)\n",
2218	    ha, atio, resp_code);
2219
2221	ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL);
2222	if (ctio == NULL) {
2223		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
2224		    "qla_target(%d): %s failed: unable to allocate "
2225		    "request packet\n", ha->vp_idx, __func__);
2226		return;
2227	}
2228
2229	ctio->entry_type = CTIO_TYPE7;
2230	ctio->entry_count = 1;
2231	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2232	ctio->nport_handle = cpu_to_le16(mcmd->sess->loop_id);
2233	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2234	ctio->vp_index = ha->vp_idx;
2235	ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
2236	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	temp = (atio->u.isp24.attr << 9) |
2238		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
2239	ctio->u.status1.flags = cpu_to_le16(temp);
2240	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2241	ctio->u.status1.ox_id = cpu_to_le16(temp);
2242	ctio->u.status1.scsi_status =
2243	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
2244	ctio->u.status1.response_len = cpu_to_le16(8);
2245	ctio->u.status1.sense_data[0] = resp_code;
2246
2247	/* Memory Barrier */
2248	wmb();
2249	if (qpair->reqq_start_iocbs)
2250		qpair->reqq_start_iocbs(qpair);
2251	else
2252		qla2x00_start_iocbs(ha, qpair->req);
2253}
2254
2255void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
2256{
2257	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
2258}
2259EXPORT_SYMBOL(qlt_free_mcmd);
2260
2261/*
2262 * ha->hardware_lock supposed to be held on entry. Might drop it, then
2263 * reacquire
2264 */
2265void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
2266    uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
2267{
2268	struct atio_from_isp *atio = &cmd->atio;
2269	struct ctio7_to_24xx *ctio;
2270	uint16_t temp;
2271	struct scsi_qla_host *vha = cmd->vha;
2272
2273	ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
2274	    "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
	    "sense_key=%02x, asc=%02x, ascq=%02x)",
2276	    vha, atio, scsi_status, sense_key, asc, ascq);
2277
2278	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
2279	if (!ctio) {
2280		ql_dbg(ql_dbg_async, vha, 0x3067,
2281		    "qla2x00t(%ld): %s failed: unable to allocate request packet",
2282		    vha->host_no, __func__);
2283		goto out;
2284	}
2285
2286	ctio->entry_type = CTIO_TYPE7;
2287	ctio->entry_count = 1;
2288	ctio->handle = QLA_TGT_SKIP_HANDLE;
2289	ctio->nport_handle = cpu_to_le16(cmd->sess->loop_id);
2290	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2291	ctio->vp_index = vha->vp_idx;
2292	ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
2293	ctio->exchange_addr = atio->u.isp24.exchange_addr;
2294	temp = (atio->u.isp24.attr << 9) |
2295	    CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
2296	ctio->u.status1.flags = cpu_to_le16(temp);
2297	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2298	ctio->u.status1.ox_id = cpu_to_le16(temp);
2299	ctio->u.status1.scsi_status =
2300	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
2301	ctio->u.status1.response_len = cpu_to_le16(18);
2302	ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
2303
2304	if (ctio->u.status1.residual != 0)
2305		ctio->u.status1.scsi_status |=
2306		    cpu_to_le16(SS_RESIDUAL_UNDER);
2307
2308	/* Fixed format sense data. */
2309	ctio->u.status1.sense_data[0] = 0x70;
2310	ctio->u.status1.sense_data[2] = sense_key;
2311	/* Additional sense length */
2312	ctio->u.status1.sense_data[7] = 0xa;
2313	/* ASC and ASCQ */
2314	ctio->u.status1.sense_data[12] = asc;
2315	ctio->u.status1.sense_data[13] = ascq;
2316
2317	/* Memory Barrier */
2318	wmb();
2319
2320	if (qpair->reqq_start_iocbs)
2321		qpair->reqq_start_iocbs(qpair);
2322	else
2323		qla2x00_start_iocbs(vha, qpair->req);
2324
2325out:
2326	return;
2327}
2328
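/*
 * Send the response for a completed task management function: a notify ack
 * for immediate-notify based TMFs (scheduling session deletion for
 * LOGO/PRLO/TPRLO), an ABTS response for aborts, or a task mgmt CTIO
 * otherwise. Requests from before a chip reset, or while offline, are
 * dropped.
 */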
2329/* callback from target fabric module code */
2330void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
2331{
2332	struct scsi_qla_host *vha = mcmd->sess->vha;
2333	struct qla_hw_data *ha = vha->hw;
2334	unsigned long flags;
2335	struct qla_qpair *qpair = mcmd->qpair;
2336	bool free_mcmd = true;
2337
2338	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
2339	    "TM response mcmd (%p) status %#x state %#x",
2340	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);
2341
2342	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
2343
2344	if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) {
2345		/*
		 * Either the port is not online or this request was from a
		 * previous life; just abort the processing.
2348		 */
2349		ql_dbg(ql_dbg_async, vha, 0xe100,
2350			"RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
2351			vha->flags.online, qla2x00_reset_active(vha),
2352			mcmd->reset_count, qpair->chip_reset);
2353		ha->tgt.tgt_ops->free_mcmd(mcmd);
2354		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
2355		return;
2356	}
2357
2358	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
2359		switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) {
2360		case ELS_LOGO:
2361		case ELS_PRLO:
2362		case ELS_TPRLO:
2363			ql_dbg(ql_dbg_disc, vha, 0x2106,
2364			    "TM response logo %8phC status %#x state %#x",
2365			    mcmd->sess->port_name, mcmd->fc_tm_rsp,
2366			    mcmd->flags);
2367			qlt_schedule_sess_for_deletion(mcmd->sess);
2368			break;
2369		default:
2370			qlt_send_notify_ack(vha->hw->base_qpair,
2371			    &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
2372			break;
2373		}
2374	} else {
2375		if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) {
2376			qlt_build_abts_resp_iocb(mcmd);
2377			free_mcmd = false;
2378		} else
2379			qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
2380			    mcmd->fc_tm_rsp);
2381	}
2382	/*
2383	 * Make the callback for ->free_mcmd() to queue_work() and invoke
2384	 * target_put_sess_cmd() to drop cmd_kref to 1.  The final
2385	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
2386	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
2387	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here.
2389	 */
2390	if (free_mcmd)
2391		ha->tgt.tgt_ops->free_mcmd(mcmd);
2392
2393	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
2394}
2395EXPORT_SYMBOL(qlt_xmit_tm_rsp);
2396
2397/* No locks */
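/*
 * DMA-map the command's data (and, for DIF, protection) scatterlists and
 * compute the number of data segment descriptors and extra continuation
 * IOCB entries the request will need.
 */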
2398static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
2399{
2400	struct qla_tgt_cmd *cmd = prm->cmd;
2401
2402	BUG_ON(cmd->sg_cnt == 0);
2403
2404	prm->sg = (struct scatterlist *)cmd->sg;
2405	prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg,
2406	    cmd->sg_cnt, cmd->dma_data_direction);
2407	if (unlikely(prm->seg_cnt == 0))
2408		goto out_err;
2409
2410	prm->cmd->sg_mapped = 1;
2411
2412	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
		/*
		 * If there are more sg entries than fit in the command IOCB,
		 * we need to allocate continuation entries.
		 */
2417		if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX)
2418			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
2419			QLA_TGT_DATASEGS_PER_CMD_24XX,
2420			QLA_TGT_DATASEGS_PER_CONT_24XX);
2421	} else {
2422		/* DIF */
2423		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
2424		    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
2425			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
2426			prm->tot_dsds = prm->seg_cnt;
2427		} else
2428			prm->tot_dsds = prm->seg_cnt;
2429
2430		if (cmd->prot_sg_cnt) {
2431			prm->prot_sg      = cmd->prot_sg;
2432			prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev,
2433				cmd->prot_sg, cmd->prot_sg_cnt,
2434				cmd->dma_data_direction);
2435			if (unlikely(prm->prot_seg_cnt == 0))
2436				goto out_err;
2437
2438			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
2439			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
				/* DIF bundling is not supported here */
2441				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
2442								cmd->blk_sz);
2443				prm->tot_dsds += prm->prot_seg_cnt;
2444			} else
2445				prm->tot_dsds += prm->prot_seg_cnt;
2446		}
2447	}
2448
2449	return 0;
2450
2451out_err:
2452	ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d,
2453	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
2454	    0, prm->cmd->sg_cnt);
2455	return -1;
2456}
2457
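/*
 * Undo qlt_pci_map_calc_cnt(): unmap the data and protection scatterlists
 * and release the CRC context and any DSDs allocated for DIF.
 */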
2458static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
2459{
2460	struct qla_hw_data *ha;
2461	struct qla_qpair *qpair;
2462
2463	if (!cmd->sg_mapped)
2464		return;
2465
2466	qpair = cmd->qpair;
2467
2468	dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt,
2469	    cmd->dma_data_direction);
2470	cmd->sg_mapped = 0;
2471
2472	if (cmd->prot_sg_cnt)
2473		dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt,
2474			cmd->dma_data_direction);
2475
2476	if (!cmd->ctx)
2477		return;
2478	ha = vha->hw;
2479	if (cmd->ctx_dsd_alloced)
2480		qla2x00_clean_dsd_pool(ha, cmd->ctx);
2481
2482	dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
2483}
2484
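/*
 * Reserve req_cnt entries on the request ring. When the cached free count
 * looks too small, re-read the out pointer (from the shadow copy when
 * shadow registers are in use) to refresh it before giving up with -EAGAIN.
 */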
2485static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
2486	uint32_t req_cnt)
2487{
2488	uint32_t cnt;
2489	struct req_que *req = qpair->req;
2490
2491	if (req->cnt < (req_cnt + 2)) {
2492		cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
2493		    rd_reg_dword_relaxed(req->req_q_out));
2494
2495		if  (req->ring_index < cnt)
2496			req->cnt = cnt - req->ring_index;
2497		else
2498			req->cnt = req->length - (req->ring_index - cnt);
2499
2500		if (unlikely(req->cnt < (req_cnt + 2)))
2501			return -EAGAIN;
2502	}
2503
2504	req->cnt -= req_cnt;
2505
2506	return 0;
2507}
2508
2509/*
 * ha->hardware_lock is supposed to be held on entry. Might drop it, then
 * reacquire.
2511 */
2512static inline void *qlt_get_req_pkt(struct req_que *req)
2513{
2514	/* Adjust ring index. */
2515	req->ring_index++;
2516	if (req->ring_index == req->length) {
2517		req->ring_index = 0;
2518		req->ring_ptr = req->ring;
2519	} else {
2520		req->ring_ptr++;
2521	}
2522	return (cont_entry_t *)req->ring_ptr;
2523}
2524
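/*
 * Pick a free command handle from the request queue's outstanding_cmds[]
 * array, skipping slot 0 and QLA_TGT_SKIP_HANDLE. Returns
 * QLA_TGT_NULL_HANDLE when every slot is in use.
 */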
/* ha->hardware_lock is supposed to be held on entry */
2526static inline uint32_t qlt_make_handle(struct qla_qpair *qpair)
2527{
2528	uint32_t h;
2529	int index;
2530	uint8_t found = 0;
2531	struct req_que *req = qpair->req;
2532
2533	h = req->current_outstanding_cmd;
2534
2535	for (index = 1; index < req->num_outstanding_cmds; index++) {
2536		h++;
2537		if (h == req->num_outstanding_cmds)
2538			h = 1;
2539
2540		if (h == QLA_TGT_SKIP_HANDLE)
2541			continue;
2542
2543		if (!req->outstanding_cmds[h]) {
2544			found = 1;
2545			break;
2546		}
2547	}
2548
2549	if (found) {
2550		req->current_outstanding_cmd = h;
2551	} else {
2552		ql_dbg(ql_dbg_io, qpair->vha, 0x305b,
2553		    "qla_target(%d): Ran out of empty cmd slots\n",
2554		    qpair->vha->vp_idx);
2555		h = QLA_TGT_NULL_HANDLE;
2556	}
2557
2558	return h;
2559}
2560
/* ha->hardware_lock is supposed to be held on entry */
2562static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
2563	struct qla_tgt_prm *prm)
2564{
2565	uint32_t h;
2566	struct ctio7_to_24xx *pkt;
2567	struct atio_from_isp *atio = &prm->cmd->atio;
2568	uint16_t temp;
2569
2570	pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
2571	prm->pkt = pkt;
2572	memset(pkt, 0, sizeof(*pkt));
2573
2574	pkt->entry_type = CTIO_TYPE7;
2575	pkt->entry_count = (uint8_t)prm->req_cnt;
2576	pkt->vp_index = prm->cmd->vp_idx;
2577
2578	h = qlt_make_handle(qpair);
2579	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, therefore, the command.
		 */
2585		return -EAGAIN;
2586	} else
2587		qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
2588
2589	pkt->handle = make_handle(qpair->req->id, h);
2590	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
2591	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
2592	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2593	pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
2594	pkt->exchange_addr = atio->u.isp24.exchange_addr;
2595	temp = atio->u.isp24.attr << 9;
2596	pkt->u.status0.flags |= cpu_to_le16(temp);
2597	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2598	pkt->u.status0.ox_id = cpu_to_le16(temp);
2599	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
2600
2601	return 0;
2602}
2603
2604/*
 * ha->hardware_lock is supposed to be held on entry. We have already made
 * sure that there are enough request entries, so the lock need not be dropped.
2607 */
2608static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
2609{
2610	int cnt;
2611	struct dsd64 *cur_dsd;
2612
2613	/* Build continuation packets */
2614	while (prm->seg_cnt > 0) {
2615		cont_a64_entry_t *cont_pkt64 =
2616			(cont_a64_entry_t *)qlt_get_req_pkt(
2617			   prm->cmd->qpair->req);
2618
2619		/*
		 * Make sure that none of the 64-bit specific fields of
		 * cont_pkt64 are used for 32-bit addressing. Cast to
		 * (cont_entry_t *) for that.
2624		 */
2625
2626		memset(cont_pkt64, 0, sizeof(*cont_pkt64));
2627
2628		cont_pkt64->entry_count = 1;
2629		cont_pkt64->sys_define = 0;
2630
2631		cont_pkt64->entry_type = CONTINUE_A64_TYPE;
2632		cur_dsd = cont_pkt64->dsd;
2633
2634		/* Load continuation entry data segments */
2635		for (cnt = 0;
2636		    cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
2637		    cnt++, prm->seg_cnt--) {
2638			append_dsd64(&cur_dsd, prm->sg);
2639			prm->sg = sg_next(prm->sg);
2640		}
2641	}
2642}
2643
2644/*
 * ha->hardware_lock is supposed to be held on entry. We have already made
 * sure that there are enough request entries, so the lock need not be dropped.
2647 */
2648static void qlt_load_data_segments(struct qla_tgt_prm *prm)
2649{
2650	int cnt;
2651	struct dsd64 *cur_dsd;
2652	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
2653
2654	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
2655
2656	/* Setup packet address segment pointer */
2657	cur_dsd = &pkt24->u.status0.dsd;
2658
2659	/* Set total data segment count */
2660	if (prm->seg_cnt)
2661		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);
2662
2663	if (prm->seg_cnt == 0) {
2664		/* No data transfer */
2665		cur_dsd->address = 0;
2666		cur_dsd->length = 0;
2667		return;
2668	}
2669
2670	/* If scatter gather */
2671
2672	/* Load command entry data segments */
2673	for (cnt = 0;
2674	    (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
2675	    cnt++, prm->seg_cnt--) {
2676		append_dsd64(&cur_dsd, prm->sg);
2677		prm->sg = sg_next(prm->sg);
2678	}
2679
2680	qlt_load_cont_data_segments(prm);
2681}
2682
2683static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
2684{
2685	return cmd->bufflen > 0;
2686}
2687
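/*
 * Log which T10-PI (DIF) error the backend reported, keyed off the ASCQ
 * of an ASC 0x10 sense code: guard, application tag or reference tag.
 */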
2688static void qlt_print_dif_err(struct qla_tgt_prm *prm)
2689{
2690	struct qla_tgt_cmd *cmd;
2691	struct scsi_qla_host *vha;
2692
2693	/* asc 0x10=dif error */
2694	if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
2695		cmd = prm->cmd;
2696		vha = cmd->vha;
2697		/* ASCQ */
2698		switch (prm->sense_buffer[13]) {
2699		case 1:
2700			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
2701			    "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2702			    "se_cmd=%p tag[%x]",
2703			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2704			    cmd->atio.u.isp24.exchange_addr);
2705			break;
2706		case 2:
2707			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
2708			    "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2709			    "se_cmd=%p tag[%x]",
2710			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2711			    cmd->atio.u.isp24.exchange_addr);
2712			break;
2713		case 3:
2714			ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
2715			    "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2716			    "se_cmd=%p tag[%x]",
2717			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2718			    cmd->atio.u.isp24.exchange_addr);
2719			break;
2720		default:
2721			ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
2722			    "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
2723			    "se_cmd=%p tag[%x]",
2724			    cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2725			    cmd->atio.u.isp24.exchange_addr);
2726			break;
2727		}
2728		ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
2729	}
2730}
2731
2732/*
2733 * Called without ha->hardware_lock held
2734 */
2735static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
2736	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
2737	uint32_t *full_req_cnt)
2738{
2739	struct se_cmd *se_cmd = &cmd->se_cmd;
2740	struct qla_qpair *qpair = cmd->qpair;
2741
2742	prm->cmd = cmd;
2743	prm->tgt = cmd->tgt;
2744	prm->pkt = NULL;
2745	prm->rq_result = scsi_status;
2746	prm->sense_buffer = &cmd->sense_buffer[0];
2747	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
2748	prm->sg = NULL;
2749	prm->seg_cnt = -1;
2750	prm->req_cnt = 1;
2751	prm->residual = 0;
2752	prm->add_status_pkt = 0;
2753	prm->prot_sg = NULL;
2754	prm->prot_seg_cnt = 0;
2755	prm->tot_dsds = 0;
2756
2757	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
2758		if  (qlt_pci_map_calc_cnt(prm) != 0)
2759			return -EAGAIN;
2760	}
2761
2762	*full_req_cnt = prm->req_cnt;
2763
2764	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
2765		prm->residual = se_cmd->residual_count;
2766		ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c,
2767		    "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
2768		       prm->residual, se_cmd->tag,
2769		       se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
2770		       cmd->bufflen, prm->rq_result);
2771		prm->rq_result |= SS_RESIDUAL_UNDER;
2772	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
2773		prm->residual = se_cmd->residual_count;
2774		ql_dbg_qp(ql_dbg_io, qpair, 0x305d,
2775		    "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
2776		       prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
2777		       se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
2778		prm->rq_result |= SS_RESIDUAL_OVER;
2779	}
2780
2781	if (xmit_type & QLA_TGT_XMIT_STATUS) {
2782		/*
2783		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
2784		 * ignored in *xmit_response() below
2785		 */
2786		if (qlt_has_data(cmd)) {
2787			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
2788			    (IS_FWI2_CAPABLE(cmd->vha->hw) &&
2789			    (prm->rq_result != 0))) {
2790				prm->add_status_pkt = 1;
2791				(*full_req_cnt)++;
2792			}
2793		}
2794	}
2795
2796	return 0;
2797}
2798
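/*
 * Decide whether to request explicit confirmation for this command: never
 * when class 2 service is enabled, otherwise based on the initiator's
 * conf_compl_supported and, when not sending sense, on the qpair's
 * enable_explicit_conf setting.
 */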
2799static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd,
2800    int sending_sense)
2801{
2802	if (cmd->qpair->enable_class_2)
2803		return 0;
2804
2805	if (sending_sense)
2806		return cmd->conf_compl_supported;
2807	else
2808		return cmd->qpair->enable_explicit_conf &&
		    cmd->conf_compl_supported;
2810}
2811
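/*
 * Fill in the status/sense portion of a CTIO7: set SEND_STATUS, the
 * residual and the SCSI status and, when valid sense data is present,
 * switch to status mode 1 and copy the sense bytes with per-dword byte
 * swapping.
 */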
2812static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
2813	struct qla_tgt_prm *prm)
2814{
2815	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
2816	    (uint32_t)sizeof(ctio->u.status1.sense_data));
2817	ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
2818	if (qlt_need_explicit_conf(prm->cmd, 0)) {
2819		ctio->u.status0.flags |= cpu_to_le16(
2820		    CTIO7_FLAGS_EXPLICIT_CONFORM |
2821		    CTIO7_FLAGS_CONFORM_REQ);
2822	}
2823	ctio->u.status0.residual = cpu_to_le32(prm->residual);
2824	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
2825	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
2826		int i;
2827
2828		if (qlt_need_explicit_conf(prm->cmd, 1)) {
2829			if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) {
2830				ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017,
2831				    "Skipping EXPLICIT_CONFORM and "
2832				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
2833				    "non GOOD status\n");
2834				goto skip_explict_conf;
2835			}
2836			ctio->u.status1.flags |= cpu_to_le16(
2837			    CTIO7_FLAGS_EXPLICIT_CONFORM |
2838			    CTIO7_FLAGS_CONFORM_REQ);
2839		}
2840skip_explict_conf:
2841		ctio->u.status1.flags &=
2842		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2843		ctio->u.status1.flags |=
2844		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2845		ctio->u.status1.scsi_status |=
2846		    cpu_to_le16(SS_SENSE_LEN_VALID);
2847		ctio->u.status1.sense_length =
2848		    cpu_to_le16(prm->sense_buffer_len);
2849		for (i = 0; i < prm->sense_buffer_len/4; i++) {
2850			uint32_t v;
2851
2852			v = get_unaligned_be32(
2853					&((uint32_t *)prm->sense_buffer)[i]);
2854			put_unaligned_le32(v,
2855				&((uint32_t *)ctio->u.status1.sense_data)[i]);
2856		}
2857		qlt_print_dif_err(prm);
2858
2859	} else {
2860		ctio->u.status1.flags &=
2861		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2862		ctio->u.status1.flags |=
2863		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2864		ctio->u.status1.sense_length = 0;
2865		memset(ctio->u.status1.sense_data, 0,
2866		    sizeof(ctio->u.status1.sense_data));
2867	}
2868
	/* Sense data longer than 24 bytes: is that possible? */
2870}
2871
2872static inline int
2873qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
2874{
2875	switch (se_cmd->prot_op) {
2876	case TARGET_PROT_DOUT_INSERT:
2877	case TARGET_PROT_DIN_STRIP:
2878		if (ql2xenablehba_err_chk >= 1)
2879			return 1;
2880		break;
2881	case TARGET_PROT_DOUT_PASS:
2882	case TARGET_PROT_DIN_PASS:
2883		if (ql2xenablehba_err_chk >= 2)
2884			return 1;
2885		break;
2886	case TARGET_PROT_DIN_INSERT:
2887	case TARGET_PROT_DOUT_STRIP:
2888		return 1;
2889	default:
2890		break;
2891	}
2892	return 0;
2893}
2894
2895static inline int
2896qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
2897{
2898	switch (se_cmd->prot_op) {
2899	case TARGET_PROT_DIN_INSERT:
2900	case TARGET_PROT_DOUT_INSERT:
2901	case TARGET_PROT_DIN_STRIP:
2902	case TARGET_PROT_DOUT_STRIP:
2903	case TARGET_PROT_DIN_PASS:
2904	case TARGET_PROT_DOUT_PASS:
2905	    return 1;
2906	default:
2907	    return 0;
2908	}
2910}
2911
2912/*
2913 * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
2914 */
2915static void
2916qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
2917    uint16_t *pfw_prot_opts)
2918{
2919	struct se_cmd *se_cmd = &cmd->se_cmd;
2920	uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
2921	scsi_qla_host_t *vha = cmd->tgt->vha;
2922	struct qla_hw_data *ha = vha->hw;
2923	uint32_t t32 = 0;
2924
2925	/*
	 * Wait until the Mode Sense/Select commands (mode page 0Ah,
	 * subpage 2) have been implemented by TCM before the app tag
	 * is available. Look for modesense_handlers[].
2929	 */
2930	ctx->app_tag = 0;
2931	ctx->app_tag_mask[0] = 0x0;
2932	ctx->app_tag_mask[1] = 0x0;
2933
2934	if (IS_PI_UNINIT_CAPABLE(ha)) {
2935		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
2936		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
2937			*pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
2938		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
2939			*pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
2940	}
2941
2942	t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);
2943
2944	switch (se_cmd->prot_type) {
2945	case TARGET_DIF_TYPE0_PROT:
2946		/*
2947		 * No check for ql2xenablehba_err_chk, as it
2948		 * would be an I/O error if hba tag generation
2949		 * is not done.
2950		 */
2951		ctx->ref_tag = cpu_to_le32(lba);
2952		/* enable ALL bytes of the ref tag */
2953		ctx->ref_tag_mask[0] = 0xff;
2954		ctx->ref_tag_mask[1] = 0xff;
2955		ctx->ref_tag_mask[2] = 0xff;
2956		ctx->ref_tag_mask[3] = 0xff;
2957		break;
2958	case TARGET_DIF_TYPE1_PROT:
2959	    /*
2960	     * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
2961	     * REF tag, and 16 bit app tag.
2962	     */
2963	    ctx->ref_tag = cpu_to_le32(lba);
2964	    if (!qla_tgt_ref_mask_check(se_cmd) ||
2965		!(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
2966		    *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
2967		    break;
2968	    }
2969	    /* enable ALL bytes of the ref tag */
2970	    ctx->ref_tag_mask[0] = 0xff;
2971	    ctx->ref_tag_mask[1] = 0xff;
2972	    ctx->ref_tag_mask[2] = 0xff;
2973	    ctx->ref_tag_mask[3] = 0xff;
2974	    break;
2975	case TARGET_DIF_TYPE2_PROT:
2976	    /*
2977	     * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
2978	     * tag has to match LBA in CDB + N
2979	     */
2980	    ctx->ref_tag = cpu_to_le32(lba);
2981	    if (!qla_tgt_ref_mask_check(se_cmd) ||
2982		!(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
2983		    *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
2984		    break;
2985	    }
2986	    /* enable ALL bytes of the ref tag */
2987	    ctx->ref_tag_mask[0] = 0xff;
2988	    ctx->ref_tag_mask[1] = 0xff;
2989	    ctx->ref_tag_mask[2] = 0xff;
2990	    ctx->ref_tag_mask[3] = 0xff;
2991	    break;
2992	case TARGET_DIF_TYPE3_PROT:
2993	    /* For TYPE 3 protection: 16 bit GUARD only */
2994	    *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
2995	    ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
2996		ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
2997	    break;
2998	}
2999}
3000
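/*
 * Build a CTIO CRC_2 IOCB for a T10-PI protected command: compute the FC
 * transfer length (data plus DIF bytes, depending on prot_op), set the
 * firmware protection options, allocate a CRC context from the DMA pool
 * and walk the data (and, when bundling, protection) scatterlists into
 * DSD lists.
 */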
3001static inline int
3002qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
3003{
3004	struct dsd64		*cur_dsd;
3005	uint32_t		transfer_length = 0;
3006	uint32_t		data_bytes;
3007	uint32_t		dif_bytes;
3008	uint8_t			bundling = 1;
3009	struct crc_context	*crc_ctx_pkt = NULL;
3010	struct qla_hw_data	*ha;
3011	struct ctio_crc2_to_fw	*pkt;
3012	dma_addr_t		crc_ctx_dma;
3013	uint16_t		fw_prot_opts = 0;
3014	struct qla_tgt_cmd	*cmd = prm->cmd;
3015	struct se_cmd		*se_cmd = &cmd->se_cmd;
3016	uint32_t h;
3017	struct atio_from_isp *atio = &prm->cmd->atio;
3018	struct qla_tc_param	tc;
3019	uint16_t t16;
3020	scsi_qla_host_t *vha = cmd->vha;
3021
3022	ha = vha->hw;
3023
3024	pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr;
3025	prm->pkt = pkt;
3026	memset(pkt, 0, sizeof(*pkt));
3027
3028	ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071,
3029		"qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
3030		cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op,
3031		prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);
3032
3033	if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
3034	    (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
3035		bundling = 0;
3036
	/* Compute DIF len and adjust data len to include protection */
3038	data_bytes = cmd->bufflen;
3039	dif_bytes  = (data_bytes / cmd->blk_sz) * 8;
3040
3041	switch (se_cmd->prot_op) {
3042	case TARGET_PROT_DIN_INSERT:
3043	case TARGET_PROT_DOUT_STRIP:
3044		transfer_length = data_bytes;
3045		if (cmd->prot_sg_cnt)
3046			data_bytes += dif_bytes;
3047		break;
3048	case TARGET_PROT_DIN_STRIP:
3049	case TARGET_PROT_DOUT_INSERT:
3050	case TARGET_PROT_DIN_PASS:
3051	case TARGET_PROT_DOUT_PASS:
3052		transfer_length = data_bytes + dif_bytes;
3053		break;
3054	default:
3055		BUG();
3056		break;
3057	}
3058
	if (!qlt_hba_err_chk_enabled(se_cmd))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		/* HBA error checking enabled */
3063		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
3064		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
3065			fw_prot_opts |= PO_DIS_VALD_APP_ESC;
3066		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
3067			fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
3068	}
3069
3070	switch (se_cmd->prot_op) {
3071	case TARGET_PROT_DIN_INSERT:
3072	case TARGET_PROT_DOUT_INSERT:
3073		fw_prot_opts |= PO_MODE_DIF_INSERT;
3074		break;
3075	case TARGET_PROT_DIN_STRIP:
3076	case TARGET_PROT_DOUT_STRIP:
3077		fw_prot_opts |= PO_MODE_DIF_REMOVE;
3078		break;
3079	case TARGET_PROT_DIN_PASS:
3080	case TARGET_PROT_DOUT_PASS:
3081		fw_prot_opts |= PO_MODE_DIF_PASS;
3082		/* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
3083		break;
3084	default:/* Normal Request */
3085		fw_prot_opts |= PO_MODE_DIF_PASS;
3086		break;
3087	}
3088
3089	/* ---- PKT ---- */
3090	/* Update entry type to indicate Command Type CRC_2 IOCB */
3091	pkt->entry_type  = CTIO_CRC2;
3092	pkt->entry_count = 1;
3093	pkt->vp_index = cmd->vp_idx;
3094
3095	h = qlt_make_handle(qpair);
3096	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, therefore, the command.
		 */
3102		return -EAGAIN;
3103	} else
3104		qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
3105
3106	pkt->handle  = make_handle(qpair->req->id, h);
3107	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
3108	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
3109	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
3110	pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
3111	pkt->exchange_addr   = atio->u.isp24.exchange_addr;
3112
	/* silence compiler warning */
3114	t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
3115	pkt->ox_id  = cpu_to_le16(t16);
3116
3117	t16 = (atio->u.isp24.attr << 9);
3118	pkt->flags |= cpu_to_le16(t16);
3119	pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
3120
3121	/* Set transfer direction */
3122	if (cmd->dma_data_direction == DMA_TO_DEVICE)
3123		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
3124	else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
3125		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
3126
3127	pkt->dseg_count = cpu_to_le16(prm->tot_dsds);
3128	/* Fibre channel byte count */
3129	pkt->transfer_length = cpu_to_le32(transfer_length);
3130
3131	/* ----- CRC context -------- */
3132
3133	/* Allocate CRC context from global pool */
3134	crc_ctx_pkt = cmd->ctx =
3135	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
3136
3137	if (!crc_ctx_pkt)
3138		goto crc_queuing_error;
3139
3140	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
3141	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
3142
3143	/* Set handle */
3144	crc_ctx_pkt->handle = pkt->handle;
3145
3146	qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
3147
3148	put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address);
3149	pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
3150
3151	if (!bundling) {
3152		cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
3153	} else {
3154		/*
		 * Configure bundling if we need to fetch interleaving
		 * protection data via PCI accesses.
3157		 */
3158		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
3159		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
3160		crc_ctx_pkt->u.bundling.dseg_count =
3161			cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
3162		cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
3163	}
3164
3165	/* Finish the common fields of CRC pkt */
3166	crc_ctx_pkt->blk_size   = cpu_to_le16(cmd->blk_sz);
3167	crc_ctx_pkt->prot_opts  = cpu_to_le16(fw_prot_opts);
3168	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
3169	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
3170
	memset(&tc, 0, sizeof(tc));
3172	tc.vha = vha;
3173	tc.blk_sz = cmd->blk_sz;
3174	tc.bufflen = cmd->bufflen;
3175	tc.sg = cmd->sg;
3176	tc.prot_sg = cmd->prot_sg;
3177	tc.ctx = crc_ctx_pkt;
3178	tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;
3179
3180	/* Walks data segments */
3181	pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
3182
3183	if (!bundling && prm->prot_seg_cnt) {
3184		if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
3185			prm->tot_dsds, &tc))
3186			goto crc_queuing_error;
3187	} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
3188		(prm->tot_dsds - prm->prot_seg_cnt), &tc))
3189		goto crc_queuing_error;
3190
3191	if (bundling && prm->prot_seg_cnt) {
3192		/* Walks dif segments */
3193		pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
3194
3195		cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
3196		if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
3197			prm->prot_seg_cnt, cmd))
3198			goto crc_queuing_error;
3199	}
3200	return QLA_SUCCESS;
3201
3202crc_queuing_error:
3203	/* Cleanup will be performed by the caller */
3204	qpair->req->outstanding_cmds[h] = NULL;
3205
3206	return QLA_FUNCTION_FAILED;
3207}
3208
3209/*
 * Callback to set up a response of xmit_type QLA_TGT_XMIT_DATA and/or
 * QLA_TGT_XMIT_STATUS for >= 24xx silicon.
3212 */
3213int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
3214	uint8_t scsi_status)
3215{
3216	struct scsi_qla_host *vha = cmd->vha;
3217	struct qla_qpair *qpair = cmd->qpair;
3218	struct ctio7_to_24xx *pkt;
3219	struct qla_tgt_prm prm;
3220	uint32_t full_req_cnt = 0;
3221	unsigned long flags = 0;
3222	int res;
3223
3224	if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
3225	    (cmd->sess && cmd->sess->deleted)) {
3226		cmd->state = QLA_TGT_STATE_PROCESSED;
3227		return 0;
3228	}
3229
3230	ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
3231	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
3232	    (xmit_type & QLA_TGT_XMIT_STATUS) ?
3233	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
3234	    &cmd->se_cmd, qpair->id);
3235
3236	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
3237	    &full_req_cnt);
	if (unlikely(res != 0))
		return res;
3241
3242	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3243
3244	if (xmit_type == QLA_TGT_XMIT_STATUS)
3245		qpair->tgt_counters.core_qla_snd_status++;
3246	else
3247		qpair->tgt_counters.core_qla_que_buf++;
3248
3249	if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) {
3250		/*
		 * Either the port is not online or this request was from a
		 * previous life; just abort the processing.
3253		 */
3254		cmd->state = QLA_TGT_STATE_PROCESSED;
3255		ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
3256			"RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
3257			vha->flags.online, qla2x00_reset_active(vha),
3258			cmd->reset_count, qpair->chip_reset);
3259		res = 0;
3260		goto out_unmap_unlock;
3261	}
3262
	/* Does the F/W have enough IOCBs for this request? */
3264	res = qlt_check_reserve_free_req(qpair, full_req_cnt);
3265	if (unlikely(res))
3266		goto out_unmap_unlock;
3267
3268	if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
3269		res = qlt_build_ctio_crc2_pkt(qpair, &prm);
3270	else
3271		res = qlt_24xx_build_ctio_pkt(qpair, &prm);
3272	if (unlikely(res != 0)) {
3273		qpair->req->cnt += full_req_cnt;
3274		goto out_unmap_unlock;
3275	}
3276
3277	pkt = (struct ctio7_to_24xx *)prm.pkt;
3278
3279	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
3280		pkt->u.status0.flags |=
3281		    cpu_to_le16(CTIO7_FLAGS_DATA_IN |
3282			CTIO7_FLAGS_STATUS_MODE_0);
3283
3284		if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
3285			qlt_load_data_segments(&prm);
3286
3287		if (prm.add_status_pkt == 0) {
3288			if (xmit_type & QLA_TGT_XMIT_STATUS) {
3289				pkt->u.status0.scsi_status =
3290				    cpu_to_le16(prm.rq_result);
3291				pkt->u.status0.residual =
3292				    cpu_to_le32(prm.residual);
3293				pkt->u.status0.flags |= cpu_to_le16(
3294				    CTIO7_FLAGS_SEND_STATUS);
3295				if (qlt_need_explicit_conf(cmd, 0)) {
3296					pkt->u.status0.flags |=
3297					    cpu_to_le16(
3298						CTIO7_FLAGS_EXPLICIT_CONFORM |
3299						CTIO7_FLAGS_CONFORM_REQ);
3300				}
3301			}
3302
3303		} else {
3304			/*
			 * We have already made sure that there are enough
			 * request entries to avoid dropping the HW lock in
3307			 * req_pkt().
3308			 */
3309			struct ctio7_to_24xx *ctio =
3310				(struct ctio7_to_24xx *)qlt_get_req_pkt(
3311				    qpair->req);
3312
3313			ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e,
3314			    "Building additional status packet 0x%p.\n",
3315			    ctio);
3316
3317			/*
			 * T10-DIF: ctio_crc2_to_fw overlay on top of
3319			 * ctio7_to_24xx
3320			 */
3321			memcpy(ctio, pkt, sizeof(*ctio));
3322			/* reset back to CTIO7 */
3323			ctio->entry_count = 1;
3324			ctio->entry_type = CTIO_TYPE7;
3325			ctio->dseg_count = 0;
3326			ctio->u.status1.flags &= ~cpu_to_le16(
3327			    CTIO7_FLAGS_DATA_IN);
3328
3329			/* Real finish is ctio_m1's finish */
3330			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
3331			pkt->u.status0.flags |= cpu_to_le16(
3332			    CTIO7_FLAGS_DONT_RET_CTIO);
3333
3334			/* qlt_24xx_init_ctio_to_isp will correct
			 * all necessary fields that are part of the CTIO7.
			 * There should be no residue of CTIO-CRC2 data.
3337			 */
3338			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
3339			    &prm);
3340		}
3341	} else
3342		qlt_24xx_init_ctio_to_isp(pkt, &prm);
3343
3345	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
3346	cmd->cmd_sent_to_fw = 1;
3347	cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
3348
3349	/* Memory Barrier */
3350	wmb();
3351	if (qpair->reqq_start_iocbs)
3352		qpair->reqq_start_iocbs(qpair);
3353	else
3354		qla2x00_start_iocbs(vha, qpair->req);
3355	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3356
3357	return 0;
3358
3359out_unmap_unlock:
3360	qlt_unmap_sg(vha, cmd);
3361	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3362
3363	return res;
3364}
3365EXPORT_SYMBOL(qlt_xmit_response);
3366
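/*
 * Called when the target core is ready to receive write data: post a CTIO
 * (CTIO CRC_2 for protected commands) with CTIO7_FLAGS_DATA_OUT so the
 * firmware fetches the data from the initiator, and move the command to
 * QLA_TGT_STATE_NEED_DATA.
 */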
3367int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
3368{
3369	struct ctio7_to_24xx *pkt;
3370	struct scsi_qla_host *vha = cmd->vha;
3371	struct qla_tgt *tgt = cmd->tgt;
3372	struct qla_tgt_prm prm;
3373	unsigned long flags = 0;
3374	int res = 0;
3375	struct qla_qpair *qpair = cmd->qpair;
3376
3377	memset(&prm, 0, sizeof(prm));
3378	prm.cmd = cmd;
3379	prm.tgt = tgt;
3380	prm.sg = NULL;
3381	prm.req_cnt = 1;
3382
3383	if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
3384	    (cmd->sess && cmd->sess->deleted)) {
3385		/*
		 * Either the port is not online or this request was from a
		 * previous life; just abort the processing.
3388		 */
3389		cmd->aborted = 1;
3390		cmd->write_data_transferred = 0;
3391		cmd->state = QLA_TGT_STATE_DATA_IN;
3392		vha->hw->tgt.tgt_ops->handle_data(cmd);
3393		ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
3394			"RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
3395			vha->flags.online, qla2x00_reset_active(vha),
3396			cmd->reset_count, qpair->chip_reset);
3397		return 0;
3398	}
3399
3400	/* Calculate number of entries and segments required */
3401	if (qlt_pci_map_calc_cnt(&prm) != 0)
3402		return -EAGAIN;
3403
3404	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	/* Does the F/W have enough IOCBs for this request? */
3406	res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
3407	if (res != 0)
3408		goto out_unlock_free_unmap;
3409	if (cmd->se_cmd.prot_op)
3410		res = qlt_build_ctio_crc2_pkt(qpair, &prm);
3411	else
3412		res = qlt_24xx_build_ctio_pkt(qpair, &prm);
3413
3414	if (unlikely(res != 0)) {
3415		qpair->req->cnt += prm.req_cnt;
3416		goto out_unlock_free_unmap;
3417	}
3418
3419	pkt = (struct ctio7_to_24xx *)prm.pkt;
3420	pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
3421	    CTIO7_FLAGS_STATUS_MODE_0);
3422
3423	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
3424		qlt_load_data_segments(&prm);
3425
3426	cmd->state = QLA_TGT_STATE_NEED_DATA;
3427	cmd->cmd_sent_to_fw = 1;
3428	cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
3429
3430	/* Memory Barrier */
3431	wmb();
3432	if (qpair->reqq_start_iocbs)
3433		qpair->reqq_start_iocbs(qpair);
3434	else
3435		qla2x00_start_iocbs(vha, qpair->req);
3436	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3437
3438	return res;
3439
3440out_unlock_free_unmap:
3441	qlt_unmap_sg(vha, cmd);
3442	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3443
3444	return res;
3445}
3446EXPORT_SYMBOL(qlt_rdy_to_xfer);
3447
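/*
 * Handle a T10-PI error reported by the firmware in a CTIO CRC_2
 * completion: extract the actual and expected DIF tuples, map the mismatch
 * to the matching ABORTED COMMAND sense (ASC 0x10) and either hand the
 * command back through ->handle_data() or send the response CTIO directly.
 */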
3449/*
 * It is assumed that either the hardware_lock or the qpair lock is held.
3451 */
3452static void
3453qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
3454	struct ctio_crc_from_fw *sts)
3455{
3456	uint8_t		*ap = &sts->actual_dif[0];
3457	uint8_t		*ep = &sts->expected_dif[0];
3458	uint64_t	lba = cmd->se_cmd.t_task_lba;
3459	uint8_t scsi_status, sense_key, asc, ascq;
3460	unsigned long flags;
3461	struct scsi_qla_host *vha = cmd->vha;
3462
3463	cmd->trc_flags |= TRC_DIF_ERR;
3464
3465	cmd->a_guard   = get_unaligned_be16(ap + 0);
3466	cmd->a_app_tag = get_unaligned_be16(ap + 2);
3467	cmd->a_ref_tag = get_unaligned_be32(ap + 4);
3468
3469	cmd->e_guard   = get_unaligned_be16(ep + 0);
3470	cmd->e_app_tag = get_unaligned_be16(ep + 2);
3471	cmd->e_ref_tag = get_unaligned_be32(ep + 4);
3472
3473	ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
3474	    "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);
3475
3476	scsi_status = sense_key = asc = ascq = 0;
3477
3478	/* check appl tag */
3479	if (cmd->e_app_tag != cmd->a_app_tag) {
3480		ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
3481		    "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
3482		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3483		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
3484		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
3485		    cmd->atio.u.isp24.fcp_hdr.ox_id);
3486
3487		cmd->dif_err_code = DIF_ERR_APP;
3488		scsi_status = SAM_STAT_CHECK_CONDITION;
3489		sense_key = ABORTED_COMMAND;
3490		asc = 0x10;
3491		ascq = 0x2;
3492	}
3493
3494	/* check ref tag */
3495	if (cmd->e_ref_tag != cmd->a_ref_tag) {
3496		ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
3497		    "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ",
3498		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3499		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
3500		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
3501		    cmd->atio.u.isp24.fcp_hdr.ox_id);
3502
3503		cmd->dif_err_code = DIF_ERR_REF;
3504		scsi_status = SAM_STAT_CHECK_CONDITION;
3505		sense_key = ABORTED_COMMAND;
3506		asc = 0x10;
3507		ascq = 0x3;
3508		goto out;
3509	}
3510
3511	/* check guard */
3512	if (cmd->e_guard != cmd->a_guard) {
3513		ql_dbg(ql_dbg_tgt_dif, vha, 0xe012,
3514		    "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
3515		    cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3516		    cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
3517		    cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
3518		    cmd->atio.u.isp24.fcp_hdr.ox_id);
3519
3520		cmd->dif_err_code = DIF_ERR_GRD;
3521		scsi_status = SAM_STAT_CHECK_CONDITION;
3522		sense_key = ABORTED_COMMAND;
3523		asc = 0x10;
3524		ascq = 0x1;
3525	}
3526out:
3527	switch (cmd->state) {
3528	case QLA_TGT_STATE_NEED_DATA:
3529		/* handle_data will load DIF error code  */
3530		cmd->state = QLA_TGT_STATE_DATA_IN;
3531		vha->hw->tgt.tgt_ops->handle_data(cmd);
3532		break;
3533	default:
3534		spin_lock_irqsave(&cmd->cmd_lock, flags);
3535		if (cmd->aborted) {
3536			spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3537			vha->hw->tgt.tgt_ops->free_cmd(cmd);
3538			break;
3539		}
3540		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3541
3542		qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc,
3543		    ascq);
		/* Assume the SCSI status gets out on the wire;
		 * will not wait for completion.
		 */
3547		vha->hw->tgt.tgt_ops->free_cmd(cmd);
3548		break;
3549	}
3550}
3551
/* If hardware_lock is held on entry, it might be dropped, then reacquired. */
3553/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
3554static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3555	struct imm_ntfy_from_isp *ntfy)
3556{
3557	struct nack_to_isp *nack;
3558	struct qla_hw_data *ha = vha->hw;
3559	request_t *pkt;
3560	int ret = 0;
3561
3562	ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
3563	    "Sending TERM ELS CTIO (ha=%p)\n", ha);
3564
3565	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
3566	if (pkt == NULL) {
3567		ql_dbg(ql_dbg_tgt, vha, 0xe080,
3568		    "qla_target(%d): %s failed: unable to allocate "
3569		    "request packet\n", vha->vp_idx, __func__);
3570		return -ENOMEM;
3571	}
3572
3573	pkt->entry_type = NOTIFY_ACK_TYPE;
3574	pkt->entry_count = 1;
3575	pkt->handle = QLA_TGT_SKIP_HANDLE;
3576
3577	nack = (struct nack_to_isp *)pkt;
3578	nack->ox_id = ntfy->ox_id;
3579
3580	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3581	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3582		nack->u.isp24.flags = ntfy->u.isp24.flags &
3583			cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
3584	}
3585
3586	/* terminate */
3587	nack->u.isp24.flags |=
3588		cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
3589
3590	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3591	nack->u.isp24.status = ntfy->u.isp24.status;
3592	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3593	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3594	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3595	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3596	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3597	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3598
3599	qla2x00_start_iocbs(vha, vha->req);
3600	return ret;
3601}
3602
3603static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3604	struct imm_ntfy_from_isp *imm, int ha_locked)
3605{
3606	int rc;
3607
3608	WARN_ON_ONCE(!ha_locked);
3609	rc = __qlt_send_term_imm_notif(vha, imm);
3610	pr_debug("rc = %d\n", rc);
3611}
3612
3613/*
3614 * If hardware_lock held on entry, might drop it, then reacquire
3615 * This function sends the appropriate CTIO to ISP 2xxx or 24xx
3616 */
3617static int __qlt_send_term_exchange(struct qla_qpair *qpair,
3618	struct qla_tgt_cmd *cmd,
3619	struct atio_from_isp *atio)
3620{
3621	struct scsi_qla_host *vha = qpair->vha;
3622	struct ctio7_to_24xx *ctio24;
3623	struct qla_hw_data *ha = vha->hw;
3624	request_t *pkt;
3625	int ret = 0;
3626	uint16_t temp;
3627
3628	ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
3629
3630	if (cmd)
3631		vha = cmd->vha;
3632
3633	pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
3634	if (pkt == NULL) {
3635		ql_dbg(ql_dbg_tgt, vha, 0xe050,
3636		    "qla_target(%d): %s failed: unable to allocate "
3637		    "request packet\n", vha->vp_idx, __func__);
3638		return -ENOMEM;
3639	}
3640
3641	if (cmd != NULL) {
3642		if (cmd->state < QLA_TGT_STATE_PROCESSED) {
3643			ql_dbg(ql_dbg_tgt, vha, 0xe051,
3644			    "qla_target(%d): Terminating cmd %p with "
3645			    "incorrect state %d\n", vha->vp_idx, cmd,
3646			    cmd->state);
3647		} else
3648			ret = 1;
3649	}
3650
3651	qpair->tgt_counters.num_term_xchg_sent++;
3652	pkt->entry_count = 1;
3653	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
3654
3655	ctio24 = (struct ctio7_to_24xx *)pkt;
3656	ctio24->entry_type = CTIO_TYPE7;
3657	ctio24->nport_handle = cpu_to_le16(CTIO7_NHANDLE_UNRECOGNIZED);
3658	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
3659	ctio24->vp_index = vha->vp_idx;
3660	ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
3661	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
3662	temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
3663		CTIO7_FLAGS_TERMINATE;
3664	ctio24->u.status1.flags = cpu_to_le16(temp);
3665	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
3666	ctio24->u.status1.ox_id = cpu_to_le16(temp);
3667
3668	/* Memory Barrier */
3669	wmb();
3670	if (qpair->reqq_start_iocbs)
3671		qpair->reqq_start_iocbs(qpair);
3672	else
3673		qla2x00_start_iocbs(vha, qpair->req);
3674	return ret;
3675}
3676
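/*
 * Terminate an exchange, taking the qpair lock unless the caller already
 * holds it. On -ENOMEM the termination is parked as a QFull command so it
 * can be retried later; the cmd itself is freed unless the upper layer
 * owns the abort (ul_abort) or the cmd was already aborted.
 */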
3677static void qlt_send_term_exchange(struct qla_qpair *qpair,
3678	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
3679	int ul_abort)
3680{
3681	struct scsi_qla_host *vha;
3682	unsigned long flags = 0;
3683	int rc;
3684
3685	/* NPIV: the cmd may belong to a different (virtual port) vha */
3686	if (cmd)
3687		vha = cmd->vha;
3688	else
3689		vha = qpair->vha;
3690
3691	if (ha_locked) {
3692		rc = __qlt_send_term_exchange(qpair, cmd, atio);
3693		if (rc == -ENOMEM)
3694			qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3695		goto done;
3696	}
3697	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3698	rc = __qlt_send_term_exchange(qpair, cmd, atio);
3699	if (rc == -ENOMEM)
3700		qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3701
3702done:
3703	if (cmd && !ul_abort && !cmd->aborted) {
3704		if (cmd->sg_mapped)
3705			qlt_unmap_sg(vha, cmd);
3706		vha->hw->tgt.tgt_ops->free_cmd(cmd);
3707	}
3708
3709	if (!ha_locked)
3710		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3711
3712	return;
3713}
3714
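/*
 * Recompute the exchange-leak threshold from the current FW exchange count
 * and drop any commands still parked on the q_full_list.
 */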
3715static void qlt_init_term_exchange(struct scsi_qla_host *vha)
3716{
3717	struct list_head free_list;
3718	struct qla_tgt_cmd *cmd, *tcmd;
3719
3720	vha->hw->tgt.leak_exchg_thresh_hold =
3721	    (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
3722
3723	cmd = tcmd = NULL;
3724	if (!list_empty(&vha->hw->tgt.q_full_list)) {
3725		INIT_LIST_HEAD(&free_list);
3726		list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
3727
3728		list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
3729			list_del(&cmd->cmd_list);
3730			/* This cmd was never sent to TCM.  There is no need
3731			 * to schedule free or call free_cmd
3732			 */
3733			qlt_free_cmd(cmd);
3734			vha->hw->tgt.num_qfull_cmds_alloc--;
3735		}
3736	}
3737	vha->hw->tgt.num_qfull_cmds_dropped = 0;
3738}
3739
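/*
 * If too many exchanges have leaked via dropped QFull commands, schedule a
 * chip reset to reclaim them.
 */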
3740static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
3741{
3742	uint32_t total_leaked;
3743
3744	total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
3745
3746	if (vha->hw->tgt.leak_exchg_thresh_hold &&
3747	    (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
3748
3749		ql_dbg(ql_dbg_tgt, vha, 0xe079,
3750		    "Chip reset due to exchange starvation: %d/%d.\n",
3751		    total_leaked, vha->hw->cur_fw_xcb_count);
3752
3753		if (IS_P3P_TYPE(vha->hw))
3754			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
3755		else
3756			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3757		qla2xxx_wake_dpc(vha);
3758	}
3760}
3761
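/*
 * Abort a command on behalf of the upper layer: mark it aborted and
 * terminate its exchange. Returns -EIO if the command was already aborted.
 */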
3762int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
3763{
3764	struct qla_tgt *tgt = cmd->tgt;
3765	struct scsi_qla_host *vha = tgt->vha;
3766	struct se_cmd *se_cmd = &cmd->se_cmd;
3767	unsigned long flags;
3768
3769	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
3770	    "qla_target(%d): terminating exchange for aborted cmd=%p "
3771	    "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
3772	    se_cmd->tag);
3773
3774	spin_lock_irqsave(&cmd->cmd_lock, flags);
3775	if (cmd->aborted) {
3776		if (cmd->sg_mapped)
3777			qlt_unmap_sg(vha, cmd);
3778
3779		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3780		/*
3781		 * It's normal to see 2 calls in this path:
3782		 *  1) XFER Rdy completion + CMD_T_ABORT
3783		 *  2) TCM TMR - drain_state_list
3784		 */
3785		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
3786		    "multiple abort. %p transport_state %x, t_state %x, "
3787		    "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
3788		    cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
3789		return -EIO;
3790	}
3791	cmd->aborted = 1;
3792	cmd->trc_flags |= TRC_ABORT;
3793	spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3794
3795	qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1);
3796	return 0;
3797}
3798EXPORT_SYMBOL(qlt_abort_cmd);
3799
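/*
 * Final release of a command: drop the pending-command count (unless it is
 * a QFull placeholder) and return the cmd to the LLD's pool via rel_cmd().
 */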
3800void qlt_free_cmd(struct qla_tgt_cmd *cmd)
3801{
3802	struct fc_port *sess = cmd->sess;
3803
3804	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
3805	    "%s: se_cmd[%p] ox_id %04x\n",
3806	    __func__, &cmd->se_cmd,
3807	    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
3808
3809	BUG_ON(cmd->cmd_in_wq);
3810
3811	if (!cmd->q_full)
3812		qlt_decr_num_pend_cmds(cmd->vha);
3813
3814	BUG_ON(cmd->sg_mapped);
3815	cmd->jiffies_at_free = get_jiffies_64();
3816	if (unlikely(cmd->free_sg))
3817		kfree(cmd->sg);
3818
3819	if (!sess || !sess->se_sess) {
3820		WARN_ON(1);
3821		return;
3822	}
3824	cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
3825}
3826EXPORT_SYMBOL(qlt_free_cmd);
3827
3828/*
3829 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3830 */
3831static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
3832	struct qla_tgt_cmd *cmd, uint32_t status)
3833{
3834	int term = 0;
3835	struct scsi_qla_host *vha = qpair->vha;
3836
3837	if (cmd->se_cmd.prot_op)
3838		ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
3839		    "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
3840		    "se_cmd=%p tag[%x] op %#x/%s",
3841		     cmd->lba, cmd->lba,
3842		     cmd->num_blks, &cmd->se_cmd,
3843		     cmd->atio.u.isp24.exchange_addr,
3844		     cmd->se_cmd.prot_op,
3845		     prot_op_str(cmd->se_cmd.prot_op));
3846
3847	if (ctio != NULL) {
3848		struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
3849
3850		term = !(c->flags &
3851		    cpu_to_le16(OF_TERM_EXCH));
3852	} else
3853		term = 1;
3854
3855	if (term)
3856		qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);
3857
3858	return term;
3859}
3860
3861
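/*
 * Translate a CTIO completion handle back to the outstanding command,
 * validating the request-queue ID encoded in the handle.
 */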
3862/* ha->hardware_lock supposed to be held on entry */
3863static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
3864	struct rsp_que *rsp, uint32_t handle, void *ctio)
3865{
3866	void *cmd = NULL;
3867	struct req_que *req;
3868	int qid = GET_QID(handle);
3869	uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;
3870
3871	if (unlikely(h == QLA_TGT_SKIP_HANDLE))
3872		return NULL;
3873
3874	if (qid == rsp->req->id) {
3875		req = rsp->req;
3876	} else if (vha->hw->req_q_map[qid]) {
3877		ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a,
3878		    "qla_target(%d): CTIO completion with different QID %d handle %x\n",
3879		    vha->vp_idx, rsp->id, handle);
3880		req = vha->hw->req_q_map[qid];
3881	} else {
3882		return NULL;
3883	}
3884
3885	h &= QLA_CMD_HANDLE_MASK;
3886
3887	if (h != QLA_TGT_NULL_HANDLE) {
3888		if (unlikely(h >= req->num_outstanding_cmds)) {
3889			ql_dbg(ql_dbg_tgt, vha, 0xe052,
3890			    "qla_target(%d): Wrong handle %x received\n",
3891			    vha->vp_idx, handle);
3892			return NULL;
3893		}
3894
3895		cmd = req->outstanding_cmds[h];
3896		if (unlikely(cmd == NULL)) {
3897			ql_dbg(ql_dbg_async, vha, 0xe053,
3898			    "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
3899				vha->vp_idx, handle, req->id, rsp->id);
3900			return NULL;
3901		}
3902		req->outstanding_cmds[h] = NULL;
3903	} else if (ctio != NULL) {
3904		/* We can't get loop ID from CTIO7 */
3905		ql_dbg(ql_dbg_tgt, vha, 0xe054,
3906		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
3907		    "support NULL handles\n", vha->vp_idx);
3908		return NULL;
3909	}
3910
3911	return cmd;
3912}
3913
3914/*
3915 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3916 */
3917static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
3918    struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio)
3919{
3920	struct qla_hw_data *ha = vha->hw;
3921	struct se_cmd *se_cmd;
3922	struct qla_tgt_cmd *cmd;
3923	struct qla_qpair *qpair = rsp->qpair;
3924
3925	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
3926		/* That could happen only in case of an error/reset/abort */
3927		if (status != CTIO_SUCCESS) {
3928			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
3929			    "Intermediate CTIO received"
3930			    " (status %x)\n", status);
3931		}
3932		return;
3933	}
3934
3935	cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
3936	if (cmd == NULL)
3937		return;
3938
3939	se_cmd = &cmd->se_cmd;
3940	cmd->cmd_sent_to_fw = 0;
3941
3942	qlt_unmap_sg(vha, cmd);
3943
3944	if (unlikely(status != CTIO_SUCCESS)) {
3945		switch (status & 0xFFFF) {
3946		case CTIO_INVALID_RX_ID:
3947			if (printk_ratelimit())
3948				dev_info(&vha->hw->pdev->dev,
3949				    "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n",
3950				    vha->vp_idx, cmd->atio.u.isp24.attr,
3951				    ((cmd->ctio_flags >> 9) & 0xf),
3952				    cmd->ctio_flags);
3953
3954			break;
3955		case CTIO_LIP_RESET:
3956		case CTIO_TARGET_RESET:
3957		case CTIO_ABORTED:
3958			/* driver requested abort via Terminate exchange */
3959		case CTIO_TIMEOUT:
3960			/* They are OK */
3961			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
3962			    "qla_target(%d): CTIO with "
3963			    "status %#x received, state %x, se_cmd %p, "
3964			    "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
3965			    "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
3966			    status, cmd->state, se_cmd);
3967			break;
3968
3969		case CTIO_PORT_LOGGED_OUT:
3970		case CTIO_PORT_UNAVAILABLE:
3971		{
3972			int logged_out =
3973				(status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;
3974
3975			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
3976			    "qla_target(%d): CTIO with %s status %x "
3977			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
3978			    logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
3979			    status, cmd->state, se_cmd);
3980
3981			if (logged_out && cmd->sess) {
3982				/*
3983				 * Session is already logged out, but we need
3984				 * to notify initiator, who's not aware of this
3985				 */
3986				cmd->sess->send_els_logo = 1;
3987				ql_dbg(ql_dbg_disc, vha, 0x20f8,
3988				    "%s %d %8phC post del sess\n",
3989				    __func__, __LINE__, cmd->sess->port_name);
3990
3991				qlt_schedule_sess_for_deletion(cmd->sess);
3992			}
3993			break;
3994		}
3995		case CTIO_DIF_ERROR: {
3996			struct ctio_crc_from_fw *crc =
3997				(struct ctio_crc_from_fw *)ctio;
3998			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
3999			    "qla_target(%d): CTIO with DIF_ERROR status %x "
4000			    "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
4001			    "expect_dif[0x%llx]\n",
4002			    vha->vp_idx, status, cmd->state, se_cmd,
4003			    *((u64 *)&crc->actual_dif[0]),
4004			    *((u64 *)&crc->expected_dif[0]));
4005
4006			qlt_handle_dif_error(qpair, cmd, ctio);
4007			return;
4008		}
4009		default:
4010			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
4011			    "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p)\n",
4012			    vha->vp_idx, status, cmd->state, se_cmd);
4013			break;
4014		}
4015
4017		/* "cmd->aborted" means
4018		 * cmd is already aborted/terminated, we don't
4019		 * need to terminate again.  The exchange is already
4020		 * cleaned up/freed at FW level.  Just clean up at the
4021		 * driver level.
4022		 */
4023		if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
4024		    (!cmd->aborted)) {
4025			cmd->trc_flags |= TRC_CTIO_ERR;
4026			if (qlt_term_ctio_exchange(qpair, ctio, cmd, status))
4027				return;
4028		}
4029	}
4030
4031	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
4032		cmd->trc_flags |= TRC_CTIO_DONE;
4033	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
4034		cmd->state = QLA_TGT_STATE_DATA_IN;
4035
4036		if (status == CTIO_SUCCESS)
4037			cmd->write_data_transferred = 1;
4038
4039		ha->tgt.tgt_ops->handle_data(cmd);
4040		return;
4041	} else if (cmd->aborted) {
4042		cmd->trc_flags |= TRC_CTIO_ABORTED;
4043		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
4044		  "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
4045	} else {
4046		cmd->trc_flags |= TRC_CTIO_STRANGE;
4047		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
4048		    "qla_target(%d): A command in state (%d) should "
4049		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
4050	}
4051
4052	if (unlikely(status != CTIO_SUCCESS) &&
4053		!cmd->aborted) {
4054		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
4055		dump_stack();
4056	}
4057
4058	ha->tgt.tgt_ops->free_cmd(cmd);
4059}
4060
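/*
 * Map the FCP task-attribute code from the ATIO to the corresponding TCM
 * task attribute; unknown codes fall back to ORDERED.
 */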
4061static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
4062	uint8_t task_codes)
4063{
4064	int fcp_task_attr;
4065
4066	switch (task_codes) {
4067	case ATIO_SIMPLE_QUEUE:
4068		fcp_task_attr = TCM_SIMPLE_TAG;
4069		break;
4070	case ATIO_HEAD_OF_QUEUE:
4071		fcp_task_attr = TCM_HEAD_TAG;
4072		break;
4073	case ATIO_ORDERED_QUEUE:
4074		fcp_task_attr = TCM_ORDERED_TAG;
4075		break;
4076	case ATIO_ACA_QUEUE:
4077		fcp_task_attr = TCM_ACA_TAG;
4078		break;
4079	case ATIO_UNTAGGED:
4080		fcp_task_attr = TCM_SIMPLE_TAG;
4081		break;
4082	default:
4083		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
4084		    "qla_target: unknown task code %x, use ORDERED instead\n",
4085		    task_codes);
4086		fcp_task_attr = TCM_ORDERED_TAG;
4087		break;
4088	}
4089
4090	return fcp_task_attr;
4091}
4092
4093/*
4094 * Process context for I/O path into tcm_qla2xxx code
4095 */
4096static void __qlt_do_work(struct qla_tgt_cmd *cmd)
4097{
4098	scsi_qla_host_t *vha = cmd->vha;
4099	struct qla_hw_data *ha = vha->hw;
4100	struct fc_port *sess = cmd->sess;
4101	struct atio_from_isp *atio = &cmd->atio;
4102	unsigned char *cdb;
4103	unsigned long flags;
4104	uint32_t data_length;
4105	int ret, fcp_task_attr, data_dir, bidi = 0;
4106	struct qla_qpair *qpair = cmd->qpair;
4107
4108	cmd->cmd_in_wq = 0;
4109	cmd->trc_flags |= TRC_DO_WORK;
4110
4111	if (cmd->aborted) {
4112		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
4113		    "cmd with tag %u is aborted\n",
4114		    cmd->atio.u.isp24.exchange_addr);
4115		goto out_term;
4116	}
4117
4118	spin_lock_init(&cmd->cmd_lock);
4119	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
4120	cmd->se_cmd.tag = le32_to_cpu(atio->u.isp24.exchange_addr);
4121
4122	if (atio->u.isp24.fcp_cmnd.rddata &&
4123	    atio->u.isp24.fcp_cmnd.wrdata) {
4124		bidi = 1;
4125		data_dir = DMA_TO_DEVICE;
4126	} else if (atio->u.isp24.fcp_cmnd.rddata)
4127		data_dir = DMA_FROM_DEVICE;
4128	else if (atio->u.isp24.fcp_cmnd.wrdata)
4129		data_dir = DMA_TO_DEVICE;
4130	else
4131		data_dir = DMA_NONE;
4132
4133	fcp_task_attr = qlt_get_fcp_task_attr(vha,
4134	    atio->u.isp24.fcp_cmnd.task_attr);
4135	data_length = get_datalen_for_atio(atio);
4136
4137	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
4138				          fcp_task_attr, data_dir, bidi);
4139	if (ret != 0)
4140		goto out_term;
4141	/*
4142	 * Drop extra session reference from qlt_handle_cmd_for_atio().
4143	 */
4144	ha->tgt.tgt_ops->put_sess(sess);
4145	return;
4146
4147out_term:
4148	ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
4149	/*
4150	 * cmd has not been sent to the target core yet, so pass NULL as the
4151	 * second argument to qlt_send_term_exchange() and free the memory here.
4152	 */
4153	cmd->trc_flags |= TRC_DO_WORK_ERR;
4154	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
4155	qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);
4156
4157	qlt_decr_num_pend_cmds(vha);
4158	cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
4159	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
4160
4161	ha->tgt.tgt_ops->put_sess(sess);
4162}
4163
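/* Work item entry: unlink the cmd from vha->qla_cmd_list and process it. */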
4164static void qlt_do_work(struct work_struct *work)
4165{
4166	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
4167	scsi_qla_host_t *vha = cmd->vha;
4168	unsigned long flags;
4169
4170	spin_lock_irqsave(&vha->cmd_list_lock, flags);
4171	list_del(&cmd->cmd_list);
4172	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4173
4174	__qlt_do_work(cmd);
4175}
4176
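/*
 * Flush the LUN-to-qpair map after the user changes the number of active
 * qpairs; entries are rebuilt lazily by qlt_assign_qpair().
 */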
4177void qlt_clr_qp_table(struct scsi_qla_host *vha)
4178{
4179	unsigned long flags;
4180	struct qla_hw_data *ha = vha->hw;
4181	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4182	void *node;
4183	u64 key = 0;
4184
4185	ql_log(ql_log_info, vha, 0x706c,
4186	    "User update Number of Active Qpairs %d\n",
4187	    ha->tgt.num_act_qpairs);
4188
4189	spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4190
4191	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
4192		btree_remove64(&tgt->lun_qpair_map, key);
4193
4194	ha->base_qpair->lun_cnt = 0;
4195	for (key = 0; key < ha->max_qpairs; key++)
4196		if (ha->queue_pair_map[key])
4197			ha->queue_pair_map[key]->lun_cnt = 0;
4198
4199	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
4200}
4201
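/*
 * Pick a qpair for the command from the LUN-to-qpair map; on a miss, bind
 * the LUN to the least-loaded qpair (by lun_cnt) and cache the hint.
 */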
4202static void qlt_assign_qpair(struct scsi_qla_host *vha,
4203	struct qla_tgt_cmd *cmd)
4204{
4205	struct qla_qpair *qpair, *qp;
4206	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4207	struct qla_qpair_hint *h;
4208
4209	if (vha->flags.qpairs_available) {
4210		h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
4211		if (unlikely(!h)) {
4212			/* spread the lun-to-qpair mapping evenly */
4213			int lcnt = 0, rc;
4214			struct scsi_qla_host *base_vha =
4215				pci_get_drvdata(vha->hw->pdev);
4216
4217			qpair = vha->hw->base_qpair;
4218			if (qpair->lun_cnt == 0) {
4219				qpair->lun_cnt++;
4220				h = qla_qpair_to_hint(tgt, qpair);
4221				BUG_ON(!h);
4222				rc = btree_insert64(&tgt->lun_qpair_map,
4223					cmd->unpacked_lun, h, GFP_ATOMIC);
4224				if (rc) {
4225					qpair->lun_cnt--;
4226					ql_log(ql_log_info, vha, 0xd037,
4227					    "Unable to insert lun %llx into lun_qpair_map\n",
4228					    cmd->unpacked_lun);
4229				}
4230				goto out;
4231			} else {
4232				lcnt = qpair->lun_cnt;
4233			}
4234
4235			h = NULL;
4236			list_for_each_entry(qp, &base_vha->qp_list,
4237			    qp_list_elem) {
4238				if (qp->lun_cnt == 0) {
4239					qp->lun_cnt++;
4240					h = qla_qpair_to_hint(tgt, qp);
4241					BUG_ON(!h);
4242					rc = btree_insert64(&tgt->lun_qpair_map,
4243					    cmd->unpacked_lun, h, GFP_ATOMIC);
4244					if (rc) {
4245						qp->lun_cnt--;
4246						ql_log(ql_log_info, vha, 0xd038,
4247							"Unable to insert lun %llx into lun_qpair_map\n",
4248							cmd->unpacked_lun);
4249					}
4250					qpair = qp;
4251					goto out;
4252				} else {
4253					if (qp->lun_cnt < lcnt) {
4254						lcnt = qp->lun_cnt;
4255						qpair = qp;
4256						continue;
4257					}
4258				}
4259			}
4260			BUG_ON(!qpair);
4261			qpair->lun_cnt++;
4262			h = qla_qpair_to_hint(tgt, qpair);
4263			BUG_ON(!h);
4264			rc = btree_insert64(&tgt->lun_qpair_map,
4265				cmd->unpacked_lun, h, GFP_ATOMIC);
4266			if (rc) {
4267				qpair->lun_cnt--;
4268				ql_log(ql_log_info, vha, 0xd039,
4269				   "Unable to insert lun %llx into lun_qpair_map\n",
4270				   cmd->unpacked_lun);
4271			}
4272		}
4273	} else {
4274		h = &tgt->qphints[0];
4275	}
4276out:
4277	cmd->qpair = h->qpair;
4278	cmd->se_cmd.cpuid = h->cpuid;
4279}
4280
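/*
 * Allocate a qla_tgt_cmd for an incoming ATIO and initialize it from the
 * session and ATIO contents, including LUN decode and qpair assignment.
 */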
4281static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
4282				       struct fc_port *sess,
4283				       struct atio_from_isp *atio)
4284{
4285	struct qla_tgt_cmd *cmd;
4286
4287	cmd = vha->hw->tgt.tgt_ops->get_cmd(sess);
4288	if (!cmd)
4289		return NULL;
4290
4291	cmd->cmd_type = TYPE_TGT_CMD;
4292	memcpy(&cmd->atio, atio, sizeof(*atio));
4293	cmd->state = QLA_TGT_STATE_NEW;
4294	cmd->tgt = vha->vha_tgt.qla_tgt;
4295	qlt_incr_num_pend_cmds(vha);
4296	cmd->vha = vha;
4297	cmd->sess = sess;
4298	cmd->loop_id = sess->loop_id;
4299	cmd->conf_compl_supported = sess->conf_compl_supported;
4300
4301	cmd->trc_flags = 0;
4302	cmd->jiffies_at_alloc = get_jiffies_64();
4303
4304	cmd->unpacked_lun = scsilun_to_int(
4305	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
4306	qlt_assign_qpair(vha, cmd);
4307	cmd->reset_count = vha->hw->base_qpair->chip_reset;
4308	cmd->vp_idx = vha->vp_idx;
4309
4310	return cmd;
4311}
4312
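/*
 * Accept a new SCSI command ATIO: look up the session by s_id, take a
 * session reference, allocate a qla_tgt_cmd and queue it to qla_tgt_wq.
 */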
4313/* ha->hardware_lock supposed to be held on entry */
4314static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
4315	struct atio_from_isp *atio)
4316{
4317	struct qla_hw_data *ha = vha->hw;
4318	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4319	struct fc_port *sess;
4320	struct qla_tgt_cmd *cmd;
4321	unsigned long flags;
4322	port_id_t id;
4323
4324	if (unlikely(tgt->tgt_stop)) {
4325		ql_dbg(ql_dbg_io, vha, 0x3061,
4326		    "New command while device %p is shutting down\n", tgt);
4327		return -ENODEV;
4328	}
4329
4330	id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
4331	if (IS_SW_RESV_ADDR(id))
4332		return -EBUSY;
4333
4334	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
4335	if (unlikely(!sess))
4336		return -EFAULT;
4337
4338	/* Another WWN used to have our s_id. Our PLOGI scheduled its
4339	 * session deletion, but it's still in sess_del_work wq */
4340	if (sess->deleted) {
4341		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
4342		    "New command while old session %p is being deleted\n",
4343		    sess);
4344		return -EFAULT;
4345	}
4346
4347	/*
4348	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
4349	 */
4350	if (!kref_get_unless_zero(&sess->sess_kref)) {
4351		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
4352		    "%s: kref_get fail, %8phC oxid %x\n",
4353		    __func__, sess->port_name,
4354		     be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
4355		return -EFAULT;
4356	}
4357
4358	cmd = qlt_get_tag(vha, sess, atio);
4359	if (!cmd) {
4360		ql_dbg(ql_dbg_io, vha, 0x3062,
4361		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
4362		ha->tgt.tgt_ops->put_sess(sess);
4363		return -EBUSY;
4364	}
4365
4366	cmd->cmd_in_wq = 1;
4367	cmd->trc_flags |= TRC_NEW_CMD;
4368
4369	spin_lock_irqsave(&vha->cmd_list_lock, flags);
4370	list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
4371	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4372
4373	INIT_WORK(&cmd->work, qlt_do_work);
4374	if (vha->flags.qpairs_available) {
4375		queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
4376	} else if (ha->msix_count) {
4377		if (cmd->atio.u.isp24.fcp_cmnd.rddata)
4378			queue_work_on(smp_processor_id(), qla_tgt_wq,
4379			    &cmd->work);
4380		else
4381			queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
4382			    &cmd->work);
4383	} else {
4384		queue_work(qla_tgt_wq, &cmd->work);
4385	}
4386
4387	return 0;
4388}
4389
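/*
 * Build and queue a task-management command for the target core. LUN-scoped
 * TMRs (LUN reset, clear/abort task set) first abort commands queued for
 * that LUN and are routed to the LUN's qpair.
 */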
4390/* ha->hardware_lock supposed to be held on entry */
4391static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
4392	int fn, void *iocb, int flags)
4393{
4394	struct scsi_qla_host *vha = sess->vha;
4395	struct qla_hw_data *ha = vha->hw;
4396	struct qla_tgt_mgmt_cmd *mcmd;
4397	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4398	struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
4399
4400	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
4401	if (!mcmd) {
4402		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
4403		    "qla_target(%d): Allocation of management "
4404		    "command failed, some commands and their data could "
4405		    "leak\n", vha->vp_idx);
4406		return -ENOMEM;
4407	}
4408	memset(mcmd, 0, sizeof(*mcmd));
4409	mcmd->sess = sess;
4410
4411	if (iocb) {
4412		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
4413		    sizeof(mcmd->orig_iocb.imm_ntfy));
4414	}
4415	mcmd->tmr_func = fn;
4416	mcmd->flags = flags;
4417	mcmd->reset_count = ha->base_qpair->chip_reset;
4418	mcmd->qpair = h->qpair;
4419	mcmd->vha = vha;
4420	mcmd->se_cmd.cpuid = h->cpuid;
4421	mcmd->unpacked_lun = lun;
4422
4423	switch (fn) {
4424	case QLA_TGT_LUN_RESET:
4425	case QLA_TGT_CLEAR_TS:
4426	case QLA_TGT_ABORT_TS:
4427		abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
4428		fallthrough;
4429	case QLA_TGT_CLEAR_ACA:
4430		h = qlt_find_qphint(vha, mcmd->unpacked_lun);
4431		mcmd->qpair = h->qpair;
4432		mcmd->se_cmd.cpuid = h->cpuid;
4433		break;
4434
4435	case QLA_TGT_TARGET_RESET:
4436	case QLA_TGT_NEXUS_LOSS_SESS:
4437	case QLA_TGT_NEXUS_LOSS:
4438	case QLA_TGT_ABORT_ALL:
4439	default:
4440		/* no-op */
4441		break;
4442	}
4443
4444	INIT_WORK(&mcmd->work, qlt_do_tmr_work);
4445	queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq,
4446	    &mcmd->work);
4447
4448	return 0;
4449}
4450
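/* Decode the TM function and LUN from the ATIO and issue the TMR. */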
4451/* ha->hardware_lock supposed to be held on entry */
4452static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
4453{
4454	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4455	struct qla_hw_data *ha = vha->hw;
4456	struct fc_port *sess;
4457	u64 unpacked_lun;
4458	int fn;
4459	unsigned long flags;
4460
4461	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
4462
4463	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4464	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4465	    a->u.isp24.fcp_hdr.s_id);
4466	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4467
4468	unpacked_lun =
4469	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
4470
4471	if (sess == NULL || sess->deleted)
4472		return -EFAULT;
4473
4474	return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
4475}
4476
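/*
 * Handle an ABORT TASK immediate notify: build a QLA_TGT_2G_ABORT_TASK
 * mcmd and hand it to the target core's TMR handler.
 */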
4477/* ha->hardware_lock supposed to be held on entry */
4478static int __qlt_abort_task(struct scsi_qla_host *vha,
4479	struct imm_ntfy_from_isp *iocb, struct fc_port *sess)
4480{
4481	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4482	struct qla_hw_data *ha = vha->hw;
4483	struct qla_tgt_mgmt_cmd *mcmd;
4484	u64 unpacked_lun;
4485	int rc;
4486
4487	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
4488	if (mcmd == NULL) {
4489		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
4490		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
4491		    vha->vp_idx, __func__);
4492		return -ENOMEM;
4493	}
4494	memset(mcmd, 0, sizeof(*mcmd));
4495
4496	mcmd->sess = sess;
4497	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
4498	    sizeof(mcmd->orig_iocb.imm_ntfy));
4499
4500	unpacked_lun =
4501	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
4502	mcmd->reset_count = ha->base_qpair->chip_reset;
4503	mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;
4504	mcmd->qpair = ha->base_qpair;
4505
4506	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
4507	    le16_to_cpu(iocb->u.isp2x.seq_id));
4508	if (rc != 0) {
4509		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
4510		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
4511		    vha->vp_idx, rc);
4512		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
4513		return -EFAULT;
4514	}
4515
4516	return 0;
4517}
4518
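/*
 * Look up the session by loop_id and abort the task, deferring to the
 * sess_work path when the session does not exist yet.
 */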
4519/* ha->hardware_lock supposed to be held on entry */
4520static int qlt_abort_task(struct scsi_qla_host *vha,
4521	struct imm_ntfy_from_isp *iocb)
4522{
4523	struct qla_hw_data *ha = vha->hw;
4524	struct fc_port *sess;
4525	int loop_id;
4526	unsigned long flags;
4527
4528	loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
4529
4530	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4531	sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
4532	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4533
4534	if (sess == NULL) {
4535		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
4536		    "qla_target(%d): task abort for non-existent "
4537		    "session\n", vha->vp_idx);
4538		return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
4539		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
4540	}
4541
4542	return __qlt_abort_task(vha, iocb, sess);
4543}
4544
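/* Completion callback for an explicit LOGO: log failures and flag completion. */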
4545void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
4546{
4547	if (rc != MBS_COMMAND_COMPLETE) {
4548		ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
4549			"%s: se_sess %p / sess %p from"
4550			" port %8phC loop_id %#04x s_id %02x:%02x:%02x"
4551			" LOGO failed: %#x\n",
4552			__func__,
4553			fcport->se_sess,
4554			fcport,
4555			fcport->port_name, fcport->loop_id,
4556			fcport->d_id.b.domain, fcport->d_id.b.area,
4557			fcport->d_id.b.al_pa, rc);
4558	}
4559
4560	fcport->logout_completed = 1;
4561}
4562
4563/*
4564 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
4565 *
4566 * Schedules sessions with matching port_id/loop_id but different wwn for
4567 * deletion. Returns an existing session with matching wwn if present,
4568 * NULL otherwise.
4569 */
4570struct fc_port *
4571qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
4572    port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess)
4573{
4574	struct fc_port *sess = NULL, *other_sess;
4575	uint64_t other_wwn;
4576
4577	*conflict_sess = NULL;
4578
4579	list_for_each_entry(other_sess, &vha->vp_fcports, list) {
4580
4581		other_wwn = wwn_to_u64(other_sess->port_name);
4582
4583		if (wwn == other_wwn) {
4584			WARN_ON(sess);
4585			sess = other_sess;
4586			continue;
4587		}
4588
4589		/* find other sess with nport_id collision */
4590		if (port_id.b24 == other_sess->d_id.b24) {
4591			if (loop_id != other_sess->loop_id) {
4592				ql_dbg(ql_dbg_disc, vha, 0x1000c,
4593				    "Invalidating sess %p loop_id %d wwn %llx.\n",
4594				    other_sess, other_sess->loop_id, other_wwn);
4595
4596				/*
4597				 * logout_on_delete is set by default, but another
4598				 * session that has the same s_id/loop_id combo
4599				 * might have cleared it when it requested this
4600				 * session's deletion, so don't touch it
4601				 */
4602				qlt_schedule_sess_for_deletion(other_sess);
4603			} else {
4604				/*
4605				 * Another wwn used to have our s_id/loop_id:
4606				 * kill the session, but don't free the loop_id
4607				 */
4608				ql_dbg(ql_dbg_disc, vha, 0xf01b,
4609				    "Invalidating sess %p loop_id %d wwn %llx.\n",
4610				    other_sess, other_sess->loop_id, other_wwn);
4611
4612				other_sess->keep_nport_handle = 1;
4613				if (other_sess->disc_state != DSC_DELETED)
4614					*conflict_sess = other_sess;
4615				qlt_schedule_sess_for_deletion(other_sess);
4616			}
4617			continue;
4618		}
4619
4620		/* find other sess with nport handle collision */
4621		if ((loop_id == other_sess->loop_id) &&
4622			(loop_id != FC_NO_LOOP_ID)) {
4623			ql_dbg(ql_dbg_disc, vha, 0x1000d,
4624			       "Invalidating sess %p loop_id %d wwn %llx.\n",
4625			       other_sess, other_sess->loop_id, other_wwn);
4626
4627			/* Same loop_id but different s_id
4628			 * OK to kill and log out */
4629			qlt_schedule_sess_for_deletion(other_sess);
4630		}
4631	}
4632
4633	return sess;
4634}
4635
4636/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
4637static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
4638{
4639	struct qla_tgt_sess_op *op;
4640	struct qla_tgt_cmd *cmd;
4641	uint32_t key;
4642	int count = 0;
4643	unsigned long flags;
4644
4645	key = (((u32)s_id->b.domain << 16) |
4646	       ((u32)s_id->b.area   <<  8) |
4647	       ((u32)s_id->b.al_pa));
4648
4649	spin_lock_irqsave(&vha->cmd_list_lock, flags);
4650	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
4651		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
4652
4653		if (op_key == key) {
4654			op->aborted = true;
4655			count++;
4656		}
4657	}
4658
4659	list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
4660		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
4661
4662		if (op_key == key) {
4663			op->aborted = true;
4664			count++;
4665		}
4666	}
4667
4668	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
4669		uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
4670
4671		if (cmd_key == key) {
4672			cmd->aborted = 1;
4673			count++;
4674		}
4675	}
4676	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4677
4678	return count;
4679}
4680
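/*
 * Handle an incoming PLOGI/PRLI immediate notify: abort stale commands for
 * the initiator's port ID, invalidate conflicting sessions, and either post
 * creation of a new session or schedule deletion of the existing one before
 * the notify is ACKed.
 */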
4681static int qlt_handle_login(struct scsi_qla_host *vha,
4682    struct imm_ntfy_from_isp *iocb)
4683{
4684	struct fc_port *sess = NULL, *conflict_sess = NULL;
4685	uint64_t wwn;
4686	port_id_t port_id;
4687	uint16_t loop_id, wd3_lo;
4688	int res = 0;
4689	struct qlt_plogi_ack_t *pla;
4690	unsigned long flags;
4691
4692	lockdep_assert_held(&vha->hw->hardware_lock);
4693
4694	wwn = wwn_to_u64(iocb->u.isp24.port_name);
4695
4696	port_id.b.domain = iocb->u.isp24.port_id[2];
4697	port_id.b.area   = iocb->u.isp24.port_id[1];
4698	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
4699	port_id.b.rsvd_1 = 0;
4700
4701	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
4702
4703	/* Mark all stale commands sitting in qla_tgt_wq for deletion */
4704	abort_cmds_for_s_id(vha, &port_id);
4705
4706	if (wwn) {
4707		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4708		sess = qlt_find_sess_invalidate_other(vha, wwn,
4709		    port_id, loop_id, &conflict_sess);
4710		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4711	} else {
4712		ql_dbg(ql_dbg_disc, vha, 0xffff,
4713		    "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ",
4714		    __func__, __LINE__, loop_id, port_id.b24);
4715		qlt_send_term_imm_notif(vha, iocb, 1);
4716		goto out;
4717	}
4718
4719	if (IS_SW_RESV_ADDR(port_id)) {
4720		res = 1;
4721		goto out;
4722	}
4723
4724	pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
4725	if (!pla) {
4726		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
4727		    "%s %d %8phC Term INOT due to mem alloc fail",
4728		    __func__, __LINE__,
4729		    iocb->u.isp24.port_name);
4730		qlt_send_term_imm_notif(vha, iocb, 1);
4731		goto out;
4732	}
4733
4734	if (conflict_sess) {
4735		conflict_sess->login_gen++;
4736		qlt_plogi_ack_link(vha, pla, conflict_sess,
4737		    QLT_PLOGI_LINK_CONFLICT);
4738	}
4739
4740	if (!sess) {
4741		pla->ref_count++;
4742		ql_dbg(ql_dbg_disc, vha, 0xffff,
4743		    "%s %d %8phC post new sess\n",
4744		    __func__, __LINE__, iocb->u.isp24.port_name);
4745		if (iocb->u.isp24.status_subcode == ELS_PLOGI)
4746			qla24xx_post_newsess_work(vha, &port_id,
4747			    iocb->u.isp24.port_name,
4748			    iocb->u.isp24.u.plogi.node_name,
4749			    pla, 0);
4750		else
4751			qla24xx_post_newsess_work(vha, &port_id,
4752			    iocb->u.isp24.port_name, NULL,
4753			    pla, 0);
4754
4755		goto out;
4756	}
4757
4758	if (sess->disc_state == DSC_UPD_FCPORT) {
4759		u16 sec;
4760
4761		/*
4762		 * Remote port registration is still going on from
4763		 * previous login. Allow it to finish before we
4764		 * accept the new login.
4765		 */
4766		sess->next_disc_state = DSC_DELETE_PEND;
4767		sec = jiffies_to_msecs(jiffies -
4768		    sess->jiffies_at_registration) / 1000;
4769		if (sess->sec_since_registration < sec && sec &&
4770		    !(sec % 5)) {
4771			sess->sec_since_registration = sec;
4772			ql_dbg(ql_dbg_disc, vha, 0xffff,
4773			    "%s %8phC - Slow Rport registration (%d Sec)\n",
4774			    __func__, sess->port_name, sec);
4775		}
4776
4777		if (!conflict_sess) {
4778			list_del(&pla->list);
4779			kmem_cache_free(qla_tgt_plogi_cachep, pla);
4780		}
4781
4782		qlt_send_term_imm_notif(vha, iocb, 1);
4783		goto out;
4784	}
4785
4786	qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
4787	sess->d_id = port_id;
4788	sess->login_gen++;
4789
4790	if (iocb->u.isp24.status_subcode == ELS_PRLI) {
4791		sess->fw_login_state = DSC_LS_PRLI_PEND;
4792		sess->local = 0;
4793		sess->loop_id = loop_id;
4796		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
4797
4798		if (wd3_lo & BIT_7)
4799			sess->conf_compl_supported = 1;
4800
4801		if ((wd3_lo & BIT_4) == 0)
4802			sess->port_type = FCT_INITIATOR;
4803		else
4804			sess->port_type = FCT_TARGET;
4805
4806	} else
4807		sess->fw_login_state = DSC_LS_PLOGI_PEND;
4808
4810	ql_dbg(ql_dbg_disc, vha, 0x20f9,
4811	    "%s %d %8phC  DS %d\n",
4812	    __func__, __LINE__, sess->port_name, sess->disc_state);
4813
4814	switch (sess->disc_state) {
4815	case DSC_DELETED:
4816	case DSC_LOGIN_PEND:
4817		qlt_plogi_ack_unref(vha, pla);
4818		break;
4819
4820	default:
4821		/*
4822		 * Under normal circumstances we want to release nport handle
4823		 * during LOGO process to avoid nport handle leaks inside FW.
4824		 * The exception is when LOGO is done while another PLOGI with
4825		 * the same nport handle is waiting as might be the case here.
4826		 * Note: there is always a possibility of a race where session
4827		 * deletion has already started for other reasons (e.g. ACL
4828		 * removal) and now PLOGI arrives:
4829		 * 1. if PLOGI arrived in FW after nport handle has been freed,
4830		 *    FW must have assigned this PLOGI a new/same handle and we
4831		 *    can proceed ACK'ing it as usual when session deletion
4832		 *    completes.
4833		 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
4834		 *    bit reached it, the handle has now been released. We'll
4835		 *    get an error when we ACK this PLOGI. Nothing will be sent
4836		 *    back to initiator. Initiator should eventually retry
4837		 *    PLOGI and situation will correct itself.
4838		 */
4839		sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
4840		    (sess->d_id.b24 == port_id.b24));
4841
4842		ql_dbg(ql_dbg_disc, vha, 0x20f9,
4843		    "%s %d %8phC post del sess\n",
4844		    __func__, __LINE__, sess->port_name);
4845
4847		qlt_schedule_sess_for_deletion(sess);
4848		break;
4849	}
4850out:
4851	return res;
4852}
4853
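/*
 * Dispatch an ELS immediate notify (PLOGI, PRLI, LOGO, PRLO, TPRLO, PDISC,
 * ADISC). Returns 1 if the caller should send the notify ack now, 0 if the
 * ack will be sent asynchronously.
 */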
4854/*
4855 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
4856 */
4857static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4858	struct imm_ntfy_from_isp *iocb)
4859{
4860	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4861	struct qla_hw_data *ha = vha->hw;
4862	struct fc_port *sess = NULL, *conflict_sess = NULL;
4863	uint64_t wwn;
4864	port_id_t port_id;
4865	uint16_t loop_id;
4866	uint16_t wd3_lo;
4867	int res = 0;
4868	unsigned long flags;
4869
4870	lockdep_assert_held(&ha->hardware_lock);
4871
4872	wwn = wwn_to_u64(iocb->u.isp24.port_name);
4873
4874	port_id.b.domain = iocb->u.isp24.port_id[2];
4875	port_id.b.area   = iocb->u.isp24.port_id[1];
4876	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
4877	port_id.b.rsvd_1 = 0;
4878
4879	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
4880
4881	ql_dbg(ql_dbg_disc, vha, 0xf026,
4882	    "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n",
4883	    vha->vp_idx, iocb->u.isp24.port_id[2],
4884		iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
4885		   iocb->u.isp24.status_subcode, loop_id,
4886		iocb->u.isp24.port_name);
4887
4888	/* res = 1 means send the notify ack at the end of this function;
4889	 * res = 0 means the ack is sent asynchronously/later.
4890	 */
4891	switch (iocb->u.isp24.status_subcode) {
4892	case ELS_PLOGI:
4893		res = qlt_handle_login(vha, iocb);
4894		break;
4895
4896	case ELS_PRLI:
4897		if (N2N_TOPO(ha)) {
4898			sess = qla2x00_find_fcport_by_wwpn(vha,
4899			    iocb->u.isp24.port_name, 1);
4900
4901			if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
4902				ql_dbg(ql_dbg_disc, vha, 0xffff,
4903				    "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
4904				    __func__, __LINE__,
4905				    iocb->u.isp24.port_name);
4906				qlt_send_term_imm_notif(vha, iocb, 1);
4907				break;
4908			}
4909
4910			res = qlt_handle_login(vha, iocb);
4911			break;
4912		}
4913
4914		if (IS_SW_RESV_ADDR(port_id)) {
4915			res = 1;
4916			break;
4917		}
4918
4919		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
4920
4921		if (wwn) {
4922			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4923			sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
4924				loop_id, &conflict_sess);
4925			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
4926		}
4927
4928		if (conflict_sess) {
4929			switch (conflict_sess->disc_state) {
4930			case DSC_DELETED:
4931			case DSC_DELETE_PEND:
4932				break;
4933			default:
4934				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
4935				    "PRLI with conflicting sess %p port %8phC\n",
4936				    conflict_sess, conflict_sess->port_name);
4937				conflict_sess->fw_login_state =
4938				    DSC_LS_PORT_UNAVAIL;
4939				qlt_send_term_imm_notif(vha, iocb, 1);
4940				res = 0;
4941				break;
4942			}
4943		}
4944
4945		if (sess != NULL) {
4946			bool delete = false;
4947			int sec;
4948
4949			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4950			switch (sess->fw_login_state) {
4951			case DSC_LS_PLOGI_PEND:
4952			case DSC_LS_PLOGI_COMP:
4953			case DSC_LS_PRLI_COMP:
4954				break;
4955			default:
4956				delete = true;
4957				break;
4958			}
4959
4960			switch (sess->disc_state) {
4961			case DSC_UPD_FCPORT:
4962				spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
4963				    flags);
4964
4965				sec = jiffies_to_msecs(jiffies -
4966				    sess->jiffies_at_registration)/1000;
4967				if (sess->sec_since_registration < sec && sec &&
4968				    !(sec % 5)) {
4969					sess->sec_since_registration = sec;
4970					ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
4971					    "%s %8phC : Slow Rport registration(%d Sec)\n",
4972					    __func__, sess->port_name, sec);
4973				}
4974				qlt_send_term_imm_notif(vha, iocb, 1);
4975				return 0;
4976
4977			case DSC_LOGIN_PEND:
4978			case DSC_GPDB:
4979			case DSC_LOGIN_COMPLETE:
4980			case DSC_ADISC:
4981				delete = false;
4982				break;
4983			default:
4984				break;
4985			}
4986
4987			if (delete) {
4988				spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
4989				    flags);
4990				/*
4991				 * Impatient initiator sent PRLI before the last
4992				 * PLOGI could finish. Force it to retry while the
4993				 * previous login completes.
4994				 */
4995				ql_log(ql_log_warn, sess->vha, 0xf095,
4996				    "sess %p PRLI received, before plogi ack.\n",
4997				    sess);
4998				qlt_send_term_imm_notif(vha, iocb, 1);
4999				res = 0;
5000				break;
5001			}
5002
5003			/*
5004			 * This shouldn't happen under normal circumstances,
5005			 * since we have deleted the old session during PLOGI
5006			 */
5007			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
5008			    "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
5009			    sess->loop_id, sess, iocb->u.isp24.nport_handle);
5010
5011			sess->local = 0;
5012			sess->loop_id = loop_id;
5013			sess->d_id = port_id;
5014			sess->fw_login_state = DSC_LS_PRLI_PEND;
5015
5016			if (wd3_lo & BIT_7)
5017				sess->conf_compl_supported = 1;
5018
5019			if ((wd3_lo & BIT_4) == 0)
5020				sess->port_type = FCT_INITIATOR;
5021			else
5022				sess->port_type = FCT_TARGET;
5023
5024			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
5025		}
5026		res = 1; /* send notify ack */
5027
5028		/* Make session global (not used in fabric mode) */
5029		if (ha->current_topology != ISP_CFG_F) {
5030			if (sess) {
5031				ql_dbg(ql_dbg_disc, vha, 0x20fa,
5032				    "%s %d %8phC post nack\n",
5033				    __func__, __LINE__, sess->port_name);
5034				qla24xx_post_nack_work(vha, sess, iocb,
5035					SRB_NACK_PRLI);
5036				res = 0;
5037			} else {
5038				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5039				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5040				qla2xxx_wake_dpc(vha);
5041			}
5042		} else {
5043			if (sess) {
5044				ql_dbg(ql_dbg_disc, vha, 0x20fb,
5045				    "%s %d %8phC post nack\n",
5046				    __func__, __LINE__, sess->port_name);
5047				qla24xx_post_nack_work(vha, sess, iocb,
5048					SRB_NACK_PRLI);
5049				res = 0;
5050			}
5051		}
5052		break;
5053
5054	case ELS_TPRLO:
5055		if (le16_to_cpu(iocb->u.isp24.flags) &
5056			NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
5057			loop_id = 0xFFFF;
5058			qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
5059			res = 1;
5060			break;
5061		}
5062		fallthrough;
5063	case ELS_LOGO:
5064	case ELS_PRLO:
5065		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5066		sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
5067		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5068
5069		if (sess) {
5070			sess->login_gen++;
5071			sess->fw_login_state = DSC_LS_LOGO_PEND;
5072			sess->logo_ack_needed = 1;
5073			memcpy(sess->iocb, iocb, IOCB_SIZE);
5074		}
5075
5076		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
5077
5078		ql_dbg(ql_dbg_disc, vha, 0x20fc,
5079		    "%s: logo %llx res %d sess %p ",
5080		    __func__, wwn, res, sess);
5081		if (res == 0) {
5082			/*
5083			 * cmd went to the upper layer; see qlt_xmit_tm_rsp()
5084			 * for the LOGO ACK & session deletion
5085			 */
5086			BUG_ON(!sess);
5087			res = 0;
5088		} else {
5089			/* cmd did not go to upper layer. */
5090			if (sess) {
5091				qlt_schedule_sess_for_deletion(sess);
5092				res = 0;
5093			}
5094			/* else the LOGO will be acked */
5095		}
5096		break;
5097	case ELS_PDISC:
5098	case ELS_ADISC:
5099	{
5100		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5101
5102		if (tgt->link_reinit_iocb_pending) {
5103			qlt_send_notify_ack(ha->base_qpair,
5104			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
5105			tgt->link_reinit_iocb_pending = 0;
5106		}
5107
5108		sess = qla2x00_find_fcport_by_wwpn(vha,
5109		    iocb->u.isp24.port_name, 1);
5110		if (sess) {
5111			ql_dbg(ql_dbg_disc, vha, 0x20fd,
5112				"sess %p lid %d|%d DS %d LS %d\n",
5113				sess, sess->loop_id, loop_id,
5114				sess->disc_state, sess->fw_login_state);
5115		}
5116
5117		res = 1; /* send notify ack */
5118		break;
5119	}
5120
5121	case ELS_FLOGI:	/* should never happen */
5122	default:
5123		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
5124		    "qla_target(%d): Unsupported ELS command %x "
5125		    "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
5126		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
5127		break;
5128	}
5129
5130	ql_dbg(ql_dbg_disc, vha, 0xf026,
5131	    "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n",
5132	    vha->vp_idx, iocb->u.isp24.status_subcode, res);
5133
5134	return res;
5135}
5136
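/*
 * Dispatch an immediate notify from the ISP (LIP reset, link reinit, port
 * logout, global TPRLO/LOGO, abort task, ELS, ...); a notify ack is sent
 * unless the specific handler defers it.
 */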
5137/*
5138 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
5139 */
5140static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
5141	struct imm_ntfy_from_isp *iocb)
5142{
5143	struct qla_hw_data *ha = vha->hw;
5144	uint32_t add_flags = 0;
5145	int send_notify_ack = 1;
5146	uint16_t status;
5147
5148	lockdep_assert_held(&ha->hardware_lock);
5149
5150	status = le16_to_cpu(iocb->u.isp2x.status);
5151	switch (status) {
5152	case IMM_NTFY_LIP_RESET:
5153	{
5154		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
5155		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
5156		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
5157		    iocb->u.isp24.status_subcode);
5158
5159		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
5160			send_notify_ack = 0;
5161		break;
5162	}
5163
5164	case IMM_NTFY_LIP_LINK_REINIT:
5165	{
5166		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5167
5168		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
5169		    "qla_target(%d): LINK REINIT (loop %#x, "
5170		    "subcode %x)\n", vha->vp_idx,
5171		    le16_to_cpu(iocb->u.isp24.nport_handle),
5172		    iocb->u.isp24.status_subcode);
5173		if (tgt->link_reinit_iocb_pending) {
5174			qlt_send_notify_ack(ha->base_qpair,
5175			    &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
5176		}
5177		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
5178		tgt->link_reinit_iocb_pending = 1;
5179		/*
5180		 * QLogic requires waiting after LINK REINIT for possible
5181		 * PDISC or ADISC ELS commands
5182		 */
5183		send_notify_ack = 0;
5184		break;
5185	}
5186
5187	case IMM_NTFY_PORT_LOGOUT:
5188		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
5189		    "qla_target(%d): Port logout (loop "
5190		    "%#x, subcode %x)\n", vha->vp_idx,
5191		    le16_to_cpu(iocb->u.isp24.nport_handle),
5192		    iocb->u.isp24.status_subcode);
5193
5194		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
5195			send_notify_ack = 0;
5196		/* The sessions will be cleared in the callback, if needed */
5197		break;
5198
5199	case IMM_NTFY_GLBL_TPRLO:
5200		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
5201		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
5202		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5203			send_notify_ack = 0;
5204		/* The sessions will be cleared in the callback, if needed */
5205		break;
5206
5207	case IMM_NTFY_PORT_CONFIG:
5208		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
5209		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
5210		    status);
5211		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
5212			send_notify_ack = 0;
5213		/* The sessions will be cleared in the callback, if needed */
5214		break;
5215
5216	case IMM_NTFY_GLBL_LOGO:
5217		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
5218		    "qla_target(%d): Link failure detected\n",
5219		    vha->vp_idx);
5220		/* I_T nexus loss */
5221		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5222			send_notify_ack = 0;
5223		break;
5224
5225	case IMM_NTFY_IOCB_OVERFLOW:
5226		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
5227		    "qla_target(%d): Cannot provide requested "
5228		    "capability (IOCB overflowed the immediate notify "
5229		    "resource count)\n", vha->vp_idx);
5230		break;
5231
5232	case IMM_NTFY_ABORT_TASK:
5233		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
5234		    "qla_target(%d): Abort Task (S %08x I %#x -> "
5235		    "L %#x)\n", vha->vp_idx,
5236		    le16_to_cpu(iocb->u.isp2x.seq_id),
5237		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
5238		    le16_to_cpu(iocb->u.isp2x.lun));
5239		if (qlt_abort_task(vha, iocb) == 0)
5240			send_notify_ack = 0;
5241		break;
5242
5243	case IMM_NTFY_RESOURCE:
5244		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
5245		    "qla_target(%d): Out of resources, host %ld\n",
5246		    vha->vp_idx, vha->host_no);
5247		break;
5248
5249	case IMM_NTFY_MSG_RX:
5250		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
5251		    "qla_target(%d): Immediate notify task %x\n",
5252		    vha->vp_idx, iocb->u.isp2x.task_flags);
5253		break;
5254
5255	case IMM_NTFY_ELS:
5256		if (qlt_24xx_handle_els(vha, iocb) == 0)
5257			send_notify_ack = 0;
5258		break;
5259	default:
5260		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
5261		    "qla_target(%d): Received unknown immediate "
5262		    "notify status %x\n", vha->vp_idx, status);
5263		break;
5264	}
5265
5266	if (send_notify_ack)
5267		qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0,
5268		    0, 0);
5269}
5270
5271/*
5272 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
5273 * This function sends busy to ISP 2xxx or 24xx.
5274 */
5275static int __qlt_send_busy(struct qla_qpair *qpair,
5276	struct atio_from_isp *atio, uint16_t status)
5277{
5278	struct scsi_qla_host *vha = qpair->vha;
5279	struct ctio7_to_24xx *ctio24;
5280	struct qla_hw_data *ha = vha->hw;
5281	request_t *pkt;
5282	struct fc_port *sess = NULL;
5283	unsigned long flags;
5284	u16 temp;
5285	port_id_t id;
5286
5287	id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
5288
5289	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5290	sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
5291	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5292	if (!sess) {
5293		qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
5294		return 0;
5295	}
5296	/* Sending a marker isn't necessary, since we are called from the ISR */
5297
5298	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
5299	if (!pkt) {
5300		ql_dbg(ql_dbg_io, vha, 0x3063,
5301		    "qla_target(%d): %s failed: unable to allocate "
5302		    "request packet", vha->vp_idx, __func__);
5303		return -ENOMEM;
5304	}
5305
5306	qpair->tgt_counters.num_q_full_sent++;
5307	pkt->entry_count = 1;
5308	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
5309
5310	ctio24 = (struct ctio7_to_24xx *)pkt;
5311	ctio24->entry_type = CTIO_TYPE7;
5312	ctio24->nport_handle = cpu_to_le16(sess->loop_id);
5313	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
5314	ctio24->vp_index = vha->vp_idx;
5315	ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
5316	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
5317	temp = (atio->u.isp24.attr << 9) |
5318		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
5319		CTIO7_FLAGS_DONT_RET_CTIO;
5320	ctio24->u.status1.flags = cpu_to_le16(temp);
5321	/*
5322	 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
5323	 * if explicit confirmation is used.
5324	 */
5325	ctio24->u.status1.ox_id =
5326		cpu_to_le16(be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
5327	ctio24->u.status1.scsi_status = cpu_to_le16(status);
5328
5329	ctio24->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
5330
5331	if (ctio24->u.status1.residual != 0)
5332		ctio24->u.status1.scsi_status |= cpu_to_le16(SS_RESIDUAL_UNDER);
5333
5334	/* Memory Barrier */
5335	wmb();
5336	if (qpair->reqq_start_iocbs)
5337		qpair->reqq_start_iocbs(qpair);
5338	else
5339		qla2x00_start_iocbs(vha, qpair->req);
5340	return 0;
5341}
5342
5343/*
5344 * This routine is used to allocate a command for either a QFull condition
5345 * (i.e. to reply SAM_STAT_BUSY) or to terminate an exchange that did not go
5346 * out previously.
5347 */
5348static void
5349qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
5350	struct atio_from_isp *atio, uint16_t status, int qfull)
5351{
5352	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5353	struct qla_hw_data *ha = vha->hw;
5354	struct fc_port *sess;
5355	struct qla_tgt_cmd *cmd;
5356	unsigned long flags;
5357
5358	if (unlikely(tgt->tgt_stop)) {
5359		ql_dbg(ql_dbg_io, vha, 0x300a,
5360			"New command while device %p is shutting down\n", tgt);
5361		return;
5362	}
5363
5364	if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
5365		vha->hw->tgt.num_qfull_cmds_dropped++;
5366		if (vha->hw->tgt.num_qfull_cmds_dropped >
5367			vha->qla_stats.stat_max_qfull_cmds_dropped)
5368			vha->qla_stats.stat_max_qfull_cmds_dropped =
5369				vha->hw->tgt.num_qfull_cmds_dropped;
5370
5371		ql_dbg(ql_dbg_io, vha, 0x3068,
5372			"qla_target(%d): %s: QFull CMD dropped[%d]\n",
5373			vha->vp_idx, __func__,
5374			vha->hw->tgt.num_qfull_cmds_dropped);
5375
5376		qlt_chk_exch_leak_thresh_hold(vha);
5377		return;
5378	}
5379
5380	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
5381	    atio->u.isp24.fcp_hdr.s_id);
5382	if (!sess)
5383		return;
5384
5385	cmd = ha->tgt.tgt_ops->get_cmd(sess);
5386	if (!cmd) {
5387		ql_dbg(ql_dbg_io, vha, 0x3009,
5388			"qla_target(%d): %s: Allocation of cmd failed\n",
5389			vha->vp_idx, __func__);
5390
5391		vha->hw->tgt.num_qfull_cmds_dropped++;
5392		if (vha->hw->tgt.num_qfull_cmds_dropped >
5393			vha->qla_stats.stat_max_qfull_cmds_dropped)
5394			vha->qla_stats.stat_max_qfull_cmds_dropped =
5395				vha->hw->tgt.num_qfull_cmds_dropped;
5396
5397		qlt_chk_exch_leak_thresh_hold(vha);
5398		return;
5399	}
5400
5401	qlt_incr_num_pend_cmds(vha);
5402	INIT_LIST_HEAD(&cmd->cmd_list);
5403	memcpy(&cmd->atio, atio, sizeof(*atio));
5404
	cmd->tgt = vha->vha_tgt.qla_tgt;
	cmd->vha = vha;
	cmd->reset_count = ha->base_qpair->chip_reset;
	cmd->qpair = ha->base_qpair;

	/* Set exactly one of q_full/term_exchg so that
	 * qlt_free_qfull_cmds() takes the intended replay path.
	 */
	if (qfull) {
		cmd->q_full = 1;
		/* NOTE: borrowing the state field to carry the status */
		cmd->state = status;
	} else
		cmd->term_exchg = 1;
5417
5418	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5419	list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
5420
5421	vha->hw->tgt.num_qfull_cmds_alloc++;
5422	if (vha->hw->tgt.num_qfull_cmds_alloc >
5423		vha->qla_stats.stat_max_qfull_cmds_alloc)
5424		vha->qla_stats.stat_max_qfull_cmds_alloc =
5425			vha->hw->tgt.num_qfull_cmds_alloc;
5426	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5427}
5428
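/*
 * Replay deferred q_full commands: splice ha->tgt.q_full_list out under
 * q_full_lock, then, under the qpair lock, either send SAM BUSY status
 * (cmd->q_full, with the status parked in cmd->state) or terminate the
 * exchange (cmd->term_exchg).  Commands that were sent are freed locally,
 * since they were never handed to TCM; on -ENOMEM the remainder is
 * spliced back for a later retry.
 */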
5429int
5430qlt_free_qfull_cmds(struct qla_qpair *qpair)
5431{
5432	struct scsi_qla_host *vha = qpair->vha;
5433	struct qla_hw_data *ha = vha->hw;
5434	unsigned long flags;
5435	struct qla_tgt_cmd *cmd, *tcmd;
5436	struct list_head free_list, q_full_list;
5437	int rc = 0;
5438
5439	if (list_empty(&ha->tgt.q_full_list))
5440		return 0;
5441
5442	INIT_LIST_HEAD(&free_list);
5443	INIT_LIST_HEAD(&q_full_list);
5444
5445	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5446	if (list_empty(&ha->tgt.q_full_list)) {
5447		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5448		return 0;
5449	}
5450
5451	list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
5452	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5453
5454	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
5455	list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) {
5456		if (cmd->q_full)
5457			/* cmd->state is a borrowed field to hold status */
5458			rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state);
5459		else if (cmd->term_exchg)
5460			rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio);
5461
5462		if (rc == -ENOMEM)
5463			break;
5464
5465		if (cmd->q_full)
5466			ql_dbg(ql_dbg_io, vha, 0x3006,
5467			    "%s: busy sent for ox_id[%04x]\n", __func__,
5468			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
5469		else if (cmd->term_exchg)
5470			ql_dbg(ql_dbg_io, vha, 0x3007,
5471			    "%s: Term exchg sent for ox_id[%04x]\n", __func__,
5472			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
5473		else
5474			ql_dbg(ql_dbg_io, vha, 0x3008,
5475			    "%s: Unexpected cmd in QFull list %p\n", __func__,
5476			    cmd);
5477
5478		list_del(&cmd->cmd_list);
5479		list_add_tail(&cmd->cmd_list, &free_list);
5480
		/* piggyback on the qpair lock (hardware_lock) for protection */
5482		vha->hw->tgt.num_qfull_cmds_alloc--;
5483	}
5484	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
5485
	list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
5489		list_del(&cmd->cmd_list);
5490		/* This cmd was never sent to TCM.  There is no need
5491		 * to schedule free or call free_cmd
5492		 */
5493		qlt_free_cmd(cmd);
5494	}
5495
5496	if (!list_empty(&q_full_list)) {
5497		spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5498		list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
5499		spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5500	}
5501
5502	return rc;
5503}
5504
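/*
 * Reply SAM BUSY/TASK SET FULL status for an ATIO.  If no request-queue
 * space is available (-ENOMEM), the reply is deferred via
 * qlt_alloc_qfull_cmd() and replayed later by qlt_free_qfull_cmds().
 */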
5505static void
5506qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio,
5507    uint16_t status)
5508{
5509	int rc = 0;
5510	struct scsi_qla_host *vha = qpair->vha;
5511
5512	rc = __qlt_send_busy(qpair, atio, status);
5513	if (rc == -ENOMEM)
5514		qlt_alloc_qfull_cmd(vha, atio, status, 1);
5515}
5516
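/*
 * Returns 1 after replying BUSY when the number of pending commands has
 * crossed Q_FULL_THRESH_HOLD(ha); returns 0 when the command may proceed.
 */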
5517static int
5518qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
5519	struct atio_from_isp *atio, uint8_t ha_locked)
5520{
5521	struct qla_hw_data *ha = vha->hw;
5522	unsigned long flags;
5523
5524	if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
5525		return 0;
5526
5527	if (!ha_locked)
5528		spin_lock_irqsave(&ha->hardware_lock, flags);
5529	qlt_send_busy(qpair, atio, qla_sam_status);
5530	if (!ha_locked)
5531		spin_unlock_irqrestore(&ha->hardware_lock, flags);
5532
5533	return 1;
5534}
5535
/* ha->hardware_lock is held on entry iff ha_locked is set */
/* called via callback from qla2xxx */
5538static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5539	struct atio_from_isp *atio, uint8_t ha_locked)
5540{
5541	struct qla_hw_data *ha = vha->hw;
5542	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5543	int rc;
5544	unsigned long flags = 0;
5545
5546	if (unlikely(tgt == NULL)) {
5547		ql_dbg(ql_dbg_tgt, vha, 0x3064,
5548		    "ATIO pkt, but no tgt (ha %p)", ha);
5549		return;
5550	}
5551	/*
5552	 * In tgt_stop mode we also should allow all requests to pass.
5553	 * Otherwise, some commands can stuck.
5554	 */
5555
5556	tgt->atio_irq_cmd_count++;
5557
5558	switch (atio->u.raw.entry_type) {
5559	case ATIO_TYPE7:
5560		if (unlikely(atio->u.isp24.exchange_addr ==
5561			     cpu_to_le32(ATIO_EXCHANGE_ADDRESS_UNKNOWN))) {
5562			ql_dbg(ql_dbg_io, vha, 0x3065,
5563			    "qla_target(%d): ATIO_TYPE7 "
5564			    "received with UNKNOWN exchange address, "
5565			    "sending QUEUE_FULL\n", vha->vp_idx);
5566			if (!ha_locked)
5567				spin_lock_irqsave(&ha->hardware_lock, flags);
5568			qlt_send_busy(ha->base_qpair, atio, qla_sam_status);
5569			if (!ha_locked)
5570				spin_unlock_irqrestore(&ha->hardware_lock,
5571				    flags);
5572			break;
5573		}
5574
5575		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
5576			rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair,
5577			    atio, ha_locked);
5578			if (rc != 0) {
5579				tgt->atio_irq_cmd_count--;
5580				return;
5581			}
5582			rc = qlt_handle_cmd_for_atio(vha, atio);
5583		} else {
5584			rc = qlt_handle_task_mgmt(vha, atio);
5585		}
5586		if (unlikely(rc != 0)) {
5587			if (!ha_locked)
5588				spin_lock_irqsave(&ha->hardware_lock, flags);
5589			switch (rc) {
5590			case -ENODEV:
5591				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5592				    "qla_target: Unable to send command to target\n");
5593				break;
5594			case -EBADF:
5595				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5596				    "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
5597				qlt_send_term_exchange(ha->base_qpair, NULL,
5598				    atio, 1, 0);
5599				break;
5600			case -EBUSY:
5601				ql_dbg(ql_dbg_tgt, vha, 0xe060,
5602				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
5603				    vha->vp_idx);
5604				qlt_send_busy(ha->base_qpair, atio,
5605				    tc_sam_status);
5606				break;
5607			default:
5608				ql_dbg(ql_dbg_tgt, vha, 0xe060,
5609				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
5610				    vha->vp_idx);
5611				qlt_send_busy(ha->base_qpair, atio,
5612				    qla_sam_status);
5613				break;
5614			}
5615			if (!ha_locked)
5616				spin_unlock_irqrestore(&ha->hardware_lock,
5617				    flags);
5618		}
5619		break;
5620
5621	case IMMED_NOTIFY_TYPE:
5622	{
5623		if (unlikely(atio->u.isp2x.entry_status != 0)) {
5624			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
5625			    "qla_target(%d): Received ATIO packet %x "
5626			    "with error status %x\n", vha->vp_idx,
5627			    atio->u.raw.entry_type,
5628			    atio->u.isp2x.entry_status);
5629			break;
5630		}
5631		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
5632
5633		if (!ha_locked)
5634			spin_lock_irqsave(&ha->hardware_lock, flags);
5635		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
5636		if (!ha_locked)
5637			spin_unlock_irqrestore(&ha->hardware_lock, flags);
5638		break;
5639	}
5640
5641	default:
5642		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
5643		    "qla_target(%d): Received unknown ATIO atio "
5644		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
5645		break;
5646	}
5647
5648	tgt->atio_irq_cmd_count--;
5649}
5650
5651/*
5652 * qpair lock is assume to be held
5653 * rc = 0 : send terminate & abts respond
5654 * rc != 0: do not send term & abts respond
5655 */
5656static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
5657    struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry)
5658{
5659	struct qla_hw_data *ha = vha->hw;
5660	int rc = 0;
5661
5662	/*
5663	 * Detect unresolved exchange. If the same ABTS is unable
5664	 * to terminate an existing command and the same ABTS loops
5665	 * between FW & Driver, then force FW dump. Under 1 jiff,
5666	 * we should see multiple loops.
5667	 */
5668	if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort &&
5669	    qpair->retry_term_jiff == jiffies) {
5670		/* found existing exchange */
5671		qpair->retry_term_cnt++;
5672		if (qpair->retry_term_cnt >= 5) {
5673			rc = -EIO;
5674			qpair->retry_term_cnt = 0;
5675			ql_log(ql_log_warn, vha, 0xffff,
5676			    "Unable to send ABTS Respond. Dumping firmware.\n");
5677			ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer,
5678			    vha, 0xffff, (uint8_t *)entry, sizeof(*entry));
5679
5680			if (qpair == ha->base_qpair)
5681				ha->isp_ops->fw_dump(vha);
5682			else
5683				qla2xxx_dump_fw(vha);
5684
5685			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
5686			qla2xxx_wake_dpc(vha);
5687		}
5688	} else if (qpair->retry_term_jiff != jiffies) {
5689		qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort;
5690		qpair->retry_term_cnt = 0;
5691		qpair->retry_term_jiff = jiffies;
5692	}
5693
5694	return rc;
5695}
5696
5697
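/*
 * Completion path for an ABTS response previously handed to the firmware.
 * On failure with error subcodes 0x1E/0 the exchange termination is
 * retried; qlt_chk_unresolv_exchg() breaks the retry loop by forcing a
 * firmware dump and an ISP abort if the same exchange keeps bouncing
 * between firmware and driver.
 */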
5698static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
5699	struct rsp_que *rsp, response_t *pkt)
5700{
5701	struct abts_resp_from_24xx_fw *entry =
5702		(struct abts_resp_from_24xx_fw *)pkt;
5703	u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK;
5704	struct qla_tgt_mgmt_cmd *mcmd;
5705	struct qla_hw_data *ha = vha->hw;
5706
5707	mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt);
5708	if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
5709		ql_dbg(ql_dbg_async, vha, 0xe064,
5710		    "qla_target(%d): ABTS Comp without mcmd\n",
5711		    vha->vp_idx);
5712		return;
5713	}
5714
5715	if (mcmd)
		vha = mcmd->vha;
5717	vha->vha_tgt.qla_tgt->abts_resp_expected--;
5718
5719	ql_dbg(ql_dbg_tgt, vha, 0xe038,
5720	    "ABTS_RESP_24XX: compl_status %x\n",
5721	    entry->compl_status);
5722
5723	if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
5724		if (le32_to_cpu(entry->error_subcode1) == 0x1E &&
5725		    le32_to_cpu(entry->error_subcode2) == 0) {
5726			if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
5727				ha->tgt.tgt_ops->free_mcmd(mcmd);
5728				return;
5729			}
5730			qlt_24xx_retry_term_exchange(vha, rsp->qpair,
5731			    pkt, mcmd);
5732		} else {
5733			ql_dbg(ql_dbg_tgt, vha, 0xe063,
5734			    "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)",
5735			    vha->vp_idx, entry->compl_status,
5736			    entry->error_subcode1,
5737			    entry->error_subcode2);
5738			ha->tgt.tgt_ops->free_mcmd(mcmd);
5739		}
5740	} else if (mcmd) {
5741		ha->tgt.tgt_ops->free_mcmd(mcmd);
5742	}
5743}
5744
5745/* ha->hardware_lock supposed to be held on entry */
5746/* called via callback from qla2xxx */
5747static void qlt_response_pkt(struct scsi_qla_host *vha,
5748	struct rsp_que *rsp, response_t *pkt)
5749{
5750	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5751
5752	if (unlikely(tgt == NULL)) {
5753		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
5754		    "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n",
5755		    vha->vp_idx, pkt->entry_type, vha->hw);
5756		return;
5757	}
5758
5759	/*
5760	 * In tgt_stop mode we also should allow all requests to pass.
5761	 * Otherwise, some commands can stuck.
5762	 */
5763
5764	switch (pkt->entry_type) {
5765	case CTIO_CRC2:
5766	case CTIO_TYPE7:
5767	{
5768		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
5769
5770		qlt_do_ctio_completion(vha, rsp, entry->handle,
5771		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5772		    entry);
5773		break;
5774	}
5775
5776	case ACCEPT_TGT_IO_TYPE:
5777	{
5778		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
5779		int rc;
5780
5781		if (atio->u.isp2x.status !=
5782		    cpu_to_le16(ATIO_CDB_VALID)) {
5783			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
5784			    "qla_target(%d): ATIO with error "
5785			    "status %x received\n", vha->vp_idx,
5786			    le16_to_cpu(atio->u.isp2x.status));
5787			break;
5788		}
5789
5790		rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1);
5791		if (rc != 0)
5792			return;
5793
5794		rc = qlt_handle_cmd_for_atio(vha, atio);
5795		if (unlikely(rc != 0)) {
5796			switch (rc) {
5797			case -ENODEV:
5798				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5799				    "qla_target: Unable to send command to target\n");
5800				break;
5801			case -EBADF:
5802				ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5803				    "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
5804				qlt_send_term_exchange(rsp->qpair, NULL,
5805				    atio, 1, 0);
5806				break;
5807			case -EBUSY:
5808				ql_dbg(ql_dbg_tgt, vha, 0xe060,
5809				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
5810				    vha->vp_idx);
5811				qlt_send_busy(rsp->qpair, atio,
5812				    tc_sam_status);
5813				break;
5814			default:
5815				ql_dbg(ql_dbg_tgt, vha, 0xe060,
5816				    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
5817				    vha->vp_idx);
5818				qlt_send_busy(rsp->qpair, atio,
5819				    qla_sam_status);
5820				break;
5821			}
5822		}
5823	}
5824	break;
5825
5826	case CONTINUE_TGT_IO_TYPE:
5827	{
5828		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
5829
5830		qlt_do_ctio_completion(vha, rsp, entry->handle,
5831		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5832		    entry);
5833		break;
5834	}
5835
5836	case CTIO_A64_TYPE:
5837	{
5838		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
5839
5840		qlt_do_ctio_completion(vha, rsp, entry->handle,
5841		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5842		    entry);
5843		break;
5844	}
5845
5846	case IMMED_NOTIFY_TYPE:
5847		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
5848		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
5849		break;
5850
5851	case NOTIFY_ACK_TYPE:
5852		if (tgt->notify_ack_expected > 0) {
5853			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
5854
5855			ql_dbg(ql_dbg_tgt, vha, 0xe036,
5856			    "NOTIFY_ACK seq %08x status %x\n",
5857			    le16_to_cpu(entry->u.isp2x.seq_id),
5858			    le16_to_cpu(entry->u.isp2x.status));
5859			tgt->notify_ack_expected--;
5860			if (entry->u.isp2x.status !=
5861			    cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
5862				ql_dbg(ql_dbg_tgt, vha, 0xe061,
5863				    "qla_target(%d): NOTIFY_ACK "
5864				    "failed %x\n", vha->vp_idx,
5865				    le16_to_cpu(entry->u.isp2x.status));
5866			}
5867		} else {
5868			ql_dbg(ql_dbg_tgt, vha, 0xe062,
5869			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
5870			    vha->vp_idx);
5871		}
5872		break;
5873
5874	case ABTS_RECV_24XX:
5875		ql_dbg(ql_dbg_tgt, vha, 0xe037,
5876		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
5877		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
5878		break;
5879
5880	case ABTS_RESP_24XX:
5881		if (tgt->abts_resp_expected > 0) {
5882			qlt_handle_abts_completion(vha, rsp, pkt);
5883		} else {
5884			ql_dbg(ql_dbg_tgt, vha, 0xe064,
5885			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
5886			    "received\n", vha->vp_idx);
5887		}
5888		break;
5889
5890	default:
5891		ql_dbg(ql_dbg_tgt, vha, 0xe065,
5892		    "qla_target(%d): Received unknown response pkt "
5893		    "type %x\n", vha->vp_idx, pkt->entry_type);
5894		break;
5895	}
5896
5897}
5898
5899/*
5900 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
5901 */
5902void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
5903	uint16_t *mailbox)
5904{
5905	struct qla_hw_data *ha = vha->hw;
5906	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5907	int login_code;
5908
5909	if (!tgt || tgt->tgt_stop || tgt->tgt_stopped)
5910		return;
5911
5912	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
5913	    IS_QLA2100(ha))
5914		return;
5915	/*
5916	 * In tgt_stop mode we also should allow all requests to pass.
5917	 * Otherwise, some commands can stuck.
5918	 */
5919
5920
5921	switch (code) {
5922	case MBA_RESET:			/* Reset */
5923	case MBA_SYSTEM_ERR:		/* System Error */
5924	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
5925	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
5926		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
5927		    "qla_target(%d): System error async event %#x "
5928		    "occurred", vha->vp_idx, code);
5929		break;
5930	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
5931		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
5932		break;
5933
5934	case MBA_LOOP_UP:
5935	{
5936		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
5937		    "qla_target(%d): Async LOOP_UP occurred "
5938		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
5939		    mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
5940		if (tgt->link_reinit_iocb_pending) {
5941			qlt_send_notify_ack(ha->base_qpair,
5942			    &tgt->link_reinit_iocb,
5943			    0, 0, 0, 0, 0, 0);
5944			tgt->link_reinit_iocb_pending = 0;
5945		}
5946		break;
5947	}
5948
5949	case MBA_LIP_OCCURRED:
5950	case MBA_LOOP_DOWN:
5951	case MBA_LIP_RESET:
5952	case MBA_RSCN_UPDATE:
5953		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
5954		    "qla_target(%d): Async event %#x occurred "
5955		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
5956		    mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
5957		break;
5958
5959	case MBA_REJECTED_FCP_CMD:
5960		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
5961		    "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
5962		    vha->vp_idx,
5963		    mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
5964
5965		if (mailbox[3] == 1) {
5966			/* exchange starvation. */
5967			vha->hw->exch_starvation++;
5968			if (vha->hw->exch_starvation > 5) {
5969				ql_log(ql_log_warn, vha, 0xd03a,
5970				    "Exchange starvation-. Resetting RISC\n");
5971
5972				vha->hw->exch_starvation = 0;
5973				if (IS_P3P_TYPE(vha->hw))
5974					set_bit(FCOE_CTX_RESET_NEEDED,
5975					    &vha->dpc_flags);
5976				else
5977					set_bit(ISP_ABORT_NEEDED,
5978					    &vha->dpc_flags);
5979				qla2xxx_wake_dpc(vha);
5980			}
5981		}
5982		break;
5983
5984	case MBA_PORT_UPDATE:
5985		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
5986		    "qla_target(%d): Port update async event %#x "
5987		    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
5988		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
5989		    mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
5990
5991		login_code = mailbox[2];
5992		if (login_code == 0x4) {
5993			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
5994			    "Async MB 2: Got PLOGI Complete\n");
5995			vha->hw->exch_starvation = 0;
5996		} else if (login_code == 0x7)
5997			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
5998			    "Async MB 2: Port Logged Out\n");
5999		break;
6000	default:
6001		break;
6002	}
6003
6004}
6005
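/*
 * Fetch the port database entry for @loop_id into a temporary fc_port.
 * If a port with the same WWPN already exists, refresh it and free the
 * temporary allocation; otherwise add the new port to vp_fcports and
 * mark its login complete.
 */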
6006static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
6007	uint16_t loop_id)
6008{
6009	fc_port_t *fcport, *tfcp, *del;
6010	int rc;
6011	unsigned long flags;
6012	u8 newfcport = 0;
6013
6014	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
6015	if (!fcport) {
6016		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
6017		    "qla_target(%d): Allocation of tmp FC port failed",
6018		    vha->vp_idx);
6019		return NULL;
6020	}
6021
6022	fcport->loop_id = loop_id;
6023
6024	rc = qla24xx_gpdb_wait(vha, fcport, 0);
6025	if (rc != QLA_SUCCESS) {
6026		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
6027		    "qla_target(%d): Failed to retrieve fcport "
6028		    "information -- get_port_database() returned %x "
6029		    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
6030		kfree(fcport);
6031		return NULL;
6032	}
6033
6034	del = NULL;
6035	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
6036	tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1);
6037
6038	if (tfcp) {
6039		tfcp->d_id = fcport->d_id;
6040		tfcp->port_type = fcport->port_type;
6041		tfcp->supported_classes = fcport->supported_classes;
6042		tfcp->flags |= fcport->flags;
6043		tfcp->scan_state = QLA_FCPORT_FOUND;
6044
6045		del = fcport;
6046		fcport = tfcp;
6047	} else {
6048		if (vha->hw->current_topology == ISP_CFG_F)
6049			fcport->flags |= FCF_FABRIC_DEVICE;
6050
6051		list_add_tail(&fcport->list, &vha->vp_fcports);
		if (!IS_SW_RESV_ADDR(fcport->d_id))
			vha->fcport_count++;
6054		fcport->login_gen++;
6055		qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
6056		fcport->login_succ = 1;
6057		newfcport = 1;
6058	}
6059
6060	fcport->deleted = 0;
6061	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
6062
6063	switch (vha->host->active_mode) {
6064	case MODE_INITIATOR:
6065	case MODE_DUAL:
6066		if (newfcport) {
6067			if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
6068				qla24xx_sched_upd_fcport(fcport);
6069			} else {
6070				ql_dbg(ql_dbg_disc, vha, 0x20ff,
6071				   "%s %d %8phC post gpsc fcp_cnt %d\n",
6072				   __func__, __LINE__, fcport->port_name, vha->fcport_count);
6073				qla24xx_post_gpsc_work(vha, fcport);
6074			}
6075		}
6076		break;
6077
6078	case MODE_TARGET:
6079	default:
6080		break;
6081	}
6082	if (del)
6083		qla2x00_free_fcport(del);
6084
6085	return fcport;
6086}
6087
/* Takes and releases tgt_mutex internally; callers must not hold it */
6089static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
6090					   be_id_t s_id)
6091{
6092	struct fc_port *sess = NULL;
6093	fc_port_t *fcport = NULL;
6094	int rc, global_resets;
6095	uint16_t loop_id = 0;
6096
6097	if (s_id.domain == 0xFF && s_id.area == 0xFC) {
6098		/*
6099		 * This is Domain Controller, so it should be
6100		 * OK to drop SCSI commands from it.
6101		 */
6102		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
6103		    "Unable to find initiator with S_ID %x:%x:%x",
6104		    s_id.domain, s_id.area, s_id.al_pa);
6105		return NULL;
6106	}
6107
6108	mutex_lock(&vha->vha_tgt.tgt_mutex);
6109
6110retry:
6111	global_resets =
6112	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
6113
6114	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
6115	if (rc != 0) {
6116		mutex_unlock(&vha->vha_tgt.tgt_mutex);
6117
6118		ql_log(ql_log_info, vha, 0xf071,
6119		    "qla_target(%d): Unable to find "
6120		    "initiator with S_ID %x:%x:%x",
6121		    vha->vp_idx, s_id.domain, s_id.area, s_id.al_pa);
6122
6123		if (rc == -ENOENT) {
6124			qlt_port_logo_t logo;
6125
6126			logo.id = be_to_port_id(s_id);
6127			logo.cmd_count = 1;
6128			qlt_send_first_logo(vha, &logo);
6129		}
6130
6131		return NULL;
6132	}
6133
6134	fcport = qlt_get_port_database(vha, loop_id);
6135	if (!fcport) {
6136		mutex_unlock(&vha->vha_tgt.tgt_mutex);
6137		return NULL;
6138	}
6139
6140	if (global_resets !=
6141	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
6142		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
6143		    "qla_target(%d): global reset during session discovery "
6144		    "(counter was %d, new %d), retrying", vha->vp_idx,
6145		    global_resets,
6146		    atomic_read(&vha->vha_tgt.
6147			qla_tgt->tgt_global_resets_count));
6148		goto retry;
6149	}
6150
6151	sess = qlt_create_sess(vha, fcport, true);
6152
6153	mutex_unlock(&vha->vha_tgt.tgt_mutex);
6154
6155	return sess;
6156}
6157
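/*
 * Deferred ABTS handling (QLA_TGT_SESS_WORK_ABORT): look up the initiator
 * session by S_ID, creating a local session if none exists, and replay
 * the ABTS.  Any failure is answered with FCP_TMF_REJECTED.
 */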
6158static void qlt_abort_work(struct qla_tgt *tgt,
6159	struct qla_tgt_sess_work_param *prm)
6160{
6161	struct scsi_qla_host *vha = tgt->vha;
6162	struct qla_hw_data *ha = vha->hw;
6163	struct fc_port *sess = NULL;
6164	unsigned long flags = 0, flags2 = 0;
6165	be_id_t s_id;
6166	int rc;
6167
6168	spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
6169
6170	if (tgt->tgt_stop)
6171		goto out_term2;
6172
6173	s_id = le_id_to_be(prm->abts.fcp_hdr_le.s_id);
6174
6175	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
6176	if (!sess) {
6177		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6178
6179		sess = qlt_make_local_sess(vha, s_id);
		/* sess holds an extra creation reference */
6181
6182		spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
6183		if (!sess)
6184			goto out_term2;
6185	} else {
6186		if (sess->deleted) {
6187			sess = NULL;
6188			goto out_term2;
6189		}
6190
6191		if (!kref_get_unless_zero(&sess->sess_kref)) {
6192			ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
6193			    "%s: kref_get fail %8phC \n",
6194			     __func__, sess->port_name);
6195			sess = NULL;
6196			goto out_term2;
6197		}
6198	}
6199
6200	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
6201	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6202
6203	ha->tgt.tgt_ops->put_sess(sess);
6204
6205	if (rc != 0)
6206		goto out_term;
6207	return;
6208
6209out_term2:
6210	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6211
6212out_term:
6213	spin_lock_irqsave(&ha->hardware_lock, flags);
6214	qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts,
6215	    FCP_TMF_REJECTED, false);
6216	spin_unlock_irqrestore(&ha->hardware_lock, flags);
6217}
6218
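/*
 * Deferred task-management handling (QLA_TGT_SESS_WORK_TM): resolve the
 * session as in qlt_abort_work(), then pass the TMF to
 * qlt_issue_task_mgmt().  On failure the exchange is terminated.
 */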
6219static void qlt_tmr_work(struct qla_tgt *tgt,
6220	struct qla_tgt_sess_work_param *prm)
6221{
6222	struct atio_from_isp *a = &prm->tm_iocb2;
6223	struct scsi_qla_host *vha = tgt->vha;
6224	struct qla_hw_data *ha = vha->hw;
6225	struct fc_port *sess;
6226	unsigned long flags;
6227	be_id_t s_id;
6228	int rc;
6229	u64 unpacked_lun;
6230	int fn;
6231	void *iocb;
6232
6233	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
6234
6235	if (tgt->tgt_stop)
6236		goto out_term2;
6237
6238	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
6239	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
6240	if (!sess) {
6241		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6242
6243		sess = qlt_make_local_sess(vha, s_id);
		/* sess holds an extra creation reference */
6245
6246		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
6247		if (!sess)
6248			goto out_term2;
6249	} else {
		if (sess->deleted)
			goto out_term2;

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
			    "%s: kref_get fail %8phC\n",
			    __func__, sess->port_name);
6258			goto out_term2;
6259		}
6260	}
6261
6262	iocb = a;
6263	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
6264	unpacked_lun =
6265	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
6266
6267	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
6268	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6269
6270	ha->tgt.tgt_ops->put_sess(sess);
6271
6272	if (rc != 0)
6273		goto out_term;
6274	return;
6275
6276out_term2:
6277	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6278out_term:
6279	qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0);
6280}
6281
6282static void qlt_sess_work_fn(struct work_struct *work)
6283{
6284	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
6285	struct scsi_qla_host *vha = tgt->vha;
6286	unsigned long flags;
6287
6288	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
6289
6290	spin_lock_irqsave(&tgt->sess_work_lock, flags);
6291	while (!list_empty(&tgt->sess_works_list)) {
6292		struct qla_tgt_sess_work_param *prm = list_entry(
6293		    tgt->sess_works_list.next, typeof(*prm),
6294		    sess_works_list_entry);
6295
6296		/*
6297		 * This work can be scheduled on several CPUs at time, so we
6298		 * must delete the entry to eliminate double processing
6299		 */
6300		list_del(&prm->sess_works_list_entry);
6301
6302		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
6303
6304		switch (prm->type) {
6305		case QLA_TGT_SESS_WORK_ABORT:
6306			qlt_abort_work(tgt, prm);
6307			break;
6308		case QLA_TGT_SESS_WORK_TM:
6309			qlt_tmr_work(tgt, prm);
6310			break;
6311		default:
6312			BUG_ON(1);
6313			break;
6314		}
6315
6316		spin_lock_irqsave(&tgt->sess_work_lock, flags);
6317
6318		kfree(prm);
6319	}
6320	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
6321}
6322
6323/* Must be called under tgt_host_action_mutex */
6324int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
6325{
6326	struct qla_tgt *tgt;
6327	int rc, i;
6328	struct qla_qpair_hint *h;
6329
6330	if (!QLA_TGT_MODE_ENABLED())
6331		return 0;
6332
6333	if (!IS_TGT_MODE_CAPABLE(ha)) {
6334		ql_log(ql_log_warn, base_vha, 0xe070,
6335		    "This adapter does not support target mode.\n");
6336		return 0;
6337	}
6338
6339	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
6340	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha);
6341
6342	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);
6343
6344	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
6345	if (!tgt) {
6346		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
6347		    "Unable to allocate struct qla_tgt\n");
6348		return -ENOMEM;
6349	}
6350
6351	tgt->qphints = kcalloc(ha->max_qpairs + 1,
6352			       sizeof(struct qla_qpair_hint),
6353			       GFP_KERNEL);
6354	if (!tgt->qphints) {
6355		kfree(tgt);
6356		ql_log(ql_log_warn, base_vha, 0x0197,
6357		    "Unable to allocate qpair hints.\n");
6358		return -ENOMEM;
6359	}
6360
6361	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
6362		base_vha->host->hostt->supported_mode |= MODE_TARGET;
6363
6364	rc = btree_init64(&tgt->lun_qpair_map);
6365	if (rc) {
6366		kfree(tgt->qphints);
6367		kfree(tgt);
6368		ql_log(ql_log_info, base_vha, 0x0198,
6369			"Unable to initialize lun_qpair_map btree\n");
6370		return -EIO;
6371	}
6372	h = &tgt->qphints[0];
6373	h->qpair = ha->base_qpair;
6374	INIT_LIST_HEAD(&h->hint_elem);
6375	h->cpuid = ha->base_qpair->cpuid;
6376	list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list);
6377
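	/*
	 * Slot 0 above maps the base qpair; slots 1..max_qpairs map the
	 * remaining queue pairs, leaving the hint unlinked for any qpair
	 * that was never created.
	 */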
6378	for (i = 0; i < ha->max_qpairs; i++) {
6379		unsigned long flags;
6380
6381		struct qla_qpair *qpair = ha->queue_pair_map[i];
6382
6383		h = &tgt->qphints[i + 1];
6384		INIT_LIST_HEAD(&h->hint_elem);
6385		if (qpair) {
6386			h->qpair = qpair;
6387			spin_lock_irqsave(qpair->qp_lock_ptr, flags);
6388			list_add_tail(&h->hint_elem, &qpair->hints_list);
6389			spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
6390			h->cpuid = qpair->cpuid;
6391		}
6392	}
6393
6394	tgt->ha = ha;
6395	tgt->vha = base_vha;
6396	init_waitqueue_head(&tgt->waitQ);
6397	INIT_LIST_HEAD(&tgt->del_sess_list);
6398	spin_lock_init(&tgt->sess_work_lock);
6399	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
6400	INIT_LIST_HEAD(&tgt->sess_works_list);
6401	atomic_set(&tgt->tgt_global_resets_count, 0);
6402
6403	base_vha->vha_tgt.qla_tgt = tgt;
6404
6405	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
6406		"qla_target(%d): using 64 Bit PCI addressing",
6407		base_vha->vp_idx);
6408	/* 3 is reserved */
6409	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
6410
6411	mutex_lock(&qla_tgt_mutex);
6412	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
6413	mutex_unlock(&qla_tgt_mutex);
6414
6415	if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
6416		ha->tgt.tgt_ops->add_target(base_vha);
6417
6418	return 0;
6419}
6420
6421/* Must be called under tgt_host_action_mutex */
6422int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
6423{
6424	if (!vha->vha_tgt.qla_tgt)
6425		return 0;
6426
6427	if (vha->fc_vport) {
6428		qlt_release(vha->vha_tgt.qla_tgt);
6429		return 0;
6430	}
6431
6432	/* free left over qfull cmds */
6433	qlt_init_term_exchange(vha);
6434
6435	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
6436	    vha->host_no, ha);
6437	qlt_release(vha->vha_tgt.qla_tgt);
6438
6439	return 0;
6440}
6441
6442void qlt_remove_target_resources(struct qla_hw_data *ha)
6443{
6444	struct scsi_qla_host *node;
6445	u32 key = 0;
6446
6447	btree_for_each_safe32(&ha->tgt.host_map, key, node)
6448		btree_remove32(&ha->tgt.host_map, key);
6449
6450	btree_destroy32(&ha->tgt.host_map);
6451}
6452
6453static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
6454	unsigned char *b)
6455{
6456	pr_debug("qla2xxx HW vha->node_name: %8phC\n", vha->node_name);
6457	pr_debug("qla2xxx HW vha->port_name: %8phC\n", vha->port_name);
6458	put_unaligned_be64(wwpn, b);
6459	pr_debug("qla2xxx passed configfs WWPN: %8phC\n", b);
6460}
6461
6462/**
 * qlt_lport_register - register lport with external module
6464 *
6465 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
6466 * @phys_wwpn: physical port WWPN
6467 * @npiv_wwpn: NPIV WWPN
6468 * @npiv_wwnn: NPIV WWNN
6469 * @callback:  lport initialization callback for tcm_qla2xxx code
6470 */
6471int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
6472		       u64 npiv_wwpn, u64 npiv_wwnn,
6473		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))
6474{
6475	struct qla_tgt *tgt;
6476	struct scsi_qla_host *vha;
6477	struct qla_hw_data *ha;
6478	struct Scsi_Host *host;
6479	unsigned long flags;
6480	int rc;
6481	u8 b[WWN_SIZE];
6482
6483	mutex_lock(&qla_tgt_mutex);
6484	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
6485		vha = tgt->vha;
6486		ha = vha->hw;
6487
6488		host = vha->host;
6489		if (!host)
6490			continue;
6491
6492		if (!(host->hostt->supported_mode & MODE_TARGET))
6493			continue;
6494
6495		if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
6496			continue;
6497
6498		spin_lock_irqsave(&ha->hardware_lock, flags);
6499		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
6500			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
6501			    host->host_no);
6502			spin_unlock_irqrestore(&ha->hardware_lock, flags);
6503			continue;
6504		}
6505		if (tgt->tgt_stop) {
6506			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
6507				 host->host_no);
6508			spin_unlock_irqrestore(&ha->hardware_lock, flags);
6509			continue;
6510		}
6511		spin_unlock_irqrestore(&ha->hardware_lock, flags);
6512
6513		if (!scsi_host_get(host)) {
6514			ql_dbg(ql_dbg_tgt, vha, 0xe068,
6515			    "Unable to scsi_host_get() for"
6516			    " qla2xxx scsi_host\n");
6517			continue;
6518		}
6519		qlt_lport_dump(vha, phys_wwpn, b);
6520
6521		if (memcmp(vha->port_name, b, WWN_SIZE)) {
6522			scsi_host_put(host);
6523			continue;
6524		}
6525		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
6526		if (rc != 0)
6527			scsi_host_put(host);
6528
6529		mutex_unlock(&qla_tgt_mutex);
6530		return rc;
6531	}
6532	mutex_unlock(&qla_tgt_mutex);
6533
6534	return -ENODEV;
6535}
6536EXPORT_SYMBOL(qlt_lport_register);
6537
6538/**
 * qlt_lport_deregister - Deregister lport
6540 *
6541 * @vha:  Registered scsi_qla_host pointer
6542 */
6543void qlt_lport_deregister(struct scsi_qla_host *vha)
6544{
6545	struct qla_hw_data *ha = vha->hw;
6546	struct Scsi_Host *sh = vha->host;
6547	/*
6548	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
6549	 */
6550	vha->vha_tgt.target_lport_ptr = NULL;
6551	ha->tgt.tgt_ops = NULL;
6552	/*
6553	 * Release the Scsi_Host reference for the underlying qla2xxx host
6554	 */
6555	scsi_host_put(sh);
6556}
6557EXPORT_SYMBOL(qlt_lport_deregister);
6558
6559/* Must be called under HW lock */
6560void qlt_set_mode(struct scsi_qla_host *vha)
6561{
6562	switch (vha->qlini_mode) {
6563	case QLA2XXX_INI_MODE_DISABLED:
6564	case QLA2XXX_INI_MODE_EXCLUSIVE:
6565		vha->host->active_mode = MODE_TARGET;
6566		break;
6567	case QLA2XXX_INI_MODE_ENABLED:
6568		vha->host->active_mode = MODE_INITIATOR;
6569		break;
6570	case QLA2XXX_INI_MODE_DUAL:
6571		vha->host->active_mode = MODE_DUAL;
6572		break;
6573	default:
6574		break;
6575	}
6576}
6577
6578/* Must be called under HW lock */
6579static void qlt_clear_mode(struct scsi_qla_host *vha)
6580{
6581	switch (vha->qlini_mode) {
6582	case QLA2XXX_INI_MODE_DISABLED:
6583		vha->host->active_mode = MODE_UNKNOWN;
6584		break;
6585	case QLA2XXX_INI_MODE_EXCLUSIVE:
6586		vha->host->active_mode = MODE_INITIATOR;
6587		break;
6588	case QLA2XXX_INI_MODE_ENABLED:
6589	case QLA2XXX_INI_MODE_DUAL:
6590		vha->host->active_mode = MODE_INITIATOR;
6591		break;
6592	default:
6593		break;
6594	}
6595}
6596
6597/*
 * qlt_enable_vha - NO LOCK HELD
6599 *
6600 * host_reset, bring up w/ Target Mode Enabled
6601 */
6602void
6603qlt_enable_vha(struct scsi_qla_host *vha)
6604{
6605	struct qla_hw_data *ha = vha->hw;
6606	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6607	unsigned long flags;
6608	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
6609
6610	if (!tgt) {
6611		ql_dbg(ql_dbg_tgt, vha, 0xe069,
6612		    "Unable to locate qla_tgt pointer from"
6613		    " struct qla_hw_data\n");
6614		dump_stack();
6615		return;
6616	}
6617	if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
6618		return;
6619
6620	if (ha->tgt.num_act_qpairs > ha->max_qpairs)
6621		ha->tgt.num_act_qpairs = ha->max_qpairs;
6622	spin_lock_irqsave(&ha->hardware_lock, flags);
6623	tgt->tgt_stopped = 0;
6624	qlt_set_mode(vha);
6625	spin_unlock_irqrestore(&ha->hardware_lock, flags);
6626
6627	mutex_lock(&ha->optrom_mutex);
6628	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
6629	    "%s.\n", __func__);
6630	if (vha->vp_idx) {
6631		qla24xx_disable_vp(vha);
6632		qla24xx_enable_vp(vha);
6633	} else {
6634		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
6635		qla2xxx_wake_dpc(base_vha);
6636		WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) !=
6637			     QLA_SUCCESS);
6638	}
6639	mutex_unlock(&ha->optrom_mutex);
6640}
6641EXPORT_SYMBOL(qlt_enable_vha);
6642
6643/*
 * qlt_disable_vha - NO LOCK HELD
6645 *
6646 * Disable Target Mode and reset the adapter
6647 */
6648static void qlt_disable_vha(struct scsi_qla_host *vha)
6649{
6650	struct qla_hw_data *ha = vha->hw;
6651	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6652	unsigned long flags;
6653
6654	if (!tgt) {
6655		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
6656		    "Unable to locate qla_tgt pointer from"
6657		    " struct qla_hw_data\n");
6658		dump_stack();
6659		return;
6660	}
6661
6662	spin_lock_irqsave(&ha->hardware_lock, flags);
6663	qlt_clear_mode(vha);
6664	spin_unlock_irqrestore(&ha->hardware_lock, flags);
6665
6666	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
6667	qla2xxx_wake_dpc(vha);
6668
6669	/*
6670	 * We are expecting the offline state.
6671	 * QLA_FUNCTION_FAILED means that adapter is offline.
6672	 */
6673	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
6674		ql_dbg(ql_dbg_tgt, vha, 0xe081,
6675		       "adapter is offline\n");
6676}
6677
6678/*
 * Called from qla_init.c:qla24xx_vport_create() context to set up
6680 * the target mode specific struct scsi_qla_host and struct qla_hw_data
6681 * members.
6682 */
6683void
6684qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
6685{
6686	vha->vha_tgt.qla_tgt = NULL;
6687
6688	mutex_init(&vha->vha_tgt.tgt_mutex);
6689	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);
6690
6691	qlt_clear_mode(vha);
6692
6693	/*
6694	 * NOTE: Currently the value is kept the same for <24xx and
6695	 * >=24xx ISPs. If it is necessary to change it,
6696	 * the check should be added for specific ISPs,
6697	 * assigning the value appropriately.
6698	 */
6699	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
6700
6701	qlt_add_target(ha, vha);
6702}
6703
6704u8
6705qlt_rff_id(struct scsi_qla_host *vha)
6706{
	u8 fc4_feature = 0;
	/*
	 * FC-4 Feature bit 0 advertises target functionality to the name
	 * server; bit 1 advertises initiator functionality.
	 */
	if (qla_tgt_mode_enabled(vha))
		fc4_feature = BIT_0;
	else if (qla_ini_mode_enabled(vha))
		fc4_feature = BIT_1;
	else if (qla_dual_mode_enabled(vha))
		fc4_feature = BIT_0 | BIT_1;
6717
6718	return fc4_feature;
6719}
6720
6721/*
6722 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
6723 * @ha: HA context
6724 *
6725 * Beginning of ATIO ring has initialization control block already built
6726 * by nvram config routine.
6727 *
6728 * Returns 0 on success.
6729 */
6730void
6731qlt_init_atio_q_entries(struct scsi_qla_host *vha)
6732{
6733	struct qla_hw_data *ha = vha->hw;
6734	uint16_t cnt;
6735	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
6736
6737	if (qla_ini_mode_enabled(vha))
6738		return;
6739
6740	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
6741		pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
6742		pkt++;
6743	}
6744
6745}
6746
6747/*
6748 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
6749 * @ha: SCSI driver HA context
6750 */
6751void
6752qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
6753{
6754	struct qla_hw_data *ha = vha->hw;
6755	struct atio_from_isp *pkt;
6756	int cnt, i;
6757
6758	if (!ha->flags.fw_started)
6759		return;
6760
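	/*
	 * Walk the ring until the next slot's signature reads
	 * ATIO_PROCESSED.  A frame with a corrupted FCP header cannot be
	 * trusted, so it is terminated rather than passed up the stack.
	 */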
6761	while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
6762	    fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
6763		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6764		cnt = pkt->u.raw.entry_count;
6765
6766		if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
6767			/*
6768			 * This packet is corrupted. The header + payload
6769			 * can not be trusted. There is no point in passing
6770			 * it further up.
6771			 */
6772			ql_log(ql_log_warn, vha, 0xd03c,
6773			    "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
6774			    &pkt->u.isp24.fcp_hdr.s_id,
6775			    be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
6776			    pkt->u.isp24.exchange_addr, pkt);
6777
6778			adjust_corrupted_atio(pkt);
6779			qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
6780			    ha_locked, 0);
6781		} else {
6782			qlt_24xx_atio_pkt_all_vps(vha,
6783			    (struct atio_from_isp *)pkt, ha_locked);
6784		}
6785
6786		for (i = 0; i < cnt; i++) {
6787			ha->tgt.atio_ring_index++;
6788			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
6789				ha->tgt.atio_ring_index = 0;
6790				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
6791			} else
6792				ha->tgt.atio_ring_ptr++;
6793
6794			pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
6795			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6796		}
6797		wmb();
6798	}
6799
6800	/* Adjust ring index */
6801	wrt_reg_dword(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
6802}
6803
6804void
6805qlt_24xx_config_rings(struct scsi_qla_host *vha)
6806{
6807	struct qla_hw_data *ha = vha->hw;
6808	struct qla_msix_entry *msix = &ha->msix_entries[2];
6809	struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
6810
6811	if (!QLA_TGT_MODE_ENABLED())
6812		return;
6813
6814	wrt_reg_dword(ISP_ATIO_Q_IN(vha), 0);
6815	wrt_reg_dword(ISP_ATIO_Q_OUT(vha), 0);
6816	rd_reg_dword(ISP_ATIO_Q_OUT(vha));
6817
6818	if (ha->flags.msix_enabled) {
6819		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
6820			icb->msix_atio = cpu_to_le16(msix->entry);
6821			icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
6822			ql_dbg(ql_dbg_init, vha, 0xf072,
6823			    "Registering ICB vector 0x%x for atio que.\n",
6824			    msix->entry);
6825		}
6826	} else {
6827		/* INTx|MSI */
6828		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
6829			icb->msix_atio = 0;
6830			icb->firmware_options_2 |= cpu_to_le32(BIT_26);
6831			ql_dbg(ql_dbg_init, vha, 0xf072,
6832			    "%s: Use INTx for ATIOQ.\n", __func__);
6833		}
6834	}
6835}
6836
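/*
 * Adjust the 24xx NVRAM firmware options for target/dual mode.  The
 * original values are saved once (ha->tgt.saved_set) so they can be
 * restored when target mode is disabled again.
 */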
6837void
6838qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
6839{
6840	struct qla_hw_data *ha = vha->hw;
6841	u32 tmp;
6842
6843	if (!QLA_TGT_MODE_ENABLED())
6844		return;
6845
6846	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
6847		if (!ha->tgt.saved_set) {
6848			/* We save only once */
6849			ha->tgt.saved_exchange_count = nv->exchange_count;
6850			ha->tgt.saved_firmware_options_1 =
6851			    nv->firmware_options_1;
6852			ha->tgt.saved_firmware_options_2 =
6853			    nv->firmware_options_2;
6854			ha->tgt.saved_firmware_options_3 =
6855			    nv->firmware_options_3;
6856			ha->tgt.saved_set = 1;
6857		}
6858
6859		if (qla_tgt_mode_enabled(vha))
6860			nv->exchange_count = cpu_to_le16(0xFFFF);
6861		else			/* dual */
6862			nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
6863
6864		/* Enable target mode */
6865		nv->firmware_options_1 |= cpu_to_le32(BIT_4);
6866
6867		/* Disable ini mode, if requested */
6868		if (qla_tgt_mode_enabled(vha))
6869			nv->firmware_options_1 |= cpu_to_le32(BIT_5);
6870
6871		/* Disable Full Login after LIP */
6872		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
6873		/* Enable initial LIP */
6874		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
6875		if (ql2xtgt_tape_enable)
6876			/* Enable FC Tape support */
6877			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
6878		else
6879			/* Disable FC Tape support */
6880			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
6881
6882		/* Disable Full Login after LIP */
6883		nv->host_p &= cpu_to_le32(~BIT_10);
6884
6885		/*
6886		 * clear BIT 15 explicitly as we have seen at least
6887		 * a couple of instances where this was set and this
6888		 * was causing the firmware to not be initialized.
6889		 */
6890		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
6891		/* Enable target PRLI control */
6892		nv->firmware_options_2 |= cpu_to_le32(BIT_14);
6893
6894		if (IS_QLA25XX(ha)) {
6895			/* Change Loop-prefer to Pt-Pt */
6896			tmp = ~(BIT_4|BIT_5|BIT_6);
6897			nv->firmware_options_2 &= cpu_to_le32(tmp);
6898			tmp = P2P << 4;
6899			nv->firmware_options_2 |= cpu_to_le32(tmp);
6900		}
6901	} else {
6902		if (ha->tgt.saved_set) {
6903			nv->exchange_count = ha->tgt.saved_exchange_count;
6904			nv->firmware_options_1 =
6905			    ha->tgt.saved_firmware_options_1;
6906			nv->firmware_options_2 =
6907			    ha->tgt.saved_firmware_options_2;
6908			nv->firmware_options_3 =
6909			    ha->tgt.saved_firmware_options_3;
6910		}
6911		return;
6912	}
6913
6914	if (ha->base_qpair->enable_class_2) {
6915		if (vha->flags.init_done)
6916			fc_host_supported_classes(vha->host) =
6917				FC_COS_CLASS2 | FC_COS_CLASS3;
6918
6919		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
6920	} else {
6921		if (vha->flags.init_done)
6922			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
6923
6924		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
6925	}
6926}
6927
6928void
6929qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
6930	struct init_cb_24xx *icb)
6931{
6932	struct qla_hw_data *ha = vha->hw;
6933
6934	if (!QLA_TGT_MODE_ENABLED())
6935		return;
6936
6937	if (ha->tgt.node_name_set) {
6938		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
6939		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
6940	}
6941}
6942
6943void
6944qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
6945{
6946	struct qla_hw_data *ha = vha->hw;
6947	u32 tmp;
6948
6949	if (!QLA_TGT_MODE_ENABLED())
6950		return;
6951
6952	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
6953		if (!ha->tgt.saved_set) {
6954			/* We save only once */
6955			ha->tgt.saved_exchange_count = nv->exchange_count;
6956			ha->tgt.saved_firmware_options_1 =
6957			    nv->firmware_options_1;
6958			ha->tgt.saved_firmware_options_2 =
6959			    nv->firmware_options_2;
6960			ha->tgt.saved_firmware_options_3 =
6961			    nv->firmware_options_3;
6962			ha->tgt.saved_set = 1;
6963		}
6964
6965		if (qla_tgt_mode_enabled(vha))
6966			nv->exchange_count = cpu_to_le16(0xFFFF);
6967		else			/* dual */
6968			nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
6969
6970		/* Enable target mode */
6971		nv->firmware_options_1 |= cpu_to_le32(BIT_4);
6972
6973		/* Disable ini mode, if requested */
6974		if (qla_tgt_mode_enabled(vha))
6975			nv->firmware_options_1 |= cpu_to_le32(BIT_5);
6976		/* Disable Full Login after LIP */
6977		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
6978		/* Enable initial LIP */
6979		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
6980		/*
6981		 * clear BIT 15 explicitly as we have seen at
6982		 * least a couple of instances where this was set
6983		 * and this was causing the firmware to not be
6984		 * initialized.
6985		 */
6986		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
6987		if (ql2xtgt_tape_enable)
6988			/* Enable FC tape support */
6989			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
6990		else
6991			/* Disable FC tape support */
6992			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
6993
6994		/* Disable Full Login after LIP */
6995		nv->host_p &= cpu_to_le32(~BIT_10);
6996		/* Enable target PRLI control */
6997		nv->firmware_options_2 |= cpu_to_le32(BIT_14);
6998
6999		/* Change Loop-prefer to Pt-Pt */
7000		tmp = ~(BIT_4|BIT_5|BIT_6);
7001		nv->firmware_options_2 &= cpu_to_le32(tmp);
7002		tmp = P2P << 4;
7003		nv->firmware_options_2 |= cpu_to_le32(tmp);
7004	} else {
7005		if (ha->tgt.saved_set) {
7006			nv->exchange_count = ha->tgt.saved_exchange_count;
7007			nv->firmware_options_1 =
7008			    ha->tgt.saved_firmware_options_1;
7009			nv->firmware_options_2 =
7010			    ha->tgt.saved_firmware_options_2;
7011			nv->firmware_options_3 =
7012			    ha->tgt.saved_firmware_options_3;
7013		}
7014		return;
7015	}
7016
7017	if (ha->base_qpair->enable_class_2) {
7018		if (vha->flags.init_done)
7019			fc_host_supported_classes(vha->host) =
7020				FC_COS_CLASS2 | FC_COS_CLASS3;
7021
7022		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
7023	} else {
7024		if (vha->flags.init_done)
7025			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
7026
7027		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
7028	}
7029}
7030
7031void
7032qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
7033	struct init_cb_81xx *icb)
7034{
7035	struct qla_hw_data *ha = vha->hw;
7036
7037	if (!QLA_TGT_MODE_ENABLED())
7038		return;
7039
7040	if (ha->tgt.node_name_set) {
7041		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
7042		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
7043	}
7044}
7045
7046void
7047qlt_83xx_iospace_config(struct qla_hw_data *ha)
7048{
7049	if (!QLA_TGT_MODE_ENABLED())
7050		return;
7051
7052	ha->msix_count += 1; /* For ATIO Q */
7053}
7054
7055
7056void
7057qlt_modify_vp_config(struct scsi_qla_host *vha,
7058	struct vp_config_entry_24xx *vpmod)
7059{
7060	/* enable target mode.  Bit5 = 1 => disable */
7061	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
7062		vpmod->options_idx1 &= ~BIT_5;
7063
7064	/* Disable ini mode, if requested.  bit4 = 1 => disable */
7065	if (qla_tgt_mode_enabled(vha))
7066		vpmod->options_idx1 &= ~BIT_4;
7067}
7068
7069void
7070qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
7071{
7072	int rc;
7073
7074	if (!QLA_TGT_MODE_ENABLED())
7075		return;
7076
7077	if  (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
7078		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
7079		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
7080	} else {
7081		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
7082		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
7083	}
7084
7085	mutex_init(&base_vha->vha_tgt.tgt_mutex);
7086	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
7087
7088	INIT_LIST_HEAD(&base_vha->unknown_atio_list);
7089	INIT_DELAYED_WORK(&base_vha->unknown_atio_work,
7090	    qlt_unknown_atio_work_fn);
7091
7092	qlt_clear_mode(base_vha);
7093
7094	rc = btree_init32(&ha->tgt.host_map);
7095	if (rc)
7096		ql_log(ql_log_info, base_vha, 0xd03d,
7097		    "Unable to initialize ha->host_map btree\n");
7098
7099	qlt_update_vp_map(base_vha, SET_VP_IDX);
7100}
7101
7102irqreturn_t
7103qla83xx_msix_atio_q(int irq, void *dev_id)
7104{
7105	struct rsp_que *rsp;
7106	scsi_qla_host_t	*vha;
7107	struct qla_hw_data *ha;
7108	unsigned long flags;
7109
7110	rsp = (struct rsp_que *) dev_id;
7111	ha = rsp->hw;
7112	vha = pci_get_drvdata(ha->pdev);
7113
7114	spin_lock_irqsave(&ha->tgt.atio_lock, flags);
7115
7116	qlt_24xx_process_atio_queue(vha, 0);
7117
7118	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
7119
7120	return IRQ_HANDLED;
7121}
7122
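/*
 * Worker queued by qlt_handle_abts_recv(): drain the ATIO queue first so
 * the saved ABTS is not processed ahead of commands still sitting there,
 * then replay the saved response packet.  Skipped entirely if a chip
 * reset happened since the packet was captured.
 */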
7123static void
7124qlt_handle_abts_recv_work(struct work_struct *work)
7125{
7126	struct qla_tgt_sess_op *op = container_of(work,
7127		struct qla_tgt_sess_op, work);
7128	scsi_qla_host_t *vha = op->vha;
7129	struct qla_hw_data *ha = vha->hw;
7130	unsigned long flags;
7131
7132	if (qla2x00_reset_active(vha) ||
7133	    (op->chip_reset != ha->base_qpair->chip_reset))
7134		return;
7135
7136	spin_lock_irqsave(&ha->tgt.atio_lock, flags);
7137	qlt_24xx_process_atio_queue(vha, 0);
7138	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
7139
7140	spin_lock_irqsave(&ha->hardware_lock, flags);
7141	qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio);
7142	spin_unlock_irqrestore(&ha->hardware_lock, flags);
7143
7144	kfree(op);
7145}
7146
7147void
7148qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp,
7149    response_t *pkt)
7150{
7151	struct qla_tgt_sess_op *op;
7152
7153	op = kzalloc(sizeof(*op), GFP_ATOMIC);
7154
7155	if (!op) {
		/* Do not touch the ATIO queue here; this is best-effort
		 * error recovery at this point.
		 */
7159		qlt_response_pkt_all_vps(vha, rsp, pkt);
7160		return;
7161	}
7162
7163	memcpy(&op->atio, pkt, sizeof(*pkt));
7164	op->vha = vha;
7165	op->chip_reset = vha->hw->base_qpair->chip_reset;
7166	op->rsp = rsp;
7167	INIT_WORK(&op->work, qlt_handle_abts_recv_work);
7168	queue_work(qla_tgt_wq, &op->work);
7170}
7171
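/*
 * Allocate per-HA target-mode resources: the vp_idx -> vha map and the
 * DMA-coherent ATIO ring (atio_q_length + 1 entries).
 */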
7172int
7173qlt_mem_alloc(struct qla_hw_data *ha)
7174{
7175	if (!QLA_TGT_MODE_ENABLED())
7176		return 0;
7177
7178	ha->tgt.tgt_vp_map = kcalloc(MAX_MULTI_ID_FABRIC,
7179				     sizeof(struct qla_tgt_vp_map),
7180				     GFP_KERNEL);
7181	if (!ha->tgt.tgt_vp_map)
7182		return -ENOMEM;
7183
7184	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
7185	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
7186	    &ha->tgt.atio_dma, GFP_KERNEL);
7187	if (!ha->tgt.atio_ring) {
7188		kfree(ha->tgt.tgt_vp_map);
7189		return -ENOMEM;
7190	}
7191	return 0;
7192}
7193
7194void
7195qlt_mem_free(struct qla_hw_data *ha)
7196{
7197	if (!QLA_TGT_MODE_ENABLED())
7198		return;
7199
7200	if (ha->tgt.atio_ring) {
7201		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
7202		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
7203		    ha->tgt.atio_dma);
7204	}
7205	ha->tgt.atio_ring = NULL;
7206	ha->tgt.atio_dma = 0;
7207	kfree(ha->tgt.tgt_vp_map);
7208	ha->tgt.tgt_vp_map = NULL;
7209}
7210
7211/* vport_slock to be held by the caller */
7212void
7213qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
7214{
7215	void *slot;
7216	u32 key;
7217	int rc;
7218
7219	if (!QLA_TGT_MODE_ENABLED())
7220		return;
7221
7222	key = vha->d_id.b24;
7223
7224	switch (cmd) {
7225	case SET_VP_IDX:
7226		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
7227		break;
7228	case SET_AL_PA:
7229		slot = btree_lookup32(&vha->hw->tgt.host_map, key);
7230		if (!slot) {
7231			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
7232			    "Save vha in host_map %p %06x\n", vha, key);
7233			rc = btree_insert32(&vha->hw->tgt.host_map,
7234				key, vha, GFP_ATOMIC);
7235			if (rc)
7236				ql_log(ql_log_info, vha, 0xd03e,
7237				    "Unable to insert s_id into host_map: %06x\n",
7238				    key);
7239			return;
7240		}
7241		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
7242		    "replace existing vha in host_map %p %06x\n", vha, key);
7243		btree_update32(&vha->hw->tgt.host_map, key, vha);
7244		break;
7245	case RESET_VP_IDX:
7246		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
7247		break;
7248	case RESET_AL_PA:
7249		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
7250		   "clear vha in host_map %p %06x\n", vha, key);
7251		slot = btree_lookup32(&vha->hw->tgt.host_map, key);
7252		if (slot)
7253			btree_remove32(&vha->hw->tgt.host_map, key);
7254		vha->d_id.b24 = 0;
7255		break;
7256	}
7257}
7258
7259void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
7260{
7261
7262	if (!vha->d_id.b24) {
7263		vha->d_id = id;
7264		qlt_update_vp_map(vha, SET_AL_PA);
7265	} else if (vha->d_id.b24 != id.b24) {
7266		qlt_update_vp_map(vha, RESET_AL_PA);
7267		vha->d_id = id;
7268		qlt_update_vp_map(vha, SET_AL_PA);
7269	}
7270}
7271
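/*
 * Map the qlini_mode module parameter string onto ql2x_ini_mode;
 * returns false for an unrecognized value.
 */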
7272static int __init qlt_parse_ini_mode(void)
7273{
7274	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
7275		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
7276	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
7277		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
7278	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
7279		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
7280	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0)
7281		ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL;
7282	else
7283		return false;
7284
7285	return true;
7286}
7287
7288int __init qlt_init(void)
7289{
7290	int ret;
7291
7292	BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64);
7293	BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64);
7294
7295	if (!qlt_parse_ini_mode()) {
7296		ql_log(ql_log_fatal, NULL, 0xe06b,
7297		    "qlt_parse_ini_mode() failed\n");
7298		return -EINVAL;
7299	}
7300
7301	if (!QLA_TGT_MODE_ENABLED())
7302		return 0;
7303
7304	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
7305	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
7306	    qla_tgt_mgmt_cmd), 0, NULL);
7307	if (!qla_tgt_mgmt_cmd_cachep) {
7308		ql_log(ql_log_fatal, NULL, 0xd04b,
7309		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
7310		return -ENOMEM;
7311	}
7312
7313	qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
7314	    sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t),
7315	    0, NULL);
7316
7317	if (!qla_tgt_plogi_cachep) {
7318		ql_log(ql_log_fatal, NULL, 0xe06d,
7319		    "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
7320		ret = -ENOMEM;
7321		goto out_mgmt_cmd_cachep;
7322	}
7323
7324	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
7325	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
7326	if (!qla_tgt_mgmt_cmd_mempool) {
7327		ql_log(ql_log_fatal, NULL, 0xe06e,
7328		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
7329		ret = -ENOMEM;
7330		goto out_plogi_cachep;
7331	}
7332
7333	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
7334	if (!qla_tgt_wq) {
7335		ql_log(ql_log_fatal, NULL, 0xe06f,
7336		    "alloc_workqueue for qla_tgt_wq failed\n");
7337		ret = -ENOMEM;
7338		goto out_cmd_mempool;
7339	}
7340	/*
7341	 * Return 1 to signal that initiator-mode is being disabled
7342	 */
7343	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;
7344
7345out_cmd_mempool:
7346	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
7347out_plogi_cachep:
7348	kmem_cache_destroy(qla_tgt_plogi_cachep);
7349out_mgmt_cmd_cachep:
7350	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
7351	return ret;
7352}
7353
7354void qlt_exit(void)
7355{
7356	if (!QLA_TGT_MODE_ENABLED())
7357		return;
7358
7359	destroy_workqueue(qla_tgt_wq);
7360	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
7361	kmem_cache_destroy(qla_tgt_plogi_cachep);
7362	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
7363}
7364