1/* bnx2fc_io.c: QLogic Linux FCoE offload driver.
2 * IO manager and SCSI IO processing.
3 *
4 * Copyright (c) 2008-2013 Broadcom Corporation
5 * Copyright (c) 2014-2016 QLogic Corporation
6 * Copyright (c) 2016-2017 Cavium Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation.
11 *
12 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
13 */
14
15#include "bnx2fc.h"
16
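/*
 * Index of the reserved free list used for slow path commands (ELS, task
 * management, ABTS, cleanup); the per-CPU free lists occupy indices
 * 0 .. num_possible_cpus() - 1.
 */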
17#define RESERVE_FREE_LIST_INDEX num_possible_cpus()
18
19static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
20			   int bd_index);
21static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
22static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
23static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
24static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
25static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
26				 struct fcoe_fcp_rsp_payload *fcp_rsp,
27				 u8 num_rq, unsigned char *rq_data);
28
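/*
 * Arm the per-command timeout. A reference on the io_req is taken only if
 * the delayed work was actually queued; it is dropped again in
 * bnx2fc_cmd_timeout() or by whoever cancels the work.
 */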
29void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
30			  unsigned int timer_msec)
31{
32	struct bnx2fc_interface *interface = io_req->port->priv;
33
34	if (queue_delayed_work(interface->timer_work_queue,
35			       &io_req->timeout_work,
36			       msecs_to_jiffies(timer_msec)))
37		kref_get(&io_req->refcount);
38}
39
40static void bnx2fc_cmd_timeout(struct work_struct *work)
41{
42	struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
43						 timeout_work.work);
44	u8 cmd_type = io_req->cmd_type;
45	struct bnx2fc_rport *tgt = io_req->tgt;
46	int rc;
47
48	BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d, "
49		      "req_flags = %lx\n", cmd_type, io_req->req_flags);
50
51	spin_lock_bh(&tgt->tgt_lock);
52	if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
53		clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
54		/*
55		 * Ideally we should hold the io_req until the RRQ completes,
56		 * and release io_req from timeout hold.
57		 */
58		spin_unlock_bh(&tgt->tgt_lock);
59		bnx2fc_send_rrq(io_req);
60		return;
61	}
62	if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
63		BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
64		goto done;
65	}
66
67	switch (cmd_type) {
68	case BNX2FC_SCSI_CMD:
69		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
70							&io_req->req_flags)) {
71			/* Handle eh_abort timeout */
72			BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
73			complete(&io_req->abts_done);
74		} else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
75				    &io_req->req_flags)) {
76			/* Handle internally generated ABTS timeout */
77			BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
78					kref_read(&io_req->refcount));
79			if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
80					       &io_req->req_flags))) {
81				/*
82				 * Cleanup and return original command to
83				 * mid-layer.
84				 */
85				bnx2fc_initiate_cleanup(io_req);
86				kref_put(&io_req->refcount, bnx2fc_cmd_release);
87				spin_unlock_bh(&tgt->tgt_lock);
88
89				return;
90			}
91		} else {
92			/* Handle IO timeout */
93			BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n");
94			if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL,
95					     &io_req->req_flags)) {
96				BNX2FC_IO_DBG(io_req, "IO completed before "
97						      "timer expiry\n");
98				goto done;
99			}
100
101			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
102					      &io_req->req_flags)) {
103				rc = bnx2fc_initiate_abts(io_req);
104				if (rc == SUCCESS)
105					goto done;
106
107				kref_put(&io_req->refcount, bnx2fc_cmd_release);
108				spin_unlock_bh(&tgt->tgt_lock);
109
110				return;
111			} else {
112				BNX2FC_IO_DBG(io_req, "IO already in "
113						      "ABTS processing\n");
114			}
115		}
116		break;
117	case BNX2FC_ELS:
118
119		if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
120			BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");
121
122			if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
123					      &io_req->req_flags)) {
124				kref_put(&io_req->refcount, bnx2fc_cmd_release);
125				spin_unlock_bh(&tgt->tgt_lock);
126
127				return;
128			}
129		} else {
130			/*
131			 * Handle ELS timeout.
132			 * tgt_lock is used to sync compl path and timeout
133			 * path. If els compl path is processing this IO, we
134			 * have nothing to do here, just release the timer hold
135			 */
136			BNX2FC_IO_DBG(io_req, "ELS timed out\n");
137			if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
138					       &io_req->req_flags))
139				goto done;
140
141			/* Indicate the cb_func that this ELS is timed out */
142			set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags);
143
144			if ((io_req->cb_func) && (io_req->cb_arg)) {
145				io_req->cb_func(io_req->cb_arg);
146				io_req->cb_arg = NULL;
147			}
148		}
149		break;
150	default:
151		printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n",
152			cmd_type);
153		break;
154	}
155
156done:
157	/* release the cmd that was held when timer was set */
158	kref_put(&io_req->refcount, bnx2fc_cmd_release);
159	spin_unlock_bh(&tgt->tgt_lock);
160}
161
162static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
163{
164	/* Called with host lock held */
165	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
166
167	/*
168	 * active_cmd_queue may have other command types as well,
169	 * and during flush operation,  we want to error back only
170	 * scsi commands.
171	 */
172	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
173		return;
174
175	BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
176	if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) {
177		/* Do not call scsi done for this IO */
178		return;
179	}
180
181	bnx2fc_unmap_sg_list(io_req);
182	io_req->sc_cmd = NULL;
183
184	/* Sanity checks before returning command to mid-layer */
185	if (!sc_cmd) {
186		printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
187				    "IO(0x%x) already cleaned up\n",
188		       io_req->xid);
189		return;
190	}
191	if (!sc_cmd->device) {
192		pr_err(PFX "0x%x: sc_cmd->device is NULL.\n", io_req->xid);
193		return;
194	}
195	if (!sc_cmd->device->host) {
196		pr_err(PFX "0x%x: sc_cmd->device->host is NULL.\n",
197		    io_req->xid);
198		return;
199	}
200
201	sc_cmd->result = err_code << 16;
202
203	BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n",
204		sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
205		sc_cmd->allowed);
206	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
207	sc_cmd->SCp.ptr = NULL;
208	sc_cmd->scsi_done(sc_cmd);
209}
210
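/*
 * Allocate the command manager: an xid-indexed command lookup table,
 * per-CPU free lists of pre-allocated bnx2fc_cmds (plus one reserved list
 * for slow path requests), and a DMA-coherent BD table for each command.
 */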
211struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
212{
213	struct bnx2fc_cmd_mgr *cmgr;
214	struct io_bdt *bdt_info;
215	struct bnx2fc_cmd *io_req;
216	size_t len;
217	u32 mem_size;
218	u16 xid;
219	int i;
220	int num_ios, num_pri_ios;
221	size_t bd_tbl_sz;
222	int arr_sz = num_possible_cpus() + 1;
223	u16 min_xid = BNX2FC_MIN_XID;
224	u16 max_xid = hba->max_xid;
225
226	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
227		printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x "
228		       "and max_xid 0x%x\n", min_xid, max_xid);
229		return NULL;
230	}
231	BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid);
232
233	num_ios = max_xid - min_xid + 1;
234	len = (num_ios * (sizeof(struct bnx2fc_cmd *)));
235	len += sizeof(struct bnx2fc_cmd_mgr);
236
237	cmgr = kzalloc(len, GFP_KERNEL);
238	if (!cmgr) {
239		printk(KERN_ERR PFX "failed to alloc cmgr\n");
240		return NULL;
241	}
242
243	cmgr->hba = hba;
244	cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list),
245				  GFP_KERNEL);
246	if (!cmgr->free_list) {
247		printk(KERN_ERR PFX "failed to alloc free_list\n");
248		goto mem_err;
249	}
250
251	cmgr->free_list_lock = kcalloc(arr_sz, sizeof(*cmgr->free_list_lock),
252				       GFP_KERNEL);
253	if (!cmgr->free_list_lock) {
254		printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
255		kfree(cmgr->free_list);
256		cmgr->free_list = NULL;
257		goto mem_err;
258	}
259
260	cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
261
262	for (i = 0; i < arr_sz; i++)  {
263		INIT_LIST_HEAD(&cmgr->free_list[i]);
264		spin_lock_init(&cmgr->free_list_lock[i]);
265	}
266
267	/*
268	 * Pre-allocated pool of bnx2fc_cmds.
269	 * Last entry in the free list array is the free list
270	 * of slow path requests.
271	 */
272	xid = BNX2FC_MIN_XID;
273	num_pri_ios = num_ios - hba->elstm_xids;
274	for (i = 0; i < num_ios; i++) {
275		io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);
276
277		if (!io_req) {
278			printk(KERN_ERR PFX "failed to alloc io_req\n");
279			goto mem_err;
280		}
281
282		INIT_LIST_HEAD(&io_req->link);
283		INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);
284
285		io_req->xid = xid++;
286		if (i < num_pri_ios)
287			list_add_tail(&io_req->link,
288				&cmgr->free_list[io_req->xid %
289						 num_possible_cpus()]);
290		else
291			list_add_tail(&io_req->link,
292				&cmgr->free_list[num_possible_cpus()]);
293		io_req++;
294	}
295
296	/* Allocate pool of io_bdts - one for each bnx2fc_cmd */
297	mem_size = num_ios * sizeof(struct io_bdt *);
298	cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL);
299	if (!cmgr->io_bdt_pool) {
300		printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
301		goto mem_err;
302	}
303
304	mem_size = sizeof(struct io_bdt);
305	for (i = 0; i < num_ios; i++) {
306		cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL);
307		if (!cmgr->io_bdt_pool[i]) {
308			printk(KERN_ERR PFX "failed to alloc "
309				"io_bdt_pool[%d]\n", i);
310			goto mem_err;
311		}
312	}
313
314	/* Allocate and map fcoe_bd_ctx structures */
315	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
316	for (i = 0; i < num_ios; i++) {
317		bdt_info = cmgr->io_bdt_pool[i];
318		bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
319						      bd_tbl_sz,
320						      &bdt_info->bd_tbl_dma,
321						      GFP_KERNEL);
322		if (!bdt_info->bd_tbl) {
323			printk(KERN_ERR PFX "failed to alloc "
324				"bdt_tbl[%d]\n", i);
325			goto mem_err;
326		}
327	}
328
329	return cmgr;
330
331mem_err:
332	bnx2fc_cmd_mgr_free(cmgr);
333	return NULL;
334}
335
336void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
337{
338	struct io_bdt *bdt_info;
339	struct bnx2fc_hba *hba = cmgr->hba;
340	size_t bd_tbl_sz;
341	u16 min_xid = BNX2FC_MIN_XID;
342	u16 max_xid = hba->max_xid;
343	int num_ios;
344	int i;
345
346	num_ios = max_xid - min_xid + 1;
347
348	/* Free fcoe_bdt_ctx structures */
349	if (!cmgr->io_bdt_pool)
350		goto free_cmd_pool;
351
352	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
353	for (i = 0; i < num_ios; i++) {
354		bdt_info = cmgr->io_bdt_pool[i];
355		if (bdt_info->bd_tbl) {
356			dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
357					    bdt_info->bd_tbl,
358					    bdt_info->bd_tbl_dma);
359			bdt_info->bd_tbl = NULL;
360		}
361	}
362
363	/* Destroy io_bdt pool */
364	for (i = 0; i < num_ios; i++) {
365		kfree(cmgr->io_bdt_pool[i]);
366		cmgr->io_bdt_pool[i] = NULL;
367	}
368
369	kfree(cmgr->io_bdt_pool);
370	cmgr->io_bdt_pool = NULL;
371
372free_cmd_pool:
373	kfree(cmgr->free_list_lock);
374
375	/* Destroy cmd pool */
376	if (!cmgr->free_list)
377		goto free_cmgr;
378
379	for (i = 0; i < num_possible_cpus() + 1; i++)  {
380		struct bnx2fc_cmd *tmp, *io_req;
381
382		list_for_each_entry_safe(io_req, tmp,
383					 &cmgr->free_list[i], link) {
384			list_del(&io_req->link);
385			kfree(io_req);
386		}
387	}
388	kfree(cmgr->free_list);
389free_cmgr:
390	/* Free command manager itself */
391	kfree(cmgr);
392}
393
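/*
 * Allocate a command for slow path traffic (ELS, task management, ABTS,
 * sequence/exchange cleanup) from the reserved free list, enforcing the
 * per-type SQE limits.
 */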
394struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
395{
396	struct fcoe_port *port = tgt->port;
397	struct bnx2fc_interface *interface = port->priv;
398	struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
399	struct bnx2fc_cmd *io_req;
400	struct list_head *listp;
401	struct io_bdt *bd_tbl;
402	int index = RESERVE_FREE_LIST_INDEX;
403	u32 free_sqes;
404	u32 max_sqes;
405	u16 xid;
406
407	max_sqes = tgt->max_sqes;
408	switch (type) {
409	case BNX2FC_TASK_MGMT_CMD:
410		max_sqes = BNX2FC_TM_MAX_SQES;
411		break;
412	case BNX2FC_ELS:
413		max_sqes = BNX2FC_ELS_MAX_SQES;
414		break;
415	default:
416		break;
417	}
418
419	/*
420	 * NOTE: Free list insertions and deletions are protected with
421	 * cmgr lock
422	 */
423	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
424	free_sqes = atomic_read(&tgt->free_sqes);
425	if ((list_empty(&(cmd_mgr->free_list[index]))) ||
426	    (tgt->num_active_ios.counter  >= max_sqes) ||
427	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
428		BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
429			"ios(%d):sqes(%d)\n",
430			tgt->num_active_ios.counter, tgt->max_sqes);
431		if (list_empty(&(cmd_mgr->free_list[index])))
432			printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
433		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
434		return NULL;
435	}
436
437	listp = (struct list_head *)
438			cmd_mgr->free_list[index].next;
439	list_del_init(listp);
440	io_req = (struct bnx2fc_cmd *) listp;
441	xid = io_req->xid;
442	cmd_mgr->cmds[xid] = io_req;
443	atomic_inc(&tgt->num_active_ios);
444	atomic_dec(&tgt->free_sqes);
445	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
446
447	INIT_LIST_HEAD(&io_req->link);
448
449	io_req->port = port;
450	io_req->cmd_mgr = cmd_mgr;
451	io_req->req_flags = 0;
452	io_req->cmd_type = type;
453
454	/* Bind io_bdt for this io_req */
455	/* Have a static link between io_req and io_bdt_pool */
456	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
457	bd_tbl->io_req = io_req;
458
459	/* Hold the io_req  against deletion */
460	kref_init(&io_req->refcount);
461	return io_req;
462}
463
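/*
 * Allocate a command for fast path SCSI I/O from the free list of the
 * issuing CPU to reduce lock contention; bnx2fc_cmd_release() returns it
 * to the same list via xid % num_possible_cpus().
 */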
464struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
465{
466	struct fcoe_port *port = tgt->port;
467	struct bnx2fc_interface *interface = port->priv;
468	struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
469	struct bnx2fc_cmd *io_req;
470	struct list_head *listp;
471	struct io_bdt *bd_tbl;
472	u32 free_sqes;
473	u32 max_sqes;
474	u16 xid;
475	int index = get_cpu();
476
477	max_sqes = BNX2FC_SCSI_MAX_SQES;
478	/*
479	 * NOTE: Free list insertions and deletions are protected with
480	 * cmgr lock
481	 */
482	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
483	free_sqes = atomic_read(&tgt->free_sqes);
484	if ((list_empty(&cmd_mgr->free_list[index])) ||
485	    (tgt->num_active_ios.counter  >= max_sqes) ||
486	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
487		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
488		put_cpu();
489		return NULL;
490	}
491
492	listp = (struct list_head *)
493		cmd_mgr->free_list[index].next;
494	list_del_init(listp);
495	io_req = (struct bnx2fc_cmd *) listp;
496	xid = io_req->xid;
497	cmd_mgr->cmds[xid] = io_req;
498	atomic_inc(&tgt->num_active_ios);
499	atomic_dec(&tgt->free_sqes);
500	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
501	put_cpu();
502
503	INIT_LIST_HEAD(&io_req->link);
504
505	io_req->port = port;
506	io_req->cmd_mgr = cmd_mgr;
507	io_req->req_flags = 0;
508
509	/* Bind io_bdt for this io_req */
510	/* Have a static link between io_req and io_bdt_pool */
511	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
512	bd_tbl->io_req = io_req;
513
514	/* Hold the io_req  against deletion */
515	kref_init(&io_req->refcount);
516	return io_req;
517}
518
519void bnx2fc_cmd_release(struct kref *ref)
520{
521	struct bnx2fc_cmd *io_req = container_of(ref,
522						struct bnx2fc_cmd, refcount);
523	struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
524	int index;
525
526	if (io_req->cmd_type == BNX2FC_SCSI_CMD)
527		index = io_req->xid % num_possible_cpus();
528	else
529		index = RESERVE_FREE_LIST_INDEX;
530
531
532	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
533	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
534		bnx2fc_free_mp_resc(io_req);
535	cmd_mgr->cmds[io_req->xid] = NULL;
536	/* Delete IO from retire queue */
537	list_del_init(&io_req->link);
538	/* Add it to the free list */
539	list_add(&io_req->link,
540			&cmd_mgr->free_list[index]);
541	atomic_dec(&io_req->tgt->num_active_ios);
542	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
543
544}
545
546static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
547{
548	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
549	struct bnx2fc_interface *interface = io_req->port->priv;
550	struct bnx2fc_hba *hba = interface->hba;
551	size_t sz = sizeof(struct fcoe_bd_ctx);
552
553	/* clear tm flags */
554	mp_req->tm_flags = 0;
555	if (mp_req->mp_req_bd) {
556		dma_free_coherent(&hba->pcidev->dev, sz,
557				     mp_req->mp_req_bd,
558				     mp_req->mp_req_bd_dma);
559		mp_req->mp_req_bd = NULL;
560	}
561	if (mp_req->mp_resp_bd) {
562		dma_free_coherent(&hba->pcidev->dev, sz,
563				     mp_req->mp_resp_bd,
564				     mp_req->mp_resp_bd_dma);
565		mp_req->mp_resp_bd = NULL;
566	}
567	if (mp_req->req_buf) {
568		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
569				     mp_req->req_buf,
570				     mp_req->req_buf_dma);
571		mp_req->req_buf = NULL;
572	}
573	if (mp_req->resp_buf) {
574		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
575				     mp_req->resp_buf,
576				     mp_req->resp_buf_dma);
577		mp_req->resp_buf = NULL;
578	}
579}
580
581int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
582{
583	struct bnx2fc_mp_req *mp_req;
584	struct fcoe_bd_ctx *mp_req_bd;
585	struct fcoe_bd_ctx *mp_resp_bd;
586	struct bnx2fc_interface *interface = io_req->port->priv;
587	struct bnx2fc_hba *hba = interface->hba;
588	dma_addr_t addr;
589	size_t sz;
590
591	mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
592	memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));
593
594	if (io_req->cmd_type != BNX2FC_ELS) {
595		mp_req->req_len = sizeof(struct fcp_cmnd);
596		io_req->data_xfer_len = mp_req->req_len;
597	} else
598		mp_req->req_len = io_req->data_xfer_len;
599
600	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
601					     &mp_req->req_buf_dma,
602					     GFP_ATOMIC);
603	if (!mp_req->req_buf) {
604		printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
605		bnx2fc_free_mp_resc(io_req);
606		return FAILED;
607	}
608
609	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
610					      &mp_req->resp_buf_dma,
611					      GFP_ATOMIC);
612	if (!mp_req->resp_buf) {
613		printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
614		bnx2fc_free_mp_resc(io_req);
615		return FAILED;
616	}
617	memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE);
618	memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE);
619
620	/* Allocate and map mp_req_bd and mp_resp_bd */
621	sz = sizeof(struct fcoe_bd_ctx);
622	mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
623						 &mp_req->mp_req_bd_dma,
624						 GFP_ATOMIC);
625	if (!mp_req->mp_req_bd) {
626		printk(KERN_ERR PFX "unable to alloc MP req bd\n");
627		bnx2fc_free_mp_resc(io_req);
628		return FAILED;
629	}
630	mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
631						 &mp_req->mp_resp_bd_dma,
632						 GFP_ATOMIC);
633	if (!mp_req->mp_resp_bd) {
634		printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
635		bnx2fc_free_mp_resc(io_req);
636		return FAILED;
637	}
638	/* Fill bd table */
639	addr = mp_req->req_buf_dma;
640	mp_req_bd = mp_req->mp_req_bd;
641	mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
642	mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
643	mp_req_bd->buf_len = CNIC_PAGE_SIZE;
644	mp_req_bd->flags = 0;
645
646	/*
647	 * MP buffer is either a task mgmt command or an ELS.
648	 * So the assumption is that it consumes a single bd
649	 * entry in the bd table
650	 */
651	mp_resp_bd = mp_req->mp_resp_bd;
652	addr = mp_req->resp_buf_dma;
653	mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
654	mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
655	mp_resp_bd->buf_len = CNIC_PAGE_SIZE;
656	mp_resp_bd->flags = 0;
657
658	return SUCCESS;
659}
660
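/*
 * Build and post a task management request (LUN or target reset) and wait
 * for its completion. If the TM response does not arrive within the TM
 * timeout, the exchange is cleaned up through a firmware cleanup task
 * before control is returned to the SCSI error handler.
 */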
661static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
662{
663	struct fc_lport *lport;
664	struct fc_rport *rport;
665	struct fc_rport_libfc_priv *rp;
666	struct fcoe_port *port;
667	struct bnx2fc_interface *interface;
668	struct bnx2fc_rport *tgt;
669	struct bnx2fc_cmd *io_req;
670	struct bnx2fc_mp_req *tm_req;
671	struct fcoe_task_ctx_entry *task;
672	struct fcoe_task_ctx_entry *task_page;
673	struct Scsi_Host *host = sc_cmd->device->host;
674	struct fc_frame_header *fc_hdr;
675	struct fcp_cmnd *fcp_cmnd;
676	int task_idx, index;
677	int rc = SUCCESS;
678	u16 xid;
679	u32 sid, did;
680	unsigned long start = jiffies;
681
682	lport = shost_priv(host);
683	rport = starget_to_rport(scsi_target(sc_cmd->device));
684	port = lport_priv(lport);
685	interface = port->priv;
686
687	if (rport == NULL) {
688		printk(KERN_ERR PFX "device_reset: rport is NULL\n");
689		rc = FAILED;
690		goto tmf_err;
691	}
692	rp = rport->dd_data;
693
694	rc = fc_block_scsi_eh(sc_cmd);
695	if (rc)
696		return rc;
697
698	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
699		printk(KERN_ERR PFX "device_reset: link is not ready\n");
700		rc = FAILED;
701		goto tmf_err;
702	}
703	/* rport and tgt are allocated together, so tgt should be non-NULL */
704	tgt = (struct bnx2fc_rport *)&rp[1];
705
706	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
707		printk(KERN_ERR PFX "device_reset: tgt not offloaded\n");
708		rc = FAILED;
709		goto tmf_err;
710	}
711retry_tmf:
712	io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD);
713	if (!io_req) {
714		if (time_after(jiffies, start + HZ)) {
715			printk(KERN_ERR PFX "tmf: Failed TMF\n");
716			rc = FAILED;
717			goto tmf_err;
718		}
719		msleep(20);
720		goto retry_tmf;
721	}
722	/* Initialize rest of io_req fields */
723	io_req->sc_cmd = sc_cmd;
724	io_req->port = port;
725	io_req->tgt = tgt;
726
727	tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
728
729	rc = bnx2fc_init_mp_req(io_req);
730	if (rc == FAILED) {
731		printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
732		spin_lock_bh(&tgt->tgt_lock);
733		kref_put(&io_req->refcount, bnx2fc_cmd_release);
734		spin_unlock_bh(&tgt->tgt_lock);
735		goto tmf_err;
736	}
737
738	/* Set TM flags */
739	io_req->io_req_flags = 0;
740	tm_req->tm_flags = tm_flags;
741
742	/* Fill FCP_CMND */
743	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
744	fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
745	memset(fcp_cmnd->fc_cdb, 0,  sc_cmd->cmd_len);
746	fcp_cmnd->fc_dl = 0;
747
748	/* Fill FC header */
749	fc_hdr = &(tm_req->req_fc_hdr);
750	sid = tgt->sid;
751	did = rport->port_id;
752	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid,
753			   FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
754			   FC_FC_SEQ_INIT, 0);
755	/* Obtain exchange id */
756	xid = io_req->xid;
757
758	BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid);
759	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
760	index = xid % BNX2FC_TASKS_PER_PAGE;
761
762	/* Initialize task context for this IO request */
763	task_page = (struct fcoe_task_ctx_entry *)
764			interface->hba->task_ctx[task_idx];
765	task = &(task_page[index]);
766	bnx2fc_init_mp_task(io_req, task);
767
768	sc_cmd->SCp.ptr = (char *)io_req;
769
770	/* Obtain free SQ entry */
771	spin_lock_bh(&tgt->tgt_lock);
772	bnx2fc_add_2_sq(tgt, xid);
773
774	/* Enqueue the io_req to active_tm_queue */
775	io_req->on_tmf_queue = 1;
776	list_add_tail(&io_req->link, &tgt->active_tm_queue);
777
778	init_completion(&io_req->abts_done);
779	io_req->wait_for_abts_comp = 1;
780
781	/* Ring doorbell */
782	bnx2fc_ring_doorbell(tgt);
783	spin_unlock_bh(&tgt->tgt_lock);
784
785	rc = wait_for_completion_timeout(&io_req->abts_done,
786					 interface->tm_timeout * HZ);
787	spin_lock_bh(&tgt->tgt_lock);
788
789	io_req->wait_for_abts_comp = 0;
790	if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
791		set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
792		if (io_req->on_tmf_queue) {
793			list_del_init(&io_req->link);
794			io_req->on_tmf_queue = 0;
795		}
796		io_req->wait_for_cleanup_comp = 1;
797		init_completion(&io_req->cleanup_done);
798		bnx2fc_initiate_cleanup(io_req);
799		spin_unlock_bh(&tgt->tgt_lock);
800		rc = wait_for_completion_timeout(&io_req->cleanup_done,
801						 BNX2FC_FW_TIMEOUT);
802		spin_lock_bh(&tgt->tgt_lock);
803		io_req->wait_for_cleanup_comp = 0;
804		if (!rc)
805			kref_put(&io_req->refcount, bnx2fc_cmd_release);
806	}
807
808	spin_unlock_bh(&tgt->tgt_lock);
809
810	if (!rc) {
811		BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n");
812		rc = FAILED;
813	} else {
814		BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n");
815		rc = SUCCESS;
816	}
817tmf_err:
818	return rc;
819}
820
821int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
822{
823	struct fc_lport *lport;
824	struct bnx2fc_rport *tgt = io_req->tgt;
825	struct fc_rport *rport = tgt->rport;
826	struct fc_rport_priv *rdata = tgt->rdata;
827	struct bnx2fc_interface *interface;
828	struct fcoe_port *port;
829	struct bnx2fc_cmd *abts_io_req;
830	struct fcoe_task_ctx_entry *task;
831	struct fcoe_task_ctx_entry *task_page;
832	struct fc_frame_header *fc_hdr;
833	struct bnx2fc_mp_req *abts_req;
834	int task_idx, index;
835	u32 sid, did;
836	u16 xid;
837	int rc = SUCCESS;
838	u32 r_a_tov = rdata->r_a_tov;
839
840	/* called with tgt_lock held */
841	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");
842
843	port = io_req->port;
844	interface = port->priv;
845	lport = port->lport;
846
847	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
848		printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
849		rc = FAILED;
850		goto abts_err;
851	}
852
853	if (rport == NULL) {
854		printk(KERN_ERR PFX "initiate_abts: rport is NULL\n");
855		rc = FAILED;
856		goto abts_err;
857	}
858
859	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
860		printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
861		rc = FAILED;
862		goto abts_err;
863	}
864
865	abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
866	if (!abts_io_req) {
867		printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
868		rc = FAILED;
869		goto abts_err;
870	}
871
872	/* Initialize rest of io_req fields */
873	abts_io_req->sc_cmd = NULL;
874	abts_io_req->port = port;
875	abts_io_req->tgt = tgt;
876	abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */
877
878	abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
879	memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));
880
881	/* Fill FC header */
882	fc_hdr = &(abts_req->req_fc_hdr);
883
884	/* Obtain oxid and rxid for the original exchange to be aborted */
885	fc_hdr->fh_ox_id = htons(io_req->xid);
886	fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id);
887
888	sid = tgt->sid;
889	did = rport->port_id;
890
891	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
892			   FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
893			   FC_FC_SEQ_INIT, 0);
894
895	xid = abts_io_req->xid;
896	BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
897	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
898	index = xid % BNX2FC_TASKS_PER_PAGE;
899
900	/* Initialize task context for this IO request */
901	task_page = (struct fcoe_task_ctx_entry *)
902			interface->hba->task_ctx[task_idx];
903	task = &(task_page[index]);
904	bnx2fc_init_mp_task(abts_io_req, task);
905
906	/*
907	 * ABTS task is a temporary task that will be cleaned up
908	 * irrespective of ABTS response. We need to start the timer
909	 * for the original exchange, as the CQE is posted for the original
910	 * IO request.
911	 *
912	 * Timer for ABTS is started only when it is originated by a
913	 * TM request. For the ABTS issued as part of ULP timeout,
914	 * scsi-ml maintains the timers.
915	 */
916
917	/* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/
918	bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov);
919
920	/* Obtain free SQ entry */
921	bnx2fc_add_2_sq(tgt, xid);
922
923	/* Ring doorbell */
924	bnx2fc_ring_doorbell(tgt);
925
926abts_err:
927	return rc;
928}
929
930int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
931				enum fc_rctl r_ctl)
932{
933	struct bnx2fc_rport *tgt = orig_io_req->tgt;
934	struct bnx2fc_interface *interface;
935	struct fcoe_port *port;
936	struct bnx2fc_cmd *seq_clnp_req;
937	struct fcoe_task_ctx_entry *task;
938	struct fcoe_task_ctx_entry *task_page;
939	struct bnx2fc_els_cb_arg *cb_arg = NULL;
940	int task_idx, index;
941	u16 xid;
942	int rc = 0;
943
944	BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n",
945		   orig_io_req->xid);
946	kref_get(&orig_io_req->refcount);
947
948	port = orig_io_req->port;
949	interface = port->priv;
950
951	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
952	if (!cb_arg) {
953		printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n");
954		rc = -ENOMEM;
955		goto cleanup_err;
956	}
957
958	seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
959	if (!seq_clnp_req) {
960		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
961		rc = -ENOMEM;
962		kfree(cb_arg);
963		goto cleanup_err;
964	}
965	/* Initialize rest of io_req fields */
966	seq_clnp_req->sc_cmd = NULL;
967	seq_clnp_req->port = port;
968	seq_clnp_req->tgt = tgt;
969	seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */
970
971	xid = seq_clnp_req->xid;
972
973	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
974	index = xid % BNX2FC_TASKS_PER_PAGE;
975
976	/* Initialize task context for this IO request */
977	task_page = (struct fcoe_task_ctx_entry *)
978		     interface->hba->task_ctx[task_idx];
979	task = &(task_page[index]);
980	cb_arg->aborted_io_req = orig_io_req;
981	cb_arg->io_req = seq_clnp_req;
982	cb_arg->r_ctl = r_ctl;
983	cb_arg->offset = offset;
984	seq_clnp_req->cb_arg = cb_arg;
985
986	printk(KERN_ERR PFX "call init_seq_cleanup_task\n");
987	bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);
988
989	/* Obtain free SQ entry */
990	bnx2fc_add_2_sq(tgt, xid);
991
992	/* Ring doorbell */
993	bnx2fc_ring_doorbell(tgt);
994cleanup_err:
995	return rc;
996}
997
998int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
999{
1000	struct bnx2fc_rport *tgt = io_req->tgt;
1001	struct bnx2fc_interface *interface;
1002	struct fcoe_port *port;
1003	struct bnx2fc_cmd *cleanup_io_req;
1004	struct fcoe_task_ctx_entry *task;
1005	struct fcoe_task_ctx_entry *task_page;
1006	int task_idx, index;
1007	u16 xid, orig_xid;
1008	int rc = 0;
1009
1010	/* ASSUMPTION: called with tgt_lock held */
1011	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");
1012
1013	port = io_req->port;
1014	interface = port->priv;
1015
1016	cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
1017	if (!cleanup_io_req) {
1018		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
1019		rc = -1;
1020		goto cleanup_err;
1021	}
1022
1023	/* Initialize rest of io_req fields */
1024	cleanup_io_req->sc_cmd = NULL;
1025	cleanup_io_req->port = port;
1026	cleanup_io_req->tgt = tgt;
1027	cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */
1028
1029	xid = cleanup_io_req->xid;
1030
1031	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
1032	index = xid % BNX2FC_TASKS_PER_PAGE;
1033
1034	/* Initialize task context for this IO request */
1035	task_page = (struct fcoe_task_ctx_entry *)
1036			interface->hba->task_ctx[task_idx];
1037	task = &(task_page[index]);
1038	orig_xid = io_req->xid;
1039
1040	BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid);
1041
1042	bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);
1043
1044	/* Obtain free SQ entry */
1045	bnx2fc_add_2_sq(tgt, xid);
1046
1047	/* Set flag that cleanup request is pending with the firmware */
1048	set_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags);
1049
1050	/* Ring doorbell */
1051	bnx2fc_ring_doorbell(tgt);
1052
1053cleanup_err:
1054	return rc;
1055}
1056
1057/**
1058 * bnx2fc_eh_target_reset - Reset a target
1059 *
1060 * @sc_cmd:	SCSI command
1061 *
1062 * Set from SCSI host template to send task mgmt command to the target
1063 *	and wait for the response
1064 */
1065int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
1066{
1067	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
1068}
1069
1070/**
1071 * bnx2fc_eh_device_reset - Reset a single LUN
1072 *
1073 * @sc_cmd:	SCSI command
1074 *
1075 * Set from SCSI host template to send task mgmt command to the target
1076 *	and wait for the response
1077 */
1078int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
1079{
1080	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
1081}
1082
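/*
 * Issue a firmware cleanup for the exchange and wait (with tgt_lock
 * temporarily dropped and a bounded timeout) for the cleanup completion,
 * so the SCSI error handler is never blocked indefinitely.
 */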
1083static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
1084	__must_hold(&tgt->tgt_lock)
1085{
1086	struct bnx2fc_rport *tgt = io_req->tgt;
1087	unsigned int time_left;
1088
1089	init_completion(&io_req->cleanup_done);
1090	io_req->wait_for_cleanup_comp = 1;
1091	bnx2fc_initiate_cleanup(io_req);
1092
1093	spin_unlock_bh(&tgt->tgt_lock);
1094
1095	/*
1096	 * Can't wait forever on cleanup response lest we let the SCSI error
1097	 * handler wait forever
1098	 */
1099	time_left = wait_for_completion_timeout(&io_req->cleanup_done,
1100						BNX2FC_FW_TIMEOUT);
1101	if (!time_left) {
1102		BNX2FC_IO_DBG(io_req, "%s(): Wait for cleanup timed out.\n",
1103			      __func__);
1104
1105		/*
1106		 * Put the extra reference on the io_req since the SCSI command
1107		 * would not have been returned in this case.
1108		 */
1109		kref_put(&io_req->refcount, bnx2fc_cmd_release);
1110	}
1111
1112	spin_lock_bh(&tgt->tgt_lock);
1113	io_req->wait_for_cleanup_comp = 0;
1114	return SUCCESS;
1115}
1116
1117/**
1118 * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
1119 *			SCSI command
1120 *
1121 * @sc_cmd:	SCSI_ML command pointer
1122 *
1123 * SCSI abort request handler
1124 */
1125int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1126{
1127	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1128	struct fc_rport_libfc_priv *rp = rport->dd_data;
1129	struct bnx2fc_cmd *io_req;
1130	struct fc_lport *lport;
1131	struct bnx2fc_rport *tgt;
1132	int rc;
1133	unsigned int time_left;
1134
1135	rc = fc_block_scsi_eh(sc_cmd);
1136	if (rc)
1137		return rc;
1138
1139	lport = shost_priv(sc_cmd->device->host);
1140	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
1141		printk(KERN_ERR PFX "eh_abort: link not ready\n");
1142		return FAILED;
1143	}
1144
1145	tgt = (struct bnx2fc_rport *)&rp[1];
1146
1147	BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");
1148
1149	spin_lock_bh(&tgt->tgt_lock);
1150	io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
1151	if (!io_req) {
1152		/* Command might have just completed */
1153		printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
1154		spin_unlock_bh(&tgt->tgt_lock);
1155		return SUCCESS;
1156	}
1157	BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
1158		      kref_read(&io_req->refcount));
1159
1160	/* Hold IO request across abort processing */
1161	kref_get(&io_req->refcount);
1162
1163	BUG_ON(tgt != io_req->tgt);
1164
1165	/* Remove the io_req from the active_q. */
1166	/*
1167	 * Task Mgmt functions (LUN RESET & TGT RESET) will not
1168	 * issue an ABTS on this particular IO req, as the
1169	 * io_req is no longer in the active_q.
1170	 */
1171	if (tgt->flush_in_prog) {
1172		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
1173			"flush in progress\n", io_req->xid);
1174		kref_put(&io_req->refcount, bnx2fc_cmd_release);
1175		spin_unlock_bh(&tgt->tgt_lock);
1176		return SUCCESS;
1177	}
1178
1179	if (io_req->on_active_queue == 0) {
1180		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
1181				"not on active_q\n", io_req->xid);
1182		/*
1183		 * The IO is still with the FW.
1184		 * Return failure and let SCSI-ml retry eh_abort.
1185		 */
1186		spin_unlock_bh(&tgt->tgt_lock);
1187		return FAILED;
1188	}
1189
1190	/*
1191	 * Only eh_abort processing will remove the IO from
1192	 * active_cmd_q before processing the request. This is
1193	 * done to avoid race conditions between IOs aborted
1194	 * as part of task management completion and eh_abort
1195	 * processing
1196	 */
1197	list_del_init(&io_req->link);
1198	io_req->on_active_queue = 0;
1199	/* Move IO req to retire queue */
1200	list_add_tail(&io_req->link, &tgt->io_retire_queue);
1201
1202	init_completion(&io_req->abts_done);
1203	init_completion(&io_req->cleanup_done);
1204
1205	if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
1206		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
1207				"already in abts processing\n", io_req->xid);
1208		if (cancel_delayed_work(&io_req->timeout_work))
1209			kref_put(&io_req->refcount,
1210				 bnx2fc_cmd_release); /* drop timer hold */
1211		/*
1212		 * We don't want to hold off the upper layer timer so simply
1213		 * cleanup the command and return that I/O was successfully
1214		 * aborted.
1215		 */
1216		rc = bnx2fc_abts_cleanup(io_req);
1217		/* This only occurs when a task abort was requested while ABTS
1218		   is in progress.  Setting the IO_CLEANUP flag will skip the
1219		   RRQ process in the case when the fw generated SCSI_CMD cmpl
1220		   was a result of the ABTS request rather than the CLEANUP
1221		   request */
1222		set_bit(BNX2FC_FLAG_IO_CLEANUP,	&io_req->req_flags);
1223		rc = FAILED;
1224		goto done;
1225	}
1226
1227	/* Cancel the current timer running on this io_req */
1228	if (cancel_delayed_work(&io_req->timeout_work))
1229		kref_put(&io_req->refcount,
1230			 bnx2fc_cmd_release); /* drop timer hold */
1231	set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
1232	io_req->wait_for_abts_comp = 1;
1233	rc = bnx2fc_initiate_abts(io_req);
1234	if (rc == FAILED) {
1235		io_req->wait_for_cleanup_comp = 1;
1236		bnx2fc_initiate_cleanup(io_req);
1237		spin_unlock_bh(&tgt->tgt_lock);
1238		wait_for_completion(&io_req->cleanup_done);
1239		spin_lock_bh(&tgt->tgt_lock);
1240		io_req->wait_for_cleanup_comp = 0;
1241		goto done;
1242	}
1243	spin_unlock_bh(&tgt->tgt_lock);
1244
1245	/* Wait 2 * RA_TOV + 1 to be sure timeout function hasn't fired */
1246	time_left = wait_for_completion_timeout(&io_req->abts_done,
1247					msecs_to_jiffies(2 * rp->r_a_tov + 1));
1248	if (!time_left)
1249		BNX2FC_IO_DBG(io_req,
1250			      "Timed out in eh_abort waiting for abts_done\n");
1251
1252	spin_lock_bh(&tgt->tgt_lock);
1253	io_req->wait_for_abts_comp = 0;
1254	if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
1255		BNX2FC_IO_DBG(io_req, "IO completed in a different context\n");
1256		rc = SUCCESS;
1257	} else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
1258				      &io_req->req_flags))) {
1259		/* Let the scsi-ml try to recover this command */
1260		printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
1261		       io_req->xid);
1262		/*
1263		 * Cleanup firmware residuals before returning control back
1264		 * to SCSI ML.
1265		 */
1266		rc = bnx2fc_abts_cleanup(io_req);
1267		goto done;
1268	} else {
1269		/*
1270		 * We come here even when there was a race condition
1271		 * between timeout and abts completion, and abts
1272		 * completion happens just in time.
1273		 */
1274		BNX2FC_IO_DBG(io_req, "abort succeeded\n");
1275		rc = SUCCESS;
1276		bnx2fc_scsi_done(io_req, DID_ABORT);
1277		kref_put(&io_req->refcount, bnx2fc_cmd_release);
1278	}
1279done:
1280	/* release the reference taken in eh_abort */
1281	kref_put(&io_req->refcount, bnx2fc_cmd_release);
1282	spin_unlock_bh(&tgt->tgt_lock);
1283	return rc;
1284}
1285
1286void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
1287				      struct fcoe_task_ctx_entry *task,
1288				      u8 rx_state)
1289{
1290	struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg;
1291	struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req;
1292	u32 offset = cb_arg->offset;
1293	enum fc_rctl r_ctl = cb_arg->r_ctl;
1294	int rc = 0;
1295	struct bnx2fc_rport *tgt = orig_io_req->tgt;
1296
1297	BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x, "
1298			      "cmd_type = %d\n",
1299		   seq_clnp_req->xid, seq_clnp_req->cmd_type);
1300
1301	if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) {
1302		printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n",
1303			seq_clnp_req->xid);
1304		goto free_cb_arg;
1305	}
1306
1307	spin_unlock_bh(&tgt->tgt_lock);
1308	rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
1309	spin_lock_bh(&tgt->tgt_lock);
1310
1311	if (rc)
1312		printk(KERN_ERR PFX "clnup_compl: Unable to send SRR,"
1313			" IO will abort\n");
1314	seq_clnp_req->cb_arg = NULL;
1315	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
1316free_cb_arg:
1317	kfree(cb_arg);
1318	return;
1319}
1320
1321void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
1322				  struct fcoe_task_ctx_entry *task,
1323				  u8 num_rq)
1324{
1325	BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
1326			      "refcnt = %d, cmd_type = %d\n",
1327		   kref_read(&io_req->refcount), io_req->cmd_type);
1328	/*
1329	 * Test whether there is a cleanup request pending. If not just
1330	 * exit.
1331	 */
1332	if (!test_and_clear_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ,
1333				&io_req->req_flags))
1334		return;
1335	/*
1336	 * If we receive a cleanup completion for this request then the
1337	 * firmware will not give us an abort completion for this request
1338	 * so clear any ABTS pending flags.
1339	 */
1340	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags) &&
1341	    !test_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags)) {
1342		set_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags);
1343		if (io_req->wait_for_abts_comp)
1344			complete(&io_req->abts_done);
1345	}
1346
1347	bnx2fc_scsi_done(io_req, DID_ERROR);
1348	kref_put(&io_req->refcount, bnx2fc_cmd_release);
1349	if (io_req->wait_for_cleanup_comp)
1350		complete(&io_req->cleanup_done);
1351}
1352
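/*
 * Handle the firmware completion of an ABTS. On a BA_ACC response the
 * exchange is not retired immediately; an RRQ must be sent after R_A_TOV,
 * so the command timer is re-armed here and the RRQ is issued from the
 * timeout handler.
 */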
1353void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
1354			       struct fcoe_task_ctx_entry *task,
1355			       u8 num_rq)
1356{
1357	u32 r_ctl;
1358	u32 r_a_tov = FC_DEF_R_A_TOV;
1359	u8 issue_rrq = 0;
1360	struct bnx2fc_rport *tgt = io_req->tgt;
1361
1362	BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x, "
1363			      "refcnt = %d, cmd_type = %d\n",
1364		   io_req->xid,
1365		   kref_read(&io_req->refcount), io_req->cmd_type);
1366
1367	if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
1368				       &io_req->req_flags)) {
1369		BNX2FC_IO_DBG(io_req, "Timer context finished processing"
1370				" this io\n");
1371		return;
1372	}
1373
1374	/*
1375	 * If we receive an ABTS completion here then we will not receive
1376	 * a cleanup completion so clear any cleanup pending flags.
1377	 */
1378	if (test_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags)) {
1379		clear_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags);
1380		if (io_req->wait_for_cleanup_comp)
1381			complete(&io_req->cleanup_done);
1382	}
1383
1384	/* Do not issue RRQ as this IO is already cleaned up */
1385	if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
1386				&io_req->req_flags))
1387		goto io_compl;
1388
1389	/*
1390	 * For ABTS issued due to SCSI eh_abort_handler, timeout
1391	 * values are maintained by scsi-ml itself. Cancel timeout
1392	 * in case ABTS issued as part of task management function
1393	 * or due to FW error.
1394	 */
1395	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
1396		if (cancel_delayed_work(&io_req->timeout_work))
1397			kref_put(&io_req->refcount,
1398				 bnx2fc_cmd_release); /* drop timer hold */
1399
1400	r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl;
1401
1402	switch (r_ctl) {
1403	case FC_RCTL_BA_ACC:
1404		/*
1405		 * Don't release this cmd yet. It will be released
1406		 * after we get the RRQ response.
1407		 */
1408		BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
1409		issue_rrq = 1;
1410		break;
1411
1412	case FC_RCTL_BA_RJT:
1413		BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
1414		break;
1415	default:
1416		printk(KERN_ERR PFX "Unknown ABTS response\n");
1417		break;
1418	}
1419
1420	if (issue_rrq) {
1421		BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
1422		set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
1423	}
1424	set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
1425	bnx2fc_cmd_timer_set(io_req, r_a_tov);
1426
1427io_compl:
1428	if (io_req->wait_for_abts_comp) {
1429		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
1430				       &io_req->req_flags))
1431			complete(&io_req->abts_done);
1432	} else {
1433		/*
1434		 * We end up here when ABTS is issued as
1435		 * in asynchronous context, i.e., as part
1436		 * of task management completion, or
1437		 * when FW error is received or when the
1438		 * ABTS is issued when the IO is timed
1439		 * out.
1440		 */
1441
1442		if (io_req->on_active_queue) {
1443			list_del_init(&io_req->link);
1444			io_req->on_active_queue = 0;
1445			/* Move IO req to retire queue */
1446			list_add_tail(&io_req->link, &tgt->io_retire_queue);
1447		}
1448		bnx2fc_scsi_done(io_req, DID_ERROR);
1449		kref_put(&io_req->refcount, bnx2fc_cmd_release);
1450	}
1451}
1452
1453static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
1454{
1455	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1456	struct bnx2fc_rport *tgt = io_req->tgt;
1457	struct bnx2fc_cmd *cmd, *tmp;
1458	u64 tm_lun = sc_cmd->device->lun;
1459	u64 lun;
1460	int rc = 0;
1461
1462	/* called with tgt_lock held */
1463	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
1464	/*
1465	 * Walk thru the active_ios queue and ABORT the IOs
1466	 * that match the LUN that was reset
1467	 */
1468	list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
1469		BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
1470		lun = cmd->sc_cmd->device->lun;
1471		if (lun == tm_lun) {
1472			/* Initiate ABTS on this cmd */
1473			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
1474					      &cmd->req_flags)) {
1475				/* cancel the IO timeout */
1476				if (cancel_delayed_work(&io_req->timeout_work))
1477					kref_put(&io_req->refcount,
1478						 bnx2fc_cmd_release);
1479							/* timer hold */
1480				rc = bnx2fc_initiate_abts(cmd);
1481				/* abts shouldn't fail in this context */
1482				WARN_ON(rc != SUCCESS);
1483			} else
1484				printk(KERN_ERR PFX "lun_rst: abts already in"
1485					" progress for this IO 0x%x\n",
1486					cmd->xid);
1487		}
1488	}
1489}
1490
1491static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
1492{
1493	struct bnx2fc_rport *tgt = io_req->tgt;
1494	struct bnx2fc_cmd *cmd, *tmp;
1495	int rc = 0;
1496
1497	/* called with tgt_lock held */
1498	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");
1499	/*
1500	 * Walk thru the active_ios queue and ABORT all the IOs
1501	 * to the target that was reset
1502	 */
1503	list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
1504		BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
1505		/* Initiate ABTS */
1506		if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
1507							&cmd->req_flags)) {
1508			/* cancel the IO timeout */
1509			if (cancel_delayed_work(&io_req->timeout_work))
1510				kref_put(&io_req->refcount,
1511					 bnx2fc_cmd_release); /* timer hold */
1512			rc = bnx2fc_initiate_abts(cmd);
1513			/* abts shouldn't fail in this context */
1514			WARN_ON(rc != SUCCESS);
1515
1516		} else
1517			printk(KERN_ERR PFX "tgt_rst: abts already in progress"
1518				" for this IO 0x%x\n", cmd->xid);
1519	}
1520}
1521
1522void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
1523			     struct fcoe_task_ctx_entry *task, u8 num_rq,
1524				  unsigned char *rq_data)
1525{
1526	struct bnx2fc_mp_req *tm_req;
1527	struct fc_frame_header *fc_hdr;
1528	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1529	u64 *hdr;
1530	u64 *temp_hdr;
1531	void *rsp_buf;
1532
1533	/* Called with tgt_lock held */
1534	BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n");
1535
1536	if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags)))
1537		set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags);
1538	else {
1539		/* TM has already timed out and we got
1540		 * delayed completion. Ignore completion
1541		 * processing.
1542		 */
1543		return;
1544	}
1545
1546	tm_req = &(io_req->mp_req);
1547	fc_hdr = &(tm_req->resp_fc_hdr);
1548	hdr = (u64 *)fc_hdr;
1549	temp_hdr = (u64 *)
1550		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
1551	hdr[0] = cpu_to_be64(temp_hdr[0]);
1552	hdr[1] = cpu_to_be64(temp_hdr[1]);
1553	hdr[2] = cpu_to_be64(temp_hdr[2]);
1554
1555	tm_req->resp_len =
1556		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;
1557
1558	rsp_buf = tm_req->resp_buf;
1559
1560	if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
1561		bnx2fc_parse_fcp_rsp(io_req,
1562				     (struct fcoe_fcp_rsp_payload *)
1563				     rsp_buf, num_rq, rq_data);
1564		if (io_req->fcp_rsp_code == 0) {
1565			/* TM successful */
1566			if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
1567				bnx2fc_lun_reset_cmpl(io_req);
1568			else if (tm_req->tm_flags & FCP_TMF_TGT_RESET)
1569				bnx2fc_tgt_reset_cmpl(io_req);
1570		}
1571	} else {
1572		printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
1573			fc_hdr->fh_r_ctl);
1574	}
1575	if (!sc_cmd->SCp.ptr) {
1576		printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
1577		return;
1578	}
1579	switch (io_req->fcp_status) {
1580	case FC_GOOD:
1581		if (io_req->cdb_status == 0) {
1582			/* Good IO completion */
1583			sc_cmd->result = DID_OK << 16;
1584		} else {
1585			/* Transport status is good, SCSI status not good */
1586			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1587		}
1588		if (io_req->fcp_resid)
1589			scsi_set_resid(sc_cmd, io_req->fcp_resid);
1590		break;
1591
1592	default:
1593		BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
1594			   io_req->fcp_status);
1595		break;
1596	}
1597
1598	sc_cmd = io_req->sc_cmd;
1599	io_req->sc_cmd = NULL;
1600
1601	/* check if the io_req exists in tgt's tmf_q */
1602	if (io_req->on_tmf_queue) {
1603
1604		list_del_init(&io_req->link);
1605		io_req->on_tmf_queue = 0;
1606	} else {
1607
1608		printk(KERN_ERR PFX "Command not on active_tm_queue!\n");
1609		return;
1610	}
1611
1612	sc_cmd->SCp.ptr = NULL;
1613	sc_cmd->scsi_done(sc_cmd);
1614
1615	kref_put(&io_req->refcount, bnx2fc_cmd_release);
1616	if (io_req->wait_for_abts_comp) {
1617		BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
1618		complete(&io_req->abts_done);
1619	}
1620}
1621
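/*
 * Split a scatterlist element larger than BNX2FC_MAX_BD_LEN into multiple
 * BDs of at most BNX2FC_BD_SPLIT_SZ bytes each; returns the number of BD
 * entries consumed.
 */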
1622static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
1623			   int bd_index)
1624{
1625	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
1626	int frag_size, sg_frags;
1627
1628	sg_frags = 0;
1629	while (sg_len) {
1630		if (sg_len >= BNX2FC_BD_SPLIT_SZ)
1631			frag_size = BNX2FC_BD_SPLIT_SZ;
1632		else
1633			frag_size = sg_len;
1634		bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff;
1635		bd[bd_index + sg_frags].buf_addr_hi  = addr >> 32;
1636		bd[bd_index + sg_frags].buf_len = (u16)frag_size;
1637		bd[bd_index + sg_frags].flags = 0;
1638
1639		addr += (u64) frag_size;
1640		sg_frags++;
1641		sg_len -= frag_size;
1642	}
1643	return sg_frags;
1644
1645}
1646
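/*
 * DMA-map the command's scatterlist and convert it into the hardware BD
 * table, splitting any element longer than BNX2FC_MAX_BD_LEN; returns the
 * number of BDs built.
 */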
1647static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
1648{
1649	struct bnx2fc_interface *interface = io_req->port->priv;
1650	struct bnx2fc_hba *hba = interface->hba;
1651	struct scsi_cmnd *sc = io_req->sc_cmd;
1652	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
1653	struct scatterlist *sg;
1654	int byte_count = 0;
1655	int sg_count = 0;
1656	int bd_count = 0;
1657	int sg_frags;
1658	unsigned int sg_len;
1659	u64 addr;
1660	int i;
1661
1662	WARN_ON(scsi_sg_count(sc) > BNX2FC_MAX_BDS_PER_CMD);
1663	/*
1664	 * Use dma_map_sg directly to ensure we're using the correct
1665	 * dev struct off of pcidev.
1666	 */
1667	sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc),
1668			      scsi_sg_count(sc), sc->sc_data_direction);
1669	scsi_for_each_sg(sc, sg, sg_count, i) {
1670		sg_len = sg_dma_len(sg);
1671		addr = sg_dma_address(sg);
1672		if (sg_len > BNX2FC_MAX_BD_LEN) {
1673			sg_frags = bnx2fc_split_bd(io_req, addr, sg_len,
1674						   bd_count);
1675		} else {
1676
1677			sg_frags = 1;
1678			bd[bd_count].buf_addr_lo = addr & 0xffffffff;
1679			bd[bd_count].buf_addr_hi  = addr >> 32;
1680			bd[bd_count].buf_len = (u16)sg_len;
1681			bd[bd_count].flags = 0;
1682		}
1683		bd_count += sg_frags;
1684		byte_count += sg_len;
1685	}
1686	if (byte_count != scsi_bufflen(sc))
1687		printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, "
1688			"task_id = 0x%x\n", byte_count, scsi_bufflen(sc),
1689			io_req->xid);
1690	return bd_count;
1691}
1692
1693static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
1694{
1695	struct scsi_cmnd *sc = io_req->sc_cmd;
1696	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
1697	int bd_count;
1698
1699	if (scsi_sg_count(sc)) {
1700		bd_count = bnx2fc_map_sg(io_req);
1701		if (bd_count == 0)
1702			return -ENOMEM;
1703	} else {
1704		bd_count = 0;
1705		bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
1706		bd[0].buf_len = bd[0].flags = 0;
1707	}
1708	io_req->bd_tbl->bd_valid = bd_count;
1709
1710	/*
1711	 * Return the command to ML if BD count exceeds the max number
1712	 * that can be handled by FW.
1713	 */
1714	if (bd_count > BNX2FC_FW_MAX_BDS_PER_CMD) {
1715		pr_err("bd_count = %d exceeded FW supported max BD(255), task_id = 0x%x\n",
1716		       bd_count, io_req->xid);
1717		return -ENOMEM;
1718	}
1719
1720	return 0;
1721}
1722
1723static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
1724{
1725	struct scsi_cmnd *sc = io_req->sc_cmd;
1726	struct bnx2fc_interface *interface = io_req->port->priv;
1727	struct bnx2fc_hba *hba = interface->hba;
1728
1729	/*
1730	 * Use dma_unmap_sg directly to ensure we're using the correct
1731	 * dev struct off of pcidev.
1732	 */
1733	if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
1734		dma_unmap_sg(&hba->pcidev->dev, scsi_sglist(sc),
1735		    scsi_sg_count(sc), sc->sc_data_direction);
1736		io_req->bd_tbl->bd_valid = 0;
1737	}
1738}
1739
1740void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
1741				  struct fcp_cmnd *fcp_cmnd)
1742{
1743	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1744
1745	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
1746
1747	int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun);
1748
1749	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
1750	memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
1751
1752	fcp_cmnd->fc_cmdref = 0;
1753	fcp_cmnd->fc_pri_ta = 0;
1754	fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
1755	fcp_cmnd->fc_flags = io_req->io_req_flags;
1756	fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
1757}
1758
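/*
 * Parse the FCP_RSP payload: record the residual and SCSI status, and if
 * the RQ carries FCP_RSP_INFO and/or sense data, copy the response code
 * and sense bytes out of the receive queue buffer (rq_data).
 */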
1759static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
1760				 struct fcoe_fcp_rsp_payload *fcp_rsp,
1761				 u8 num_rq, unsigned char *rq_data)
1762{
1763	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1764	u8 rsp_flags = fcp_rsp->fcp_flags.flags;
1765	u32 rq_buff_len = 0;
1766	int fcp_sns_len = 0;
1767	int fcp_rsp_len = 0;
1768
1769	io_req->fcp_status = FC_GOOD;
1770	io_req->fcp_resid = 0;
1771	if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
1772	    FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
1773		io_req->fcp_resid = fcp_rsp->fcp_resid;
1774
1775	io_req->scsi_comp_flags = rsp_flags;
1776	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
1777				fcp_rsp->scsi_status_code;
1778
1779	/* Fetch fcp_rsp_info and fcp_sns_info if available */
1780	if (num_rq) {
1781
1782		/*
1783		 * We do not anticipate num_rq > 1, as the Linux-defined
1784		 * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of FCP_RSP_INFO;
1785		 * a single 256-byte rq buffer is good enough to hold this.
1786		 */
1787
1788		if (rsp_flags &
1789		    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) {
1790			fcp_rsp_len = rq_buff_len
1791					= fcp_rsp->fcp_rsp_len;
1792		}
1793
1794		if (rsp_flags &
1795		    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) {
1796			fcp_sns_len = fcp_rsp->fcp_sns_len;
1797			rq_buff_len += fcp_rsp->fcp_sns_len;
1798		}
1799
1800		io_req->fcp_rsp_len = fcp_rsp_len;
1801		io_req->fcp_sns_len = fcp_sns_len;
1802
1803		if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
1804			/* Invalid sense length. */
1805			printk(KERN_ERR PFX "invalid sns length %d\n",
1806				rq_buff_len);
1807			/* reset rq_buff_len */
1808			rq_buff_len =  num_rq * BNX2FC_RQ_BUF_SZ;
1809		}
1810
1811		/* fetch fcp_rsp_code */
1812		if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
1813			/* Only for task management function */
1814			io_req->fcp_rsp_code = rq_data[3];
1815			BNX2FC_IO_DBG(io_req, "fcp_rsp_code = %d\n",
1816				io_req->fcp_rsp_code);
1817		}
1818
1819		/* fetch sense data */
1820		rq_data += fcp_rsp_len;
1821
1822		if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
1823			printk(KERN_ERR PFX "Truncating sense buffer\n");
1824			fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
1825		}
1826
1827		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1828		if (fcp_sns_len)
1829			memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
1830
1831	}
1832}
1833
1834/**
1835 * bnx2fc_queuecommand - Queuecommand function of the scsi template
1836 *
1837 * @host:	The Scsi_Host the command was issued to
1838 * @sc_cmd:	struct scsi_cmnd to be executed
1839 *
1840 * This is the IO strategy routine, called by SCSI-ML
1841 **/
1842int bnx2fc_queuecommand(struct Scsi_Host *host,
1843			struct scsi_cmnd *sc_cmd)
1844{
1845	struct fc_lport *lport = shost_priv(host);
1846	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
1847	struct fc_rport_libfc_priv *rp = rport->dd_data;
1848	struct bnx2fc_rport *tgt;
1849	struct bnx2fc_cmd *io_req;
1850	int rc = 0;
1851	int rval;
1852
1853	rval = fc_remote_port_chkready(rport);
1854	if (rval) {
1855		sc_cmd->result = rval;
1856		sc_cmd->scsi_done(sc_cmd);
1857		return 0;
1858	}
1859
1860	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
1861		rc = SCSI_MLQUEUE_HOST_BUSY;
1862		goto exit_qcmd;
1863	}
1864
1865	/* rport and tgt are allocated together, so tgt should be non-NULL */
1866	tgt = (struct bnx2fc_rport *)&rp[1];
1867
1868	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
1869		/*
1870		 * Session is not offloaded yet. Let SCSI-ml retry
1871		 * the command.
1872		 */
1873		rc = SCSI_MLQUEUE_TARGET_BUSY;
1874		goto exit_qcmd;
1875	}
1876	if (tgt->retry_delay_timestamp) {
1877		if (time_after(jiffies, tgt->retry_delay_timestamp)) {
1878			tgt->retry_delay_timestamp = 0;
1879		} else {
1880			/* If retry_delay timer is active, flow off the ML */
1881			rc = SCSI_MLQUEUE_TARGET_BUSY;
1882			goto exit_qcmd;
1883		}
1884	}
1885
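	/*
	 * tgt_lock serializes command allocation and posting against
	 * session flush/upload handling on this rport.
	 */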
1886	spin_lock_bh(&tgt->tgt_lock);
1887
1888	io_req = bnx2fc_cmd_alloc(tgt);
1889	if (!io_req) {
1890		rc = SCSI_MLQUEUE_HOST_BUSY;
1891		goto exit_qcmd_tgtlock;
1892	}
1893	io_req->sc_cmd = sc_cmd;
1894
1895	if (bnx2fc_post_io_req(tgt, io_req)) {
1896		printk(KERN_ERR PFX "Unable to post io_req\n");
1897		rc = SCSI_MLQUEUE_HOST_BUSY;
1898		goto exit_qcmd_tgtlock;
1899	}
1900
1901exit_qcmd_tgtlock:
1902	spin_unlock_bh(&tgt->tgt_lock);
1903exit_qcmd:
1904	return rc;
1905}
1906
1907void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
1908				   struct fcoe_task_ctx_entry *task,
1909				   u8 num_rq, unsigned char *rq_data)
1910{
1911	struct fcoe_fcp_rsp_payload *fcp_rsp;
1912	struct bnx2fc_rport *tgt = io_req->tgt;
1913	struct scsi_cmnd *sc_cmd;
1914	u16 scope = 0, qualifier = 0;
1915
1916	/* scsi_cmd_cmpl is called with tgt lock held */
1917
1918	if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
1919		/* we will not receive ABTS response for this IO */
1920		BNX2FC_IO_DBG(io_req, "Timer context finished processing "
1921			   "this scsi cmd\n");
1922		if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
1923				       &io_req->req_flags)) {
1924			BNX2FC_IO_DBG(io_req,
1925				      "Actual completion after cleanup request, cleaning up\n");
1926			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
1927		}
1928		return;
1929	}
1930
1931	/* Cancel the timeout_work, as we received IO completion */
1932	if (cancel_delayed_work(&io_req->timeout_work))
1933		kref_put(&io_req->refcount,
1934			 bnx2fc_cmd_release); /* drop timer hold */
1935
1936	sc_cmd = io_req->sc_cmd;
1937	if (sc_cmd == NULL) {
1938		printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n");
1939		return;
1940	}
1941
1942	/* Fetch fcp_rsp from task context and perform cmd completion */
1943	fcp_rsp = (struct fcoe_fcp_rsp_payload *)
1944		   &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);
1945
1946	/* parse fcp_rsp and obtain sense data from RQ if available */
1947	bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq, rq_data);
1948
1949	if (!sc_cmd->SCp.ptr) {
1950		printk(KERN_ERR PFX "SCp.ptr is NULL\n");
1951		return;
1952	}
1953
1954	if (io_req->on_active_queue) {
1955		list_del_init(&io_req->link);
1956		io_req->on_active_queue = 0;
1957		/* Move IO req to retire queue */
1958		list_add_tail(&io_req->link, &tgt->io_retire_queue);
1959	} else {
1960		/* This should not happen, but could have been pulled
1961		 * by bnx2fc_flush_active_ios(), or during a race
1962		 * between command abort and (late) completion.
1963		 */
1964		BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
1965		if (io_req->wait_for_abts_comp)
1966			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
1967					       &io_req->req_flags))
1968				complete(&io_req->abts_done);
1969	}
1970
1971	bnx2fc_unmap_sg_list(io_req);
1972	io_req->sc_cmd = NULL;
1973
1974	switch (io_req->fcp_status) {
1975	case FC_GOOD:
1976		if (io_req->cdb_status == 0) {
1977			/* Good IO completion */
1978			sc_cmd->result = DID_OK << 16;
1979		} else {
1980			/* Transport status is good, SCSI status not good */
1981			BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d"
1982				 " fcp_resid = 0x%x\n",
1983				io_req->cdb_status, io_req->fcp_resid);
1984			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1985
1986			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
1987			    io_req->cdb_status == SAM_STAT_BUSY) {
1988				/* Newer array firmware returning BUSY or
1989				 * TASK_SET_FULL may also set retry_delay_timer,
1990				 * whose scope bits must be masked off before use;
1991				 * otherwise a huge retry delay of up to ~27
1992				 * minutes can result.
1993				 */
1994				if (fcp_rsp->retry_delay_timer) {
1995					/* Upper 2 bits */
1996					scope = fcp_rsp->retry_delay_timer
1997						& 0xC000;
1998					/* Lower 14 bits */
1999					qualifier = fcp_rsp->retry_delay_timer
2000						& 0x3FFF;
2001				}
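				/*
				 * e.g. retry_delay_timer = 0x4064 gives
				 * scope = 0x4000 and qualifier = 0x64 (100),
				 * i.e. a requested retry delay of
				 * 100 * 100ms = 10 seconds.
				 */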
2002				if (scope > 0 && qualifier > 0 &&
2003					qualifier <= 0x3FEF) {
2004					/* Flow off this rport/tgt until
2005					 * jiffies + qualifier * 100ms
2006					 * (qualifier is in 100ms units).
2007					 */
2008					tgt->retry_delay_timestamp = jiffies +
2009						(qualifier * HZ / 10);
2010				}
2011			}
2012		}
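		/* Report any data residual back to SCSI-ML */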
2013		if (io_req->fcp_resid)
2014			scsi_set_resid(sc_cmd, io_req->fcp_resid);
2015		break;
2016	default:
2017		printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n",
2018			io_req->fcp_status);
2019		break;
2020	}
2021	sc_cmd->SCp.ptr = NULL;
2022	sc_cmd->scsi_done(sc_cmd);
2023	kref_put(&io_req->refcount, bnx2fc_cmd_release);
2024}
2025
2026int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
2027			       struct bnx2fc_cmd *io_req)
2028{
2029	struct fcoe_task_ctx_entry *task;
2030	struct fcoe_task_ctx_entry *task_page;
2031	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
2032	struct fcoe_port *port = tgt->port;
2033	struct bnx2fc_interface *interface = port->priv;
2034	struct bnx2fc_hba *hba = interface->hba;
2035	struct fc_lport *lport = port->lport;
2036	struct fc_stats *stats;
2037	int task_idx, index;
2038	u16 xid;
2039
2040	/* bnx2fc_post_io_req() is called with the tgt_lock held */
2041
2042	/* Initialize rest of io_req fields */
2043	io_req->cmd_type = BNX2FC_SCSI_CMD;
2044	io_req->port = port;
2045	io_req->tgt = tgt;
2046	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
2047	sc_cmd->SCp.ptr = (char *)io_req;
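	/* SCp.ptr lets the completion and EH paths map the scsi_cmnd back to this io_req */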
2048
2049	stats = per_cpu_ptr(lport->stats, get_cpu());
2050	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
2051		io_req->io_req_flags = BNX2FC_READ;
2052		stats->InputRequests++;
2053		stats->InputBytes += io_req->data_xfer_len;
2054	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
2055		io_req->io_req_flags = BNX2FC_WRITE;
2056		stats->OutputRequests++;
2057		stats->OutputBytes += io_req->data_xfer_len;
2058	} else {
2059		io_req->io_req_flags = 0;
2060		stats->ControlRequests++;
2061	}
2062	put_cpu();
2063
2064	xid = io_req->xid;
2065
2066	/* Build buffer descriptor list for firmware from sg list */
2067	if (bnx2fc_build_bd_list_from_sg(io_req)) {
2068		printk(KERN_ERR PFX "BD list creation failed\n");
2069		kref_put(&io_req->refcount, bnx2fc_cmd_release);
2070		return -EAGAIN;
2071	}
2072
2073	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
2074	index = xid % BNX2FC_TASKS_PER_PAGE;
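	/*
	 * Task context entries are packed BNX2FC_TASKS_PER_PAGE per page,
	 * so the xid maps to a (page, offset) pair; e.g. if
	 * BNX2FC_TASKS_PER_PAGE were 256, xid 0x105 would land in page 1
	 * at offset 5.
	 */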
2075
2076	/* Initialize task context for this IO request */
2077	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
2078	task = &(task_page[index]);
2079	bnx2fc_init_task(io_req, task);
2080
2081	if (tgt->flush_in_prog) {
2082		printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
2083		kref_put(&io_req->refcount, bnx2fc_cmd_release);
2084		return -EAGAIN;
2085	}
2086
2087	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
2088		printk(KERN_ERR PFX "Session not ready...post_io\n");
2089		kref_put(&io_req->refcount, bnx2fc_cmd_release);
2090		return -EAGAIN;
2091	}
2092
2093	/* Time IO req */
2094	if (tgt->io_timeout)
2095		bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
2096	/* Obtain free SQ entry */
2097	bnx2fc_add_2_sq(tgt, xid);
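	/*
	 * The SQE queued here is picked up by the chip only after the
	 * doorbell below advances the SQ producer index.
	 */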
2098
2099	/* Enqueue the io_req to active_cmd_queue */
2100
2101	io_req->on_active_queue = 1;
2102	/* io_req stays on the active_cmd_queue until completion, abort or flush */
2103	list_add_tail(&io_req->link, &tgt->active_cmd_queue);
2104
2105	/* Ring doorbell */
2106	bnx2fc_ring_doorbell(tgt);
2107	return 0;
2108}
2109