1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c)  2003-2014 QLogic Corporation
5 */
6#include "qla_def.h"
7#include "qla_target.h"
8
9#include <linux/blkdev.h>
10#include <linux/delay.h>
11
12#include <scsi/scsi_tcq.h>
13
14/**
15 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
16 * @sp: SCSI command
17 *
18 * Returns the proper CF_* direction based on CDB.
19 */
20static inline uint16_t
21qla2x00_get_cmd_direction(srb_t *sp)
22{
23	uint16_t cflags;
24	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
25	struct scsi_qla_host *vha = sp->vha;
26
27	cflags = 0;
28
29	/* Set transfer direction */
30	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
31		cflags = CF_WRITE;
32		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
33		vha->qla_stats.output_requests++;
34	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
35		cflags = CF_READ;
36		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
37		vha->qla_stats.input_requests++;
38	}
39	return (cflags);
40}
41
42/**
43 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
44 * Continuation Type 0 IOCBs to allocate.
45 *
46 * @dsds: number of data segment descriptors needed
47 *
48 * Returns the number of IOCB entries needed to store @dsds.
49 */
50uint16_t
51qla2x00_calc_iocbs_32(uint16_t dsds)
52{
53	uint16_t iocbs;
54
55	iocbs = 1;
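	/*
	 * A Command Type 2 IOCB holds up to 3 DSDs; each additional
	 * Continuation Type 0 IOCB holds up to 7 more. For example,
	 * dsds = 17 needs 1 + (14 / 7) = 3 entries.
	 */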
56	if (dsds > 3) {
57		iocbs += (dsds - 3) / 7;
58		if ((dsds - 3) % 7)
59			iocbs++;
60	}
61	return (iocbs);
62}
63
64/**
65 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
66 * Continuation Type 1 IOCBs to allocate.
67 *
68 * @dsds: number of data segment descriptors needed
69 *
70 * Returns the number of IOCB entries needed to store @dsds.
71 */
72uint16_t
73qla2x00_calc_iocbs_64(uint16_t dsds)
74{
75	uint16_t iocbs;
76
77	iocbs = 1;
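	/*
	 * A Command Type 3 IOCB holds up to 2 DSDs; each additional
	 * Continuation Type 1 IOCB holds up to 5 more. For example,
	 * dsds = 12 needs 1 + (10 / 5) = 3 entries.
	 */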
78	if (dsds > 2) {
79		iocbs += (dsds - 2) / 5;
80		if ((dsds - 2) % 5)
81			iocbs++;
82	}
83	return (iocbs);
84}
85
86/**
87 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
88 * @vha: HA context
89 *
90 * Returns a pointer to the Continuation Type 0 IOCB packet.
91 */
92static inline cont_entry_t *
93qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
94{
95	cont_entry_t *cont_pkt;
96	struct req_que *req = vha->req;
97	/* Adjust ring index. */
98	req->ring_index++;
99	if (req->ring_index == req->length) {
100		req->ring_index = 0;
101		req->ring_ptr = req->ring;
102	} else {
103		req->ring_ptr++;
104	}
105
106	cont_pkt = (cont_entry_t *)req->ring_ptr;
107
108	/* Load packet defaults. */
109	put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);
110
111	return (cont_pkt);
112}
113
114/**
115 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
116 * @vha: HA context
117 * @req: request queue
118 *
119 * Returns a pointer to the continuation type 1 IOCB packet.
120 */
121static inline cont_a64_entry_t *
122qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
123{
124	cont_a64_entry_t *cont_pkt;
125
126	/* Adjust ring index. */
127	req->ring_index++;
128	if (req->ring_index == req->length) {
129		req->ring_index = 0;
130		req->ring_ptr = req->ring;
131	} else {
132		req->ring_ptr++;
133	}
134
135	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
136
137	/* Load packet defaults. */
138	put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
139			   CONTINUE_A64_TYPE, &cont_pkt->entry_type);
140
141	return (cont_pkt);
142}
143
144inline int
145qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
146{
147	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
148	uint8_t	guard = scsi_host_get_guard(cmd->device->host);
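	/* Guard type(s) advertised for DIX: T10 CRC and/or IP checksum. */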
149
	/* We always use DIF Bundling for best performance */
151	*fw_prot_opts = 0;
152
153	/* Translate SCSI opcode to a protection opcode */
154	switch (scsi_get_prot_op(cmd)) {
155	case SCSI_PROT_READ_STRIP:
156		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
157		break;
158	case SCSI_PROT_WRITE_INSERT:
159		*fw_prot_opts |= PO_MODE_DIF_INSERT;
160		break;
161	case SCSI_PROT_READ_INSERT:
162		*fw_prot_opts |= PO_MODE_DIF_INSERT;
163		break;
164	case SCSI_PROT_WRITE_STRIP:
165		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
166		break;
167	case SCSI_PROT_READ_PASS:
168	case SCSI_PROT_WRITE_PASS:
169		if (guard & SHOST_DIX_GUARD_IP)
170			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
171		else
172			*fw_prot_opts |= PO_MODE_DIF_PASS;
173		break;
174	default:	/* Normal Request */
175		*fw_prot_opts |= PO_MODE_DIF_PASS;
176		break;
177	}
178
179	return scsi_prot_sg_count(cmd);
180}
181
/**
183 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
184 * capable IOCB types.
185 *
186 * @sp: SRB command to process
187 * @cmd_pkt: Command type 2 IOCB
188 * @tot_dsds: Total number of segments to transfer
189 */
190void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
191    uint16_t tot_dsds)
192{
193	uint16_t	avail_dsds;
194	struct dsd32	*cur_dsd;
195	scsi_qla_host_t	*vha;
196	struct scsi_cmnd *cmd;
197	struct scatterlist *sg;
198	int i;
199
200	cmd = GET_CMD_SP(sp);
201
202	/* Update entry type to indicate Command Type 2 IOCB */
203	put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);
204
205	/* No data transfer */
206	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
207		cmd_pkt->byte_count = cpu_to_le32(0);
208		return;
209	}
210
211	vha = sp->vha;
212	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
213
214	/* Three DSDs are available in the Command Type 2 IOCB */
215	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
216	cur_dsd = cmd_pkt->dsd32;
217
218	/* Load data segments */
219	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
220		cont_entry_t *cont_pkt;
221
222		/* Allocate additional continuation packets? */
223		if (avail_dsds == 0) {
224			/*
225			 * Seven DSDs are available in the Continuation
226			 * Type 0 IOCB.
227			 */
228			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
229			cur_dsd = cont_pkt->dsd;
230			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
231		}
232
233		append_dsd32(&cur_dsd, sg);
234		avail_dsds--;
235	}
236}
237
238/**
239 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
240 * capable IOCB types.
241 *
242 * @sp: SRB command to process
243 * @cmd_pkt: Command type 3 IOCB
244 * @tot_dsds: Total number of segments to transfer
245 */
246void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
247    uint16_t tot_dsds)
248{
249	uint16_t	avail_dsds;
250	struct dsd64	*cur_dsd;
251	scsi_qla_host_t	*vha;
252	struct scsi_cmnd *cmd;
253	struct scatterlist *sg;
254	int i;
255
256	cmd = GET_CMD_SP(sp);
257
258	/* Update entry type to indicate Command Type 3 IOCB */
259	put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);
260
261	/* No data transfer */
262	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
263		cmd_pkt->byte_count = cpu_to_le32(0);
264		return;
265	}
266
267	vha = sp->vha;
268	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
269
270	/* Two DSDs are available in the Command Type 3 IOCB */
271	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
272	cur_dsd = cmd_pkt->dsd64;
273
274	/* Load data segments */
275	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
276		cont_a64_entry_t *cont_pkt;
277
278		/* Allocate additional continuation packets? */
279		if (avail_dsds == 0) {
280			/*
281			 * Five DSDs are available in the Continuation
282			 * Type 1 IOCB.
283			 */
284			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
285			cur_dsd = cont_pkt->dsd;
286			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
287		}
288
289		append_dsd64(&cur_dsd, sg);
290		avail_dsds--;
291	}
292}
293
294/*
295 * Find the first handle that is not in use, starting from
296 * req->current_outstanding_cmd + 1. The caller must hold the lock that is
297 * associated with @req.
298 */
299uint32_t qla2xxx_get_next_handle(struct req_que *req)
300{
301	uint32_t index, handle = req->current_outstanding_cmd;
302
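	/*
	 * Handle 0 is reserved as the "no free handle" return value, so the
	 * search wraps from num_outstanding_cmds - 1 back to 1, never to 0.
	 */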
303	for (index = 1; index < req->num_outstanding_cmds; index++) {
304		handle++;
305		if (handle == req->num_outstanding_cmds)
306			handle = 1;
307		if (!req->outstanding_cmds[handle])
308			return handle;
309	}
310
311	return 0;
312}
313
314/**
315 * qla2x00_start_scsi() - Send a SCSI command to the ISP
316 * @sp: command to send to the ISP
317 *
318 * Returns non-zero if a failure occurred, else zero.
319 */
320int
321qla2x00_start_scsi(srb_t *sp)
322{
323	int		nseg;
324	unsigned long   flags;
325	scsi_qla_host_t	*vha;
326	struct scsi_cmnd *cmd;
327	uint32_t	*clr_ptr;
328	uint32_t	handle;
329	cmd_entry_t	*cmd_pkt;
330	uint16_t	cnt;
331	uint16_t	req_cnt;
332	uint16_t	tot_dsds;
333	struct device_reg_2xxx __iomem *reg;
334	struct qla_hw_data *ha;
335	struct req_que *req;
336	struct rsp_que *rsp;
337
338	/* Setup device pointers. */
339	vha = sp->vha;
340	ha = vha->hw;
341	reg = &ha->iobase->isp;
342	cmd = GET_CMD_SP(sp);
343	req = ha->req_q_map[0];
344	rsp = ha->rsp_q_map[0];
345	/* So we know we haven't pci_map'ed anything yet */
346	tot_dsds = 0;
347
348	/* Send marker if required */
349	if (vha->marker_needed != 0) {
350		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
351		    QLA_SUCCESS) {
352			return (QLA_FUNCTION_FAILED);
353		}
354		vha->marker_needed = 0;
355	}
356
357	/* Acquire ring specific lock */
358	spin_lock_irqsave(&ha->hardware_lock, flags);
359
360	handle = qla2xxx_get_next_handle(req);
361	if (handle == 0)
362		goto queuing_error;
363
364	/* Map the sg table so we have an accurate count of sg entries needed */
365	if (scsi_sg_count(cmd)) {
366		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
367		    scsi_sg_count(cmd), cmd->sc_data_direction);
368		if (unlikely(!nseg))
369			goto queuing_error;
370	} else
371		nseg = 0;
372
373	tot_dsds = nseg;
374
375	/* Calculate the number of request entries needed. */
376	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
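	/*
	 * If the cached count of free request entries looks too small,
	 * re-read the OUT pointer from the ISP; requiring req_cnt + 2 free
	 * entries keeps a small gap so the ring never fills completely.
	 */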
377	if (req->cnt < (req_cnt + 2)) {
378		cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg));
379		if (req->ring_index < cnt)
380			req->cnt = cnt - req->ring_index;
381		else
382			req->cnt = req->length -
383			    (req->ring_index - cnt);
384		/* If still no head room then bail out */
385		if (req->cnt < (req_cnt + 2))
386			goto queuing_error;
387	}
388
389	/* Build command packet */
390	req->current_outstanding_cmd = handle;
391	req->outstanding_cmds[handle] = sp;
392	sp->handle = handle;
393	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
394	req->cnt -= req_cnt;
395
396	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
397	cmd_pkt->handle = handle;
398	/* Zero out remaining portion of packet. */
399	clr_ptr = (uint32_t *)cmd_pkt + 2;
400	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
401	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
402
403	/* Set target ID and LUN number*/
404	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
405	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
406	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);
407
408	/* Load SCSI command packet. */
409	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
410	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
411
412	/* Build IOCB segments */
413	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
414
415	/* Set total data segment count. */
416	cmd_pkt->entry_count = (uint8_t)req_cnt;
417	wmb();
418
419	/* Adjust ring index. */
420	req->ring_index++;
421	if (req->ring_index == req->length) {
422		req->ring_index = 0;
423		req->ring_ptr = req->ring;
424	} else
425		req->ring_ptr++;
426
427	sp->flags |= SRB_DMA_VALID;
428
429	/* Set chip new ring index. */
430	wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index);
431	rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */
432
433	/* Manage unprocessed RIO/ZIO commands in response queue. */
434	if (vha->flags.process_response_queue &&
435	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
436		qla2x00_process_response_queue(rsp);
437
438	spin_unlock_irqrestore(&ha->hardware_lock, flags);
439	return (QLA_SUCCESS);
440
441queuing_error:
442	if (tot_dsds)
443		scsi_dma_unmap(cmd);
444
445	spin_unlock_irqrestore(&ha->hardware_lock, flags);
446
447	return (QLA_FUNCTION_FAILED);
448}
449
450/**
451 * qla2x00_start_iocbs() - Execute the IOCB command
452 * @vha: HA context
453 * @req: request queue
454 */
455void
456qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
457{
458	struct qla_hw_data *ha = vha->hw;
459	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
460
461	if (IS_P3P_TYPE(ha)) {
462		qla82xx_start_iocbs(vha);
463	} else {
464		/* Adjust ring index. */
465		req->ring_index++;
466		if (req->ring_index == req->length) {
467			req->ring_index = 0;
468			req->ring_ptr = req->ring;
469		} else
470			req->ring_ptr++;
471
472		/* Set chip new ring index. */
473		if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
474			wrt_reg_dword(req->req_q_in, req->ring_index);
475		} else if (IS_QLA83XX(ha)) {
476			wrt_reg_dword(req->req_q_in, req->ring_index);
477			rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
478		} else if (IS_QLAFX00(ha)) {
479			wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
480			rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
481			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
482		} else if (IS_FWI2_CAPABLE(ha)) {
483			wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
484			rd_reg_dword_relaxed(&reg->isp24.req_q_in);
485		} else {
486			wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
487				req->ring_index);
488			rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
489		}
490	}
491}
492
493/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
495 * @vha: HA context
496 * @qpair: queue pair pointer
497 * @loop_id: loop ID
498 * @lun: LUN
499 * @type: marker modifier
500 *
501 * Can be called from both normal and interrupt context.
502 *
503 * Returns non-zero if a failure occurred, else zero.
504 */
505static int
506__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
507    uint16_t loop_id, uint64_t lun, uint8_t type)
508{
509	mrk_entry_t *mrk;
510	struct mrk_entry_24xx *mrk24 = NULL;
511	struct req_que *req = qpair->req;
512	struct qla_hw_data *ha = vha->hw;
513	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
514
515	mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
516	if (mrk == NULL) {
517		ql_log(ql_log_warn, base_vha, 0x3026,
518		    "Failed to allocate Marker IOCB.\n");
519
520		return (QLA_FUNCTION_FAILED);
521	}
522
523	mrk->entry_type = MARKER_TYPE;
524	mrk->modifier = type;
525	if (type != MK_SYNC_ALL) {
526		if (IS_FWI2_CAPABLE(ha)) {
527			mrk24 = (struct mrk_entry_24xx *) mrk;
528			mrk24->nport_handle = cpu_to_le16(loop_id);
529			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
530			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
531			mrk24->vp_index = vha->vp_idx;
532			mrk24->handle = make_handle(req->id, mrk24->handle);
533		} else {
534			SET_TARGET_ID(ha, mrk->target, loop_id);
535			mrk->lun = cpu_to_le16((uint16_t)lun);
536		}
537	}
538	wmb();
539
540	qla2x00_start_iocbs(vha, req);
541
542	return (QLA_SUCCESS);
543}
544
545int
546qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
547    uint16_t loop_id, uint64_t lun, uint8_t type)
548{
549	int ret;
550	unsigned long flags = 0;
551
552	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
553	ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
554	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
555
556	return (ret);
557}
558
/*
 * qla2x00_issue_marker
 *
 * Issue a marker IOCB.
 * The caller CAN hold the hardware lock, as indicated by the ha_locked
 * parameter. The lock might be released and then reacquired.
 */
566int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
567{
568	if (ha_locked) {
569		if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
570					MK_SYNC_ALL) != QLA_SUCCESS)
571			return QLA_FUNCTION_FAILED;
572	} else {
573		if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
574					MK_SYNC_ALL) != QLA_SUCCESS)
575			return QLA_FUNCTION_FAILED;
576	}
577	vha->marker_needed = 0;
578
579	return QLA_SUCCESS;
580}
581
582static inline int
583qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
584	uint16_t tot_dsds)
585{
586	struct dsd64 *cur_dsd = NULL, *next_dsd;
587	scsi_qla_host_t	*vha;
588	struct qla_hw_data *ha;
589	struct scsi_cmnd *cmd;
590	struct	scatterlist *cur_seg;
591	uint8_t avail_dsds;
592	uint8_t first_iocb = 1;
593	uint32_t dsd_list_len;
594	struct dsd_dma *dsd_ptr;
595	struct ct6_dsd *ctx;
596	struct qla_qpair *qpair = sp->qpair;
597
598	cmd = GET_CMD_SP(sp);
599
	/* Update entry type to indicate Command Type 6 IOCB */
601	put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);
602
603	/* No data transfer */
604	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE ||
605	    tot_dsds == 0) {
606		cmd_pkt->byte_count = cpu_to_le32(0);
607		return 0;
608	}
609
610	vha = sp->vha;
611	ha = vha->hw;
612
613	/* Set transfer direction */
614	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
615		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
616		qpair->counters.output_bytes += scsi_bufflen(cmd);
617		qpair->counters.output_requests++;
618	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
619		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
620		qpair->counters.input_bytes += scsi_bufflen(cmd);
621		qpair->counters.input_requests++;
622	}
623
624	cur_seg = scsi_sglist(cmd);
625	ctx = sp->u.scmd.ct6_ctx;
626
627	while (tot_dsds) {
628		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
629		    QLA_DSDS_PER_IOCB : tot_dsds;
630		tot_dsds -= avail_dsds;
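		/*
		 * Each DSD list holds avail_dsds data descriptors plus one
		 * extra slot used to chain to the next list or to hold the
		 * null terminator, hence the "+ 1" below.
		 */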
631		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
632
633		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
634		    struct dsd_dma, list);
635		next_dsd = dsd_ptr->dsd_addr;
636		list_del(&dsd_ptr->list);
637		ha->gbl_dsd_avail--;
638		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
639		ctx->dsd_use_cnt++;
640		ha->gbl_dsd_inuse++;
641
642		if (first_iocb) {
643			first_iocb = 0;
644			put_unaligned_le64(dsd_ptr->dsd_list_dma,
645					   &cmd_pkt->fcp_dsd.address);
646			cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
647		} else {
648			put_unaligned_le64(dsd_ptr->dsd_list_dma,
649					   &cur_dsd->address);
650			cur_dsd->length = cpu_to_le32(dsd_list_len);
651			cur_dsd++;
652		}
653		cur_dsd = next_dsd;
654		while (avail_dsds) {
655			append_dsd64(&cur_dsd, cur_seg);
656			cur_seg = sg_next(cur_seg);
657			avail_dsds--;
658		}
659	}
660
661	/* Null termination */
662	cur_dsd->address = 0;
663	cur_dsd->length = 0;
664	cur_dsd++;
665	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
666	return 0;
667}
668
/*
 * qla24xx_calc_dsd_lists() - Determine the number of DSD lists required
 * for a Command Type 6 IOCB.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
677static inline uint16_t
678qla24xx_calc_dsd_lists(uint16_t dsds)
679{
680	uint16_t dsd_lists = 0;
681
682	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
683	if (dsds % QLA_DSDS_PER_IOCB)
684		dsd_lists++;
685	return dsd_lists;
686}
687
688
689/**
690 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
691 * IOCB types.
692 *
693 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
695 * @tot_dsds: Total number of segments to transfer
696 * @req: pointer to request queue
697 */
698inline void
699qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
700	uint16_t tot_dsds, struct req_que *req)
701{
702	uint16_t	avail_dsds;
703	struct dsd64	*cur_dsd;
704	scsi_qla_host_t	*vha;
705	struct scsi_cmnd *cmd;
706	struct scatterlist *sg;
707	int i;
708	struct qla_qpair *qpair = sp->qpair;
709
710	cmd = GET_CMD_SP(sp);
711
	/* Update entry type to indicate Command Type 7 IOCB */
713	put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);
714
715	/* No data transfer */
716	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
717		cmd_pkt->byte_count = cpu_to_le32(0);
718		return;
719	}
720
721	vha = sp->vha;
722
723	/* Set transfer direction */
724	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
725		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
726		qpair->counters.output_bytes += scsi_bufflen(cmd);
727		qpair->counters.output_requests++;
728	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
729		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
730		qpair->counters.input_bytes += scsi_bufflen(cmd);
731		qpair->counters.input_requests++;
732	}
733
	/* One DSD is available in the Command Type 7 IOCB */
735	avail_dsds = 1;
736	cur_dsd = &cmd_pkt->dsd;
737
738	/* Load data segments */
739
740	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
741		cont_a64_entry_t *cont_pkt;
742
743		/* Allocate additional continuation packets? */
744		if (avail_dsds == 0) {
745			/*
746			 * Five DSDs are available in the Continuation
747			 * Type 1 IOCB.
748			 */
749			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
750			cur_dsd = cont_pkt->dsd;
751			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
752		}
753
754		append_dsd64(&cur_dsd, sg);
755		avail_dsds--;
756	}
757}
758
759struct fw_dif_context {
760	__le32	ref_tag;
761	__le16	app_tag;
762	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
763	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
764};
765
/*
 * qla24xx_set_t10dif_tags() - Extract the Ref and App tags from the SCSI
 * command and set up the validation/replacement masks in the firmware DIF
 * context.
 */
770static inline void
771qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
772    unsigned int protcnt)
773{
774	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
775
776	switch (scsi_get_prot_type(cmd)) {
777	case SCSI_PROT_DIF_TYPE0:
778		/*
779		 * No check for ql2xenablehba_err_chk, as it would be an
780		 * I/O error if hba tag generation is not done.
781		 */
782		pkt->ref_tag = cpu_to_le32((uint32_t)
783		    (0xffffffff & scsi_get_lba(cmd)));
784
785		if (!qla2x00_hba_err_chk_enabled(sp))
786			break;
787
788		pkt->ref_tag_mask[0] = 0xff;
789		pkt->ref_tag_mask[1] = 0xff;
790		pkt->ref_tag_mask[2] = 0xff;
791		pkt->ref_tag_mask[3] = 0xff;
792		break;
793
	/*
	 * For Type 2 protection: 16 bit GUARD tag, and the 32 bit REF tag
	 * must match the LBA in the CDB plus the block offset N.
	 */
798	case SCSI_PROT_DIF_TYPE2:
799		pkt->app_tag = cpu_to_le16(0);
800		pkt->app_tag_mask[0] = 0x0;
801		pkt->app_tag_mask[1] = 0x0;
802
803		pkt->ref_tag = cpu_to_le32((uint32_t)
804		    (0xffffffff & scsi_get_lba(cmd)));
805
806		if (!qla2x00_hba_err_chk_enabled(sp))
807			break;
808
809		/* enable ALL bytes of the ref tag */
810		pkt->ref_tag_mask[0] = 0xff;
811		pkt->ref_tag_mask[1] = 0xff;
812		pkt->ref_tag_mask[2] = 0xff;
813		pkt->ref_tag_mask[3] = 0xff;
814		break;
815
816	/* For Type 3 protection: 16 bit GUARD only */
817	case SCSI_PROT_DIF_TYPE3:
818		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
819			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
820								0x00;
821		break;
822
	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit APP tag.
	 */
827	case SCSI_PROT_DIF_TYPE1:
828		pkt->ref_tag = cpu_to_le32((uint32_t)
829		    (0xffffffff & scsi_get_lba(cmd)));
830		pkt->app_tag = cpu_to_le16(0);
831		pkt->app_tag_mask[0] = 0x0;
832		pkt->app_tag_mask[1] = 0x0;
833
834		if (!qla2x00_hba_err_chk_enabled(sp))
835			break;
836
837		/* enable ALL bytes of the ref tag */
838		pkt->ref_tag_mask[0] = 0xff;
839		pkt->ref_tag_mask[1] = 0xff;
840		pkt->ref_tag_mask[2] = 0xff;
841		pkt->ref_tag_mask[3] = 0xff;
842		break;
843	}
844}
845
846int
847qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
848	uint32_t *partial)
849{
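	/*
	 * Walk the data scatterlist in blk_sz (protection interval) sized
	 * chunks. *partial is set when the current SG element ends before a
	 * full interval has been consumed, i.e. the interval continues in
	 * the next element.
	 */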
850	struct scatterlist *sg;
851	uint32_t cumulative_partial, sg_len;
852	dma_addr_t sg_dma_addr;
853
854	if (sgx->num_bytes == sgx->tot_bytes)
855		return 0;
856
857	sg = sgx->cur_sg;
858	cumulative_partial = sgx->tot_partial;
859
860	sg_dma_addr = sg_dma_address(sg);
861	sg_len = sg_dma_len(sg);
862
863	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
864
865	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
866		sgx->dma_len = (blk_sz - cumulative_partial);
867		sgx->tot_partial = 0;
868		sgx->num_bytes += blk_sz;
869		*partial = 0;
870	} else {
871		sgx->dma_len = sg_len - sgx->bytes_consumed;
872		sgx->tot_partial += sgx->dma_len;
873		*partial = 1;
874	}
875
876	sgx->bytes_consumed += sgx->dma_len;
877
878	if (sg_len == sgx->bytes_consumed) {
879		sg = sg_next(sg);
880		sgx->num_sg++;
881		sgx->cur_sg = sg;
882		sgx->bytes_consumed = 0;
883	}
884
885	return 1;
886}
887
888int
889qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
890	struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
891{
892	void *next_dsd;
893	uint8_t avail_dsds = 0;
894	uint32_t dsd_list_len;
895	struct dsd_dma *dsd_ptr;
896	struct scatterlist *sg_prot;
897	struct dsd64 *cur_dsd = dsd;
898	uint16_t	used_dsds = tot_dsds;
899	uint32_t	prot_int; /* protection interval */
900	uint32_t	partial;
901	struct qla2_sgx sgx;
902	dma_addr_t	sle_dma;
903	uint32_t	sle_dma_len, tot_prot_dma_len = 0;
904	struct scsi_cmnd *cmd;
905
906	memset(&sgx, 0, sizeof(struct qla2_sgx));
907	if (sp) {
908		cmd = GET_CMD_SP(sp);
909		prot_int = cmd->device->sector_size;
910
911		sgx.tot_bytes = scsi_bufflen(cmd);
912		sgx.cur_sg = scsi_sglist(cmd);
913		sgx.sp = sp;
914
915		sg_prot = scsi_prot_sglist(cmd);
916	} else if (tc) {
917		prot_int      = tc->blk_sz;
918		sgx.tot_bytes = tc->bufflen;
919		sgx.cur_sg    = tc->sg;
920		sg_prot	      = tc->prot_sg;
921	} else {
922		BUG();
923		return 1;
924	}
925
926	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
927
928		sle_dma = sgx.dma_addr;
929		sle_dma_len = sgx.dma_len;
930alloc_and_fill:
931		/* Allocate additional continuation packets? */
932		if (avail_dsds == 0) {
933			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
934					QLA_DSDS_PER_IOCB : used_dsds;
935			dsd_list_len = (avail_dsds + 1) * 12;
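			/*
			 * Each 64-bit DSD is 12 bytes (8-byte address plus
			 * 4-byte length); the "+ 1" above reserves the slot
			 * used to chain to the next list or to hold the null
			 * terminator.
			 */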
936			used_dsds -= avail_dsds;
937
938			/* allocate tracking DS */
939			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
940			if (!dsd_ptr)
941				return 1;
942
943			/* allocate new list */
944			dsd_ptr->dsd_addr = next_dsd =
945			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
946				&dsd_ptr->dsd_list_dma);
947
948			if (!next_dsd) {
949				/*
950				 * Need to cleanup only this dsd_ptr, rest
951				 * will be done by sp_free_dma()
952				 */
953				kfree(dsd_ptr);
954				return 1;
955			}
956
957			if (sp) {
958				list_add_tail(&dsd_ptr->list,
959					      &sp->u.scmd.crc_ctx->dsd_list);
960
961				sp->flags |= SRB_CRC_CTX_DSD_VALID;
962			} else {
963				list_add_tail(&dsd_ptr->list,
964				    &(tc->ctx->dsd_list));
965				*tc->ctx_dsd_alloced = 1;
966			}
967
968
969			/* add new list to cmd iocb or last list */
970			put_unaligned_le64(dsd_ptr->dsd_list_dma,
971					   &cur_dsd->address);
972			cur_dsd->length = cpu_to_le32(dsd_list_len);
973			cur_dsd = next_dsd;
974		}
975		put_unaligned_le64(sle_dma, &cur_dsd->address);
976		cur_dsd->length = cpu_to_le32(sle_dma_len);
977		cur_dsd++;
978		avail_dsds--;
979
980		if (partial == 0) {
981			/* Got a full protection interval */
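			/*
			 * Queue the matching 8-byte DIF tuple from the
			 * protection scatterlist as its own DSD.
			 */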
982			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
983			sle_dma_len = 8;
984
985			tot_prot_dma_len += sle_dma_len;
986			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
987				tot_prot_dma_len = 0;
988				sg_prot = sg_next(sg_prot);
989			}
990
991			partial = 1; /* So as to not re-enter this block */
992			goto alloc_and_fill;
993		}
994	}
995	/* Null termination */
996	cur_dsd->address = 0;
997	cur_dsd->length = 0;
998	cur_dsd++;
999	return 0;
1000}
1001
1002int
1003qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
1004	struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
1005{
1006	void *next_dsd;
1007	uint8_t avail_dsds = 0;
1008	uint32_t dsd_list_len;
1009	struct dsd_dma *dsd_ptr;
1010	struct scatterlist *sg, *sgl;
1011	struct dsd64 *cur_dsd = dsd;
1012	int	i;
1013	uint16_t	used_dsds = tot_dsds;
1014	struct scsi_cmnd *cmd;
1015
1016	if (sp) {
1017		cmd = GET_CMD_SP(sp);
1018		sgl = scsi_sglist(cmd);
1019	} else if (tc) {
1020		sgl = tc->sg;
1021	} else {
1022		BUG();
1023		return 1;
1024	}
1025
1026
1027	for_each_sg(sgl, sg, tot_dsds, i) {
1028		/* Allocate additional continuation packets? */
1029		if (avail_dsds == 0) {
1030			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1031					QLA_DSDS_PER_IOCB : used_dsds;
1032			dsd_list_len = (avail_dsds + 1) * 12;
1033			used_dsds -= avail_dsds;
1034
1035			/* allocate tracking DS */
1036			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1037			if (!dsd_ptr)
1038				return 1;
1039
1040			/* allocate new list */
1041			dsd_ptr->dsd_addr = next_dsd =
1042			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1043				&dsd_ptr->dsd_list_dma);
1044
1045			if (!next_dsd) {
1046				/*
1047				 * Need to cleanup only this dsd_ptr, rest
1048				 * will be done by sp_free_dma()
1049				 */
1050				kfree(dsd_ptr);
1051				return 1;
1052			}
1053
1054			if (sp) {
1055				list_add_tail(&dsd_ptr->list,
1056					      &sp->u.scmd.crc_ctx->dsd_list);
1057
1058				sp->flags |= SRB_CRC_CTX_DSD_VALID;
1059			} else {
1060				list_add_tail(&dsd_ptr->list,
1061				    &(tc->ctx->dsd_list));
1062				*tc->ctx_dsd_alloced = 1;
1063			}
1064
1065			/* add new list to cmd iocb or last list */
1066			put_unaligned_le64(dsd_ptr->dsd_list_dma,
1067					   &cur_dsd->address);
1068			cur_dsd->length = cpu_to_le32(dsd_list_len);
1069			cur_dsd = next_dsd;
1070		}
1071		append_dsd64(&cur_dsd, sg);
1072		avail_dsds--;
1073
1074	}
1075	/* Null termination */
1076	cur_dsd->address = 0;
1077	cur_dsd->length = 0;
1078	cur_dsd++;
1079	return 0;
1080}
1081
1082int
1083qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1084	struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
1085{
1086	struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
1087	struct scatterlist *sg, *sgl;
1088	struct crc_context *difctx = NULL;
1089	struct scsi_qla_host *vha;
1090	uint dsd_list_len;
1091	uint avail_dsds = 0;
1092	uint used_dsds = tot_dsds;
1093	bool dif_local_dma_alloc = false;
1094	bool direction_to_device = false;
1095	int i;
1096
1097	if (sp) {
1098		struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1099
1100		sgl = scsi_prot_sglist(cmd);
1101		vha = sp->vha;
1102		difctx = sp->u.scmd.crc_ctx;
1103		direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
1104		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
1105		  "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
1106			__func__, cmd, difctx, sp);
1107	} else if (tc) {
1108		vha = tc->vha;
1109		sgl = tc->prot_sg;
1110		difctx = tc->ctx;
1111		direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
1112	} else {
1113		BUG();
1114		return 1;
1115	}
1116
1117	ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
1118	    "%s: enter (write=%u)\n", __func__, direction_to_device);
1119
1120	/* if initiator doing write or target doing read */
1121	if (direction_to_device) {
1122		for_each_sg(sgl, sg, tot_dsds, i) {
1123			u64 sle_phys = sg_phys(sg);
1124
1125			/* If SGE addr + len flips bits in upper 32-bits */
1126			if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
1127				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
1128				    "%s: page boundary crossing (phys=%llx len=%x)\n",
1129				    __func__, sle_phys, sg->length);
1130
1131				if (difctx) {
1132					ha->dif_bundle_crossed_pages++;
1133					dif_local_dma_alloc = true;
1134				} else {
1135					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
1136					    vha, 0xe022,
1137					    "%s: difctx pointer is NULL\n",
1138					    __func__);
1139				}
1140				break;
1141			}
1142		}
1143		ha->dif_bundle_writes++;
1144	} else {
1145		ha->dif_bundle_reads++;
1146	}
1147
1148	if (ql2xdifbundlinginternalbuffers)
1149		dif_local_dma_alloc = direction_to_device;
1150
1151	if (dif_local_dma_alloc) {
1152		u32 track_difbundl_buf = 0;
1153		u32 ldma_sg_len = 0;
1154		u8 ldma_needed = 1;
1155
1156		difctx->no_dif_bundl = 0;
1157		difctx->dif_bundl_len = 0;
1158
1159		/* Track DSD buffers */
1160		INIT_LIST_HEAD(&difctx->ldif_dsd_list);
1161		/* Track local DMA buffers */
1162		INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);
1163
1164		for_each_sg(sgl, sg, tot_dsds, i) {
1165			u32 sglen = sg_dma_len(sg);
1166
1167			ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
1168			    "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
1169			    __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
1170			    difctx->dif_bundl_len, ldma_needed);
1171
1172			while (sglen) {
1173				u32 xfrlen = 0;
1174
1175				if (ldma_needed) {
1176					/*
1177					 * Allocate list item to store
1178					 * the DMA buffers
1179					 */
1180					dsd_ptr = kzalloc(sizeof(*dsd_ptr),
1181					    GFP_ATOMIC);
1182					if (!dsd_ptr) {
1183						ql_dbg(ql_dbg_tgt, vha, 0xe024,
1184						    "%s: failed alloc dsd_ptr\n",
1185						    __func__);
1186						return 1;
1187					}
1188					ha->dif_bundle_kallocs++;
1189
1190					/* allocate dma buffer */
1191					dsd_ptr->dsd_addr = dma_pool_alloc
1192						(ha->dif_bundl_pool, GFP_ATOMIC,
1193						 &dsd_ptr->dsd_list_dma);
1194					if (!dsd_ptr->dsd_addr) {
1195						ql_dbg(ql_dbg_tgt, vha, 0xe024,
1196						    "%s: failed alloc ->dsd_ptr\n",
1197						    __func__);
1198						/*
1199						 * need to cleanup only this
1200						 * dsd_ptr rest will be done
1201						 * by sp_free_dma()
1202						 */
1203						kfree(dsd_ptr);
1204						ha->dif_bundle_kallocs--;
1205						return 1;
1206					}
1207					ha->dif_bundle_dma_allocs++;
1208					ldma_needed = 0;
1209					difctx->no_dif_bundl++;
1210					list_add_tail(&dsd_ptr->list,
1211					    &difctx->ldif_dma_hndl_list);
1212				}
1213
1214				/* xfrlen is min of dma pool size and sglen */
1215				xfrlen = (sglen >
1216				   (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
1217				    DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
1218				    sglen;
1219
1220				/* replace with local allocated dma buffer */
1221				sg_pcopy_to_buffer(sgl, sg_nents(sgl),
1222				    dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
1223				    difctx->dif_bundl_len);
1224				difctx->dif_bundl_len += xfrlen;
1225				sglen -= xfrlen;
1226				ldma_sg_len += xfrlen;
1227				if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
1228				    sg_is_last(sg)) {
1229					ldma_needed = 1;
1230					ldma_sg_len = 0;
1231				}
1232			}
1233		}
1234
1235		track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
1236		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
1237		    "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
1238		    difctx->dif_bundl_len, difctx->no_dif_bundl,
1239		    track_difbundl_buf);
1240
1241		if (sp)
1242			sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
1243		else
1244			tc->prot_flags = DIF_BUNDL_DMA_VALID;
1245
1246		list_for_each_entry_safe(dif_dsd, nxt_dsd,
1247		    &difctx->ldif_dma_hndl_list, list) {
1248			u32 sglen = (difctx->dif_bundl_len >
1249			    DIF_BUNDLING_DMA_POOL_SIZE) ?
1250			    DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;
1251
1252			BUG_ON(track_difbundl_buf == 0);
1253
1254			/* Allocate additional continuation packets? */
1255			if (avail_dsds == 0) {
1256				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
1257				    0xe024,
1258				    "%s: adding continuation iocb's\n",
1259				    __func__);
1260				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1261				    QLA_DSDS_PER_IOCB : used_dsds;
1262				dsd_list_len = (avail_dsds + 1) * 12;
1263				used_dsds -= avail_dsds;
1264
1265				/* allocate tracking DS */
1266				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
1267				if (!dsd_ptr) {
1268					ql_dbg(ql_dbg_tgt, vha, 0xe026,
1269					    "%s: failed alloc dsd_ptr\n",
1270					    __func__);
1271					return 1;
1272				}
1273				ha->dif_bundle_kallocs++;
1274
1275				difctx->no_ldif_dsd++;
1276				/* allocate new list */
1277				dsd_ptr->dsd_addr =
1278				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1279					&dsd_ptr->dsd_list_dma);
1280				if (!dsd_ptr->dsd_addr) {
1281					ql_dbg(ql_dbg_tgt, vha, 0xe026,
1282					    "%s: failed alloc ->dsd_addr\n",
1283					    __func__);
1284					/*
1285					 * need to cleanup only this dsd_ptr
1286					 *  rest will be done by sp_free_dma()
1287					 */
1288					kfree(dsd_ptr);
1289					ha->dif_bundle_kallocs--;
1290					return 1;
1291				}
1292				ha->dif_bundle_dma_allocs++;
1293
1294				if (sp) {
1295					list_add_tail(&dsd_ptr->list,
1296					    &difctx->ldif_dsd_list);
1297					sp->flags |= SRB_CRC_CTX_DSD_VALID;
1298				} else {
1299					list_add_tail(&dsd_ptr->list,
1300					    &difctx->ldif_dsd_list);
1301					tc->ctx_dsd_alloced = 1;
1302				}
1303
1304				/* add new list to cmd iocb or last list */
1305				put_unaligned_le64(dsd_ptr->dsd_list_dma,
1306						   &cur_dsd->address);
1307				cur_dsd->length = cpu_to_le32(dsd_list_len);
1308				cur_dsd = dsd_ptr->dsd_addr;
1309			}
1310			put_unaligned_le64(dif_dsd->dsd_list_dma,
1311					   &cur_dsd->address);
1312			cur_dsd->length = cpu_to_le32(sglen);
1313			cur_dsd++;
1314			avail_dsds--;
1315			difctx->dif_bundl_len -= sglen;
1316			track_difbundl_buf--;
1317		}
1318
1319		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
1320		    "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
1321			difctx->no_ldif_dsd, difctx->no_dif_bundl);
1322	} else {
1323		for_each_sg(sgl, sg, tot_dsds, i) {
1324			/* Allocate additional continuation packets? */
1325			if (avail_dsds == 0) {
1326				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1327				    QLA_DSDS_PER_IOCB : used_dsds;
1328				dsd_list_len = (avail_dsds + 1) * 12;
1329				used_dsds -= avail_dsds;
1330
1331				/* allocate tracking DS */
1332				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
1333				if (!dsd_ptr) {
1334					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
1335					    vha, 0xe027,
1336					    "%s: failed alloc dsd_dma...\n",
1337					    __func__);
1338					return 1;
1339				}
1340
1341				/* allocate new list */
1342				dsd_ptr->dsd_addr =
1343				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1344					&dsd_ptr->dsd_list_dma);
1345				if (!dsd_ptr->dsd_addr) {
1346					/* need to cleanup only this dsd_ptr */
1347					/* rest will be done by sp_free_dma() */
1348					kfree(dsd_ptr);
1349					return 1;
1350				}
1351
1352				if (sp) {
1353					list_add_tail(&dsd_ptr->list,
1354					    &difctx->dsd_list);
1355					sp->flags |= SRB_CRC_CTX_DSD_VALID;
1356				} else {
1357					list_add_tail(&dsd_ptr->list,
1358					    &difctx->dsd_list);
1359					tc->ctx_dsd_alloced = 1;
1360				}
1361
1362				/* add new list to cmd iocb or last list */
1363				put_unaligned_le64(dsd_ptr->dsd_list_dma,
1364						   &cur_dsd->address);
1365				cur_dsd->length = cpu_to_le32(dsd_list_len);
1366				cur_dsd = dsd_ptr->dsd_addr;
1367			}
1368			append_dsd64(&cur_dsd, sg);
1369			avail_dsds--;
1370		}
1371	}
1372	/* Null termination */
1373	cur_dsd->address = 0;
1374	cur_dsd->length = 0;
1375	cur_dsd++;
1376	return 0;
1377}
1378
1379/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 *							Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type CRC_2 IOCB
1385 * @tot_dsds: Total number of segments to transfer
1386 * @tot_prot_dsds: Total number of segments with protection information
1387 * @fw_prot_opts: Protection options to be passed to firmware
1388 */
1389static inline int
1390qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1391    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1392{
1393	struct dsd64		*cur_dsd;
1394	__be32			*fcp_dl;
1395	scsi_qla_host_t		*vha;
1396	struct scsi_cmnd	*cmd;
1397	uint32_t		total_bytes = 0;
1398	uint32_t		data_bytes;
1399	uint32_t		dif_bytes;
1400	uint8_t			bundling = 1;
1401	uint16_t		blk_size;
1402	struct crc_context	*crc_ctx_pkt = NULL;
1403	struct qla_hw_data	*ha;
1404	uint8_t			additional_fcpcdb_len;
1405	uint16_t		fcp_cmnd_len;
1406	struct fcp_cmnd		*fcp_cmnd;
1407	dma_addr_t		crc_ctx_dma;
1408
1409	cmd = GET_CMD_SP(sp);
1410
1411	/* Update entry type to indicate Command Type CRC_2 IOCB */
1412	put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);
1413
1414	vha = sp->vha;
1415	ha = vha->hw;
1416
1417	/* No data transfer */
1418	data_bytes = scsi_bufflen(cmd);
1419	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1420		cmd_pkt->byte_count = cpu_to_le32(0);
1421		return QLA_SUCCESS;
1422	}
1423
1424	cmd_pkt->vp_index = sp->vha->vp_idx;
1425
1426	/* Set transfer direction */
1427	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1428		cmd_pkt->control_flags =
1429		    cpu_to_le16(CF_WRITE_DATA);
1430	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1431		cmd_pkt->control_flags =
1432		    cpu_to_le16(CF_READ_DATA);
1433	}
1434
1435	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1436	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1437	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1438	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1439		bundling = 0;
1440
1441	/* Allocate CRC context from global pool */
1442	crc_ctx_pkt = sp->u.scmd.crc_ctx =
1443	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1444
1445	if (!crc_ctx_pkt)
1446		goto crc_queuing_error;
1447
1448	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1449
1450	sp->flags |= SRB_CRC_CTX_DMA_VALID;
1451
1452	/* Set handle */
1453	crc_ctx_pkt->handle = cmd_pkt->handle;
1454
1455	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1456
1457	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1458	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1459
1460	put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
1461	cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
1462
1463	/* Determine SCSI command length -- align to 4 byte boundary */
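	/*
	 * fcp_cmnd_len = 12 bytes of fixed FCP_CMND header, the CDB itself,
	 * plus 4 bytes for the FCP_DL field that follows the CDB.
	 */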
1464	if (cmd->cmd_len > 16) {
1465		additional_fcpcdb_len = cmd->cmd_len - 16;
1466		if ((cmd->cmd_len % 4) != 0) {
1467			/* SCSI cmd > 16 bytes must be multiple of 4 */
1468			goto crc_queuing_error;
1469		}
1470		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1471	} else {
1472		additional_fcpcdb_len = 0;
1473		fcp_cmnd_len = 12 + 16 + 4;
1474	}
1475
1476	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1477
1478	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1479	if (cmd->sc_data_direction == DMA_TO_DEVICE)
1480		fcp_cmnd->additional_cdb_len |= 1;
1481	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1482		fcp_cmnd->additional_cdb_len |= 2;
1483
1484	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1485	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1486	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1487	put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
1488			   &cmd_pkt->fcp_cmnd_dseg_address);
1489	fcp_cmnd->task_management = 0;
1490	fcp_cmnd->task_attribute = TSK_SIMPLE;
1491
1492	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1493
	/* Compute dif len and adjust data len to include protection */
1495	dif_bytes = 0;
1496	blk_size = cmd->device->sector_size;
1497	dif_bytes = (data_bytes / blk_size) * 8;
1498
1499	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1500	case SCSI_PROT_READ_INSERT:
1501	case SCSI_PROT_WRITE_STRIP:
1502		total_bytes = data_bytes;
1503		data_bytes += dif_bytes;
1504		break;
1505
1506	case SCSI_PROT_READ_STRIP:
1507	case SCSI_PROT_WRITE_INSERT:
1508	case SCSI_PROT_READ_PASS:
1509	case SCSI_PROT_WRITE_PASS:
1510		total_bytes = data_bytes + dif_bytes;
1511		break;
1512	default:
1513		BUG();
1514	}
1515
1516	if (!qla2x00_hba_err_chk_enabled(sp))
1517		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1518	/* HBA error checking enabled */
1519	else if (IS_PI_UNINIT_CAPABLE(ha)) {
1520		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1521		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1522			SCSI_PROT_DIF_TYPE2))
1523			fw_prot_opts |= BIT_10;
1524		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1525		    SCSI_PROT_DIF_TYPE3)
1526			fw_prot_opts |= BIT_11;
1527	}
1528
1529	if (!bundling) {
1530		cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
1531	} else {
		/*
		 * Configure bundling if we need to fetch the protection
		 * data with interleaving PCI accesses.
		 */
1536		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1537		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1538		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1539							tot_prot_dsds);
1540		cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
1541	}
1542
1543	/* Finish the common fields of CRC pkt */
1544	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1545	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1546	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1547	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
1548	/* Fibre channel byte count */
1549	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1550	fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1551	    additional_fcpcdb_len);
1552	*fcp_dl = htonl(total_bytes);
1553
1554	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1555		cmd_pkt->byte_count = cpu_to_le32(0);
1556		return QLA_SUCCESS;
1557	}
1558	/* Walks data segments */
1559
1560	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1561
1562	if (!bundling && tot_prot_dsds) {
1563		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1564			cur_dsd, tot_dsds, NULL))
1565			goto crc_queuing_error;
1566	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1567			(tot_dsds - tot_prot_dsds), NULL))
1568		goto crc_queuing_error;
1569
1570	if (bundling && tot_prot_dsds) {
1571		/* Walks dif segments */
1572		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1573		cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
1574		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1575				tot_prot_dsds, NULL))
1576			goto crc_queuing_error;
1577	}
1578	return QLA_SUCCESS;
1579
1580crc_queuing_error:
1581	/* Cleanup will be performed by the caller */
1582
1583	return QLA_FUNCTION_FAILED;
1584}
1585
1586/**
1587 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1588 * @sp: command to send to the ISP
1589 *
1590 * Returns non-zero if a failure occurred, else zero.
1591 */
1592int
1593qla24xx_start_scsi(srb_t *sp)
1594{
1595	int		nseg;
1596	unsigned long   flags;
1597	uint32_t	*clr_ptr;
1598	uint32_t	handle;
1599	struct cmd_type_7 *cmd_pkt;
1600	uint16_t	cnt;
1601	uint16_t	req_cnt;
1602	uint16_t	tot_dsds;
1603	struct req_que *req = NULL;
1604	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1605	struct scsi_qla_host *vha = sp->vha;
1606	struct qla_hw_data *ha = vha->hw;
1607
1608	/* Setup device pointers. */
1609	req = vha->req;
1610
1611	/* So we know we haven't pci_map'ed anything yet */
1612	tot_dsds = 0;
1613
1614	/* Send marker if required */
1615	if (vha->marker_needed != 0) {
1616		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1617		    QLA_SUCCESS)
1618			return QLA_FUNCTION_FAILED;
1619		vha->marker_needed = 0;
1620	}
1621
1622	/* Acquire ring specific lock */
1623	spin_lock_irqsave(&ha->hardware_lock, flags);
1624
1625	handle = qla2xxx_get_next_handle(req);
1626	if (handle == 0)
1627		goto queuing_error;
1628
1629	/* Map the sg table so we have an accurate count of sg entries needed */
1630	if (scsi_sg_count(cmd)) {
1631		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1632		    scsi_sg_count(cmd), cmd->sc_data_direction);
1633		if (unlikely(!nseg))
1634			goto queuing_error;
1635	} else
1636		nseg = 0;
1637
1638	tot_dsds = nseg;
1639	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1640
1641	sp->iores.res_type = RESOURCE_INI;
1642	sp->iores.iocb_cnt = req_cnt;
1643	if (qla_get_iocbs(sp->qpair, &sp->iores))
1644		goto queuing_error;
1645
1646	if (req->cnt < (req_cnt + 2)) {
1647		if (IS_SHADOW_REG_CAPABLE(ha)) {
1648			cnt = *req->out_ptr;
1649		} else {
1650			cnt = rd_reg_dword_relaxed(req->req_q_out);
1651			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
1652				goto queuing_error;
1653		}
1654
1655		if (req->ring_index < cnt)
1656			req->cnt = cnt - req->ring_index;
1657		else
1658			req->cnt = req->length -
1659				(req->ring_index - cnt);
1660		if (req->cnt < (req_cnt + 2))
1661			goto queuing_error;
1662	}
1663
1664	/* Build command packet. */
1665	req->current_outstanding_cmd = handle;
1666	req->outstanding_cmds[handle] = sp;
1667	sp->handle = handle;
1668	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1669	req->cnt -= req_cnt;
1670
1671	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1672	cmd_pkt->handle = make_handle(req->id, handle);
1673
1674	/* Zero out remaining portion of packet. */
1675	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1676	clr_ptr = (uint32_t *)cmd_pkt + 2;
1677	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1678	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1679
1680	/* Set NPORT-ID and LUN number*/
1681	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1682	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1683	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1684	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1685	cmd_pkt->vp_index = sp->vha->vp_idx;
1686
1687	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1688	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1689
1690	cmd_pkt->task = TSK_SIMPLE;
1691
1692	/* Load SCSI command packet. */
1693	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1694	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1695
1696	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1697
1698	/* Build IOCB segments */
1699	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1700
1701	/* Set total data segment count. */
1702	cmd_pkt->entry_count = (uint8_t)req_cnt;
1703	wmb();
1704	/* Adjust ring index. */
1705	req->ring_index++;
1706	if (req->ring_index == req->length) {
1707		req->ring_index = 0;
1708		req->ring_ptr = req->ring;
1709	} else
1710		req->ring_ptr++;
1711
1712	sp->flags |= SRB_DMA_VALID;
1713
1714	/* Set chip new ring index. */
1715	wrt_reg_dword(req->req_q_in, req->ring_index);
1716
1717	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1718	return QLA_SUCCESS;
1719
1720queuing_error:
1721	if (tot_dsds)
1722		scsi_dma_unmap(cmd);
1723
1724	qla_put_iocbs(sp->qpair, &sp->iores);
1725	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1726
1727	return QLA_FUNCTION_FAILED;
1728}
1729
1730/**
1731 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1732 * @sp: command to send to the ISP
1733 *
1734 * Returns non-zero if a failure occurred, else zero.
1735 */
1736int
1737qla24xx_dif_start_scsi(srb_t *sp)
1738{
1739	int			nseg;
1740	unsigned long		flags;
1741	uint32_t		*clr_ptr;
1742	uint32_t		handle;
1743	uint16_t		cnt;
1744	uint16_t		req_cnt = 0;
1745	uint16_t		tot_dsds;
1746	uint16_t		tot_prot_dsds;
1747	uint16_t		fw_prot_opts = 0;
1748	struct req_que		*req = NULL;
1749	struct rsp_que		*rsp = NULL;
1750	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
1751	struct scsi_qla_host	*vha = sp->vha;
1752	struct qla_hw_data	*ha = vha->hw;
1753	struct cmd_type_crc_2	*cmd_pkt;
1754	uint32_t		status = 0;
1755
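/* Set in "status" once ring space is claimed so the error path can return it. */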
1756#define QDSS_GOT_Q_SPACE	BIT_0
1757
1758	/* Only process protection or >16 cdb in this routine */
1759	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1760		if (cmd->cmd_len <= 16)
1761			return qla24xx_start_scsi(sp);
1762	}
1763
1764	/* Setup device pointers. */
1765	req = vha->req;
1766	rsp = req->rsp;
1767
1768	/* So we know we haven't pci_map'ed anything yet */
1769	tot_dsds = 0;
1770
1771	/* Send marker if required */
1772	if (vha->marker_needed != 0) {
1773		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1774		    QLA_SUCCESS)
1775			return QLA_FUNCTION_FAILED;
1776		vha->marker_needed = 0;
1777	}
1778
1779	/* Acquire ring specific lock */
1780	spin_lock_irqsave(&ha->hardware_lock, flags);
1781
1782	handle = qla2xxx_get_next_handle(req);
1783	if (handle == 0)
1784		goto queuing_error;
1785
1786	/* Compute number of required data segments */
1787	/* Map the sg table so we have an accurate count of sg entries needed */
1788	if (scsi_sg_count(cmd)) {
1789		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1790		    scsi_sg_count(cmd), cmd->sc_data_direction);
1791		if (unlikely(!nseg))
1792			goto queuing_error;
1793		else
1794			sp->flags |= SRB_DMA_VALID;
1795
1796		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1797		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1798			struct qla2_sgx sgx;
1799			uint32_t	partial;
1800
1801			memset(&sgx, 0, sizeof(struct qla2_sgx));
1802			sgx.tot_bytes = scsi_bufflen(cmd);
1803			sgx.cur_sg = scsi_sglist(cmd);
1804			sgx.sp = sp;
1805
1806			nseg = 0;
1807			while (qla24xx_get_one_block_sg(
1808			    cmd->device->sector_size, &sgx, &partial))
1809				nseg++;
1810		}
1811	} else
1812		nseg = 0;
1813
1814	/* number of required data segments */
1815	tot_dsds = nseg;
1816
1817	/* Compute number of required protection segments */
1818	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1819		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1820		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1821		if (unlikely(!nseg))
1822			goto queuing_error;
1823		else
1824			sp->flags |= SRB_CRC_PROT_DMA_VALID;
1825
1826		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1827		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1828			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1829		}
1830	} else {
1831		nseg = 0;
1832	}
1833
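	/*
	 * Only a single CRC_2 request-ring entry is consumed: the data and
	 * protection DSDs are carried in DMA-pool DSD lists rather than in
	 * ring continuation entries.
	 */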
1834	req_cnt = 1;
1835	/* Total Data and protection sg segment(s) */
1836	tot_prot_dsds = nseg;
1837	tot_dsds += nseg;
1838
1839	sp->iores.res_type = RESOURCE_INI;
1840	sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1841	if (qla_get_iocbs(sp->qpair, &sp->iores))
1842		goto queuing_error;
1843
1844	if (req->cnt < (req_cnt + 2)) {
1845		if (IS_SHADOW_REG_CAPABLE(ha)) {
1846			cnt = *req->out_ptr;
1847		} else {
1848			cnt = rd_reg_dword_relaxed(req->req_q_out);
1849			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
1850				goto queuing_error;
1851		}
1852		if (req->ring_index < cnt)
1853			req->cnt = cnt - req->ring_index;
1854		else
1855			req->cnt = req->length -
1856				(req->ring_index - cnt);
1857		if (req->cnt < (req_cnt + 2))
1858			goto queuing_error;
1859	}
1860
1861	status |= QDSS_GOT_Q_SPACE;
1862
1863	/* Build header part of command packet (excluding the OPCODE). */
1864	req->current_outstanding_cmd = handle;
1865	req->outstanding_cmds[handle] = sp;
1866	sp->handle = handle;
1867	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1868	req->cnt -= req_cnt;
1869
1870	/* Fill-in common area */
1871	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1872	cmd_pkt->handle = make_handle(req->id, handle);
1873
1874	clr_ptr = (uint32_t *)cmd_pkt + 2;
1875	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1876
1877	/* Set NPORT-ID and LUN number*/
1878	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1879	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1880	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1881	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1882
1883	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1884	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1885
1886	/* Total Data and protection segment(s) */
1887	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1888
1889	/* Build IOCB segments and adjust for data protection segments */
1890	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1891	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1892		QLA_SUCCESS)
1893		goto queuing_error;
1894
1895	cmd_pkt->entry_count = (uint8_t)req_cnt;
1896	/* Specify response queue number where completion should happen */
1897	cmd_pkt->entry_status = (uint8_t) rsp->id;
1898	cmd_pkt->timeout = cpu_to_le16(0);
1899	wmb();
1900
1901	/* Adjust ring index. */
1902	req->ring_index++;
1903	if (req->ring_index == req->length) {
1904		req->ring_index = 0;
1905		req->ring_ptr = req->ring;
1906	} else
1907		req->ring_ptr++;
1908
1909	/* Set chip new ring index. */
1910	wrt_reg_dword(req->req_q_in, req->ring_index);
1911
1912	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1913
1914	return QLA_SUCCESS;
1915
1916queuing_error:
1917	if (status & QDSS_GOT_Q_SPACE) {
1918		req->outstanding_cmds[handle] = NULL;
1919		req->cnt += req_cnt;
1920	}
1921	/* Cleanup will be performed by the caller (queuecommand) */
1922
1923	qla_put_iocbs(sp->qpair, &sp->iores);
1924	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1925
1926	return QLA_FUNCTION_FAILED;
1927}
1928
1929/**
1930 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1931 * @sp: command to send to the ISP
1932 *
1933 * Returns non-zero if a failure occurred, else zero.
1934 */
1935static int
1936qla2xxx_start_scsi_mq(srb_t *sp)
1937{
1938	int		nseg;
1939	unsigned long   flags;
1940	uint32_t	*clr_ptr;
1941	uint32_t	handle;
1942	struct cmd_type_7 *cmd_pkt;
1943	uint16_t	cnt;
1944	uint16_t	req_cnt;
1945	uint16_t	tot_dsds;
1946	struct req_que *req = NULL;
1947	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1948	struct scsi_qla_host *vha = sp->fcport->vha;
1949	struct qla_hw_data *ha = vha->hw;
1950	struct qla_qpair *qpair = sp->qpair;
1951
1952	/* Acquire qpair specific lock */
1953	spin_lock_irqsave(&qpair->qp_lock, flags);
1954
1955	/* Setup qpair pointers */
1956	req = qpair->req;
1957
1958	/* So we know we haven't pci_map'ed anything yet */
1959	tot_dsds = 0;
1960
1961	/* Send marker if required */
1962	if (vha->marker_needed != 0) {
1963		if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
1964		    QLA_SUCCESS) {
1965			spin_unlock_irqrestore(&qpair->qp_lock, flags);
1966			return QLA_FUNCTION_FAILED;
1967		}
1968		vha->marker_needed = 0;
1969	}
1970
1971	handle = qla2xxx_get_next_handle(req);
1972	if (handle == 0)
1973		goto queuing_error;
1974
1975	/* Map the sg table so we have an accurate count of sg entries needed */
1976	if (scsi_sg_count(cmd)) {
1977		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1978		    scsi_sg_count(cmd), cmd->sc_data_direction);
1979		if (unlikely(!nseg))
1980			goto queuing_error;
1981	} else
1982		nseg = 0;
1983
1984	tot_dsds = nseg;
1985	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1986
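	/*
	 * Reserve the IOCB slots against the queue pair's budget up front;
	 * they are handed back via qla_put_iocbs() on the queuing_error path.
	 */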
1987	sp->iores.res_type = RESOURCE_INI;
1988	sp->iores.iocb_cnt = req_cnt;
1989	if (qla_get_iocbs(sp->qpair, &sp->iores))
1990		goto queuing_error;
1991
1992	if (req->cnt < (req_cnt + 2)) {
1993		if (IS_SHADOW_REG_CAPABLE(ha)) {
1994			cnt = *req->out_ptr;
1995		} else {
1996			cnt = rd_reg_dword_relaxed(req->req_q_out);
1997			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
1998				goto queuing_error;
1999		}
2000
2001		if (req->ring_index < cnt)
2002			req->cnt = cnt - req->ring_index;
2003		else
2004			req->cnt = req->length -
2005				(req->ring_index - cnt);
2006		if (req->cnt < (req_cnt + 2))
2007			goto queuing_error;
2008	}
2009
2010	/* Build command packet. */
2011	req->current_outstanding_cmd = handle;
2012	req->outstanding_cmds[handle] = sp;
2013	sp->handle = handle;
2014	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2015	req->cnt -= req_cnt;
2016
2017	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
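	/*
	 * The firmware echoes the handle in the status IOCB; the upper word
	 * carries the request queue id so the completion path can locate the
	 * owning queue.
	 */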
2018	cmd_pkt->handle = make_handle(req->id, handle);
2019
2020	/* Zero out remaining portion of packet. */
2021	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
2022	clr_ptr = (uint32_t *)cmd_pkt + 2;
2023	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2024	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2025
2026	/* Set NPORT-ID and LUN number*/
2027	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2028	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2029	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2030	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2031	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2032
2033	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2034	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2035
2036	cmd_pkt->task = TSK_SIMPLE;
2037
2038	/* Load SCSI command packet. */
2039	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2040	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2041
2042	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2043
2044	/* Build IOCB segments */
2045	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
2046
2047	/* Set total data segment count. */
2048	cmd_pkt->entry_count = (uint8_t)req_cnt;
2049	wmb();
2050	/* Adjust ring index. */
2051	req->ring_index++;
2052	if (req->ring_index == req->length) {
2053		req->ring_index = 0;
2054		req->ring_ptr = req->ring;
2055	} else
2056		req->ring_ptr++;
2057
2058	sp->flags |= SRB_DMA_VALID;
2059
2060	/* Set chip new ring index. */
2061	wrt_reg_dword(req->req_q_in, req->ring_index);
2062
2063	spin_unlock_irqrestore(&qpair->qp_lock, flags);
2064	return QLA_SUCCESS;
2065
2066queuing_error:
2067	if (tot_dsds)
2068		scsi_dma_unmap(cmd);
2069
2070	qla_put_iocbs(sp->qpair, &sp->iores);
2071	spin_unlock_irqrestore(&qpair->qp_lock, flags);
2072
2073	return QLA_FUNCTION_FAILED;
2074}
2075
2077/**
2078 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
2079 * @sp: command to send to the ISP
2080 *
2081 * Returns non-zero if a failure occurred, else zero.
2082 */
2083int
2084qla2xxx_dif_start_scsi_mq(srb_t *sp)
2085{
2086	int			nseg;
2087	unsigned long		flags;
2088	uint32_t		*clr_ptr;
2089	uint32_t		handle;
2090	uint16_t		cnt;
2091	uint16_t		req_cnt = 0;
2092	uint16_t		tot_dsds;
2093	uint16_t		tot_prot_dsds;
2094	uint16_t		fw_prot_opts = 0;
2095	struct req_que		*req = NULL;
2096	struct rsp_que		*rsp = NULL;
2097	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
2098	struct scsi_qla_host	*vha = sp->fcport->vha;
2099	struct qla_hw_data	*ha = vha->hw;
2100	struct cmd_type_crc_2	*cmd_pkt;
2101	uint32_t		status = 0;
2102	struct qla_qpair	*qpair = sp->qpair;
2103
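/* Set once ring space has been claimed so queuing_error can return it. */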
2104#define QDSS_GOT_Q_SPACE	BIT_0
2105
2106	/* Check for host side state */
2107	if (!qpair->online) {
2108		cmd->result = DID_NO_CONNECT << 16;
2109		return QLA_INTERFACE_ERROR;
2110	}
2111
2112	if (!qpair->difdix_supported &&
2113		scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2114		cmd->result = DID_NO_CONNECT << 16;
2115		return QLA_INTERFACE_ERROR;
2116	}
2117
2118	/* Only process protection or >16 cdb in this routine */
2119	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
2120		if (cmd->cmd_len <= 16)
2121			return qla2xxx_start_scsi_mq(sp);
2122	}
2123
2124	spin_lock_irqsave(&qpair->qp_lock, flags);
2125
2126	/* Setup qpair pointers */
2127	rsp = qpair->rsp;
2128	req = qpair->req;
2129
2130	/* So we know we haven't pci_map'ed anything yet */
2131	tot_dsds = 0;
2132
2133	/* Send marker if required */
2134	if (vha->marker_needed != 0) {
2135		if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
2136		    QLA_SUCCESS) {
2137			spin_unlock_irqrestore(&qpair->qp_lock, flags);
2138			return QLA_FUNCTION_FAILED;
2139		}
2140		vha->marker_needed = 0;
2141	}
2142
2143	handle = qla2xxx_get_next_handle(req);
2144	if (handle == 0)
2145		goto queuing_error;
2146
2147	/* Compute number of required data segments */
2148	/* Map the sg table so we have an accurate count of sg entries needed */
2149	if (scsi_sg_count(cmd)) {
2150		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2151		    scsi_sg_count(cmd), cmd->sc_data_direction);
2152		if (unlikely(!nseg))
2153			goto queuing_error;
2154		else
2155			sp->flags |= SRB_DMA_VALID;
2156
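		/*
		 * When the HBA inserts or strips the protection data itself,
		 * no data segment may span a logical block boundary, so
		 * re-walk the SG list one sector-sized block at a time to
		 * obtain the DSD count that is actually needed.
		 */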
2157		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2158		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2159			struct qla2_sgx sgx;
2160			uint32_t	partial;
2161
2162			memset(&sgx, 0, sizeof(struct qla2_sgx));
2163			sgx.tot_bytes = scsi_bufflen(cmd);
2164			sgx.cur_sg = scsi_sglist(cmd);
2165			sgx.sp = sp;
2166
2167			nseg = 0;
2168			while (qla24xx_get_one_block_sg(
2169			    cmd->device->sector_size, &sgx, &partial))
2170				nseg++;
2171		}
2172	} else
2173		nseg = 0;
2174
2175	/* number of required data segments */
2176	tot_dsds = nseg;
2177
2178	/* Compute number of required protection segments */
2179	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2180		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2181		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2182		if (unlikely(!nseg))
2183			goto queuing_error;
2184		else
2185			sp->flags |= SRB_CRC_PROT_DMA_VALID;
2186
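		/*
		 * For insert/strip operations the number of protection
		 * intervals simply equals the number of logical blocks
		 * being transferred.
		 */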
2187		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2188		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2189			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2190		}
2191	} else {
2192		nseg = 0;
2193	}
2194
2195	req_cnt = 1;
2196	/* Total Data and protection sg segment(s) */
2197	tot_prot_dsds = nseg;
2198	tot_dsds += nseg;
2199
2200	sp->iores.res_type = RESOURCE_INI;
2201	sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2202	if (qla_get_iocbs(sp->qpair, &sp->iores))
2203		goto queuing_error;
2204
2205	if (req->cnt < (req_cnt + 2)) {
2206		if (IS_SHADOW_REG_CAPABLE(ha)) {
2207			cnt = *req->out_ptr;
2208		} else {
2209			cnt = rd_reg_dword_relaxed(req->req_q_out);
2210			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
2211				goto queuing_error;
2212		}
2213
2214		if (req->ring_index < cnt)
2215			req->cnt = cnt - req->ring_index;
2216		else
2217			req->cnt = req->length -
2218				(req->ring_index - cnt);
2219		if (req->cnt < (req_cnt + 2))
2220			goto queuing_error;
2221	}
2222
2223	status |= QDSS_GOT_Q_SPACE;
2224
2225	/* Build header part of command packet (excluding the OPCODE). */
2226	req->current_outstanding_cmd = handle;
2227	req->outstanding_cmds[handle] = sp;
2228	sp->handle = handle;
2229	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2230	req->cnt -= req_cnt;
2231
2232	/* Fill-in common area */
2233	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2234	cmd_pkt->handle = make_handle(req->id, handle);
2235
2236	clr_ptr = (uint32_t *)cmd_pkt + 2;
2237	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2238
2239	/* Set NPORT-ID and LUN number*/
2240	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2241	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2242	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2243	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2244
2245	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2246	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2247
2248	/* Total Data and protection segment(s) */
2249	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2250
2251	/* Build IOCB segments and adjust for data protection segments */
2252	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2253	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2254		QLA_SUCCESS)
2255		goto queuing_error;
2256
2257	cmd_pkt->entry_count = (uint8_t)req_cnt;
2258	cmd_pkt->timeout = cpu_to_le16(0);
2259	wmb();
2260
2261	/* Adjust ring index. */
2262	req->ring_index++;
2263	if (req->ring_index == req->length) {
2264		req->ring_index = 0;
2265		req->ring_ptr = req->ring;
2266	} else
2267		req->ring_ptr++;
2268
2269	/* Set chip new ring index. */
2270	wrt_reg_dword(req->req_q_in, req->ring_index);
2271
2272	/* Manage unprocessed RIO/ZIO commands in response queue. */
2273	if (vha->flags.process_response_queue &&
2274	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2275		qla24xx_process_response_queue(vha, rsp);
2276
2277	spin_unlock_irqrestore(&qpair->qp_lock, flags);
2278
2279	return QLA_SUCCESS;
2280
2281queuing_error:
2282	if (status & QDSS_GOT_Q_SPACE) {
2283		req->outstanding_cmds[handle] = NULL;
2284		req->cnt += req_cnt;
2285	}
2286	/* Cleanup will be performed by the caller (queuecommand) */
2287
2288	qla_put_iocbs(sp->qpair, &sp->iores);
2289	spin_unlock_irqrestore(&qpair->qp_lock, flags);
2290
2291	return QLA_FUNCTION_FAILED;
2292}
2293
2294/* Generic Control-SRB manipulation functions. */
2295
2296/* hardware_lock assumed to be held. */
2297
2298void *
2299__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2300{
2301	scsi_qla_host_t *vha = qpair->vha;
2302	struct qla_hw_data *ha = vha->hw;
2303	struct req_que *req = qpair->req;
2304	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2305	uint32_t handle;
2306	request_t *pkt;
2307	uint16_t cnt, req_cnt;
2308
2309	pkt = NULL;
2310	req_cnt = 1;
2311	handle = 0;
2312
2313	if (sp && (sp->type != SRB_SCSI_CMD)) {
2314		/* Adjust entry-counts as needed. */
2315		req_cnt = sp->iocbs;
2316	}
2317
2318	/* Check for room on request queue. */
2319	if (req->cnt < req_cnt + 2) {
2320		if (qpair->use_shadow_reg)
2321			cnt = *req->out_ptr;
2322		else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2323		    IS_QLA28XX(ha))
2324			cnt = rd_reg_dword(&reg->isp25mq.req_q_out);
2325		else if (IS_P3P_TYPE(ha))
2326			cnt = rd_reg_dword(reg->isp82.req_q_out);
2327		else if (IS_FWI2_CAPABLE(ha))
2328			cnt = rd_reg_dword(&reg->isp24.req_q_out);
2329		else if (IS_QLAFX00(ha))
2330			cnt = rd_reg_dword(&reg->ispfx00.req_q_out);
2331		else
2332			cnt = qla2x00_debounce_register(
2333			    ISP_REQ_Q_OUT(ha, &reg->isp));
2334
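		/*
		 * An all-ones register read usually means the adapter has
		 * dropped off the PCI bus; schedule EEH recovery instead of
		 * queuing the IOCB.
		 */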
2335		if (!qpair->use_shadow_reg && cnt == ISP_REG16_DISCONNECT) {
2336			qla_schedule_eeh_work(vha);
2337			return NULL;
2338		}
2339
2340		if (req->ring_index < cnt)
2341			req->cnt = cnt - req->ring_index;
2342		else
2343			req->cnt = req->length -
2344			    (req->ring_index - cnt);
2345	}
2346	if (req->cnt < req_cnt + 2)
2347		goto queuing_error;
2348
2349	if (sp) {
2350		handle = qla2xxx_get_next_handle(req);
2351		if (handle == 0) {
2352			ql_log(ql_log_warn, vha, 0x700b,
2353			    "No room on outstanding cmd array.\n");
2354			goto queuing_error;
2355		}
2356
2357		/* Prep command array. */
2358		req->current_outstanding_cmd = handle;
2359		req->outstanding_cmds[handle] = sp;
2360		sp->handle = handle;
2361	}
2362
2363	/* Prep packet */
2364	req->cnt -= req_cnt;
2365	pkt = req->ring_ptr;
2366	memset(pkt, 0, REQUEST_ENTRY_SIZE);
2367	if (IS_QLAFX00(ha)) {
2368		wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt);
2369		wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle);
2370	} else {
2371		pkt->entry_count = req_cnt;
2372		pkt->handle = handle;
2373	}
2374
2375	return pkt;
2376
2377queuing_error:
2378	qpair->tgt_counters.num_alloc_iocb_failed++;
2379	return pkt;
2380}
2381
2382void *
2383qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2384{
2385	scsi_qla_host_t *vha = qpair->vha;
2386
2387	if (qla2x00_reset_active(vha))
2388		return NULL;
2389
2390	return __qla2x00_alloc_iocbs(qpair, sp);
2391}
2392
2393void *
2394qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2395{
2396	return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2397}
2398
2399static void
2400qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2401{
2402	struct srb_iocb *lio = &sp->u.iocb_cmd;
2403
2404	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2405	logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2406	if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
2407		logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
2408		if (sp->vha->flags.nvme_first_burst)
2409			logio->io_parameter[0] =
2410				cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
2411		if (sp->vha->flags.nvme2_enabled) {
2412			/* Set service parameter BIT_8 for SLER support */
2413			logio->io_parameter[0] |=
2414				cpu_to_le32(NVME_PRLI_SP_SLER);
2415			/* Set service parameter BIT_9 for PI control support */
2416			logio->io_parameter[0] |=
2417				cpu_to_le32(NVME_PRLI_SP_PI_CTRL);
2418		}
2419	}
2420
2421	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2422	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2423	logio->port_id[1] = sp->fcport->d_id.b.area;
2424	logio->port_id[2] = sp->fcport->d_id.b.domain;
2425	logio->vp_index = sp->vha->vp_idx;
2426}
2427
2428static void
2429qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2430{
2431	struct srb_iocb *lio = &sp->u.iocb_cmd;
2432
2433	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2434	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2435
2436	if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2437		logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2438	} else {
2439		logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2440		if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2441			logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2442		if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2443			logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2444	}
2445	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2446	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2447	logio->port_id[1] = sp->fcport->d_id.b.area;
2448	logio->port_id[2] = sp->fcport->d_id.b.domain;
2449	logio->vp_index = sp->vha->vp_idx;
2450}
2451
2452static void
2453qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2454{
2455	struct qla_hw_data *ha = sp->vha->hw;
2456	struct srb_iocb *lio = &sp->u.iocb_cmd;
2457	uint16_t opts;
2458
2459	mbx->entry_type = MBX_IOCB_TYPE;
2460	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2461	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2462	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2463	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2464	if (HAS_EXTENDED_IDS(ha)) {
2465		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2466		mbx->mb10 = cpu_to_le16(opts);
2467	} else {
2468		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2469	}
2470	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2471	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2472	    sp->fcport->d_id.b.al_pa);
2473	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2474}
2475
2476static void
2477qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2478{
2479	u16 control_flags = LCF_COMMAND_LOGO;
2480	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2481
2482	if (sp->fcport->explicit_logout) {
2483		control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT;
2484	} else {
2485		control_flags |= LCF_IMPL_LOGO;
2486
2487		if (!sp->fcport->keep_nport_handle)
2488			control_flags |= LCF_FREE_NPORT;
2489	}
2490
2491	logio->control_flags = cpu_to_le16(control_flags);
2492	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2493	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2494	logio->port_id[1] = sp->fcport->d_id.b.area;
2495	logio->port_id[2] = sp->fcport->d_id.b.domain;
2496	logio->vp_index = sp->vha->vp_idx;
2497}
2498
2499static void
2500qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2501{
2502	struct qla_hw_data *ha = sp->vha->hw;
2503
2504	mbx->entry_type = MBX_IOCB_TYPE;
2505	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2506	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2507	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2508	    cpu_to_le16(sp->fcport->loop_id) :
2509	    cpu_to_le16(sp->fcport->loop_id << 8);
2510	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2511	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2512	    sp->fcport->d_id.b.al_pa);
2513	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2514	/* Implicit: mbx->mb10 = 0. */
2515}
2516
2517static void
2518qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2519{
2520	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2521	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2522	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2523	logio->vp_index = sp->vha->vp_idx;
2524}
2525
2526static void
2527qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2528{
2529	struct qla_hw_data *ha = sp->vha->hw;
2530
2531	mbx->entry_type = MBX_IOCB_TYPE;
2532	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2533	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2534	if (HAS_EXTENDED_IDS(ha)) {
2535		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2536		mbx->mb10 = cpu_to_le16(BIT_0);
2537	} else {
2538		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2539	}
2540	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2541	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2542	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2543	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2544	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2545}
2546
2547static void
2548qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2549{
2550	uint32_t flags;
2551	uint64_t lun;
2552	struct fc_port *fcport = sp->fcport;
2553	scsi_qla_host_t *vha = fcport->vha;
2554	struct qla_hw_data *ha = vha->hw;
2555	struct srb_iocb *iocb = &sp->u.iocb_cmd;
2556	struct req_que *req = vha->req;
2557
2558	flags = iocb->u.tmf.flags;
2559	lun = iocb->u.tmf.lun;
2560
2561	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2562	tsk->entry_count = 1;
2563	tsk->handle = make_handle(req->id, tsk->handle);
2564	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2565	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2566	tsk->control_flags = cpu_to_le32(flags);
2567	tsk->port_id[0] = fcport->d_id.b.al_pa;
2568	tsk->port_id[1] = fcport->d_id.b.area;
2569	tsk->port_id[2] = fcport->d_id.b.domain;
2570	tsk->vp_index = fcport->vha->vp_idx;
2571
2572	if (flags == TCF_LUN_RESET) {
2573		int_to_scsilun(lun, &tsk->lun);
2574		host_to_fcp_swap((uint8_t *)&tsk->lun,
2575			sizeof(tsk->lun));
2576	}
2577}
2578
2579void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
2580{
2581	timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
2582	sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
2583	sp->free = qla2x00_sp_free;
2584	if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
2585		init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
2586	sp->start_timer = 1;
2587}
2588
2589static void qla2x00_els_dcmd_sp_free(srb_t *sp)
2590{
2591	struct srb_iocb *elsio = &sp->u.iocb_cmd;
2592
2593	kfree(sp->fcport);
2594
2595	if (elsio->u.els_logo.els_logo_pyld)
2596		dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2597		    elsio->u.els_logo.els_logo_pyld,
2598		    elsio->u.els_logo.els_logo_pyld_dma);
2599
2600	del_timer(&elsio->timer);
2601	qla2x00_rel_sp(sp);
2602}
2603
2604static void
2605qla2x00_els_dcmd_iocb_timeout(void *data)
2606{
2607	srb_t *sp = data;
2608	fc_port_t *fcport = sp->fcport;
2609	struct scsi_qla_host *vha = sp->vha;
2610	struct srb_iocb *lio = &sp->u.iocb_cmd;
2611	unsigned long flags = 0;
2612	int res, h;
2613
2614	ql_dbg(ql_dbg_io, vha, 0x3069,
2615	    "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2616	    sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2617	    fcport->d_id.b.al_pa);
2618
2619	/* Abort the exchange */
2620	res = qla24xx_async_abort_cmd(sp, false);
2621	if (res) {
2622		ql_dbg(ql_dbg_io, vha, 0x3070,
2623		    "mbx abort_command failed.\n");
2624		spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2625		for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2626			if (sp->qpair->req->outstanding_cmds[h] == sp) {
2627				sp->qpair->req->outstanding_cmds[h] = NULL;
2628				break;
2629			}
2630		}
2631		spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2632		complete(&lio->u.els_logo.comp);
2633	} else {
2634		ql_dbg(ql_dbg_io, vha, 0x3071,
2635		    "mbx abort_command success.\n");
2636	}
2637}
2638
2639static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
2640{
2641	fc_port_t *fcport = sp->fcport;
2642	struct srb_iocb *lio = &sp->u.iocb_cmd;
2643	struct scsi_qla_host *vha = sp->vha;
2644
2645	ql_dbg(ql_dbg_io, vha, 0x3072,
2646	    "%s hdl=%x, portid=%02x%02x%02x done\n",
2647	    sp->name, sp->handle, fcport->d_id.b.domain,
2648	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
2649
2650	complete(&lio->u.els_logo.comp);
2651}
2652
2653int
2654qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2655    port_id_t remote_did)
2656{
2657	srb_t *sp;
2658	fc_port_t *fcport = NULL;
2659	struct srb_iocb *elsio = NULL;
2660	struct qla_hw_data *ha = vha->hw;
2661	struct els_logo_payload logo_pyld;
2662	int rval = QLA_SUCCESS;
2663
2664	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2665	if (!fcport) {
2666		ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2667		return -ENOMEM;
2668	}
2669
2670	/* Alloc SRB structure */
2671	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2672	if (!sp) {
2673		kfree(fcport);
2674		ql_log(ql_log_info, vha, 0x70e6,
2675		    "SRB allocation failed\n");
2676		return -ENOMEM;
2677	}
2678
2679	elsio = &sp->u.iocb_cmd;
2680	fcport->loop_id = 0xFFFF;
2681	fcport->d_id.b.domain = remote_did.b.domain;
2682	fcport->d_id.b.area = remote_did.b.area;
2683	fcport->d_id.b.al_pa = remote_did.b.al_pa;
2684
2685	ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2686	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2687
2688	sp->type = SRB_ELS_DCMD;
2689	sp->name = "ELS_DCMD";
2690	sp->fcport = fcport;
2691	elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2692	qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2693	init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2694	sp->done = qla2x00_els_dcmd_sp_done;
2695	sp->free = qla2x00_els_dcmd_sp_free;
2696
2697	elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2698			    DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2699			    GFP_KERNEL);
2700
2701	if (!elsio->u.els_logo.els_logo_pyld) {
2702		sp->free(sp);
2703		return QLA_FUNCTION_FAILED;
2704	}
2705
2706	memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2707
2708	elsio->u.els_logo.els_cmd = els_opcode;
2709	logo_pyld.opcode = els_opcode;
2710	logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2711	logo_pyld.s_id[1] = vha->d_id.b.area;
2712	logo_pyld.s_id[2] = vha->d_id.b.domain;
2713	host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2714	memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2715
2716	memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2717	    sizeof(struct els_logo_payload));
2718	ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:");
2719	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a,
2720		       elsio->u.els_logo.els_logo_pyld,
2721		       sizeof(*elsio->u.els_logo.els_logo_pyld));
2722
2723	rval = qla2x00_start_sp(sp);
2724	if (rval != QLA_SUCCESS) {
2725		sp->free(sp);
2726		return QLA_FUNCTION_FAILED;
2727	}
2728
2729	ql_dbg(ql_dbg_io, vha, 0x3074,
2730	    "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2731	    sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2732	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
2733
2734	wait_for_completion(&elsio->u.els_logo.comp);
2735
2736	sp->free(sp);
2737	return rval;
2738}
2739
2740static void
2741qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2742{
2743	scsi_qla_host_t *vha = sp->vha;
2744	struct srb_iocb *elsio = &sp->u.iocb_cmd;
2745
2746	els_iocb->entry_type = ELS_IOCB_TYPE;
2747	els_iocb->entry_count = 1;
2748	els_iocb->sys_define = 0;
2749	els_iocb->entry_status = 0;
2750	els_iocb->handle = sp->handle;
2751	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2752	els_iocb->tx_dsd_count = cpu_to_le16(1);
2753	els_iocb->vp_index = vha->vp_idx;
2754	els_iocb->sof_type = EST_SOFI3;
2755	els_iocb->rx_dsd_count = 0;
2756	els_iocb->opcode = elsio->u.els_logo.els_cmd;
2757
2758	els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
2759	els_iocb->d_id[1] = sp->fcport->d_id.b.area;
2760	els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
2761	/* For the S_ID the byte order is different from the D_ID */
2762	els_iocb->s_id[1] = vha->d_id.b.al_pa;
2763	els_iocb->s_id[2] = vha->d_id.b.area;
2764	els_iocb->s_id[0] = vha->d_id.b.domain;
2765
2766	if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2767		els_iocb->control_flags = 0;
2768		els_iocb->tx_byte_count = els_iocb->tx_len =
2769			cpu_to_le32(sizeof(struct els_plogi_payload));
2770		put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
2771				   &els_iocb->tx_address);
2772		els_iocb->rx_dsd_count = cpu_to_le16(1);
2773		els_iocb->rx_byte_count = els_iocb->rx_len =
2774			cpu_to_le32(sizeof(struct els_plogi_payload));
2775		put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
2776				   &els_iocb->rx_address);
2777
2778		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2779		    "PLOGI ELS IOCB:\n");
2780		ql_dump_buffer(ql_log_info, vha, 0x0109,
2781		    (uint8_t *)els_iocb,
2782		    sizeof(*els_iocb));
2783	} else {
2784		els_iocb->control_flags = cpu_to_le16(1 << 13);
2785		els_iocb->tx_byte_count =
2786			cpu_to_le32(sizeof(struct els_logo_payload));
2787		put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
2788				   &els_iocb->tx_address);
2789		els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2790
2791		els_iocb->rx_byte_count = 0;
2792		els_iocb->rx_address = 0;
2793		els_iocb->rx_len = 0;
2794		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076,
2795		       "LOGO ELS IOCB:");
2796		ql_dump_buffer(ql_log_info, vha, 0x010b,
2797			       els_iocb,
2798			       sizeof(*els_iocb));
2799	}
2800
2801	sp->vha->qla_stats.control_requests++;
2802}
2803
2804static void
2805qla2x00_els_dcmd2_iocb_timeout(void *data)
2806{
2807	srb_t *sp = data;
2808	fc_port_t *fcport = sp->fcport;
2809	struct scsi_qla_host *vha = sp->vha;
2810	unsigned long flags = 0;
2811	int res, h;
2812
2813	ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2814	    "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2815	    sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2816
2817	/* Abort the exchange */
2818	res = qla24xx_async_abort_cmd(sp, false);
2819	ql_dbg(ql_dbg_io, vha, 0x3070,
2820	    "mbx abort_command %s\n",
2821	    (res == QLA_SUCCESS) ? "successful" : "failed");
2822	if (res) {
2823		spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2824		for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2825			if (sp->qpair->req->outstanding_cmds[h] == sp) {
2826				sp->qpair->req->outstanding_cmds[h] = NULL;
2827				break;
2828			}
2829		}
2830		spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2831		sp->done(sp, QLA_FUNCTION_TIMEOUT);
2832	}
2833}
2834
2835void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
2836{
2837	if (els_plogi->els_plogi_pyld)
2838		dma_free_coherent(&vha->hw->pdev->dev,
2839				  els_plogi->tx_size,
2840				  els_plogi->els_plogi_pyld,
2841				  els_plogi->els_plogi_pyld_dma);
2842
2843	if (els_plogi->els_resp_pyld)
2844		dma_free_coherent(&vha->hw->pdev->dev,
2845				  els_plogi->rx_size,
2846				  els_plogi->els_resp_pyld,
2847				  els_plogi->els_resp_pyld_dma);
2848}
2849
2850static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
2851{
2852	fc_port_t *fcport = sp->fcport;
2853	struct srb_iocb *lio = &sp->u.iocb_cmd;
2854	struct scsi_qla_host *vha = sp->vha;
2855	struct event_arg ea;
2856	struct qla_work_evt *e;
2857	struct fc_port *conflict_fcport;
2858	port_id_t cid;	/* conflict Nport id */
2859	const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
2860	u16 lid;
2861
2862	ql_dbg(ql_dbg_disc, vha, 0x3072,
2863	    "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2864	    sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
2865
2866	fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2867	del_timer(&sp->u.iocb_cmd.timer);
2868
2869	if (sp->flags & SRB_WAKEUP_ON_COMP)
2870		complete(&lio->u.els_plogi.comp);
2871	else {
2872		switch (le32_to_cpu(fw_status[0])) {
2873		case CS_DATA_UNDERRUN:
2874		case CS_COMPLETE:
2875			memset(&ea, 0, sizeof(ea));
2876			ea.fcport = fcport;
2877			ea.rc = res;
2878			qla_handle_els_plogi_done(vha, &ea);
2879			break;
2880
2881		case CS_IOCB_ERROR:
2882			switch (le32_to_cpu(fw_status[1])) {
2883			case LSC_SCODE_PORTID_USED:
2884				lid = le32_to_cpu(fw_status[2]) & 0xffff;
2885				qlt_find_sess_invalidate_other(vha,
2886				    wwn_to_u64(fcport->port_name),
2887				    fcport->d_id, lid, &conflict_fcport);
2888				if (conflict_fcport) {
2889					/*
2890					 * Another fcport shares the same
2891					 * loop_id & nport id; conflict
2892					 * fcport needs to finish cleanup
2893					 * before this fcport can proceed
2894					 * to login.
2895					 */
2896					conflict_fcport->conflict = fcport;
2897					fcport->login_pause = 1;
2898					ql_dbg(ql_dbg_disc, vha, 0x20ed,
2899					    "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n",
2900					    __func__, __LINE__,
2901					    fcport->port_name,
2902					    fcport->d_id.b24, lid);
2903				} else {
2904					ql_dbg(ql_dbg_disc, vha, 0x20ed,
2905					    "%s %d %8phC pid %06x inuse with lid %#x sched del\n",
2906					    __func__, __LINE__,
2907					    fcport->port_name,
2908					    fcport->d_id.b24, lid);
2909					qla2x00_clear_loop_id(fcport);
2910					set_bit(lid, vha->hw->loop_id_map);
2911					fcport->loop_id = lid;
2912					fcport->keep_nport_handle = 0;
2913					qlt_schedule_sess_for_deletion(fcport);
2914				}
2915				break;
2916
2917			case LSC_SCODE_NPORT_USED:
2918				cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16)
2919					& 0xff;
2920				cid.b.area   = (le32_to_cpu(fw_status[2]) >>  8)
2921					& 0xff;
2922				cid.b.al_pa  = le32_to_cpu(fw_status[2]) & 0xff;
2923				cid.b.rsvd_1 = 0;
2924
2925				ql_dbg(ql_dbg_disc, vha, 0x20ec,
2926				    "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
2927				    __func__, __LINE__, fcport->port_name,
2928				    fcport->loop_id, cid.b24);
2929				set_bit(fcport->loop_id,
2930				    vha->hw->loop_id_map);
2931				fcport->loop_id = FC_NO_LOOP_ID;
2932				qla24xx_post_gnl_work(vha, fcport);
2933				break;
2934
2935			case LSC_SCODE_NOXCB:
2936				vha->hw->exch_starvation++;
2937				if (vha->hw->exch_starvation > 5) {
2938					ql_log(ql_log_warn, vha, 0xd046,
2939					    "Exchange starvation. Resetting RISC\n");
2940					vha->hw->exch_starvation = 0;
2941					set_bit(ISP_ABORT_NEEDED,
2942					    &vha->dpc_flags);
2943					qla2xxx_wake_dpc(vha);
2944					break;
2945				}
2946				fallthrough;
2947			default:
2948				ql_dbg(ql_dbg_disc, vha, 0x20eb,
2949				    "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
2950				    __func__, sp->fcport->port_name,
2951				    fw_status[0], fw_status[1], fw_status[2]);
2952
2953				fcport->flags &= ~FCF_ASYNC_SENT;
2954				qlt_schedule_sess_for_deletion(fcport);
2955				break;
2956			}
2957			break;
2958
2959		default:
2960			ql_dbg(ql_dbg_disc, vha, 0x20eb,
2961			    "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n",
2962			    __func__, sp->fcport->port_name,
2963			    fw_status[0], fw_status[1], fw_status[2]);
2964
2965			sp->fcport->flags &= ~FCF_ASYNC_SENT;
2966			qlt_schedule_sess_for_deletion(fcport);
2967			break;
2968		}
2969
2970		e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
2971		if (!e) {
2972			struct srb_iocb *elsio = &sp->u.iocb_cmd;
2973
2974			qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
2975			sp->free(sp);
2976			return;
2977		}
2978		e->u.iosb.sp = sp;
2979		qla2x00_post_work(vha, e);
2980	}
2981}
2982
2983int
2984qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2985    fc_port_t *fcport, bool wait)
2986{
2987	srb_t *sp;
2988	struct srb_iocb *elsio = NULL;
2989	struct qla_hw_data *ha = vha->hw;
2990	int rval = QLA_SUCCESS;
2991	void	*ptr, *resp_ptr;
2992
2993	/* Alloc SRB structure */
2994	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2995	if (!sp) {
2996		ql_log(ql_log_info, vha, 0x70e6,
2997		    "SRB allocation failed\n");
2998		fcport->flags &= ~FCF_ASYNC_ACTIVE;
2999		return -ENOMEM;
3000	}
3001
3002	fcport->flags |= FCF_ASYNC_SENT;
3003	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
3004	elsio = &sp->u.iocb_cmd;
3005	ql_dbg(ql_dbg_io, vha, 0x3073,
3006	    "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
3007
3008	sp->type = SRB_ELS_DCMD;
3009	sp->name = "ELS_DCMD";
3010	sp->fcport = fcport;
3011
3012	elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
3013	if (wait)
3014		sp->flags = SRB_WAKEUP_ON_COMP;
3015
3016	qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
3017
3018	sp->done = qla2x00_els_dcmd2_sp_done;
3019	elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
3020
3021	ptr = elsio->u.els_plogi.els_plogi_pyld =
3022	    dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size,
3023		&elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
3024
3025	if (!elsio->u.els_plogi.els_plogi_pyld) {
3026		rval = QLA_FUNCTION_FAILED;
3027		goto out;
3028	}
3029
3030	resp_ptr = elsio->u.els_plogi.els_resp_pyld =
3031	    dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size,
3032		&elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
3033
3034	if (!elsio->u.els_plogi.els_resp_pyld) {
3035		rval = QLA_FUNCTION_FAILED;
3036		goto out;
3037	}
3038
3039	ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
3040
3041	memset(ptr, 0, sizeof(struct els_plogi_payload));
3042	memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
3043	memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
3044	    &ha->plogi_els_payld.fl_csp, LOGIN_TEMPLATE_SIZE);
3045
3046	elsio->u.els_plogi.els_cmd = els_opcode;
3047	elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
3048
3049	ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
3050	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
3051	    (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
3052	    sizeof(*elsio->u.els_plogi.els_plogi_pyld));
3053
3054	init_completion(&elsio->u.els_plogi.comp);
3055	rval = qla2x00_start_sp(sp);
3056	if (rval != QLA_SUCCESS) {
3057		rval = QLA_FUNCTION_FAILED;
3058	} else {
3059		ql_dbg(ql_dbg_disc, vha, 0x3074,
3060		    "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
3061		    sp->name, sp->handle, fcport->loop_id,
3062		    fcport->d_id.b24, vha->d_id.b24);
3063	}
3064
3065	if (wait) {
3066		wait_for_completion(&elsio->u.els_plogi.comp);
3067
3068		if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
3069			rval = QLA_FUNCTION_FAILED;
3070	} else {
3071		goto done;
3072	}
3073
3074out:
3075	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3076	qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
3077	sp->free(sp);
3078done:
3079	return rval;
3080}
3081
3082static void
3083qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
3084{
3085	struct bsg_job *bsg_job = sp->u.bsg_job;
3086	struct fc_bsg_request *bsg_request = bsg_job->request;
3087
3088	els_iocb->entry_type = ELS_IOCB_TYPE;
3089	els_iocb->entry_count = 1;
3090	els_iocb->sys_define = 0;
3091	els_iocb->entry_status = 0;
3092	els_iocb->handle = sp->handle;
3093	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3094	els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3095	els_iocb->vp_index = sp->vha->vp_idx;
3096	els_iocb->sof_type = EST_SOFI3;
3097	els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3098
3099	els_iocb->opcode =
3100	    sp->type == SRB_ELS_CMD_RPT ?
3101	    bsg_request->rqst_data.r_els.els_code :
3102	    bsg_request->rqst_data.h_els.command_code;
3103	els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
3104	els_iocb->d_id[1] = sp->fcport->d_id.b.area;
3105	els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
3106	els_iocb->control_flags = 0;
3107	els_iocb->rx_byte_count =
3108	    cpu_to_le32(bsg_job->reply_payload.payload_len);
3109	els_iocb->tx_byte_count =
3110	    cpu_to_le32(bsg_job->request_payload.payload_len);
3111
3112	put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3113			   &els_iocb->tx_address);
3114	els_iocb->tx_len =
3115	    cpu_to_le32(sg_dma_len(bsg_job->request_payload.sg_list));
3116
3117	put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3118			   &els_iocb->rx_address);
3119	els_iocb->rx_len =
3120	    cpu_to_le32(sg_dma_len(bsg_job->reply_payload.sg_list));
3121
3122	sp->vha->qla_stats.control_requests++;
3123}
3124
3125static void
3126qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
3127{
3128	uint16_t        avail_dsds;
3129	struct dsd64	*cur_dsd;
3130	struct scatterlist *sg;
3131	int index;
3132	uint16_t tot_dsds;
3133	scsi_qla_host_t *vha = sp->vha;
3134	struct qla_hw_data *ha = vha->hw;
3135	struct bsg_job *bsg_job = sp->u.bsg_job;
3136	int entry_count = 1;
3137
3138	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
3139	ct_iocb->entry_type = CT_IOCB_TYPE;
3140	ct_iocb->entry_status = 0;
3141	ct_iocb->handle1 = sp->handle;
3142	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
3143	ct_iocb->status = cpu_to_le16(0);
3144	ct_iocb->control_flags = cpu_to_le16(0);
3145	ct_iocb->timeout = 0;
3146	ct_iocb->cmd_dsd_count =
3147	    cpu_to_le16(bsg_job->request_payload.sg_cnt);
3148	ct_iocb->total_dsd_count =
3149	    cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
3150	ct_iocb->req_bytecount =
3151	    cpu_to_le32(bsg_job->request_payload.payload_len);
3152	ct_iocb->rsp_bytecount =
3153	    cpu_to_le32(bsg_job->reply_payload.payload_len);
3154
3155	put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3156			   &ct_iocb->req_dsd.address);
3157	ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
3158
3159	put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3160			   &ct_iocb->rsp_dsd.address);
3161	ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
3162
3163	avail_dsds = 1;
3164	cur_dsd = &ct_iocb->rsp_dsd;
3165	index = 0;
3166	tot_dsds = bsg_job->reply_payload.sg_cnt;
3167
3168	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
3169		cont_a64_entry_t *cont_pkt;
3170
3171		/* Allocate additional continuation packets? */
3172		if (avail_dsds == 0) {
3173			/*
3174			 * Five DSDs are available in the Cont.
3175			 * Type 1 IOCB.
3176			 */
3177			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3178			    vha->hw->req_q_map[0]);
3179			cur_dsd = cont_pkt->dsd;
3180			avail_dsds = 5;
3181			entry_count++;
3182		}
3183
3184		append_dsd64(&cur_dsd, sg);
3185		avail_dsds--;
3186	}
3187	ct_iocb->entry_count = entry_count;
3188
3189	sp->vha->qla_stats.control_requests++;
3190}
3191
3192static void
3193qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3194{
3195	uint16_t        avail_dsds;
3196	struct dsd64	*cur_dsd;
3197	struct scatterlist *sg;
3198	int index;
3199	uint16_t cmd_dsds, rsp_dsds;
3200	scsi_qla_host_t *vha = sp->vha;
3201	struct qla_hw_data *ha = vha->hw;
3202	struct bsg_job *bsg_job = sp->u.bsg_job;
3203	int entry_count = 1;
3204	cont_a64_entry_t *cont_pkt = NULL;
3205
3206	ct_iocb->entry_type = CT_IOCB_TYPE;
3207	ct_iocb->entry_status = 0;
3208	ct_iocb->sys_define = 0;
3209	ct_iocb->handle = sp->handle;
3210
3211	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3212	ct_iocb->vp_index = sp->vha->vp_idx;
3213	ct_iocb->comp_status = cpu_to_le16(0);
3214
3215	cmd_dsds = bsg_job->request_payload.sg_cnt;
3216	rsp_dsds = bsg_job->reply_payload.sg_cnt;
3217
3218	ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
3219	ct_iocb->timeout = 0;
3220	ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
3221	ct_iocb->cmd_byte_count =
3222	    cpu_to_le32(bsg_job->request_payload.payload_len);
3223
3224	avail_dsds = 2;
3225	cur_dsd = ct_iocb->dsd;
3226	index = 0;
3227
3228	for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
3229		/* Allocate additional continuation packets? */
3230		if (avail_dsds == 0) {
3231			/*
3232			 * Five DSDs are available in the Cont.
3233			 * Type 1 IOCB.
3234			 */
3235			cont_pkt = qla2x00_prep_cont_type1_iocb(
3236			    vha, ha->req_q_map[0]);
3237			cur_dsd = cont_pkt->dsd;
3238			avail_dsds = 5;
3239			entry_count++;
3240		}
3241
3242		append_dsd64(&cur_dsd, sg);
3243		avail_dsds--;
3244	}
3245
3246	index = 0;
3247
3248	for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
3249		/* Allocate additional continuation packets? */
3250		if (avail_dsds == 0) {
3251			/*
3252			 * Five DSDs are available in the Cont.
3253			 * Type 1 IOCB.
3254			 */
3255			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3256			    ha->req_q_map[0]);
3257			cur_dsd = cont_pkt->dsd;
3258			avail_dsds = 5;
3259			entry_count++;
3260		}
3261
3262		append_dsd64(&cur_dsd, sg);
3263		avail_dsds--;
3264	}
3265	ct_iocb->entry_count = entry_count;
3266}
3267
3268/**
3269 * qla82xx_start_scsi() - Send a SCSI command to the ISP
3270 * @sp: command to send to the ISP
3271 *
3272 * Returns non-zero if a failure occurred, else zero.
3273 */
3274int
3275qla82xx_start_scsi(srb_t *sp)
3276{
3277	int		nseg;
3278	unsigned long   flags;
3279	struct scsi_cmnd *cmd;
3280	uint32_t	*clr_ptr;
3281	uint32_t	handle;
3282	uint16_t	cnt;
3283	uint16_t	req_cnt;
3284	uint16_t	tot_dsds;
3285	struct device_reg_82xx __iomem *reg;
3286	uint32_t dbval;
3287	__be32 *fcp_dl;
3288	uint8_t additional_cdb_len;
3289	struct ct6_dsd *ctx;
3290	struct scsi_qla_host *vha = sp->vha;
3291	struct qla_hw_data *ha = vha->hw;
3292	struct req_que *req = NULL;
3293	struct rsp_que *rsp = NULL;
3294
3295	/* Setup device pointers. */
3296	reg = &ha->iobase->isp82;
3297	cmd = GET_CMD_SP(sp);
3298	req = vha->req;
3299	rsp = ha->rsp_q_map[0];
3300
3301	/* So we know we haven't pci_map'ed anything yet */
3302	tot_dsds = 0;
3303
3304	dbval = 0x04 | (ha->portnum << 5);
3305
3306	/* Send marker if required */
3307	if (vha->marker_needed != 0) {
3308		if (qla2x00_marker(vha, ha->base_qpair,
3309			0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3310			ql_log(ql_log_warn, vha, 0x300c,
3311			    "qla2x00_marker failed for cmd=%p.\n", cmd);
3312			return QLA_FUNCTION_FAILED;
3313		}
3314		vha->marker_needed = 0;
3315	}
3316
3317	/* Acquire ring specific lock */
3318	spin_lock_irqsave(&ha->hardware_lock, flags);
3319
3320	handle = qla2xxx_get_next_handle(req);
3321	if (handle == 0)
3322		goto queuing_error;
3323
3324	/* Map the sg table so we have an accurate count of sg entries needed */
3325	if (scsi_sg_count(cmd)) {
3326		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3327		    scsi_sg_count(cmd), cmd->sc_data_direction);
3328		if (unlikely(!nseg))
3329			goto queuing_error;
3330	} else
3331		nseg = 0;
3332
3333	tot_dsds = nseg;
3334
3335	if (tot_dsds > ql2xshiftctondsd) {
3336		struct cmd_type_6 *cmd_pkt;
3337		uint16_t more_dsd_lists = 0;
3338		struct dsd_dma *dsd_ptr;
3339		uint16_t i;
3340
3341		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3342		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3343			ql_dbg(ql_dbg_io, vha, 0x300d,
3344			    "Num of DSD list %d is more than %d for cmd=%p.\n",
3345			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3346			    cmd);
3347			goto queuing_error;
3348		}
3349
3350		if (more_dsd_lists <= ha->gbl_dsd_avail)
3351			goto sufficient_dsds;
3352		else
3353			more_dsd_lists -= ha->gbl_dsd_avail;
3354
3355		for (i = 0; i < more_dsd_lists; i++) {
3356			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3357			if (!dsd_ptr) {
3358				ql_log(ql_log_fatal, vha, 0x300e,
3359				    "Failed to allocate memory for dsd_dma "
3360				    "for cmd=%p.\n", cmd);
3361				goto queuing_error;
3362			}
3363
3364			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3365				GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3366			if (!dsd_ptr->dsd_addr) {
3367				kfree(dsd_ptr);
3368				ql_log(ql_log_fatal, vha, 0x300f,
3369				    "Failed to allocate memory for dsd_addr "
3370				    "for cmd=%p.\n", cmd);
3371				goto queuing_error;
3372			}
3373			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3374			ha->gbl_dsd_avail++;
3375		}
3376
3377sufficient_dsds:
3378		req_cnt = 1;
3379
3380		if (req->cnt < (req_cnt + 2)) {
3381			cnt = (uint16_t)rd_reg_dword_relaxed(
3382				&reg->req_q_out[0]);
3383			if (req->ring_index < cnt)
3384				req->cnt = cnt - req->ring_index;
3385			else
3386				req->cnt = req->length -
3387					(req->ring_index - cnt);
3388			if (req->cnt < (req_cnt + 2))
3389				goto queuing_error;
3390		}
3391
3392		ctx = sp->u.scmd.ct6_ctx =
3393		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3394		if (!ctx) {
3395			ql_log(ql_log_fatal, vha, 0x3010,
3396			    "Failed to allocate ctx for cmd=%p.\n", cmd);
3397			goto queuing_error;
3398		}
3399
3400		memset(ctx, 0, sizeof(struct ct6_dsd));
3401		ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3402			GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3403		if (!ctx->fcp_cmnd) {
3404			ql_log(ql_log_fatal, vha, 0x3011,
3405			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3406			goto queuing_error;
3407		}
3408
3409		/* Initialize the DSD list and dma handle */
3410		INIT_LIST_HEAD(&ctx->dsd_list);
3411		ctx->dsd_use_cnt = 0;
3412
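		/*
		 * fcp_cmnd_len below follows the standard FCP_CMND layout:
		 * a 12-byte header, the (possibly extended) CDB and the
		 * trailing 4-byte FCP_DL field.
		 */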
3413		if (cmd->cmd_len > 16) {
3414			additional_cdb_len = cmd->cmd_len - 16;
3415			if ((cmd->cmd_len % 4) != 0) {
3416				/* SCSI command bigger than 16 bytes must be
3417				 * multiple of 4
3418				 */
3419				ql_log(ql_log_warn, vha, 0x3012,
3420				    "scsi cmd len %d not multiple of 4 "
3421				    "for cmd=%p.\n", cmd->cmd_len, cmd);
3422				goto queuing_error_fcp_cmnd;
3423			}
3424			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3425		} else {
3426			additional_cdb_len = 0;
3427			ctx->fcp_cmnd_len = 12 + 16 + 4;
3428		}
3429
3430		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3431		cmd_pkt->handle = make_handle(req->id, handle);
3432
3433		/* Zero out remaining portion of packet. */
3434		/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
3435		clr_ptr = (uint32_t *)cmd_pkt + 2;
3436		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3437		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3438
3439		/* Set NPORT-ID and LUN number*/
3440		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3441		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3442		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3443		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3444		cmd_pkt->vp_index = sp->vha->vp_idx;
3445
3446		/* Build IOCB segments */
3447		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3448			goto queuing_error_fcp_cmnd;
3449
3450		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3451		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3452
3453		/* build FCP_CMND IU */
3454		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3455		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3456
3457		if (cmd->sc_data_direction == DMA_TO_DEVICE)
3458			ctx->fcp_cmnd->additional_cdb_len |= 1;
3459		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3460			ctx->fcp_cmnd->additional_cdb_len |= 2;
3461
3462		/* Populate the FCP_PRIO. */
3463		if (ha->flags.fcp_prio_enabled)
3464			ctx->fcp_cmnd->task_attribute |=
3465			    sp->fcport->fcp_prio << 3;
3466
3467		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3468
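		/*
		 * FCP_DL (the total data transfer length) immediately follows
		 * the CDB in the FCP_CMND IU and is big-endian on the wire.
		 */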
3469		fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
3470		    additional_cdb_len);
3471		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3472
3473		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3474		put_unaligned_le64(ctx->fcp_cmnd_dma,
3475				   &cmd_pkt->fcp_cmnd_dseg_address);
3476
3477		sp->flags |= SRB_FCP_CMND_DMA_VALID;
3478		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3479		/* Set total data segment count. */
3480		cmd_pkt->entry_count = (uint8_t)req_cnt;
3481		/* Specify response queue number where
3482		 * completion should happen
3483		 */
3484		cmd_pkt->entry_status = (uint8_t) rsp->id;
3485	} else {
3486		struct cmd_type_7 *cmd_pkt;
3487
3488		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3489		if (req->cnt < (req_cnt + 2)) {
3490			cnt = (uint16_t)rd_reg_dword_relaxed(
3491			    &reg->req_q_out[0]);
3492			if (req->ring_index < cnt)
3493				req->cnt = cnt - req->ring_index;
3494			else
3495				req->cnt = req->length -
3496					(req->ring_index - cnt);
3497		}
3498		if (req->cnt < (req_cnt + 2))
3499			goto queuing_error;
3500
3501		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3502		cmd_pkt->handle = make_handle(req->id, handle);
3503
3504		/* Zero out remaining portion of packet. */
3505		/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3506		clr_ptr = (uint32_t *)cmd_pkt + 2;
3507		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3508		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3509
3510		/* Set NPORT-ID and LUN number*/
3511		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3512		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3513		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3514		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3515		cmd_pkt->vp_index = sp->vha->vp_idx;
3516
3517		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3518		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3519		    sizeof(cmd_pkt->lun));
3520
3521		/* Populate the FCP_PRIO. */
3522		if (ha->flags.fcp_prio_enabled)
3523			cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3524
3525		/* Load SCSI command packet. */
3526		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3527		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3528
3529		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3530
3531		/* Build IOCB segments */
3532		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3533
3534		/* Set total data segment count. */
3535		cmd_pkt->entry_count = (uint8_t)req_cnt;
3536		/* Specify response queue number where
3537		 * completion should happen.
3538		 */
3539		cmd_pkt->entry_status = (uint8_t) rsp->id;
3540
3541	}
3542	/* Build command packet. */
3543	req->current_outstanding_cmd = handle;
3544	req->outstanding_cmds[handle] = sp;
3545	sp->handle = handle;
3546	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3547	req->cnt -= req_cnt;
3548	wmb();
3549
3550	/* Adjust ring index. */
3551	req->ring_index++;
3552	if (req->ring_index == req->length) {
3553		req->ring_index = 0;
3554		req->ring_ptr = req->ring;
3555	} else
3556		req->ring_ptr++;
3557
3558	sp->flags |= SRB_DMA_VALID;
3559
3560	/* Set chip new ring index. */
3561	/* write, read and verify logic */
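	/*
	 * Doorbell layout: 0x04 | (port << 5) in the low byte, the request
	 * queue id in the next byte and the new ring index in the top
	 * 16 bits.
	 */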
3562	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3563	if (ql2xdbwr)
3564		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3565	else {
3566		wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
3567		wmb();
3568		while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
3569			wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
3570			wmb();
3571		}
3572	}
3573
3574	/* Manage unprocessed RIO/ZIO commands in response queue. */
3575	if (vha->flags.process_response_queue &&
3576	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3577		qla24xx_process_response_queue(vha, rsp);
3578
3579	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3580	return QLA_SUCCESS;
3581
3582queuing_error_fcp_cmnd:
3583	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3584queuing_error:
3585	if (tot_dsds)
3586		scsi_dma_unmap(cmd);
3587
3588	if (sp->u.scmd.crc_ctx) {
3589		mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
3590		sp->u.scmd.crc_ctx = NULL;
3591	}
3592	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3593
3594	return QLA_FUNCTION_FAILED;
3595}
3596
3597static void
3598qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3599{
3600	struct srb_iocb *aio = &sp->u.iocb_cmd;
3601	scsi_qla_host_t *vha = sp->vha;
3602	struct req_que *req = sp->qpair->req;
3603
3604	memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3605	abt_iocb->entry_type = ABORT_IOCB_TYPE;
3606	abt_iocb->entry_count = 1;
3607	abt_iocb->handle = make_handle(req->id, sp->handle);
3608	if (sp->fcport) {
3609		abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3610		abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3611		abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3612		abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3613	}
3614	abt_iocb->handle_to_abort =
3615		make_handle(le16_to_cpu(aio->u.abt.req_que_no),
3616			    aio->u.abt.cmd_hndl);
3617	abt_iocb->vp_index = vha->vp_idx;
3618	abt_iocb->req_que_no = aio->u.abt.req_que_no;
3619	/* Send the command to the firmware */
3620	wmb();
3621}
3622
3623static void
3624qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3625{
3626	int i, sz;
3627
3628	mbx->entry_type = MBX_IOCB_TYPE;
3629	mbx->handle = sp->handle;
3630	sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3631
3632	for (i = 0; i < sz; i++)
3633		mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i];
3634}
3635
3636static void
3637qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3638{
3639	sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3640	qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3641	ct_pkt->handle = sp->handle;
3642}
3643
3644static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3645	struct nack_to_isp *nack)
3646{
3647	struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3648
3649	nack->entry_type = NOTIFY_ACK_TYPE;
3650	nack->entry_count = 1;
3651	nack->ox_id = ntfy->ox_id;
3652
3653	nack->u.isp24.handle = sp->handle;
3654	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3655	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3656		nack->u.isp24.flags = ntfy->u.isp24.flags &
3657			cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
3658	}
3659	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3660	nack->u.isp24.status = ntfy->u.isp24.status;
3661	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3662	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3663	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3664	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3665	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3666	nack->u.isp24.srr_flags = 0;
3667	nack->u.isp24.srr_reject_code = 0;
3668	nack->u.isp24.srr_reject_code_expl = 0;
3669	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3670}
3671
3672/*
3673 * Build NVME LS request
3674 */
3675static void
3676qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3677{
3678	struct srb_iocb *nvme;
3679
3680	nvme = &sp->u.iocb_cmd;
3681	cmd_pkt->entry_type = PT_LS4_REQUEST;
3682	cmd_pkt->entry_count = 1;
3683	cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
3684
3685	cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3686	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3687	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3688
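	/*
	 * DSD 0 carries the LS request payload, DSD 1 receives the
	 * LS response.
	 */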
3689	cmd_pkt->tx_dseg_count = cpu_to_le16(1);
3690	cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len);
3691	cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len);
3692	put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
3693
3694	cmd_pkt->rx_dseg_count = cpu_to_le16(1);
3695	cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len);
3696	cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len);
3697	put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
3698}
3699
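/**
 * qla25xx_ctrlvp_iocb() - Build a VP Control IOCB.
 * @sp: SRB command to process
 * @vce: VP Control IOCB packet to populate
 *
 * Sets the requested VP control command and marks the target VP index in
 * the VP index bitmap.
 */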
static void
qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
{
	int map, pos;

	vce->entry_type = VP_CTRL_IOCB_TYPE;
	vce->handle = sp->handle;
	vce->entry_count = 1;
	vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
	vce->vp_count = cpu_to_le16(1);

	/*
	 * The VP index map in the firmware starts at 1, so decrement the
	 * index; this is safe because index 0 is never used.
	 */
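	/* For example, vp_index 9 sets byte 1, bit 0 of vp_idx_map below. */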
	map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
	pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
	vce->vp_idx_map[map] |= 1 << pos;
}

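/**
 * qla24xx_prlo_iocb() - Build a Process Logout (PRLO) IOCB.
 * @sp: SRB command to process
 * @logio: Login/Logout Port IOCB packet to populate
 *
 * Requests an implicit PRLO for the port referenced by @sp.
 */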
static void
qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);

	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vha->vp_idx;
}

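/**
 * qla2x00_start_sp() - Build and start an SRB-based IOCB.
 * @sp: SRB command to process
 *
 * Allocates an IOCB on the SRB's queue pair, dispatches to the
 * type-specific builder and then starts the request via
 * qla2x00_start_iocbs().
 *
 * Returns QLA_SUCCESS on success, -EIO if the adapter is in EEH recovery,
 * or -EAGAIN if no IOCB space is available.
 */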
int
qla2x00_start_sp(srb_t *sp)
{
	int rval = QLA_SUCCESS;
	scsi_qla_host_t *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qp = sp->qpair;
	void *pkt;
	unsigned long flags;

	if (vha->hw->flags.eeh_busy)
		return -EIO;

	spin_lock_irqsave(qp->qp_lock_ptr, flags);
	pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
	if (!pkt) {
		rval = -EAGAIN;
		ql_log(ql_log_warn, vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");
		goto done;
	}

	switch (sp->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_PRLI_CMD:
		qla24xx_prli_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	case SRB_TM_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_tm_iocb(sp, pkt) :
		    qla24xx_tm_iocb(sp, pkt);
		break;
	case SRB_FXIOCB_DCMD:
	case SRB_FXIOCB_BCMD:
		qlafx00_fxdisc_iocb(sp, pkt);
		break;
	case SRB_NVME_LS:
		qla_nvme_ls(sp, pkt);
		break;
	case SRB_ABT_CMD:
		IS_QLAFX00(ha) ?
			qlafx00_abort_iocb(sp, pkt) :
			qla24xx_abort_iocb(sp, pkt);
		break;
	case SRB_ELS_DCMD:
		qla24xx_els_logo_iocb(sp, pkt);
		break;
	case SRB_CT_PTHRU_CMD:
		qla2x00_ctpthru_cmd_iocb(sp, pkt);
		break;
	case SRB_MB_IOCB:
		qla2x00_mb_iocb(sp, pkt);
		break;
	case SRB_NACK_PLOGI:
	case SRB_NACK_PRLI:
	case SRB_NACK_LOGO:
		qla2x00_send_notify_ack_iocb(sp, pkt);
		break;
	case SRB_CTRL_VP:
		qla25xx_ctrlvp_iocb(sp, pkt);
		break;
	case SRB_PRLO_CMD:
		qla24xx_prlo_iocb(sp, pkt);
		break;
	default:
		break;
	}

	if (sp->start_timer)
		add_timer(&sp->u.iocb_cmd.timer);

	wmb();
	qla2x00_start_iocbs(vha, qp->req);
done:
	spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
	return rval;
}

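/**
 * qla25xx_build_bidir_iocb() - Build a bidirectional command IOCB.
 * @sp: SRB command to process
 * @vha: HA context
 * @cmd_pkt: bidirectional command IOCB packet to populate
 * @tot_dsds: total number of data segment descriptors
 *
 * Fills in the command IOCB and any Continuation Type 1 IOCBs needed to
 * hold the request and reply scatter/gather lists of the BSG job.
 */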
static void
qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
				struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
{
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	uint32_t req_data_len = 0;
	uint32_t rsp_data_len = 0;
	struct scatterlist *sg;
	int index;
	int entry_count = 1;
	struct bsg_job *bsg_job = sp->u.bsg_job;

	/* Update entry type to indicate a bidirectional command. */
	put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);

	/* Set the transfer direction; both the read and write flags are set
	 * for a bidirectional command. Also set BD_WRAP_BACK so the firmware
	 * takes care of assigning DID=SID for outgoing packets.
	 */
	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
							BD_WRAP_BACK);

	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
	cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
	cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);

	vha->bidi_stats.transfer_bytes += req_data_len;
	vha->bidi_stats.io_count++;

	vha->qla_stats.output_bytes += req_data_len;
	vha->qla_stats.output_requests++;

	/* Only one DSD is available in the bidirectional IOCB; the remaining
	 * DSDs are bundled in Continuation Type 1 IOCBs.
	 */
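	/*
	 * For example, 11 total DSDs use the one DSD in this IOCB plus two
	 * Continuation Type 1 IOCBs (5 DSDs each), giving an entry_count of 3.
	 */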
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->fcp_dsd;

	index = 0;

	for_each_sg(bsg_job->request_payload.sg_list, sg,
				bsg_job->request_payload.sg_cnt, index) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* Continuation Type 1 IOCB can accommodate
			 * 5 DSDs.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = 5;
			entry_count++;
		}
		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
	/* The read-request DSDs always go to a continuation IOCB and follow
	 * the write DSDs. If there is room in the current IOCB they are added
	 * there; otherwise a new continuation IOCB is allocated.
	 */
	for_each_sg(bsg_job->reply_payload.sg_list, sg,
				bsg_job->reply_payload.sg_cnt, index) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* Continuation Type 1 IOCB can accommodate
			 * 5 DSDs.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = 5;
			entry_count++;
		}
		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
	/* This value should be the same as the number of IOCBs required for
	 * this command.
	 */
	cmd_pkt->entry_count = entry_count;
}

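/**
 * qla2x00_start_bidir() - Build and send a bidirectional command.
 * @sp: SRB command to process
 * @vha: HA context
 * @tot_dsds: total number of data segment descriptors
 *
 * Reserves request-queue space, builds the bidirectional command IOCB and
 * starts the I/O.
 *
 * Returns EXT_STATUS_OK on success, EXT_STATUS_MAILBOX if a required marker
 * could not be sent, or EXT_STATUS_BUSY if no handle or request-queue space
 * is available.
 */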
int
qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	uint32_t handle;
	uint16_t req_cnt;
	uint16_t cnt;
	uint32_t *clr_ptr;
	struct cmd_bidir *cmd_pkt = NULL;
	struct rsp_que *rsp;
	struct req_que *req;
	int rval = EXT_STATUS_OK;

	rval = QLA_SUCCESS;

	rsp = ha->rsp_q_map[0];
	req = vha->req;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair,
			0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
			return EXT_STATUS_MAILBOX;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	/* Calculate number of IOCBs required */
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		if (IS_SHADOW_REG_CAPABLE(ha)) {
			cnt = *req->out_ptr;
		} else {
			cnt = rd_reg_dword_relaxed(req->req_q_out);
			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
				goto queuing_error;
		}

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
	}
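	/*
	 * For example, with a 128-entry ring, ring_index 10 and an out
	 * pointer of 4, the recalculation above yields
	 * req->cnt = 128 - (10 - 4) = 122.
	 */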
	if (req->cnt < req_cnt + 2) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID (of vha). */
	cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
	cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
	cmd_pkt->port_id[1] = vha->d_id.b.area;
	cmd_pkt->port_id[2] = vha->d_id.b.domain;

	qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	/* Send the command to the firmware */
	wmb();
	qla2x00_start_iocbs(vha, req);
queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return rval;
}
