1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
4 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
5 */
6
7/*
8 * All common (i.e. transport-independent) SLI-4 functions are implemented
9 * in this file.
10 */
11#include "sli4.h"
12
13static struct sli4_asic_entry_t sli4_asic_table[] = {
14	{ SLI4_ASIC_REV_B0, SLI4_ASIC_GEN_5},
15	{ SLI4_ASIC_REV_D0, SLI4_ASIC_GEN_5},
16	{ SLI4_ASIC_REV_A3, SLI4_ASIC_GEN_6},
17	{ SLI4_ASIC_REV_A0, SLI4_ASIC_GEN_6},
18	{ SLI4_ASIC_REV_A1, SLI4_ASIC_GEN_6},
19	{ SLI4_ASIC_REV_A3, SLI4_ASIC_GEN_6},
20	{ SLI4_ASIC_REV_A1, SLI4_ASIC_GEN_7},
21	{ SLI4_ASIC_REV_A0, SLI4_ASIC_GEN_7},
22};
23
24/* Convert queue type enum (SLI_QTYPE_*) into a string */
25static char *SLI4_QNAME[] = {
26	"Event Queue",
27	"Completion Queue",
28	"Mailbox Queue",
29	"Work Queue",
30	"Receive Queue",
31	"Undefined"
32};
33
34/**
35 * sli_config_cmd_init() - Write a SLI_CONFIG command to the provided buffer.
36 *
37 * @sli4: SLI context pointer.
38 * @buf: Destination buffer for the command.
39 * @length: Length in bytes of attached command.
40 * @dma: DMA buffer for non-embedded commands.
41 * Return: Command payload buffer.
42 */
43static void *
44sli_config_cmd_init(struct sli4 *sli4, void *buf, u32 length,
45		    struct efc_dma *dma)
46{
47	struct sli4_cmd_sli_config *config;
48	u32 flags;
49
50	if (length > sizeof(config->payload.embed) && !dma) {
51		efc_log_err(sli4, "Too big for an embedded cmd with len(%d)\n",
52			    length);
53		return NULL;
54	}
55
56	memset(buf, 0, SLI4_BMBX_SIZE);
57
58	config = buf;
59
60	config->hdr.command = SLI4_MBX_CMD_SLI_CONFIG;
61	if (!dma) {
62		flags = SLI4_SLICONF_EMB;
63		config->dw1_flags = cpu_to_le32(flags);
64		config->payload_len = cpu_to_le32(length);
65		return config->payload.embed;
66	}
67
68	flags = SLI4_SLICONF_PMDCMD_VAL_1;
69	flags &= ~SLI4_SLICONF_EMB;
70	config->dw1_flags = cpu_to_le32(flags);
71
72	config->payload.mem.addr.low = cpu_to_le32(lower_32_bits(dma->phys));
73	config->payload.mem.addr.high =	cpu_to_le32(upper_32_bits(dma->phys));
74	config->payload.mem.length =
75				cpu_to_le32(dma->size & SLI4_SLICONF_PMD_LEN);
76	config->payload_len = cpu_to_le32(dma->size);
77	/* save pointer to DMA for BMBX dumping purposes */
78	sli4->bmbx_non_emb_pmd = dma;
79	return dma->virt;
80}
81
82/**
83 * sli_cmd_common_create_cq() - Write a COMMON_CREATE_CQ V2 command.
84 *
85 * @sli4: SLI context pointer.
86 * @buf: Destination buffer for the command.
87 * @qmem: DMA memory for queue.
88 * @eq_id: EQ id assosiated with this cq.
89 * Return: status -EIO/0.
90 */
91static int
92sli_cmd_common_create_cq(struct sli4 *sli4, void *buf, struct efc_dma *qmem,
93			 u16 eq_id)
94{
95	struct sli4_rqst_cmn_create_cq_v2 *cqv2 = NULL;
96	u32 p;
97	uintptr_t addr;
98	u32 num_pages = 0;
99	size_t cmd_size = 0;
100	u32 page_size = 0;
101	u32 n_cqe = 0;
102	u32 dw5_flags = 0;
103	u16 dw6w1_arm = 0;
104	__le32 len;
105
106	/* First calculate number of pages and the mailbox cmd length */
107	n_cqe = qmem->size / SLI4_CQE_BYTES;
108	switch (n_cqe) {
109	case 256:
110	case 512:
111	case 1024:
112	case 2048:
113		page_size = SZ_4K;
114		break;
115	case 4096:
116		page_size = SZ_8K;
117		break;
118	default:
119		return -EIO;
120	}
121	num_pages = sli_page_count(qmem->size, page_size);
122
123	cmd_size = SLI4_RQST_CMDSZ(cmn_create_cq_v2)
124		   + SZ_DMAADDR * num_pages;
125
126	cqv2 = sli_config_cmd_init(sli4, buf, cmd_size, NULL);
127	if (!cqv2)
128		return -EIO;
129
130	len = SLI4_RQST_PYLD_LEN_VAR(cmn_create_cq_v2, SZ_DMAADDR * num_pages);
131	sli_cmd_fill_hdr(&cqv2->hdr, SLI4_CMN_CREATE_CQ, SLI4_SUBSYSTEM_COMMON,
132			 CMD_V2, len);
133	cqv2->page_size = page_size / SLI_PAGE_SIZE;
134
135	/* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.3) */
136	cqv2->num_pages = cpu_to_le16(num_pages);
137	if (!num_pages || num_pages > SLI4_CREATE_CQV2_MAX_PAGES)
138		return -EIO;
139
140	switch (num_pages) {
141	case 1:
142		dw5_flags |= SLI4_CQ_CNT_VAL(256);
143		break;
144	case 2:
145		dw5_flags |= SLI4_CQ_CNT_VAL(512);
146		break;
147	case 4:
148		dw5_flags |= SLI4_CQ_CNT_VAL(1024);
149		break;
150	case 8:
151		dw5_flags |= SLI4_CQ_CNT_VAL(LARGE);
152		cqv2->cqe_count = cpu_to_le16(n_cqe);
153		break;
154	default:
155		efc_log_err(sli4, "num_pages %d not valid\n", num_pages);
156		return -EIO;
157	}
158
159	if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
160		dw5_flags |= SLI4_CREATE_CQV2_AUTOVALID;
161
162	dw5_flags |= SLI4_CREATE_CQV2_EVT;
163	dw5_flags |= SLI4_CREATE_CQV2_VALID;
164
165	cqv2->dw5_flags = cpu_to_le32(dw5_flags);
166	cqv2->dw6w1_arm = cpu_to_le16(dw6w1_arm);
167	cqv2->eq_id = cpu_to_le16(eq_id);
168
169	for (p = 0, addr = qmem->phys; p < num_pages; p++, addr += page_size) {
170		cqv2->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr));
171		cqv2->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
172	}
173
174	return 0;
175}
176
177static int
178sli_cmd_common_create_eq(struct sli4 *sli4, void *buf, struct efc_dma *qmem)
179{
180	struct sli4_rqst_cmn_create_eq *eq;
181	u32 p;
182	uintptr_t addr;
183	u16 num_pages;
184	u32 dw5_flags = 0;
185	u32 dw6_flags = 0, ver;
186
187	eq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(cmn_create_eq),
188				 NULL);
189	if (!eq)
190		return -EIO;
191
192	if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
193		ver = CMD_V2;
194	else
195		ver = CMD_V0;
196
197	sli_cmd_fill_hdr(&eq->hdr, SLI4_CMN_CREATE_EQ, SLI4_SUBSYSTEM_COMMON,
198			 ver, SLI4_RQST_PYLD_LEN(cmn_create_eq));
199
200	/* valid values for number of pages: 1, 2, 4 (sec 4.4.3) */
201	num_pages = qmem->size / SLI_PAGE_SIZE;
202	eq->num_pages = cpu_to_le16(num_pages);
203
204	switch (num_pages) {
205	case 1:
206		dw5_flags |= SLI4_EQE_SIZE_4;
207		dw6_flags |= SLI4_EQ_CNT_VAL(1024);
208		break;
209	case 2:
210		dw5_flags |= SLI4_EQE_SIZE_4;
211		dw6_flags |= SLI4_EQ_CNT_VAL(2048);
212		break;
213	case 4:
214		dw5_flags |= SLI4_EQE_SIZE_4;
215		dw6_flags |= SLI4_EQ_CNT_VAL(4096);
216		break;
217	default:
218		efc_log_err(sli4, "num_pages %d not valid\n", num_pages);
219		return -EIO;
220	}
221
222	if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
223		dw5_flags |= SLI4_CREATE_EQ_AUTOVALID;
224
225	dw5_flags |= SLI4_CREATE_EQ_VALID;
226	dw6_flags &= (~SLI4_CREATE_EQ_ARM);
227	eq->dw5_flags = cpu_to_le32(dw5_flags);
228	eq->dw6_flags = cpu_to_le32(dw6_flags);
229	eq->dw7_delaymulti = cpu_to_le32(SLI4_CREATE_EQ_DELAYMULTI);
230
231	for (p = 0, addr = qmem->phys; p < num_pages;
232	     p++, addr += SLI_PAGE_SIZE) {
233		eq->page_address[p].low = cpu_to_le32(lower_32_bits(addr));
234		eq->page_address[p].high = cpu_to_le32(upper_32_bits(addr));
235	}
236
237	return 0;
238}
239
240static int
241sli_cmd_common_create_mq_ext(struct sli4 *sli4, void *buf, struct efc_dma *qmem,
242			     u16 cq_id)
243{
244	struct sli4_rqst_cmn_create_mq_ext *mq;
245	u32 p;
246	uintptr_t addr;
247	u32 num_pages;
248	u16 dw6w1_flags = 0;
249
250	mq = sli_config_cmd_init(sli4, buf,
251				 SLI4_CFG_PYLD_LENGTH(cmn_create_mq_ext), NULL);
252	if (!mq)
253		return -EIO;
254
255	sli_cmd_fill_hdr(&mq->hdr, SLI4_CMN_CREATE_MQ_EXT,
256			 SLI4_SUBSYSTEM_COMMON, CMD_V0,
257			 SLI4_RQST_PYLD_LEN(cmn_create_mq_ext));
258
259	/* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.12) */
260	num_pages = qmem->size / SLI_PAGE_SIZE;
261	mq->num_pages = cpu_to_le16(num_pages);
262	switch (num_pages) {
263	case 1:
264		dw6w1_flags |= SLI4_MQE_SIZE_16;
265		break;
266	case 2:
267		dw6w1_flags |= SLI4_MQE_SIZE_32;
268		break;
269	case 4:
270		dw6w1_flags |= SLI4_MQE_SIZE_64;
271		break;
272	case 8:
273		dw6w1_flags |= SLI4_MQE_SIZE_128;
274		break;
275	default:
276		efc_log_info(sli4, "num_pages %d not valid\n", num_pages);
277		return -EIO;
278	}
279
280	mq->async_event_bitmap = cpu_to_le32(SLI4_ASYNC_EVT_FC_ALL);
281
282	if (sli4->params.mq_create_version) {
283		mq->cq_id_v1 = cpu_to_le16(cq_id);
284		mq->hdr.dw3_version = cpu_to_le32(CMD_V1);
285	} else {
286		dw6w1_flags |= (cq_id << SLI4_CREATE_MQEXT_CQID_SHIFT);
287	}
288	mq->dw7_val = cpu_to_le32(SLI4_CREATE_MQEXT_VAL);
289
290	mq->dw6w1_flags = cpu_to_le16(dw6w1_flags);
291	for (p = 0, addr = qmem->phys; p < num_pages;
292	     p++, addr += SLI_PAGE_SIZE) {
293		mq->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr));
294		mq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
295	}
296
297	return 0;
298}
299
300int
301sli_cmd_wq_create(struct sli4 *sli4, void *buf, struct efc_dma *qmem, u16 cq_id)
302{
303	struct sli4_rqst_wq_create *wq;
304	u32 p;
305	uintptr_t addr;
306	u32 page_size = 0;
307	u32 n_wqe = 0;
308	u16 num_pages;
309
310	wq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(wq_create),
311				 NULL);
312	if (!wq)
313		return -EIO;
314
315	sli_cmd_fill_hdr(&wq->hdr, SLI4_OPC_WQ_CREATE, SLI4_SUBSYSTEM_FC,
316			 CMD_V1, SLI4_RQST_PYLD_LEN(wq_create));
317	n_wqe = qmem->size / sli4->wqe_size;
318
319	switch (qmem->size) {
320	case 4096:
321	case 8192:
322	case 16384:
323	case 32768:
324		page_size = SZ_4K;
325		break;
326	case 65536:
327		page_size = SZ_8K;
328		break;
329	case 131072:
330		page_size = SZ_16K;
331		break;
332	case 262144:
333		page_size = SZ_32K;
334		break;
335	case 524288:
336		page_size = SZ_64K;
337		break;
338	default:
339		return -EIO;
340	}
341
342	/* valid values for number of pages(num_pages): 1-8 */
343	num_pages = sli_page_count(qmem->size, page_size);
344	wq->num_pages = cpu_to_le16(num_pages);
345	if (!num_pages || num_pages > SLI4_WQ_CREATE_MAX_PAGES)
346		return -EIO;
347
348	wq->cq_id = cpu_to_le16(cq_id);
349
350	wq->page_size = page_size / SLI_PAGE_SIZE;
351
352	if (sli4->wqe_size == SLI4_WQE_EXT_BYTES)
353		wq->wqe_size_byte |= SLI4_WQE_EXT_SIZE;
354	else
355		wq->wqe_size_byte |= SLI4_WQE_SIZE;
356
357	wq->wqe_count = cpu_to_le16(n_wqe);
358
359	for (p = 0, addr = qmem->phys; p < num_pages; p++, addr += page_size) {
360		wq->page_phys_addr[p].low  = cpu_to_le32(lower_32_bits(addr));
361		wq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
362	}
363
364	return 0;
365}
366
367static int
368sli_cmd_rq_create_v1(struct sli4 *sli4, void *buf, struct efc_dma *qmem,
369		     u16 cq_id, u16 buffer_size)
370{
371	struct sli4_rqst_rq_create_v1 *rq;
372	u32 p;
373	uintptr_t addr;
374	u32 num_pages;
375
376	rq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(rq_create_v1),
377				 NULL);
378	if (!rq)
379		return -EIO;
380
381	sli_cmd_fill_hdr(&rq->hdr, SLI4_OPC_RQ_CREATE, SLI4_SUBSYSTEM_FC,
382			 CMD_V1, SLI4_RQST_PYLD_LEN(rq_create_v1));
383	/* Disable "no buffer warnings" to avoid Lancer bug */
384	rq->dim_dfd_dnb |= SLI4_RQ_CREATE_V1_DNB;
385
386	/* valid values for number of pages: 1-8 (sec 4.5.6) */
387	num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE);
388	rq->num_pages = cpu_to_le16(num_pages);
389	if (!num_pages ||
390	    num_pages > SLI4_RQ_CREATE_V1_MAX_PAGES) {
391		efc_log_info(sli4, "num_pages %d not valid, max %d\n",
392			     num_pages, SLI4_RQ_CREATE_V1_MAX_PAGES);
393		return -EIO;
394	}
395
396	/*
397	 * RQE count is the total number of entries (note not lg2(# entries))
398	 */
399	rq->rqe_count = cpu_to_le16(qmem->size / SLI4_RQE_SIZE);
400
401	rq->rqe_size_byte |= SLI4_RQE_SIZE_8;
402
403	rq->page_size = SLI4_RQ_PAGE_SIZE_4096;
404
405	if (buffer_size < sli4->rq_min_buf_size ||
406	    buffer_size > sli4->rq_max_buf_size) {
407		efc_log_err(sli4, "buffer_size %d out of range (%d-%d)\n",
408			    buffer_size, sli4->rq_min_buf_size,
409			    sli4->rq_max_buf_size);
410		return -EIO;
411	}
412	rq->buffer_size = cpu_to_le32(buffer_size);
413
414	rq->cq_id = cpu_to_le16(cq_id);
415
416	for (p = 0, addr = qmem->phys;
417			p < num_pages;
418			p++, addr += SLI_PAGE_SIZE) {
419		rq->page_phys_addr[p].low  = cpu_to_le32(lower_32_bits(addr));
420		rq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
421	}
422
423	return 0;
424}
425
426static int
427sli_cmd_rq_create_v2(struct sli4 *sli4, u32 num_rqs,
428		     struct sli4_queue *qs[], u32 base_cq_id,
429		     u32 header_buffer_size,
430		     u32 payload_buffer_size, struct efc_dma *dma)
431{
432	struct sli4_rqst_rq_create_v2 *req = NULL;
433	u32 i, p, offset = 0;
434	u32 payload_size, page_count;
435	uintptr_t addr;
436	u32 num_pages;
437	__le32 len;
438
439	page_count =  sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE) * num_rqs;
440
441	/* Payload length must accommodate both request and response */
442	payload_size = max(SLI4_RQST_CMDSZ(rq_create_v2) +
443			   SZ_DMAADDR * page_count,
444			   sizeof(struct sli4_rsp_cmn_create_queue_set));
445
446	dma->size = payload_size;
447	dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
448				       &dma->phys, GFP_KERNEL);
449	if (!dma->virt)
450		return -EIO;
451
452	memset(dma->virt, 0, payload_size);
453
454	req = sli_config_cmd_init(sli4, sli4->bmbx.virt, payload_size, dma);
455	if (!req)
456		return -EIO;
457
458	len =  SLI4_RQST_PYLD_LEN_VAR(rq_create_v2, SZ_DMAADDR * page_count);
459	sli_cmd_fill_hdr(&req->hdr, SLI4_OPC_RQ_CREATE, SLI4_SUBSYSTEM_FC,
460			 CMD_V2, len);
461	/* Fill Payload fields */
462	req->dim_dfd_dnb |= SLI4_RQCREATEV2_DNB;
463	num_pages = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE);
464	req->num_pages = cpu_to_le16(num_pages);
465	req->rqe_count = cpu_to_le16(qs[0]->dma.size / SLI4_RQE_SIZE);
466	req->rqe_size_byte |= SLI4_RQE_SIZE_8;
467	req->page_size = SLI4_RQ_PAGE_SIZE_4096;
468	req->rq_count = num_rqs;
469	req->base_cq_id = cpu_to_le16(base_cq_id);
470	req->hdr_buffer_size = cpu_to_le16(header_buffer_size);
471	req->payload_buffer_size = cpu_to_le16(payload_buffer_size);
472
473	for (i = 0; i < num_rqs; i++) {
474		for (p = 0, addr = qs[i]->dma.phys; p < num_pages;
475		     p++, addr += SLI_PAGE_SIZE) {
476			req->page_phys_addr[offset].low =
477					cpu_to_le32(lower_32_bits(addr));
478			req->page_phys_addr[offset].high =
479					cpu_to_le32(upper_32_bits(addr));
480			offset++;
481		}
482	}
483
484	return 0;
485}
486
487static void
488__sli_queue_destroy(struct sli4 *sli4, struct sli4_queue *q)
489{
490	if (!q->dma.size)
491		return;
492
493	dma_free_coherent(&sli4->pci->dev, q->dma.size,
494			  q->dma.virt, q->dma.phys);
495	memset(&q->dma, 0, sizeof(struct efc_dma));
496}
497
498int
499__sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype,
500		 size_t size, u32 n_entries, u32 align)
501{
502	if (q->dma.virt) {
503		efc_log_err(sli4, "%s failed\n", __func__);
504		return -EIO;
505	}
506
507	memset(q, 0, sizeof(struct sli4_queue));
508
509	q->dma.size = size * n_entries;
510	q->dma.virt = dma_alloc_coherent(&sli4->pci->dev, q->dma.size,
511					 &q->dma.phys, GFP_KERNEL);
512	if (!q->dma.virt) {
513		memset(&q->dma, 0, sizeof(struct efc_dma));
514		efc_log_err(sli4, "%s allocation failed\n", SLI4_QNAME[qtype]);
515		return -EIO;
516	}
517
518	memset(q->dma.virt, 0, size * n_entries);
519
520	spin_lock_init(&q->lock);
521
522	q->type = qtype;
523	q->size = size;
524	q->length = n_entries;
525
526	if (q->type == SLI4_QTYPE_EQ || q->type == SLI4_QTYPE_CQ) {
527		/* For prism, phase will be flipped after
528		 * a sweep through eq and cq
529		 */
530		q->phase = 1;
531	}
532
533	/* Limit to hwf the queue size per interrupt */
534	q->proc_limit = n_entries / 2;
535
536	if (q->type == SLI4_QTYPE_EQ)
537		q->posted_limit = q->length / 2;
538	else
539		q->posted_limit = 64;
540
541	return 0;
542}
543
544int
545sli_fc_rq_alloc(struct sli4 *sli4, struct sli4_queue *q,
546		u32 n_entries, u32 buffer_size,
547		struct sli4_queue *cq, bool is_hdr)
548{
549	if (__sli_queue_init(sli4, q, SLI4_QTYPE_RQ, SLI4_RQE_SIZE,
550			     n_entries, SLI_PAGE_SIZE))
551		return -EIO;
552
553	if (sli_cmd_rq_create_v1(sli4, sli4->bmbx.virt, &q->dma, cq->id,
554				 buffer_size))
555		goto error;
556
557	if (__sli_create_queue(sli4, q))
558		goto error;
559
560	if (is_hdr && q->id & 1) {
561		efc_log_info(sli4, "bad header RQ_ID %d\n", q->id);
562		goto error;
563	} else if (!is_hdr  && (q->id & 1) == 0) {
564		efc_log_info(sli4, "bad data RQ_ID %d\n", q->id);
565		goto error;
566	}
567
568	if (is_hdr)
569		q->u.flag |= SLI4_QUEUE_FLAG_HDR;
570	else
571		q->u.flag &= ~SLI4_QUEUE_FLAG_HDR;
572
573	return 0;
574
575error:
576	__sli_queue_destroy(sli4, q);
577	return -EIO;
578}
579
580int
581sli_fc_rq_set_alloc(struct sli4 *sli4, u32 num_rq_pairs,
582		    struct sli4_queue *qs[], u32 base_cq_id,
583		    u32 n_entries, u32 header_buffer_size,
584		    u32 payload_buffer_size)
585{
586	u32 i;
587	struct efc_dma dma = {0};
588	struct sli4_rsp_cmn_create_queue_set *rsp = NULL;
589	void __iomem *db_regaddr = NULL;
590	u32 num_rqs = num_rq_pairs * 2;
591
592	for (i = 0; i < num_rqs; i++) {
593		if (__sli_queue_init(sli4, qs[i], SLI4_QTYPE_RQ,
594				     SLI4_RQE_SIZE, n_entries,
595				     SLI_PAGE_SIZE)) {
596			goto error;
597		}
598	}
599
600	if (sli_cmd_rq_create_v2(sli4, num_rqs, qs, base_cq_id,
601				 header_buffer_size, payload_buffer_size,
602				 &dma)) {
603		goto error;
604	}
605
606	if (sli_bmbx_command(sli4)) {
607		efc_log_err(sli4, "bootstrap mailbox write failed RQSet\n");
608		goto error;
609	}
610
611	if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
612		db_regaddr = sli4->reg[1] + SLI4_IF6_RQ_DB_REG;
613	else
614		db_regaddr = sli4->reg[0] + SLI4_RQ_DB_REG;
615
616	rsp = dma.virt;
617	if (rsp->hdr.status) {
618		efc_log_err(sli4, "bad create RQSet status=%#x addl=%#x\n",
619			    rsp->hdr.status, rsp->hdr.additional_status);
620		goto error;
621	}
622
623	for (i = 0; i < num_rqs; i++) {
624		qs[i]->id = i + le16_to_cpu(rsp->q_id);
625		if ((qs[i]->id & 1) == 0)
626			qs[i]->u.flag |= SLI4_QUEUE_FLAG_HDR;
627		else
628			qs[i]->u.flag &= ~SLI4_QUEUE_FLAG_HDR;
629
630		qs[i]->db_regaddr = db_regaddr;
631	}
632
633	dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, dma.phys);
634
635	return 0;
636
637error:
638	for (i = 0; i < num_rqs; i++)
639		__sli_queue_destroy(sli4, qs[i]);
640
641	if (dma.virt)
642		dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt,
643				  dma.phys);
644
645	return -EIO;
646}
647
648static int
649sli_res_sli_config(struct sli4 *sli4, void *buf)
650{
651	struct sli4_cmd_sli_config *sli_config = buf;
652
653	/* sanity check */
654	if (!buf || sli_config->hdr.command !=
655		    SLI4_MBX_CMD_SLI_CONFIG) {
656		efc_log_err(sli4, "bad parameter buf=%p cmd=%#x\n", buf,
657			    buf ? sli_config->hdr.command : -1);
658		return -EIO;
659	}
660
661	if (le16_to_cpu(sli_config->hdr.status))
662		return le16_to_cpu(sli_config->hdr.status);
663
664	if (le32_to_cpu(sli_config->dw1_flags) & SLI4_SLICONF_EMB)
665		return sli_config->payload.embed[4];
666
667	efc_log_info(sli4, "external buffers not supported\n");
668	return -EIO;
669}
670
671int
672__sli_create_queue(struct sli4 *sli4, struct sli4_queue *q)
673{
674	struct sli4_rsp_cmn_create_queue *res_q = NULL;
675
676	if (sli_bmbx_command(sli4)) {
677		efc_log_crit(sli4, "bootstrap mailbox write fail %s\n",
678			     SLI4_QNAME[q->type]);
679		return -EIO;
680	}
681	if (sli_res_sli_config(sli4, sli4->bmbx.virt)) {
682		efc_log_err(sli4, "bad status create %s\n",
683			    SLI4_QNAME[q->type]);
684		return -EIO;
685	}
686	res_q = (void *)((u8 *)sli4->bmbx.virt +
687			offsetof(struct sli4_cmd_sli_config, payload));
688
689	if (res_q->hdr.status) {
690		efc_log_err(sli4, "bad create %s status=%#x addl=%#x\n",
691			    SLI4_QNAME[q->type], res_q->hdr.status,
692			    res_q->hdr.additional_status);
693		return -EIO;
694	}
695	q->id = le16_to_cpu(res_q->q_id);
696	switch (q->type) {
697	case SLI4_QTYPE_EQ:
698		if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
699			q->db_regaddr = sli4->reg[1] + SLI4_IF6_EQ_DB_REG;
700		else
701			q->db_regaddr =	sli4->reg[0] + SLI4_EQCQ_DB_REG;
702		break;
703	case SLI4_QTYPE_CQ:
704		if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
705			q->db_regaddr = sli4->reg[1] + SLI4_IF6_CQ_DB_REG;
706		else
707			q->db_regaddr =	sli4->reg[0] + SLI4_EQCQ_DB_REG;
708		break;
709	case SLI4_QTYPE_MQ:
710		if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
711			q->db_regaddr = sli4->reg[1] + SLI4_IF6_MQ_DB_REG;
712		else
713			q->db_regaddr =	sli4->reg[0] + SLI4_MQ_DB_REG;
714		break;
715	case SLI4_QTYPE_RQ:
716		if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
717			q->db_regaddr = sli4->reg[1] + SLI4_IF6_RQ_DB_REG;
718		else
719			q->db_regaddr =	sli4->reg[0] + SLI4_RQ_DB_REG;
720		break;
721	case SLI4_QTYPE_WQ:
722		if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
723			q->db_regaddr = sli4->reg[1] + SLI4_IF6_WQ_DB_REG;
724		else
725			q->db_regaddr =	sli4->reg[0] + SLI4_IO_WQ_DB_REG;
726		break;
727	default:
728		break;
729	}
730
731	return 0;
732}
733
734int
735sli_get_queue_entry_size(struct sli4 *sli4, u32 qtype)
736{
737	u32 size = 0;
738
739	switch (qtype) {
740	case SLI4_QTYPE_EQ:
741		size = sizeof(u32);
742		break;
743	case SLI4_QTYPE_CQ:
744		size = 16;
745		break;
746	case SLI4_QTYPE_MQ:
747		size = 256;
748		break;
749	case SLI4_QTYPE_WQ:
750		size = sli4->wqe_size;
751		break;
752	case SLI4_QTYPE_RQ:
753		size = SLI4_RQE_SIZE;
754		break;
755	default:
756		efc_log_info(sli4, "unknown queue type %d\n", qtype);
757		return -1;
758	}
759	return size;
760}
761
762int
763sli_queue_alloc(struct sli4 *sli4, u32 qtype,
764		struct sli4_queue *q, u32 n_entries,
765		     struct sli4_queue *assoc)
766{
767	int size;
768	u32 align = 0;
769
770	/* get queue size */
771	size = sli_get_queue_entry_size(sli4, qtype);
772	if (size < 0)
773		return -EIO;
774	align = SLI_PAGE_SIZE;
775
776	if (__sli_queue_init(sli4, q, qtype, size, n_entries, align))
777		return -EIO;
778
779	switch (qtype) {
780	case SLI4_QTYPE_EQ:
781		if (!sli_cmd_common_create_eq(sli4, sli4->bmbx.virt, &q->dma) &&
782		    !__sli_create_queue(sli4, q))
783			return 0;
784
785		break;
786	case SLI4_QTYPE_CQ:
787		if (!sli_cmd_common_create_cq(sli4, sli4->bmbx.virt, &q->dma,
788					      assoc ? assoc->id : 0) &&
789		    !__sli_create_queue(sli4, q))
790			return 0;
791
792		break;
793	case SLI4_QTYPE_MQ:
794		assoc->u.flag |= SLI4_QUEUE_FLAG_MQ;
795		if (!sli_cmd_common_create_mq_ext(sli4, sli4->bmbx.virt,
796						  &q->dma, assoc->id) &&
797		    !__sli_create_queue(sli4, q))
798			return 0;
799
800		break;
801	case SLI4_QTYPE_WQ:
802		if (!sli_cmd_wq_create(sli4, sli4->bmbx.virt, &q->dma,
803				       assoc ? assoc->id : 0) &&
804		    !__sli_create_queue(sli4, q))
805			return 0;
806
807		break;
808	default:
809		efc_log_info(sli4, "unknown queue type %d\n", qtype);
810	}
811
812	__sli_queue_destroy(sli4, q);
813	return -EIO;
814}
815
816static int sli_cmd_cq_set_create(struct sli4 *sli4,
817				 struct sli4_queue *qs[], u32 num_cqs,
818				 struct sli4_queue *eqs[],
819				 struct efc_dma *dma)
820{
821	struct sli4_rqst_cmn_create_cq_set_v0 *req = NULL;
822	uintptr_t addr;
823	u32 i, offset = 0,  page_bytes = 0, payload_size;
824	u32 p = 0, page_size = 0, n_cqe = 0, num_pages_cq;
825	u32 dw5_flags = 0;
826	u16 dw6w1_flags = 0;
827	__le32 req_len;
828
829	n_cqe = qs[0]->dma.size / SLI4_CQE_BYTES;
830	switch (n_cqe) {
831	case 256:
832	case 512:
833	case 1024:
834	case 2048:
835		page_size = 1;
836		break;
837	case 4096:
838		page_size = 2;
839		break;
840	default:
841		return -EIO;
842	}
843
844	page_bytes = page_size * SLI_PAGE_SIZE;
845	num_pages_cq = sli_page_count(qs[0]->dma.size, page_bytes);
846	payload_size = max(SLI4_RQST_CMDSZ(cmn_create_cq_set_v0) +
847			   (SZ_DMAADDR * num_pages_cq * num_cqs),
848			   sizeof(struct sli4_rsp_cmn_create_queue_set));
849
850	dma->size = payload_size;
851	dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
852				       &dma->phys, GFP_KERNEL);
853	if (!dma->virt)
854		return -EIO;
855
856	memset(dma->virt, 0, payload_size);
857
858	req = sli_config_cmd_init(sli4, sli4->bmbx.virt, payload_size, dma);
859	if (!req)
860		return -EIO;
861
862	req_len = SLI4_RQST_PYLD_LEN_VAR(cmn_create_cq_set_v0,
863					 SZ_DMAADDR * num_pages_cq * num_cqs);
864	sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_CREATE_CQ_SET, SLI4_SUBSYSTEM_FC,
865			 CMD_V0, req_len);
866	req->page_size = page_size;
867
868	req->num_pages = cpu_to_le16(num_pages_cq);
869	switch (num_pages_cq) {
870	case 1:
871		dw5_flags |= SLI4_CQ_CNT_VAL(256);
872		break;
873	case 2:
874		dw5_flags |= SLI4_CQ_CNT_VAL(512);
875		break;
876	case 4:
877		dw5_flags |= SLI4_CQ_CNT_VAL(1024);
878		break;
879	case 8:
880		dw5_flags |= SLI4_CQ_CNT_VAL(LARGE);
881		dw6w1_flags |= (n_cqe & SLI4_CREATE_CQSETV0_CQE_COUNT);
882		break;
883	default:
884		efc_log_info(sli4, "num_pages %d not valid\n", num_pages_cq);
885		return -EIO;
886	}
887
888	dw5_flags |= SLI4_CREATE_CQSETV0_EVT;
889	dw5_flags |= SLI4_CREATE_CQSETV0_VALID;
890	if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
891		dw5_flags |= SLI4_CREATE_CQSETV0_AUTOVALID;
892
893	dw6w1_flags &= ~SLI4_CREATE_CQSETV0_ARM;
894
895	req->dw5_flags = cpu_to_le32(dw5_flags);
896	req->dw6w1_flags = cpu_to_le16(dw6w1_flags);
897
898	req->num_cq_req = cpu_to_le16(num_cqs);
899
900	/* Fill page addresses of all the CQs. */
901	for (i = 0; i < num_cqs; i++) {
902		req->eq_id[i] = cpu_to_le16(eqs[i]->id);
903		for (p = 0, addr = qs[i]->dma.phys; p < num_pages_cq;
904		     p++, addr += page_bytes) {
905			req->page_phys_addr[offset].low =
906				cpu_to_le32(lower_32_bits(addr));
907			req->page_phys_addr[offset].high =
908				cpu_to_le32(upper_32_bits(addr));
909			offset++;
910		}
911	}
912
913	return 0;
914}
915
916int
917sli_cq_alloc_set(struct sli4 *sli4, struct sli4_queue *qs[],
918		 u32 num_cqs, u32 n_entries, struct sli4_queue *eqs[])
919{
920	u32 i;
921	struct efc_dma dma = {0};
922	struct sli4_rsp_cmn_create_queue_set *res;
923	void __iomem *db_regaddr;
924
925	/* Align the queue DMA memory */
926	for (i = 0; i < num_cqs; i++) {
927		if (__sli_queue_init(sli4, qs[i], SLI4_QTYPE_CQ, SLI4_CQE_BYTES,
928				     n_entries, SLI_PAGE_SIZE))
929			goto error;
930	}
931
932	if (sli_cmd_cq_set_create(sli4, qs, num_cqs, eqs, &dma))
933		goto error;
934
935	if (sli_bmbx_command(sli4))
936		goto error;
937
938	if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
939		db_regaddr = sli4->reg[1] + SLI4_IF6_CQ_DB_REG;
940	else
941		db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG;
942
943	res = dma.virt;
944	if (res->hdr.status) {
945		efc_log_err(sli4, "bad create CQSet status=%#x addl=%#x\n",
946			    res->hdr.status, res->hdr.additional_status);
947		goto error;
948	}
949
950	/* Check if we got all requested CQs. */
951	if (le16_to_cpu(res->num_q_allocated) != num_cqs) {
952		efc_log_crit(sli4, "Requested count CQs doesn't match.\n");
953		goto error;
954	}
955	/* Fill the resp cq ids. */
956	for (i = 0; i < num_cqs; i++) {
957		qs[i]->id = le16_to_cpu(res->q_id) + i;
958		qs[i]->db_regaddr = db_regaddr;
959	}
960
961	dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, dma.phys);
962
963	return 0;
964
965error:
966	for (i = 0; i < num_cqs; i++)
967		__sli_queue_destroy(sli4, qs[i]);
968
969	if (dma.virt)
970		dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt,
971				  dma.phys);
972
973	return -EIO;
974}
975
976static int
977sli_cmd_common_destroy_q(struct sli4 *sli4, u8 opc, u8 subsystem, u16 q_id)
978{
979	struct sli4_rqst_cmn_destroy_q *req;
980
981	/* Payload length must accommodate both request and response */
982	req = sli_config_cmd_init(sli4, sli4->bmbx.virt,
983				  SLI4_CFG_PYLD_LENGTH(cmn_destroy_q), NULL);
984	if (!req)
985		return -EIO;
986
987	sli_cmd_fill_hdr(&req->hdr, opc, subsystem,
988			 CMD_V0, SLI4_RQST_PYLD_LEN(cmn_destroy_q));
989	req->q_id = cpu_to_le16(q_id);
990
991	return 0;
992}
993
994int
995sli_queue_free(struct sli4 *sli4, struct sli4_queue *q,
996	       u32 destroy_queues, u32 free_memory)
997{
998	int rc = 0;
999	u8 opcode, subsystem;
1000	struct sli4_rsp_hdr *res;
1001
1002	if (!q) {
1003		efc_log_err(sli4, "bad parameter sli4=%p q=%p\n", sli4, q);
1004		return -EIO;
1005	}
1006
1007	if (!destroy_queues)
1008		goto free_mem;
1009
1010	switch (q->type) {
1011	case SLI4_QTYPE_EQ:
1012		opcode = SLI4_CMN_DESTROY_EQ;
1013		subsystem = SLI4_SUBSYSTEM_COMMON;
1014		break;
1015	case SLI4_QTYPE_CQ:
1016		opcode = SLI4_CMN_DESTROY_CQ;
1017		subsystem = SLI4_SUBSYSTEM_COMMON;
1018		break;
1019	case SLI4_QTYPE_MQ:
1020		opcode = SLI4_CMN_DESTROY_MQ;
1021		subsystem = SLI4_SUBSYSTEM_COMMON;
1022		break;
1023	case SLI4_QTYPE_WQ:
1024		opcode = SLI4_OPC_WQ_DESTROY;
1025		subsystem = SLI4_SUBSYSTEM_FC;
1026		break;
1027	case SLI4_QTYPE_RQ:
1028		opcode = SLI4_OPC_RQ_DESTROY;
1029		subsystem = SLI4_SUBSYSTEM_FC;
1030		break;
1031	default:
1032		efc_log_info(sli4, "bad queue type %d\n", q->type);
1033		rc = -EIO;
1034		goto free_mem;
1035	}
1036
1037	rc = sli_cmd_common_destroy_q(sli4, opcode, subsystem, q->id);
1038	if (rc)
1039		goto free_mem;
1040
1041	rc = sli_bmbx_command(sli4);
1042	if (rc)
1043		goto free_mem;
1044
1045	rc = sli_res_sli_config(sli4, sli4->bmbx.virt);
1046	if (rc)
1047		goto free_mem;
1048
1049	res = (void *)((u8 *)sli4->bmbx.virt +
1050			     offsetof(struct sli4_cmd_sli_config, payload));
1051	if (res->status) {
1052		efc_log_err(sli4, "destroy %s st=%#x addl=%#x\n",
1053			    SLI4_QNAME[q->type], res->status,
1054			    res->additional_status);
1055		rc = -EIO;
1056		goto free_mem;
1057	}
1058
1059free_mem:
1060	if (free_memory)
1061		__sli_queue_destroy(sli4, q);
1062
1063	return rc;
1064}
1065
1066int
1067sli_queue_eq_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm)
1068{
1069	u32 val;
1070	unsigned long flags = 0;
1071	u32 a = arm ? SLI4_EQCQ_ARM : SLI4_EQCQ_UNARM;
1072
1073	spin_lock_irqsave(&q->lock, flags);
1074	if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
1075		val = sli_format_if6_eq_db_data(q->n_posted, q->id, a);
1076	else
1077		val = sli_format_eq_db_data(q->n_posted, q->id, a);
1078
1079	writel(val, q->db_regaddr);
1080	q->n_posted = 0;
1081	spin_unlock_irqrestore(&q->lock, flags);
1082
1083	return 0;
1084}
1085
1086int
1087sli_queue_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm)
1088{
1089	u32 val = 0;
1090	unsigned long flags = 0;
1091	u32 a = arm ? SLI4_EQCQ_ARM : SLI4_EQCQ_UNARM;
1092
1093	spin_lock_irqsave(&q->lock, flags);
1094
1095	switch (q->type) {
1096	case SLI4_QTYPE_EQ:
1097		if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
1098			val = sli_format_if6_eq_db_data(q->n_posted, q->id, a);
1099		else
1100			val = sli_format_eq_db_data(q->n_posted, q->id, a);
1101
1102		writel(val, q->db_regaddr);
1103		q->n_posted = 0;
1104		break;
1105	case SLI4_QTYPE_CQ:
1106		if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
1107			val = sli_format_if6_cq_db_data(q->n_posted, q->id, a);
1108		else
1109			val = sli_format_cq_db_data(q->n_posted, q->id, a);
1110
1111		writel(val, q->db_regaddr);
1112		q->n_posted = 0;
1113		break;
1114	default:
1115		efc_log_info(sli4, "should only be used for EQ/CQ, not %s\n",
1116			     SLI4_QNAME[q->type]);
1117	}
1118
1119	spin_unlock_irqrestore(&q->lock, flags);
1120
1121	return 0;
1122}
1123
1124int
1125sli_wq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
1126{
1127	u8 *qe = q->dma.virt;
1128	u32 qindex;
1129	u32 val = 0;
1130
1131	qindex = q->index;
1132	qe += q->index * q->size;
1133
1134	if (sli4->params.perf_wq_id_association)
1135		sli_set_wq_id_association(entry, q->id);
1136
1137	memcpy(qe, entry, q->size);
1138	val = sli_format_wq_db_data(q->id);
1139
1140	writel(val, q->db_regaddr);
1141	q->index = (q->index + 1) & (q->length - 1);
1142
1143	return qindex;
1144}
1145
1146int
1147sli_mq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
1148{
1149	u8 *qe = q->dma.virt;
1150	u32 qindex;
1151	u32 val = 0;
1152	unsigned long flags;
1153
1154	spin_lock_irqsave(&q->lock, flags);
1155	qindex = q->index;
1156	qe += q->index * q->size;
1157
1158	memcpy(qe, entry, q->size);
1159	val = sli_format_mq_db_data(q->id);
1160	writel(val, q->db_regaddr);
1161	q->index = (q->index + 1) & (q->length - 1);
1162	spin_unlock_irqrestore(&q->lock, flags);
1163
1164	return qindex;
1165}
1166
1167int
1168sli_rq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
1169{
1170	u8 *qe = q->dma.virt;
1171	u32 qindex;
1172	u32 val = 0;
1173
1174	qindex = q->index;
1175	qe += q->index * q->size;
1176
1177	memcpy(qe, entry, q->size);
1178
1179	/*
1180	 * In RQ-pair, an RQ either contains the FC header
1181	 * (i.e. is_hdr == TRUE) or the payload.
1182	 *
1183	 * Don't ring doorbell for payload RQ
1184	 */
1185	if (!(q->u.flag & SLI4_QUEUE_FLAG_HDR))
1186		goto skip;
1187
1188	val = sli_format_rq_db_data(q->id);
1189	writel(val, q->db_regaddr);
1190skip:
1191	q->index = (q->index + 1) & (q->length - 1);
1192
1193	return qindex;
1194}
1195
1196int
1197sli_eq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
1198{
1199	u8 *qe = q->dma.virt;
1200	unsigned long flags = 0;
1201	u16 wflags = 0;
1202
1203	spin_lock_irqsave(&q->lock, flags);
1204
1205	qe += q->index * q->size;
1206
1207	/* Check if eqe is valid */
1208	wflags = le16_to_cpu(((struct sli4_eqe *)qe)->dw0w0_flags);
1209
1210	if ((wflags & SLI4_EQE_VALID) != q->phase) {
1211		spin_unlock_irqrestore(&q->lock, flags);
1212		return -EIO;
1213	}
1214
1215	if (sli4->if_type != SLI4_INTF_IF_TYPE_6) {
1216		wflags &= ~SLI4_EQE_VALID;
1217		((struct sli4_eqe *)qe)->dw0w0_flags = cpu_to_le16(wflags);
1218	}
1219
1220	memcpy(entry, qe, q->size);
1221	q->index = (q->index + 1) & (q->length - 1);
1222	q->n_posted++;
1223	/*
1224	 * For prism, the phase value will be used
1225	 * to check the validity of eq/cq entries.
1226	 * The value toggles after a complete sweep
1227	 * through the queue.
1228	 */
1229
1230	if (sli4->if_type == SLI4_INTF_IF_TYPE_6 && q->index == 0)
1231		q->phase ^= (u16)0x1;
1232
1233	spin_unlock_irqrestore(&q->lock, flags);
1234
1235	return 0;
1236}
1237
1238int
1239sli_cq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
1240{
1241	u8 *qe = q->dma.virt;
1242	unsigned long flags = 0;
1243	u32 dwflags = 0;
1244	bool valid_bit_set;
1245
1246	spin_lock_irqsave(&q->lock, flags);
1247
1248	qe += q->index * q->size;
1249
1250	/* Check if cqe is valid */
1251	dwflags = le32_to_cpu(((struct sli4_mcqe *)qe)->dw3_flags);
1252	valid_bit_set = (dwflags & SLI4_MCQE_VALID) != 0;
1253
1254	if (valid_bit_set != q->phase) {
1255		spin_unlock_irqrestore(&q->lock, flags);
1256		return -EIO;
1257	}
1258
1259	if (sli4->if_type != SLI4_INTF_IF_TYPE_6) {
1260		dwflags &= ~SLI4_MCQE_VALID;
1261		((struct sli4_mcqe *)qe)->dw3_flags = cpu_to_le32(dwflags);
1262	}
1263
1264	memcpy(entry, qe, q->size);
1265	q->index = (q->index + 1) & (q->length - 1);
1266	q->n_posted++;
1267	/*
1268	 * For prism, the phase value will be used
1269	 * to check the validity of eq/cq entries.
1270	 * The value toggles after a complete sweep
1271	 * through the queue.
1272	 */
1273
1274	if (sli4->if_type == SLI4_INTF_IF_TYPE_6 && q->index == 0)
1275		q->phase ^= (u16)0x1;
1276
1277	spin_unlock_irqrestore(&q->lock, flags);
1278
1279	return 0;
1280}
1281
1282int
1283sli_mq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
1284{
1285	u8 *qe = q->dma.virt;
1286	unsigned long flags = 0;
1287
1288	spin_lock_irqsave(&q->lock, flags);
1289
1290	qe += q->u.r_idx * q->size;
1291
1292	/* Check if mqe is valid */
1293	if (q->index == q->u.r_idx) {
1294		spin_unlock_irqrestore(&q->lock, flags);
1295		return -EIO;
1296	}
1297
1298	memcpy(entry, qe, q->size);
1299	q->u.r_idx = (q->u.r_idx + 1) & (q->length - 1);
1300
1301	spin_unlock_irqrestore(&q->lock, flags);
1302
1303	return 0;
1304}
1305
1306int
1307sli_eq_parse(struct sli4 *sli4, u8 *buf, u16 *cq_id)
1308{
1309	struct sli4_eqe *eqe = (void *)buf;
1310	int rc = 0;
1311	u16 flags = 0;
1312	u16 majorcode;
1313	u16 minorcode;
1314
1315	if (!buf || !cq_id) {
1316		efc_log_err(sli4, "bad parameters sli4=%p buf=%p cq_id=%p\n",
1317			    sli4, buf, cq_id);
1318		return -EIO;
1319	}
1320
1321	flags = le16_to_cpu(eqe->dw0w0_flags);
1322	majorcode = (flags & SLI4_EQE_MJCODE) >> 1;
1323	minorcode = (flags & SLI4_EQE_MNCODE) >> 4;
1324	switch (majorcode) {
1325	case SLI4_MAJOR_CODE_STANDARD:
1326		*cq_id = le16_to_cpu(eqe->resource_id);
1327		break;
1328	case SLI4_MAJOR_CODE_SENTINEL:
1329		efc_log_info(sli4, "sentinel EQE\n");
1330		rc = SLI4_EQE_STATUS_EQ_FULL;
1331		break;
1332	default:
1333		efc_log_info(sli4, "Unsupported EQE: major %x minor %x\n",
1334			     majorcode, minorcode);
1335		rc = -EIO;
1336	}
1337
1338	return rc;
1339}
1340
1341int
1342sli_cq_parse(struct sli4 *sli4, struct sli4_queue *cq, u8 *cqe,
1343	     enum sli4_qentry *etype, u16 *q_id)
1344{
1345	int rc = 0;
1346
1347	if (!cq || !cqe || !etype) {
1348		efc_log_err(sli4, "bad params sli4=%p cq=%p cqe=%p etype=%p q_id=%p\n",
1349			    sli4, cq, cqe, etype, q_id);
1350		return -EINVAL;
1351	}
1352
1353	/* Parse a CQ entry to retrieve the event type and the queue id */
1354	if (cq->u.flag & SLI4_QUEUE_FLAG_MQ) {
1355		struct sli4_mcqe	*mcqe = (void *)cqe;
1356
1357		if (le32_to_cpu(mcqe->dw3_flags) & SLI4_MCQE_AE) {
1358			*etype = SLI4_QENTRY_ASYNC;
1359		} else {
1360			*etype = SLI4_QENTRY_MQ;
1361			rc = sli_cqe_mq(sli4, mcqe);
1362		}
1363		*q_id = -1;
1364	} else {
1365		rc = sli_fc_cqe_parse(sli4, cq, cqe, etype, q_id);
1366	}
1367
1368	return rc;
1369}
1370
1371int
1372sli_abort_wqe(struct sli4 *sli, void *buf, enum sli4_abort_type type,
1373	      bool send_abts, u32 ids, u32 mask, u16 tag, u16 cq_id)
1374{
1375	struct sli4_abort_wqe *abort = buf;
1376
1377	memset(buf, 0, sli->wqe_size);
1378
1379	switch (type) {
1380	case SLI4_ABORT_XRI:
1381		abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG;
1382		if (mask) {
1383			efc_log_warn(sli, "%#x aborting XRI %#x warning non-zero mask",
1384				     mask, ids);
1385			mask = 0;
1386		}
1387		break;
1388	case SLI4_ABORT_ABORT_ID:
1389		abort->criteria = SLI4_ABORT_CRITERIA_ABORT_TAG;
1390		break;
1391	case SLI4_ABORT_REQUEST_ID:
1392		abort->criteria = SLI4_ABORT_CRITERIA_REQUEST_TAG;
1393		break;
1394	default:
1395		efc_log_info(sli, "unsupported type %#x\n", type);
1396		return -EIO;
1397	}
1398
1399	abort->ia_ir_byte |= send_abts ? 0 : 1;
1400
1401	/* Suppress ABTS retries */
1402	abort->ia_ir_byte |= SLI4_ABRT_WQE_IR;
1403
1404	abort->t_mask = cpu_to_le32(mask);
1405	abort->t_tag  = cpu_to_le32(ids);
1406	abort->command = SLI4_WQE_ABORT;
1407	abort->request_tag = cpu_to_le16(tag);
1408
1409	abort->dw10w0_flags = cpu_to_le16(SLI4_ABRT_WQE_QOSD);
1410
1411	abort->cq_id = cpu_to_le16(cq_id);
1412	abort->cmdtype_wqec_byte |= SLI4_CMD_ABORT_WQE;
1413
1414	return 0;
1415}
1416
1417int
1418sli_els_request64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
1419		      struct sli_els_params *params)
1420{
1421	struct sli4_els_request64_wqe *els = buf;
1422	struct sli4_sge *sge = sgl->virt;
1423	bool is_fabric = false;
1424	struct sli4_bde *bptr;
1425
1426	memset(buf, 0, sli->wqe_size);
1427
1428	bptr = &els->els_request_payload;
1429	if (sli->params.sgl_pre_registered) {
1430		els->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_REQ_WQE_XBL;
1431
1432		els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_DBDE;
1433		bptr->bde_type_buflen =
1434			cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
1435				    (params->xmit_len & SLI4_BDE_LEN_MASK));
1436
1437		bptr->u.data.low  = sge[0].buffer_address_low;
1438		bptr->u.data.high = sge[0].buffer_address_high;
1439	} else {
1440		els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_XBL;
1441
1442		bptr->bde_type_buflen =
1443			cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
1444				    ((2 * sizeof(struct sli4_sge)) &
1445				     SLI4_BDE_LEN_MASK));
1446		bptr->u.blp.low  = cpu_to_le32(lower_32_bits(sgl->phys));
1447		bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys));
1448	}
1449
1450	els->els_request_payload_length = cpu_to_le32(params->xmit_len);
1451	els->max_response_payload_length = cpu_to_le32(params->rsp_len);
1452
1453	els->xri_tag = cpu_to_le16(params->xri);
1454	els->timer = params->timeout;
1455	els->class_byte |= SLI4_GENERIC_CLASS_CLASS_3;
1456
1457	els->command = SLI4_WQE_ELS_REQUEST64;
1458
1459	els->request_tag = cpu_to_le16(params->tag);
1460
1461	els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_IOD;
1462
1463	els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_QOSD;
1464
1465	/* figure out the ELS_ID value from the request buffer */
1466
1467	switch (params->cmd) {
1468	case ELS_LOGO:
1469		els->cmdtype_elsid_byte |=
1470			SLI4_ELS_REQUEST64_LOGO << SLI4_REQ_WQE_ELSID_SHFT;
1471		if (params->rpi_registered) {
1472			els->ct_byte |=
1473			SLI4_GENERIC_CONTEXT_RPI << SLI4_REQ_WQE_CT_SHFT;
1474			els->context_tag = cpu_to_le16(params->rpi);
1475		} else {
1476			els->ct_byte |=
1477			SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
1478			els->context_tag = cpu_to_le16(params->vpi);
1479		}
1480		if (params->d_id == FC_FID_FLOGI)
1481			is_fabric = true;
1482		break;
1483	case ELS_FDISC:
1484		if (params->d_id == FC_FID_FLOGI)
1485			is_fabric = true;
1486		if (params->s_id == 0) {
1487			els->cmdtype_elsid_byte |=
1488			SLI4_ELS_REQUEST64_FDISC << SLI4_REQ_WQE_ELSID_SHFT;
1489			is_fabric = true;
1490		} else {
1491			els->cmdtype_elsid_byte |=
1492			SLI4_ELS_REQUEST64_OTHER << SLI4_REQ_WQE_ELSID_SHFT;
1493		}
1494		els->ct_byte |=
1495			SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
1496		els->context_tag = cpu_to_le16(params->vpi);
1497		els->sid_sp_dword |= cpu_to_le32(1 << SLI4_REQ_WQE_SP_SHFT);
1498		break;
1499	case ELS_FLOGI:
1500		els->ct_byte |=
1501			SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
1502		els->context_tag = cpu_to_le16(params->vpi);
1503		/*
1504		 * Set SP here ... we haven't done a REG_VPI yet
1505		 * need to maybe not set this when we have
1506		 * completed VFI/VPI registrations ...
1507		 *
1508		 * Use the FC_ID of the SPORT if it has been allocated,
1509		 * otherwise use an S_ID of zero.
1510		 */
1511		els->sid_sp_dword |= cpu_to_le32(1 << SLI4_REQ_WQE_SP_SHFT);
1512		if (params->s_id != U32_MAX)
1513			els->sid_sp_dword |= cpu_to_le32(params->s_id);
1514		break;
1515	case ELS_PLOGI:
1516		els->cmdtype_elsid_byte |=
1517			SLI4_ELS_REQUEST64_PLOGI << SLI4_REQ_WQE_ELSID_SHFT;
1518		els->ct_byte |=
1519			SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
1520		els->context_tag = cpu_to_le16(params->vpi);
1521		break;
1522	case ELS_SCR:
1523		els->cmdtype_elsid_byte |=
1524			SLI4_ELS_REQUEST64_OTHER << SLI4_REQ_WQE_ELSID_SHFT;
1525		els->ct_byte |=
1526			SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
1527		els->context_tag = cpu_to_le16(params->vpi);
1528		break;
1529	default:
1530		els->cmdtype_elsid_byte |=
1531			SLI4_ELS_REQUEST64_OTHER << SLI4_REQ_WQE_ELSID_SHFT;
1532		if (params->rpi_registered) {
1533			els->ct_byte |= (SLI4_GENERIC_CONTEXT_RPI <<
1534					 SLI4_REQ_WQE_CT_SHFT);
1535			els->context_tag = cpu_to_le16(params->vpi);
1536		} else {
1537			els->ct_byte |=
1538			SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
1539			els->context_tag = cpu_to_le16(params->vpi);
1540		}
1541		break;
1542	}
1543
1544	if (is_fabric)
1545		els->cmdtype_elsid_byte |= SLI4_ELS_REQUEST64_CMD_FABRIC;
1546	else
1547		els->cmdtype_elsid_byte |= SLI4_ELS_REQUEST64_CMD_NON_FABRIC;
1548
1549	els->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
1550
1551	if (((els->ct_byte & SLI4_REQ_WQE_CT) >> SLI4_REQ_WQE_CT_SHFT) !=
1552					SLI4_GENERIC_CONTEXT_RPI)
1553		els->remote_id_dword = cpu_to_le32(params->d_id);
1554
1555	if (((els->ct_byte & SLI4_REQ_WQE_CT) >> SLI4_REQ_WQE_CT_SHFT) ==
1556					SLI4_GENERIC_CONTEXT_VPI)
1557		els->temporary_rpi = cpu_to_le16(params->rpi);
1558
1559	return 0;
1560}
1561
1562int
1563sli_fcp_icmnd64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl, u16 xri,
1564		    u16 tag, u16 cq_id, u32 rpi, u32 rnode_fcid, u8 timeout)
1565{
1566	struct sli4_fcp_icmnd64_wqe *icmnd = buf;
1567	struct sli4_sge *sge = NULL;
1568	struct sli4_bde *bptr;
1569	u32 len;
1570
1571	memset(buf, 0, sli->wqe_size);
1572
1573	if (!sgl || !sgl->virt) {
1574		efc_log_err(sli, "bad parameter sgl=%p virt=%p\n",
1575			    sgl, sgl ? sgl->virt : NULL);
1576		return -EIO;
1577	}
1578	sge = sgl->virt;
1579	bptr = &icmnd->bde;
1580	if (sli->params.sgl_pre_registered) {
1581		icmnd->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_ICMD_WQE_XBL;
1582
1583		icmnd->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_ICMD_WQE_DBDE;
1584		bptr->bde_type_buflen =
1585			cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
1586				    (le32_to_cpu(sge[0].buffer_length) &
1587				     SLI4_BDE_LEN_MASK));
1588
1589		bptr->u.data.low  = sge[0].buffer_address_low;
1590		bptr->u.data.high = sge[0].buffer_address_high;
1591	} else {
1592		icmnd->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_ICMD_WQE_XBL;
1593
1594		bptr->bde_type_buflen =
1595			cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
1596				    (sgl->size & SLI4_BDE_LEN_MASK));
1597
1598		bptr->u.blp.low  = cpu_to_le32(lower_32_bits(sgl->phys));
1599		bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys));
1600	}
1601
1602	len = le32_to_cpu(sge[0].buffer_length) +
1603	      le32_to_cpu(sge[1].buffer_length);
1604	icmnd->payload_offset_length = cpu_to_le16(len);
1605	icmnd->xri_tag = cpu_to_le16(xri);
1606	icmnd->context_tag = cpu_to_le16(rpi);
1607	icmnd->timer = timeout;
1608
1609	/* WQE word 4 contains read transfer length */
1610	icmnd->class_pu_byte |= 2 << SLI4_ICMD_WQE_PU_SHFT;
1611	icmnd->class_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3;
1612	icmnd->command = SLI4_WQE_FCP_ICMND64;
1613	icmnd->dif_ct_bs_byte |=
1614		SLI4_GENERIC_CONTEXT_RPI << SLI4_ICMD_WQE_CT_SHFT;
1615
1616	icmnd->abort_tag = cpu_to_le32(xri);
1617
1618	icmnd->request_tag = cpu_to_le16(tag);
1619	icmnd->len_loc1_byte |= SLI4_ICMD_WQE_LEN_LOC_BIT1;
1620	icmnd->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_ICMD_WQE_LEN_LOC_BIT2;
1621	icmnd->cmd_type_byte |= SLI4_CMD_FCP_ICMND64_WQE;
1622	icmnd->cq_id = cpu_to_le16(cq_id);
1623
1624	return  0;
1625}
1626
1627int
1628sli_fcp_iread64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
1629		    u32 first_data_sge, u32 xfer_len, u16 xri, u16 tag,
1630		    u16 cq_id, u32 rpi, u32 rnode_fcid,
1631		    u8 dif, u8 bs, u8 timeout)
1632{
1633	struct sli4_fcp_iread64_wqe *iread = buf;
1634	struct sli4_sge *sge = NULL;
1635	struct sli4_bde *bptr;
1636	u32 sge_flags, len;
1637
1638	memset(buf, 0, sli->wqe_size);
1639
1640	if (!sgl || !sgl->virt) {
1641		efc_log_err(sli, "bad parameter sgl=%p virt=%p\n",
1642			    sgl, sgl ? sgl->virt : NULL);
1643		return -EIO;
1644	}
1645
1646	sge = sgl->virt;
1647	bptr = &iread->bde;
1648	if (sli->params.sgl_pre_registered) {
1649		iread->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_IR_WQE_XBL;
1650
1651		iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_DBDE;
1652
1653		bptr->bde_type_buflen =
1654			cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
1655				    (le32_to_cpu(sge[0].buffer_length) &
1656				     SLI4_BDE_LEN_MASK));
1657
1658		bptr->u.blp.low  = sge[0].buffer_address_low;
1659		bptr->u.blp.high = sge[0].buffer_address_high;
1660	} else {
1661		iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_XBL;
1662
1663		bptr->bde_type_buflen =
1664			cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
1665				    (sgl->size & SLI4_BDE_LEN_MASK));
1666
1667		bptr->u.blp.low  =
1668				cpu_to_le32(lower_32_bits(sgl->phys));
1669		bptr->u.blp.high =
1670				cpu_to_le32(upper_32_bits(sgl->phys));
1671
1672		/*
1673		 * fill out fcp_cmnd buffer len and change resp buffer to be of
1674		 * type "skip" (note: response will still be written to sge[1]
1675		 * if necessary)
1676		 */
1677		len = le32_to_cpu(sge[0].buffer_length);
1678		iread->fcp_cmd_buffer_length = cpu_to_le16(len);
1679
1680		sge_flags = le32_to_cpu(sge[1].dw2_flags);
1681		sge_flags &= (~SLI4_SGE_TYPE_MASK);
1682		sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT);
1683		sge[1].dw2_flags = cpu_to_le32(sge_flags);
1684	}
1685
1686	len = le32_to_cpu(sge[0].buffer_length) +
1687	      le32_to_cpu(sge[1].buffer_length);
1688	iread->payload_offset_length = cpu_to_le16(len);
1689	iread->total_transfer_length = cpu_to_le32(xfer_len);
1690
1691	iread->xri_tag = cpu_to_le16(xri);
1692	iread->context_tag = cpu_to_le16(rpi);
1693
1694	iread->timer = timeout;
1695
1696	/* WQE word 4 contains read transfer length */
1697	iread->class_pu_byte |= 2 << SLI4_IR_WQE_PU_SHFT;
1698	iread->class_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3;
1699	iread->command = SLI4_WQE_FCP_IREAD64;
1700	iread->dif_ct_bs_byte |=
1701		SLI4_GENERIC_CONTEXT_RPI << SLI4_IR_WQE_CT_SHFT;
1702	iread->dif_ct_bs_byte |= dif;
1703	iread->dif_ct_bs_byte  |= bs << SLI4_IR_WQE_BS_SHFT;
1704
1705	iread->abort_tag = cpu_to_le32(xri);
1706
1707	iread->request_tag = cpu_to_le16(tag);
1708	iread->len_loc1_byte |= SLI4_IR_WQE_LEN_LOC_BIT1;
1709	iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_LEN_LOC_BIT2;
1710	iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_IOD;
1711	iread->cmd_type_byte |= SLI4_CMD_FCP_IREAD64_WQE;
1712	iread->cq_id = cpu_to_le16(cq_id);
1713
1714	if (sli->params.perf_hint) {
1715		bptr = &iread->first_data_bde;
1716		bptr->bde_type_buflen =	cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
1717			  (le32_to_cpu(sge[first_data_sge].buffer_length) &
1718			     SLI4_BDE_LEN_MASK));
1719		bptr->u.data.low =
1720			sge[first_data_sge].buffer_address_low;
1721		bptr->u.data.high =
1722			sge[first_data_sge].buffer_address_high;
1723	}
1724
1725	return  0;
1726}
1727
1728int
1729sli_fcp_iwrite64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
1730		     u32 first_data_sge, u32 xfer_len,
1731		     u32 first_burst, u16 xri, u16 tag,
1732		     u16 cq_id, u32 rpi,
1733		     u32 rnode_fcid,
1734		     u8 dif, u8 bs, u8 timeout)
1735{
1736	struct sli4_fcp_iwrite64_wqe *iwrite = buf;
1737	struct sli4_sge *sge = NULL;
1738	struct sli4_bde *bptr;
1739	u32 sge_flags, min, len;
1740
1741	memset(buf, 0, sli->wqe_size);
1742
1743	if (!sgl || !sgl->virt) {
1744		efc_log_err(sli, "bad parameter sgl=%p virt=%p\n",
1745			    sgl, sgl ? sgl->virt : NULL);
1746		return -EIO;
1747	}
1748	sge = sgl->virt;
1749	bptr = &iwrite->bde;
1750	if (sli->params.sgl_pre_registered) {
1751		iwrite->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_IWR_WQE_XBL;
1752
1753		iwrite->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IWR_WQE_DBDE;
1754		bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
1755		       (le32_to_cpu(sge[0].buffer_length) & SLI4_BDE_LEN_MASK));
1756		bptr->u.data.low  = sge[0].buffer_address_low;
1757		bptr->u.data.high = sge[0].buffer_address_high;
1758	} else {
1759		iwrite->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IWR_WQE_XBL;
1760
1761		bptr->bde_type_buflen =	cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
1762					(sgl->size & SLI4_BDE_LEN_MASK));
1763
1764		bptr->u.blp.low  = cpu_to_le32(lower_32_bits(sgl->phys));
1765		bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys));
1766
1767		/*
1768		 * fill out fcp_cmnd buffer len and change resp buffer to be of
1769		 * type "skip" (note: response will still be written to sge[1]
1770		 * if necessary)
1771		 */
1772		len = le32_to_cpu(sge[0].buffer_length);
1773		iwrite->fcp_cmd_buffer_length = cpu_to_le16(len);
1774		sge_flags = le32_to_cpu(sge[1].dw2_flags);
1775		sge_flags &= ~SLI4_SGE_TYPE_MASK;
1776		sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT);
1777		sge[1].dw2_flags = cpu_to_le32(sge_flags);
1778	}
1779
1780	len = le32_to_cpu(sge[0].buffer_length) +
1781	      le32_to_cpu(sge[1].buffer_length);
1782	iwrite->payload_offset_length = cpu_to_le16(len);
1783	iwrite->total_transfer_length = cpu_to_le16(xfer_len);
1784	min = (xfer_len < first_burst) ? xfer_len : first_burst;
1785	iwrite->initial_transfer_length = cpu_to_le16(min);
1786
1787	iwrite->xri_tag = cpu_to_le16(xri);
1788	iwrite->context_tag = cpu_to_le16(rpi);
1789
1790	iwrite->timer = timeout;
1791	/* WQE word 4 contains read transfer length */
1792	iwrite->class_pu_byte |= 2 << SLI4_IWR_WQE_PU_SHFT;
1793	iwrite->class_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3;
1794	iwrite->command = SLI4_WQE_FCP_IWRITE64;
1795	iwrite->dif_ct_bs_byte |=
1796			SLI4_GENERIC_CONTEXT_RPI << SLI4_IWR_WQE_CT_SHFT;
1797	iwrite->dif_ct_bs_byte |= dif;
1798	iwrite->dif_ct_bs_byte |= bs << SLI4_IWR_WQE_BS_SHFT;
1799
1800	iwrite->abort_tag = cpu_to_le32(xri);
1801
1802	iwrite->request_tag = cpu_to_le16(tag);
1803	iwrite->len_loc1_byte |= SLI4_IWR_WQE_LEN_LOC_BIT1;
1804	iwrite->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IWR_WQE_LEN_LOC_BIT2;
1805	iwrite->cmd_type_byte |= SLI4_CMD_FCP_IWRITE64_WQE;
1806	iwrite->cq_id = cpu_to_le16(cq_id);
1807
1808	if (sli->params.perf_hint) {
1809		bptr = &iwrite->first_data_bde;
1810
1811		bptr->bde_type_buflen =	cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
1812			 (le32_to_cpu(sge[first_data_sge].buffer_length) &
1813			     SLI4_BDE_LEN_MASK));
1814
1815		bptr->u.data.low = sge[first_data_sge].buffer_address_low;
1816		bptr->u.data.high = sge[first_data_sge].buffer_address_high;
1817	}
1818
1819	return  0;
1820}
1821
1822int
1823sli_fcp_treceive64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
1824		       u32 first_data_sge, u16 cq_id, u8 dif, u8 bs,
1825		       struct sli_fcp_tgt_params *params)
1826{
1827	struct sli4_fcp_treceive64_wqe *trecv = buf;
1828	struct sli4_fcp_128byte_wqe *trecv_128 = buf;
1829	struct sli4_sge *sge = NULL;
1830	struct sli4_bde *bptr;
1831
1832	memset(buf, 0, sli->wqe_size);
1833
1834	if (!sgl || !sgl->virt) {
1835		efc_log_err(sli, "bad parameter sgl=%p virt=%p\n",
1836			    sgl, sgl ? sgl->virt : NULL);
1837		return -EIO;
1838	}
1839	sge = sgl->virt;
1840	bptr = &trecv->bde;
1841	if (sli->params.sgl_pre_registered) {
1842		trecv->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_TRCV_WQE_XBL;
1843
1844		trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_DBDE;
1845
1846		bptr->bde_type_buflen =
1847			cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
1848				    (le32_to_cpu(sge[0].buffer_length)
1849					& SLI4_BDE_LEN_MASK));
1850
1851		bptr->u.data.low  = sge[0].buffer_address_low;
1852		bptr->u.data.high = sge[0].buffer_address_high;
1853
1854		trecv->payload_offset_length = sge[0].buffer_length;
1855	} else {
1856		trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_XBL;
1857
1858		/* if data is a single physical address, use a BDE */
1859		if (!dif &&
1860		    params->xmit_len <= le32_to_cpu(sge[2].buffer_length)) {
1861			trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_DBDE;
1862			bptr->bde_type_buflen =
1863			      cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
1864					  (le32_to_cpu(sge[2].buffer_length)
1865					  & SLI4_BDE_LEN_MASK));
1866
1867			bptr->u.data.low = sge[2].buffer_address_low;
1868			bptr->u.data.high = sge[2].buffer_address_high;
1869		} else {
1870			bptr->bde_type_buflen =
1871				cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
1872				(sgl->size & SLI4_BDE_LEN_MASK));
1873			bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys));
1874			bptr->u.blp.high =
1875				cpu_to_le32(upper_32_bits(sgl->phys));
1876		}
1877	}
1878
1879	trecv->relative_offset = cpu_to_le32(params->offset);
1880
1881	if (params->flags & SLI4_IO_CONTINUATION)
1882		trecv->eat_xc_ccpe |= SLI4_TRCV_WQE_XC;
1883
1884	trecv->xri_tag = cpu_to_le16(params->xri);
1885
1886	trecv->context_tag = cpu_to_le16(params->rpi);
1887
1888	/* WQE uses relative offset */
1889	trecv->class_ar_pu_byte |= 1 << SLI4_TRCV_WQE_PU_SHFT;
1890
1891	if (params->flags & SLI4_IO_AUTO_GOOD_RESPONSE)
1892		trecv->class_ar_pu_byte |= SLI4_TRCV_WQE_AR;
1893
1894	trecv->command = SLI4_WQE_FCP_TRECEIVE64;
1895	trecv->class_ar_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3;
1896	trecv->dif_ct_bs_byte |=
1897		SLI4_GENERIC_CONTEXT_RPI << SLI4_TRCV_WQE_CT_SHFT;
1898	trecv->dif_ct_bs_byte |= bs << SLI4_TRCV_WQE_BS_SHFT;
1899
1900	trecv->remote_xid = cpu_to_le16(params->ox_id);
1901
1902	trecv->request_tag = cpu_to_le16(params->tag);
1903
1904	trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_IOD;
1905
1906	trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_LEN_LOC_BIT2;
1907
1908	trecv->cmd_type_byte |= SLI4_CMD_FCP_TRECEIVE64_WQE;
1909
1910	trecv->cq_id = cpu_to_le16(cq_id);
1911
1912	trecv->fcp_data_receive_length = cpu_to_le32(params->xmit_len);
1913
1914	if (sli->params.perf_hint) {
1915		bptr = &trecv->first_data_bde;
1916
1917		bptr->bde_type_buflen =
1918			cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
1919			    (le32_to_cpu(sge[first_data_sge].buffer_length) &
1920			     SLI4_BDE_LEN_MASK));
1921		bptr->u.data.low = sge[first_data_sge].buffer_address_low;
1922		bptr->u.data.high = sge[first_data_sge].buffer_address_high;
1923	}
1924
1925	/* The upper 7 bits of csctl is the priority */
1926	if (params->cs_ctl & SLI4_MASK_CCP) {
1927		trecv->eat_xc_ccpe |= SLI4_TRCV_WQE_CCPE;
1928		trecv->ccp = (params->cs_ctl & SLI4_MASK_CCP);
1929	}
1930
1931	if (params->app_id && sli->wqe_size == SLI4_WQE_EXT_BYTES &&
1932	    !(trecv->eat_xc_ccpe & SLI4_TRSP_WQE_EAT)) {
1933		trecv->lloc1_appid |= SLI4_TRCV_WQE_APPID;
1934		trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_WQES;
1935		trecv_128->dw[31] = params->app_id;
1936	}
1937	return 0;
1938}
1939
1940int
1941sli_fcp_cont_treceive64_wqe(struct sli4 *sli, void *buf,
1942			    struct efc_dma *sgl, u32 first_data_sge,
1943			    u16 sec_xri, u16 cq_id, u8 dif, u8 bs,
1944			    struct sli_fcp_tgt_params *params)
1945{
1946	int rc;
1947
1948	rc = sli_fcp_treceive64_wqe(sli, buf, sgl, first_data_sge,
1949				    cq_id, dif, bs, params);
1950	if (!rc) {
1951		struct sli4_fcp_treceive64_wqe *trecv = buf;
1952
1953		trecv->command = SLI4_WQE_FCP_CONT_TRECEIVE64;
1954		trecv->dword5.sec_xri_tag = cpu_to_le16(sec_xri);
1955	}
1956	return rc;
1957}
1958
1959int
1960sli_fcp_trsp64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
1961		   u16 cq_id, u8 port_owned, struct sli_fcp_tgt_params *params)
1962{
1963	struct sli4_fcp_trsp64_wqe *trsp = buf;
1964	struct sli4_fcp_128byte_wqe *trsp_128 = buf;
1965
1966	memset(buf, 0, sli4->wqe_size);
1967
1968	if (params->flags & SLI4_IO_AUTO_GOOD_RESPONSE) {
1969		trsp->class_ag_byte |= SLI4_TRSP_WQE_AG;
1970	} else {
1971		struct sli4_sge	*sge = sgl->virt;
1972		struct sli4_bde *bptr;
1973
1974		if (sli4->params.sgl_pre_registered || port_owned)
1975			trsp->qosd_xbl_hlm_dbde_wqes |= SLI4_TRSP_WQE_DBDE;
1976		else
1977			trsp->qosd_xbl_hlm_dbde_wqes |= SLI4_TRSP_WQE_XBL;
1978		bptr = &trsp->bde;
1979
1980		bptr->bde_type_buflen =
1981			cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
1982				     (le32_to_cpu(sge[0].buffer_length) &
1983				      SLI4_BDE_LEN_MASK));
1984		bptr->u.data.low  = sge[0].buffer_address_low;
1985		bptr->u.data.high = sge[0].buffer_address_high;
1986
1987		trsp->fcp_response_length = cpu_to_le32(params->xmit_len);
1988	}
1989
1990	if (params->flags & SLI4_IO_CONTINUATION)
1991		trsp->eat_xc_ccpe |= SLI4_TRSP_WQE_XC;
1992
1993	trsp->xri_tag = cpu_to_le16(params->xri);
1994	trsp->rpi = cpu_to_le16(params->rpi);
1995
1996	trsp->command = SLI4_WQE_FCP_TRSP64;
1997	trsp->class_ag_byte |= SLI4_GENERIC_CLASS_CLASS_3;
1998
1999	trsp->remote_xid = cpu_to_le16(params->ox_id);
2000	trsp->request_tag = cpu_to_le16(params->tag);
2001	if (params->flags & SLI4_IO_DNRX)
2002		trsp->ct_dnrx_byte |= SLI4_TRSP_WQE_DNRX;
2003	else
2004		trsp->ct_dnrx_byte &= ~SLI4_TRSP_WQE_DNRX;
2005
2006	trsp->lloc1_appid |= 0x1;
2007	trsp->cq_id = cpu_to_le16(cq_id);
2008	trsp->cmd_type_byte = SLI4_CMD_FCP_TRSP64_WQE;
2009
2010	/* The upper 7 bits of csctl is the priority */
2011	if (params->cs_ctl & SLI4_MASK_CCP) {
2012		trsp->eat_xc_ccpe |= SLI4_TRSP_WQE_CCPE;
2013		trsp->ccp = (params->cs_ctl & SLI4_MASK_CCP);
2014	}
2015
2016	if (params->app_id && sli4->wqe_size == SLI4_WQE_EXT_BYTES &&
2017	    !(trsp->eat_xc_ccpe & SLI4_TRSP_WQE_EAT)) {
2018		trsp->lloc1_appid |= SLI4_TRSP_WQE_APPID;
2019		trsp->qosd_xbl_hlm_dbde_wqes |= SLI4_TRSP_WQE_WQES;
2020		trsp_128->dw[31] = params->app_id;
2021	}
2022	return 0;
2023}
2024
2025int
2026sli_fcp_tsend64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
2027		    u32 first_data_sge, u16 cq_id, u8 dif, u8 bs,
2028		    struct sli_fcp_tgt_params *params)
2029{
2030	struct sli4_fcp_tsend64_wqe *tsend = buf;
2031	struct sli4_fcp_128byte_wqe *tsend_128 = buf;
2032	struct sli4_sge *sge = NULL;
2033	struct sli4_bde *bptr;
2034
2035	memset(buf, 0, sli4->wqe_size);
2036
2037	if (!sgl || !sgl->virt) {
2038		efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n",
2039			    sgl, sgl ? sgl->virt : NULL);
2040		return -EIO;
2041	}
2042	sge = sgl->virt;
2043
2044	bptr = &tsend->bde;
2045	if (sli4->params.sgl_pre_registered) {
2046		tsend->ll_qd_xbl_hlm_iod_dbde &= ~SLI4_TSEND_WQE_XBL;
2047
2048		tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQE_DBDE;
2049
2050		bptr->bde_type_buflen =
2051			cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
2052				   (le32_to_cpu(sge[2].buffer_length) &
2053				    SLI4_BDE_LEN_MASK));
2054
2055		/* TSEND64_WQE specifies first two SGE are skipped (3rd is
2056		 * valid)
2057		 */
2058		bptr->u.data.low  = sge[2].buffer_address_low;
2059		bptr->u.data.high = sge[2].buffer_address_high;
2060	} else {
2061		tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQE_XBL;
2062
2063		/* if data is a single physical address, use a BDE */
2064		if (!dif &&
2065		    params->xmit_len <= le32_to_cpu(sge[2].buffer_length)) {
2066			tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQE_DBDE;
2067
2068			bptr->bde_type_buflen =
2069			    cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
2070					(le32_to_cpu(sge[2].buffer_length) &
2071					SLI4_BDE_LEN_MASK));
2072			/*
2073			 * TSEND64_WQE specifies first two SGE are skipped
2074			 * (i.e. 3rd is valid)
2075			 */
2076			bptr->u.data.low =
2077				sge[2].buffer_address_low;
2078			bptr->u.data.high =
2079				sge[2].buffer_address_high;
2080		} else {
2081			bptr->bde_type_buflen =
2082				cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
2083					    (sgl->size &
2084					     SLI4_BDE_LEN_MASK));
2085			bptr->u.blp.low =
2086				cpu_to_le32(lower_32_bits(sgl->phys));
2087			bptr->u.blp.high =
2088				cpu_to_le32(upper_32_bits(sgl->phys));
2089		}
2090	}
2091
2092	tsend->relative_offset = cpu_to_le32(params->offset);
2093
2094	if (params->flags & SLI4_IO_CONTINUATION)
2095		tsend->dw10byte2 |= SLI4_TSEND_XC;
2096
2097	tsend->xri_tag = cpu_to_le16(params->xri);
2098
2099	tsend->rpi = cpu_to_le16(params->rpi);
2100	/* WQE uses relative offset */
2101	tsend->class_pu_ar_byte |= 1 << SLI4_TSEND_WQE_PU_SHFT;
2102
2103	if (params->flags & SLI4_IO_AUTO_GOOD_RESPONSE)
2104		tsend->class_pu_ar_byte |= SLI4_TSEND_WQE_AR;
2105
2106	tsend->command = SLI4_WQE_FCP_TSEND64;
2107	tsend->class_pu_ar_byte |= SLI4_GENERIC_CLASS_CLASS_3;
2108	tsend->ct_byte |= SLI4_GENERIC_CONTEXT_RPI << SLI4_TSEND_CT_SHFT;
2109	tsend->ct_byte |= dif;
2110	tsend->ct_byte |= bs << SLI4_TSEND_BS_SHFT;
2111
2112	tsend->remote_xid = cpu_to_le16(params->ox_id);
2113
2114	tsend->request_tag = cpu_to_le16(params->tag);
2115
2116	tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_LEN_LOC_BIT2;
2117
2118	tsend->cq_id = cpu_to_le16(cq_id);
2119
2120	tsend->cmd_type_byte |= SLI4_CMD_FCP_TSEND64_WQE;
2121
2122	tsend->fcp_data_transmit_length = cpu_to_le32(params->xmit_len);
2123
2124	if (sli4->params.perf_hint) {
2125		bptr = &tsend->first_data_bde;
2126		bptr->bde_type_buflen =
2127			cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
2128			    (le32_to_cpu(sge[first_data_sge].buffer_length) &
2129			     SLI4_BDE_LEN_MASK));
2130		bptr->u.data.low =
2131			sge[first_data_sge].buffer_address_low;
2132		bptr->u.data.high =
2133			sge[first_data_sge].buffer_address_high;
2134	}
2135
2136	/* The upper 7 bits of csctl is the priority */
2137	if (params->cs_ctl & SLI4_MASK_CCP) {
2138		tsend->dw10byte2 |= SLI4_TSEND_CCPE;
2139		tsend->ccp = (params->cs_ctl & SLI4_MASK_CCP);
2140	}
2141
2142	if (params->app_id && sli4->wqe_size == SLI4_WQE_EXT_BYTES &&
2143	    !(tsend->dw10byte2 & SLI4_TSEND_EAT)) {
2144		tsend->dw10byte0 |= SLI4_TSEND_APPID_VALID;
2145		tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQES;
2146		tsend_128->dw[31] = params->app_id;
2147	}
2148	return 0;
2149}
2150
2151int
2152sli_gen_request64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
2153		      struct sli_ct_params *params)
2154{
2155	struct sli4_gen_request64_wqe *gen = buf;
2156	struct sli4_sge *sge = NULL;
2157	struct sli4_bde *bptr;
2158
2159	memset(buf, 0, sli4->wqe_size);
2160
2161	if (!sgl || !sgl->virt) {
2162		efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n",
2163			    sgl, sgl ? sgl->virt : NULL);
2164		return -EIO;
2165	}
2166	sge = sgl->virt;
2167	bptr = &gen->bde;
2168
2169	if (sli4->params.sgl_pre_registered) {
2170		gen->dw10flags1 &= ~SLI4_GEN_REQ64_WQE_XBL;
2171
2172		gen->dw10flags1 |= SLI4_GEN_REQ64_WQE_DBDE;
2173		bptr->bde_type_buflen =
2174			cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
2175				    (params->xmit_len & SLI4_BDE_LEN_MASK));
2176
2177		bptr->u.data.low  = sge[0].buffer_address_low;
2178		bptr->u.data.high = sge[0].buffer_address_high;
2179	} else {
2180		gen->dw10flags1 |= SLI4_GEN_REQ64_WQE_XBL;
2181
2182		bptr->bde_type_buflen =
2183			cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
2184				    ((2 * sizeof(struct sli4_sge)) &
2185				     SLI4_BDE_LEN_MASK));
2186
2187		bptr->u.blp.low =
2188			cpu_to_le32(lower_32_bits(sgl->phys));
2189		bptr->u.blp.high =
2190			cpu_to_le32(upper_32_bits(sgl->phys));
2191	}
2192
2193	gen->request_payload_length = cpu_to_le32(params->xmit_len);
2194	gen->max_response_payload_length = cpu_to_le32(params->rsp_len);
2195
2196	gen->df_ctl = params->df_ctl;
2197	gen->type = params->type;
2198	gen->r_ctl = params->r_ctl;
2199
2200	gen->xri_tag = cpu_to_le16(params->xri);
2201
2202	gen->ct_byte = SLI4_GENERIC_CONTEXT_RPI << SLI4_GEN_REQ64_CT_SHFT;
2203	gen->context_tag = cpu_to_le16(params->rpi);
2204
2205	gen->class_byte = SLI4_GENERIC_CLASS_CLASS_3;
2206
2207	gen->command = SLI4_WQE_GEN_REQUEST64;
2208
2209	gen->timer = params->timeout;
2210
2211	gen->request_tag = cpu_to_le16(params->tag);
2212
2213	gen->dw10flags1 |= SLI4_GEN_REQ64_WQE_IOD;
2214
2215	gen->dw10flags0 |= SLI4_GEN_REQ64_WQE_QOSD;
2216
2217	gen->cmd_type_byte = SLI4_CMD_GEN_REQUEST64_WQE;
2218
2219	gen->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
2220
2221	return 0;
2222}
2223
2224int
2225sli_send_frame_wqe(struct sli4 *sli, void *buf, u8 sof, u8 eof, u32 *hdr,
2226		   struct efc_dma *payload, u32 req_len, u8 timeout, u16 xri,
2227		   u16 req_tag)
2228{
2229	struct sli4_send_frame_wqe *sf = buf;
2230
2231	memset(buf, 0, sli->wqe_size);
2232
2233	sf->dw10flags1 |= SLI4_SF_WQE_DBDE;
2234	sf->bde.bde_type_buflen = cpu_to_le32(req_len &
2235					      SLI4_BDE_LEN_MASK);
2236	sf->bde.u.data.low = cpu_to_le32(lower_32_bits(payload->phys));
2237	sf->bde.u.data.high = cpu_to_le32(upper_32_bits(payload->phys));
2238
2239	/* Copy FC header */
2240	sf->fc_header_0_1[0] = cpu_to_le32(hdr[0]);
2241	sf->fc_header_0_1[1] = cpu_to_le32(hdr[1]);
2242	sf->fc_header_2_5[0] = cpu_to_le32(hdr[2]);
2243	sf->fc_header_2_5[1] = cpu_to_le32(hdr[3]);
2244	sf->fc_header_2_5[2] = cpu_to_le32(hdr[4]);
2245	sf->fc_header_2_5[3] = cpu_to_le32(hdr[5]);
2246
2247	sf->frame_length = cpu_to_le32(req_len);
2248
2249	sf->xri_tag = cpu_to_le16(xri);
2250	sf->dw7flags0 &= ~SLI4_SF_PU;
2251	sf->context_tag = 0;
2252
2253	sf->ct_byte &= ~SLI4_SF_CT;
2254	sf->command = SLI4_WQE_SEND_FRAME;
2255	sf->dw7flags0 |= SLI4_GENERIC_CLASS_CLASS_3;
2256	sf->timer = timeout;
2257
2258	sf->request_tag = cpu_to_le16(req_tag);
2259	sf->eof = eof;
2260	sf->sof = sof;
2261
2262	sf->dw10flags1 &= ~SLI4_SF_QOSD;
2263	sf->dw10flags0 |= SLI4_SF_LEN_LOC_BIT1;
2264	sf->dw10flags2 &= ~SLI4_SF_XC;
2265
2266	sf->dw10flags1 |= SLI4_SF_XBL;
2267
2268	sf->cmd_type_byte |= SLI4_CMD_SEND_FRAME_WQE;
2269	sf->cq_id = cpu_to_le16(0xffff);
2270
2271	return 0;
2272}
2273
2274int
2275sli_xmit_bls_rsp64_wqe(struct sli4 *sli, void *buf,
2276		       struct sli_bls_payload *payload,
2277		       struct sli_bls_params *params)
2278{
2279	struct sli4_xmit_bls_rsp_wqe *bls = buf;
2280	u32 dw_ridflags = 0;
2281
2282	/*
2283	 * Callers can either specify RPI or S_ID, but not both
2284	 */
2285	if (params->rpi_registered && params->s_id != U32_MAX) {
2286		efc_log_info(sli, "S_ID specified for attached remote node %d\n",
2287			     params->rpi);
2288		return -EIO;
2289	}
2290
2291	memset(buf, 0, sli->wqe_size);
2292
2293	if (payload->type == SLI4_SLI_BLS_ACC) {
2294		bls->payload_word0 =
2295			cpu_to_le32((payload->u.acc.seq_id_last << 16) |
2296				    (payload->u.acc.seq_id_validity << 24));
2297		bls->high_seq_cnt = payload->u.acc.high_seq_cnt;
2298		bls->low_seq_cnt = payload->u.acc.low_seq_cnt;
2299	} else if (payload->type == SLI4_SLI_BLS_RJT) {
2300		bls->payload_word0 =
2301				cpu_to_le32(*((u32 *)&payload->u.rjt));
2302		dw_ridflags |= SLI4_BLS_RSP_WQE_AR;
2303	} else {
2304		efc_log_info(sli, "bad BLS type %#x\n", payload->type);
2305		return -EIO;
2306	}
2307
2308	bls->ox_id = payload->ox_id;
2309	bls->rx_id = payload->rx_id;
2310
2311	if (params->rpi_registered) {
2312		bls->dw8flags0 |=
2313		SLI4_GENERIC_CONTEXT_RPI << SLI4_BLS_RSP_WQE_CT_SHFT;
2314		bls->context_tag = cpu_to_le16(params->rpi);
2315	} else {
2316		bls->dw8flags0 |=
2317		SLI4_GENERIC_CONTEXT_VPI << SLI4_BLS_RSP_WQE_CT_SHFT;
2318		bls->context_tag = cpu_to_le16(params->vpi);
2319
2320		bls->local_n_port_id_dword |=
2321			cpu_to_le32(params->s_id & 0x00ffffff);
2322
2323		dw_ridflags = (dw_ridflags & ~SLI4_BLS_RSP_RID) |
2324			       (params->d_id & SLI4_BLS_RSP_RID);
2325
2326		bls->temporary_rpi = cpu_to_le16(params->rpi);
2327	}
2328
2329	bls->xri_tag = cpu_to_le16(params->xri);
2330
2331	bls->dw8flags1 |= SLI4_GENERIC_CLASS_CLASS_3;
2332
2333	bls->command = SLI4_WQE_XMIT_BLS_RSP;
2334
2335	bls->request_tag = cpu_to_le16(params->tag);
2336
2337	bls->dw11flags1 |= SLI4_BLS_RSP_WQE_QOSD;
2338
2339	bls->remote_id_dword = cpu_to_le32(dw_ridflags);
2340	bls->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
2341
2342	bls->dw12flags0 |= SLI4_CMD_XMIT_BLS_RSP64_WQE;
2343
2344	return 0;
2345}
2346
2347int
2348sli_xmit_els_rsp64_wqe(struct sli4 *sli, void *buf, struct efc_dma *rsp,
2349		       struct sli_els_params *params)
2350{
2351	struct sli4_xmit_els_rsp64_wqe *els = buf;
2352
2353	memset(buf, 0, sli->wqe_size);
2354
2355	if (sli->params.sgl_pre_registered)
2356		els->flags2 |= SLI4_ELS_DBDE;
2357	else
2358		els->flags2 |= SLI4_ELS_XBL;
2359
2360	els->els_response_payload.bde_type_buflen =
2361		cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
2362			    (params->rsp_len & SLI4_BDE_LEN_MASK));
2363	els->els_response_payload.u.data.low =
2364		cpu_to_le32(lower_32_bits(rsp->phys));
2365	els->els_response_payload.u.data.high =
2366		cpu_to_le32(upper_32_bits(rsp->phys));
2367
2368	els->els_response_payload_length = cpu_to_le32(params->rsp_len);
2369
2370	els->xri_tag = cpu_to_le16(params->xri);
2371
2372	els->class_byte |= SLI4_GENERIC_CLASS_CLASS_3;
2373
2374	els->command = SLI4_WQE_ELS_RSP64;
2375
2376	els->request_tag = cpu_to_le16(params->tag);
2377
2378	els->ox_id = cpu_to_le16(params->ox_id);
2379
2380	els->flags2 |= SLI4_ELS_QOSD;
2381
2382	els->cmd_type_wqec = SLI4_ELS_REQUEST64_CMD_GEN;
2383
2384	els->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
2385
2386	if (params->rpi_registered) {
2387		els->ct_byte |=
2388			SLI4_GENERIC_CONTEXT_RPI << SLI4_ELS_CT_OFFSET;
2389		els->context_tag = cpu_to_le16(params->rpi);
2390		return 0;
2391	}
2392
2393	els->ct_byte |= SLI4_GENERIC_CONTEXT_VPI << SLI4_ELS_CT_OFFSET;
2394	els->context_tag = cpu_to_le16(params->vpi);
2395	els->rid_dw = cpu_to_le32(params->d_id & SLI4_ELS_RID);
2396	els->temporary_rpi = cpu_to_le16(params->rpi);
2397	if (params->s_id != U32_MAX) {
2398		els->sid_dw |=
2399		      cpu_to_le32(SLI4_ELS_SP | (params->s_id & SLI4_ELS_SID));
2400	}
2401
2402	return 0;
2403}
2404
2405int
2406sli_xmit_sequence64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *payload,
2407			struct sli_ct_params *params)
2408{
2409	struct sli4_xmit_sequence64_wqe *xmit = buf;
2410
2411	memset(buf, 0, sli4->wqe_size);
2412
2413	if (!payload || !payload->virt) {
2414		efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n",
2415			    payload, payload ? payload->virt : NULL);
2416		return -EIO;
2417	}
2418
2419	if (sli4->params.sgl_pre_registered)
2420		xmit->dw10w0 |= cpu_to_le16(SLI4_SEQ_WQE_DBDE);
2421	else
2422		xmit->dw10w0 |= cpu_to_le16(SLI4_SEQ_WQE_XBL);
2423
2424	xmit->bde.bde_type_buflen =
2425		cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
2426			(params->rsp_len & SLI4_BDE_LEN_MASK));
2427	xmit->bde.u.data.low  =
2428			cpu_to_le32(lower_32_bits(payload->phys));
2429	xmit->bde.u.data.high =
2430			cpu_to_le32(upper_32_bits(payload->phys));
2431	xmit->sequence_payload_len = cpu_to_le32(params->rsp_len);
2432
2433	xmit->remote_n_port_id_dword |= cpu_to_le32(params->d_id & 0x00ffffff);
2434
2435	xmit->relative_offset = 0;
2436
2437	/* sequence initiative - this matches what is seen from
2438	 * FC switches in response to FCGS commands
2439	 */
2440	xmit->dw5flags0 &= (~SLI4_SEQ_WQE_SI);
2441	xmit->dw5flags0 &= (~SLI4_SEQ_WQE_FT);/* force transmit */
2442	xmit->dw5flags0 &= (~SLI4_SEQ_WQE_XO);/* exchange responder */
2443	xmit->dw5flags0 |= SLI4_SEQ_WQE_LS;/* last in seqence */
2444	xmit->df_ctl = params->df_ctl;
2445	xmit->type = params->type;
2446	xmit->r_ctl = params->r_ctl;
2447
2448	xmit->xri_tag = cpu_to_le16(params->xri);
2449	xmit->context_tag = cpu_to_le16(params->rpi);
2450
2451	xmit->dw7flags0 &= ~SLI4_SEQ_WQE_DIF;
2452	xmit->dw7flags0 |=
2453		SLI4_GENERIC_CONTEXT_RPI << SLI4_SEQ_WQE_CT_SHIFT;
2454	xmit->dw7flags0 &= ~SLI4_SEQ_WQE_BS;
2455
2456	xmit->command = SLI4_WQE_XMIT_SEQUENCE64;
2457	xmit->dw7flags1 |= SLI4_GENERIC_CLASS_CLASS_3;
2458	xmit->dw7flags1 &= ~SLI4_SEQ_WQE_PU;
2459	xmit->timer = params->timeout;
2460
2461	xmit->abort_tag = 0;
2462	xmit->request_tag = cpu_to_le16(params->tag);
2463	xmit->remote_xid = cpu_to_le16(params->ox_id);
2464
2465	xmit->dw10w0 |=
2466	cpu_to_le16(SLI4_ELS_REQUEST64_DIR_READ << SLI4_SEQ_WQE_IOD_SHIFT);
2467
2468	xmit->cmd_type_wqec_byte |= SLI4_CMD_XMIT_SEQUENCE64_WQE;
2469
2470	xmit->dw10w0 |= cpu_to_le16(2 << SLI4_SEQ_WQE_LEN_LOC_SHIFT);
2471
2472	xmit->cq_id = cpu_to_le16(0xFFFF);
2473
2474	return 0;
2475}
2476
2477int
2478sli_requeue_xri_wqe(struct sli4 *sli4, void *buf, u16 xri, u16 tag, u16 cq_id)
2479{
2480	struct sli4_requeue_xri_wqe *requeue = buf;
2481
2482	memset(buf, 0, sli4->wqe_size);
2483
2484	requeue->command = SLI4_WQE_REQUEUE_XRI;
2485	requeue->xri_tag = cpu_to_le16(xri);
2486	requeue->request_tag = cpu_to_le16(tag);
2487	requeue->flags2 |= cpu_to_le16(SLI4_REQU_XRI_WQE_XC);
2488	requeue->flags1 |= cpu_to_le16(SLI4_REQU_XRI_WQE_QOSD);
2489	requeue->cq_id = cpu_to_le16(cq_id);
2490	requeue->cmd_type_wqec_byte = SLI4_CMD_REQUEUE_XRI_WQE;
2491	return 0;
2492}
2493
2494int
2495sli_fc_process_link_attention(struct sli4 *sli4, void *acqe)
2496{
2497	struct sli4_link_attention *link_attn = acqe;
2498	struct sli4_link_event event = { 0 };
2499
2500	efc_log_info(sli4, "link=%d attn_type=%#x top=%#x speed=%#x pfault=%#x\n",
2501		     link_attn->link_number, link_attn->attn_type,
2502		     link_attn->topology, link_attn->port_speed,
2503		     link_attn->port_fault);
2504	efc_log_info(sli4, "shared_lnk_status=%#x logl_lnk_speed=%#x evttag=%#x\n",
2505		     link_attn->shared_link_status,
2506		     le16_to_cpu(link_attn->logical_link_speed),
2507		     le32_to_cpu(link_attn->event_tag));
2508
2509	if (!sli4->link)
2510		return -EIO;
2511
2512	event.medium   = SLI4_LINK_MEDIUM_FC;
2513
2514	switch (link_attn->attn_type) {
2515	case SLI4_LNK_ATTN_TYPE_LINK_UP:
2516		event.status = SLI4_LINK_STATUS_UP;
2517		break;
2518	case SLI4_LNK_ATTN_TYPE_LINK_DOWN:
2519		event.status = SLI4_LINK_STATUS_DOWN;
2520		break;
2521	case SLI4_LNK_ATTN_TYPE_NO_HARD_ALPA:
2522		efc_log_info(sli4, "attn_type: no hard alpa\n");
2523		event.status = SLI4_LINK_STATUS_NO_ALPA;
2524		break;
2525	default:
2526		efc_log_info(sli4, "attn_type: unknown\n");
2527		break;
2528	}
2529
2530	switch (link_attn->event_type) {
2531	case SLI4_EVENT_LINK_ATTENTION:
2532		break;
2533	case SLI4_EVENT_SHARED_LINK_ATTENTION:
2534		efc_log_info(sli4, "event_type: FC shared link event\n");
2535		break;
2536	default:
2537		efc_log_info(sli4, "event_type: unknown\n");
2538		break;
2539	}
2540
2541	switch (link_attn->topology) {
2542	case SLI4_LNK_ATTN_P2P:
2543		event.topology = SLI4_LINK_TOPO_NON_FC_AL;
2544		break;
2545	case SLI4_LNK_ATTN_FC_AL:
2546		event.topology = SLI4_LINK_TOPO_FC_AL;
2547		break;
2548	case SLI4_LNK_ATTN_INTERNAL_LOOPBACK:
2549		efc_log_info(sli4, "topology Internal loopback\n");
2550		event.topology = SLI4_LINK_TOPO_LOOPBACK_INTERNAL;
2551		break;
2552	case SLI4_LNK_ATTN_SERDES_LOOPBACK:
2553		efc_log_info(sli4, "topology serdes loopback\n");
2554		event.topology = SLI4_LINK_TOPO_LOOPBACK_EXTERNAL;
2555		break;
2556	default:
2557		efc_log_info(sli4, "topology: unknown\n");
2558		break;
2559	}
2560
2561	event.speed = link_attn->port_speed * 1000;
2562
2563	sli4->link(sli4->link_arg, (void *)&event);
2564
2565	return 0;
2566}
2567
2568int
2569sli_fc_cqe_parse(struct sli4 *sli4, struct sli4_queue *cq,
2570		 u8 *cqe, enum sli4_qentry *etype, u16 *r_id)
2571{
2572	u8 code = cqe[SLI4_CQE_CODE_OFFSET];
2573	int rc;
2574
2575	switch (code) {
2576	case SLI4_CQE_CODE_WORK_REQUEST_COMPLETION:
2577	{
2578		struct sli4_fc_wcqe *wcqe = (void *)cqe;
2579
2580		*etype = SLI4_QENTRY_WQ;
2581		*r_id = le16_to_cpu(wcqe->request_tag);
2582		rc = wcqe->status;
2583
2584		/* Flag errors except for FCP_RSP_FAILURE */
2585		if (rc && rc != SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE) {
2586			efc_log_info(sli4, "WCQE: status=%#x hw_status=%#x tag=%#x\n",
2587				     wcqe->status, wcqe->hw_status,
2588				     le16_to_cpu(wcqe->request_tag));
2589			efc_log_info(sli4, "w1=%#x w2=%#x xb=%d\n",
2590				     le32_to_cpu(wcqe->wqe_specific_1),
2591				     le32_to_cpu(wcqe->wqe_specific_2),
2592				     (wcqe->flags & SLI4_WCQE_XB));
2593			efc_log_info(sli4, "      %08X %08X %08X %08X\n",
2594				     ((u32 *)cqe)[0], ((u32 *)cqe)[1],
2595				     ((u32 *)cqe)[2], ((u32 *)cqe)[3]);
2596		}
2597
2598		break;
2599	}
2600	case SLI4_CQE_CODE_RQ_ASYNC:
2601	{
2602		struct sli4_fc_async_rcqe *rcqe = (void *)cqe;
2603
2604		*etype = SLI4_QENTRY_RQ;
2605		*r_id = le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID;
2606		rc = rcqe->status;
2607		break;
2608	}
2609	case SLI4_CQE_CODE_RQ_ASYNC_V1:
2610	{
2611		struct sli4_fc_async_rcqe_v1 *rcqe = (void *)cqe;
2612
2613		*etype = SLI4_QENTRY_RQ;
2614		*r_id = le16_to_cpu(rcqe->rq_id);
2615		rc = rcqe->status;
2616		break;
2617	}
2618	case SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD:
2619	{
2620		struct sli4_fc_optimized_write_cmd_cqe *optcqe = (void *)cqe;
2621
2622		*etype = SLI4_QENTRY_OPT_WRITE_CMD;
2623		*r_id = le16_to_cpu(optcqe->rq_id);
2624		rc = optcqe->status;
2625		break;
2626	}
2627	case SLI4_CQE_CODE_OPTIMIZED_WRITE_DATA:
2628	{
2629		struct sli4_fc_optimized_write_data_cqe *dcqe = (void *)cqe;
2630
2631		*etype = SLI4_QENTRY_OPT_WRITE_DATA;
2632		*r_id = le16_to_cpu(dcqe->xri);
2633		rc = dcqe->status;
2634
2635		/* Flag errors */
2636		if (rc != SLI4_FC_WCQE_STATUS_SUCCESS) {
2637			efc_log_info(sli4, "Optimized DATA CQE: status=%#x\n",
2638				     dcqe->status);
2639			efc_log_info(sli4, "hstat=%#x xri=%#x dpl=%#x w3=%#x xb=%d\n",
2640				     dcqe->hw_status, le16_to_cpu(dcqe->xri),
2641				     le32_to_cpu(dcqe->total_data_placed),
2642				     ((u32 *)cqe)[3],
2643				     (dcqe->flags & SLI4_OCQE_XB));
2644		}
2645		break;
2646	}
2647	case SLI4_CQE_CODE_RQ_COALESCING:
2648	{
2649		struct sli4_fc_coalescing_rcqe *rcqe = (void *)cqe;
2650
2651		*etype = SLI4_QENTRY_RQ;
2652		*r_id = le16_to_cpu(rcqe->rq_id);
2653		rc = rcqe->status;
2654		break;
2655	}
2656	case SLI4_CQE_CODE_XRI_ABORTED:
2657	{
2658		struct sli4_fc_xri_aborted_cqe *xa = (void *)cqe;
2659
2660		*etype = SLI4_QENTRY_XABT;
2661		*r_id = le16_to_cpu(xa->xri);
2662		rc = 0;
2663		break;
2664	}
2665	case SLI4_CQE_CODE_RELEASE_WQE:
2666	{
2667		struct sli4_fc_wqec *wqec = (void *)cqe;
2668
2669		*etype = SLI4_QENTRY_WQ_RELEASE;
2670		*r_id = le16_to_cpu(wqec->wq_id);
2671		rc = 0;
2672		break;
2673	}
2674	default:
2675		efc_log_info(sli4, "CQE completion code %d not handled\n",
2676			     code);
2677		*etype = SLI4_QENTRY_MAX;
2678		*r_id = U16_MAX;
2679		rc = -EINVAL;
2680	}
2681
2682	return rc;
2683}
2684
2685u32
2686sli_fc_response_length(struct sli4 *sli4, u8 *cqe)
2687{
2688	struct sli4_fc_wcqe *wcqe = (void *)cqe;
2689
2690	return le32_to_cpu(wcqe->wqe_specific_1);
2691}
2692
2693u32
2694sli_fc_io_length(struct sli4 *sli4, u8 *cqe)
2695{
2696	struct sli4_fc_wcqe *wcqe = (void *)cqe;
2697
2698	return le32_to_cpu(wcqe->wqe_specific_1);
2699}
2700
2701int
2702sli_fc_els_did(struct sli4 *sli4, u8 *cqe, u32 *d_id)
2703{
2704	struct sli4_fc_wcqe *wcqe = (void *)cqe;
2705
2706	*d_id = 0;
2707
2708	if (wcqe->status)
2709		return -EIO;
2710	*d_id = le32_to_cpu(wcqe->wqe_specific_2) & 0x00ffffff;
2711	return 0;
2712}
2713
2714u32
2715sli_fc_ext_status(struct sli4 *sli4, u8 *cqe)
2716{
2717	struct sli4_fc_wcqe *wcqe = (void *)cqe;
2718	u32	mask;
2719
2720	switch (wcqe->status) {
2721	case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE:
2722		mask = U32_MAX;
2723		break;
2724	case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
2725	case SLI4_FC_WCQE_STATUS_CMD_REJECT:
2726		mask = 0xff;
2727		break;
2728	case SLI4_FC_WCQE_STATUS_NPORT_RJT:
2729	case SLI4_FC_WCQE_STATUS_FABRIC_RJT:
2730	case SLI4_FC_WCQE_STATUS_NPORT_BSY:
2731	case SLI4_FC_WCQE_STATUS_FABRIC_BSY:
2732	case SLI4_FC_WCQE_STATUS_LS_RJT:
2733		mask = U32_MAX;
2734		break;
2735	case SLI4_FC_WCQE_STATUS_DI_ERROR:
2736		mask = U32_MAX;
2737		break;
2738	default:
2739		mask = 0;
2740	}
2741
2742	return le32_to_cpu(wcqe->wqe_specific_2) & mask;
2743}
2744
2745int
2746sli_fc_rqe_rqid_and_index(struct sli4 *sli4, u8 *cqe, u16 *rq_id, u32 *index)
2747{
2748	int rc = -EIO;
2749	u8 code = 0;
2750	u16 rq_element_index;
2751
2752	*rq_id = 0;
2753	*index = U32_MAX;
2754
2755	code = cqe[SLI4_CQE_CODE_OFFSET];
2756
2757	/* Retrieve the RQ index from the completion */
2758	if (code == SLI4_CQE_CODE_RQ_ASYNC) {
2759		struct sli4_fc_async_rcqe *rcqe = (void *)cqe;
2760
2761		*rq_id = le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID;
2762		rq_element_index =
2763		le16_to_cpu(rcqe->rq_elmt_indx_word) & SLI4_RACQE_RQ_EL_INDX;
2764		*index = rq_element_index;
2765		if (rcqe->status == SLI4_FC_ASYNC_RQ_SUCCESS) {
2766			rc = 0;
2767		} else {
2768			rc = rcqe->status;
2769			efc_log_info(sli4, "status=%02x (%s) rq_id=%d\n",
2770				     rcqe->status,
2771				     sli_fc_get_status_string(rcqe->status),
2772				     le16_to_cpu(rcqe->fcfi_rq_id_word) &
2773				     SLI4_RACQE_RQ_ID);
2774
2775			efc_log_info(sli4, "pdpl=%x sof=%02x eof=%02x hdpl=%x\n",
2776				     le16_to_cpu(rcqe->data_placement_length),
2777				     rcqe->sof_byte, rcqe->eof_byte,
2778				     rcqe->hdpl_byte & SLI4_RACQE_HDPL);
2779		}
2780	} else if (code == SLI4_CQE_CODE_RQ_ASYNC_V1) {
2781		struct sli4_fc_async_rcqe_v1 *rcqe_v1 = (void *)cqe;
2782
2783		*rq_id = le16_to_cpu(rcqe_v1->rq_id);
2784		rq_element_index =
2785			(le16_to_cpu(rcqe_v1->rq_elmt_indx_word) &
2786			 SLI4_RACQE_RQ_EL_INDX);
2787		*index = rq_element_index;
2788		if (rcqe_v1->status == SLI4_FC_ASYNC_RQ_SUCCESS) {
2789			rc = 0;
2790		} else {
2791			rc = rcqe_v1->status;
2792			efc_log_info(sli4, "status=%02x (%s) rq_id=%d, index=%x\n",
2793				     rcqe_v1->status,
2794				     sli_fc_get_status_string(rcqe_v1->status),
2795				     le16_to_cpu(rcqe_v1->rq_id), rq_element_index);
2796
2797			efc_log_info(sli4, "pdpl=%x sof=%02x eof=%02x hdpl=%x\n",
2798				     le16_to_cpu(rcqe_v1->data_placement_length),
2799			rcqe_v1->sof_byte, rcqe_v1->eof_byte,
2800			rcqe_v1->hdpl_byte & SLI4_RACQE_HDPL);
2801		}
2802	} else if (code == SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD) {
2803		struct sli4_fc_optimized_write_cmd_cqe *optcqe = (void *)cqe;
2804
2805		*rq_id = le16_to_cpu(optcqe->rq_id);
2806		*index = le16_to_cpu(optcqe->w1) & SLI4_OCQE_RQ_EL_INDX;
2807		if (optcqe->status == SLI4_FC_ASYNC_RQ_SUCCESS) {
2808			rc = 0;
2809		} else {
2810			rc = optcqe->status;
2811			efc_log_info(sli4, "stat=%02x (%s) rqid=%d, idx=%x pdpl=%x\n",
2812				     optcqe->status,
2813				     sli_fc_get_status_string(optcqe->status),
2814				     le16_to_cpu(optcqe->rq_id), *index,
2815				     le16_to_cpu(optcqe->data_placement_length));
2816
2817			efc_log_info(sli4, "hdpl=%x oox=%d agxr=%d xri=0x%x rpi=%x\n",
2818				     (optcqe->hdpl_vld & SLI4_OCQE_HDPL),
2819				     (optcqe->flags1 & SLI4_OCQE_OOX),
2820				     (optcqe->flags1 & SLI4_OCQE_AGXR),
2821				     optcqe->xri, le16_to_cpu(optcqe->rpi));
2822		}
2823	} else if (code == SLI4_CQE_CODE_RQ_COALESCING) {
2824		struct sli4_fc_coalescing_rcqe  *rcqe = (void *)cqe;
2825
2826		rq_element_index = (le16_to_cpu(rcqe->rq_elmt_indx_word) &
2827				    SLI4_RCQE_RQ_EL_INDX);
2828
2829		*rq_id = le16_to_cpu(rcqe->rq_id);
2830		if (rcqe->status == SLI4_FC_COALESCE_RQ_SUCCESS) {
2831			*index = rq_element_index;
2832			rc = 0;
2833		} else {
2834			*index = U32_MAX;
2835			rc = rcqe->status;
2836
2837			efc_log_info(sli4, "stat=%02x (%s) rq_id=%d, idx=%x\n",
2838				     rcqe->status,
2839				     sli_fc_get_status_string(rcqe->status),
2840				     le16_to_cpu(rcqe->rq_id), rq_element_index);
2841			efc_log_info(sli4, "rq_id=%#x sdpl=%x\n",
2842				     le16_to_cpu(rcqe->rq_id),
2843				     le16_to_cpu(rcqe->seq_placement_length));
2844		}
2845	} else {
2846		struct sli4_fc_async_rcqe *rcqe = (void *)cqe;
2847
2848		*index = U32_MAX;
2849		rc = rcqe->status;
2850
2851		efc_log_info(sli4, "status=%02x rq_id=%d, index=%x pdpl=%x\n",
2852			     rcqe->status,
2853			     le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID,
2854			     (le16_to_cpu(rcqe->rq_elmt_indx_word) & SLI4_RACQE_RQ_EL_INDX),
2855			     le16_to_cpu(rcqe->data_placement_length));
2856		efc_log_info(sli4, "sof=%02x eof=%02x hdpl=%x\n",
2857			     rcqe->sof_byte, rcqe->eof_byte,
2858			     rcqe->hdpl_byte & SLI4_RACQE_HDPL);
2859	}
2860
2861	return rc;
2862}
2863
2864static int
2865sli_bmbx_wait(struct sli4 *sli4, u32 msec)
2866{
2867	u32 val;
2868	unsigned long end;
2869
2870	/* Wait for the bootstrap mailbox to report "ready" */
2871	end = jiffies + msecs_to_jiffies(msec);
2872	do {
2873		val = readl(sli4->reg[0] + SLI4_BMBX_REG);
2874		if (val & SLI4_BMBX_RDY)
2875			return 0;
2876
2877		usleep_range(1000, 2000);
2878	} while (time_before(jiffies, end));
2879
2880	return -EIO;
2881}
2882
2883static int
2884sli_bmbx_write(struct sli4 *sli4)
2885{
2886	u32 val;
2887
2888	/* write buffer location to bootstrap mailbox register */
2889	val = sli_bmbx_write_hi(sli4->bmbx.phys);
2890	writel(val, (sli4->reg[0] + SLI4_BMBX_REG));
2891
2892	if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) {
2893		efc_log_crit(sli4, "BMBX WRITE_HI failed\n");
2894		return -EIO;
2895	}
2896	val = sli_bmbx_write_lo(sli4->bmbx.phys);
2897	writel(val, (sli4->reg[0] + SLI4_BMBX_REG));
2898
2899	/* wait for SLI Port to set ready bit */
2900	return sli_bmbx_wait(sli4, SLI4_BMBX_TIMEOUT_MSEC);
2901}
2902
2903int
2904sli_bmbx_command(struct sli4 *sli4)
2905{
2906	void *cqe = (u8 *)sli4->bmbx.virt + SLI4_BMBX_SIZE;
2907
2908	if (sli_fw_error_status(sli4) > 0) {
2909		efc_log_crit(sli4, "Chip is in an error state -Mailbox command rejected");
2910		efc_log_crit(sli4, " status=%#x error1=%#x error2=%#x\n",
2911			     sli_reg_read_status(sli4),
2912			     sli_reg_read_err1(sli4),
2913			     sli_reg_read_err2(sli4));
2914		return -EIO;
2915	}
2916
2917	/* Submit a command to the bootstrap mailbox and check the status */
2918	if (sli_bmbx_write(sli4)) {
2919		efc_log_crit(sli4, "bmbx write fail phys=%pad reg=%#x\n",
2920			     &sli4->bmbx.phys, readl(sli4->reg[0] + SLI4_BMBX_REG));
2921		return -EIO;
2922	}
2923
2924	/* check completion queue entry status */
2925	if (le32_to_cpu(((struct sli4_mcqe *)cqe)->dw3_flags) &
2926	    SLI4_MCQE_VALID) {
2927		return sli_cqe_mq(sli4, cqe);
2928	}
2929	efc_log_crit(sli4, "invalid or wrong type\n");
2930	return -EIO;
2931}
2932
2933int
2934sli_cmd_config_link(struct sli4 *sli4, void *buf)
2935{
2936	struct sli4_cmd_config_link *config_link = buf;
2937
2938	memset(buf, 0, SLI4_BMBX_SIZE);
2939
2940	config_link->hdr.command = SLI4_MBX_CMD_CONFIG_LINK;
2941
2942	/* Port interprets zero in a field as "use default value" */
2943
2944	return 0;
2945}
2946
2947int
2948sli_cmd_down_link(struct sli4 *sli4, void *buf)
2949{
2950	struct sli4_mbox_command_header *hdr = buf;
2951
2952	memset(buf, 0, SLI4_BMBX_SIZE);
2953
2954	hdr->command = SLI4_MBX_CMD_DOWN_LINK;
2955
2956	/* Port interprets zero in a field as "use default value" */
2957
2958	return 0;
2959}
2960
2961int
2962sli_cmd_dump_type4(struct sli4 *sli4, void *buf, u16 wki)
2963{
2964	struct sli4_cmd_dump4 *cmd = buf;
2965
2966	memset(buf, 0, SLI4_BMBX_SIZE);
2967
2968	cmd->hdr.command = SLI4_MBX_CMD_DUMP;
2969	cmd->type_dword = cpu_to_le32(0x4);
2970	cmd->wki_selection = cpu_to_le16(wki);
2971	return 0;
2972}
2973
2974int
2975sli_cmd_common_read_transceiver_data(struct sli4 *sli4, void *buf, u32 page_num,
2976				     struct efc_dma *dma)
2977{
2978	struct sli4_rqst_cmn_read_transceiver_data *req = NULL;
2979	u32 psize;
2980
2981	if (!dma)
2982		psize = SLI4_CFG_PYLD_LENGTH(cmn_read_transceiver_data);
2983	else
2984		psize = dma->size;
2985
2986	req = sli_config_cmd_init(sli4, buf, psize, dma);
2987	if (!req)
2988		return -EIO;
2989
2990	sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_READ_TRANS_DATA,
2991			 SLI4_SUBSYSTEM_COMMON, CMD_V0,
2992			 SLI4_RQST_PYLD_LEN(cmn_read_transceiver_data));
2993
2994	req->page_number = cpu_to_le32(page_num);
2995	req->port = cpu_to_le32(sli4->port_number);
2996
2997	return 0;
2998}
2999
3000int
3001sli_cmd_read_link_stats(struct sli4 *sli4, void *buf, u8 req_ext_counters,
3002			u8 clear_overflow_flags,
3003			u8 clear_all_counters)
3004{
3005	struct sli4_cmd_read_link_stats *cmd = buf;
3006	u32 flags;
3007
3008	memset(buf, 0, SLI4_BMBX_SIZE);
3009
3010	cmd->hdr.command = SLI4_MBX_CMD_READ_LNK_STAT;
3011
3012	flags = 0;
3013	if (req_ext_counters)
3014		flags |= SLI4_READ_LNKSTAT_REC;
3015	if (clear_all_counters)
3016		flags |= SLI4_READ_LNKSTAT_CLRC;
3017	if (clear_overflow_flags)
3018		flags |= SLI4_READ_LNKSTAT_CLOF;
3019
3020	cmd->dw1_flags = cpu_to_le32(flags);
3021	return 0;
3022}
3023
3024int
3025sli_cmd_read_status(struct sli4 *sli4, void *buf, u8 clear_counters)
3026{
3027	struct sli4_cmd_read_status *cmd = buf;
3028	u32 flags = 0;
3029
3030	memset(buf, 0, SLI4_BMBX_SIZE);
3031
3032	cmd->hdr.command = SLI4_MBX_CMD_READ_STATUS;
3033	if (clear_counters)
3034		flags |= SLI4_READSTATUS_CLEAR_COUNTERS;
3035	else
3036		flags &= ~SLI4_READSTATUS_CLEAR_COUNTERS;
3037
3038	cmd->dw1_flags = cpu_to_le32(flags);
3039	return 0;
3040}
3041
3042int
3043sli_cmd_init_link(struct sli4 *sli4, void *buf, u32 speed, u8 reset_alpa)
3044{
3045	struct sli4_cmd_init_link *init_link = buf;
3046	u32 flags = 0;
3047
3048	memset(buf, 0, SLI4_BMBX_SIZE);
3049
3050	init_link->hdr.command = SLI4_MBX_CMD_INIT_LINK;
3051
3052	init_link->sel_reset_al_pa_dword =
3053				cpu_to_le32(reset_alpa);
3054	flags &= ~SLI4_INIT_LINK_F_LOOPBACK;
3055
3056	init_link->link_speed_sel_code = cpu_to_le32(speed);
3057	switch (speed) {
3058	case SLI4_LINK_SPEED_1G:
3059	case SLI4_LINK_SPEED_2G:
3060	case SLI4_LINK_SPEED_4G:
3061	case SLI4_LINK_SPEED_8G:
3062	case SLI4_LINK_SPEED_16G:
3063	case SLI4_LINK_SPEED_32G:
3064	case SLI4_LINK_SPEED_64G:
3065		flags |= SLI4_INIT_LINK_F_FIXED_SPEED;
3066		break;
3067	case SLI4_LINK_SPEED_10G:
3068		efc_log_info(sli4, "unsupported FC speed %d\n", speed);
3069		init_link->flags0 = cpu_to_le32(flags);
3070		return -EIO;
3071	}
3072
3073	switch (sli4->topology) {
3074	case SLI4_READ_CFG_TOPO_FC:
3075		/* Attempt P2P but failover to FC-AL */
3076		flags |= SLI4_INIT_LINK_F_FAIL_OVER;
3077		flags |= SLI4_INIT_LINK_F_P2P_FAIL_OVER;
3078		break;
3079	case SLI4_READ_CFG_TOPO_FC_AL:
3080		flags |= SLI4_INIT_LINK_F_FCAL_ONLY;
3081		if (speed == SLI4_LINK_SPEED_16G ||
3082		    speed == SLI4_LINK_SPEED_32G) {
3083			efc_log_info(sli4, "unsupported FC-AL speed %d\n",
3084				     speed);
3085			init_link->flags0 = cpu_to_le32(flags);
3086			return -EIO;
3087		}
3088		break;
3089	case SLI4_READ_CFG_TOPO_NON_FC_AL:
3090		flags |= SLI4_INIT_LINK_F_P2P_ONLY;
3091		break;
3092	default:
3093
3094		efc_log_info(sli4, "unsupported topology %#x\n", sli4->topology);
3095
3096		init_link->flags0 = cpu_to_le32(flags);
3097		return -EIO;
3098	}
3099
3100	flags &= ~SLI4_INIT_LINK_F_UNFAIR;
3101	flags &= ~SLI4_INIT_LINK_F_NO_LIRP;
3102	flags &= ~SLI4_INIT_LINK_F_LOOP_VALID_CHK;
3103	flags &= ~SLI4_INIT_LINK_F_NO_LISA;
3104	flags &= ~SLI4_INIT_LINK_F_PICK_HI_ALPA;
3105	init_link->flags0 = cpu_to_le32(flags);
3106
3107	return 0;
3108}
3109
3110int
3111sli_cmd_init_vfi(struct sli4 *sli4, void *buf, u16 vfi, u16 fcfi, u16 vpi)
3112{
3113	struct sli4_cmd_init_vfi *init_vfi = buf;
3114	u16 flags = 0;
3115
3116	memset(buf, 0, SLI4_BMBX_SIZE);
3117
3118	init_vfi->hdr.command = SLI4_MBX_CMD_INIT_VFI;
3119	init_vfi->vfi = cpu_to_le16(vfi);
3120	init_vfi->fcfi = cpu_to_le16(fcfi);
3121
3122	/*
3123	 * If the VPI is valid, initialize it at the same time as
3124	 * the VFI
3125	 */
3126	if (vpi != U16_MAX) {
3127		flags |= SLI4_INIT_VFI_FLAG_VP;
3128		init_vfi->flags0_word = cpu_to_le16(flags);
3129		init_vfi->vpi = cpu_to_le16(vpi);
3130	}
3131
3132	return 0;
3133}
3134
3135int
3136sli_cmd_init_vpi(struct sli4 *sli4, void *buf, u16 vpi, u16 vfi)
3137{
3138	struct sli4_cmd_init_vpi *init_vpi = buf;
3139
3140	memset(buf, 0, SLI4_BMBX_SIZE);
3141
3142	init_vpi->hdr.command = SLI4_MBX_CMD_INIT_VPI;
3143	init_vpi->vpi = cpu_to_le16(vpi);
3144	init_vpi->vfi = cpu_to_le16(vfi);
3145
3146	return 0;
3147}
3148
3149int
3150sli_cmd_post_xri(struct sli4 *sli4, void *buf, u16 xri_base, u16 xri_count)
3151{
3152	struct sli4_cmd_post_xri *post_xri = buf;
3153	u16 xri_count_flags = 0;
3154
3155	memset(buf, 0, SLI4_BMBX_SIZE);
3156
3157	post_xri->hdr.command = SLI4_MBX_CMD_POST_XRI;
3158	post_xri->xri_base = cpu_to_le16(xri_base);
3159	xri_count_flags = xri_count & SLI4_POST_XRI_COUNT;
3160	xri_count_flags |= SLI4_POST_XRI_FLAG_ENX;
3161	xri_count_flags |= SLI4_POST_XRI_FLAG_VAL;
3162	post_xri->xri_count_flags = cpu_to_le16(xri_count_flags);
3163
3164	return 0;
3165}
3166
3167int
3168sli_cmd_release_xri(struct sli4 *sli4, void *buf, u8 num_xri)
3169{
3170	struct sli4_cmd_release_xri *release_xri = buf;
3171
3172	memset(buf, 0, SLI4_BMBX_SIZE);
3173
3174	release_xri->hdr.command = SLI4_MBX_CMD_RELEASE_XRI;
3175	release_xri->xri_count_word = cpu_to_le16(num_xri &
3176					SLI4_RELEASE_XRI_COUNT);
3177
3178	return 0;
3179}
3180
3181static int
3182sli_cmd_read_config(struct sli4 *sli4, void *buf)
3183{
3184	struct sli4_cmd_read_config *read_config = buf;
3185
3186	memset(buf, 0, SLI4_BMBX_SIZE);
3187
3188	read_config->hdr.command = SLI4_MBX_CMD_READ_CONFIG;
3189
3190	return 0;
3191}
3192
3193int
3194sli_cmd_read_nvparms(struct sli4 *sli4, void *buf)
3195{
3196	struct sli4_cmd_read_nvparms *read_nvparms = buf;
3197
3198	memset(buf, 0, SLI4_BMBX_SIZE);
3199
3200	read_nvparms->hdr.command = SLI4_MBX_CMD_READ_NVPARMS;
3201
3202	return 0;
3203}
3204
3205int
3206sli_cmd_write_nvparms(struct sli4 *sli4, void *buf, u8 *wwpn, u8 *wwnn,
3207		      u8 hard_alpa, u32 preferred_d_id)
3208{
3209	struct sli4_cmd_write_nvparms *write_nvparms = buf;
3210
3211	memset(buf, 0, SLI4_BMBX_SIZE);
3212
3213	write_nvparms->hdr.command = SLI4_MBX_CMD_WRITE_NVPARMS;
3214	memcpy(write_nvparms->wwpn, wwpn, 8);
3215	memcpy(write_nvparms->wwnn, wwnn, 8);
3216
3217	write_nvparms->hard_alpa_d_id =
3218			cpu_to_le32((preferred_d_id << 8) | hard_alpa);
3219	return 0;
3220}
3221
3222static int
3223sli_cmd_read_rev(struct sli4 *sli4, void *buf, struct efc_dma *vpd)
3224{
3225	struct sli4_cmd_read_rev *read_rev = buf;
3226
3227	memset(buf, 0, SLI4_BMBX_SIZE);
3228
3229	read_rev->hdr.command = SLI4_MBX_CMD_READ_REV;
3230
3231	if (vpd && vpd->size) {
3232		read_rev->flags0_word |= cpu_to_le16(SLI4_READ_REV_FLAG_VPD);
3233
3234		read_rev->available_length_dword =
3235			cpu_to_le32(vpd->size &
3236				    SLI4_READ_REV_AVAILABLE_LENGTH);
3237
3238		read_rev->hostbuf.low =
3239				cpu_to_le32(lower_32_bits(vpd->phys));
3240		read_rev->hostbuf.high =
3241				cpu_to_le32(upper_32_bits(vpd->phys));
3242	}
3243
3244	return 0;
3245}
3246
3247int
3248sli_cmd_read_sparm64(struct sli4 *sli4, void *buf, struct efc_dma *dma, u16 vpi)
3249{
3250	struct sli4_cmd_read_sparm64 *read_sparm64 = buf;
3251
3252	if (vpi == U16_MAX) {
3253		efc_log_err(sli4, "special VPI not supported!!!\n");
3254		return -EIO;
3255	}
3256
3257	if (!dma || !dma->phys) {
3258		efc_log_err(sli4, "bad DMA buffer\n");
3259		return -EIO;
3260	}
3261
3262	memset(buf, 0, SLI4_BMBX_SIZE);
3263
3264	read_sparm64->hdr.command = SLI4_MBX_CMD_READ_SPARM64;
3265
3266	read_sparm64->bde_64.bde_type_buflen =
3267			cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
3268				    (dma->size & SLI4_BDE_LEN_MASK));
3269	read_sparm64->bde_64.u.data.low =
3270			cpu_to_le32(lower_32_bits(dma->phys));
3271	read_sparm64->bde_64.u.data.high =
3272			cpu_to_le32(upper_32_bits(dma->phys));
3273
3274	read_sparm64->vpi = cpu_to_le16(vpi);
3275
3276	return 0;
3277}
3278
3279int
3280sli_cmd_read_topology(struct sli4 *sli4, void *buf, struct efc_dma *dma)
3281{
3282	struct sli4_cmd_read_topology *read_topo = buf;
3283
3284	if (!dma || !dma->size)
3285		return -EIO;
3286
3287	if (dma->size < SLI4_MIN_LOOP_MAP_BYTES) {
3288		efc_log_err(sli4, "loop map buffer too small %zx\n", dma->size);
3289		return -EIO;
3290	}
3291
3292	memset(buf, 0, SLI4_BMBX_SIZE);
3293
3294	read_topo->hdr.command = SLI4_MBX_CMD_READ_TOPOLOGY;
3295
3296	memset(dma->virt, 0, dma->size);
3297
3298	read_topo->bde_loop_map.bde_type_buflen =
3299					cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
3300					(dma->size & SLI4_BDE_LEN_MASK));
3301	read_topo->bde_loop_map.u.data.low  =
3302				cpu_to_le32(lower_32_bits(dma->phys));
3303	read_topo->bde_loop_map.u.data.high =
3304				cpu_to_le32(upper_32_bits(dma->phys));
3305
3306	return 0;
3307}
3308
3309int
3310sli_cmd_reg_fcfi(struct sli4 *sli4, void *buf, u16 index,
3311		 struct sli4_cmd_rq_cfg *rq_cfg)
3312{
3313	struct sli4_cmd_reg_fcfi *reg_fcfi = buf;
3314	u32 i;
3315
3316	memset(buf, 0, SLI4_BMBX_SIZE);
3317
3318	reg_fcfi->hdr.command = SLI4_MBX_CMD_REG_FCFI;
3319
3320	reg_fcfi->fcf_index = cpu_to_le16(index);
3321
3322	for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
3323		switch (i) {
3324		case 0:
3325			reg_fcfi->rqid0 = rq_cfg[0].rq_id;
3326			break;
3327		case 1:
3328			reg_fcfi->rqid1 = rq_cfg[1].rq_id;
3329			break;
3330		case 2:
3331			reg_fcfi->rqid2 = rq_cfg[2].rq_id;
3332			break;
3333		case 3:
3334			reg_fcfi->rqid3 = rq_cfg[3].rq_id;
3335			break;
3336		}
3337		reg_fcfi->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask;
3338		reg_fcfi->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match;
3339		reg_fcfi->rq_cfg[i].type_mask = rq_cfg[i].type_mask;
3340		reg_fcfi->rq_cfg[i].type_match = rq_cfg[i].type_match;
3341	}
3342
3343	return 0;
3344}
3345
3346int
3347sli_cmd_reg_fcfi_mrq(struct sli4 *sli4, void *buf, u8 mode, u16 fcf_index,
3348		     u8 rq_selection_policy, u8 mrq_bit_mask, u16 num_mrqs,
3349		     struct sli4_cmd_rq_cfg *rq_cfg)
3350{
3351	struct sli4_cmd_reg_fcfi_mrq *reg_fcfi_mrq = buf;
3352	u32 i;
3353	u32 mrq_flags = 0;
3354
3355	memset(buf, 0, SLI4_BMBX_SIZE);
3356
3357	reg_fcfi_mrq->hdr.command = SLI4_MBX_CMD_REG_FCFI_MRQ;
3358	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
3359		reg_fcfi_mrq->fcf_index = cpu_to_le16(fcf_index);
3360		goto done;
3361	}
3362
3363	reg_fcfi_mrq->dw8_vlan = cpu_to_le32(SLI4_REGFCFI_MRQ_MODE);
3364
3365	for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
3366		reg_fcfi_mrq->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask;
3367		reg_fcfi_mrq->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match;
3368		reg_fcfi_mrq->rq_cfg[i].type_mask = rq_cfg[i].type_mask;
3369		reg_fcfi_mrq->rq_cfg[i].type_match = rq_cfg[i].type_match;
3370
3371		switch (i) {
3372		case 3:
3373			reg_fcfi_mrq->rqid3 = rq_cfg[i].rq_id;
3374			break;
3375		case 2:
3376			reg_fcfi_mrq->rqid2 = rq_cfg[i].rq_id;
3377			break;
3378		case 1:
3379			reg_fcfi_mrq->rqid1 = rq_cfg[i].rq_id;
3380			break;
3381		case 0:
3382			reg_fcfi_mrq->rqid0 = rq_cfg[i].rq_id;
3383			break;
3384		}
3385	}
3386
3387	mrq_flags = num_mrqs & SLI4_REGFCFI_MRQ_MASK_NUM_PAIRS;
3388	mrq_flags |= (mrq_bit_mask << 8);
3389	mrq_flags |= (rq_selection_policy << 12);
3390	reg_fcfi_mrq->dw9_mrqflags = cpu_to_le32(mrq_flags);
3391done:
3392	return 0;
3393}
3394
3395int
3396sli_cmd_reg_rpi(struct sli4 *sli4, void *buf, u32 rpi, u32 vpi, u32 fc_id,
3397		struct efc_dma *dma, u8 update, u8 enable_t10_pi)
3398{
3399	struct sli4_cmd_reg_rpi *reg_rpi = buf;
3400	u32 rportid_flags = 0;
3401
3402	memset(buf, 0, SLI4_BMBX_SIZE);
3403
3404	reg_rpi->hdr.command = SLI4_MBX_CMD_REG_RPI;
3405
3406	reg_rpi->rpi = cpu_to_le16(rpi);
3407
3408	rportid_flags = fc_id & SLI4_REGRPI_REMOTE_N_PORTID;
3409
3410	if (update)
3411		rportid_flags |= SLI4_REGRPI_UPD;
3412	else
3413		rportid_flags &= ~SLI4_REGRPI_UPD;
3414
3415	if (enable_t10_pi)
3416		rportid_flags |= SLI4_REGRPI_ETOW;
3417	else
3418		rportid_flags &= ~SLI4_REGRPI_ETOW;
3419
3420	reg_rpi->dw2_rportid_flags = cpu_to_le32(rportid_flags);
3421
3422	reg_rpi->bde_64.bde_type_buflen =
3423		cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
3424			    (SLI4_REG_RPI_BUF_LEN & SLI4_BDE_LEN_MASK));
3425	reg_rpi->bde_64.u.data.low  =
3426		cpu_to_le32(lower_32_bits(dma->phys));
3427	reg_rpi->bde_64.u.data.high =
3428		cpu_to_le32(upper_32_bits(dma->phys));
3429
3430	reg_rpi->vpi = cpu_to_le16(vpi);
3431
3432	return 0;
3433}
3434
3435int
3436sli_cmd_reg_vfi(struct sli4 *sli4, void *buf, size_t size,
3437		u16 vfi, u16 fcfi, struct efc_dma dma,
3438		u16 vpi, __be64 sli_wwpn, u32 fc_id)
3439{
3440	struct sli4_cmd_reg_vfi *reg_vfi = buf;
3441
3442	memset(buf, 0, SLI4_BMBX_SIZE);
3443
3444	reg_vfi->hdr.command = SLI4_MBX_CMD_REG_VFI;
3445
3446	reg_vfi->vfi = cpu_to_le16(vfi);
3447
3448	reg_vfi->fcfi = cpu_to_le16(fcfi);
3449
3450	reg_vfi->sparm.bde_type_buflen =
3451		cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
3452			    (SLI4_REG_RPI_BUF_LEN & SLI4_BDE_LEN_MASK));
3453	reg_vfi->sparm.u.data.low  =
3454		cpu_to_le32(lower_32_bits(dma.phys));
3455	reg_vfi->sparm.u.data.high =
3456		cpu_to_le32(upper_32_bits(dma.phys));
3457
3458	reg_vfi->e_d_tov = cpu_to_le32(sli4->e_d_tov);
3459	reg_vfi->r_a_tov = cpu_to_le32(sli4->r_a_tov);
3460
3461	reg_vfi->dw0w1_flags |= cpu_to_le16(SLI4_REGVFI_VP);
3462	reg_vfi->vpi = cpu_to_le16(vpi);
3463	memcpy(reg_vfi->wwpn, &sli_wwpn, sizeof(reg_vfi->wwpn));
3464	reg_vfi->dw10_lportid_flags = cpu_to_le32(fc_id);
3465
3466	return 0;
3467}
3468
3469int
3470sli_cmd_reg_vpi(struct sli4 *sli4, void *buf, u32 fc_id, __be64 sli_wwpn,
3471		u16 vpi, u16 vfi, bool update)
3472{
3473	struct sli4_cmd_reg_vpi *reg_vpi = buf;
3474	u32 flags = 0;
3475
3476	memset(buf, 0, SLI4_BMBX_SIZE);
3477
3478	reg_vpi->hdr.command = SLI4_MBX_CMD_REG_VPI;
3479
3480	flags = (fc_id & SLI4_REGVPI_LOCAL_N_PORTID);
3481	if (update)
3482		flags |= SLI4_REGVPI_UPD;
3483	else
3484		flags &= ~SLI4_REGVPI_UPD;
3485
3486	reg_vpi->dw2_lportid_flags = cpu_to_le32(flags);
3487	memcpy(reg_vpi->wwpn, &sli_wwpn, sizeof(reg_vpi->wwpn));
3488	reg_vpi->vpi = cpu_to_le16(vpi);
3489	reg_vpi->vfi = cpu_to_le16(vfi);
3490
3491	return 0;
3492}
3493
3494static int
3495sli_cmd_request_features(struct sli4 *sli4, void *buf, u32 features_mask,
3496			 bool query)
3497{
3498	struct sli4_cmd_request_features *req_features = buf;
3499
3500	memset(buf, 0, SLI4_BMBX_SIZE);
3501
3502	req_features->hdr.command = SLI4_MBX_CMD_RQST_FEATURES;
3503
3504	if (query)
3505		req_features->dw1_qry = cpu_to_le32(SLI4_REQFEAT_QRY);
3506
3507	req_features->cmd = cpu_to_le32(features_mask);
3508
3509	return 0;
3510}
3511
3512int
3513sli_cmd_unreg_fcfi(struct sli4 *sli4, void *buf, u16 indicator)
3514{
3515	struct sli4_cmd_unreg_fcfi *unreg_fcfi = buf;
3516
3517	memset(buf, 0, SLI4_BMBX_SIZE);
3518
3519	unreg_fcfi->hdr.command = SLI4_MBX_CMD_UNREG_FCFI;
3520	unreg_fcfi->fcfi = cpu_to_le16(indicator);
3521
3522	return 0;
3523}
3524
3525int
3526sli_cmd_unreg_rpi(struct sli4 *sli4, void *buf, u16 indicator,
3527		  enum sli4_resource which, u32 fc_id)
3528{
3529	struct sli4_cmd_unreg_rpi *unreg_rpi = buf;
3530	u32 flags = 0;
3531
3532	memset(buf, 0, SLI4_BMBX_SIZE);
3533
3534	unreg_rpi->hdr.command = SLI4_MBX_CMD_UNREG_RPI;
3535	switch (which) {
3536	case SLI4_RSRC_RPI:
3537		flags |= SLI4_UNREG_RPI_II_RPI;
3538		if (fc_id == U32_MAX)
3539			break;
3540
3541		flags |= SLI4_UNREG_RPI_DP;
3542		unreg_rpi->dw2_dest_n_portid =
3543			cpu_to_le32(fc_id & SLI4_UNREG_RPI_DEST_N_PORTID_MASK);
3544		break;
3545	case SLI4_RSRC_VPI:
3546		flags |= SLI4_UNREG_RPI_II_VPI;
3547		break;
3548	case SLI4_RSRC_VFI:
3549		flags |= SLI4_UNREG_RPI_II_VFI;
3550		break;
3551	case SLI4_RSRC_FCFI:
3552		flags |= SLI4_UNREG_RPI_II_FCFI;
3553		break;
3554	default:
3555		efc_log_info(sli4, "unknown type %#x\n", which);
3556		return -EIO;
3557	}
3558
3559	unreg_rpi->dw1w1_flags = cpu_to_le16(flags);
3560	unreg_rpi->index = cpu_to_le16(indicator);
3561
3562	return 0;
3563}
3564
3565int
3566sli_cmd_unreg_vfi(struct sli4 *sli4, void *buf, u16 index, u32 which)
3567{
3568	struct sli4_cmd_unreg_vfi *unreg_vfi = buf;
3569
3570	memset(buf, 0, SLI4_BMBX_SIZE);
3571
3572	unreg_vfi->hdr.command = SLI4_MBX_CMD_UNREG_VFI;
3573	switch (which) {
3574	case SLI4_UNREG_TYPE_DOMAIN:
3575		unreg_vfi->index = cpu_to_le16(index);
3576		break;
3577	case SLI4_UNREG_TYPE_FCF:
3578		unreg_vfi->index = cpu_to_le16(index);
3579		break;
3580	case SLI4_UNREG_TYPE_ALL:
3581		unreg_vfi->index = cpu_to_le16(U32_MAX);
3582		break;
3583	default:
3584		return -EIO;
3585	}
3586
3587	if (which != SLI4_UNREG_TYPE_DOMAIN)
3588		unreg_vfi->dw2_flags = cpu_to_le16(SLI4_UNREG_VFI_II_FCFI);
3589
3590	return 0;
3591}
3592
3593int
3594sli_cmd_unreg_vpi(struct sli4 *sli4, void *buf, u16 indicator, u32 which)
3595{
3596	struct sli4_cmd_unreg_vpi *unreg_vpi = buf;
3597	u32 flags = 0;
3598
3599	memset(buf, 0, SLI4_BMBX_SIZE);
3600
3601	unreg_vpi->hdr.command = SLI4_MBX_CMD_UNREG_VPI;
3602	unreg_vpi->index = cpu_to_le16(indicator);
3603	switch (which) {
3604	case SLI4_UNREG_TYPE_PORT:
3605		flags |= SLI4_UNREG_VPI_II_VPI;
3606		break;
3607	case SLI4_UNREG_TYPE_DOMAIN:
3608		flags |= SLI4_UNREG_VPI_II_VFI;
3609		break;
3610	case SLI4_UNREG_TYPE_FCF:
3611		flags |= SLI4_UNREG_VPI_II_FCFI;
3612		break;
3613	case SLI4_UNREG_TYPE_ALL:
3614		/* override indicator */
3615		unreg_vpi->index = cpu_to_le16(U32_MAX);
3616		flags |= SLI4_UNREG_VPI_II_FCFI;
3617		break;
3618	default:
3619		return -EIO;
3620	}
3621
3622	unreg_vpi->dw2w0_flags = cpu_to_le16(flags);
3623	return 0;
3624}
3625
3626static int
3627sli_cmd_common_modify_eq_delay(struct sli4 *sli4, void *buf,
3628			       struct sli4_queue *q, int num_q, u32 shift,
3629			       u32 delay_mult)
3630{
3631	struct sli4_rqst_cmn_modify_eq_delay *req = NULL;
3632	int i;
3633
3634	req = sli_config_cmd_init(sli4, buf,
3635			SLI4_CFG_PYLD_LENGTH(cmn_modify_eq_delay), NULL);
3636	if (!req)
3637		return -EIO;
3638
3639	sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_MODIFY_EQ_DELAY,
3640			 SLI4_SUBSYSTEM_COMMON, CMD_V0,
3641			 SLI4_RQST_PYLD_LEN(cmn_modify_eq_delay));
3642	req->num_eq = cpu_to_le32(num_q);
3643
3644	for (i = 0; i < num_q; i++) {
3645		req->eq_delay_record[i].eq_id = cpu_to_le32(q[i].id);
3646		req->eq_delay_record[i].phase = cpu_to_le32(shift);
3647		req->eq_delay_record[i].delay_multiplier =
3648			cpu_to_le32(delay_mult);
3649	}
3650
3651	return 0;
3652}
3653
3654void
3655sli4_cmd_lowlevel_set_watchdog(struct sli4 *sli4, void *buf,
3656			       size_t size, u16 timeout)
3657{
3658	struct sli4_rqst_lowlevel_set_watchdog *req = NULL;
3659
3660	req = sli_config_cmd_init(sli4, buf,
3661			SLI4_CFG_PYLD_LENGTH(lowlevel_set_watchdog), NULL);
3662	if (!req)
3663		return;
3664
3665	sli_cmd_fill_hdr(&req->hdr, SLI4_OPC_LOWLEVEL_SET_WATCHDOG,
3666			 SLI4_SUBSYSTEM_LOWLEVEL, CMD_V0,
3667			 SLI4_RQST_PYLD_LEN(lowlevel_set_watchdog));
3668	req->watchdog_timeout = cpu_to_le16(timeout);
3669}
3670
3671static int
3672sli_cmd_common_get_cntl_attributes(struct sli4 *sli4, void *buf,
3673				   struct efc_dma *dma)
3674{
3675	struct sli4_rqst_hdr *hdr = NULL;
3676
3677	hdr = sli_config_cmd_init(sli4, buf, SLI4_RQST_CMDSZ(hdr), dma);
3678	if (!hdr)
3679		return -EIO;
3680
3681	hdr->opcode = SLI4_CMN_GET_CNTL_ATTRIBUTES;
3682	hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
3683	hdr->request_length = cpu_to_le32(dma->size);
3684
3685	return 0;
3686}
3687
3688static int
3689sli_cmd_common_get_cntl_addl_attributes(struct sli4 *sli4, void *buf,
3690					struct efc_dma *dma)
3691{
3692	struct sli4_rqst_hdr *hdr = NULL;
3693
3694	hdr = sli_config_cmd_init(sli4, buf, SLI4_RQST_CMDSZ(hdr), dma);
3695	if (!hdr)
3696		return -EIO;
3697
3698	hdr->opcode = SLI4_CMN_GET_CNTL_ADDL_ATTRS;
3699	hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
3700	hdr->request_length = cpu_to_le32(dma->size);
3701
3702	return 0;
3703}
3704
3705int
3706sli_cmd_common_nop(struct sli4 *sli4, void *buf, uint64_t context)
3707{
3708	struct sli4_rqst_cmn_nop *nop = NULL;
3709
3710	nop = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(cmn_nop),
3711				  NULL);
3712	if (!nop)
3713		return -EIO;
3714
3715	sli_cmd_fill_hdr(&nop->hdr, SLI4_CMN_NOP, SLI4_SUBSYSTEM_COMMON,
3716			 CMD_V0, SLI4_RQST_PYLD_LEN(cmn_nop));
3717
3718	memcpy(&nop->context, &context, sizeof(context));
3719
3720	return 0;
3721}
3722
3723int
3724sli_cmd_common_get_resource_extent_info(struct sli4 *sli4, void *buf, u16 rtype)
3725{
3726	struct sli4_rqst_cmn_get_resource_extent_info *ext = NULL;
3727
3728	ext = sli_config_cmd_init(sli4, buf,
3729			SLI4_RQST_CMDSZ(cmn_get_resource_extent_info), NULL);
3730	if (!ext)
3731		return -EIO;
3732
3733	sli_cmd_fill_hdr(&ext->hdr, SLI4_CMN_GET_RSC_EXTENT_INFO,
3734			 SLI4_SUBSYSTEM_COMMON, CMD_V0,
3735			 SLI4_RQST_PYLD_LEN(cmn_get_resource_extent_info));
3736
3737	ext->resource_type = cpu_to_le16(rtype);
3738
3739	return 0;
3740}
3741
3742int
3743sli_cmd_common_get_sli4_parameters(struct sli4 *sli4, void *buf)
3744{
3745	struct sli4_rqst_hdr *hdr = NULL;
3746
3747	hdr = sli_config_cmd_init(sli4, buf,
3748			SLI4_CFG_PYLD_LENGTH(cmn_get_sli4_params), NULL);
3749	if (!hdr)
3750		return -EIO;
3751
3752	hdr->opcode = SLI4_CMN_GET_SLI4_PARAMS;
3753	hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
3754	hdr->request_length = SLI4_RQST_PYLD_LEN(cmn_get_sli4_params);
3755
3756	return 0;
3757}
3758
3759static int
3760sli_cmd_common_get_port_name(struct sli4 *sli4, void *buf)
3761{
3762	struct sli4_rqst_cmn_get_port_name *pname;
3763
3764	pname = sli_config_cmd_init(sli4, buf,
3765			SLI4_CFG_PYLD_LENGTH(cmn_get_port_name), NULL);
3766	if (!pname)
3767		return -EIO;
3768
3769	sli_cmd_fill_hdr(&pname->hdr, SLI4_CMN_GET_PORT_NAME,
3770			 SLI4_SUBSYSTEM_COMMON, CMD_V1,
3771			 SLI4_RQST_PYLD_LEN(cmn_get_port_name));
3772
3773	/* Set the port type value (ethernet=0, FC=1) for V1 commands */
3774	pname->port_type = SLI4_PORT_TYPE_FC;
3775
3776	return 0;
3777}
3778
3779int
3780sli_cmd_common_write_object(struct sli4 *sli4, void *buf, u16 noc,
3781			    u16 eof, u32 desired_write_length,
3782			    u32 offset, char *obj_name,
3783			    struct efc_dma *dma)
3784{
3785	struct sli4_rqst_cmn_write_object *wr_obj = NULL;
3786	struct sli4_bde *bde;
3787	u32 dwflags = 0;
3788
3789	wr_obj = sli_config_cmd_init(sli4, buf,
3790			SLI4_RQST_CMDSZ(cmn_write_object) + sizeof(*bde), NULL);
3791	if (!wr_obj)
3792		return -EIO;
3793
3794	sli_cmd_fill_hdr(&wr_obj->hdr, SLI4_CMN_WRITE_OBJECT,
3795		SLI4_SUBSYSTEM_COMMON, CMD_V0,
3796		SLI4_RQST_PYLD_LEN_VAR(cmn_write_object, sizeof(*bde)));
3797
3798	if (noc)
3799		dwflags |= SLI4_RQ_DES_WRITE_LEN_NOC;
3800	if (eof)
3801		dwflags |= SLI4_RQ_DES_WRITE_LEN_EOF;
3802	dwflags |= (desired_write_length & SLI4_RQ_DES_WRITE_LEN);
3803
3804	wr_obj->desired_write_len_dword = cpu_to_le32(dwflags);
3805
3806	wr_obj->write_offset = cpu_to_le32(offset);
3807	strncpy(wr_obj->object_name, obj_name, sizeof(wr_obj->object_name) - 1);
3808	wr_obj->host_buffer_descriptor_count = cpu_to_le32(1);
3809
3810	bde = (struct sli4_bde *)wr_obj->host_buffer_descriptor;
3811
3812	/* Setup to transfer xfer_size bytes to device */
3813	bde->bde_type_buflen =
3814		cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
3815			    (desired_write_length & SLI4_BDE_LEN_MASK));
3816	bde->u.data.low = cpu_to_le32(lower_32_bits(dma->phys));
3817	bde->u.data.high = cpu_to_le32(upper_32_bits(dma->phys));
3818
3819	return 0;
3820}
3821
3822int
3823sli_cmd_common_delete_object(struct sli4 *sli4, void *buf, char *obj_name)
3824{
3825	struct sli4_rqst_cmn_delete_object *req = NULL;
3826
3827	req = sli_config_cmd_init(sli4, buf,
3828				  SLI4_RQST_CMDSZ(cmn_delete_object), NULL);
3829	if (!req)
3830		return -EIO;
3831
3832	sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_DELETE_OBJECT,
3833			 SLI4_SUBSYSTEM_COMMON, CMD_V0,
3834			 SLI4_RQST_PYLD_LEN(cmn_delete_object));
3835
3836	strncpy(req->object_name, obj_name, sizeof(req->object_name) - 1);
3837	return 0;
3838}
3839
3840int
3841sli_cmd_common_read_object(struct sli4 *sli4, void *buf, u32 desired_read_len,
3842			   u32 offset, char *obj_name, struct efc_dma *dma)
3843{
3844	struct sli4_rqst_cmn_read_object *rd_obj = NULL;
3845	struct sli4_bde *bde;
3846
3847	rd_obj = sli_config_cmd_init(sli4, buf,
3848			SLI4_RQST_CMDSZ(cmn_read_object) + sizeof(*bde), NULL);
3849	if (!rd_obj)
3850		return -EIO;
3851
3852	sli_cmd_fill_hdr(&rd_obj->hdr, SLI4_CMN_READ_OBJECT,
3853		SLI4_SUBSYSTEM_COMMON, CMD_V0,
3854		SLI4_RQST_PYLD_LEN_VAR(cmn_read_object, sizeof(*bde)));
3855	rd_obj->desired_read_length_dword =
3856		cpu_to_le32(desired_read_len & SLI4_REQ_DESIRE_READLEN);
3857
3858	rd_obj->read_offset = cpu_to_le32(offset);
3859	strncpy(rd_obj->object_name, obj_name, sizeof(rd_obj->object_name) - 1);
3860	rd_obj->host_buffer_descriptor_count = cpu_to_le32(1);
3861
3862	bde = (struct sli4_bde *)rd_obj->host_buffer_descriptor;
3863
3864	/* Setup to transfer xfer_size bytes to device */
3865	bde->bde_type_buflen =
3866		cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
3867			    (desired_read_len & SLI4_BDE_LEN_MASK));
3868	if (dma) {
3869		bde->u.data.low = cpu_to_le32(lower_32_bits(dma->phys));
3870		bde->u.data.high = cpu_to_le32(upper_32_bits(dma->phys));
3871	} else {
3872		bde->u.data.low = 0;
3873		bde->u.data.high = 0;
3874	}
3875
3876	return 0;
3877}
3878
3879int
3880sli_cmd_dmtf_exec_clp_cmd(struct sli4 *sli4, void *buf, struct efc_dma *cmd,
3881			  struct efc_dma *resp)
3882{
3883	struct sli4_rqst_dmtf_exec_clp_cmd *clp_cmd = NULL;
3884
3885	clp_cmd = sli_config_cmd_init(sli4, buf,
3886				SLI4_RQST_CMDSZ(dmtf_exec_clp_cmd), NULL);
3887	if (!clp_cmd)
3888		return -EIO;
3889
3890	sli_cmd_fill_hdr(&clp_cmd->hdr, DMTF_EXEC_CLP_CMD, SLI4_SUBSYSTEM_DMTF,
3891			 CMD_V0, SLI4_RQST_PYLD_LEN(dmtf_exec_clp_cmd));
3892
3893	clp_cmd->cmd_buf_length = cpu_to_le32(cmd->size);
3894	clp_cmd->cmd_buf_addr_low =  cpu_to_le32(lower_32_bits(cmd->phys));
3895	clp_cmd->cmd_buf_addr_high =  cpu_to_le32(upper_32_bits(cmd->phys));
3896	clp_cmd->resp_buf_length = cpu_to_le32(resp->size);
3897	clp_cmd->resp_buf_addr_low =  cpu_to_le32(lower_32_bits(resp->phys));
3898	clp_cmd->resp_buf_addr_high =  cpu_to_le32(upper_32_bits(resp->phys));
3899	return 0;
3900}
3901
3902int
3903sli_cmd_common_set_dump_location(struct sli4 *sli4, void *buf, bool query,
3904				 bool is_buffer_list,
3905				 struct efc_dma *buffer, u8 fdb)
3906{
3907	struct sli4_rqst_cmn_set_dump_location *set_dump_loc = NULL;
3908	u32 buffer_length_flag = 0;
3909
3910	set_dump_loc = sli_config_cmd_init(sli4, buf,
3911				SLI4_RQST_CMDSZ(cmn_set_dump_location), NULL);
3912	if (!set_dump_loc)
3913		return -EIO;
3914
3915	sli_cmd_fill_hdr(&set_dump_loc->hdr, SLI4_CMN_SET_DUMP_LOCATION,
3916			 SLI4_SUBSYSTEM_COMMON, CMD_V0,
3917			 SLI4_RQST_PYLD_LEN(cmn_set_dump_location));
3918
3919	if (is_buffer_list)
3920		buffer_length_flag |= SLI4_CMN_SET_DUMP_BLP;
3921
3922	if (query)
3923		buffer_length_flag |= SLI4_CMN_SET_DUMP_QRY;
3924
3925	if (fdb)
3926		buffer_length_flag |= SLI4_CMN_SET_DUMP_FDB;
3927
3928	if (buffer) {
3929		set_dump_loc->buf_addr_low =
3930			cpu_to_le32(lower_32_bits(buffer->phys));
3931		set_dump_loc->buf_addr_high =
3932			cpu_to_le32(upper_32_bits(buffer->phys));
3933
3934		buffer_length_flag |=
3935			buffer->len & SLI4_CMN_SET_DUMP_BUFFER_LEN;
3936	} else {
3937		set_dump_loc->buf_addr_low = 0;
3938		set_dump_loc->buf_addr_high = 0;
3939		set_dump_loc->buffer_length_dword = 0;
3940	}
3941	set_dump_loc->buffer_length_dword = cpu_to_le32(buffer_length_flag);
3942	return 0;
3943}
3944
3945int
3946sli_cmd_common_set_features(struct sli4 *sli4, void *buf, u32 feature,
3947			    u32 param_len, void *parameter)
3948{
3949	struct sli4_rqst_cmn_set_features *cmd = NULL;
3950
3951	cmd = sli_config_cmd_init(sli4, buf,
3952				  SLI4_RQST_CMDSZ(cmn_set_features), NULL);
3953	if (!cmd)
3954		return -EIO;
3955
3956	sli_cmd_fill_hdr(&cmd->hdr, SLI4_CMN_SET_FEATURES,
3957			 SLI4_SUBSYSTEM_COMMON, CMD_V0,
3958			 SLI4_RQST_PYLD_LEN(cmn_set_features));
3959
3960	cmd->feature = cpu_to_le32(feature);
3961	cmd->param_len = cpu_to_le32(param_len);
3962	memcpy(cmd->params, parameter, param_len);
3963
3964	return 0;
3965}
3966
3967int
3968sli_cqe_mq(struct sli4 *sli4, void *buf)
3969{
3970	struct sli4_mcqe *mcqe = buf;
3971	u32 dwflags = le32_to_cpu(mcqe->dw3_flags);
3972	/*
3973	 * Firmware can split mbx completions into two MCQEs: first with only
3974	 * the "consumed" bit set and a second with the "complete" bit set.
3975	 * Thus, ignore MCQE unless "complete" is set.
3976	 */
3977	if (!(dwflags & SLI4_MCQE_COMPLETED))
3978		return SLI4_MCQE_STATUS_NOT_COMPLETED;
3979
3980	if (le16_to_cpu(mcqe->completion_status)) {
3981		efc_log_info(sli4, "status(st=%#x ext=%#x con=%d cmp=%d ae=%d val=%d)\n",
3982			     le16_to_cpu(mcqe->completion_status),
3983			     le16_to_cpu(mcqe->extended_status),
3984			     (dwflags & SLI4_MCQE_CONSUMED),
3985			     (dwflags & SLI4_MCQE_COMPLETED),
3986			     (dwflags & SLI4_MCQE_AE),
3987			     (dwflags & SLI4_MCQE_VALID));
3988	}
3989
3990	return le16_to_cpu(mcqe->completion_status);
3991}
3992
3993int
3994sli_cqe_async(struct sli4 *sli4, void *buf)
3995{
3996	struct sli4_acqe *acqe = buf;
3997	int rc = -EIO;
3998
3999	if (!buf) {
4000		efc_log_err(sli4, "bad parameter sli4=%p buf=%p\n", sli4, buf);
4001		return -EIO;
4002	}
4003
4004	switch (acqe->event_code) {
4005	case SLI4_ACQE_EVENT_CODE_LINK_STATE:
4006		efc_log_info(sli4, "Unsupported by FC link, evt code:%#x\n",
4007			     acqe->event_code);
4008		break;
4009	case SLI4_ACQE_EVENT_CODE_GRP_5:
4010		efc_log_info(sli4, "ACQE GRP5\n");
4011		break;
4012	case SLI4_ACQE_EVENT_CODE_SLI_PORT_EVENT:
4013		efc_log_info(sli4, "ACQE SLI Port, type=0x%x, data1,2=0x%08x,0x%08x\n",
4014			     acqe->event_type,
4015			     le32_to_cpu(acqe->event_data[0]),
4016			     le32_to_cpu(acqe->event_data[1]));
4017		break;
4018	case SLI4_ACQE_EVENT_CODE_FC_LINK_EVENT:
4019		rc = sli_fc_process_link_attention(sli4, buf);
4020		break;
4021	default:
4022		efc_log_info(sli4, "ACQE unknown=%#x\n", acqe->event_code);
4023	}
4024
4025	return rc;
4026}
4027
4028bool
4029sli_fw_ready(struct sli4 *sli4)
4030{
4031	u32 val;
4032
4033	/* Determine if the chip FW is in a ready state */
4034	val = sli_reg_read_status(sli4);
4035	return (val & SLI4_PORT_STATUS_RDY) ? 1 : 0;
4036}
4037
4038static bool
4039sli_wait_for_fw_ready(struct sli4 *sli4, u32 timeout_ms)
4040{
4041	unsigned long end;
4042
4043	end = jiffies + msecs_to_jiffies(timeout_ms);
4044
4045	do {
4046		if (sli_fw_ready(sli4))
4047			return true;
4048
4049		usleep_range(1000, 2000);
4050	} while (time_before(jiffies, end));
4051
4052	return false;
4053}
4054
4055static bool
4056sli_sliport_reset(struct sli4 *sli4)
4057{
4058	bool rc;
4059	u32 val;
4060
4061	val = SLI4_PORT_CTRL_IP;
4062	/* Initialize port, endian */
4063	writel(val, (sli4->reg[0] + SLI4_PORT_CTRL_REG));
4064
4065	rc = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC);
4066	if (!rc)
4067		efc_log_crit(sli4, "port failed to become ready after initialization\n");
4068
4069	return rc;
4070}
4071
4072static bool
4073sli_fw_init(struct sli4 *sli4)
4074{
4075	/*
4076	 * Is firmware ready for operation?
4077	 */
4078	if (!sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC)) {
4079		efc_log_crit(sli4, "FW status is NOT ready\n");
4080		return false;
4081	}
4082
4083	/*
4084	 * Reset port to a known state
4085	 */
4086	return sli_sliport_reset(sli4);
4087}
4088
4089static int
4090sli_request_features(struct sli4 *sli4, u32 *features, bool query)
4091{
4092	struct sli4_cmd_request_features *req_features = sli4->bmbx.virt;
4093
4094	if (sli_cmd_request_features(sli4, sli4->bmbx.virt, *features, query)) {
4095		efc_log_err(sli4, "bad REQUEST_FEATURES write\n");
4096		return -EIO;
4097	}
4098
4099	if (sli_bmbx_command(sli4)) {
4100		efc_log_crit(sli4, "bootstrap mailbox write fail\n");
4101		return -EIO;
4102	}
4103
4104	if (le16_to_cpu(req_features->hdr.status)) {
4105		efc_log_err(sli4, "REQUEST_FEATURES bad status %#x\n",
4106			    le16_to_cpu(req_features->hdr.status));
4107		return -EIO;
4108	}
4109
4110	*features = le32_to_cpu(req_features->resp);
4111	return 0;
4112}
4113
4114void
4115sli_calc_max_qentries(struct sli4 *sli4)
4116{
4117	enum sli4_qtype q;
4118	u32 qentries;
4119
4120	for (q = SLI4_QTYPE_EQ; q < SLI4_QTYPE_MAX; q++) {
4121		sli4->qinfo.max_qentries[q] =
4122			sli_convert_mask_to_count(sli4->qinfo.count_method[q],
4123						  sli4->qinfo.count_mask[q]);
4124	}
4125
4126	/* single, contiguous DMA allocations will be called for each queue
4127	 * of size (max_qentries * queue entry size); since these can be large,
4128	 * check against the OS max DMA allocation size
4129	 */
4130	for (q = SLI4_QTYPE_EQ; q < SLI4_QTYPE_MAX; q++) {
4131		qentries = sli4->qinfo.max_qentries[q];
4132
4133		efc_log_info(sli4, "[%s]: max_qentries from %d to %d\n",
4134			     SLI4_QNAME[q],
4135			     sli4->qinfo.max_qentries[q], qentries);
4136		sli4->qinfo.max_qentries[q] = qentries;
4137	}
4138}
4139
4140static int
4141sli_get_read_config(struct sli4 *sli4)
4142{
4143	struct sli4_rsp_read_config *conf = sli4->bmbx.virt;
4144	u32 i, total;
4145	u32 *base;
4146
4147	if (sli_cmd_read_config(sli4, sli4->bmbx.virt)) {
4148		efc_log_err(sli4, "bad READ_CONFIG write\n");
4149		return -EIO;
4150	}
4151
4152	if (sli_bmbx_command(sli4)) {
4153		efc_log_crit(sli4, "bootstrap mailbox fail (READ_CONFIG)\n");
4154		return -EIO;
4155	}
4156
4157	if (le16_to_cpu(conf->hdr.status)) {
4158		efc_log_err(sli4, "READ_CONFIG bad status %#x\n",
4159			    le16_to_cpu(conf->hdr.status));
4160		return -EIO;
4161	}
4162
4163	sli4->params.has_extents =
4164	  le32_to_cpu(conf->ext_dword) & SLI4_READ_CFG_RESP_RESOURCE_EXT;
4165	if (sli4->params.has_extents) {
4166		efc_log_err(sli4, "extents not supported\n");
4167		return -EIO;
4168	}
4169
4170	base = sli4->ext[0].base;
4171	if (!base) {
4172		int size = SLI4_RSRC_MAX * sizeof(u32);
4173
4174		base = kzalloc(size, GFP_KERNEL);
4175		if (!base)
4176			return -EIO;
4177	}
4178
4179	for (i = 0; i < SLI4_RSRC_MAX; i++) {
4180		sli4->ext[i].number = 1;
4181		sli4->ext[i].n_alloc = 0;
4182		sli4->ext[i].base = &base[i];
4183	}
4184
4185	sli4->ext[SLI4_RSRC_VFI].base[0] = le16_to_cpu(conf->vfi_base);
4186	sli4->ext[SLI4_RSRC_VFI].size = le16_to_cpu(conf->vfi_count);
4187
4188	sli4->ext[SLI4_RSRC_VPI].base[0] = le16_to_cpu(conf->vpi_base);
4189	sli4->ext[SLI4_RSRC_VPI].size = le16_to_cpu(conf->vpi_count);
4190
4191	sli4->ext[SLI4_RSRC_RPI].base[0] = le16_to_cpu(conf->rpi_base);
4192	sli4->ext[SLI4_RSRC_RPI].size = le16_to_cpu(conf->rpi_count);
4193
4194	sli4->ext[SLI4_RSRC_XRI].base[0] = le16_to_cpu(conf->xri_base);
4195	sli4->ext[SLI4_RSRC_XRI].size = le16_to_cpu(conf->xri_count);
4196
4197	sli4->ext[SLI4_RSRC_FCFI].base[0] = 0;
4198	sli4->ext[SLI4_RSRC_FCFI].size = le16_to_cpu(conf->fcfi_count);
4199
4200	for (i = 0; i < SLI4_RSRC_MAX; i++) {
4201		total = sli4->ext[i].number * sli4->ext[i].size;
4202		sli4->ext[i].use_map = bitmap_zalloc(total, GFP_KERNEL);
4203		if (!sli4->ext[i].use_map) {
4204			efc_log_err(sli4, "bitmap memory allocation failed %d\n",
4205				    i);
4206			return -EIO;
4207		}
4208		sli4->ext[i].map_size = total;
4209	}
4210
4211	sli4->topology = (le32_to_cpu(conf->topology_dword) &
4212			  SLI4_READ_CFG_RESP_TOPOLOGY) >> 24;
4213	switch (sli4->topology) {
4214	case SLI4_READ_CFG_TOPO_FC:
4215		efc_log_info(sli4, "FC (unknown)\n");
4216		break;
4217	case SLI4_READ_CFG_TOPO_NON_FC_AL:
4218		efc_log_info(sli4, "FC (direct attach)\n");
4219		break;
4220	case SLI4_READ_CFG_TOPO_FC_AL:
4221		efc_log_info(sli4, "FC (arbitrated loop)\n");
4222		break;
4223	default:
4224		efc_log_info(sli4, "bad topology %#x\n", sli4->topology);
4225	}
4226
4227	sli4->e_d_tov = le16_to_cpu(conf->e_d_tov);
4228	sli4->r_a_tov = le16_to_cpu(conf->r_a_tov);
4229
4230	sli4->link_module_type = le16_to_cpu(conf->lmt);
4231
4232	sli4->qinfo.max_qcount[SLI4_QTYPE_EQ] =	le16_to_cpu(conf->eq_count);
4233	sli4->qinfo.max_qcount[SLI4_QTYPE_CQ] =	le16_to_cpu(conf->cq_count);
4234	sli4->qinfo.max_qcount[SLI4_QTYPE_WQ] =	le16_to_cpu(conf->wq_count);
4235	sli4->qinfo.max_qcount[SLI4_QTYPE_RQ] =	le16_to_cpu(conf->rq_count);
4236
4237	/*
4238	 * READ_CONFIG doesn't give the max number of MQ. Applications
4239	 * will typically want 1, but we may need another at some future
4240	 * date. Dummy up a "max" MQ count here.
4241	 */
4242	sli4->qinfo.max_qcount[SLI4_QTYPE_MQ] = SLI4_USER_MQ_COUNT;
4243	return 0;
4244}
4245
4246static int
4247sli_get_sli4_parameters(struct sli4 *sli4)
4248{
4249	struct sli4_rsp_cmn_get_sli4_params *parms;
4250	u32 dw_loopback;
4251	u32 dw_eq_pg_cnt;
4252	u32 dw_cq_pg_cnt;
4253	u32 dw_mq_pg_cnt;
4254	u32 dw_wq_pg_cnt;
4255	u32 dw_rq_pg_cnt;
4256	u32 dw_sgl_pg_cnt;
4257
4258	if (sli_cmd_common_get_sli4_parameters(sli4, sli4->bmbx.virt))
4259		return -EIO;
4260
4261	parms = (struct sli4_rsp_cmn_get_sli4_params *)
4262		 (((u8 *)sli4->bmbx.virt) +
4263		  offsetof(struct sli4_cmd_sli_config, payload.embed));
4264
4265	if (sli_bmbx_command(sli4)) {
4266		efc_log_crit(sli4, "bootstrap mailbox write fail\n");
4267		return -EIO;
4268	}
4269
4270	if (parms->hdr.status) {
4271		efc_log_err(sli4, "COMMON_GET_SLI4_PARAMETERS bad status %#x",
4272			    parms->hdr.status);
4273		efc_log_err(sli4, "additional status %#x\n",
4274			    parms->hdr.additional_status);
4275		return -EIO;
4276	}
4277
4278	dw_loopback = le32_to_cpu(parms->dw16_loopback_scope);
4279	dw_eq_pg_cnt = le32_to_cpu(parms->dw6_eq_page_cnt);
4280	dw_cq_pg_cnt = le32_to_cpu(parms->dw8_cq_page_cnt);
4281	dw_mq_pg_cnt = le32_to_cpu(parms->dw10_mq_page_cnt);
4282	dw_wq_pg_cnt = le32_to_cpu(parms->dw12_wq_page_cnt);
4283	dw_rq_pg_cnt = le32_to_cpu(parms->dw14_rq_page_cnt);
4284
4285	sli4->params.auto_reg =	(dw_loopback & SLI4_PARAM_AREG);
4286	sli4->params.auto_xfer_rdy = (dw_loopback & SLI4_PARAM_AGXF);
4287	sli4->params.hdr_template_req =	(dw_loopback & SLI4_PARAM_HDRR);
4288	sli4->params.t10_dif_inline_capable = (dw_loopback & SLI4_PARAM_TIMM);
4289	sli4->params.t10_dif_separate_capable =	(dw_loopback & SLI4_PARAM_TSMM);
4290
4291	sli4->params.mq_create_version = GET_Q_CREATE_VERSION(dw_mq_pg_cnt);
4292	sli4->params.cq_create_version = GET_Q_CREATE_VERSION(dw_cq_pg_cnt);
4293
4294	sli4->rq_min_buf_size =	le16_to_cpu(parms->min_rq_buffer_size);
4295	sli4->rq_max_buf_size = le32_to_cpu(parms->max_rq_buffer_size);
4296
4297	sli4->qinfo.qpage_count[SLI4_QTYPE_EQ] =
4298		(dw_eq_pg_cnt & SLI4_PARAM_EQ_PAGE_CNT_MASK);
4299	sli4->qinfo.qpage_count[SLI4_QTYPE_CQ] =
4300		(dw_cq_pg_cnt & SLI4_PARAM_CQ_PAGE_CNT_MASK);
4301	sli4->qinfo.qpage_count[SLI4_QTYPE_MQ] =
4302		(dw_mq_pg_cnt & SLI4_PARAM_MQ_PAGE_CNT_MASK);
4303	sli4->qinfo.qpage_count[SLI4_QTYPE_WQ] =
4304		(dw_wq_pg_cnt & SLI4_PARAM_WQ_PAGE_CNT_MASK);
4305	sli4->qinfo.qpage_count[SLI4_QTYPE_RQ] =
4306		(dw_rq_pg_cnt & SLI4_PARAM_RQ_PAGE_CNT_MASK);
4307
4308	/* save count methods and masks for each queue type */
4309
4310	sli4->qinfo.count_mask[SLI4_QTYPE_EQ] =
4311			le16_to_cpu(parms->eqe_count_mask);
4312	sli4->qinfo.count_method[SLI4_QTYPE_EQ] =
4313			GET_Q_CNT_METHOD(dw_eq_pg_cnt);
4314
4315	sli4->qinfo.count_mask[SLI4_QTYPE_CQ] =
4316			le16_to_cpu(parms->cqe_count_mask);
4317	sli4->qinfo.count_method[SLI4_QTYPE_CQ] =
4318			GET_Q_CNT_METHOD(dw_cq_pg_cnt);
4319
4320	sli4->qinfo.count_mask[SLI4_QTYPE_MQ] =
4321			le16_to_cpu(parms->mqe_count_mask);
4322	sli4->qinfo.count_method[SLI4_QTYPE_MQ] =
4323			GET_Q_CNT_METHOD(dw_mq_pg_cnt);
4324
4325	sli4->qinfo.count_mask[SLI4_QTYPE_WQ] =
4326			le16_to_cpu(parms->wqe_count_mask);
4327	sli4->qinfo.count_method[SLI4_QTYPE_WQ] =
4328			GET_Q_CNT_METHOD(dw_wq_pg_cnt);
4329
4330	sli4->qinfo.count_mask[SLI4_QTYPE_RQ] =
4331			le16_to_cpu(parms->rqe_count_mask);
4332	sli4->qinfo.count_method[SLI4_QTYPE_RQ] =
4333			GET_Q_CNT_METHOD(dw_rq_pg_cnt);
4334
4335	/* now calculate max queue entries */
4336	sli_calc_max_qentries(sli4);
4337
4338	dw_sgl_pg_cnt = le32_to_cpu(parms->dw18_sgl_page_cnt);
4339
4340	/* max # of pages */
4341	sli4->max_sgl_pages = (dw_sgl_pg_cnt & SLI4_PARAM_SGL_PAGE_CNT_MASK);
4342
4343	/* bit map of available sizes */
4344	sli4->sgl_page_sizes = (dw_sgl_pg_cnt &
4345				SLI4_PARAM_SGL_PAGE_SZS_MASK) >> 8;
4346	/* ignore HLM here. Use value from REQUEST_FEATURES */
4347	sli4->sge_supported_length = le32_to_cpu(parms->sge_supported_length);
4348	sli4->params.sgl_pre_reg_required = (dw_loopback & SLI4_PARAM_SGLR);
4349	/* default to using pre-registered SGL's */
4350	sli4->params.sgl_pre_registered = true;
4351
4352	sli4->params.perf_hint = dw_loopback & SLI4_PARAM_PHON;
4353	sli4->params.perf_wq_id_association = (dw_loopback & SLI4_PARAM_PHWQ);
4354
4355	sli4->rq_batch = (le16_to_cpu(parms->dw15w1_rq_db_window) &
4356			  SLI4_PARAM_RQ_DB_WINDOW_MASK) >> 12;
4357
4358	/* Use the highest available WQE size. */
4359	if (((dw_wq_pg_cnt & SLI4_PARAM_WQE_SZS_MASK) >> 8) &
4360	    SLI4_128BYTE_WQE_SUPPORT)
4361		sli4->wqe_size = SLI4_WQE_EXT_BYTES;
4362	else
4363		sli4->wqe_size = SLI4_WQE_BYTES;
4364
4365	return 0;
4366}
4367
4368static int
4369sli_get_ctrl_attributes(struct sli4 *sli4)
4370{
4371	struct sli4_rsp_cmn_get_cntl_attributes *attr;
4372	struct sli4_rsp_cmn_get_cntl_addl_attributes *add_attr;
4373	struct efc_dma data;
4374	u32 psize;
4375
4376	/*
4377	 * Issue COMMON_GET_CNTL_ATTRIBUTES to get port_number. Temporarily
4378	 * uses VPD DMA buffer as the response won't fit in the embedded
4379	 * buffer.
4380	 */
4381	memset(sli4->vpd_data.virt, 0, sli4->vpd_data.size);
4382	if (sli_cmd_common_get_cntl_attributes(sli4, sli4->bmbx.virt,
4383					       &sli4->vpd_data)) {
4384		efc_log_err(sli4, "bad COMMON_GET_CNTL_ATTRIBUTES write\n");
4385		return -EIO;
4386	}
4387
4388	attr =	sli4->vpd_data.virt;
4389
4390	if (sli_bmbx_command(sli4)) {
4391		efc_log_crit(sli4, "bootstrap mailbox write fail\n");
4392		return -EIO;
4393	}
4394
4395	if (attr->hdr.status) {
4396		efc_log_err(sli4, "COMMON_GET_CNTL_ATTRIBUTES bad status %#x",
4397			    attr->hdr.status);
4398		efc_log_err(sli4, "additional status %#x\n",
4399			    attr->hdr.additional_status);
4400		return -EIO;
4401	}
4402
4403	sli4->port_number = attr->port_num_type_flags & SLI4_CNTL_ATTR_PORTNUM;
4404
4405	memcpy(sli4->bios_version_string, attr->bios_version_str,
4406	       sizeof(sli4->bios_version_string));
4407
4408	/* get additional attributes */
4409	psize = sizeof(struct sli4_rsp_cmn_get_cntl_addl_attributes);
4410	data.size = psize;
4411	data.virt = dma_alloc_coherent(&sli4->pci->dev, data.size,
4412				       &data.phys, GFP_KERNEL);
4413	if (!data.virt) {
4414		memset(&data, 0, sizeof(struct efc_dma));
4415		efc_log_err(sli4, "Failed to allocate memory for GET_CNTL_ADDL_ATTR\n");
4416		return -EIO;
4417	}
4418
4419	if (sli_cmd_common_get_cntl_addl_attributes(sli4, sli4->bmbx.virt,
4420						    &data)) {
4421		efc_log_err(sli4, "bad GET_CNTL_ADDL_ATTR write\n");
4422		dma_free_coherent(&sli4->pci->dev, data.size,
4423				  data.virt, data.phys);
4424		return -EIO;
4425	}
4426
4427	if (sli_bmbx_command(sli4)) {
4428		efc_log_crit(sli4, "mailbox fail (GET_CNTL_ADDL_ATTR)\n");
4429		dma_free_coherent(&sli4->pci->dev, data.size,
4430				  data.virt, data.phys);
4431		return -EIO;
4432	}
4433
4434	add_attr = data.virt;
4435	if (add_attr->hdr.status) {
4436		efc_log_err(sli4, "GET_CNTL_ADDL_ATTR bad status %#x\n",
4437			    add_attr->hdr.status);
4438		dma_free_coherent(&sli4->pci->dev, data.size,
4439				  data.virt, data.phys);
4440		return -EIO;
4441	}
4442
4443	memcpy(sli4->ipl_name, add_attr->ipl_file_name, sizeof(sli4->ipl_name));
4444
4445	efc_log_info(sli4, "IPL:%s\n", (char *)sli4->ipl_name);
4446
4447	dma_free_coherent(&sli4->pci->dev, data.size, data.virt,
4448			  data.phys);
4449	memset(&data, 0, sizeof(struct efc_dma));
4450	return 0;
4451}
4452
4453static int
4454sli_get_fw_rev(struct sli4 *sli4)
4455{
4456	struct sli4_cmd_read_rev	*read_rev = sli4->bmbx.virt;
4457
4458	if (sli_cmd_read_rev(sli4, sli4->bmbx.virt, &sli4->vpd_data))
4459		return -EIO;
4460
4461	if (sli_bmbx_command(sli4)) {
4462		efc_log_crit(sli4, "bootstrap mailbox write fail (READ_REV)\n");
4463		return -EIO;
4464	}
4465
4466	if (le16_to_cpu(read_rev->hdr.status)) {
4467		efc_log_err(sli4, "READ_REV bad status %#x\n",
4468			    le16_to_cpu(read_rev->hdr.status));
4469		return -EIO;
4470	}
4471
4472	sli4->fw_rev[0] = le32_to_cpu(read_rev->first_fw_id);
4473	memcpy(sli4->fw_name[0], read_rev->first_fw_name,
4474	       sizeof(sli4->fw_name[0]));
4475
4476	sli4->fw_rev[1] = le32_to_cpu(read_rev->second_fw_id);
4477	memcpy(sli4->fw_name[1], read_rev->second_fw_name,
4478	       sizeof(sli4->fw_name[1]));
4479
4480	sli4->hw_rev[0] = le32_to_cpu(read_rev->first_hw_rev);
4481	sli4->hw_rev[1] = le32_to_cpu(read_rev->second_hw_rev);
4482	sli4->hw_rev[2] = le32_to_cpu(read_rev->third_hw_rev);
4483
4484	efc_log_info(sli4, "FW1:%s (%08x) / FW2:%s (%08x)\n",
4485		     read_rev->first_fw_name, le32_to_cpu(read_rev->first_fw_id),
4486		     read_rev->second_fw_name, le32_to_cpu(read_rev->second_fw_id));
4487
4488	efc_log_info(sli4, "HW1: %08x / HW2: %08x\n",
4489		     le32_to_cpu(read_rev->first_hw_rev),
4490		     le32_to_cpu(read_rev->second_hw_rev));
4491
4492	/* Check that all VPD data was returned */
4493	if (le32_to_cpu(read_rev->returned_vpd_length) !=
4494	    le32_to_cpu(read_rev->actual_vpd_length)) {
4495		efc_log_info(sli4, "VPD length: avail=%d return=%d actual=%d\n",
4496			     le32_to_cpu(read_rev->available_length_dword) &
4497				    SLI4_READ_REV_AVAILABLE_LENGTH,
4498			     le32_to_cpu(read_rev->returned_vpd_length),
4499			     le32_to_cpu(read_rev->actual_vpd_length));
4500	}
4501	sli4->vpd_length = le32_to_cpu(read_rev->returned_vpd_length);
4502	return 0;
4503}
4504
4505static int
4506sli_get_config(struct sli4 *sli4)
4507{
4508	struct sli4_rsp_cmn_get_port_name *port_name;
4509	struct sli4_cmd_read_nvparms *read_nvparms;
4510
4511	/*
4512	 * Read the device configuration
4513	 */
4514	if (sli_get_read_config(sli4))
4515		return -EIO;
4516
4517	if (sli_get_sli4_parameters(sli4))
4518		return -EIO;
4519
4520	if (sli_get_ctrl_attributes(sli4))
4521		return -EIO;
4522
4523	if (sli_cmd_common_get_port_name(sli4, sli4->bmbx.virt))
4524		return -EIO;
4525
4526	port_name = (struct sli4_rsp_cmn_get_port_name *)
4527		    (((u8 *)sli4->bmbx.virt) +
4528		    offsetof(struct sli4_cmd_sli_config, payload.embed));
4529
4530	if (sli_bmbx_command(sli4)) {
4531		efc_log_crit(sli4, "bootstrap mailbox fail (GET_PORT_NAME)\n");
4532		return -EIO;
4533	}
4534
4535	sli4->port_name[0] = port_name->port_name[sli4->port_number];
4536	sli4->port_name[1] = '\0';
4537
4538	if (sli_get_fw_rev(sli4))
4539		return -EIO;
4540
4541	if (sli_cmd_read_nvparms(sli4, sli4->bmbx.virt)) {
4542		efc_log_err(sli4, "bad READ_NVPARMS write\n");
4543		return -EIO;
4544	}
4545
4546	if (sli_bmbx_command(sli4)) {
4547		efc_log_crit(sli4, "bootstrap mailbox fail (READ_NVPARMS)\n");
4548		return -EIO;
4549	}
4550
4551	read_nvparms = sli4->bmbx.virt;
4552	if (le16_to_cpu(read_nvparms->hdr.status)) {
4553		efc_log_err(sli4, "READ_NVPARMS bad status %#x\n",
4554			    le16_to_cpu(read_nvparms->hdr.status));
4555		return -EIO;
4556	}
4557
4558	memcpy(sli4->wwpn, read_nvparms->wwpn, sizeof(sli4->wwpn));
4559	memcpy(sli4->wwnn, read_nvparms->wwnn, sizeof(sli4->wwnn));
4560
4561	efc_log_info(sli4, "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
4562		     sli4->wwpn[0], sli4->wwpn[1], sli4->wwpn[2], sli4->wwpn[3],
4563		     sli4->wwpn[4], sli4->wwpn[5], sli4->wwpn[6], sli4->wwpn[7]);
4564	efc_log_info(sli4, "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
4565		     sli4->wwnn[0], sli4->wwnn[1], sli4->wwnn[2], sli4->wwnn[3],
4566		     sli4->wwnn[4], sli4->wwnn[5], sli4->wwnn[6], sli4->wwnn[7]);
4567
4568	return 0;
4569}
4570
4571int
4572sli_setup(struct sli4 *sli4, void *os, struct pci_dev  *pdev,
4573	  void __iomem *reg[])
4574{
4575	u32 intf = U32_MAX;
4576	u32 pci_class_rev = 0;
4577	u32 rev_id = 0;
4578	u32 family = 0;
4579	u32 asic_id = 0;
4580	u32 i;
4581	struct sli4_asic_entry_t *asic;
4582
4583	memset(sli4, 0, sizeof(struct sli4));
4584
4585	sli4->os = os;
4586	sli4->pci = pdev;
4587
4588	for (i = 0; i < 6; i++)
4589		sli4->reg[i] = reg[i];
4590	/*
4591	 * Read the SLI_INTF register to discover the register layout
4592	 * and other capability information
4593	 */
4594	if (pci_read_config_dword(pdev, SLI4_INTF_REG, &intf))
4595		return -EIO;
4596
4597	if ((intf & SLI4_INTF_VALID_MASK) != (u32)SLI4_INTF_VALID_VALUE) {
4598		efc_log_err(sli4, "SLI_INTF is not valid\n");
4599		return -EIO;
4600	}
4601
4602	/* driver only support SLI-4 */
4603	if ((intf & SLI4_INTF_REV_MASK) != SLI4_INTF_REV_S4) {
4604		efc_log_err(sli4, "Unsupported SLI revision (intf=%#x)\n", intf);
4605		return -EIO;
4606	}
4607
4608	sli4->sli_family = intf & SLI4_INTF_FAMILY_MASK;
4609
4610	sli4->if_type = intf & SLI4_INTF_IF_TYPE_MASK;
4611	efc_log_info(sli4, "status=%#x error1=%#x error2=%#x\n",
4612		     sli_reg_read_status(sli4),
4613		     sli_reg_read_err1(sli4),
4614		     sli_reg_read_err2(sli4));
4615
4616	/*
4617	 * set the ASIC type and revision
4618	 */
4619	if (pci_read_config_dword(pdev, PCI_CLASS_REVISION, &pci_class_rev))
4620		return -EIO;
4621
4622	rev_id = pci_class_rev & 0xff;
4623	family = sli4->sli_family;
4624	if (family == SLI4_FAMILY_CHECK_ASIC_TYPE) {
4625		if (!pci_read_config_dword(pdev, SLI4_ASIC_ID_REG, &asic_id))
4626			family = asic_id & SLI4_ASIC_GEN_MASK;
4627	}
4628
4629	for (i = 0, asic = sli4_asic_table; i < ARRAY_SIZE(sli4_asic_table);
4630	     i++, asic++) {
4631		if (rev_id == asic->rev_id && family == asic->family) {
4632			sli4->asic_type = family;
4633			sli4->asic_rev = rev_id;
4634			break;
4635		}
4636	}
4637	/* Fail if no matching asic type/rev was found */
4638	if (!sli4->asic_type) {
4639		efc_log_err(sli4, "no matching asic family/rev found: %02x/%02x\n",
4640			    family, rev_id);
4641		return -EIO;
4642	}
4643
4644	/*
4645	 * The bootstrap mailbox is equivalent to a MQ with a single 256 byte
4646	 * entry, a CQ with a single 16 byte entry, and no event queue.
4647	 * Alignment must be 16 bytes as the low order address bits in the
4648	 * address register are also control / status.
4649	 */
4650	sli4->bmbx.size = SLI4_BMBX_SIZE + sizeof(struct sli4_mcqe);
4651	sli4->bmbx.virt = dma_alloc_coherent(&pdev->dev, sli4->bmbx.size,
4652					     &sli4->bmbx.phys, GFP_KERNEL);
4653	if (!sli4->bmbx.virt) {
4654		memset(&sli4->bmbx, 0, sizeof(struct efc_dma));
4655		efc_log_err(sli4, "bootstrap mailbox allocation failed\n");
4656		return -EIO;
4657	}
4658
4659	if (sli4->bmbx.phys & SLI4_BMBX_MASK_LO) {
4660		efc_log_err(sli4, "bad alignment for bootstrap mailbox\n");
4661		return -EIO;
4662	}
4663
4664	efc_log_info(sli4, "bmbx v=%p p=0x%x %08x s=%zd\n", sli4->bmbx.virt,
4665		     upper_32_bits(sli4->bmbx.phys),
4666		     lower_32_bits(sli4->bmbx.phys), sli4->bmbx.size);
4667
4668	/* 4096 is arbitrary. What should this value actually be? */
4669	sli4->vpd_data.size = 4096;
4670	sli4->vpd_data.virt = dma_alloc_coherent(&pdev->dev,
4671						 sli4->vpd_data.size,
4672						 &sli4->vpd_data.phys,
4673						 GFP_KERNEL);
4674	if (!sli4->vpd_data.virt) {
4675		memset(&sli4->vpd_data, 0, sizeof(struct efc_dma));
4676		/* Note that failure isn't fatal in this specific case */
4677		efc_log_info(sli4, "VPD buffer allocation failed\n");
4678	}
4679
4680	if (!sli_fw_init(sli4)) {
4681		efc_log_err(sli4, "FW initialization failed\n");
4682		return -EIO;
4683	}
4684
4685	/*
4686	 * Set one of fcpi(initiator), fcpt(target), fcpc(combined) to true
4687	 * in addition to any other desired features
4688	 */
4689	sli4->features = (SLI4_REQFEAT_IAAB | SLI4_REQFEAT_NPIV |
4690				 SLI4_REQFEAT_DIF | SLI4_REQFEAT_VF |
4691				 SLI4_REQFEAT_FCPC | SLI4_REQFEAT_IAAR |
4692				 SLI4_REQFEAT_HLM | SLI4_REQFEAT_PERFH |
4693				 SLI4_REQFEAT_RXSEQ | SLI4_REQFEAT_RXRI |
4694				 SLI4_REQFEAT_MRQP);
4695
4696	/* use performance hints if available */
4697	if (sli4->params.perf_hint)
4698		sli4->features |= SLI4_REQFEAT_PERFH;
4699
4700	if (sli_request_features(sli4, &sli4->features, true))
4701		return -EIO;
4702
4703	if (sli_get_config(sli4))
4704		return -EIO;
4705
4706	return 0;
4707}
4708
4709int
4710sli_init(struct sli4 *sli4)
4711{
4712	if (sli4->params.has_extents) {
4713		efc_log_info(sli4, "extend allocation not supported\n");
4714		return -EIO;
4715	}
4716
4717	sli4->features &= (~SLI4_REQFEAT_HLM);
4718	sli4->features &= (~SLI4_REQFEAT_RXSEQ);
4719	sli4->features &= (~SLI4_REQFEAT_RXRI);
4720
4721	if (sli_request_features(sli4, &sli4->features, false))
4722		return -EIO;
4723
4724	return 0;
4725}
4726
4727int
4728sli_reset(struct sli4 *sli4)
4729{
4730	u32	i;
4731
4732	if (!sli_fw_init(sli4)) {
4733		efc_log_crit(sli4, "FW initialization failed\n");
4734		return -EIO;
4735	}
4736
4737	kfree(sli4->ext[0].base);
4738	sli4->ext[0].base = NULL;
4739
4740	for (i = 0; i < SLI4_RSRC_MAX; i++) {
4741		bitmap_free(sli4->ext[i].use_map);
4742		sli4->ext[i].use_map = NULL;
4743		sli4->ext[i].base = NULL;
4744	}
4745
4746	return sli_get_config(sli4);
4747}
4748
4749int
4750sli_fw_reset(struct sli4 *sli4)
4751{
4752	/*
4753	 * Firmware must be ready before issuing the reset.
4754	 */
4755	if (!sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC)) {
4756		efc_log_crit(sli4, "FW status is NOT ready\n");
4757		return -EIO;
4758	}
4759
4760	/* Lancer uses PHYDEV_CONTROL */
4761	writel(SLI4_PHYDEV_CTRL_FRST, (sli4->reg[0] + SLI4_PHYDEV_CTRL_REG));
4762
4763	/* wait for the FW to become ready after the reset */
4764	if (!sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC)) {
4765		efc_log_crit(sli4, "Failed to be ready after firmware reset\n");
4766		return -EIO;
4767	}
4768	return 0;
4769}
4770
4771void
4772sli_teardown(struct sli4 *sli4)
4773{
4774	u32 i;
4775
4776	kfree(sli4->ext[0].base);
4777	sli4->ext[0].base = NULL;
4778
4779	for (i = 0; i < SLI4_RSRC_MAX; i++) {
4780		sli4->ext[i].base = NULL;
4781
4782		bitmap_free(sli4->ext[i].use_map);
4783		sli4->ext[i].use_map = NULL;
4784	}
4785
4786	if (!sli_sliport_reset(sli4))
4787		efc_log_err(sli4, "FW deinitialization failed\n");
4788
4789	dma_free_coherent(&sli4->pci->dev, sli4->vpd_data.size,
4790			  sli4->vpd_data.virt, sli4->vpd_data.phys);
4791	memset(&sli4->vpd_data, 0, sizeof(struct efc_dma));
4792
4793	dma_free_coherent(&sli4->pci->dev, sli4->bmbx.size,
4794			  sli4->bmbx.virt, sli4->bmbx.phys);
4795	memset(&sli4->bmbx, 0, sizeof(struct efc_dma));
4796}
4797
4798int
4799sli_callback(struct sli4 *sli4, enum sli4_callback which,
4800	     void *func, void *arg)
4801{
4802	if (!func) {
4803		efc_log_err(sli4, "bad parameter sli4=%p which=%#x func=%p\n",
4804			    sli4, which, func);
4805		return -EIO;
4806	}
4807
4808	switch (which) {
4809	case SLI4_CB_LINK:
4810		sli4->link = func;
4811		sli4->link_arg = arg;
4812		break;
4813	default:
4814		efc_log_info(sli4, "unknown callback %#x\n", which);
4815		return -EIO;
4816	}
4817
4818	return 0;
4819}
4820
4821int
4822sli_eq_modify_delay(struct sli4 *sli4, struct sli4_queue *eq,
4823		    u32 num_eq, u32 shift, u32 delay_mult)
4824{
4825	sli_cmd_common_modify_eq_delay(sli4, sli4->bmbx.virt, eq, num_eq,
4826				       shift, delay_mult);
4827
4828	if (sli_bmbx_command(sli4)) {
4829		efc_log_crit(sli4, "bootstrap mailbox write fail (MODIFY EQ DELAY)\n");
4830		return -EIO;
4831	}
4832	if (sli_res_sli_config(sli4, sli4->bmbx.virt)) {
4833		efc_log_err(sli4, "bad status MODIFY EQ DELAY\n");
4834		return -EIO;
4835	}
4836
4837	return 0;
4838}
4839
4840int
4841sli_resource_alloc(struct sli4 *sli4, enum sli4_resource rtype,
4842		   u32 *rid, u32 *index)
4843{
4844	int rc = 0;
4845	u32 size;
4846	u32 ext_idx;
4847	u32 item_idx;
4848	u32 position;
4849
4850	*rid = U32_MAX;
4851	*index = U32_MAX;
4852
4853	switch (rtype) {
4854	case SLI4_RSRC_VFI:
4855	case SLI4_RSRC_VPI:
4856	case SLI4_RSRC_RPI:
4857	case SLI4_RSRC_XRI:
4858		position =
4859		find_first_zero_bit(sli4->ext[rtype].use_map,
4860				    sli4->ext[rtype].map_size);
4861		if (position >= sli4->ext[rtype].map_size) {
4862			efc_log_err(sli4, "out of resource %d (alloc=%d)\n",
4863				    rtype, sli4->ext[rtype].n_alloc);
4864			rc = -EIO;
4865			break;
4866		}
4867		set_bit(position, sli4->ext[rtype].use_map);
4868		*index = position;
4869
4870		size = sli4->ext[rtype].size;
4871
4872		ext_idx = *index / size;
4873		item_idx   = *index % size;
4874
4875		*rid = sli4->ext[rtype].base[ext_idx] + item_idx;
4876
4877		sli4->ext[rtype].n_alloc++;
4878		break;
4879	default:
4880		rc = -EIO;
4881	}
4882
4883	return rc;
4884}
4885
4886int
4887sli_resource_free(struct sli4 *sli4, enum sli4_resource rtype, u32 rid)
4888{
4889	int rc = -EIO;
4890	u32 x;
4891	u32 size, *base;
4892
4893	switch (rtype) {
4894	case SLI4_RSRC_VFI:
4895	case SLI4_RSRC_VPI:
4896	case SLI4_RSRC_RPI:
4897	case SLI4_RSRC_XRI:
4898		/*
4899		 * Figure out which extent contains the resource ID. I.e. find
4900		 * the extent such that
4901		 *   extent->base <= resource ID < extent->base + extent->size
4902		 */
4903		base = sli4->ext[rtype].base;
4904		size = sli4->ext[rtype].size;
4905
4906		/*
4907		 * In the case of FW reset, this may be cleared
4908		 * but the force_free path will still attempt to
4909		 * free the resource. Prevent a NULL pointer access.
4910		 */
4911		if (!base)
4912			break;
4913
4914		for (x = 0; x < sli4->ext[rtype].number; x++) {
4915			if ((rid < base[x] || (rid >= (base[x] + size))))
4916				continue;
4917
4918			rid -= base[x];
4919			clear_bit((x * size) + rid, sli4->ext[rtype].use_map);
4920			rc = 0;
4921			break;
4922		}
4923		break;
4924	default:
4925		break;
4926	}
4927
4928	return rc;
4929}
4930
4931int
4932sli_resource_reset(struct sli4 *sli4, enum sli4_resource rtype)
4933{
4934	int rc = -EIO;
4935	u32 i;
4936
4937	switch (rtype) {
4938	case SLI4_RSRC_VFI:
4939	case SLI4_RSRC_VPI:
4940	case SLI4_RSRC_RPI:
4941	case SLI4_RSRC_XRI:
4942		for (i = 0; i < sli4->ext[rtype].map_size; i++)
4943			clear_bit(i, sli4->ext[rtype].use_map);
4944		rc = 0;
4945		break;
4946	default:
4947		break;
4948	}
4949
4950	return rc;
4951}
4952
4953int sli_raise_ue(struct sli4 *sli4, u8 dump)
4954{
4955	u32 val = 0;
4956
4957	if (dump == SLI4_FUNC_DESC_DUMP) {
4958		val = SLI4_PORT_CTRL_FDD | SLI4_PORT_CTRL_IP;
4959		writel(val, (sli4->reg[0] + SLI4_PORT_CTRL_REG));
4960	} else {
4961		val = SLI4_PHYDEV_CTRL_FRST;
4962
4963		if (dump == SLI4_CHIP_LEVEL_DUMP)
4964			val |= SLI4_PHYDEV_CTRL_DD;
4965		writel(val, (sli4->reg[0] + SLI4_PHYDEV_CTRL_REG));
4966	}
4967
4968	return 0;
4969}
4970
4971int sli_dump_is_ready(struct sli4 *sli4)
4972{
4973	int rc = SLI4_DUMP_READY_STATUS_NOT_READY;
4974	u32 port_val;
4975	u32 bmbx_val;
4976
4977	/*
4978	 * Ensure that the port is ready AND the mailbox is
4979	 * ready before signaling that the dump is ready to go.
4980	 */
4981	port_val = sli_reg_read_status(sli4);
4982	bmbx_val = readl(sli4->reg[0] + SLI4_BMBX_REG);
4983
4984	if ((bmbx_val & SLI4_BMBX_RDY) &&
4985	    (port_val & SLI4_PORT_STATUS_RDY)) {
4986		if (port_val & SLI4_PORT_STATUS_DIP)
4987			rc = SLI4_DUMP_READY_STATUS_DD_PRESENT;
4988		else if (port_val & SLI4_PORT_STATUS_FDP)
4989			rc = SLI4_DUMP_READY_STATUS_FDB_PRESENT;
4990	}
4991
4992	return rc;
4993}
4994
4995bool sli_reset_required(struct sli4 *sli4)
4996{
4997	u32 val;
4998
4999	val = sli_reg_read_status(sli4);
5000	return (val & SLI4_PORT_STATUS_RN);
5001}
5002
5003int
5004sli_cmd_post_sgl_pages(struct sli4 *sli4, void *buf, u16 xri,
5005		       u32 xri_count, struct efc_dma *page0[],
5006		       struct efc_dma *page1[], struct efc_dma *dma)
5007{
5008	struct sli4_rqst_post_sgl_pages *post = NULL;
5009	u32 i;
5010	__le32 req_len;
5011
5012	post = sli_config_cmd_init(sli4, buf,
5013				   SLI4_CFG_PYLD_LENGTH(post_sgl_pages), dma);
5014	if (!post)
5015		return -EIO;
5016
5017	/* payload size calculation */
5018	/* 4 = xri_start + xri_count */
5019	/* xri_count = # of XRI's registered */
5020	/* sizeof(uint64_t) = physical address size */
5021	/* 2 = # of physical addresses per page set */
5022	req_len = cpu_to_le32(4 + (xri_count * (sizeof(uint64_t) * 2)));
5023	sli_cmd_fill_hdr(&post->hdr, SLI4_OPC_POST_SGL_PAGES, SLI4_SUBSYSTEM_FC,
5024			 CMD_V0, req_len);
5025	post->xri_start = cpu_to_le16(xri);
5026	post->xri_count = cpu_to_le16(xri_count);
5027
5028	for (i = 0; i < xri_count; i++) {
5029		post->page_set[i].page0_low  =
5030				cpu_to_le32(lower_32_bits(page0[i]->phys));
5031		post->page_set[i].page0_high =
5032				cpu_to_le32(upper_32_bits(page0[i]->phys));
5033	}
5034
5035	if (page1) {
5036		for (i = 0; i < xri_count; i++) {
5037			post->page_set[i].page1_low =
5038				cpu_to_le32(lower_32_bits(page1[i]->phys));
5039			post->page_set[i].page1_high =
5040				cpu_to_le32(upper_32_bits(page1[i]->phys));
5041		}
5042	}
5043
5044	return 0;
5045}
5046
5047int
5048sli_cmd_post_hdr_templates(struct sli4 *sli4, void *buf, struct efc_dma *dma,
5049			   u16 rpi, struct efc_dma *payload_dma)
5050{
5051	struct sli4_rqst_post_hdr_templates *req = NULL;
5052	uintptr_t phys = 0;
5053	u32 i = 0;
5054	u32 page_count, payload_size;
5055
5056	page_count = sli_page_count(dma->size, SLI_PAGE_SIZE);
5057
5058	payload_size = ((sizeof(struct sli4_rqst_post_hdr_templates) +
5059		(page_count * SZ_DMAADDR)) - sizeof(struct sli4_rqst_hdr));
5060
5061	if (page_count > 16) {
5062		/*
5063		 * We can't fit more than 16 descriptors into an embedded mbox
5064		 * command, it has to be non-embedded
5065		 */
5066		payload_dma->size = payload_size;
5067		payload_dma->virt = dma_alloc_coherent(&sli4->pci->dev,
5068						       payload_dma->size,
5069					     &payload_dma->phys, GFP_KERNEL);
5070		if (!payload_dma->virt) {
5071			memset(payload_dma, 0, sizeof(struct efc_dma));
5072			efc_log_err(sli4, "mbox payload memory allocation fail\n");
5073			return -EIO;
5074		}
5075		req = sli_config_cmd_init(sli4, buf, payload_size, payload_dma);
5076	} else {
5077		req = sli_config_cmd_init(sli4, buf, payload_size, NULL);
5078	}
5079
5080	if (!req)
5081		return -EIO;
5082
5083	if (rpi == U16_MAX)
5084		rpi = sli4->ext[SLI4_RSRC_RPI].base[0];
5085
5086	sli_cmd_fill_hdr(&req->hdr, SLI4_OPC_POST_HDR_TEMPLATES,
5087			 SLI4_SUBSYSTEM_FC, CMD_V0,
5088			 SLI4_RQST_PYLD_LEN(post_hdr_templates));
5089
5090	req->rpi_offset = cpu_to_le16(rpi);
5091	req->page_count = cpu_to_le16(page_count);
5092	phys = dma->phys;
5093	for (i = 0; i < page_count; i++) {
5094		req->page_descriptor[i].low  = cpu_to_le32(lower_32_bits(phys));
5095		req->page_descriptor[i].high = cpu_to_le32(upper_32_bits(phys));
5096
5097		phys += SLI_PAGE_SIZE;
5098	}
5099
5100	return 0;
5101}
5102
5103u32
5104sli_fc_get_rpi_requirements(struct sli4 *sli4, u32 n_rpi)
5105{
5106	u32 bytes = 0;
5107
5108	/* Check if header templates needed */
5109	if (sli4->params.hdr_template_req)
5110		/* round up to a page */
5111		bytes = round_up(n_rpi * SLI4_HDR_TEMPLATE_SIZE, SLI_PAGE_SIZE);
5112
5113	return bytes;
5114}
5115
5116const char *
5117sli_fc_get_status_string(u32 status)
5118{
5119	static struct {
5120		u32 code;
5121		const char *label;
5122	} lookup[] = {
5123		{SLI4_FC_WCQE_STATUS_SUCCESS,		"SUCCESS"},
5124		{SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE,	"FCP_RSP_FAILURE"},
5125		{SLI4_FC_WCQE_STATUS_REMOTE_STOP,	"REMOTE_STOP"},
5126		{SLI4_FC_WCQE_STATUS_LOCAL_REJECT,	"LOCAL_REJECT"},
5127		{SLI4_FC_WCQE_STATUS_NPORT_RJT,		"NPORT_RJT"},
5128		{SLI4_FC_WCQE_STATUS_FABRIC_RJT,	"FABRIC_RJT"},
5129		{SLI4_FC_WCQE_STATUS_NPORT_BSY,		"NPORT_BSY"},
5130		{SLI4_FC_WCQE_STATUS_FABRIC_BSY,	"FABRIC_BSY"},
5131		{SLI4_FC_WCQE_STATUS_LS_RJT,		"LS_RJT"},
5132		{SLI4_FC_WCQE_STATUS_CMD_REJECT,	"CMD_REJECT"},
5133		{SLI4_FC_WCQE_STATUS_FCP_TGT_LENCHECK,	"FCP_TGT_LENCHECK"},
5134		{SLI4_FC_WCQE_STATUS_RQ_BUF_LEN_EXCEEDED, "BUF_LEN_EXCEEDED"},
5135		{SLI4_FC_WCQE_STATUS_RQ_INSUFF_BUF_NEEDED,
5136				"RQ_INSUFF_BUF_NEEDED"},
5137		{SLI4_FC_WCQE_STATUS_RQ_INSUFF_FRM_DISC, "RQ_INSUFF_FRM_DESC"},
5138		{SLI4_FC_WCQE_STATUS_RQ_DMA_FAILURE,	"RQ_DMA_FAILURE"},
5139		{SLI4_FC_WCQE_STATUS_FCP_RSP_TRUNCATE,	"FCP_RSP_TRUNCATE"},
5140		{SLI4_FC_WCQE_STATUS_DI_ERROR,		"DI_ERROR"},
5141		{SLI4_FC_WCQE_STATUS_BA_RJT,		"BA_RJT"},
5142		{SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_NEEDED,
5143				"RQ_INSUFF_XRI_NEEDED"},
5144		{SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_DISC, "INSUFF_XRI_DISC"},
5145		{SLI4_FC_WCQE_STATUS_RX_ERROR_DETECT,	"RX_ERROR_DETECT"},
5146		{SLI4_FC_WCQE_STATUS_RX_ABORT_REQUEST,	"RX_ABORT_REQUEST"},
5147		};
5148	u32 i;
5149
5150	for (i = 0; i < ARRAY_SIZE(lookup); i++) {
5151		if (status == lookup[i].code)
5152			return lookup[i].label;
5153	}
5154	return "unknown";
5155}
5156