1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses.  You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 *     Redistribution and use in source and binary forms, with or
13 *     without modification, are permitted provided that the following
14 *     conditions are met:
15 *
16 *      - Redistributions of source code must retain the above
17 *        copyright notice, this list of conditions and the following
18 *        disclaimer.
19 *
20 *      - Redistributions in binary form must reproduce the above
21 *        copyright notice, this list of conditions and the following
22 *        disclaimer in the documentation and/or other materials
23 *        provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/delay.h>
36#include "cxgb4.h"
37#include "t4_regs.h"
38#include "t4_values.h"
39#include "t4fw_api.h"
40#include "t4fw_version.h"
41
42/**
43 *	t4_wait_op_done_val - wait until an operation is completed
44 *	@adapter: the adapter performing the operation
45 *	@reg: the register to check for completion
46 *	@mask: a single-bit field within @reg that indicates completion
47 *	@polarity: the value of the field when the operation is completed
48 *	@attempts: number of check iterations
49 *	@delay: delay in usecs between iterations
50 *	@valp: where to store the value of the register at completion time
51 *
52 *	Wait until an operation is completed by checking a bit in a register
53 *	up to @attempts times.  If @valp is not NULL the value of the register
54 *	at the time it indicated completion is stored there.  Returns 0 if the
55 *	operation completes and	-EAGAIN	otherwise.
56 */
57static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
58			       int polarity, int attempts, int delay, u32 *valp)
59{
60	while (1) {
61		u32 val = t4_read_reg(adapter, reg);
62
63		if (!!(val & mask) == polarity) {
64			if (valp)
65				*valp = val;
66			return 0;
67		}
68		if (--attempts == 0)
69			return -EAGAIN;
70		if (delay)
71			udelay(delay);
72	}
73}
74
75static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
76				  int polarity, int attempts, int delay)
77{
78	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
79				   delay, NULL);
80}
81
82/**
83 *	t4_set_reg_field - set a register field to a value
84 *	@adapter: the adapter to program
85 *	@addr: the register address
86 *	@mask: specifies the portion of the register to modify
87 *	@val: the new value for the register field
88 *
89 *	Sets a register field specified by the supplied mask to the
90 *	given value.
91 */
92void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
93		      u32 val)
94{
95	u32 v = t4_read_reg(adapter, addr) & ~mask;
96
97	t4_write_reg(adapter, addr, v | val);
98	(void) t4_read_reg(adapter, addr);      /* flush */
99}
100
101/**
102 *	t4_read_indirect - read indirectly addressed registers
103 *	@adap: the adapter
104 *	@addr_reg: register holding the indirect address
105 *	@data_reg: register holding the value of the indirect register
106 *	@vals: where the read register values are stored
107 *	@nregs: how many indirect registers to read
108 *	@start_idx: index of first indirect register to read
109 *
110 *	Reads registers that are accessed indirectly through an address/data
111 *	register pair.
112 */
113void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
114			     unsigned int data_reg, u32 *vals,
115			     unsigned int nregs, unsigned int start_idx)
116{
117	while (nregs--) {
118		t4_write_reg(adap, addr_reg, start_idx);
119		*vals++ = t4_read_reg(adap, data_reg);
120		start_idx++;
121	}
122}
123
124/**
125 *	t4_write_indirect - write indirectly addressed registers
126 *	@adap: the adapter
127 *	@addr_reg: register holding the indirect addresses
128 *	@data_reg: register holding the value for the indirect registers
129 *	@vals: values to write
130 *	@nregs: how many indirect registers to write
131 *	@start_idx: address of first indirect register to write
132 *
133 *	Writes a sequential block of registers that are accessed indirectly
134 *	through an address/data register pair.
135 */
136void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
137		       unsigned int data_reg, const u32 *vals,
138		       unsigned int nregs, unsigned int start_idx)
139{
140	while (nregs--) {
141		t4_write_reg(adap, addr_reg, start_idx++);
142		t4_write_reg(adap, data_reg, *vals++);
143	}
144}
145
146/*
147 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
148 * mechanism.  This guarantees that we get the real value even if we're
149 * operating within a Virtual Machine and the Hypervisor is trapping our
150 * Configuration Space accesses.
151 */
152void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
153{
154	u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
155
156	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
157		req |= ENABLE_F;
158	else
159		req |= T6_ENABLE_F;
160
161	if (is_t4(adap->params.chip))
162		req |= LOCALCFG_F;
163
164	t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
165	*val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
166
167	/* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
168	 * Configuration Space read.  (None of the other fields matter when
169	 * ENABLE is 0 so a simple register write is easier than a
170	 * read-modify-write via t4_set_reg_field().)
171	 */
172	t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
173}
174
175/*
176 * t4_report_fw_error - report firmware error
177 * @adap: the adapter
178 *
179 * The adapter firmware can indicate error conditions to the host.
180 * If the firmware has indicated an error, print out the reason for
181 * the firmware error.
182 */
183static void t4_report_fw_error(struct adapter *adap)
184{
185	static const char *const reason[] = {
186		"Crash",                        /* PCIE_FW_EVAL_CRASH */
187		"During Device Preparation",    /* PCIE_FW_EVAL_PREP */
188		"During Device Configuration",  /* PCIE_FW_EVAL_CONF */
189		"During Device Initialization", /* PCIE_FW_EVAL_INIT */
190		"Unexpected Event",             /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
191		"Insufficient Airflow",         /* PCIE_FW_EVAL_OVERHEAT */
192		"Device Shutdown",              /* PCIE_FW_EVAL_DEVICESHUTDOWN */
193		"Reserved",                     /* reserved */
194	};
195	u32 pcie_fw;
196
197	pcie_fw = t4_read_reg(adap, PCIE_FW_A);
198	if (pcie_fw & PCIE_FW_ERR_F) {
199		dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
200			reason[PCIE_FW_EVAL_G(pcie_fw)]);
201		adap->flags &= ~CXGB4_FW_OK;
202	}
203}
204
205/*
206 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
207 */
208static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
209			 u32 mbox_addr)
210{
211	for ( ; nflit; nflit--, mbox_addr += 8)
212		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
213}
214
215/*
216 * Handle a FW assertion reported in a mailbox.
217 */
218static void fw_asrt(struct adapter *adap, u32 mbox_addr)
219{
220	struct fw_debug_cmd asrt;
221
222	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
223	dev_alert(adap->pdev_dev,
224		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
225		  asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
226		  be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
227}
228
229/**
230 *	t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
231 *	@adapter: the adapter
232 *	@cmd: the Firmware Mailbox Command or Reply
233 *	@size: command length in bytes
234 *	@access: the time (ms) needed to access the Firmware Mailbox
235 *	@execute: the time (ms) the command spent being executed
236 */
237static void t4_record_mbox(struct adapter *adapter,
238			   const __be64 *cmd, unsigned int size,
239			   int access, int execute)
240{
241	struct mbox_cmd_log *log = adapter->mbox_log;
242	struct mbox_cmd *entry;
243	int i;
244
245	entry = mbox_cmd_log_entry(log, log->cursor++);
246	if (log->cursor == log->size)
247		log->cursor = 0;
248
249	for (i = 0; i < size / 8; i++)
250		entry->cmd[i] = be64_to_cpu(cmd[i]);
251	while (i < MBOX_LEN / 8)
252		entry->cmd[i++] = 0;
253	entry->timestamp = jiffies;
254	entry->seqno = log->seqno++;
255	entry->access = access;
256	entry->execute = execute;
257}
258
/**
 *	t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *	@timeout: time to wait for command to finish before timing out
 *		(a negative value implies @sleep_ok is forced false and the
 *		absolute value is used as the timeout)
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 *	to respond.  @sleep_ok determines whether we may sleep while awaiting
 *	the response.  If sleeping is allowed we use progressive backoff
 *	otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
	/* Progressive backoff schedule in milliseconds; once the index
	 * reaches the end, the last entry repeats.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	struct mbox_list entry;
	u16 access = 0;	/* NOTE(review): logged below but never updated here */
	u16 execute = 0;
	u32 v;
	u64 res;
	int i, ms, delay_idx, ret;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
	__be64 cmd_rpl[MBOX_LEN / 8];
	u32 pcie_fw;

	/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	/* If we have a negative timeout, that implies that we can't sleep. */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

	/* Queue ourselves onto the mailbox access list.  When our entry is at
	 * the front of the list, we have rights to access the mailbox.  So we
	 * wait [for a while] till we're at the front [or bail out with an
	 * EBUSY] ...
	 */
	spin_lock_bh(&adap->mbox_lock);
	list_add_tail(&entry.list, &adap->mlist.list);
	spin_unlock_bh(&adap->mbox_lock);

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; ; i += ms) {
		/* If we've waited too long, return a busy indication.  This
		 * really ought to be based on our initial position in the
		 * mailbox access list but this is a start.  We very rarely
		 * contend on access to the mailbox ...
		 */
		pcie_fw = t4_read_reg(adap, PCIE_FW_A);
		if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
			spin_lock_bh(&adap->mbox_lock);
			list_del(&entry.list);
			spin_unlock_bh(&adap->mbox_lock);
			ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
			t4_record_mbox(adap, cmd, size, access, ret);
			return ret;
		}

		/* If we're at the head, break out and start the mailbox
		 * protocol.
		 */
		if (list_first_entry(&adap->mlist.list, struct mbox_list,
				     list) == &entry)
			break;

		/* Delay for a bit before checking again ... */
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			mdelay(ms);
		}
	}

	/* Loop trying to get ownership of the mailbox.  Return an error
	 * if we can't gain ownership.
	 */
	v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
	if (v != MBOX_OWNER_DRV) {
		spin_lock_bh(&adap->mbox_lock);
		list_del(&entry.list);
		spin_unlock_bh(&adap->mbox_lock);
		ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
		t4_record_mbox(adap, cmd, size, access, ret);
		return ret;
	}

	/* Copy in the new mailbox command and send it on its way ... */
	t4_record_mbox(adap, cmd, size, access, 0);
	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	/* Hand the mailbox over to the firmware and flush the write. */
	t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	/* Poll for the reply until @timeout ms elapse or the firmware
	 * reports an error in PCIE_FW.
	 */
	for (i = 0;
	     !((pcie_fw = t4_read_reg(adap, PCIE_FW_A)) & PCIE_FW_ERR_F) &&
	     i < timeout;
	     i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
			/* Ownership returned without a valid message:
			 * rearm the control register and keep waiting.
			 */
			if (!(v & MBMSGVALID_F)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			get_mbox_rpl(adap, cmd_rpl, MBOX_LEN / 8, data_reg);
			res = be64_to_cpu(cmd_rpl[0]);

			/* A FW_DEBUG_CMD reply means the firmware asserted
			 * instead of answering our command.
			 */
			if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL_V(EIO);
			} else if (rpl) {
				memcpy(rpl, cmd_rpl, size);
			}

			t4_write_reg(adap, ctl_reg, 0);

			execute = i + ms;
			t4_record_mbox(adap, cmd_rpl,
				       MBOX_LEN, access, execute);
			spin_lock_bh(&adap->mbox_lock);
			list_del(&entry.list);
			spin_unlock_bh(&adap->mbox_lock);
			/* negate FW's return value: 0 on success */
			return -FW_CMD_RETVAL_G((int)res);
		}
	}

	/* We timed out waiting for a reply (or the firmware died):
	 * log it, dequeue ourselves, and declare a fatal adapter error.
	 */
	ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
	t4_record_mbox(adap, cmd, size, access, ret);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	t4_report_fw_error(adap);
	spin_lock_bh(&adap->mbox_lock);
	list_del(&entry.list);
	spin_unlock_bh(&adap->mbox_lock);
	t4_fatal_err(adap);
	return ret;
}
441
442int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
443		    void *rpl, bool sleep_ok)
444{
445	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
446				       FW_CMD_MAX_TIMEOUT);
447}
448
449static int t4_edc_err_read(struct adapter *adap, int idx)
450{
451	u32 edc_ecc_err_addr_reg;
452	u32 rdata_reg;
453
454	if (is_t4(adap->params.chip)) {
455		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
456		return 0;
457	}
458	if (idx != 0 && idx != 1) {
459		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
460		return 0;
461	}
462
463	edc_ecc_err_addr_reg = EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, idx);
464	rdata_reg = EDC_T5_REG(EDC_H_BIST_STATUS_RDATA_A, idx);
465
466	CH_WARN(adap,
467		"edc%d err addr 0x%x: 0x%x.\n",
468		idx, edc_ecc_err_addr_reg,
469		t4_read_reg(adap, edc_ecc_err_addr_reg));
470	CH_WARN(adap,
471		"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
472		rdata_reg,
473		(unsigned long long)t4_read_reg64(adap, rdata_reg),
474		(unsigned long long)t4_read_reg64(adap, rdata_reg + 8),
475		(unsigned long long)t4_read_reg64(adap, rdata_reg + 16),
476		(unsigned long long)t4_read_reg64(adap, rdata_reg + 24),
477		(unsigned long long)t4_read_reg64(adap, rdata_reg + 32),
478		(unsigned long long)t4_read_reg64(adap, rdata_reg + 40),
479		(unsigned long long)t4_read_reg64(adap, rdata_reg + 48),
480		(unsigned long long)t4_read_reg64(adap, rdata_reg + 56),
481		(unsigned long long)t4_read_reg64(adap, rdata_reg + 64));
482
483	return 0;
484}
485
486/**
487 * t4_memory_rw_init - Get memory window relative offset, base, and size.
488 * @adap: the adapter
489 * @win: PCI-E Memory Window to use
490 * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_HMA or MEM_MC
491 * @mem_off: memory relative offset with respect to @mtype.
492 * @mem_base: configured memory base address.
493 * @mem_aperture: configured memory window aperture.
494 *
495 * Get the configured memory window's relative offset, base, and size.
496 */
497int t4_memory_rw_init(struct adapter *adap, int win, int mtype, u32 *mem_off,
498		      u32 *mem_base, u32 *mem_aperture)
499{
500	u32 edc_size, mc_size, mem_reg;
501
502	/* Offset into the region of memory which is being accessed
503	 * MEM_EDC0 = 0
504	 * MEM_EDC1 = 1
505	 * MEM_MC   = 2 -- MEM_MC for chips with only 1 memory controller
506	 * MEM_MC1  = 3 -- for chips with 2 memory controllers (e.g. T5)
507	 * MEM_HMA  = 4
508	 */
509	edc_size  = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
510	if (mtype == MEM_HMA) {
511		*mem_off = 2 * (edc_size * 1024 * 1024);
512	} else if (mtype != MEM_MC1) {
513		*mem_off = (mtype * (edc_size * 1024 * 1024));
514	} else {
515		mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
516						      MA_EXT_MEMORY0_BAR_A));
517		*mem_off = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
518	}
519
520	/* Each PCI-E Memory Window is programmed with a window size -- or
521	 * "aperture" -- which controls the granularity of its mapping onto
522	 * adapter memory.  We need to grab that aperture in order to know
523	 * how to use the specified window.  The window is also programmed
524	 * with the base address of the Memory Window in BAR0's address
525	 * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
526	 * the address is relative to BAR0.
527	 */
528	mem_reg = t4_read_reg(adap,
529			      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
530						  win));
531	/* a dead adapter will return 0xffffffff for PIO reads */
532	if (mem_reg == 0xffffffff)
533		return -ENXIO;
534
535	*mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
536	*mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
537	if (is_t4(adap->params.chip))
538		*mem_base -= adap->t4_bar0;
539
540	return 0;
541}
542
543/**
544 * t4_memory_update_win - Move memory window to specified address.
545 * @adap: the adapter
546 * @win: PCI-E Memory Window to use
547 * @addr: location to move.
548 *
549 * Move memory window to specified address.
550 */
551void t4_memory_update_win(struct adapter *adap, int win, u32 addr)
552{
553	t4_write_reg(adap,
554		     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
555		     addr);
556	/* Read it back to ensure that changes propagate before we
557	 * attempt to use the new value.
558	 */
559	t4_read_reg(adap,
560		    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
561}
562
/**
 * t4_memory_rw_residual - Read/Write residual data.
 * @adap: the adapter
 * @off: relative offset within residual to start read/write.
 * @addr: address within indicated memory type.
 * @buf: host memory buffer
 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 * Read/Write residual data less than 32-bits, using a single 32-bit
 * register access at @addr.
 */
void t4_memory_rw_residual(struct adapter *adap, u32 off, u32 addr, u8 *buf,
			   int dir)
{
	/* Staging union: lets the 32-bit register word be addressed one
	 * byte at a time.
	 */
	union {
		u32 word;
		char byte[4];
	} last;
	unsigned char *bp;
	int i;

	if (dir == T4_MEMORY_READ) {
		/* Read the whole word, then copy bytes [off, 4) of it into
		 * the same byte positions of @buf.
		 * NOTE(review): this transfers the TAIL bytes of the word,
		 * yet the caller (t4_memory_rw) passes off == resid where
		 * the residual data would occupy the FIRST resid bytes --
		 * confirm the intended byte range against hardware usage.
		 */
		last.word = le32_to_cpu((__force __le32)
					t4_read_reg(adap, addr));
		for (bp = (unsigned char *)buf, i = off; i < 4; i++)
			bp[i] = last.byte[i];
	} else {
		/* NOTE(review): since @buf is u8 *, "*buf" loads only one
		 * byte into last.word; any residual bytes beyond the first
		 * are never copied before the zero-fill -- verify.
		 */
		last.word = *buf;
		for (i = off; i < 4; i++)
			last.byte[i] = 0;
		t4_write_reg(adap, addr,
			     (__force u32)cpu_to_le32(last.word));
	}
}
596
/**
 *	t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
 *	@adap: the adapter
 *	@win: PCI-E Memory Window to use
 *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
 *	@addr: address within indicated memory type
 *	@len: amount of memory to transfer
 *	@hbuf: host memory buffer
 *	@dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 *	Reads/writes an [almost] arbitrary memory region in the firmware: the
 *	firmware memory address and host buffer must be aligned on 32-bit
 *	boundaries; the length may be arbitrary.  The memory is transferred as
 *	a raw byte sequence from/to the firmware's memory.  If this memory
 *	contains data structures which contain multi-byte integers, it's the
 *	caller's responsibility to perform appropriate byte order conversions.
 *
 *	Returns 0 on success, -EINVAL on misaligned arguments, or the error
 *	from t4_memory_rw_init() (e.g. -ENXIO for a dead adapter).
 */
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
		 u32 len, void *hbuf, int dir)
{
	u32 pos, offset, resid, memoffset;
	u32 win_pf, mem_aperture, mem_base;
	u32 *buf;
	int ret;

	/* Argument sanity checks ...
	 */
	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
		return -EINVAL;
	buf = (u32 *)hbuf;

	/* It's convenient to be able to handle lengths which aren't a
	 * multiple of 32-bits because we often end up transferring files to
	 * the firmware.  So we'll handle that by normalizing the length here
	 * and then handling any residual transfer at the end.
	 */
	resid = len & 0x3;
	len -= resid;

	/* Look up the target window's offset, base address, and aperture. */
	ret = t4_memory_rw_init(adap, win, mtype, &memoffset, &mem_base,
				&mem_aperture);
	if (ret)
		return ret;

	/* Determine the PCIE_MEM_ACCESS_OFFSET */
	addr = addr + memoffset;

	/* T4 window values carry no PF field; on later chips our PF number
	 * is OR'd into the window position below.
	 */
	win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);

	/* Calculate our initial PCI-E Memory Window Position and Offset into
	 * that Window.
	 */
	pos = addr & ~(mem_aperture - 1);
	offset = addr - pos;

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer.
	 */
	t4_memory_update_win(adap, win, pos | win_pf);

	/* Transfer data to/from the adapter as long as there's an integral
	 * number of 32-bit transfers to complete.
	 *
	 * A note on Endianness issues:
	 *
	 * The "register" reads and writes below from/to the PCI-E Memory
	 * Window invoke the standard adapter Big-Endian to PCI-E Link
	 * Little-Endian "swizzel."  As a result, if we have the following
	 * data in adapter memory:
	 *
	 *     Memory:  ... | b0 | b1 | b2 | b3 | ...
	 *     Address:      i+0  i+1  i+2  i+3
	 *
	 * Then a read of the adapter memory via the PCI-E Memory Window
	 * will yield:
	 *
	 *     x = readl(i)
	 *         31                  0
	 *         [ b3 | b2 | b1 | b0 ]
	 *
	 * If this value is stored into local memory on a Little-Endian system
	 * it will show up correctly in local memory as:
	 *
	 *     ( ..., b0, b1, b2, b3, ... )
	 *
	 * But on a Big-Endian system, the store will show up in memory
	 * incorrectly swizzled as:
	 *
	 *     ( ..., b3, b2, b1, b0, ... )
	 *
	 * So we need to account for this in the reads and writes to the
	 * PCI-E Memory Window below by undoing the register read/write
	 * swizzels.
	 */
	while (len > 0) {
		if (dir == T4_MEMORY_READ)
			*buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
						mem_base + offset));
		else
			t4_write_reg(adap, mem_base + offset,
				     (__force u32)cpu_to_le32(*buf++));
		offset += sizeof(__be32);
		len -= sizeof(__be32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.  Note that
		 * doing this here after "len" may be 0 allows us to set up
		 * the PCI-E Memory Window for a possible final residual
		 * transfer below ...
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_memory_update_win(adap, win, pos | win_pf);
		}
	}

	/* If the original transfer had a length which wasn't a multiple of
	 * 32-bits, now's where we need to finish off the transfer of the
	 * residual amount.  The PCI-E Memory Window has already been moved
	 * above (if necessary) to cover this final transfer.
	 */
	if (resid)
		t4_memory_rw_residual(adap, resid, mem_base + offset,
				      (u8 *)buf, dir);

	return 0;
}
725
726/* Return the specified PCI-E Configuration Space register from our Physical
727 * Function.  We try first via a Firmware LDST Command since we prefer to let
728 * the firmware own all of these registers, but if that fails we go for it
729 * directly ourselves.
730 */
731u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
732{
733	u32 val, ldst_addrspace;
734
735	/* If fw_attach != 0, construct and send the Firmware LDST Command to
736	 * retrieve the specified PCI-E Configuration Space register.
737	 */
738	struct fw_ldst_cmd ldst_cmd;
739	int ret;
740
741	memset(&ldst_cmd, 0, sizeof(ldst_cmd));
742	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
743	ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
744					       FW_CMD_REQUEST_F |
745					       FW_CMD_READ_F |
746					       ldst_addrspace);
747	ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
748	ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
749	ldst_cmd.u.pcie.ctrl_to_fn =
750		(FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
751	ldst_cmd.u.pcie.r = reg;
752
753	/* If the LDST Command succeeds, return the result, otherwise
754	 * fall through to reading it directly ourselves ...
755	 */
756	ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
757			 &ldst_cmd);
758	if (ret == 0)
759		val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
760	else
761		/* Read the desired Configuration Space register via the PCI-E
762		 * Backdoor mechanism.
763		 */
764		t4_hw_pci_read_cfg4(adap, reg, &val);
765	return val;
766}
767
768/* Get the window based on base passed to it.
769 * Window aperture is currently unhandled, but there is no use case for it
770 * right now
771 */
772static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
773			 u32 memwin_base)
774{
775	u32 ret;
776
777	if (is_t4(adap->params.chip)) {
778		u32 bar0;
779
780		/* Truncation intentional: we only read the bottom 32-bits of
781		 * the 64-bit BAR0/BAR1 ...  We use the hardware backdoor
782		 * mechanism to read BAR0 instead of using
783		 * pci_resource_start() because we could be operating from
784		 * within a Virtual Machine which is trapping our accesses to
785		 * our Configuration Space and we need to set up the PCI-E
786		 * Memory Window decoders with the actual addresses which will
787		 * be coming across the PCI-E link.
788		 */
789		bar0 = t4_read_pcie_cfg4(adap, pci_base);
790		bar0 &= pci_mask;
791		adap->t4_bar0 = bar0;
792
793		ret = bar0 + memwin_base;
794	} else {
795		/* For T5, only relative offset inside the PCIe BAR is passed */
796		ret = memwin_base;
797	}
798	return ret;
799}
800
801/* Get the default utility window (win0) used by everyone */
802u32 t4_get_util_window(struct adapter *adap)
803{
804	return t4_get_window(adap, PCI_BASE_ADDRESS_0,
805			     PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
806}
807
808/* Set up memory window for accessing adapter memory ranges.  (Read
809 * back MA register to ensure that changes propagate before we attempt
810 * to use the new values.)
811 */
812void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
813{
814	t4_write_reg(adap,
815		     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
816		     memwin_base | BIR_V(0) |
817		     WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
818	t4_read_reg(adap,
819		    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
820}
821
822/**
823 *	t4_get_regs_len - return the size of the chips register set
824 *	@adapter: the adapter
825 *
826 *	Returns the size of the chip's BAR0 register space.
827 */
828unsigned int t4_get_regs_len(struct adapter *adapter)
829{
830	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
831
832	switch (chip_version) {
833	case CHELSIO_T4:
834		return T4_REGMAP_SIZE;
835
836	case CHELSIO_T5:
837	case CHELSIO_T6:
838		return T5_REGMAP_SIZE;
839	}
840
841	dev_err(adapter->pdev_dev,
842		"Unsupported chip version %d\n", chip_version);
843	return 0;
844}
845
846/**
847 *	t4_get_regs - read chip registers into provided buffer
848 *	@adap: the adapter
849 *	@buf: register buffer
850 *	@buf_size: size (in bytes) of register buffer
851 *
852 *	If the provided register buffer isn't large enough for the chip's
853 *	full register range, the register dump will be truncated to the
854 *	register buffer's size.
855 */
856void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
857{
858	static const unsigned int t4_reg_ranges[] = {
859		0x1008, 0x1108,
860		0x1180, 0x1184,
861		0x1190, 0x1194,
862		0x11a0, 0x11a4,
863		0x11b0, 0x11b4,
864		0x11fc, 0x123c,
865		0x1300, 0x173c,
866		0x1800, 0x18fc,
867		0x3000, 0x30d8,
868		0x30e0, 0x30e4,
869		0x30ec, 0x5910,
870		0x5920, 0x5924,
871		0x5960, 0x5960,
872		0x5968, 0x5968,
873		0x5970, 0x5970,
874		0x5978, 0x5978,
875		0x5980, 0x5980,
876		0x5988, 0x5988,
877		0x5990, 0x5990,
878		0x5998, 0x5998,
879		0x59a0, 0x59d4,
880		0x5a00, 0x5ae0,
881		0x5ae8, 0x5ae8,
882		0x5af0, 0x5af0,
883		0x5af8, 0x5af8,
884		0x6000, 0x6098,
885		0x6100, 0x6150,
886		0x6200, 0x6208,
887		0x6240, 0x6248,
888		0x6280, 0x62b0,
889		0x62c0, 0x6338,
890		0x6370, 0x638c,
891		0x6400, 0x643c,
892		0x6500, 0x6524,
893		0x6a00, 0x6a04,
894		0x6a14, 0x6a38,
895		0x6a60, 0x6a70,
896		0x6a78, 0x6a78,
897		0x6b00, 0x6b0c,
898		0x6b1c, 0x6b84,
899		0x6bf0, 0x6bf8,
900		0x6c00, 0x6c0c,
901		0x6c1c, 0x6c84,
902		0x6cf0, 0x6cf8,
903		0x6d00, 0x6d0c,
904		0x6d1c, 0x6d84,
905		0x6df0, 0x6df8,
906		0x6e00, 0x6e0c,
907		0x6e1c, 0x6e84,
908		0x6ef0, 0x6ef8,
909		0x6f00, 0x6f0c,
910		0x6f1c, 0x6f84,
911		0x6ff0, 0x6ff8,
912		0x7000, 0x700c,
913		0x701c, 0x7084,
914		0x70f0, 0x70f8,
915		0x7100, 0x710c,
916		0x711c, 0x7184,
917		0x71f0, 0x71f8,
918		0x7200, 0x720c,
919		0x721c, 0x7284,
920		0x72f0, 0x72f8,
921		0x7300, 0x730c,
922		0x731c, 0x7384,
923		0x73f0, 0x73f8,
924		0x7400, 0x7450,
925		0x7500, 0x7530,
926		0x7600, 0x760c,
927		0x7614, 0x761c,
928		0x7680, 0x76cc,
929		0x7700, 0x7798,
930		0x77c0, 0x77fc,
931		0x7900, 0x79fc,
932		0x7b00, 0x7b58,
933		0x7b60, 0x7b84,
934		0x7b8c, 0x7c38,
935		0x7d00, 0x7d38,
936		0x7d40, 0x7d80,
937		0x7d8c, 0x7ddc,
938		0x7de4, 0x7e04,
939		0x7e10, 0x7e1c,
940		0x7e24, 0x7e38,
941		0x7e40, 0x7e44,
942		0x7e4c, 0x7e78,
943		0x7e80, 0x7ea4,
944		0x7eac, 0x7edc,
945		0x7ee8, 0x7efc,
946		0x8dc0, 0x8e04,
947		0x8e10, 0x8e1c,
948		0x8e30, 0x8e78,
949		0x8ea0, 0x8eb8,
950		0x8ec0, 0x8f6c,
951		0x8fc0, 0x9008,
952		0x9010, 0x9058,
953		0x9060, 0x9060,
954		0x9068, 0x9074,
955		0x90fc, 0x90fc,
956		0x9400, 0x9408,
957		0x9410, 0x9458,
958		0x9600, 0x9600,
959		0x9608, 0x9638,
960		0x9640, 0x96bc,
961		0x9800, 0x9808,
962		0x9820, 0x983c,
963		0x9850, 0x9864,
964		0x9c00, 0x9c6c,
965		0x9c80, 0x9cec,
966		0x9d00, 0x9d6c,
967		0x9d80, 0x9dec,
968		0x9e00, 0x9e6c,
969		0x9e80, 0x9eec,
970		0x9f00, 0x9f6c,
971		0x9f80, 0x9fec,
972		0xd004, 0xd004,
973		0xd010, 0xd03c,
974		0xdfc0, 0xdfe0,
975		0xe000, 0xea7c,
976		0xf000, 0x11110,
977		0x11118, 0x11190,
978		0x19040, 0x1906c,
979		0x19078, 0x19080,
980		0x1908c, 0x190e4,
981		0x190f0, 0x190f8,
982		0x19100, 0x19110,
983		0x19120, 0x19124,
984		0x19150, 0x19194,
985		0x1919c, 0x191b0,
986		0x191d0, 0x191e8,
987		0x19238, 0x1924c,
988		0x193f8, 0x1943c,
989		0x1944c, 0x19474,
990		0x19490, 0x194e0,
991		0x194f0, 0x194f8,
992		0x19800, 0x19c08,
993		0x19c10, 0x19c90,
994		0x19ca0, 0x19ce4,
995		0x19cf0, 0x19d40,
996		0x19d50, 0x19d94,
997		0x19da0, 0x19de8,
998		0x19df0, 0x19e40,
999		0x19e50, 0x19e90,
1000		0x19ea0, 0x19f4c,
1001		0x1a000, 0x1a004,
1002		0x1a010, 0x1a06c,
1003		0x1a0b0, 0x1a0e4,
1004		0x1a0ec, 0x1a0f4,
1005		0x1a100, 0x1a108,
1006		0x1a114, 0x1a120,
1007		0x1a128, 0x1a130,
1008		0x1a138, 0x1a138,
1009		0x1a190, 0x1a1c4,
1010		0x1a1fc, 0x1a1fc,
1011		0x1e040, 0x1e04c,
1012		0x1e284, 0x1e28c,
1013		0x1e2c0, 0x1e2c0,
1014		0x1e2e0, 0x1e2e0,
1015		0x1e300, 0x1e384,
1016		0x1e3c0, 0x1e3c8,
1017		0x1e440, 0x1e44c,
1018		0x1e684, 0x1e68c,
1019		0x1e6c0, 0x1e6c0,
1020		0x1e6e0, 0x1e6e0,
1021		0x1e700, 0x1e784,
1022		0x1e7c0, 0x1e7c8,
1023		0x1e840, 0x1e84c,
1024		0x1ea84, 0x1ea8c,
1025		0x1eac0, 0x1eac0,
1026		0x1eae0, 0x1eae0,
1027		0x1eb00, 0x1eb84,
1028		0x1ebc0, 0x1ebc8,
1029		0x1ec40, 0x1ec4c,
1030		0x1ee84, 0x1ee8c,
1031		0x1eec0, 0x1eec0,
1032		0x1eee0, 0x1eee0,
1033		0x1ef00, 0x1ef84,
1034		0x1efc0, 0x1efc8,
1035		0x1f040, 0x1f04c,
1036		0x1f284, 0x1f28c,
1037		0x1f2c0, 0x1f2c0,
1038		0x1f2e0, 0x1f2e0,
1039		0x1f300, 0x1f384,
1040		0x1f3c0, 0x1f3c8,
1041		0x1f440, 0x1f44c,
1042		0x1f684, 0x1f68c,
1043		0x1f6c0, 0x1f6c0,
1044		0x1f6e0, 0x1f6e0,
1045		0x1f700, 0x1f784,
1046		0x1f7c0, 0x1f7c8,
1047		0x1f840, 0x1f84c,
1048		0x1fa84, 0x1fa8c,
1049		0x1fac0, 0x1fac0,
1050		0x1fae0, 0x1fae0,
1051		0x1fb00, 0x1fb84,
1052		0x1fbc0, 0x1fbc8,
1053		0x1fc40, 0x1fc4c,
1054		0x1fe84, 0x1fe8c,
1055		0x1fec0, 0x1fec0,
1056		0x1fee0, 0x1fee0,
1057		0x1ff00, 0x1ff84,
1058		0x1ffc0, 0x1ffc8,
1059		0x20000, 0x2002c,
1060		0x20100, 0x2013c,
1061		0x20190, 0x201a0,
1062		0x201a8, 0x201b8,
1063		0x201c4, 0x201c8,
1064		0x20200, 0x20318,
1065		0x20400, 0x204b4,
1066		0x204c0, 0x20528,
1067		0x20540, 0x20614,
1068		0x21000, 0x21040,
1069		0x2104c, 0x21060,
1070		0x210c0, 0x210ec,
1071		0x21200, 0x21268,
1072		0x21270, 0x21284,
1073		0x212fc, 0x21388,
1074		0x21400, 0x21404,
1075		0x21500, 0x21500,
1076		0x21510, 0x21518,
1077		0x2152c, 0x21530,
1078		0x2153c, 0x2153c,
1079		0x21550, 0x21554,
1080		0x21600, 0x21600,
1081		0x21608, 0x2161c,
1082		0x21624, 0x21628,
1083		0x21630, 0x21634,
1084		0x2163c, 0x2163c,
1085		0x21700, 0x2171c,
1086		0x21780, 0x2178c,
1087		0x21800, 0x21818,
1088		0x21820, 0x21828,
1089		0x21830, 0x21848,
1090		0x21850, 0x21854,
1091		0x21860, 0x21868,
1092		0x21870, 0x21870,
1093		0x21878, 0x21898,
1094		0x218a0, 0x218a8,
1095		0x218b0, 0x218c8,
1096		0x218d0, 0x218d4,
1097		0x218e0, 0x218e8,
1098		0x218f0, 0x218f0,
1099		0x218f8, 0x21a18,
1100		0x21a20, 0x21a28,
1101		0x21a30, 0x21a48,
1102		0x21a50, 0x21a54,
1103		0x21a60, 0x21a68,
1104		0x21a70, 0x21a70,
1105		0x21a78, 0x21a98,
1106		0x21aa0, 0x21aa8,
1107		0x21ab0, 0x21ac8,
1108		0x21ad0, 0x21ad4,
1109		0x21ae0, 0x21ae8,
1110		0x21af0, 0x21af0,
1111		0x21af8, 0x21c18,
1112		0x21c20, 0x21c20,
1113		0x21c28, 0x21c30,
1114		0x21c38, 0x21c38,
1115		0x21c80, 0x21c98,
1116		0x21ca0, 0x21ca8,
1117		0x21cb0, 0x21cc8,
1118		0x21cd0, 0x21cd4,
1119		0x21ce0, 0x21ce8,
1120		0x21cf0, 0x21cf0,
1121		0x21cf8, 0x21d7c,
1122		0x21e00, 0x21e04,
1123		0x22000, 0x2202c,
1124		0x22100, 0x2213c,
1125		0x22190, 0x221a0,
1126		0x221a8, 0x221b8,
1127		0x221c4, 0x221c8,
1128		0x22200, 0x22318,
1129		0x22400, 0x224b4,
1130		0x224c0, 0x22528,
1131		0x22540, 0x22614,
1132		0x23000, 0x23040,
1133		0x2304c, 0x23060,
1134		0x230c0, 0x230ec,
1135		0x23200, 0x23268,
1136		0x23270, 0x23284,
1137		0x232fc, 0x23388,
1138		0x23400, 0x23404,
1139		0x23500, 0x23500,
1140		0x23510, 0x23518,
1141		0x2352c, 0x23530,
1142		0x2353c, 0x2353c,
1143		0x23550, 0x23554,
1144		0x23600, 0x23600,
1145		0x23608, 0x2361c,
1146		0x23624, 0x23628,
1147		0x23630, 0x23634,
1148		0x2363c, 0x2363c,
1149		0x23700, 0x2371c,
1150		0x23780, 0x2378c,
1151		0x23800, 0x23818,
1152		0x23820, 0x23828,
1153		0x23830, 0x23848,
1154		0x23850, 0x23854,
1155		0x23860, 0x23868,
1156		0x23870, 0x23870,
1157		0x23878, 0x23898,
1158		0x238a0, 0x238a8,
1159		0x238b0, 0x238c8,
1160		0x238d0, 0x238d4,
1161		0x238e0, 0x238e8,
1162		0x238f0, 0x238f0,
1163		0x238f8, 0x23a18,
1164		0x23a20, 0x23a28,
1165		0x23a30, 0x23a48,
1166		0x23a50, 0x23a54,
1167		0x23a60, 0x23a68,
1168		0x23a70, 0x23a70,
1169		0x23a78, 0x23a98,
1170		0x23aa0, 0x23aa8,
1171		0x23ab0, 0x23ac8,
1172		0x23ad0, 0x23ad4,
1173		0x23ae0, 0x23ae8,
1174		0x23af0, 0x23af0,
1175		0x23af8, 0x23c18,
1176		0x23c20, 0x23c20,
1177		0x23c28, 0x23c30,
1178		0x23c38, 0x23c38,
1179		0x23c80, 0x23c98,
1180		0x23ca0, 0x23ca8,
1181		0x23cb0, 0x23cc8,
1182		0x23cd0, 0x23cd4,
1183		0x23ce0, 0x23ce8,
1184		0x23cf0, 0x23cf0,
1185		0x23cf8, 0x23d7c,
1186		0x23e00, 0x23e04,
1187		0x24000, 0x2402c,
1188		0x24100, 0x2413c,
1189		0x24190, 0x241a0,
1190		0x241a8, 0x241b8,
1191		0x241c4, 0x241c8,
1192		0x24200, 0x24318,
1193		0x24400, 0x244b4,
1194		0x244c0, 0x24528,
1195		0x24540, 0x24614,
1196		0x25000, 0x25040,
1197		0x2504c, 0x25060,
1198		0x250c0, 0x250ec,
1199		0x25200, 0x25268,
1200		0x25270, 0x25284,
1201		0x252fc, 0x25388,
1202		0x25400, 0x25404,
1203		0x25500, 0x25500,
1204		0x25510, 0x25518,
1205		0x2552c, 0x25530,
1206		0x2553c, 0x2553c,
1207		0x25550, 0x25554,
1208		0x25600, 0x25600,
1209		0x25608, 0x2561c,
1210		0x25624, 0x25628,
1211		0x25630, 0x25634,
1212		0x2563c, 0x2563c,
1213		0x25700, 0x2571c,
1214		0x25780, 0x2578c,
1215		0x25800, 0x25818,
1216		0x25820, 0x25828,
1217		0x25830, 0x25848,
1218		0x25850, 0x25854,
1219		0x25860, 0x25868,
1220		0x25870, 0x25870,
1221		0x25878, 0x25898,
1222		0x258a0, 0x258a8,
1223		0x258b0, 0x258c8,
1224		0x258d0, 0x258d4,
1225		0x258e0, 0x258e8,
1226		0x258f0, 0x258f0,
1227		0x258f8, 0x25a18,
1228		0x25a20, 0x25a28,
1229		0x25a30, 0x25a48,
1230		0x25a50, 0x25a54,
1231		0x25a60, 0x25a68,
1232		0x25a70, 0x25a70,
1233		0x25a78, 0x25a98,
1234		0x25aa0, 0x25aa8,
1235		0x25ab0, 0x25ac8,
1236		0x25ad0, 0x25ad4,
1237		0x25ae0, 0x25ae8,
1238		0x25af0, 0x25af0,
1239		0x25af8, 0x25c18,
1240		0x25c20, 0x25c20,
1241		0x25c28, 0x25c30,
1242		0x25c38, 0x25c38,
1243		0x25c80, 0x25c98,
1244		0x25ca0, 0x25ca8,
1245		0x25cb0, 0x25cc8,
1246		0x25cd0, 0x25cd4,
1247		0x25ce0, 0x25ce8,
1248		0x25cf0, 0x25cf0,
1249		0x25cf8, 0x25d7c,
1250		0x25e00, 0x25e04,
1251		0x26000, 0x2602c,
1252		0x26100, 0x2613c,
1253		0x26190, 0x261a0,
1254		0x261a8, 0x261b8,
1255		0x261c4, 0x261c8,
1256		0x26200, 0x26318,
1257		0x26400, 0x264b4,
1258		0x264c0, 0x26528,
1259		0x26540, 0x26614,
1260		0x27000, 0x27040,
1261		0x2704c, 0x27060,
1262		0x270c0, 0x270ec,
1263		0x27200, 0x27268,
1264		0x27270, 0x27284,
1265		0x272fc, 0x27388,
1266		0x27400, 0x27404,
1267		0x27500, 0x27500,
1268		0x27510, 0x27518,
1269		0x2752c, 0x27530,
1270		0x2753c, 0x2753c,
1271		0x27550, 0x27554,
1272		0x27600, 0x27600,
1273		0x27608, 0x2761c,
1274		0x27624, 0x27628,
1275		0x27630, 0x27634,
1276		0x2763c, 0x2763c,
1277		0x27700, 0x2771c,
1278		0x27780, 0x2778c,
1279		0x27800, 0x27818,
1280		0x27820, 0x27828,
1281		0x27830, 0x27848,
1282		0x27850, 0x27854,
1283		0x27860, 0x27868,
1284		0x27870, 0x27870,
1285		0x27878, 0x27898,
1286		0x278a0, 0x278a8,
1287		0x278b0, 0x278c8,
1288		0x278d0, 0x278d4,
1289		0x278e0, 0x278e8,
1290		0x278f0, 0x278f0,
1291		0x278f8, 0x27a18,
1292		0x27a20, 0x27a28,
1293		0x27a30, 0x27a48,
1294		0x27a50, 0x27a54,
1295		0x27a60, 0x27a68,
1296		0x27a70, 0x27a70,
1297		0x27a78, 0x27a98,
1298		0x27aa0, 0x27aa8,
1299		0x27ab0, 0x27ac8,
1300		0x27ad0, 0x27ad4,
1301		0x27ae0, 0x27ae8,
1302		0x27af0, 0x27af0,
1303		0x27af8, 0x27c18,
1304		0x27c20, 0x27c20,
1305		0x27c28, 0x27c30,
1306		0x27c38, 0x27c38,
1307		0x27c80, 0x27c98,
1308		0x27ca0, 0x27ca8,
1309		0x27cb0, 0x27cc8,
1310		0x27cd0, 0x27cd4,
1311		0x27ce0, 0x27ce8,
1312		0x27cf0, 0x27cf0,
1313		0x27cf8, 0x27d7c,
1314		0x27e00, 0x27e04,
1315	};
1316
/*
 * Register address ranges for T5-family adapters.
 *
 * Consecutive entries form pairs; given the "_reg_ranges" naming and the
 * paired layout, each pair appears to be an inclusive (start, end) span of
 * register addresses that are safe/meaningful to read on a T5 chip —
 * NOTE(review): confirm the exact walk semantics against the code that
 * consumes this table (not visible in this file chunk).
 *
 * The values mirror the T5 hardware register map. Do not reorder, merge,
 * or "clean up" entries: single-register spans (start == end) and the
 * gaps between spans are deliberate.
 */
static const unsigned int t5_reg_ranges[] = {
	0x1008, 0x10c0,
	0x10cc, 0x10f8,
	0x1100, 0x1100,
	0x110c, 0x1148,
	0x1180, 0x1184,
	0x1190, 0x1194,
	0x11a0, 0x11a4,
	0x11b0, 0x11b4,
	0x11fc, 0x123c,
	0x1280, 0x173c,
	0x1800, 0x18fc,
	0x3000, 0x3028,
	0x3060, 0x30b0,
	0x30b8, 0x30d8,
	0x30e0, 0x30fc,
	0x3140, 0x357c,
	0x35a8, 0x35cc,
	0x35ec, 0x35ec,
	0x3600, 0x5624,
	0x56cc, 0x56ec,
	0x56f4, 0x5720,
	0x5728, 0x575c,
	0x580c, 0x5814,
	0x5890, 0x589c,
	0x58a4, 0x58ac,
	0x58b8, 0x58bc,
	0x5940, 0x59c8,
	0x59d0, 0x59dc,
	0x59fc, 0x5a18,
	0x5a60, 0x5a70,
	0x5a80, 0x5a9c,
	0x5b94, 0x5bfc,
	0x6000, 0x6020,
	0x6028, 0x6040,
	0x6058, 0x609c,
	0x60a8, 0x614c,
	0x7700, 0x7798,
	0x77c0, 0x78fc,
	0x7b00, 0x7b58,
	0x7b60, 0x7b84,
	0x7b8c, 0x7c54,
	0x7d00, 0x7d38,
	0x7d40, 0x7d80,
	0x7d8c, 0x7ddc,
	0x7de4, 0x7e04,
	0x7e10, 0x7e1c,
	0x7e24, 0x7e38,
	0x7e40, 0x7e44,
	0x7e4c, 0x7e78,
	0x7e80, 0x7edc,
	0x7ee8, 0x7efc,
	0x8dc0, 0x8de0,
	0x8df8, 0x8e04,
	0x8e10, 0x8e84,
	0x8ea0, 0x8f84,
	0x8fc0, 0x9058,
	0x9060, 0x9060,
	0x9068, 0x90f8,
	0x9400, 0x9408,
	0x9410, 0x9470,
	0x9600, 0x9600,
	0x9608, 0x9638,
	0x9640, 0x96f4,
	0x9800, 0x9808,
	0x9810, 0x9864,
	0x9c00, 0x9c6c,
	0x9c80, 0x9cec,
	0x9d00, 0x9d6c,
	0x9d80, 0x9dec,
	0x9e00, 0x9e6c,
	0x9e80, 0x9eec,
	0x9f00, 0x9f6c,
	0x9f80, 0xa020,
	0xd000, 0xd004,
	0xd010, 0xd03c,
	0xdfc0, 0xdfe0,
	0xe000, 0x1106c,
	0x11074, 0x11088,
	0x1109c, 0x1117c,
	0x11190, 0x11204,
	0x19040, 0x1906c,
	0x19078, 0x19080,
	0x1908c, 0x190e8,
	0x190f0, 0x190f8,
	0x19100, 0x19110,
	0x19120, 0x19124,
	0x19150, 0x19194,
	0x1919c, 0x191b0,
	0x191d0, 0x191e8,
	0x19238, 0x19290,
	0x193f8, 0x19428,
	0x19430, 0x19444,
	0x1944c, 0x1946c,
	0x19474, 0x19474,
	0x19490, 0x194cc,
	0x194f0, 0x194f8,
	0x19c00, 0x19c08,
	0x19c10, 0x19c60,
	0x19c94, 0x19ce4,
	0x19cf0, 0x19d40,
	0x19d50, 0x19d94,
	0x19da0, 0x19de8,
	0x19df0, 0x19e10,
	0x19e50, 0x19e90,
	0x19ea0, 0x19f24,
	0x19f34, 0x19f34,
	0x19f40, 0x19f50,
	0x19f90, 0x19fb4,
	0x19fc4, 0x19fe4,
	0x1a000, 0x1a004,
	0x1a010, 0x1a06c,
	0x1a0b0, 0x1a0e4,
	0x1a0ec, 0x1a0f8,
	0x1a100, 0x1a108,
	0x1a114, 0x1a130,
	0x1a138, 0x1a1c4,
	0x1a1fc, 0x1a1fc,
	0x1e008, 0x1e00c,
	0x1e040, 0x1e044,
	0x1e04c, 0x1e04c,
	0x1e284, 0x1e290,
	0x1e2c0, 0x1e2c0,
	0x1e2e0, 0x1e2e0,
	0x1e300, 0x1e384,
	0x1e3c0, 0x1e3c8,
	0x1e408, 0x1e40c,
	0x1e440, 0x1e444,
	0x1e44c, 0x1e44c,
	0x1e684, 0x1e690,
	0x1e6c0, 0x1e6c0,
	0x1e6e0, 0x1e6e0,
	0x1e700, 0x1e784,
	0x1e7c0, 0x1e7c8,
	0x1e808, 0x1e80c,
	0x1e840, 0x1e844,
	0x1e84c, 0x1e84c,
	0x1ea84, 0x1ea90,
	0x1eac0, 0x1eac0,
	0x1eae0, 0x1eae0,
	0x1eb00, 0x1eb84,
	0x1ebc0, 0x1ebc8,
	0x1ec08, 0x1ec0c,
	0x1ec40, 0x1ec44,
	0x1ec4c, 0x1ec4c,
	0x1ee84, 0x1ee90,
	0x1eec0, 0x1eec0,
	0x1eee0, 0x1eee0,
	0x1ef00, 0x1ef84,
	0x1efc0, 0x1efc8,
	0x1f008, 0x1f00c,
	0x1f040, 0x1f044,
	0x1f04c, 0x1f04c,
	0x1f284, 0x1f290,
	0x1f2c0, 0x1f2c0,
	0x1f2e0, 0x1f2e0,
	0x1f300, 0x1f384,
	0x1f3c0, 0x1f3c8,
	0x1f408, 0x1f40c,
	0x1f440, 0x1f444,
	0x1f44c, 0x1f44c,
	0x1f684, 0x1f690,
	0x1f6c0, 0x1f6c0,
	0x1f6e0, 0x1f6e0,
	0x1f700, 0x1f784,
	0x1f7c0, 0x1f7c8,
	0x1f808, 0x1f80c,
	0x1f840, 0x1f844,
	0x1f84c, 0x1f84c,
	0x1fa84, 0x1fa90,
	0x1fac0, 0x1fac0,
	0x1fae0, 0x1fae0,
	0x1fb00, 0x1fb84,
	0x1fbc0, 0x1fbc8,
	0x1fc08, 0x1fc0c,
	0x1fc40, 0x1fc44,
	0x1fc4c, 0x1fc4c,
	0x1fe84, 0x1fe90,
	0x1fec0, 0x1fec0,
	0x1fee0, 0x1fee0,
	0x1ff00, 0x1ff84,
	0x1ffc0, 0x1ffc8,
	0x30000, 0x30030,
	0x30100, 0x30144,
	0x30190, 0x301a0,
	0x301a8, 0x301b8,
	0x301c4, 0x301c8,
	0x301d0, 0x301d0,
	0x30200, 0x30318,
	0x30400, 0x304b4,
	0x304c0, 0x3052c,
	0x30540, 0x3061c,
	0x30800, 0x30828,
	0x30834, 0x30834,
	0x308c0, 0x30908,
	0x30910, 0x309ac,
	0x30a00, 0x30a14,
	0x30a1c, 0x30a2c,
	0x30a44, 0x30a50,
	0x30a74, 0x30a74,
	0x30a7c, 0x30afc,
	0x30b08, 0x30c24,
	0x30d00, 0x30d00,
	0x30d08, 0x30d14,
	0x30d1c, 0x30d20,
	0x30d3c, 0x30d3c,
	0x30d48, 0x30d50,
	0x31200, 0x3120c,
	0x31220, 0x31220,
	0x31240, 0x31240,
	0x31600, 0x3160c,
	0x31a00, 0x31a1c,
	0x31e00, 0x31e20,
	0x31e38, 0x31e3c,
	0x31e80, 0x31e80,
	0x31e88, 0x31ea8,
	0x31eb0, 0x31eb4,
	0x31ec8, 0x31ed4,
	0x31fb8, 0x32004,
	0x32200, 0x32200,
	0x32208, 0x32240,
	0x32248, 0x32280,
	0x32288, 0x322c0,
	0x322c8, 0x322fc,
	0x32600, 0x32630,
	0x32a00, 0x32abc,
	0x32b00, 0x32b10,
	0x32b20, 0x32b30,
	0x32b40, 0x32b50,
	0x32b60, 0x32b70,
	0x33000, 0x33028,
	0x33030, 0x33048,
	0x33060, 0x33068,
	0x33070, 0x3309c,
	0x330f0, 0x33128,
	0x33130, 0x33148,
	0x33160, 0x33168,
	0x33170, 0x3319c,
	0x331f0, 0x33238,
	0x33240, 0x33240,
	0x33248, 0x33250,
	0x3325c, 0x33264,
	0x33270, 0x332b8,
	0x332c0, 0x332e4,
	0x332f8, 0x33338,
	0x33340, 0x33340,
	0x33348, 0x33350,
	0x3335c, 0x33364,
	0x33370, 0x333b8,
	0x333c0, 0x333e4,
	0x333f8, 0x33428,
	0x33430, 0x33448,
	0x33460, 0x33468,
	0x33470, 0x3349c,
	0x334f0, 0x33528,
	0x33530, 0x33548,
	0x33560, 0x33568,
	0x33570, 0x3359c,
	0x335f0, 0x33638,
	0x33640, 0x33640,
	0x33648, 0x33650,
	0x3365c, 0x33664,
	0x33670, 0x336b8,
	0x336c0, 0x336e4,
	0x336f8, 0x33738,
	0x33740, 0x33740,
	0x33748, 0x33750,
	0x3375c, 0x33764,
	0x33770, 0x337b8,
	0x337c0, 0x337e4,
	0x337f8, 0x337fc,
	0x33814, 0x33814,
	0x3382c, 0x3382c,
	0x33880, 0x3388c,
	0x338e8, 0x338ec,
	0x33900, 0x33928,
	0x33930, 0x33948,
	0x33960, 0x33968,
	0x33970, 0x3399c,
	0x339f0, 0x33a38,
	0x33a40, 0x33a40,
	0x33a48, 0x33a50,
	0x33a5c, 0x33a64,
	0x33a70, 0x33ab8,
	0x33ac0, 0x33ae4,
	0x33af8, 0x33b10,
	0x33b28, 0x33b28,
	0x33b3c, 0x33b50,
	0x33bf0, 0x33c10,
	0x33c28, 0x33c28,
	0x33c3c, 0x33c50,
	0x33cf0, 0x33cfc,
	0x34000, 0x34030,
	0x34100, 0x34144,
	0x34190, 0x341a0,
	0x341a8, 0x341b8,
	0x341c4, 0x341c8,
	0x341d0, 0x341d0,
	0x34200, 0x34318,
	0x34400, 0x344b4,
	0x344c0, 0x3452c,
	0x34540, 0x3461c,
	0x34800, 0x34828,
	0x34834, 0x34834,
	0x348c0, 0x34908,
	0x34910, 0x349ac,
	0x34a00, 0x34a14,
	0x34a1c, 0x34a2c,
	0x34a44, 0x34a50,
	0x34a74, 0x34a74,
	0x34a7c, 0x34afc,
	0x34b08, 0x34c24,
	0x34d00, 0x34d00,
	0x34d08, 0x34d14,
	0x34d1c, 0x34d20,
	0x34d3c, 0x34d3c,
	0x34d48, 0x34d50,
	0x35200, 0x3520c,
	0x35220, 0x35220,
	0x35240, 0x35240,
	0x35600, 0x3560c,
	0x35a00, 0x35a1c,
	0x35e00, 0x35e20,
	0x35e38, 0x35e3c,
	0x35e80, 0x35e80,
	0x35e88, 0x35ea8,
	0x35eb0, 0x35eb4,
	0x35ec8, 0x35ed4,
	0x35fb8, 0x36004,
	0x36200, 0x36200,
	0x36208, 0x36240,
	0x36248, 0x36280,
	0x36288, 0x362c0,
	0x362c8, 0x362fc,
	0x36600, 0x36630,
	0x36a00, 0x36abc,
	0x36b00, 0x36b10,
	0x36b20, 0x36b30,
	0x36b40, 0x36b50,
	0x36b60, 0x36b70,
	0x37000, 0x37028,
	0x37030, 0x37048,
	0x37060, 0x37068,
	0x37070, 0x3709c,
	0x370f0, 0x37128,
	0x37130, 0x37148,
	0x37160, 0x37168,
	0x37170, 0x3719c,
	0x371f0, 0x37238,
	0x37240, 0x37240,
	0x37248, 0x37250,
	0x3725c, 0x37264,
	0x37270, 0x372b8,
	0x372c0, 0x372e4,
	0x372f8, 0x37338,
	0x37340, 0x37340,
	0x37348, 0x37350,
	0x3735c, 0x37364,
	0x37370, 0x373b8,
	0x373c0, 0x373e4,
	0x373f8, 0x37428,
	0x37430, 0x37448,
	0x37460, 0x37468,
	0x37470, 0x3749c,
	0x374f0, 0x37528,
	0x37530, 0x37548,
	0x37560, 0x37568,
	0x37570, 0x3759c,
	0x375f0, 0x37638,
	0x37640, 0x37640,
	0x37648, 0x37650,
	0x3765c, 0x37664,
	0x37670, 0x376b8,
	0x376c0, 0x376e4,
	0x376f8, 0x37738,
	0x37740, 0x37740,
	0x37748, 0x37750,
	0x3775c, 0x37764,
	0x37770, 0x377b8,
	0x377c0, 0x377e4,
	0x377f8, 0x377fc,
	0x37814, 0x37814,
	0x3782c, 0x3782c,
	0x37880, 0x3788c,
	0x378e8, 0x378ec,
	0x37900, 0x37928,
	0x37930, 0x37948,
	0x37960, 0x37968,
	0x37970, 0x3799c,
	0x379f0, 0x37a38,
	0x37a40, 0x37a40,
	0x37a48, 0x37a50,
	0x37a5c, 0x37a64,
	0x37a70, 0x37ab8,
	0x37ac0, 0x37ae4,
	0x37af8, 0x37b10,
	0x37b28, 0x37b28,
	0x37b3c, 0x37b50,
	0x37bf0, 0x37c10,
	0x37c28, 0x37c28,
	0x37c3c, 0x37c50,
	0x37cf0, 0x37cfc,
	0x38000, 0x38030,
	0x38100, 0x38144,
	0x38190, 0x381a0,
	0x381a8, 0x381b8,
	0x381c4, 0x381c8,
	0x381d0, 0x381d0,
	0x38200, 0x38318,
	0x38400, 0x384b4,
	0x384c0, 0x3852c,
	0x38540, 0x3861c,
	0x38800, 0x38828,
	0x38834, 0x38834,
	0x388c0, 0x38908,
	0x38910, 0x389ac,
	0x38a00, 0x38a14,
	0x38a1c, 0x38a2c,
	0x38a44, 0x38a50,
	0x38a74, 0x38a74,
	0x38a7c, 0x38afc,
	0x38b08, 0x38c24,
	0x38d00, 0x38d00,
	0x38d08, 0x38d14,
	0x38d1c, 0x38d20,
	0x38d3c, 0x38d3c,
	0x38d48, 0x38d50,
	0x39200, 0x3920c,
	0x39220, 0x39220,
	0x39240, 0x39240,
	0x39600, 0x3960c,
	0x39a00, 0x39a1c,
	0x39e00, 0x39e20,
	0x39e38, 0x39e3c,
	0x39e80, 0x39e80,
	0x39e88, 0x39ea8,
	0x39eb0, 0x39eb4,
	0x39ec8, 0x39ed4,
	0x39fb8, 0x3a004,
	0x3a200, 0x3a200,
	0x3a208, 0x3a240,
	0x3a248, 0x3a280,
	0x3a288, 0x3a2c0,
	0x3a2c8, 0x3a2fc,
	0x3a600, 0x3a630,
	0x3aa00, 0x3aabc,
	0x3ab00, 0x3ab10,
	0x3ab20, 0x3ab30,
	0x3ab40, 0x3ab50,
	0x3ab60, 0x3ab70,
	0x3b000, 0x3b028,
	0x3b030, 0x3b048,
	0x3b060, 0x3b068,
	0x3b070, 0x3b09c,
	0x3b0f0, 0x3b128,
	0x3b130, 0x3b148,
	0x3b160, 0x3b168,
	0x3b170, 0x3b19c,
	0x3b1f0, 0x3b238,
	0x3b240, 0x3b240,
	0x3b248, 0x3b250,
	0x3b25c, 0x3b264,
	0x3b270, 0x3b2b8,
	0x3b2c0, 0x3b2e4,
	0x3b2f8, 0x3b338,
	0x3b340, 0x3b340,
	0x3b348, 0x3b350,
	0x3b35c, 0x3b364,
	0x3b370, 0x3b3b8,
	0x3b3c0, 0x3b3e4,
	0x3b3f8, 0x3b428,
	0x3b430, 0x3b448,
	0x3b460, 0x3b468,
	0x3b470, 0x3b49c,
	0x3b4f0, 0x3b528,
	0x3b530, 0x3b548,
	0x3b560, 0x3b568,
	0x3b570, 0x3b59c,
	0x3b5f0, 0x3b638,
	0x3b640, 0x3b640,
	0x3b648, 0x3b650,
	0x3b65c, 0x3b664,
	0x3b670, 0x3b6b8,
	0x3b6c0, 0x3b6e4,
	0x3b6f8, 0x3b738,
	0x3b740, 0x3b740,
	0x3b748, 0x3b750,
	0x3b75c, 0x3b764,
	0x3b770, 0x3b7b8,
	0x3b7c0, 0x3b7e4,
	0x3b7f8, 0x3b7fc,
	0x3b814, 0x3b814,
	0x3b82c, 0x3b82c,
	0x3b880, 0x3b88c,
	0x3b8e8, 0x3b8ec,
	0x3b900, 0x3b928,
	0x3b930, 0x3b948,
	0x3b960, 0x3b968,
	0x3b970, 0x3b99c,
	0x3b9f0, 0x3ba38,
	0x3ba40, 0x3ba40,
	0x3ba48, 0x3ba50,
	0x3ba5c, 0x3ba64,
	0x3ba70, 0x3bab8,
	0x3bac0, 0x3bae4,
	0x3baf8, 0x3bb10,
	0x3bb28, 0x3bb28,
	0x3bb3c, 0x3bb50,
	0x3bbf0, 0x3bc10,
	0x3bc28, 0x3bc28,
	0x3bc3c, 0x3bc50,
	0x3bcf0, 0x3bcfc,
	0x3c000, 0x3c030,
	0x3c100, 0x3c144,
	0x3c190, 0x3c1a0,
	0x3c1a8, 0x3c1b8,
	0x3c1c4, 0x3c1c8,
	0x3c1d0, 0x3c1d0,
	0x3c200, 0x3c318,
	0x3c400, 0x3c4b4,
	0x3c4c0, 0x3c52c,
	0x3c540, 0x3c61c,
	0x3c800, 0x3c828,
	0x3c834, 0x3c834,
	0x3c8c0, 0x3c908,
	0x3c910, 0x3c9ac,
	0x3ca00, 0x3ca14,
	0x3ca1c, 0x3ca2c,
	0x3ca44, 0x3ca50,
	0x3ca74, 0x3ca74,
	0x3ca7c, 0x3cafc,
	0x3cb08, 0x3cc24,
	0x3cd00, 0x3cd00,
	0x3cd08, 0x3cd14,
	0x3cd1c, 0x3cd20,
	0x3cd3c, 0x3cd3c,
	0x3cd48, 0x3cd50,
	0x3d200, 0x3d20c,
	0x3d220, 0x3d220,
	0x3d240, 0x3d240,
	0x3d600, 0x3d60c,
	0x3da00, 0x3da1c,
	0x3de00, 0x3de20,
	0x3de38, 0x3de3c,
	0x3de80, 0x3de80,
	0x3de88, 0x3dea8,
	0x3deb0, 0x3deb4,
	0x3dec8, 0x3ded4,
	0x3dfb8, 0x3e004,
	0x3e200, 0x3e200,
	0x3e208, 0x3e240,
	0x3e248, 0x3e280,
	0x3e288, 0x3e2c0,
	0x3e2c8, 0x3e2fc,
	0x3e600, 0x3e630,
	0x3ea00, 0x3eabc,
	0x3eb00, 0x3eb10,
	0x3eb20, 0x3eb30,
	0x3eb40, 0x3eb50,
	0x3eb60, 0x3eb70,
	0x3f000, 0x3f028,
	0x3f030, 0x3f048,
	0x3f060, 0x3f068,
	0x3f070, 0x3f09c,
	0x3f0f0, 0x3f128,
	0x3f130, 0x3f148,
	0x3f160, 0x3f168,
	0x3f170, 0x3f19c,
	0x3f1f0, 0x3f238,
	0x3f240, 0x3f240,
	0x3f248, 0x3f250,
	0x3f25c, 0x3f264,
	0x3f270, 0x3f2b8,
	0x3f2c0, 0x3f2e4,
	0x3f2f8, 0x3f338,
	0x3f340, 0x3f340,
	0x3f348, 0x3f350,
	0x3f35c, 0x3f364,
	0x3f370, 0x3f3b8,
	0x3f3c0, 0x3f3e4,
	0x3f3f8, 0x3f428,
	0x3f430, 0x3f448,
	0x3f460, 0x3f468,
	0x3f470, 0x3f49c,
	0x3f4f0, 0x3f528,
	0x3f530, 0x3f548,
	0x3f560, 0x3f568,
	0x3f570, 0x3f59c,
	0x3f5f0, 0x3f638,
	0x3f640, 0x3f640,
	0x3f648, 0x3f650,
	0x3f65c, 0x3f664,
	0x3f670, 0x3f6b8,
	0x3f6c0, 0x3f6e4,
	0x3f6f8, 0x3f738,
	0x3f740, 0x3f740,
	0x3f748, 0x3f750,
	0x3f75c, 0x3f764,
	0x3f770, 0x3f7b8,
	0x3f7c0, 0x3f7e4,
	0x3f7f8, 0x3f7fc,
	0x3f814, 0x3f814,
	0x3f82c, 0x3f82c,
	0x3f880, 0x3f88c,
	0x3f8e8, 0x3f8ec,
	0x3f900, 0x3f928,
	0x3f930, 0x3f948,
	0x3f960, 0x3f968,
	0x3f970, 0x3f99c,
	0x3f9f0, 0x3fa38,
	0x3fa40, 0x3fa40,
	0x3fa48, 0x3fa50,
	0x3fa5c, 0x3fa64,
	0x3fa70, 0x3fab8,
	0x3fac0, 0x3fae4,
	0x3faf8, 0x3fb10,
	0x3fb28, 0x3fb28,
	0x3fb3c, 0x3fb50,
	0x3fbf0, 0x3fc10,
	0x3fc28, 0x3fc28,
	0x3fc3c, 0x3fc50,
	0x3fcf0, 0x3fcfc,
	0x40000, 0x4000c,
	0x40040, 0x40050,
	0x40060, 0x40068,
	0x4007c, 0x4008c,
	0x40094, 0x400b0,
	0x400c0, 0x40144,
	0x40180, 0x4018c,
	0x40200, 0x40254,
	0x40260, 0x40264,
	0x40270, 0x40288,
	0x40290, 0x40298,
	0x402ac, 0x402c8,
	0x402d0, 0x402e0,
	0x402f0, 0x402f0,
	0x40300, 0x4033c,
	0x403f8, 0x403fc,
	0x41304, 0x413c4,
	0x41400, 0x4140c,
	0x41414, 0x4141c,
	0x41480, 0x414d0,
	0x44000, 0x44054,
	0x4405c, 0x44078,
	0x440c0, 0x44174,
	0x44180, 0x441ac,
	0x441b4, 0x441b8,
	0x441c0, 0x44254,
	0x4425c, 0x44278,
	0x442c0, 0x44374,
	0x44380, 0x443ac,
	0x443b4, 0x443b8,
	0x443c0, 0x44454,
	0x4445c, 0x44478,
	0x444c0, 0x44574,
	0x44580, 0x445ac,
	0x445b4, 0x445b8,
	0x445c0, 0x44654,
	0x4465c, 0x44678,
	0x446c0, 0x44774,
	0x44780, 0x447ac,
	0x447b4, 0x447b8,
	0x447c0, 0x44854,
	0x4485c, 0x44878,
	0x448c0, 0x44974,
	0x44980, 0x449ac,
	0x449b4, 0x449b8,
	0x449c0, 0x449fc,
	0x45000, 0x45004,
	0x45010, 0x45030,
	0x45040, 0x45060,
	0x45068, 0x45068,
	0x45080, 0x45084,
	0x450a0, 0x450b0,
	0x45200, 0x45204,
	0x45210, 0x45230,
	0x45240, 0x45260,
	0x45268, 0x45268,
	0x45280, 0x45284,
	0x452a0, 0x452b0,
	0x460c0, 0x460e4,
	0x47000, 0x4703c,
	0x47044, 0x4708c,
	0x47200, 0x47250,
	0x47400, 0x47408,
	0x47414, 0x47420,
	0x47600, 0x47618,
	0x47800, 0x47814,
	0x48000, 0x4800c,
	0x48040, 0x48050,
	0x48060, 0x48068,
	0x4807c, 0x4808c,
	0x48094, 0x480b0,
	0x480c0, 0x48144,
	0x48180, 0x4818c,
	0x48200, 0x48254,
	0x48260, 0x48264,
	0x48270, 0x48288,
	0x48290, 0x48298,
	0x482ac, 0x482c8,
	0x482d0, 0x482e0,
	0x482f0, 0x482f0,
	0x48300, 0x4833c,
	0x483f8, 0x483fc,
	0x49304, 0x493c4,
	0x49400, 0x4940c,
	0x49414, 0x4941c,
	0x49480, 0x494d0,
	0x4c000, 0x4c054,
	0x4c05c, 0x4c078,
	0x4c0c0, 0x4c174,
	0x4c180, 0x4c1ac,
	0x4c1b4, 0x4c1b8,
	0x4c1c0, 0x4c254,
	0x4c25c, 0x4c278,
	0x4c2c0, 0x4c374,
	0x4c380, 0x4c3ac,
	0x4c3b4, 0x4c3b8,
	0x4c3c0, 0x4c454,
	0x4c45c, 0x4c478,
	0x4c4c0, 0x4c574,
	0x4c580, 0x4c5ac,
	0x4c5b4, 0x4c5b8,
	0x4c5c0, 0x4c654,
	0x4c65c, 0x4c678,
	0x4c6c0, 0x4c774,
	0x4c780, 0x4c7ac,
	0x4c7b4, 0x4c7b8,
	0x4c7c0, 0x4c854,
	0x4c85c, 0x4c878,
	0x4c8c0, 0x4c974,
	0x4c980, 0x4c9ac,
	0x4c9b4, 0x4c9b8,
	0x4c9c0, 0x4c9fc,
	0x4d000, 0x4d004,
	0x4d010, 0x4d030,
	0x4d040, 0x4d060,
	0x4d068, 0x4d068,
	0x4d080, 0x4d084,
	0x4d0a0, 0x4d0b0,
	0x4d200, 0x4d204,
	0x4d210, 0x4d230,
	0x4d240, 0x4d260,
	0x4d268, 0x4d268,
	0x4d280, 0x4d284,
	0x4d2a0, 0x4d2b0,
	0x4e0c0, 0x4e0e4,
	0x4f000, 0x4f03c,
	0x4f044, 0x4f08c,
	0x4f200, 0x4f250,
	0x4f400, 0x4f408,
	0x4f414, 0x4f420,
	0x4f600, 0x4f618,
	0x4f800, 0x4f814,
	0x50000, 0x50084,
	0x50090, 0x500cc,
	0x50400, 0x50400,
	0x50800, 0x50884,
	0x50890, 0x508cc,
	0x50c00, 0x50c00,
	0x51000, 0x5101c,
	0x51300, 0x51308,
};
2080
2081	static const unsigned int t6_reg_ranges[] = {
2082		0x1008, 0x101c,
2083		0x1024, 0x10a8,
2084		0x10b4, 0x10f8,
2085		0x1100, 0x1114,
2086		0x111c, 0x112c,
2087		0x1138, 0x113c,
2088		0x1144, 0x114c,
2089		0x1180, 0x1184,
2090		0x1190, 0x1194,
2091		0x11a0, 0x11a4,
2092		0x11b0, 0x11b4,
2093		0x11fc, 0x123c,
2094		0x1254, 0x1274,
2095		0x1280, 0x133c,
2096		0x1800, 0x18fc,
2097		0x3000, 0x302c,
2098		0x3060, 0x30b0,
2099		0x30b8, 0x30d8,
2100		0x30e0, 0x30fc,
2101		0x3140, 0x357c,
2102		0x35a8, 0x35cc,
2103		0x35ec, 0x35ec,
2104		0x3600, 0x5624,
2105		0x56cc, 0x56ec,
2106		0x56f4, 0x5720,
2107		0x5728, 0x575c,
2108		0x580c, 0x5814,
2109		0x5890, 0x589c,
2110		0x58a4, 0x58ac,
2111		0x58b8, 0x58bc,
2112		0x5940, 0x595c,
2113		0x5980, 0x598c,
2114		0x59b0, 0x59c8,
2115		0x59d0, 0x59dc,
2116		0x59fc, 0x5a18,
2117		0x5a60, 0x5a6c,
2118		0x5a80, 0x5a8c,
2119		0x5a94, 0x5a9c,
2120		0x5b94, 0x5bfc,
2121		0x5c10, 0x5e48,
2122		0x5e50, 0x5e94,
2123		0x5ea0, 0x5eb0,
2124		0x5ec0, 0x5ec0,
2125		0x5ec8, 0x5ed0,
2126		0x5ee0, 0x5ee0,
2127		0x5ef0, 0x5ef0,
2128		0x5f00, 0x5f00,
2129		0x6000, 0x6020,
2130		0x6028, 0x6040,
2131		0x6058, 0x609c,
2132		0x60a8, 0x619c,
2133		0x7700, 0x7798,
2134		0x77c0, 0x7880,
2135		0x78cc, 0x78fc,
2136		0x7b00, 0x7b58,
2137		0x7b60, 0x7b84,
2138		0x7b8c, 0x7c54,
2139		0x7d00, 0x7d38,
2140		0x7d40, 0x7d84,
2141		0x7d8c, 0x7ddc,
2142		0x7de4, 0x7e04,
2143		0x7e10, 0x7e1c,
2144		0x7e24, 0x7e38,
2145		0x7e40, 0x7e44,
2146		0x7e4c, 0x7e78,
2147		0x7e80, 0x7edc,
2148		0x7ee8, 0x7efc,
2149		0x8dc0, 0x8de4,
2150		0x8df8, 0x8e04,
2151		0x8e10, 0x8e84,
2152		0x8ea0, 0x8f88,
2153		0x8fb8, 0x9058,
2154		0x9060, 0x9060,
2155		0x9068, 0x90f8,
2156		0x9100, 0x9124,
2157		0x9400, 0x9470,
2158		0x9600, 0x9600,
2159		0x9608, 0x9638,
2160		0x9640, 0x9704,
2161		0x9710, 0x971c,
2162		0x9800, 0x9808,
2163		0x9810, 0x9864,
2164		0x9c00, 0x9c6c,
2165		0x9c80, 0x9cec,
2166		0x9d00, 0x9d6c,
2167		0x9d80, 0x9dec,
2168		0x9e00, 0x9e6c,
2169		0x9e80, 0x9eec,
2170		0x9f00, 0x9f6c,
2171		0x9f80, 0xa020,
2172		0xd000, 0xd03c,
2173		0xd100, 0xd118,
2174		0xd200, 0xd214,
2175		0xd220, 0xd234,
2176		0xd240, 0xd254,
2177		0xd260, 0xd274,
2178		0xd280, 0xd294,
2179		0xd2a0, 0xd2b4,
2180		0xd2c0, 0xd2d4,
2181		0xd2e0, 0xd2f4,
2182		0xd300, 0xd31c,
2183		0xdfc0, 0xdfe0,
2184		0xe000, 0xf008,
2185		0xf010, 0xf018,
2186		0xf020, 0xf028,
2187		0x11000, 0x11014,
2188		0x11048, 0x1106c,
2189		0x11074, 0x11088,
2190		0x11098, 0x11120,
2191		0x1112c, 0x1117c,
2192		0x11190, 0x112e0,
2193		0x11300, 0x1130c,
2194		0x12000, 0x1206c,
2195		0x19040, 0x1906c,
2196		0x19078, 0x19080,
2197		0x1908c, 0x190e8,
2198		0x190f0, 0x190f8,
2199		0x19100, 0x19110,
2200		0x19120, 0x19124,
2201		0x19150, 0x19194,
2202		0x1919c, 0x191b0,
2203		0x191d0, 0x191e8,
2204		0x19238, 0x19290,
2205		0x192a4, 0x192b0,
2206		0x192bc, 0x192bc,
2207		0x19348, 0x1934c,
2208		0x193f8, 0x19418,
2209		0x19420, 0x19428,
2210		0x19430, 0x19444,
2211		0x1944c, 0x1946c,
2212		0x19474, 0x19474,
2213		0x19490, 0x194cc,
2214		0x194f0, 0x194f8,
2215		0x19c00, 0x19c48,
2216		0x19c50, 0x19c80,
2217		0x19c94, 0x19c98,
2218		0x19ca0, 0x19cbc,
2219		0x19ce4, 0x19ce4,
2220		0x19cf0, 0x19cf8,
2221		0x19d00, 0x19d28,
2222		0x19d50, 0x19d78,
2223		0x19d94, 0x19d98,
2224		0x19da0, 0x19dc8,
2225		0x19df0, 0x19e10,
2226		0x19e50, 0x19e6c,
2227		0x19ea0, 0x19ebc,
2228		0x19ec4, 0x19ef4,
2229		0x19f04, 0x19f2c,
2230		0x19f34, 0x19f34,
2231		0x19f40, 0x19f50,
2232		0x19f90, 0x19fac,
2233		0x19fc4, 0x19fc8,
2234		0x19fd0, 0x19fe4,
2235		0x1a000, 0x1a004,
2236		0x1a010, 0x1a06c,
2237		0x1a0b0, 0x1a0e4,
2238		0x1a0ec, 0x1a0f8,
2239		0x1a100, 0x1a108,
2240		0x1a114, 0x1a130,
2241		0x1a138, 0x1a1c4,
2242		0x1a1fc, 0x1a1fc,
2243		0x1e008, 0x1e00c,
2244		0x1e040, 0x1e044,
2245		0x1e04c, 0x1e04c,
2246		0x1e284, 0x1e290,
2247		0x1e2c0, 0x1e2c0,
2248		0x1e2e0, 0x1e2e0,
2249		0x1e300, 0x1e384,
2250		0x1e3c0, 0x1e3c8,
2251		0x1e408, 0x1e40c,
2252		0x1e440, 0x1e444,
2253		0x1e44c, 0x1e44c,
2254		0x1e684, 0x1e690,
2255		0x1e6c0, 0x1e6c0,
2256		0x1e6e0, 0x1e6e0,
2257		0x1e700, 0x1e784,
2258		0x1e7c0, 0x1e7c8,
2259		0x1e808, 0x1e80c,
2260		0x1e840, 0x1e844,
2261		0x1e84c, 0x1e84c,
2262		0x1ea84, 0x1ea90,
2263		0x1eac0, 0x1eac0,
2264		0x1eae0, 0x1eae0,
2265		0x1eb00, 0x1eb84,
2266		0x1ebc0, 0x1ebc8,
2267		0x1ec08, 0x1ec0c,
2268		0x1ec40, 0x1ec44,
2269		0x1ec4c, 0x1ec4c,
2270		0x1ee84, 0x1ee90,
2271		0x1eec0, 0x1eec0,
2272		0x1eee0, 0x1eee0,
2273		0x1ef00, 0x1ef84,
2274		0x1efc0, 0x1efc8,
2275		0x1f008, 0x1f00c,
2276		0x1f040, 0x1f044,
2277		0x1f04c, 0x1f04c,
2278		0x1f284, 0x1f290,
2279		0x1f2c0, 0x1f2c0,
2280		0x1f2e0, 0x1f2e0,
2281		0x1f300, 0x1f384,
2282		0x1f3c0, 0x1f3c8,
2283		0x1f408, 0x1f40c,
2284		0x1f440, 0x1f444,
2285		0x1f44c, 0x1f44c,
2286		0x1f684, 0x1f690,
2287		0x1f6c0, 0x1f6c0,
2288		0x1f6e0, 0x1f6e0,
2289		0x1f700, 0x1f784,
2290		0x1f7c0, 0x1f7c8,
2291		0x1f808, 0x1f80c,
2292		0x1f840, 0x1f844,
2293		0x1f84c, 0x1f84c,
2294		0x1fa84, 0x1fa90,
2295		0x1fac0, 0x1fac0,
2296		0x1fae0, 0x1fae0,
2297		0x1fb00, 0x1fb84,
2298		0x1fbc0, 0x1fbc8,
2299		0x1fc08, 0x1fc0c,
2300		0x1fc40, 0x1fc44,
2301		0x1fc4c, 0x1fc4c,
2302		0x1fe84, 0x1fe90,
2303		0x1fec0, 0x1fec0,
2304		0x1fee0, 0x1fee0,
2305		0x1ff00, 0x1ff84,
2306		0x1ffc0, 0x1ffc8,
2307		0x30000, 0x30030,
2308		0x30100, 0x30168,
2309		0x30190, 0x301a0,
2310		0x301a8, 0x301b8,
2311		0x301c4, 0x301c8,
2312		0x301d0, 0x301d0,
2313		0x30200, 0x30320,
2314		0x30400, 0x304b4,
2315		0x304c0, 0x3052c,
2316		0x30540, 0x3061c,
2317		0x30800, 0x308a0,
2318		0x308c0, 0x30908,
2319		0x30910, 0x309b8,
2320		0x30a00, 0x30a04,
2321		0x30a0c, 0x30a14,
2322		0x30a1c, 0x30a2c,
2323		0x30a44, 0x30a50,
2324		0x30a74, 0x30a74,
2325		0x30a7c, 0x30afc,
2326		0x30b08, 0x30c24,
2327		0x30d00, 0x30d14,
2328		0x30d1c, 0x30d3c,
2329		0x30d44, 0x30d4c,
2330		0x30d54, 0x30d74,
2331		0x30d7c, 0x30d7c,
2332		0x30de0, 0x30de0,
2333		0x30e00, 0x30ed4,
2334		0x30f00, 0x30fa4,
2335		0x30fc0, 0x30fc4,
2336		0x31000, 0x31004,
2337		0x31080, 0x310fc,
2338		0x31208, 0x31220,
2339		0x3123c, 0x31254,
2340		0x31300, 0x31300,
2341		0x31308, 0x3131c,
2342		0x31338, 0x3133c,
2343		0x31380, 0x31380,
2344		0x31388, 0x313a8,
2345		0x313b4, 0x313b4,
2346		0x31400, 0x31420,
2347		0x31438, 0x3143c,
2348		0x31480, 0x31480,
2349		0x314a8, 0x314a8,
2350		0x314b0, 0x314b4,
2351		0x314c8, 0x314d4,
2352		0x31a40, 0x31a4c,
2353		0x31af0, 0x31b20,
2354		0x31b38, 0x31b3c,
2355		0x31b80, 0x31b80,
2356		0x31ba8, 0x31ba8,
2357		0x31bb0, 0x31bb4,
2358		0x31bc8, 0x31bd4,
2359		0x32140, 0x3218c,
2360		0x321f0, 0x321f4,
2361		0x32200, 0x32200,
2362		0x32218, 0x32218,
2363		0x32400, 0x32400,
2364		0x32408, 0x3241c,
2365		0x32618, 0x32620,
2366		0x32664, 0x32664,
2367		0x326a8, 0x326a8,
2368		0x326ec, 0x326ec,
2369		0x32a00, 0x32abc,
2370		0x32b00, 0x32b18,
2371		0x32b20, 0x32b38,
2372		0x32b40, 0x32b58,
2373		0x32b60, 0x32b78,
2374		0x32c00, 0x32c00,
2375		0x32c08, 0x32c3c,
2376		0x33000, 0x3302c,
2377		0x33034, 0x33050,
2378		0x33058, 0x33058,
2379		0x33060, 0x3308c,
2380		0x3309c, 0x330ac,
2381		0x330c0, 0x330c0,
2382		0x330c8, 0x330d0,
2383		0x330d8, 0x330e0,
2384		0x330ec, 0x3312c,
2385		0x33134, 0x33150,
2386		0x33158, 0x33158,
2387		0x33160, 0x3318c,
2388		0x3319c, 0x331ac,
2389		0x331c0, 0x331c0,
2390		0x331c8, 0x331d0,
2391		0x331d8, 0x331e0,
2392		0x331ec, 0x33290,
2393		0x33298, 0x332c4,
2394		0x332e4, 0x33390,
2395		0x33398, 0x333c4,
2396		0x333e4, 0x3342c,
2397		0x33434, 0x33450,
2398		0x33458, 0x33458,
2399		0x33460, 0x3348c,
2400		0x3349c, 0x334ac,
2401		0x334c0, 0x334c0,
2402		0x334c8, 0x334d0,
2403		0x334d8, 0x334e0,
2404		0x334ec, 0x3352c,
2405		0x33534, 0x33550,
2406		0x33558, 0x33558,
2407		0x33560, 0x3358c,
2408		0x3359c, 0x335ac,
2409		0x335c0, 0x335c0,
2410		0x335c8, 0x335d0,
2411		0x335d8, 0x335e0,
2412		0x335ec, 0x33690,
2413		0x33698, 0x336c4,
2414		0x336e4, 0x33790,
2415		0x33798, 0x337c4,
2416		0x337e4, 0x337fc,
2417		0x33814, 0x33814,
2418		0x33854, 0x33868,
2419		0x33880, 0x3388c,
2420		0x338c0, 0x338d0,
2421		0x338e8, 0x338ec,
2422		0x33900, 0x3392c,
2423		0x33934, 0x33950,
2424		0x33958, 0x33958,
2425		0x33960, 0x3398c,
2426		0x3399c, 0x339ac,
2427		0x339c0, 0x339c0,
2428		0x339c8, 0x339d0,
2429		0x339d8, 0x339e0,
2430		0x339ec, 0x33a90,
2431		0x33a98, 0x33ac4,
2432		0x33ae4, 0x33b10,
2433		0x33b24, 0x33b28,
2434		0x33b38, 0x33b50,
2435		0x33bf0, 0x33c10,
2436		0x33c24, 0x33c28,
2437		0x33c38, 0x33c50,
2438		0x33cf0, 0x33cfc,
2439		0x34000, 0x34030,
2440		0x34100, 0x34168,
2441		0x34190, 0x341a0,
2442		0x341a8, 0x341b8,
2443		0x341c4, 0x341c8,
2444		0x341d0, 0x341d0,
2445		0x34200, 0x34320,
2446		0x34400, 0x344b4,
2447		0x344c0, 0x3452c,
2448		0x34540, 0x3461c,
2449		0x34800, 0x348a0,
2450		0x348c0, 0x34908,
2451		0x34910, 0x349b8,
2452		0x34a00, 0x34a04,
2453		0x34a0c, 0x34a14,
2454		0x34a1c, 0x34a2c,
2455		0x34a44, 0x34a50,
2456		0x34a74, 0x34a74,
2457		0x34a7c, 0x34afc,
2458		0x34b08, 0x34c24,
2459		0x34d00, 0x34d14,
2460		0x34d1c, 0x34d3c,
2461		0x34d44, 0x34d4c,
2462		0x34d54, 0x34d74,
2463		0x34d7c, 0x34d7c,
2464		0x34de0, 0x34de0,
2465		0x34e00, 0x34ed4,
2466		0x34f00, 0x34fa4,
2467		0x34fc0, 0x34fc4,
2468		0x35000, 0x35004,
2469		0x35080, 0x350fc,
2470		0x35208, 0x35220,
2471		0x3523c, 0x35254,
2472		0x35300, 0x35300,
2473		0x35308, 0x3531c,
2474		0x35338, 0x3533c,
2475		0x35380, 0x35380,
2476		0x35388, 0x353a8,
2477		0x353b4, 0x353b4,
2478		0x35400, 0x35420,
2479		0x35438, 0x3543c,
2480		0x35480, 0x35480,
2481		0x354a8, 0x354a8,
2482		0x354b0, 0x354b4,
2483		0x354c8, 0x354d4,
2484		0x35a40, 0x35a4c,
2485		0x35af0, 0x35b20,
2486		0x35b38, 0x35b3c,
2487		0x35b80, 0x35b80,
2488		0x35ba8, 0x35ba8,
2489		0x35bb0, 0x35bb4,
2490		0x35bc8, 0x35bd4,
2491		0x36140, 0x3618c,
2492		0x361f0, 0x361f4,
2493		0x36200, 0x36200,
2494		0x36218, 0x36218,
2495		0x36400, 0x36400,
2496		0x36408, 0x3641c,
2497		0x36618, 0x36620,
2498		0x36664, 0x36664,
2499		0x366a8, 0x366a8,
2500		0x366ec, 0x366ec,
2501		0x36a00, 0x36abc,
2502		0x36b00, 0x36b18,
2503		0x36b20, 0x36b38,
2504		0x36b40, 0x36b58,
2505		0x36b60, 0x36b78,
2506		0x36c00, 0x36c00,
2507		0x36c08, 0x36c3c,
2508		0x37000, 0x3702c,
2509		0x37034, 0x37050,
2510		0x37058, 0x37058,
2511		0x37060, 0x3708c,
2512		0x3709c, 0x370ac,
2513		0x370c0, 0x370c0,
2514		0x370c8, 0x370d0,
2515		0x370d8, 0x370e0,
2516		0x370ec, 0x3712c,
2517		0x37134, 0x37150,
2518		0x37158, 0x37158,
2519		0x37160, 0x3718c,
2520		0x3719c, 0x371ac,
2521		0x371c0, 0x371c0,
2522		0x371c8, 0x371d0,
2523		0x371d8, 0x371e0,
2524		0x371ec, 0x37290,
2525		0x37298, 0x372c4,
2526		0x372e4, 0x37390,
2527		0x37398, 0x373c4,
2528		0x373e4, 0x3742c,
2529		0x37434, 0x37450,
2530		0x37458, 0x37458,
2531		0x37460, 0x3748c,
2532		0x3749c, 0x374ac,
2533		0x374c0, 0x374c0,
2534		0x374c8, 0x374d0,
2535		0x374d8, 0x374e0,
2536		0x374ec, 0x3752c,
2537		0x37534, 0x37550,
2538		0x37558, 0x37558,
2539		0x37560, 0x3758c,
2540		0x3759c, 0x375ac,
2541		0x375c0, 0x375c0,
2542		0x375c8, 0x375d0,
2543		0x375d8, 0x375e0,
2544		0x375ec, 0x37690,
2545		0x37698, 0x376c4,
2546		0x376e4, 0x37790,
2547		0x37798, 0x377c4,
2548		0x377e4, 0x377fc,
2549		0x37814, 0x37814,
2550		0x37854, 0x37868,
2551		0x37880, 0x3788c,
2552		0x378c0, 0x378d0,
2553		0x378e8, 0x378ec,
2554		0x37900, 0x3792c,
2555		0x37934, 0x37950,
2556		0x37958, 0x37958,
2557		0x37960, 0x3798c,
2558		0x3799c, 0x379ac,
2559		0x379c0, 0x379c0,
2560		0x379c8, 0x379d0,
2561		0x379d8, 0x379e0,
2562		0x379ec, 0x37a90,
2563		0x37a98, 0x37ac4,
2564		0x37ae4, 0x37b10,
2565		0x37b24, 0x37b28,
2566		0x37b38, 0x37b50,
2567		0x37bf0, 0x37c10,
2568		0x37c24, 0x37c28,
2569		0x37c38, 0x37c50,
2570		0x37cf0, 0x37cfc,
2571		0x40040, 0x40040,
2572		0x40080, 0x40084,
2573		0x40100, 0x40100,
2574		0x40140, 0x401bc,
2575		0x40200, 0x40214,
2576		0x40228, 0x40228,
2577		0x40240, 0x40258,
2578		0x40280, 0x40280,
2579		0x40304, 0x40304,
2580		0x40330, 0x4033c,
2581		0x41304, 0x413c8,
2582		0x413d0, 0x413dc,
2583		0x413f0, 0x413f0,
2584		0x41400, 0x4140c,
2585		0x41414, 0x4141c,
2586		0x41480, 0x414d0,
2587		0x44000, 0x4407c,
2588		0x440c0, 0x441ac,
2589		0x441b4, 0x4427c,
2590		0x442c0, 0x443ac,
2591		0x443b4, 0x4447c,
2592		0x444c0, 0x445ac,
2593		0x445b4, 0x4467c,
2594		0x446c0, 0x447ac,
2595		0x447b4, 0x4487c,
2596		0x448c0, 0x449ac,
2597		0x449b4, 0x44a7c,
2598		0x44ac0, 0x44bac,
2599		0x44bb4, 0x44c7c,
2600		0x44cc0, 0x44dac,
2601		0x44db4, 0x44e7c,
2602		0x44ec0, 0x44fac,
2603		0x44fb4, 0x4507c,
2604		0x450c0, 0x451ac,
2605		0x451b4, 0x451fc,
2606		0x45800, 0x45804,
2607		0x45810, 0x45830,
2608		0x45840, 0x45860,
2609		0x45868, 0x45868,
2610		0x45880, 0x45884,
2611		0x458a0, 0x458b0,
2612		0x45a00, 0x45a04,
2613		0x45a10, 0x45a30,
2614		0x45a40, 0x45a60,
2615		0x45a68, 0x45a68,
2616		0x45a80, 0x45a84,
2617		0x45aa0, 0x45ab0,
2618		0x460c0, 0x460e4,
2619		0x47000, 0x4703c,
2620		0x47044, 0x4708c,
2621		0x47200, 0x47250,
2622		0x47400, 0x47408,
2623		0x47414, 0x47420,
2624		0x47600, 0x47618,
2625		0x47800, 0x47814,
2626		0x47820, 0x4782c,
2627		0x50000, 0x50084,
2628		0x50090, 0x500cc,
2629		0x50300, 0x50384,
2630		0x50400, 0x50400,
2631		0x50800, 0x50884,
2632		0x50890, 0x508cc,
2633		0x50b00, 0x50b84,
2634		0x50c00, 0x50c00,
2635		0x51000, 0x51020,
2636		0x51028, 0x510b0,
2637		0x51300, 0x51324,
2638	};
2639
2640	u32 *buf_end = (u32 *)((char *)buf + buf_size);
2641	const unsigned int *reg_ranges;
2642	int reg_ranges_size, range;
2643	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
2644
2645	/* Select the right set of register ranges to dump depending on the
2646	 * adapter chip type.
2647	 */
2648	switch (chip_version) {
2649	case CHELSIO_T4:
2650		reg_ranges = t4_reg_ranges;
2651		reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2652		break;
2653
2654	case CHELSIO_T5:
2655		reg_ranges = t5_reg_ranges;
2656		reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2657		break;
2658
2659	case CHELSIO_T6:
2660		reg_ranges = t6_reg_ranges;
2661		reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2662		break;
2663
2664	default:
2665		dev_err(adap->pdev_dev,
2666			"Unsupported chip version %d\n", chip_version);
2667		return;
2668	}
2669
2670	/* Clear the register buffer and insert the appropriate register
2671	 * values selected by the above register ranges.
2672	 */
2673	memset(buf, 0, buf_size);
2674	for (range = 0; range < reg_ranges_size; range += 2) {
2675		unsigned int reg = reg_ranges[range];
2676		unsigned int last_reg = reg_ranges[range + 1];
2677		u32 *bufp = (u32 *)((char *)buf + reg);
2678
2679		/* Iterate across the register range filling in the register
2680		 * buffer but don't write past the end of the register buffer.
2681		 */
2682		while (reg <= last_reg && bufp < buf_end) {
2683			*bufp++ = t4_read_reg(adap, reg);
2684			reg += sizeof(u32);
2685		}
2686	}
2687}
2688
/* Serial EEPROM / VPD layout constants */
#define EEPROM_STAT_ADDR   0x7bfc	/* VPD address of the write-protect status word */
#define VPD_BASE           0x400	/* VPD normally starts here ... */
#define VPD_BASE_OLD       0		/* ... but at 0 on early cards */
#define VPD_LEN            1024		/* number of VPD bytes we read/parse */
#define CHELSIO_VPD_UNIQUE_ID 0x82	/* first byte of a Chelsio-programmed VPD */
2694
2695/**
2696 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
2697 * @phys_addr: the physical EEPROM address
2698 * @fn: the PCI function number
2699 * @sz: size of function-specific area
2700 *
2701 * Translate a physical EEPROM address to virtual.  The first 1K is
2702 * accessed through virtual addresses starting at 31K, the rest is
2703 * accessed through virtual addresses starting at 0.
2704 *
2705 * The mapping is as follows:
2706 * [0..1K) -> [31K..32K)
2707 * [1K..1K+A) -> [31K-A..31K)
2708 * [1K+A..ES) -> [0..ES-A-1K)
2709 *
2710 * where A = @fn * @sz, and ES = EEPROM size.
2711 */
2712int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2713{
2714	fn *= sz;
2715	if (phys_addr < 1024)
2716		return phys_addr + (31 << 10);
2717	if (phys_addr < 1024 + fn)
2718		return 31744 - fn + phys_addr - 1024;
2719	if (phys_addr < EEPROMSIZE)
2720		return phys_addr - 1024 - fn;
2721	return -EINVAL;
2722}
2723
2724/**
2725 *	t4_seeprom_wp - enable/disable EEPROM write protection
2726 *	@adapter: the adapter
2727 *	@enable: whether to enable or disable write protection
2728 *
2729 *	Enables or disables write protection on the serial EEPROM.
2730 */
2731int t4_seeprom_wp(struct adapter *adapter, bool enable)
2732{
2733	unsigned int v = enable ? 0xc : 0;
2734	int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
2735	return ret < 0 ? ret : 0;
2736}
2737
2738/**
2739 *	t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
2740 *	@adapter: adapter to read
2741 *	@p: where to store the parameters
2742 *
2743 *	Reads card parameters stored in VPD EEPROM.
2744 */
2745int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
2746{
2747	int i, ret = 0, addr;
2748	int ec, sn, pn, na;
2749	u8 *vpd, csum;
2750	unsigned int vpdr_len, kw_offset, id_len;
2751
2752	vpd = vmalloc(VPD_LEN);
2753	if (!vpd)
2754		return -ENOMEM;
2755
2756	/* Card information normally starts at VPD_BASE but early cards had
2757	 * it at 0.
2758	 */
2759	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
2760	if (ret < 0)
2761		goto out;
2762
2763	/* The VPD shall have a unique identifier specified by the PCI SIG.
2764	 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
2765	 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
2766	 * is expected to automatically put this entry at the
2767	 * beginning of the VPD.
2768	 */
2769	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
2770
2771	ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
2772	if (ret < 0)
2773		goto out;
2774
2775	if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
2776		dev_err(adapter->pdev_dev, "missing VPD ID string\n");
2777		ret = -EINVAL;
2778		goto out;
2779	}
2780
2781	id_len = pci_vpd_lrdt_size(vpd);
2782	if (id_len > ID_LEN)
2783		id_len = ID_LEN;
2784
2785	i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
2786	if (i < 0) {
2787		dev_err(adapter->pdev_dev, "missing VPD-R section\n");
2788		ret = -EINVAL;
2789		goto out;
2790	}
2791
2792	vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
2793	kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
2794	if (vpdr_len + kw_offset > VPD_LEN) {
2795		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
2796		ret = -EINVAL;
2797		goto out;
2798	}
2799
2800#define FIND_VPD_KW(var, name) do { \
2801	var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
2802	if (var < 0) { \
2803		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
2804		ret = -EINVAL; \
2805		goto out; \
2806	} \
2807	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
2808} while (0)
2809
2810	FIND_VPD_KW(i, "RV");
2811	for (csum = 0; i >= 0; i--)
2812		csum += vpd[i];
2813
2814	if (csum) {
2815		dev_err(adapter->pdev_dev,
2816			"corrupted VPD EEPROM, actual csum %u\n", csum);
2817		ret = -EINVAL;
2818		goto out;
2819	}
2820
2821	FIND_VPD_KW(ec, "EC");
2822	FIND_VPD_KW(sn, "SN");
2823	FIND_VPD_KW(pn, "PN");
2824	FIND_VPD_KW(na, "NA");
2825#undef FIND_VPD_KW
2826
2827	memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
2828	strim(p->id);
2829	memcpy(p->ec, vpd + ec, EC_LEN);
2830	strim(p->ec);
2831	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
2832	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
2833	strim(p->sn);
2834	i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
2835	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
2836	strim(p->pn);
2837	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
2838	strim((char *)p->na);
2839
2840out:
2841	vfree(vpd);
2842	return ret < 0 ? ret : 0;
2843}
2844
2845/**
2846 *	t4_get_vpd_params - read VPD parameters & retrieve Core Clock
2847 *	@adapter: adapter to read
2848 *	@p: where to store the parameters
2849 *
2850 *	Reads card parameters stored in VPD EEPROM and retrieves the Core
2851 *	Clock.  This can only be called after a connection to the firmware
2852 *	is established.
2853 */
2854int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
2855{
2856	u32 cclk_param, cclk_val;
2857	int ret;
2858
2859	/* Grab the raw VPD parameters.
2860	 */
2861	ret = t4_get_raw_vpd_params(adapter, p);
2862	if (ret)
2863		return ret;
2864
2865	/* Ask firmware for the Core Clock since it knows how to translate the
2866	 * Reference Clock ('V2') VPD field into a Core Clock value ...
2867	 */
2868	cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2869		      FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
2870	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
2871			      1, &cclk_param, &cclk_val);
2872
2873	if (ret)
2874		return ret;
2875	p->cclk = cclk_val;
2876
2877	return 0;
2878}
2879
2880/**
2881 *	t4_get_pfres - retrieve VF resource limits
2882 *	@adapter: the adapter
2883 *
2884 *	Retrieves configured resource limits and capabilities for a physical
2885 *	function.  The results are stored in @adapter->pfres.
2886 */
2887int t4_get_pfres(struct adapter *adapter)
2888{
2889	struct pf_resources *pfres = &adapter->params.pfres;
2890	struct fw_pfvf_cmd cmd, rpl;
2891	int v;
2892	u32 word;
2893
2894	/* Execute PFVF Read command to get VF resource limits; bail out early
2895	 * with error on command failure.
2896	 */
2897	memset(&cmd, 0, sizeof(cmd));
2898	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
2899				    FW_CMD_REQUEST_F |
2900				    FW_CMD_READ_F |
2901				    FW_PFVF_CMD_PFN_V(adapter->pf) |
2902				    FW_PFVF_CMD_VFN_V(0));
2903	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
2904	v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
2905	if (v != FW_SUCCESS)
2906		return v;
2907
2908	/* Extract PF resource limits and return success.
2909	 */
2910	word = be32_to_cpu(rpl.niqflint_niq);
2911	pfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
2912	pfres->niq = FW_PFVF_CMD_NIQ_G(word);
2913
2914	word = be32_to_cpu(rpl.type_to_neq);
2915	pfres->neq = FW_PFVF_CMD_NEQ_G(word);
2916	pfres->pmask = FW_PFVF_CMD_PMASK_G(word);
2917
2918	word = be32_to_cpu(rpl.tc_to_nexactf);
2919	pfres->tc = FW_PFVF_CMD_TC_G(word);
2920	pfres->nvi = FW_PFVF_CMD_NVI_G(word);
2921	pfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);
2922
2923	word = be32_to_cpu(rpl.r_caps_to_nethctrl);
2924	pfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
2925	pfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
2926	pfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);
2927
2928	return 0;
2929}
2930
/* Serial flash and firmware constants.  The opcode values below are issued
 * to the flash part through sf1_write() by the flash helpers that follow.
 */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
};
2944
2945/**
2946 *	sf1_read - read data from the serial flash
2947 *	@adapter: the adapter
2948 *	@byte_cnt: number of bytes to read
2949 *	@cont: whether another operation will be chained
2950 *	@lock: whether to lock SF for PL access only
2951 *	@valp: where to store the read data
2952 *
2953 *	Reads up to 4 bytes of data from the serial flash.  The location of
2954 *	the read needs to be specified prior to calling this by issuing the
2955 *	appropriate commands to the serial flash.
2956 */
2957static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
2958		    int lock, u32 *valp)
2959{
2960	int ret;
2961
2962	if (!byte_cnt || byte_cnt > 4)
2963		return -EINVAL;
2964	if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
2965		return -EBUSY;
2966	t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2967		     SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
2968	ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
2969	if (!ret)
2970		*valp = t4_read_reg(adapter, SF_DATA_A);
2971	return ret;
2972}
2973
2974/**
2975 *	sf1_write - write data to the serial flash
2976 *	@adapter: the adapter
2977 *	@byte_cnt: number of bytes to write
2978 *	@cont: whether another operation will be chained
2979 *	@lock: whether to lock SF for PL access only
2980 *	@val: value to write
2981 *
2982 *	Writes up to 4 bytes of data to the serial flash.  The location of
2983 *	the write needs to be specified prior to calling this by issuing the
2984 *	appropriate commands to the serial flash.
2985 */
2986static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
2987		     int lock, u32 val)
2988{
2989	if (!byte_cnt || byte_cnt > 4)
2990		return -EINVAL;
2991	if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
2992		return -EBUSY;
2993	t4_write_reg(adapter, SF_DATA_A, val);
2994	t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2995		     SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
2996	return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
2997}
2998
2999/**
3000 *	flash_wait_op - wait for a flash operation to complete
3001 *	@adapter: the adapter
3002 *	@attempts: max number of polls of the status register
3003 *	@delay: delay between polls in ms
3004 *
3005 *	Wait for a flash operation to complete by polling the status register.
3006 */
3007static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
3008{
3009	int ret;
3010	u32 status;
3011
3012	while (1) {
3013		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
3014		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
3015			return ret;
3016		if (!(status & 1))
3017			return 0;
3018		if (--attempts == 0)
3019			return -EAGAIN;
3020		if (delay)
3021			msleep(delay);
3022	}
3023}
3024
3025/**
3026 *	t4_read_flash - read words from serial flash
3027 *	@adapter: the adapter
3028 *	@addr: the start address for the read
3029 *	@nwords: how many 32-bit words to read
3030 *	@data: where to store the read data
3031 *	@byte_oriented: whether to store data as bytes or as words
3032 *
3033 *	Read the specified number of 32-bit words from the serial flash.
3034 *	If @byte_oriented is set the read data is stored as a byte array
3035 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
3036 *	natural endianness.
3037 */
3038int t4_read_flash(struct adapter *adapter, unsigned int addr,
3039		  unsigned int nwords, u32 *data, int byte_oriented)
3040{
3041	int ret;
3042
3043	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
3044		return -EINVAL;
3045
3046	addr = swab32(addr) | SF_RD_DATA_FAST;
3047
3048	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
3049	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
3050		return ret;
3051
3052	for ( ; nwords; nwords--, data++) {
3053		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
3054		if (nwords == 1)
3055			t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
3056		if (ret)
3057			return ret;
3058		if (byte_oriented)
3059			*data = (__force __u32)(cpu_to_be32(*data));
3060	}
3061	return 0;
3062}
3063
3064/**
3065 *	t4_write_flash - write up to a page of data to the serial flash
3066 *	@adapter: the adapter
3067 *	@addr: the start address to write
3068 *	@n: length of data to write in bytes
3069 *	@data: the data to write
3070 *	@byte_oriented: whether to store data as bytes or as words
3071 *
3072 *	Writes up to a page of data (256 bytes) to the serial flash starting
3073 *	at the given address.  All the data must be written to the same page.
3074 *	If @byte_oriented is set the write data is stored as byte stream
3075 *	(i.e. matches what on disk), otherwise in big-endian.
3076 */
3077static int t4_write_flash(struct adapter *adapter, unsigned int addr,
3078			  unsigned int n, const u8 *data, bool byte_oriented)
3079{
3080	unsigned int i, c, left, val, offset = addr & 0xff;
3081	u32 buf[64];
3082	int ret;
3083
3084	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
3085		return -EINVAL;
3086
3087	val = swab32(addr) | SF_PROG_PAGE;
3088
3089	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3090	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
3091		goto unlock;
3092
3093	for (left = n; left; left -= c, data += c) {
3094		c = min(left, 4U);
3095		for (val = 0, i = 0; i < c; ++i) {
3096			if (byte_oriented)
3097				val = (val << 8) + data[i];
3098			else
3099				val = (val << 8) + data[c - i - 1];
3100		}
3101
3102		ret = sf1_write(adapter, c, c != left, 1, val);
3103		if (ret)
3104			goto unlock;
3105	}
3106	ret = flash_wait_op(adapter, 8, 1);
3107	if (ret)
3108		goto unlock;
3109
3110	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
3111
3112	/* Read the page to verify the write succeeded */
3113	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
3114			    byte_oriented);
3115	if (ret)
3116		return ret;
3117
3118	if (memcmp(data - n, (u8 *)buf + offset, n)) {
3119		dev_err(adapter->pdev_dev,
3120			"failed to correctly write the flash page at %#x\n",
3121			addr);
3122		return -EIO;
3123	}
3124	return 0;
3125
3126unlock:
3127	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
3128	return ret;
3129}
3130
3131/**
3132 *	t4_get_fw_version - read the firmware version
3133 *	@adapter: the adapter
3134 *	@vers: where to place the version
3135 *
3136 *	Reads the FW version from flash.
3137 */
3138int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3139{
3140	return t4_read_flash(adapter, FLASH_FW_START +
3141			     offsetof(struct fw_hdr, fw_ver), 1,
3142			     vers, 0);
3143}
3144
3145/**
3146 *	t4_get_bs_version - read the firmware bootstrap version
3147 *	@adapter: the adapter
3148 *	@vers: where to place the version
3149 *
3150 *	Reads the FW Bootstrap version from flash.
3151 */
3152int t4_get_bs_version(struct adapter *adapter, u32 *vers)
3153{
3154	return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3155			     offsetof(struct fw_hdr, fw_ver), 1,
3156			     vers, 0);
3157}
3158
3159/**
3160 *	t4_get_tp_version - read the TP microcode version
3161 *	@adapter: the adapter
3162 *	@vers: where to place the version
3163 *
3164 *	Reads the TP microcode version from flash.
3165 */
3166int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3167{
3168	return t4_read_flash(adapter, FLASH_FW_START +
3169			     offsetof(struct fw_hdr, tp_microcode_ver),
3170			     1, vers, 0);
3171}
3172
3173/**
3174 *	t4_get_exprom_version - return the Expansion ROM version (if any)
3175 *	@adap: the adapter
3176 *	@vers: where to place the version
3177 *
3178 *	Reads the Expansion ROM header from FLASH and returns the version
3179 *	number (if present) through the @vers return value pointer.  We return
3180 *	this in the Firmware Version Format since it's convenient.  Return
3181 *	0 on success, -ENOENT if no Expansion ROM is present.
3182 */
3183int t4_get_exprom_version(struct adapter *adap, u32 *vers)
3184{
3185	struct exprom_header {
3186		unsigned char hdr_arr[16];	/* must start with 0x55aa */
3187		unsigned char hdr_ver[4];	/* Expansion ROM version */
3188	} *hdr;
3189	u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3190					   sizeof(u32))];
3191	int ret;
3192
3193	ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3194			    ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3195			    0);
3196	if (ret)
3197		return ret;
3198
3199	hdr = (struct exprom_header *)exprom_header_buf;
3200	if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3201		return -ENOENT;
3202
3203	*vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
3204		 FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
3205		 FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
3206		 FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
3207	return 0;
3208}
3209
3210/**
3211 *      t4_get_vpd_version - return the VPD version
3212 *      @adapter: the adapter
3213 *      @vers: where to place the version
3214 *
3215 *      Reads the VPD via the Firmware interface (thus this can only be called
3216 *      once we're ready to issue Firmware commands).  The format of the
3217 *      VPD version is adapter specific.  Returns 0 on success, an error on
3218 *      failure.
3219 *
3220 *      Note that early versions of the Firmware didn't include the ability
3221 *      to retrieve the VPD version, so we zero-out the return-value parameter
3222 *      in that case to avoid leaving it with garbage in it.
3223 *
3224 *      Also note that the Firmware will return its cached copy of the VPD
3225 *      Revision ID, not the actual Revision ID as written in the Serial
3226 *      EEPROM.  This is only an issue if a new VPD has been written and the
3227 *      Firmware/Chip haven't yet gone through a RESET sequence.  So it's best
3228 *      to defer calling this routine till after a FW_RESET_CMD has been issued
3229 *      if the Host Driver will be performing a full adapter initialization.
3230 */
3231int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
3232{
3233	u32 vpdrev_param;
3234	int ret;
3235
3236	vpdrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3237			FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_VPDREV));
3238	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3239			      1, &vpdrev_param, vers);
3240	if (ret)
3241		*vers = 0;
3242	return ret;
3243}
3244
3245/**
3246 *      t4_get_scfg_version - return the Serial Configuration version
3247 *      @adapter: the adapter
3248 *      @vers: where to place the version
3249 *
3250 *      Reads the Serial Configuration Version via the Firmware interface
3251 *      (thus this can only be called once we're ready to issue Firmware
3252 *      commands).  The format of the Serial Configuration version is
3253 *      adapter specific.  Returns 0 on success, an error on failure.
3254 *
3255 *      Note that early versions of the Firmware didn't include the ability
3256 *      to retrieve the Serial Configuration version, so we zero-out the
3257 *      return-value parameter in that case to avoid leaving it with
3258 *      garbage in it.
3259 *
3260 *      Also note that the Firmware will return its cached copy of the Serial
3261 *      Initialization Revision ID, not the actual Revision ID as written in
3262 *      the Serial EEPROM.  This is only an issue if a new VPD has been written
3263 *      and the Firmware/Chip haven't yet gone through a RESET sequence.  So
3264 *      it's best to defer calling this routine till after a FW_RESET_CMD has
3265 *      been issued if the Host Driver will be performing a full adapter
3266 *      initialization.
3267 */
3268int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
3269{
3270	u32 scfgrev_param;
3271	int ret;
3272
3273	scfgrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3274			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_SCFGREV));
3275	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3276			      1, &scfgrev_param, vers);
3277	if (ret)
3278		*vers = 0;
3279	return ret;
3280}
3281
3282/**
3283 *      t4_get_version_info - extract various chip/firmware version information
3284 *      @adapter: the adapter
3285 *
3286 *      Reads various chip/firmware version numbers and stores them into the
3287 *      adapter Adapter Parameters structure.  If any of the efforts fails
3288 *      the first failure will be returned, but all of the version numbers
3289 *      will be read.
3290 */
3291int t4_get_version_info(struct adapter *adapter)
3292{
3293	int ret = 0;
3294
3295	#define FIRST_RET(__getvinfo) \
3296	do { \
3297		int __ret = __getvinfo; \
3298		if (__ret && !ret) \
3299			ret = __ret; \
3300	} while (0)
3301
3302	FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3303	FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3304	FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3305	FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3306	FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3307	FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3308
3309	#undef FIRST_RET
3310	return ret;
3311}
3312
3313/**
3314 *      t4_dump_version_info - dump all of the adapter configuration IDs
3315 *      @adapter: the adapter
3316 *
3317 *      Dumps all of the various bits of adapter configuration version/revision
3318 *      IDs information.  This is typically called at some point after
3319 *      t4_get_version_info() has been called.
3320 */
3321void t4_dump_version_info(struct adapter *adapter)
3322{
3323	/* Device information */
3324	dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
3325		 adapter->params.vpd.id,
3326		 CHELSIO_CHIP_RELEASE(adapter->params.chip));
3327	dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
3328		 adapter->params.vpd.sn, adapter->params.vpd.pn);
3329
3330	/* Firmware Version */
3331	if (!adapter->params.fw_vers)
3332		dev_warn(adapter->pdev_dev, "No firmware loaded\n");
3333	else
3334		dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
3335			 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
3336			 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
3337			 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
3338			 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));
3339
3340	/* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
3341	 * Firmware, so dev_info() is more appropriate here.)
3342	 */
3343	if (!adapter->params.bs_vers)
3344		dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
3345	else
3346		dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
3347			 FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
3348			 FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
3349			 FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
3350			 FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));
3351
3352	/* TP Microcode Version */
3353	if (!adapter->params.tp_vers)
3354		dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
3355	else
3356		dev_info(adapter->pdev_dev,
3357			 "TP Microcode version: %u.%u.%u.%u\n",
3358			 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
3359			 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
3360			 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
3361			 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
3362
3363	/* Expansion ROM version */
3364	if (!adapter->params.er_vers)
3365		dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
3366	else
3367		dev_info(adapter->pdev_dev,
3368			 "Expansion ROM version: %u.%u.%u.%u\n",
3369			 FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
3370			 FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
3371			 FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
3372			 FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));
3373
3374	/* Serial Configuration version */
3375	dev_info(adapter->pdev_dev, "Serial Configuration version: %#x\n",
3376		 adapter->params.scfg_vers);
3377
3378	/* VPD Version */
3379	dev_info(adapter->pdev_dev, "VPD version: %#x\n",
3380		 adapter->params.vpd_vers);
3381}
3382
3383/**
3384 *	t4_check_fw_version - check if the FW is supported with this driver
3385 *	@adap: the adapter
3386 *
3387 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
3388 *	if there's exact match, a negative error if the version could not be
3389 *	read or there's a major version mismatch
3390 */
3391int t4_check_fw_version(struct adapter *adap)
3392{
3393	int i, ret, major, minor, micro;
3394	int exp_major, exp_minor, exp_micro;
3395	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
3396
3397	ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3398	/* Try multiple times before returning error */
3399	for (i = 0; (ret == -EBUSY || ret == -EAGAIN) && i < 3; i++)
3400		ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3401
3402	if (ret)
3403		return ret;
3404
3405	major = FW_HDR_FW_VER_MAJOR_G(adap->params.fw_vers);
3406	minor = FW_HDR_FW_VER_MINOR_G(adap->params.fw_vers);
3407	micro = FW_HDR_FW_VER_MICRO_G(adap->params.fw_vers);
3408
3409	switch (chip_version) {
3410	case CHELSIO_T4:
3411		exp_major = T4FW_MIN_VERSION_MAJOR;
3412		exp_minor = T4FW_MIN_VERSION_MINOR;
3413		exp_micro = T4FW_MIN_VERSION_MICRO;
3414		break;
3415	case CHELSIO_T5:
3416		exp_major = T5FW_MIN_VERSION_MAJOR;
3417		exp_minor = T5FW_MIN_VERSION_MINOR;
3418		exp_micro = T5FW_MIN_VERSION_MICRO;
3419		break;
3420	case CHELSIO_T6:
3421		exp_major = T6FW_MIN_VERSION_MAJOR;
3422		exp_minor = T6FW_MIN_VERSION_MINOR;
3423		exp_micro = T6FW_MIN_VERSION_MICRO;
3424		break;
3425	default:
3426		dev_err(adap->pdev_dev, "Unsupported chip type, %x\n",
3427			adap->chip);
3428		return -EINVAL;
3429	}
3430
3431	if (major < exp_major || (major == exp_major && minor < exp_minor) ||
3432	    (major == exp_major && minor == exp_minor && micro < exp_micro)) {
3433		dev_err(adap->pdev_dev,
3434			"Card has firmware version %u.%u.%u, minimum "
3435			"supported firmware is %u.%u.%u.\n", major, minor,
3436			micro, exp_major, exp_minor, exp_micro);
3437		return -EFAULT;
3438	}
3439	return 0;
3440}
3441
3442/* Is the given firmware API compatible with the one the driver was compiled
3443 * with?
3444 */
3445static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
3446{
3447
3448	/* short circuit if it's the exact same firmware version */
3449	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
3450		return 1;
3451
3452#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
3453	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
3454	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
3455		return 1;
3456#undef SAME_INTF
3457
3458	return 0;
3459}
3460
3461/* The firmware in the filesystem is usable, but should it be installed?
3462 * This routine explains itself in detail if it indicates the filesystem
3463 * firmware should be installed.
3464 */
3465static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
3466				int k, int c)
3467{
3468	const char *reason;
3469
3470	if (!card_fw_usable) {
3471		reason = "incompatible or unusable";
3472		goto install;
3473	}
3474
3475	if (k > c) {
3476		reason = "older than the version supported with this driver";
3477		goto install;
3478	}
3479
3480	return 0;
3481
3482install:
3483	dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
3484		"installing firmware %u.%u.%u.%u on card.\n",
3485		FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
3486		FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
3487		FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
3488		FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
3489
3490	return 1;
3491}
3492
/**
 *	t4_prep_fw - decide which firmware the adapter should run, upgrading
 *		if necessary
 *	@adap: the adapter
 *	@fw_info: driver-supported firmware information (header used for
 *		compatibility checks)
 *	@fw_data: firmware image obtained from the filesystem, or NULL
 *	@fw_size: size of @fw_data in bytes
 *	@card_fw: caller-provided buffer; filled with the header of the
 *		firmware currently on the card (or the installed image)
 *	@state: the current device state
 *	@reset: cleared if installing the filesystem firmware already reset
 *		the chip as part of the upgrade
 *
 *	Compares the firmware on the card, the firmware from the filesystem
 *	(if any) and the firmware the driver was compiled against.  If the
 *	filesystem firmware is usable, the device is uninitialized and
 *	should_install_fs_fw() agrees, the filesystem image is flashed onto
 *	the card.  Returns 0 on success with adap->params.fw_vers/tp_vers
 *	updated from the (possibly new) card firmware header, or a negative
 *	error if no usable firmware could be found or the upgrade failed.
 */
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, enum dev_state state,
	       int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = t4_read_flash(adap, FLASH_FW_START,
			    sizeof(*card_fw) / sizeof(uint32_t),
			    (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		/* Couldn't even read the header; treat the card firmware as
		 * unusable and rely on the filesystem image (if any).
		 */
		dev_err(adap->pdev_dev,
			"Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver))) {
		/* Flash the filesystem image onto the card. */
		ret = t4_fw_upgrade(adap, adap->mbox, fw_data,
				    fw_size, 0);
		if (ret != 0) {
			dev_err(adap->pdev_dev,
				"failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update the cached header too. */
		*card_fw = *fs_fw;
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		/* Driver/card/filesystem version words for the diagnostic. */
		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
			"chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			state,
			FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
			FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
			FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
			FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
			FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
			FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
		ret = -EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}
3577
3578/**
3579 *	t4_flash_erase_sectors - erase a range of flash sectors
3580 *	@adapter: the adapter
3581 *	@start: the first sector to erase
3582 *	@end: the last sector to erase
3583 *
3584 *	Erases the sectors in the given inclusive range.
3585 */
3586static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
3587{
3588	int ret = 0;
3589
3590	if (end >= adapter->params.sf_nsec)
3591		return -EINVAL;
3592
3593	while (start <= end) {
3594		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3595		    (ret = sf1_write(adapter, 4, 0, 1,
3596				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
3597		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
3598			dev_err(adapter->pdev_dev,
3599				"erase of flash sector %d failed, error %d\n",
3600				start, ret);
3601			break;
3602		}
3603		start++;
3604	}
3605	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
3606	return ret;
3607}
3608
3609/**
3610 *	t4_flash_cfg_addr - return the address of the flash configuration file
3611 *	@adapter: the adapter
3612 *
3613 *	Return the address within the flash where the Firmware Configuration
3614 *	File is stored.
3615 */
3616unsigned int t4_flash_cfg_addr(struct adapter *adapter)
3617{
3618	if (adapter->params.sf_size == 0x100000)
3619		return FLASH_FPGA_CFG_START;
3620	else
3621		return FLASH_CFG_START;
3622}
3623
3624/* Return TRUE if the specified firmware matches the adapter.  I.e. T4
3625 * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
3626 * and emit an error message for mismatched firmware to save our caller the
3627 * effort ...
3628 */
3629static bool t4_fw_matches_chip(const struct adapter *adap,
3630			       const struct fw_hdr *hdr)
3631{
3632	/* The expression below will return FALSE for any unsupported adapter
3633	 * which will keep us "honest" in the future ...
3634	 */
3635	if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
3636	    (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
3637	    (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
3638		return true;
3639
3640	dev_err(adap->pdev_dev,
3641		"FW image (%d) is not suitable for this adapter (%d)\n",
3642		hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
3643	return false;
3644}
3645
3646/**
3647 *	t4_load_fw - download firmware
3648 *	@adap: the adapter
3649 *	@fw_data: the firmware image to write
3650 *	@size: image size
3651 *
3652 *	Write the supplied firmware image to the card's serial flash.
3653 */
3654int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3655{
3656	u32 csum;
3657	int ret, addr;
3658	unsigned int i;
3659	u8 first_page[SF_PAGE_SIZE];
3660	const __be32 *p = (const __be32 *)fw_data;
3661	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
3662	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3663	unsigned int fw_start_sec = FLASH_FW_START_SEC;
3664	unsigned int fw_size = FLASH_FW_MAX_SIZE;
3665	unsigned int fw_start = FLASH_FW_START;
3666
3667	if (!size) {
3668		dev_err(adap->pdev_dev, "FW image has no data\n");
3669		return -EINVAL;
3670	}
3671	if (size & 511) {
3672		dev_err(adap->pdev_dev,
3673			"FW image size not multiple of 512 bytes\n");
3674		return -EINVAL;
3675	}
3676	if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
3677		dev_err(adap->pdev_dev,
3678			"FW image size differs from size in FW header\n");
3679		return -EINVAL;
3680	}
3681	if (size > fw_size) {
3682		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
3683			fw_size);
3684		return -EFBIG;
3685	}
3686	if (!t4_fw_matches_chip(adap, hdr))
3687		return -EINVAL;
3688
3689	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
3690		csum += be32_to_cpu(p[i]);
3691
3692	if (csum != 0xffffffff) {
3693		dev_err(adap->pdev_dev,
3694			"corrupted firmware image, checksum %#x\n", csum);
3695		return -EINVAL;
3696	}
3697
3698	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
3699	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
3700	if (ret)
3701		goto out;
3702
3703	/*
3704	 * We write the correct version at the end so the driver can see a bad
3705	 * version if the FW write fails.  Start by writing a copy of the
3706	 * first page with a bad version.
3707	 */
3708	memcpy(first_page, fw_data, SF_PAGE_SIZE);
3709	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
3710	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, true);
3711	if (ret)
3712		goto out;
3713
3714	addr = fw_start;
3715	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3716		addr += SF_PAGE_SIZE;
3717		fw_data += SF_PAGE_SIZE;
3718		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, true);
3719		if (ret)
3720			goto out;
3721	}
3722
3723	ret = t4_write_flash(adap, fw_start + offsetof(struct fw_hdr, fw_ver),
3724			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver,
3725			     true);
3726out:
3727	if (ret)
3728		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
3729			ret);
3730	else
3731		ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3732	return ret;
3733}
3734
3735/**
3736 *	t4_phy_fw_ver - return current PHY firmware version
3737 *	@adap: the adapter
3738 *	@phy_fw_ver: return value buffer for PHY firmware version
3739 *
3740 *	Returns the current version of external PHY firmware on the
3741 *	adapter.
3742 */
3743int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
3744{
3745	u32 param, val;
3746	int ret;
3747
3748	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3749		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3750		 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3751		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
3752	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
3753			      &param, &val);
3754	if (ret)
3755		return ret;
3756	*phy_fw_ver = val;
3757	return 0;
3758}
3759
3760/**
3761 *	t4_load_phy_fw - download port PHY firmware
3762 *	@adap: the adapter
3763 *	@win: the PCI-E Memory Window index to use for t4_memory_rw()
3764 *	@phy_fw_version: function to check PHY firmware versions
3765 *	@phy_fw_data: the PHY firmware image to write
3766 *	@phy_fw_size: image size
3767 *
3768 *	Transfer the specified PHY firmware to the adapter.  If a non-NULL
3769 *	@phy_fw_version is supplied, then it will be used to determine if
3770 *	it's necessary to perform the transfer by comparing the version
3771 *	of any existing adapter PHY firmware with that of the passed in
3772 *	PHY firmware image.
3773 *
3774 *	A negative error number will be returned if an error occurs.  If
3775 *	version number support is available and there's no need to upgrade
3776 *	the firmware, 0 will be returned.  If firmware is successfully
3777 *	transferred to the adapter, 1 will be returned.
3778 *
3779 *	NOTE: some adapters only have local RAM to store the PHY firmware.  As
3780 *	a result, a RESET of the adapter would cause that RAM to lose its
3781 *	contents.  Thus, loading PHY firmware on such adapters must happen
3782 *	after any FW_RESET_CMDs ...
3783 */
3784int t4_load_phy_fw(struct adapter *adap, int win,
3785		   int (*phy_fw_version)(const u8 *, size_t),
3786		   const u8 *phy_fw_data, size_t phy_fw_size)
3787{
3788	int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
3789	unsigned long mtype = 0, maddr = 0;
3790	u32 param, val;
3791	int ret;
3792
3793	/* If we have version number support, then check to see if the adapter
3794	 * already has up-to-date PHY firmware loaded.
3795	 */
3796	if (phy_fw_version) {
3797		new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
3798		ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3799		if (ret < 0)
3800			return ret;
3801
3802		if (cur_phy_fw_ver >= new_phy_fw_vers) {
3803			CH_WARN(adap, "PHY Firmware already up-to-date, "
3804				"version %#x\n", cur_phy_fw_ver);
3805			return 0;
3806		}
3807	}
3808
3809	/* Ask the firmware where it wants us to copy the PHY firmware image.
3810	 * The size of the file requires a special version of the READ command
3811	 * which will pass the file size via the values field in PARAMS_CMD and
3812	 * retrieve the return value from firmware and place it in the same
3813	 * buffer values
3814	 */
3815	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3816		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3817		 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3818		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
3819	val = phy_fw_size;
3820	ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
3821				 &param, &val, 1, true);
3822	if (ret < 0)
3823		return ret;
3824	mtype = val >> 8;
3825	maddr = (val & 0xff) << 16;
3826
3827	/* Copy the supplied PHY Firmware image to the adapter memory location
3828	 * allocated by the adapter firmware.
3829	 */
3830	spin_lock_bh(&adap->win0_lock);
3831	ret = t4_memory_rw(adap, win, mtype, maddr,
3832			   phy_fw_size, (__be32 *)phy_fw_data,
3833			   T4_MEMORY_WRITE);
3834	spin_unlock_bh(&adap->win0_lock);
3835	if (ret)
3836		return ret;
3837
3838	/* Tell the firmware that the PHY firmware image has been written to
3839	 * RAM and it can now start copying it over to the PHYs.  The chip
3840	 * firmware will RESET the affected PHYs as part of this operation
3841	 * leaving them running the new PHY firmware image.
3842	 */
3843	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3844		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3845		 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3846		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
3847	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
3848				    &param, &val, 30000);
3849	if (ret)
3850		return ret;
3851
3852	/* If we have version number support, then check to see that the new
3853	 * firmware got loaded properly.
3854	 */
3855	if (phy_fw_version) {
3856		ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3857		if (ret < 0)
3858			return ret;
3859
3860		if (cur_phy_fw_ver != new_phy_fw_vers) {
3861			CH_WARN(adap, "PHY Firmware did not update: "
3862				"version on adapter %#x, "
3863				"version flashed %#x\n",
3864				cur_phy_fw_ver, new_phy_fw_vers);
3865			return -ENXIO;
3866		}
3867	}
3868
3869	return 1;
3870}
3871
3872/**
3873 *	t4_fwcache - firmware cache operation
3874 *	@adap: the adapter
3875 *	@op  : the operation (flush or flush and invalidate)
3876 */
3877int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3878{
3879	struct fw_params_cmd c;
3880
3881	memset(&c, 0, sizeof(c));
3882	c.op_to_vfn =
3883		cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
3884			    FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
3885			    FW_PARAMS_CMD_PFN_V(adap->pf) |
3886			    FW_PARAMS_CMD_VFN_V(0));
3887	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3888	c.param[0].mnem =
3889		cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3890			    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
3891	c.param[0].val = cpu_to_be32(op);
3892
3893	return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
3894}
3895
/**
 *	t4_cim_read_pif_la - read the CIM PIF logic analyzer buffers
 *	@adap: the adapter
 *	@pif_req: buffer receiving CIM_PIFLA_SIZE * 6 PO (outbound) LA words
 *	@pif_rsp: buffer receiving CIM_PIFLA_SIZE * 6 PI (inbound) LA words
 *	@pif_req_wrptr: optional return of the PO LA write pointer
 *	@pif_rsp_wrptr: optional return of the PI LA write pointer
 *
 *	Reads the CIM PIF logic-analyzer capture via the CIM debug
 *	registers.  LA capture is disabled (by clearing LADBGEN) while
 *	reading and the original debug configuration is restored afterwards.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	/* Save the debug config and stop LA capture if it is enabled. */
	cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
	if (cfg & LADBGEN_F)
		t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);

	/* Current write pointers mark where the newest entries are. */
	val = t4_read_reg(adap, CIM_DEBUGSTS_A);
	req = POLADBGWRPTR_G(val);
	rsp = PILADBGWRPTR_G(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	/* Walk the LA: each entry is 6 words, read by setting the read
	 * pointers and reading the debug-data registers.  NOTE(review):
	 * the "+ 2" skip between entries appears to step over unused
	 * pointer positions -- confirm against the hardware documentation.
	 */
	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) |
				     PILADBGRDPTR_V(rsp));
			*pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A);
			*pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A);
			req++;
			rsp++;
		}
		req = (req + 2) & POLADBGRDPTR_M;
		rsp = (rsp + 2) & PILADBGRDPTR_M;
	}
	/* Restore the original debug configuration. */
	t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
}
3929
/**
 *	t4_cim_read_ma_la - read the CIM MA logic analyzer buffers
 *	@adap: the adapter
 *	@ma_req: buffer receiving CIM_MALA_SIZE * 5 PO MA debug words
 *	@ma_rsp: buffer receiving CIM_MALA_SIZE * 5 PI MA debug words
 *
 *	Reads the CIM MA logic-analyzer capture via the CIM debug registers.
 *	LA capture is disabled (by clearing LADBGEN) while reading and the
 *	original debug configuration is restored afterwards.
 */
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 cfg;
	int i, j, idx;

	/* Save the debug config and stop LA capture if it is enabled. */
	cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
	if (cfg & LADBGEN_F)
		t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);

	/* Entries are addressed at a stride of 8, with 5 words read per
	 * entry via the MA debug-data registers.
	 */
	for (i = 0; i < CIM_MALA_SIZE; i++) {
		for (j = 0; j < 5; j++) {
			idx = 8 * i + j;
			t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) |
				     PILADBGRDPTR_V(idx));
			*ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A);
			*ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A);
		}
	}
	/* Restore the original debug configuration. */
	t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
}
3950
/**
 *	t4_ulprx_read_la - read the ULP_RX logic analyzer buffers
 *	@adap: the adapter
 *	@la_buf: buffer receiving 8 interleaved columns of ULPRX_LA_SIZE
 *		words each (column i occupies la_buf[i], la_buf[i+8], ...)
 *
 *	Reads all 8 ULP_RX LA streams.  For each stream the read pointer is
 *	positioned at the current write pointer before reading ULPRX_LA_SIZE
 *	data words.
 */
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
	unsigned int i, j;

	for (i = 0; i < 8; i++) {
		u32 *p = la_buf + i;

		/* Select stream i and start reading at its write pointer. */
		t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
		j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
		t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
			*p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
	}
}
3965
/* The ADVERT_MASK is used to mask out all of the Advertised Firmware Port
 * Capabilities which we control with separate controls -- see, for instance,
 * Pause Frames and Forward Error Correction.  In order to determine what the
 * full set of Advertised Port Capabilities are, the base Advertised Port
 * Capabilities (masked by ADVERT_MASK) must be combined with the Advertised
 * Port Capabilities associated with those other controls.  See
 * t4_link_acaps() for how this is done.  The mask itself covers the full
 * Speed field plus the Auto-Negotiation bit.
 */
#define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
		     FW_PORT_CAP32_ANEG)
3976
3977/**
3978 *	fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
3979 *	@caps16: a 16-bit Port Capabilities value
3980 *
3981 *	Returns the equivalent 32-bit Port Capabilities value.
3982 */
3983static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
3984{
3985	fw_port_cap32_t caps32 = 0;
3986
3987	#define CAP16_TO_CAP32(__cap) \
3988		do { \
3989			if (caps16 & FW_PORT_CAP_##__cap) \
3990				caps32 |= FW_PORT_CAP32_##__cap; \
3991		} while (0)
3992
3993	CAP16_TO_CAP32(SPEED_100M);
3994	CAP16_TO_CAP32(SPEED_1G);
3995	CAP16_TO_CAP32(SPEED_25G);
3996	CAP16_TO_CAP32(SPEED_10G);
3997	CAP16_TO_CAP32(SPEED_40G);
3998	CAP16_TO_CAP32(SPEED_100G);
3999	CAP16_TO_CAP32(FC_RX);
4000	CAP16_TO_CAP32(FC_TX);
4001	CAP16_TO_CAP32(ANEG);
4002	CAP16_TO_CAP32(FORCE_PAUSE);
4003	CAP16_TO_CAP32(MDIAUTO);
4004	CAP16_TO_CAP32(MDISTRAIGHT);
4005	CAP16_TO_CAP32(FEC_RS);
4006	CAP16_TO_CAP32(FEC_BASER_RS);
4007	CAP16_TO_CAP32(802_3_PAUSE);
4008	CAP16_TO_CAP32(802_3_ASM_DIR);
4009
4010	#undef CAP16_TO_CAP32
4011
4012	return caps32;
4013}
4014
4015/**
4016 *	fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
4017 *	@caps32: a 32-bit Port Capabilities value
4018 *
4019 *	Returns the equivalent 16-bit Port Capabilities value.  Note that
4020 *	not all 32-bit Port Capabilities can be represented in the 16-bit
4021 *	Port Capabilities and some fields/values may not make it.
4022 */
4023static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
4024{
4025	fw_port_cap16_t caps16 = 0;
4026
4027	#define CAP32_TO_CAP16(__cap) \
4028		do { \
4029			if (caps32 & FW_PORT_CAP32_##__cap) \
4030				caps16 |= FW_PORT_CAP_##__cap; \
4031		} while (0)
4032
4033	CAP32_TO_CAP16(SPEED_100M);
4034	CAP32_TO_CAP16(SPEED_1G);
4035	CAP32_TO_CAP16(SPEED_10G);
4036	CAP32_TO_CAP16(SPEED_25G);
4037	CAP32_TO_CAP16(SPEED_40G);
4038	CAP32_TO_CAP16(SPEED_100G);
4039	CAP32_TO_CAP16(FC_RX);
4040	CAP32_TO_CAP16(FC_TX);
4041	CAP32_TO_CAP16(802_3_PAUSE);
4042	CAP32_TO_CAP16(802_3_ASM_DIR);
4043	CAP32_TO_CAP16(ANEG);
4044	CAP32_TO_CAP16(FORCE_PAUSE);
4045	CAP32_TO_CAP16(MDIAUTO);
4046	CAP32_TO_CAP16(MDISTRAIGHT);
4047	CAP32_TO_CAP16(FEC_RS);
4048	CAP32_TO_CAP16(FEC_BASER_RS);
4049
4050	#undef CAP32_TO_CAP16
4051
4052	return caps16;
4053}
4054
4055/* Translate Firmware Port Capabilities Pause specification to Common Code */
4056static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
4057{
4058	enum cc_pause cc_pause = 0;
4059
4060	if (fw_pause & FW_PORT_CAP32_FC_RX)
4061		cc_pause |= PAUSE_RX;
4062	if (fw_pause & FW_PORT_CAP32_FC_TX)
4063		cc_pause |= PAUSE_TX;
4064
4065	return cc_pause;
4066}
4067
4068/* Translate Common Code Pause specification into Firmware Port Capabilities */
4069static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
4070{
4071	/* Translate orthogonal RX/TX Pause Controls for L1 Configure
4072	 * commands, etc.
4073	 */
4074	fw_port_cap32_t fw_pause = 0;
4075
4076	if (cc_pause & PAUSE_RX)
4077		fw_pause |= FW_PORT_CAP32_FC_RX;
4078	if (cc_pause & PAUSE_TX)
4079		fw_pause |= FW_PORT_CAP32_FC_TX;
4080	if (!(cc_pause & PAUSE_AUTONEG))
4081		fw_pause |= FW_PORT_CAP32_FORCE_PAUSE;
4082
4083	/* Translate orthogonal Pause controls into IEEE 802.3 Pause,
4084	 * Asymmetrical Pause for use in reporting to upper layer OS code, etc.
4085	 * Note that these bits are ignored in L1 Configure commands.
4086	 */
4087	if (cc_pause & PAUSE_RX) {
4088		if (cc_pause & PAUSE_TX)
4089			fw_pause |= FW_PORT_CAP32_802_3_PAUSE;
4090		else
4091			fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR |
4092				    FW_PORT_CAP32_802_3_PAUSE;
4093	} else if (cc_pause & PAUSE_TX) {
4094		fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
4095	}
4096
4097	return fw_pause;
4098}
4099
4100/* Translate Firmware Forward Error Correction specification to Common Code */
4101static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
4102{
4103	enum cc_fec cc_fec = 0;
4104
4105	if (fw_fec & FW_PORT_CAP32_FEC_RS)
4106		cc_fec |= FEC_RS;
4107	if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
4108		cc_fec |= FEC_BASER_RS;
4109
4110	return cc_fec;
4111}
4112
4113/* Translate Common Code Forward Error Correction specification to Firmware */
4114static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
4115{
4116	fw_port_cap32_t fw_fec = 0;
4117
4118	if (cc_fec & FEC_RS)
4119		fw_fec |= FW_PORT_CAP32_FEC_RS;
4120	if (cc_fec & FEC_BASER_RS)
4121		fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;
4122
4123	return fw_fec;
4124}
4125
4126/**
4127 *	t4_link_acaps - compute Link Advertised Port Capabilities
4128 *	@adapter: the adapter
4129 *	@port: the Port ID
4130 *	@lc: the Port's Link Configuration
4131 *
4132 *	Synthesize the Advertised Port Capabilities we'll be using based on
4133 *	the base Advertised Port Capabilities (which have been filtered by
4134 *	ADVERT_MASK) plus the individual controls for things like Pause
4135 *	Frames, Forward Error Correction, MDI, etc.
4136 */
4137fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port,
4138			      struct link_config *lc)
4139{
4140	fw_port_cap32_t fw_fc, fw_fec, acaps;
4141	unsigned int fw_mdi;
4142	char cc_fec;
4143
4144	fw_mdi = (FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps);
4145
4146	/* Convert driver coding of Pause Frame Flow Control settings into the
4147	 * Firmware's API.
4148	 */
4149	fw_fc = cc_to_fwcap_pause(lc->requested_fc);
4150
4151	/* Convert Common Code Forward Error Control settings into the
4152	 * Firmware's API.  If the current Requested FEC has "Automatic"
4153	 * (IEEE 802.3) specified, then we use whatever the Firmware
4154	 * sent us as part of its IEEE 802.3-based interpretation of
4155	 * the Transceiver Module EPROM FEC parameters.  Otherwise we
4156	 * use whatever is in the current Requested FEC settings.
4157	 */
4158	if (lc->requested_fec & FEC_AUTO)
4159		cc_fec = fwcap_to_cc_fec(lc->def_acaps);
4160	else
4161		cc_fec = lc->requested_fec;
4162	fw_fec = cc_to_fwcap_fec(cc_fec);
4163
4164	/* Figure out what our Requested Port Capabilities are going to be.
4165	 * Note parallel structure in t4_handle_get_port_info() and
4166	 * init_link_config().
4167	 */
4168	if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
4169		acaps = lc->acaps | fw_fc | fw_fec;
4170		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
4171		lc->fec = cc_fec;
4172	} else if (lc->autoneg == AUTONEG_DISABLE) {
4173		acaps = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
4174		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
4175		lc->fec = cc_fec;
4176	} else {
4177		acaps = lc->acaps | fw_fc | fw_fec | fw_mdi;
4178	}
4179
4180	/* Some Requested Port Capabilities are trivially wrong if they exceed
4181	 * the Physical Port Capabilities.  We can check that here and provide
4182	 * moderately useful feedback in the system log.
4183	 *
4184	 * Note that older Firmware doesn't have FW_PORT_CAP32_FORCE_PAUSE, so
4185	 * we need to exclude this from this check in order to maintain
4186	 * compatibility ...
4187	 */
4188	if ((acaps & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) {
4189		dev_err(adapter->pdev_dev, "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n",
4190			acaps, lc->pcaps);
4191		return -EINVAL;
4192	}
4193
4194	return acaps;
4195}
4196
4197/**
4198 *	t4_link_l1cfg_core - apply link configuration to MAC/PHY
4199 *	@adapter: the adapter
4200 *	@mbox: the Firmware Mailbox to use
4201 *	@port: the Port ID
4202 *	@lc: the Port's Link Configuration
4203 *	@sleep_ok: if true we may sleep while awaiting command completion
4204 *	@timeout: time to wait for command to finish before timing out
4205 *		(negative implies @sleep_ok=false)
4206 *
4207 *	Set up a port's MAC and PHY according to a desired link configuration.
4208 *	- If the PHY can auto-negotiate first decide what to advertise, then
4209 *	  enable/disable auto-negotiation as desired, and reset.
4210 *	- If the PHY does not auto-negotiate just reset it.
4211 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
4212 *	  otherwise do it later based on the outcome of auto-negotiation.
4213 */
4214int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
4215		       unsigned int port, struct link_config *lc,
4216		       u8 sleep_ok, int timeout)
4217{
4218	unsigned int fw_caps = adapter->params.fw_caps_support;
4219	struct fw_port_cmd cmd;
4220	fw_port_cap32_t rcap;
4221	int ret;
4222
4223	if (!(lc->pcaps & FW_PORT_CAP32_ANEG) &&
4224	    lc->autoneg == AUTONEG_ENABLE) {
4225		return -EINVAL;
4226	}
4227
4228	/* Compute our Requested Port Capabilities and send that on to the
4229	 * Firmware.
4230	 */
4231	rcap = t4_link_acaps(adapter, port, lc);
4232	memset(&cmd, 0, sizeof(cmd));
4233	cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
4234				       FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
4235				       FW_PORT_CMD_PORTID_V(port));
4236	cmd.action_to_len16 =
4237		cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
4238						 ? FW_PORT_ACTION_L1_CFG
4239						 : FW_PORT_ACTION_L1_CFG32) |
4240						 FW_LEN16(cmd));
4241	if (fw_caps == FW_CAPS16)
4242		cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
4243	else
4244		cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
4245
4246	ret = t4_wr_mbox_meat_timeout(adapter, mbox, &cmd, sizeof(cmd), NULL,
4247				      sleep_ok, timeout);
4248
4249	/* Unfortunately, even if the Requested Port Capabilities "fit" within
4250	 * the Physical Port Capabilities, some combinations of features may
4251	 * still not be legal.  For example, 40Gb/s and Reed-Solomon Forward
4252	 * Error Correction.  So if the Firmware rejects the L1 Configure
4253	 * request, flag that here.
4254	 */
4255	if (ret) {
4256		dev_err(adapter->pdev_dev,
4257			"Requested Port Capabilities %#x rejected, error %d\n",
4258			rcap, -ret);
4259		return ret;
4260	}
4261	return 0;
4262}
4263
4264/**
4265 *	t4_restart_aneg - restart autonegotiation
4266 *	@adap: the adapter
4267 *	@mbox: mbox to use for the FW command
4268 *	@port: the port id
4269 *
4270 *	Restarts autonegotiation for the selected port.
4271 */
4272int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
4273{
4274	unsigned int fw_caps = adap->params.fw_caps_support;
4275	struct fw_port_cmd c;
4276
4277	memset(&c, 0, sizeof(c));
4278	c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
4279				     FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
4280				     FW_PORT_CMD_PORTID_V(port));
4281	c.action_to_len16 =
4282		cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
4283						 ? FW_PORT_ACTION_L1_CFG
4284						 : FW_PORT_ACTION_L1_CFG32) |
4285			    FW_LEN16(c));
4286	if (fw_caps == FW_CAPS16)
4287		c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
4288	else
4289		c.u.l1cfg32.rcap32 = cpu_to_be32(FW_PORT_CAP32_ANEG);
4290	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4291}
4292
/* Signature of a per-module platform-specific interrupt handler invoked
 * from t4_handle_intr_status() when a matching cause bit is set.
 */
typedef void (*int_handler_t)(struct adapter *adap);

/* One entry of an interrupt-action table; tables are terminated by an
 * entry whose mask is 0 (see t4_handle_intr_status()).
 */
struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};
4302
4303/**
4304 *	t4_handle_intr_status - table driven interrupt handler
4305 *	@adapter: the adapter that generated the interrupt
4306 *	@reg: the interrupt status register to process
4307 *	@acts: table of interrupt actions
4308 *
4309 *	A table driven interrupt handler that applies a set of masks to an
4310 *	interrupt status word and performs the corresponding actions if the
4311 *	interrupts described by the mask have occurred.  The actions include
4312 *	optionally emitting a warning or alert message.  The table is terminated
4313 *	by an entry specifying mask 0.  Returns the number of fatal interrupt
4314 *	conditions.
4315 */
4316static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
4317				 const struct intr_info *acts)
4318{
4319	int fatal = 0;
4320	unsigned int mask = 0;
4321	unsigned int status = t4_read_reg(adapter, reg);
4322
4323	for ( ; acts->mask; ++acts) {
4324		if (!(status & acts->mask))
4325			continue;
4326		if (acts->fatal) {
4327			fatal++;
4328			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
4329				  status & acts->mask);
4330		} else if (acts->msg && printk_ratelimit())
4331			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
4332				 status & acts->mask);
4333		if (acts->int_handler)
4334			acts->int_handler(adapter);
4335		mask |= acts->mask;
4336	}
4337	status &= mask;
4338	if (status)                           /* clear processed interrupts */
4339		t4_write_reg(adapter, reg, status);
4340	return fatal;
4341}
4342
4343/*
4344 * Interrupt handler for the PCIE module.
4345 */
4346static void pcie_intr_handler(struct adapter *adapter)
4347{
4348	static const struct intr_info sysbus_intr_info[] = {
4349		{ RNPP_F, "RXNP array parity error", -1, 1 },
4350		{ RPCP_F, "RXPC array parity error", -1, 1 },
4351		{ RCIP_F, "RXCIF array parity error", -1, 1 },
4352		{ RCCP_F, "Rx completions control array parity error", -1, 1 },
4353		{ RFTP_F, "RXFT array parity error", -1, 1 },
4354		{ 0 }
4355	};
4356	static const struct intr_info pcie_port_intr_info[] = {
4357		{ TPCP_F, "TXPC array parity error", -1, 1 },
4358		{ TNPP_F, "TXNP array parity error", -1, 1 },
4359		{ TFTP_F, "TXFT array parity error", -1, 1 },
4360		{ TCAP_F, "TXCA array parity error", -1, 1 },
4361		{ TCIP_F, "TXCIF array parity error", -1, 1 },
4362		{ RCAP_F, "RXCA array parity error", -1, 1 },
4363		{ OTDD_F, "outbound request TLP discarded", -1, 1 },
4364		{ RDPE_F, "Rx data parity error", -1, 1 },
4365		{ TDUE_F, "Tx uncorrectable data error", -1, 1 },
4366		{ 0 }
4367	};
4368	static const struct intr_info pcie_intr_info[] = {
4369		{ MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
4370		{ MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
4371		{ MSIDATAPERR_F, "MSI data parity error", -1, 1 },
4372		{ MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
4373		{ MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
4374		{ MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
4375		{ MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
4376		{ PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
4377		{ PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
4378		{ TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
4379		{ CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
4380		{ CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
4381		{ CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
4382		{ DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
4383		{ DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
4384		{ DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
4385		{ HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
4386		{ HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
4387		{ HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
4388		{ CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
4389		{ FIDPERR_F, "PCI FID parity error", -1, 1 },
4390		{ INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
4391		{ MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
4392		{ PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
4393		{ RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
4394		{ RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
4395		{ RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
4396		{ PCIESINT_F, "PCI core secondary fault", -1, 1 },
4397		{ PCIEPINT_F, "PCI core primary fault", -1, 1 },
4398		{ UNXSPLCPLERR_F, "PCI unexpected split completion error",
4399		  -1, 0 },
4400		{ 0 }
4401	};
4402
4403	static struct intr_info t5_pcie_intr_info[] = {
4404		{ MSTGRPPERR_F, "Master Response Read Queue parity error",
4405		  -1, 1 },
4406		{ MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
4407		{ MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
4408		{ MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
4409		{ MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
4410		{ MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
4411		{ MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
4412		{ PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
4413		  -1, 1 },
4414		{ PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
4415		  -1, 1 },
4416		{ TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
4417		{ MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
4418		{ CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
4419		{ CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
4420		{ DREQWRPERR_F, "PCI DMA channel write request parity error",
4421		  -1, 1 },
4422		{ DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
4423		{ DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
4424		{ HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
4425		{ HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
4426		{ HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
4427		{ CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
4428		{ FIDPERR_F, "PCI FID parity error", -1, 1 },
4429		{ VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
4430		{ MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
4431		{ PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
4432		{ IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
4433		  -1, 1 },
4434		{ IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
4435		  -1, 1 },
4436		{ RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
4437		{ IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
4438		{ TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
4439		{ READRSPERR_F, "Outbound read error", -1, 0 },
4440		{ 0 }
4441	};
4442
4443	int fat;
4444
4445	if (is_t4(adapter->params.chip))
4446		fat = t4_handle_intr_status(adapter,
4447				PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
4448				sysbus_intr_info) +
4449			t4_handle_intr_status(adapter,
4450					PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
4451					pcie_port_intr_info) +
4452			t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
4453					      pcie_intr_info);
4454	else
4455		fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
4456					    t5_pcie_intr_info);
4457
4458	if (fat)
4459		t4_fatal_err(adapter);
4460}
4461
4462/*
4463 * TP interrupt handler.
4464 */
4465static void tp_intr_handler(struct adapter *adapter)
4466{
4467	static const struct intr_info tp_intr_info[] = {
4468		{ 0x3fffffff, "TP parity error", -1, 1 },
4469		{ FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
4470		{ 0 }
4471	};
4472
4473	if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
4474		t4_fatal_err(adapter);
4475}
4476
4477/*
4478 * SGE interrupt handler.
4479 */
4480static void sge_intr_handler(struct adapter *adapter)
4481{
4482	u32 v = 0, perr;
4483	u32 err;
4484
4485	static const struct intr_info sge_intr_info[] = {
4486		{ ERR_CPL_EXCEED_IQE_SIZE_F,
4487		  "SGE received CPL exceeding IQE size", -1, 1 },
4488		{ ERR_INVALID_CIDX_INC_F,
4489		  "SGE GTS CIDX increment too large", -1, 0 },
4490		{ ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
4491		{ DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
4492		{ ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
4493		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
4494		{ ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
4495		  0 },
4496		{ ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
4497		  0 },
4498		{ ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
4499		  0 },
4500		{ ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
4501		  0 },
4502		{ ERR_ING_CTXT_PRIO_F,
4503		  "SGE too many priority ingress contexts", -1, 0 },
4504		{ INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
4505		{ EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
4506		{ 0 }
4507	};
4508
4509	static struct intr_info t4t5_sge_intr_info[] = {
4510		{ ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
4511		{ DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
4512		{ ERR_EGR_CTXT_PRIO_F,
4513		  "SGE too many priority egress contexts", -1, 0 },
4514		{ 0 }
4515	};
4516
4517	perr = t4_read_reg(adapter, SGE_INT_CAUSE1_A);
4518	if (perr) {
4519		v |= perr;
4520		dev_alert(adapter->pdev_dev, "SGE Cause1 Parity Error %#x\n",
4521			  perr);
4522	}
4523
4524	perr = t4_read_reg(adapter, SGE_INT_CAUSE2_A);
4525	if (perr) {
4526		v |= perr;
4527		dev_alert(adapter->pdev_dev, "SGE Cause2 Parity Error %#x\n",
4528			  perr);
4529	}
4530
4531	if (CHELSIO_CHIP_VERSION(adapter->params.chip) >= CHELSIO_T5) {
4532		perr = t4_read_reg(adapter, SGE_INT_CAUSE5_A);
4533		/* Parity error (CRC) for err_T_RxCRC is trivial, ignore it */
4534		perr &= ~ERR_T_RXCRC_F;
4535		if (perr) {
4536			v |= perr;
4537			dev_alert(adapter->pdev_dev,
4538				  "SGE Cause5 Parity Error %#x\n", perr);
4539		}
4540	}
4541
4542	v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
4543	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
4544		v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
4545					   t4t5_sge_intr_info);
4546
4547	err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
4548	if (err & ERROR_QID_VALID_F) {
4549		dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
4550			ERROR_QID_G(err));
4551		if (err & UNCAPTURED_ERROR_F)
4552			dev_err(adapter->pdev_dev,
4553				"SGE UNCAPTURED_ERROR set (clearing)\n");
4554		t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
4555			     UNCAPTURED_ERROR_F);
4556	}
4557
4558	if (v != 0)
4559		t4_fatal_err(adapter);
4560}
4561
/* Parity-error cause bits for the CIM outbound and inbound queues. */
#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
		      OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
		      IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)

/*
 * CIM interrupt handler.  Also responsible for reporting firmware
 * errors (via PCIE_FW) and for filtering spurious Timer0 interrupts
 * that don't correspond to a firmware crash.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{ PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
		{ TIMER0INT_F, "CIM TIMER0 interrupt", -1, 1 },
		{ 0 }
	};
	/* Illegal accesses detected by the uP access checker. */
	static const struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT_F, "CIM illegal write", -1, 1 },
		{ ILLRDINT_F, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
		{ SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
		{ SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
		{ BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
		{ BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};

	u32 val, fw_err;
	int fat;

	fw_err = t4_read_reg(adapter, PCIE_FW_A);
	if (fw_err & PCIE_FW_ERR_F)
		t4_report_fw_error(adapter);

	/* When the Firmware detects an internal error which normally
	 * wouldn't raise a Host Interrupt, it forces a CIM Timer0 interrupt
	 * in order to make sure the Host sees the Firmware Crash.  So
	 * if we have a Timer0 interrupt and don't see a Firmware Crash,
	 * ignore the Timer0 interrupt.
	 */

	val = t4_read_reg(adapter, CIM_HOST_INT_CAUSE_A);
	if (val & TIMER0INT_F)
		if (!(fw_err & PCIE_FW_ERR_F) ||
		    (PCIE_FW_EVAL_G(fw_err) != PCIE_FW_EVAL_CRASH))
			t4_write_reg(adapter, CIM_HOST_INT_CAUSE_A,
				     TIMER0INT_F);

	fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}
4643
4644/*
4645 * ULP RX interrupt handler.
4646 */
4647static void ulprx_intr_handler(struct adapter *adapter)
4648{
4649	static const struct intr_info ulprx_intr_info[] = {
4650		{ 0x1800000, "ULPRX context error", -1, 1 },
4651		{ 0x7fffff, "ULPRX parity error", -1, 1 },
4652		{ 0 }
4653	};
4654
4655	if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
4656		t4_fatal_err(adapter);
4657}
4658
4659/*
4660 * ULP TX interrupt handler.
4661 */
4662static void ulptx_intr_handler(struct adapter *adapter)
4663{
4664	static const struct intr_info ulptx_intr_info[] = {
4665		{ PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
4666		  0 },
4667		{ PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
4668		  0 },
4669		{ PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
4670		  0 },
4671		{ PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
4672		  0 },
4673		{ 0xfffffff, "ULPTX parity error", -1, 1 },
4674		{ 0 }
4675	};
4676
4677	if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
4678		t4_fatal_err(adapter);
4679}
4680
4681/*
4682 * PM TX interrupt handler.
4683 */
4684static void pmtx_intr_handler(struct adapter *adapter)
4685{
4686	static const struct intr_info pmtx_intr_info[] = {
4687		{ PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
4688		{ PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
4689		{ PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
4690		{ ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
4691		{ PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
4692		{ OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
4693		{ DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
4694		  -1, 1 },
4695		{ ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
4696		{ PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
4697		{ 0 }
4698	};
4699
4700	if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
4701		t4_fatal_err(adapter);
4702}
4703
4704/*
4705 * PM RX interrupt handler.
4706 */
4707static void pmrx_intr_handler(struct adapter *adapter)
4708{
4709	static const struct intr_info pmrx_intr_info[] = {
4710		{ ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
4711		{ PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
4712		{ OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
4713		{ DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
4714		  -1, 1 },
4715		{ IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
4716		{ PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
4717		{ 0 }
4718	};
4719
4720	if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
4721		t4_fatal_err(adapter);
4722}
4723
4724/*
4725 * CPL switch interrupt handler.
4726 */
4727static void cplsw_intr_handler(struct adapter *adapter)
4728{
4729	static const struct intr_info cplsw_intr_info[] = {
4730		{ CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
4731		{ CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
4732		{ TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
4733		{ SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
4734		{ CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
4735		{ ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
4736		{ 0 }
4737	};
4738
4739	if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
4740		t4_fatal_err(adapter);
4741}
4742
4743/*
4744 * LE interrupt handler.
4745 */
4746static void le_intr_handler(struct adapter *adap)
4747{
4748	enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
4749	static const struct intr_info le_intr_info[] = {
4750		{ LIPMISS_F, "LE LIP miss", -1, 0 },
4751		{ LIP0_F, "LE 0 LIP error", -1, 0 },
4752		{ PARITYERR_F, "LE parity error", -1, 1 },
4753		{ UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4754		{ REQQPARERR_F, "LE request queue parity error", -1, 1 },
4755		{ 0 }
4756	};
4757
4758	static struct intr_info t6_le_intr_info[] = {
4759		{ T6_LIPMISS_F, "LE LIP miss", -1, 0 },
4760		{ T6_LIP0_F, "LE 0 LIP error", -1, 0 },
4761		{ CMDTIDERR_F, "LE cmd tid error", -1, 1 },
4762		{ TCAMINTPERR_F, "LE parity error", -1, 1 },
4763		{ T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4764		{ SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
4765		{ HASHTBLMEMCRCERR_F, "LE hash table mem crc error", -1, 0 },
4766		{ 0 }
4767	};
4768
4769	if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
4770				  (chip <= CHELSIO_T5) ?
4771				  le_intr_info : t6_le_intr_info))
4772		t4_fatal_err(adap);
4773}
4774
4775/*
4776 * MPS interrupt handler.
4777 */
4778static void mps_intr_handler(struct adapter *adapter)
4779{
4780	static const struct intr_info mps_rx_intr_info[] = {
4781		{ 0xffffff, "MPS Rx parity error", -1, 1 },
4782		{ 0 }
4783	};
4784	static const struct intr_info mps_tx_intr_info[] = {
4785		{ TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
4786		{ NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4787		{ TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
4788		  -1, 1 },
4789		{ TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
4790		  -1, 1 },
4791		{ BUBBLE_F, "MPS Tx underflow", -1, 1 },
4792		{ SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
4793		{ FRMERR_F, "MPS Tx framing error", -1, 1 },
4794		{ 0 }
4795	};
4796	static const struct intr_info t6_mps_tx_intr_info[] = {
4797		{ TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
4798		{ NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4799		{ TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
4800		  -1, 1 },
4801		{ TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
4802		  -1, 1 },
4803		/* MPS Tx Bubble is normal for T6 */
4804		{ SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
4805		{ FRMERR_F, "MPS Tx framing error", -1, 1 },
4806		{ 0 }
4807	};
4808	static const struct intr_info mps_trc_intr_info[] = {
4809		{ FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
4810		{ PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
4811		  -1, 1 },
4812		{ MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
4813		{ 0 }
4814	};
4815	static const struct intr_info mps_stat_sram_intr_info[] = {
4816		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
4817		{ 0 }
4818	};
4819	static const struct intr_info mps_stat_tx_intr_info[] = {
4820		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
4821		{ 0 }
4822	};
4823	static const struct intr_info mps_stat_rx_intr_info[] = {
4824		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
4825		{ 0 }
4826	};
4827	static const struct intr_info mps_cls_intr_info[] = {
4828		{ MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
4829		{ MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
4830		{ HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
4831		{ 0 }
4832	};
4833
4834	int fat;
4835
4836	fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
4837				    mps_rx_intr_info) +
4838	      t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
4839				    is_t6(adapter->params.chip)
4840				    ? t6_mps_tx_intr_info
4841				    : mps_tx_intr_info) +
4842	      t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
4843				    mps_trc_intr_info) +
4844	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
4845				    mps_stat_sram_intr_info) +
4846	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
4847				    mps_stat_tx_intr_info) +
4848	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
4849				    mps_stat_rx_intr_info) +
4850	      t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
4851				    mps_cls_intr_info);
4852
4853	t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
4854	t4_read_reg(adapter, MPS_INT_CAUSE_A);                    /* flush */
4855	if (fat)
4856		t4_fatal_err(adapter);
4857}
4858
/* Memory-controller interrupt causes we act on: FIFO parity errors and
 * correctable/uncorrectable ECC data errors.
 */
#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
		      ECC_UE_INT_CAUSE_F)

/*
 * EDC/MC interrupt handler.
 *
 * @idx selects the memory: MEM_EDC0, MEM_EDC1, MEM_MC (MC0), or MC1.
 * Correctable ECC errors are only logged (rate-limited); parity and
 * uncorrectable ECC errors are fatal.
 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };

	unsigned int addr, cnt_addr, v;

	/* Pick the cause and ECC-status registers for the given memory;
	 * the MC registers moved between T4 and later chips.
	 */
	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE_A, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
	} else if (idx == MEM_MC) {
		if (is_t4(adapter->params.chip)) {
			addr = MC_INT_CAUSE_A;
			cnt_addr = MC_ECC_STATUS_A;
		} else {
			addr = MC_P_INT_CAUSE_A;
			cnt_addr = MC_P_ECC_STATUS_A;
		}
	} else {
		addr = MC_REG(MC_P_INT_CAUSE_A, 1);
		cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE_F)
		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
			  name[idx]);
	if (v & ECC_CE_INT_CAUSE_F) {
		u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));

		t4_edc_err_read(adapter, idx);

		/* Reset the correctable-error counter after reading it. */
		t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
		if (printk_ratelimit())
			dev_warn(adapter->pdev_dev,
				 "%u %s correctable ECC data error%s\n",
				 cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE_F)
		dev_alert(adapter->pdev_dev,
			  "%s uncorrectable ECC data error\n", name[idx]);

	/* Clear the causes we handled; parity/UE errors are fatal. */
	t4_write_reg(adapter, addr, v);
	if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
		t4_fatal_err(adapter);
}
4910
4911/*
4912 * MA interrupt handler.
4913 */
4914static void ma_intr_handler(struct adapter *adap)
4915{
4916	u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
4917
4918	if (status & MEM_PERR_INT_CAUSE_F) {
4919		dev_alert(adap->pdev_dev,
4920			  "MA parity error, parity status %#x\n",
4921			  t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
4922		if (is_t5(adap->params.chip))
4923			dev_alert(adap->pdev_dev,
4924				  "MA parity error, parity status %#x\n",
4925				  t4_read_reg(adap,
4926					      MA_PARITY_ERROR_STATUS2_A));
4927	}
4928	if (status & MEM_WRAP_INT_CAUSE_F) {
4929		v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
4930		dev_alert(adap->pdev_dev, "MA address wrap-around error by "
4931			  "client %u to address %#x\n",
4932			  MEM_WRAP_CLIENT_NUM_G(v),
4933			  MEM_WRAP_ADDRESS_G(v) << 4);
4934	}
4935	t4_write_reg(adap, MA_INT_CAUSE_A, status);
4936	t4_fatal_err(adap);
4937}
4938
4939/*
4940 * SMB interrupt handler.
4941 */
4942static void smb_intr_handler(struct adapter *adap)
4943{
4944	static const struct intr_info smb_intr_info[] = {
4945		{ MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
4946		{ MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
4947		{ SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
4948		{ 0 }
4949	};
4950
4951	if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
4952		t4_fatal_err(adap);
4953}
4954
4955/*
4956 * NC-SI interrupt handler.
4957 */
4958static void ncsi_intr_handler(struct adapter *adap)
4959{
4960	static const struct intr_info ncsi_intr_info[] = {
4961		{ CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
4962		{ MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
4963		{ TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
4964		{ RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
4965		{ 0 }
4966	};
4967
4968	if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
4969		t4_fatal_err(adap);
4970}
4971
4972/*
4973 * XGMAC interrupt handler.
4974 */
4975static void xgmac_intr_handler(struct adapter *adap, int port)
4976{
4977	u32 v, int_cause_reg;
4978
4979	if (is_t4(adap->params.chip))
4980		int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
4981	else
4982		int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
4983
4984	v = t4_read_reg(adap, int_cause_reg);
4985
4986	v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
4987	if (!v)
4988		return;
4989
4990	if (v & TXFIFO_PRTY_ERR_F)
4991		dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
4992			  port);
4993	if (v & RXFIFO_PRTY_ERR_F)
4994		dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
4995			  port);
4996	t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
4997	t4_fatal_err(adap);
4998}
4999
5000/*
5001 * PL interrupt handler.
5002 */
5003static void pl_intr_handler(struct adapter *adap)
5004{
5005	static const struct intr_info pl_intr_info[] = {
5006		{ FATALPERR_F, "T4 fatal parity error", -1, 1 },
5007		{ PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
5008		{ 0 }
5009	};
5010
5011	if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
5012		t4_fatal_err(adap);
5013}
5014
/* Top-level cause bits handled per-PF and globally, respectively. */
#define PF_INTR_MASK (PFSW_F)
#define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
		EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
		CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F)

/**
 *	t4_slow_intr_handler - control path interrupt handler
 *	@adapter: the adapter
 *
 *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
 *	The designation 'slow' is because it involves register reads, while
 *	data interrupts typically don't involve any MMIOs.
 *
 *	Returns 0 if no global interrupt was pending, 1 otherwise.
 */
int t4_slow_intr_handler(struct adapter *adapter)
{
	/* There are rare cases where a PL_INT_CAUSE bit may end up getting
	 * set when the corresponding PL_INT_ENABLE bit isn't set.  It's
	 * easiest just to mask that case here.
	 */
	u32 raw_cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
	u32 enable = t4_read_reg(adapter, PL_INT_ENABLE_A);
	u32 cause = raw_cause & enable;

	if (!(cause & GLBL_INTR_MASK))
		return 0;

	/* Dispatch each pending cause bit to its module handler. */
	if (cause & CIM_F)
		cim_intr_handler(adapter);
	if (cause & MPS_F)
		mps_intr_handler(adapter);
	if (cause & NCSI_F)
		ncsi_intr_handler(adapter);
	if (cause & PL_F)
		pl_intr_handler(adapter);
	if (cause & SMB_F)
		smb_intr_handler(adapter);
	if (cause & XGMAC0_F)
		xgmac_intr_handler(adapter, 0);
	if (cause & XGMAC1_F)
		xgmac_intr_handler(adapter, 1);
	if (cause & XGMAC_KR0_F)
		xgmac_intr_handler(adapter, 2);
	if (cause & XGMAC_KR1_F)
		xgmac_intr_handler(adapter, 3);
	if (cause & PCIE_F)
		pcie_intr_handler(adapter);
	if (cause & MC_F)
		mem_intr_handler(adapter, MEM_MC);
	if (is_t5(adapter->params.chip) && (cause & MC1_F))
		mem_intr_handler(adapter, MEM_MC1);
	if (cause & EDC0_F)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & EDC1_F)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & LE_F)
		le_intr_handler(adapter);
	if (cause & TP_F)
		tp_intr_handler(adapter);
	if (cause & MA_F)
		ma_intr_handler(adapter);
	if (cause & PM_TX_F)
		pmtx_intr_handler(adapter);
	if (cause & PM_RX_F)
		pmrx_intr_handler(adapter);
	if (cause & ULP_RX_F)
		ulprx_intr_handler(adapter);
	if (cause & CPL_SWITCH_F)
		cplsw_intr_handler(adapter);
	if (cause & SGE_F)
		sge_intr_handler(adapter);
	if (cause & ULP_TX_F)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, PL_INT_CAUSE_A, raw_cause & GLBL_INTR_MASK);
	(void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
	return 1;
}
5092
5093/**
5094 *	t4_intr_enable - enable interrupts
5095 *	@adapter: the adapter whose interrupts should be enabled
5096 *
5097 *	Enable PF-specific interrupts for the calling function and the top-level
5098 *	interrupt concentrator for global interrupts.  Interrupts are already
5099 *	enabled at each module,	here we just enable the roots of the interrupt
5100 *	hierarchies.
5101 *
5102 *	Note: this function should be called only when the driver manages
5103 *	non PF-specific interrupts from the various HW modules.  Only one PCI
5104 *	function at a time should be doing this.
5105 */
5106void t4_intr_enable(struct adapter *adapter)
5107{
5108	u32 val = 0;
5109	u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
5110	u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
5111			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
5112
5113	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
5114		val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
5115	t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
5116		     ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
5117		     ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
5118		     ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
5119		     ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
5120		     ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
5121		     DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
5122	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
5123	t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
5124}
5125
5126/**
5127 *	t4_intr_disable - disable interrupts
5128 *	@adapter: the adapter whose interrupts should be disabled
5129 *
5130 *	Disable interrupts.  We only disable the top-level interrupt
5131 *	concentrators.  The caller must be a PCI function managing global
5132 *	interrupts.
5133 */
5134void t4_intr_disable(struct adapter *adapter)
5135{
5136	u32 whoami, pf;
5137
5138	if (pci_channel_offline(adapter->pdev))
5139		return;
5140
5141	whoami = t4_read_reg(adapter, PL_WHOAMI_A);
5142	pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
5143			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
5144
5145	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
5146	t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
5147}
5148
5149unsigned int t4_chip_rss_size(struct adapter *adap)
5150{
5151	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
5152		return RSS_NENTRIES;
5153	else
5154		return T6_RSS_NENTRIES;
5155}
5156
5157/**
5158 *	t4_config_rss_range - configure a portion of the RSS mapping table
5159 *	@adapter: the adapter
5160 *	@mbox: mbox to use for the FW command
5161 *	@viid: virtual interface whose RSS subtable is to be written
5162 *	@start: start entry in the table to write
5163 *	@n: how many table entries to write
5164 *	@rspq: values for the response queue lookup table
5165 *	@nrspq: number of values in @rspq
5166 *
5167 *	Programs the selected part of the VI's RSS mapping table with the
5168 *	provided values.  If @nrspq < @n the supplied values are used repeatedly
5169 *	until the full table range is populated.
5170 *
5171 *	The caller must ensure the values in @rspq are in the range allowed for
5172 *	@viid.
5173 */
5174int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
5175			int start, int n, const u16 *rspq, unsigned int nrspq)
5176{
5177	int ret;
5178	const u16 *rsp = rspq;
5179	const u16 *rsp_end = rspq + nrspq;
5180	struct fw_rss_ind_tbl_cmd cmd;
5181
5182	memset(&cmd, 0, sizeof(cmd));
5183	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
5184			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5185			       FW_RSS_IND_TBL_CMD_VIID_V(viid));
5186	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5187
5188	/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
5189	while (n > 0) {
5190		int nq = min(n, 32);
5191		__be32 *qp = &cmd.iq0_to_iq2;
5192
5193		cmd.niqid = cpu_to_be16(nq);
5194		cmd.startidx = cpu_to_be16(start);
5195
5196		start += nq;
5197		n -= nq;
5198
5199		while (nq > 0) {
5200			unsigned int v;
5201
5202			v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
5203			if (++rsp >= rsp_end)
5204				rsp = rspq;
5205			v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
5206			if (++rsp >= rsp_end)
5207				rsp = rspq;
5208			v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
5209			if (++rsp >= rsp_end)
5210				rsp = rspq;
5211
5212			*qp++ = cpu_to_be32(v);
5213			nq -= 3;
5214		}
5215
5216		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
5217		if (ret)
5218			return ret;
5219	}
5220	return 0;
5221}
5222
5223/**
5224 *	t4_config_glbl_rss - configure the global RSS mode
5225 *	@adapter: the adapter
5226 *	@mbox: mbox to use for the FW command
5227 *	@mode: global RSS mode
5228 *	@flags: mode-specific flags
5229 *
5230 *	Sets the global RSS mode.
5231 */
5232int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
5233		       unsigned int flags)
5234{
5235	struct fw_rss_glb_config_cmd c;
5236
5237	memset(&c, 0, sizeof(c));
5238	c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
5239				    FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
5240	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5241	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
5242		c.u.manual.mode_pkd =
5243			cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
5244	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
5245		c.u.basicvirtual.mode_pkd =
5246			cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
5247		c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
5248	} else
5249		return -EINVAL;
5250	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5251}
5252
5253/**
5254 *	t4_config_vi_rss - configure per VI RSS settings
5255 *	@adapter: the adapter
5256 *	@mbox: mbox to use for the FW command
5257 *	@viid: the VI id
5258 *	@flags: RSS flags
5259 *	@defq: id of the default RSS queue for the VI.
5260 *
5261 *	Configures VI-specific RSS properties.
5262 */
5263int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
5264		     unsigned int flags, unsigned int defq)
5265{
5266	struct fw_rss_vi_config_cmd c;
5267
5268	memset(&c, 0, sizeof(c));
5269	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
5270				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5271				   FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
5272	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5273	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
5274					FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
5275	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5276}
5277
/* Read an RSS table row */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	/* Writing @row with the upper bits set requests a table read-back;
	 * NOTE(review): 0xfff00000 presumably forms the read-request /
	 * don't-care field of TP_RSS_LKP_TABLE — confirm against the
	 * register spec.  We then poll for LKPTBLROWVLD to go to 1 (up to
	 * 5 attempts, no inter-attempt delay) and hand the latched register
	 * contents back through @val.
	 */
	t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
				   5, 0, val);
}
5285
5286/**
5287 *	t4_read_rss - read the contents of the RSS mapping table
5288 *	@adapter: the adapter
5289 *	@map: holds the contents of the RSS mapping table
5290 *
5291 *	Reads the contents of the RSS hash->queue mapping table.
5292 */
5293int t4_read_rss(struct adapter *adapter, u16 *map)
5294{
5295	int i, ret, nentries;
5296	u32 val;
5297
5298	nentries = t4_chip_rss_size(adapter);
5299	for (i = 0; i < nentries / 2; ++i) {
5300		ret = rd_rss_row(adapter, i, &val);
5301		if (ret)
5302			return ret;
5303		*map++ = LKPTBLQUEUE0_G(val);
5304		*map++ = LKPTBLQUEUE1_G(val);
5305	}
5306	return 0;
5307}
5308
/* Use the firmware LDST mailbox path for indirect register access only when
 * the firmware is up (CXGB4_FW_OK) and backdoor access hasn't been requested
 * via adap->use_bd.
 */
static unsigned int t4_use_ldst(struct adapter *adap)
{
	return (adap->flags & CXGB4_FW_OK) && !adap->use_bd;
}
5313
5314/**
5315 * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
5316 * @adap: the adapter
5317 * @cmd: TP fw ldst address space type
5318 * @vals: where the indirect register values are stored/written
5319 * @nregs: how many indirect registers to read/write
5320 * @start_index: index of first indirect register to read/write
5321 * @rw: Read (1) or Write (0)
5322 * @sleep_ok: if true we may sleep while awaiting command completion
5323 *
5324 * Access TP indirect registers through LDST
5325 */
5326static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
5327			    unsigned int nregs, unsigned int start_index,
5328			    unsigned int rw, bool sleep_ok)
5329{
5330	int ret = 0;
5331	unsigned int i;
5332	struct fw_ldst_cmd c;
5333
5334	for (i = 0; i < nregs; i++) {
5335		memset(&c, 0, sizeof(c));
5336		c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
5337						FW_CMD_REQUEST_F |
5338						(rw ? FW_CMD_READ_F :
5339						      FW_CMD_WRITE_F) |
5340						FW_LDST_CMD_ADDRSPACE_V(cmd));
5341		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5342
5343		c.u.addrval.addr = cpu_to_be32(start_index + i);
5344		c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
5345		ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
5346				      sleep_ok);
5347		if (ret)
5348			return ret;
5349
5350		if (rw)
5351			vals[i] = be32_to_cpu(c.u.addrval.val);
5352	}
5353	return 0;
5354}
5355
5356/**
5357 * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
5358 * @adap: the adapter
5359 * @reg_addr: Address Register
5360 * @reg_data: Data register
5361 * @buff: where the indirect register values are stored/written
5362 * @nregs: how many indirect registers to read/write
5363 * @start_index: index of first indirect register to read/write
5364 * @rw: READ(1) or WRITE(0)
5365 * @sleep_ok: if true we may sleep while awaiting command completion
5366 *
5367 * Read/Write TP indirect registers through LDST if possible.
5368 * Else, use backdoor access
5369 **/
5370static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
5371			      u32 *buff, u32 nregs, u32 start_index, int rw,
5372			      bool sleep_ok)
5373{
5374	int rc = -EINVAL;
5375	int cmd;
5376
5377	switch (reg_addr) {
5378	case TP_PIO_ADDR_A:
5379		cmd = FW_LDST_ADDRSPC_TP_PIO;
5380		break;
5381	case TP_TM_PIO_ADDR_A:
5382		cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
5383		break;
5384	case TP_MIB_INDEX_A:
5385		cmd = FW_LDST_ADDRSPC_TP_MIB;
5386		break;
5387	default:
5388		goto indirect_access;
5389	}
5390
5391	if (t4_use_ldst(adap))
5392		rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
5393				      sleep_ok);
5394
5395indirect_access:
5396
5397	if (rc) {
5398		if (rw)
5399			t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
5400					 start_index);
5401		else
5402			t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
5403					  start_index);
5404	}
5405}
5406
5407/**
5408 * t4_tp_pio_read - Read TP PIO registers
5409 * @adap: the adapter
5410 * @buff: where the indirect register values are written
5411 * @nregs: how many indirect registers to read
5412 * @start_index: index of first indirect register to read
5413 * @sleep_ok: if true we may sleep while awaiting command completion
5414 *
5415 * Read TP PIO Registers
5416 **/
5417void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5418		    u32 start_index, bool sleep_ok)
5419{
5420	t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
5421			  start_index, 1, sleep_ok);
5422}
5423
5424/**
5425 * t4_tp_pio_write - Write TP PIO registers
5426 * @adap: the adapter
5427 * @buff: where the indirect register values are stored
5428 * @nregs: how many indirect registers to write
5429 * @start_index: index of first indirect register to write
5430 * @sleep_ok: if true we may sleep while awaiting command completion
5431 *
5432 * Write TP PIO Registers
5433 **/
5434static void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs,
5435			    u32 start_index, bool sleep_ok)
5436{
5437	t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
5438			  start_index, 0, sleep_ok);
5439}
5440
5441/**
5442 * t4_tp_tm_pio_read - Read TP TM PIO registers
5443 * @adap: the adapter
5444 * @buff: where the indirect register values are written
5445 * @nregs: how many indirect registers to read
5446 * @start_index: index of first indirect register to read
5447 * @sleep_ok: if true we may sleep while awaiting command completion
5448 *
5449 * Read TP TM PIO Registers
5450 **/
5451void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5452		       u32 start_index, bool sleep_ok)
5453{
5454	t4_tp_indirect_rw(adap, TP_TM_PIO_ADDR_A, TP_TM_PIO_DATA_A, buff,
5455			  nregs, start_index, 1, sleep_ok);
5456}
5457
5458/**
5459 * t4_tp_mib_read - Read TP MIB registers
5460 * @adap: the adapter
5461 * @buff: where the indirect register values are written
5462 * @nregs: how many indirect registers to read
5463 * @start_index: index of first indirect register to read
5464 * @sleep_ok: if true we may sleep while awaiting command completion
5465 *
5466 * Read TP MIB Registers
5467 **/
5468void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
5469		    bool sleep_ok)
5470{
5471	t4_tp_indirect_rw(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, buff, nregs,
5472			  start_index, 1, sleep_ok);
5473}
5474
5475/**
5476 *	t4_read_rss_key - read the global RSS key
5477 *	@adap: the adapter
5478 *	@key: 10-entry array holding the 320-bit RSS key
5479 *      @sleep_ok: if true we may sleep while awaiting command completion
5480 *
5481 *	Reads the global 320-bit RSS key.
5482 */
5483void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
5484{
5485	t4_tp_pio_read(adap, key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
5486}
5487
5488/**
5489 *	t4_write_rss_key - program one of the RSS keys
5490 *	@adap: the adapter
5491 *	@key: 10-entry array holding the 320-bit RSS key
5492 *	@idx: which RSS key to write
5493 *      @sleep_ok: if true we may sleep while awaiting command completion
5494 *
5495 *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
5496 *	0..15 the corresponding entry in the RSS key table is written,
5497 *	otherwise the global RSS key is written.
5498 */
5499void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
5500		      bool sleep_ok)
5501{
5502	u8 rss_key_addr_cnt = 16;
5503	u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
5504
5505	/* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
5506	 * allows access to key addresses 16-63 by using KeyWrAddrX
5507	 * as index[5:4](upper 2) into key table
5508	 */
5509	if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
5510	    (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
5511		rss_key_addr_cnt = 32;
5512
5513	t4_tp_pio_write(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
5514
5515	if (idx >= 0 && idx < rss_key_addr_cnt) {
5516		if (rss_key_addr_cnt > 16)
5517			t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
5518				     KEYWRADDRX_V(idx >> 4) |
5519				     T6_VFWRADDR_V(idx) | KEYWREN_F);
5520		else
5521			t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
5522				     KEYWRADDR_V(idx) | KEYWREN_F);
5523	}
5524}
5525
5526/**
5527 *	t4_read_rss_pf_config - read PF RSS Configuration Table
5528 *	@adapter: the adapter
5529 *	@index: the entry in the PF RSS table to read
5530 *	@valp: where to store the returned value
5531 *      @sleep_ok: if true we may sleep while awaiting command completion
5532 *
5533 *	Reads the PF RSS Configuration Table at the specified index and returns
5534 *	the value found there.
5535 */
5536void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
5537			   u32 *valp, bool sleep_ok)
5538{
5539	t4_tp_pio_read(adapter, valp, 1, TP_RSS_PF0_CONFIG_A + index, sleep_ok);
5540}
5541
5542/**
5543 *	t4_read_rss_vf_config - read VF RSS Configuration Table
5544 *	@adapter: the adapter
5545 *	@index: the entry in the VF RSS table to read
5546 *	@vfl: where to store the returned VFL
5547 *	@vfh: where to store the returned VFH
5548 *      @sleep_ok: if true we may sleep while awaiting command completion
5549 *
5550 *	Reads the VF RSS Configuration Table at the specified index and returns
5551 *	the (VFL, VFH) values found there.
5552 */
5553void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5554			   u32 *vfl, u32 *vfh, bool sleep_ok)
5555{
5556	u32 vrt, mask, data;
5557
5558	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
5559		mask = VFWRADDR_V(VFWRADDR_M);
5560		data = VFWRADDR_V(index);
5561	} else {
5562		 mask =  T6_VFWRADDR_V(T6_VFWRADDR_M);
5563		 data = T6_VFWRADDR_V(index);
5564	}
5565
5566	/* Request that the index'th VF Table values be read into VFL/VFH.
5567	 */
5568	vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
5569	vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
5570	vrt |= data | VFRDEN_F;
5571	t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
5572
5573	/* Grab the VFL/VFH values ...
5574	 */
5575	t4_tp_pio_read(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, sleep_ok);
5576	t4_tp_pio_read(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, sleep_ok);
5577}
5578
5579/**
5580 *	t4_read_rss_pf_map - read PF RSS Map
5581 *	@adapter: the adapter
5582 *      @sleep_ok: if true we may sleep while awaiting command completion
5583 *
5584 *	Reads the PF RSS Map register and returns its value.
5585 */
5586u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
5587{
5588	u32 pfmap;
5589
5590	t4_tp_pio_read(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, sleep_ok);
5591	return pfmap;
5592}
5593
5594/**
5595 *	t4_read_rss_pf_mask - read PF RSS Mask
5596 *	@adapter: the adapter
5597 *      @sleep_ok: if true we may sleep while awaiting command completion
5598 *
5599 *	Reads the PF RSS Mask register and returns its value.
5600 */
5601u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
5602{
5603	u32 pfmask;
5604
5605	t4_tp_pio_read(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, sleep_ok);
5606	return pfmask;
5607}
5608
5609/**
5610 *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
5611 *	@adap: the adapter
5612 *	@v4: holds the TCP/IP counter values
5613 *	@v6: holds the TCP/IPv6 counter values
5614 *      @sleep_ok: if true we may sleep while awaiting command completion
5615 *
5616 *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
5617 *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
5618 */
5619void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
5620			 struct tp_tcp_stats *v6, bool sleep_ok)
5621{
5622	u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
5623
5624#define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
5625#define STAT(x)     val[STAT_IDX(x)]
5626#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
5627
5628	if (v4) {
5629		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5630			       TP_MIB_TCP_OUT_RST_A, sleep_ok);
5631		v4->tcp_out_rsts = STAT(OUT_RST);
5632		v4->tcp_in_segs  = STAT64(IN_SEG);
5633		v4->tcp_out_segs = STAT64(OUT_SEG);
5634		v4->tcp_retrans_segs = STAT64(RXT_SEG);
5635	}
5636	if (v6) {
5637		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5638			       TP_MIB_TCP_V6OUT_RST_A, sleep_ok);
5639		v6->tcp_out_rsts = STAT(OUT_RST);
5640		v6->tcp_in_segs  = STAT64(IN_SEG);
5641		v6->tcp_out_segs = STAT64(OUT_SEG);
5642		v6->tcp_retrans_segs = STAT64(RXT_SEG);
5643	}
5644#undef STAT64
5645#undef STAT
5646#undef STAT_IDX
5647}
5648
5649/**
5650 *	t4_tp_get_err_stats - read TP's error MIB counters
5651 *	@adap: the adapter
5652 *	@st: holds the counter values
5653 *      @sleep_ok: if true we may sleep while awaiting command completion
5654 *
5655 *	Returns the values of TP's error counters.
5656 */
5657void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
5658			 bool sleep_ok)
5659{
5660	int nchan = adap->params.arch.nchan;
5661
5662	t4_tp_mib_read(adap, st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A,
5663		       sleep_ok);
5664	t4_tp_mib_read(adap, st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A,
5665		       sleep_ok);
5666	t4_tp_mib_read(adap, st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A,
5667		       sleep_ok);
5668	t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
5669		       TP_MIB_TNL_CNG_DROP_0_A, sleep_ok);
5670	t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
5671		       TP_MIB_OFD_CHN_DROP_0_A, sleep_ok);
5672	t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A,
5673		       sleep_ok);
5674	t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
5675		       TP_MIB_OFD_VLN_DROP_0_A, sleep_ok);
5676	t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
5677		       TP_MIB_TCP_V6IN_ERR_0_A, sleep_ok);
5678	t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A,
5679		       sleep_ok);
5680}
5681
5682/**
5683 *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
5684 *	@adap: the adapter
5685 *	@st: holds the counter values
5686 *      @sleep_ok: if true we may sleep while awaiting command completion
5687 *
5688 *	Returns the values of TP's CPL counters.
5689 */
5690void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
5691			 bool sleep_ok)
5692{
5693	int nchan = adap->params.arch.nchan;
5694
5695	t4_tp_mib_read(adap, st->req, nchan, TP_MIB_CPL_IN_REQ_0_A, sleep_ok);
5696
5697	t4_tp_mib_read(adap, st->rsp, nchan, TP_MIB_CPL_OUT_RSP_0_A, sleep_ok);
5698}
5699
5700/**
5701 *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
5702 *	@adap: the adapter
5703 *	@st: holds the counter values
5704 *      @sleep_ok: if true we may sleep while awaiting command completion
5705 *
5706 *	Returns the values of TP's RDMA counters.
5707 */
5708void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
5709			  bool sleep_ok)
5710{
5711	t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, TP_MIB_RQE_DFR_PKT_A,
5712		       sleep_ok);
5713}
5714
5715/**
5716 *	t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
5717 *	@adap: the adapter
5718 *	@idx: the port index
5719 *	@st: holds the counter values
5720 *      @sleep_ok: if true we may sleep while awaiting command completion
5721 *
5722 *	Returns the values of TP's FCoE counters for the selected port.
5723 */
5724void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
5725		       struct tp_fcoe_stats *st, bool sleep_ok)
5726{
5727	u32 val[2];
5728
5729	t4_tp_mib_read(adap, &st->frames_ddp, 1, TP_MIB_FCOE_DDP_0_A + idx,
5730		       sleep_ok);
5731
5732	t4_tp_mib_read(adap, &st->frames_drop, 1,
5733		       TP_MIB_FCOE_DROP_0_A + idx, sleep_ok);
5734
5735	t4_tp_mib_read(adap, val, 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx,
5736		       sleep_ok);
5737
5738	st->octets_ddp = ((u64)val[0] << 32) | val[1];
5739}
5740
5741/**
5742 *	t4_get_usm_stats - read TP's non-TCP DDP MIB counters
5743 *	@adap: the adapter
5744 *	@st: holds the counter values
5745 *      @sleep_ok: if true we may sleep while awaiting command completion
5746 *
5747 *	Returns the values of TP's counters for non-TCP directly-placed packets.
5748 */
5749void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
5750		      bool sleep_ok)
5751{
5752	u32 val[4];
5753
5754	t4_tp_mib_read(adap, val, 4, TP_MIB_USM_PKTS_A, sleep_ok);
5755	st->frames = val[0];
5756	st->drops = val[1];
5757	st->octets = ((u64)val[2] << 32) | val[3];
5758}
5759
5760/**
5761 *	t4_read_mtu_tbl - returns the values in the HW path MTU table
5762 *	@adap: the adapter
5763 *	@mtus: where to store the MTU values
5764 *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
5765 *
5766 *	Reads the HW path MTU table.
5767 */
5768void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
5769{
5770	u32 v;
5771	int i;
5772
5773	for (i = 0; i < NMTUS; ++i) {
5774		t4_write_reg(adap, TP_MTU_TABLE_A,
5775			     MTUINDEX_V(0xff) | MTUVALUE_V(i));
5776		v = t4_read_reg(adap, TP_MTU_TABLE_A);
5777		mtus[i] = MTUVALUE_G(v);
5778		if (mtu_log)
5779			mtu_log[i] = MTUWIDTH_G(v);
5780	}
5781}
5782
5783/**
5784 *	t4_read_cong_tbl - reads the congestion control table
5785 *	@adap: the adapter
5786 *	@incr: where to store the alpha values
5787 *
5788 *	Reads the additive increments programmed into the HW congestion
5789 *	control table.
5790 */
5791void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
5792{
5793	unsigned int mtu, w;
5794
5795	for (mtu = 0; mtu < NMTUS; ++mtu)
5796		for (w = 0; w < NCCTRL_WIN; ++w) {
5797			t4_write_reg(adap, TP_CCTRL_TABLE_A,
5798				     ROWINDEX_V(0xffff) | (mtu << 5) | w);
5799			incr[mtu][w] = (u16)t4_read_reg(adap,
5800						TP_CCTRL_TABLE_A) & 0x1fff;
5801		}
5802}
5803
5804/**
5805 *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5806 *	@adap: the adapter
5807 *	@addr: the indirect TP register address
5808 *	@mask: specifies the field within the register to modify
5809 *	@val: new value for the field
5810 *
5811 *	Sets a field of an indirect TP register to the given value.
5812 */
5813void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5814			    unsigned int mask, unsigned int val)
5815{
5816	t4_write_reg(adap, TP_PIO_ADDR_A, addr);
5817	val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
5818	t4_write_reg(adap, TP_PIO_DATA_A, val);
5819}
5820
5821/**
5822 *	init_cong_ctrl - initialize congestion control parameters
5823 *	@a: the alpha values for congestion control
5824 *	@b: the beta values for congestion control
5825 *
5826 *	Initialize the congestion control parameters.
5827 */
5828static void init_cong_ctrl(unsigned short *a, unsigned short *b)
5829{
5830	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
5831	a[9] = 2;
5832	a[10] = 3;
5833	a[11] = 4;
5834	a[12] = 5;
5835	a[13] = 6;
5836	a[14] = 7;
5837	a[15] = 8;
5838	a[16] = 9;
5839	a[17] = 10;
5840	a[18] = 14;
5841	a[19] = 17;
5842	a[20] = 21;
5843	a[21] = 25;
5844	a[22] = 30;
5845	a[23] = 35;
5846	a[24] = 45;
5847	a[25] = 60;
5848	a[26] = 80;
5849	a[27] = 100;
5850	a[28] = 200;
5851	a[29] = 300;
5852	a[30] = 400;
5853	a[31] = 500;
5854
5855	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
5856	b[9] = b[10] = 1;
5857	b[11] = b[12] = 2;
5858	b[13] = b[14] = b[15] = b[16] = 3;
5859	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
5860	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
5861	b[28] = b[29] = 6;
5862	b[30] = b[31] = 7;
5863}
5864
5865/* The minimum additive increment value for the congestion control table */
5866#define CC_MIN_INCR 2U
5867
5868/**
5869 *	t4_load_mtus - write the MTU and congestion control HW tables
5870 *	@adap: the adapter
5871 *	@mtus: the values for the MTU table
5872 *	@alpha: the values for the congestion control alpha parameter
5873 *	@beta: the values for the congestion control beta parameter
5874 *
5875 *	Write the HW MTU table with the supplied MTUs and the high-speed
5876 *	congestion control table with the supplied alpha, beta, and MTUs.
5877 *	We write the two tables together because the additive increments
5878 *	depend on the MTUs.
5879 */
5880void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
5881		  const unsigned short *alpha, const unsigned short *beta)
5882{
5883	static const unsigned int avg_pkts[NCCTRL_WIN] = {
5884		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
5885		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
5886		28672, 40960, 57344, 81920, 114688, 163840, 229376
5887	};
5888
5889	unsigned int i, w;
5890
5891	for (i = 0; i < NMTUS; ++i) {
5892		unsigned int mtu = mtus[i];
5893		unsigned int log2 = fls(mtu);
5894
5895		if (!(mtu & ((1 << log2) >> 2)))     /* round */
5896			log2--;
5897		t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
5898			     MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
5899
5900		for (w = 0; w < NCCTRL_WIN; ++w) {
5901			unsigned int inc;
5902
5903			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
5904				  CC_MIN_INCR);
5905
5906			t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
5907				     (w << 16) | (beta[w] << 13) | inc);
5908		}
5909	}
5910}
5911
5912/* Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5913 * clocks.  The formula is
5914 *
5915 * bytes/s = bytes256 * 256 * ClkFreq / 4096
5916 *
5917 * which is equivalent to
5918 *
5919 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
5920 */
5921static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
5922{
5923	u64 v = bytes256 * adap->params.vpd.cclk;
5924
5925	return v * 62 + v / 2;
5926}
5927
5928/**
5929 *	t4_get_chan_txrate - get the current per channel Tx rates
5930 *	@adap: the adapter
5931 *	@nic_rate: rates for NIC traffic
5932 *	@ofld_rate: rates for offloaded traffic
5933 *
5934 *	Return the current Tx rates in bytes/s for NIC and offloaded traffic
5935 *	for each channel.
5936 */
5937void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
5938{
5939	u32 v;
5940
5941	v = t4_read_reg(adap, TP_TX_TRATE_A);
5942	nic_rate[0] = chan_rate(adap, TNLRATE0_G(v));
5943	nic_rate[1] = chan_rate(adap, TNLRATE1_G(v));
5944	if (adap->params.arch.nchan == NCHAN) {
5945		nic_rate[2] = chan_rate(adap, TNLRATE2_G(v));
5946		nic_rate[3] = chan_rate(adap, TNLRATE3_G(v));
5947	}
5948
5949	v = t4_read_reg(adap, TP_TX_ORATE_A);
5950	ofld_rate[0] = chan_rate(adap, OFDRATE0_G(v));
5951	ofld_rate[1] = chan_rate(adap, OFDRATE1_G(v));
5952	if (adap->params.arch.nchan == NCHAN) {
5953		ofld_rate[2] = chan_rate(adap, OFDRATE2_G(v));
5954		ofld_rate[3] = chan_rate(adap, OFDRATE3_G(v));
5955	}
5956}
5957
5958/**
5959 *	t4_set_trace_filter - configure one of the tracing filters
5960 *	@adap: the adapter
5961 *	@tp: the desired trace filter parameters
5962 *	@idx: which filter to configure
5963 *	@enable: whether to enable or disable the filter
5964 *
5965 *	Configures one of the tracing filters available in HW.  If @enable is
5966 *	%0 @tp is not examined and may be %NULL. The user is responsible to
5967 *	set the single/multiple trace mode by writing to MPS_TRC_CFG_A register
5968 */
5969int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
5970			int idx, int enable)
5971{
5972	int i, ofst = idx * 4;
5973	u32 data_reg, mask_reg, cfg;
5974
5975	if (!enable) {
5976		t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
5977		return 0;
5978	}
5979
5980	cfg = t4_read_reg(adap, MPS_TRC_CFG_A);
5981	if (cfg & TRCMULTIFILTER_F) {
5982		/* If multiple tracers are enabled, then maximum
5983		 * capture size is 2.5KB (FIFO size of a single channel)
5984		 * minus 2 flits for CPL_TRACE_PKT header.
5985		 */
5986		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
5987			return -EINVAL;
5988	} else {
5989		/* If multiple tracers are disabled, to avoid deadlocks
5990		 * maximum packet capture size of 9600 bytes is recommended.
5991		 * Also in this mode, only trace0 can be enabled and running.
5992		 */
5993		if (tp->snap_len > 9600 || idx)
5994			return -EINVAL;
5995	}
5996
5997	if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
5998	    tp->skip_len > TFLENGTH_M || tp->skip_ofst > TFOFFSET_M ||
5999	    tp->min_len > TFMINPKTSIZE_M)
6000		return -EINVAL;
6001
6002	/* stop the tracer we'll be changing */
6003	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
6004
6005	idx *= (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A);
6006	data_reg = MPS_TRC_FILTER0_MATCH_A + idx;
6007	mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + idx;
6008
6009	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
6010		t4_write_reg(adap, data_reg, tp->data[i]);
6011		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
6012	}
6013	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst,
6014		     TFCAPTUREMAX_V(tp->snap_len) |
6015		     TFMINPKTSIZE_V(tp->min_len));
6016	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst,
6017		     TFOFFSET_V(tp->skip_ofst) | TFLENGTH_V(tp->skip_len) |
6018		     (is_t4(adap->params.chip) ?
6019		     TFPORT_V(tp->port) | TFEN_F | TFINVERTMATCH_V(tp->invert) :
6020		     T5_TFPORT_V(tp->port) | T5_TFEN_F |
6021		     T5_TFINVERTMATCH_V(tp->invert)));
6022
6023	return 0;
6024}
6025
6026/**
6027 *	t4_get_trace_filter - query one of the tracing filters
6028 *	@adap: the adapter
6029 *	@tp: the current trace filter parameters
6030 *	@idx: which trace filter to query
6031 *	@enabled: non-zero if the filter is enabled
6032 *
6033 *	Returns the current settings of one of the HW tracing filters.
6034 */
6035void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
6036			 int *enabled)
6037{
6038	u32 ctla, ctlb;
6039	int i, ofst = idx * 4;
6040	u32 data_reg, mask_reg;
6041
6042	ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst);
6043	ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst);
6044
6045	if (is_t4(adap->params.chip)) {
6046		*enabled = !!(ctla & TFEN_F);
6047		tp->port =  TFPORT_G(ctla);
6048		tp->invert = !!(ctla & TFINVERTMATCH_F);
6049	} else {
6050		*enabled = !!(ctla & T5_TFEN_F);
6051		tp->port = T5_TFPORT_G(ctla);
6052		tp->invert = !!(ctla & T5_TFINVERTMATCH_F);
6053	}
6054	tp->snap_len = TFCAPTUREMAX_G(ctlb);
6055	tp->min_len = TFMINPKTSIZE_G(ctlb);
6056	tp->skip_ofst = TFOFFSET_G(ctla);
6057	tp->skip_len = TFLENGTH_G(ctla);
6058
6059	ofst = (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A) * idx;
6060	data_reg = MPS_TRC_FILTER0_MATCH_A + ofst;
6061	mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + ofst;
6062
6063	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
6064		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
6065		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
6066	}
6067}
6068
6069/**
6070 *	t4_pmtx_get_stats - returns the HW stats from PMTX
6071 *	@adap: the adapter
6072 *	@cnt: where to store the count statistics
6073 *	@cycles: where to store the cycle statistics
6074 *
6075 *	Returns performance statistics from PMTX.
6076 */
6077void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6078{
6079	int i;
6080	u32 data[2];
6081
6082	for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
6083		t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
6084		cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
6085		if (is_t4(adap->params.chip)) {
6086			cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
6087		} else {
6088			t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
6089					 PM_TX_DBG_DATA_A, data, 2,
6090					 PM_TX_DBG_STAT_MSB_A);
6091			cycles[i] = (((u64)data[0] << 32) | data[1]);
6092		}
6093	}
6094}
6095
6096/**
6097 *	t4_pmrx_get_stats - returns the HW stats from PMRX
6098 *	@adap: the adapter
6099 *	@cnt: where to store the count statistics
6100 *	@cycles: where to store the cycle statistics
6101 *
6102 *	Returns performance statistics from PMRX.
6103 */
6104void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
6105{
6106	int i;
6107	u32 data[2];
6108
6109	for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
6110		t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
6111		cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
6112		if (is_t4(adap->params.chip)) {
6113			cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
6114		} else {
6115			t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
6116					 PM_RX_DBG_DATA_A, data, 2,
6117					 PM_RX_DBG_STAT_MSB_A);
6118			cycles[i] = (((u64)data[0] << 32) | data[1]);
6119		}
6120	}
6121}
6122
6123/**
6124 *	compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
6125 *	@adapter: the adapter
6126 *	@pidx: the port index
6127 *
6128 *	Computes and returns a bitmap indicating which MPS buffer groups are
6129 *	associated with the given Port.  Bit i is set if buffer group i is
6130 *	used by the Port.
6131 */
6132static inline unsigned int compute_mps_bg_map(struct adapter *adapter,
6133					      int pidx)
6134{
6135	unsigned int chip_version, nports;
6136
6137	chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6138	nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
6139
6140	switch (chip_version) {
6141	case CHELSIO_T4:
6142	case CHELSIO_T5:
6143		switch (nports) {
6144		case 1: return 0xf;
6145		case 2: return 3 << (2 * pidx);
6146		case 4: return 1 << pidx;
6147		}
6148		break;
6149
6150	case CHELSIO_T6:
6151		switch (nports) {
6152		case 2: return 1 << (2 * pidx);
6153		}
6154		break;
6155	}
6156
6157	dev_err(adapter->pdev_dev, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
6158		chip_version, nports);
6159
6160	return 0;
6161}
6162
/**
 *	t4_get_mps_bg_map - return the buffer groups associated with a port
 *	@adapter: the adapter
 *	@pidx: the port index
 *
 *	Returns a bitmap indicating which MPS buffer groups are associated
 *	with the given Port.  Bit i is set if buffer group i is used by the
 *	Port.  Returns 0 for an out-of-range @pidx.  The result is cached in
 *	adapter->params.mps_bg_map on first use.
 */
unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
{
	u8 *mps_bg_map;
	unsigned int nports;

	nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
	if (pidx >= nports) {
		CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n",
			pidx, nports);
		return 0;
	}

	/* If we've already retrieved/computed this, just return the result.
	 * (A cached value of 0 is treated as "not yet computed"; valid maps
	 * are non-zero.)
	 */
	mps_bg_map = adapter->params.mps_bg_map;
	if (mps_bg_map[pidx])
		return mps_bg_map[pidx];

	/* Newer Firmware can tell us what the MPS Buffer Group Map is.
	 * If we're talking to such Firmware, let it tell us.  If the new
	 * API isn't supported, revert back to old hardcoded way.  The value
	 * obtained from Firmware is encoded in below format:
	 *
	 * val = (( MPSBGMAP[Port 3] << 24 ) |
	 *        ( MPSBGMAP[Port 2] << 16 ) |
	 *        ( MPSBGMAP[Port 1] <<  8 ) |
	 *        ( MPSBGMAP[Port 0] <<  0 ))
	 */
	if (adapter->flags & CXGB4_FW_OK) {
		u32 param, val;
		int ret;

		param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_MPSBGMAP));
		ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
					 0, 1, &param, &val);
		if (!ret) {
			int p;

			/* Store the BG Map for all of the Ports in order to
			 * avoid more calls to the Firmware in the future.
			 */
			for (p = 0; p < MAX_NPORTS; p++, val >>= 8)
				mps_bg_map[p] = val & 0xff;

			return mps_bg_map[pidx];
		}
	}

	/* Either we're not talking to the Firmware or we're dealing with
	 * older Firmware which doesn't support the new API to get the MPS
	 * Buffer Group Map.  Fall back to computing it ourselves.
	 */
	mps_bg_map[pidx] = compute_mps_bg_map(adapter, pidx);
	return mps_bg_map[pidx];
}
6228
6229/**
6230 *      t4_get_tp_e2c_map - return the E2C channel map associated with a port
6231 *      @adapter: the adapter
6232 *      @pidx: the port index
6233 */
6234static unsigned int t4_get_tp_e2c_map(struct adapter *adapter, int pidx)
6235{
6236	unsigned int nports;
6237	u32 param, val = 0;
6238	int ret;
6239
6240	nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
6241	if (pidx >= nports) {
6242		CH_WARN(adapter, "TP E2C Channel Port Index %d >= Nports %d\n",
6243			pidx, nports);
6244		return 0;
6245	}
6246
6247	/* FW version >= 1.16.44.0 can determine E2C channel map using
6248	 * FW_PARAMS_PARAM_DEV_TPCHMAP API.
6249	 */
6250	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
6251		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPCHMAP));
6252	ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
6253				 0, 1, &param, &val);
6254	if (!ret)
6255		return (val >> (8 * pidx)) & 0xff;
6256
6257	return 0;
6258}
6259
6260/**
6261 *	t4_get_tp_ch_map - return TP ingress channels associated with a port
6262 *	@adap: the adapter
6263 *	@pidx: the port index
6264 *
6265 *	Returns a bitmap indicating which TP Ingress Channels are associated
6266 *	with a given Port.  Bit i is set if TP Ingress Channel i is used by
6267 *	the Port.
6268 */
6269unsigned int t4_get_tp_ch_map(struct adapter *adap, int pidx)
6270{
6271	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
6272	unsigned int nports = 1 << NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
6273
6274	if (pidx >= nports) {
6275		dev_warn(adap->pdev_dev, "TP Port Index %d >= Nports %d\n",
6276			 pidx, nports);
6277		return 0;
6278	}
6279
6280	switch (chip_version) {
6281	case CHELSIO_T4:
6282	case CHELSIO_T5:
6283		/* Note that this happens to be the same values as the MPS
6284		 * Buffer Group Map for these Chips.  But we replicate the code
6285		 * here because they're really separate concepts.
6286		 */
6287		switch (nports) {
6288		case 1: return 0xf;
6289		case 2: return 3 << (2 * pidx);
6290		case 4: return 1 << pidx;
6291		}
6292		break;
6293
6294	case CHELSIO_T6:
6295		switch (nports) {
6296		case 1:
6297		case 2: return 1 << pidx;
6298		}
6299		break;
6300	}
6301
6302	dev_err(adap->pdev_dev, "Need TP Channel Map for Chip %0x, Nports %d\n",
6303		chip_version, nports);
6304	return 0;
6305}
6306
6307/**
6308 *      t4_get_port_type_description - return Port Type string description
6309 *      @port_type: firmware Port Type enumeration
6310 */
6311const char *t4_get_port_type_description(enum fw_port_type port_type)
6312{
6313	static const char *const port_type_description[] = {
6314		"Fiber_XFI",
6315		"Fiber_XAUI",
6316		"BT_SGMII",
6317		"BT_XFI",
6318		"BT_XAUI",
6319		"KX4",
6320		"CX4",
6321		"KX",
6322		"KR",
6323		"SFP",
6324		"BP_AP",
6325		"BP4_AP",
6326		"QSFP_10G",
6327		"QSA",
6328		"QSFP",
6329		"BP40_BA",
6330		"KR4_100G",
6331		"CR4_QSFP",
6332		"CR_QSFP",
6333		"CR2_QSFP",
6334		"SFP28",
6335		"KR_SFP28",
6336		"KR_XLAUI"
6337	};
6338
6339	if (port_type < ARRAY_SIZE(port_type_description))
6340		return port_type_description[port_type];
6341	return "UNKNOWN";
6342}
6343
6344/**
6345 *      t4_get_port_stats_offset - collect port stats relative to a previous
6346 *                                 snapshot
6347 *      @adap: The adapter
6348 *      @idx: The port
6349 *      @stats: Current stats to fill
6350 *      @offset: Previous stats snapshot
6351 */
6352void t4_get_port_stats_offset(struct adapter *adap, int idx,
6353			      struct port_stats *stats,
6354			      struct port_stats *offset)
6355{
6356	u64 *s, *o;
6357	int i;
6358
6359	t4_get_port_stats(adap, idx, stats);
6360	for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
6361			i < (sizeof(struct port_stats) / sizeof(u64));
6362			i++, s++, o++)
6363		*s -= *o;
6364}
6365
/**
 *	t4_get_port_stats - collect port statistics
 *	@adap: the adapter
 *	@idx: the port index
 *	@p: the stats structure to fill
 *
 *	Collect statistics related to the given port from HW.
 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	u32 bgmap = t4_get_mps_bg_map(adap, idx);
	u32 stat_ctl = t4_read_reg(adap, MPS_STAT_CTL_A);

/* The per-port MPS statistics registers live at different offsets on T4
 * versus T5 and later, hence the chip-dependent register selection.
 */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
	T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)

	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop             = GET_STAT(TX_PORT_DROP);
	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);

	/* On T5+, if MPS_STAT_CTL says PAUSE frames were folded into the
	 * TX 64B-frame / multicast counters, back them out.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
		if (stat_ctl & COUNTPAUSESTATTX_F)
			p->tx_frames_64 -= p->tx_pause;
		if (stat_ctl & COUNTPAUSEMCTX_F)
			p->tx_mcast_frames -= p->tx_pause;
	}
	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);

	/* Same PAUSE-frame adjustment for the RX direction. */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
		if (stat_ctl & COUNTPAUSESTATRX_F)
			p->rx_frames_64 -= p->rx_pause;
		if (stat_ctl & COUNTPAUSEMCRX_F)
			p->rx_mcast_frames -= p->rx_pause;
	}

	/* Only read drop/truncate counters for buffer groups this port
	 * actually owns; report 0 for the rest.
	 */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
6462
/**
 *	t4_get_lb_stats - collect loopback port statistics
 *	@adap: the adapter
 *	@idx: the loopback port index
 *	@p: the stats structure to fill
 *
 *	Return HW statistics for the given loopback port.
 */
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{
	u32 bgmap = t4_get_mps_bg_map(adap, idx);

/* Loopback statistics registers live at different offsets on T4 versus
 * T5 and later, hence the chip-dependent register selection.
 */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap->params.chip) ? \
	PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
	T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)

	p->octets           = GET_STAT(BYTES);
	p->frames           = GET_STAT(FRAMES);
	p->bcast_frames     = GET_STAT(BCAST);
	p->mcast_frames     = GET_STAT(MCAST);
	p->ucast_frames     = GET_STAT(UCAST);
	p->error_frames     = GET_STAT(ERROR);

	p->frames_64        = GET_STAT(64B);
	p->frames_65_127    = GET_STAT(65B_127B);
	p->frames_128_255   = GET_STAT(128B_255B);
	p->frames_256_511   = GET_STAT(256B_511B);
	p->frames_512_1023  = GET_STAT(512B_1023B);
	p->frames_1024_1518 = GET_STAT(1024B_1518B);
	p->frames_1519_max  = GET_STAT(1519B_MAX);
	p->drop             = GET_STAT(DROP_FRAMES);

	/* Only read drop/truncate counters for buffer groups this port
	 * actually owns; report 0 for the rest.
	 */
	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
6510
6511/*     t4_mk_filtdelwr - create a delete filter WR
6512 *     @ftid: the filter ID
6513 *     @wr: the filter work request to populate
6514 *     @qid: ingress queue to receive the delete notification
6515 *
6516 *     Creates a filter work request to delete the supplied filter.  If @qid is
6517 *     negative the delete notification is suppressed.
6518 */
6519void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
6520{
6521	memset(wr, 0, sizeof(*wr));
6522	wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
6523	wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
6524	wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
6525				    FW_FILTER_WR_NOREPLY_V(qid < 0));
6526	wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
6527	if (qid >= 0)
6528		wr->rx_chan_rx_rpl_iq =
6529			cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
6530}
6531
/* INIT_CMD - initialize the common header of firmware command @var for
 * FW_<cmd>_CMD with the access type given by @rd_wr (READ or WRITE),
 * including the command length in 16-byte units.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
					FW_CMD_REQUEST_F | \
					FW_CMD_##rd_wr##_F); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
6538
6539int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
6540			  u32 addr, u32 val)
6541{
6542	u32 ldst_addrspace;
6543	struct fw_ldst_cmd c;
6544
6545	memset(&c, 0, sizeof(c));
6546	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
6547	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6548					FW_CMD_REQUEST_F |
6549					FW_CMD_WRITE_F |
6550					ldst_addrspace);
6551	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6552	c.u.addrval.addr = cpu_to_be32(addr);
6553	c.u.addrval.val = cpu_to_be32(val);
6554
6555	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6556}
6557
6558/**
6559 *	t4_mdio_rd - read a PHY register through MDIO
6560 *	@adap: the adapter
6561 *	@mbox: mailbox to use for the FW command
6562 *	@phy_addr: the PHY address
6563 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
6564 *	@reg: the register to read
6565 *	@valp: where to store the value
6566 *
6567 *	Issues a FW command through the given mailbox to read a PHY register.
6568 */
6569int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6570	       unsigned int mmd, unsigned int reg, u16 *valp)
6571{
6572	int ret;
6573	u32 ldst_addrspace;
6574	struct fw_ldst_cmd c;
6575
6576	memset(&c, 0, sizeof(c));
6577	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
6578	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6579					FW_CMD_REQUEST_F | FW_CMD_READ_F |
6580					ldst_addrspace);
6581	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6582	c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
6583					 FW_LDST_CMD_MMD_V(mmd));
6584	c.u.mdio.raddr = cpu_to_be16(reg);
6585
6586	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6587	if (ret == 0)
6588		*valp = be16_to_cpu(c.u.mdio.rval);
6589	return ret;
6590}
6591
6592/**
6593 *	t4_mdio_wr - write a PHY register through MDIO
6594 *	@adap: the adapter
6595 *	@mbox: mailbox to use for the FW command
6596 *	@phy_addr: the PHY address
6597 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
6598 *	@reg: the register to write
6599 *	@val: value to write
6600 *
6601 *	Issues a FW command through the given mailbox to write a PHY register.
6602 */
6603int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6604	       unsigned int mmd, unsigned int reg, u16 val)
6605{
6606	u32 ldst_addrspace;
6607	struct fw_ldst_cmd c;
6608
6609	memset(&c, 0, sizeof(c));
6610	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
6611	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6612					FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
6613					ldst_addrspace);
6614	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6615	c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
6616					 FW_LDST_CMD_MMD_V(mmd));
6617	c.u.mdio.raddr = cpu_to_be16(reg);
6618	c.u.mdio.rval = cpu_to_be16(val);
6619
6620	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6621}
6622
6623/**
6624 *	t4_sge_decode_idma_state - decode the idma state
6625 *	@adapter: the adapter
6626 *	@state: the state idma is stuck in
6627 */
6628void t4_sge_decode_idma_state(struct adapter *adapter, int state)
6629{
6630	static const char * const t4_decode[] = {
6631		"IDMA_IDLE",
6632		"IDMA_PUSH_MORE_CPL_FIFO",
6633		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6634		"Not used",
6635		"IDMA_PHYSADDR_SEND_PCIEHDR",
6636		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6637		"IDMA_PHYSADDR_SEND_PAYLOAD",
6638		"IDMA_SEND_FIFO_TO_IMSG",
6639		"IDMA_FL_REQ_DATA_FL_PREP",
6640		"IDMA_FL_REQ_DATA_FL",
6641		"IDMA_FL_DROP",
6642		"IDMA_FL_H_REQ_HEADER_FL",
6643		"IDMA_FL_H_SEND_PCIEHDR",
6644		"IDMA_FL_H_PUSH_CPL_FIFO",
6645		"IDMA_FL_H_SEND_CPL",
6646		"IDMA_FL_H_SEND_IP_HDR_FIRST",
6647		"IDMA_FL_H_SEND_IP_HDR",
6648		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
6649		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
6650		"IDMA_FL_H_SEND_IP_HDR_PADDING",
6651		"IDMA_FL_D_SEND_PCIEHDR",
6652		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6653		"IDMA_FL_D_REQ_NEXT_DATA_FL",
6654		"IDMA_FL_SEND_PCIEHDR",
6655		"IDMA_FL_PUSH_CPL_FIFO",
6656		"IDMA_FL_SEND_CPL",
6657		"IDMA_FL_SEND_PAYLOAD_FIRST",
6658		"IDMA_FL_SEND_PAYLOAD",
6659		"IDMA_FL_REQ_NEXT_DATA_FL",
6660		"IDMA_FL_SEND_NEXT_PCIEHDR",
6661		"IDMA_FL_SEND_PADDING",
6662		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
6663		"IDMA_FL_SEND_FIFO_TO_IMSG",
6664		"IDMA_FL_REQ_DATAFL_DONE",
6665		"IDMA_FL_REQ_HEADERFL_DONE",
6666	};
6667	static const char * const t5_decode[] = {
6668		"IDMA_IDLE",
6669		"IDMA_ALMOST_IDLE",
6670		"IDMA_PUSH_MORE_CPL_FIFO",
6671		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6672		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6673		"IDMA_PHYSADDR_SEND_PCIEHDR",
6674		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6675		"IDMA_PHYSADDR_SEND_PAYLOAD",
6676		"IDMA_SEND_FIFO_TO_IMSG",
6677		"IDMA_FL_REQ_DATA_FL",
6678		"IDMA_FL_DROP",
6679		"IDMA_FL_DROP_SEND_INC",
6680		"IDMA_FL_H_REQ_HEADER_FL",
6681		"IDMA_FL_H_SEND_PCIEHDR",
6682		"IDMA_FL_H_PUSH_CPL_FIFO",
6683		"IDMA_FL_H_SEND_CPL",
6684		"IDMA_FL_H_SEND_IP_HDR_FIRST",
6685		"IDMA_FL_H_SEND_IP_HDR",
6686		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
6687		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
6688		"IDMA_FL_H_SEND_IP_HDR_PADDING",
6689		"IDMA_FL_D_SEND_PCIEHDR",
6690		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6691		"IDMA_FL_D_REQ_NEXT_DATA_FL",
6692		"IDMA_FL_SEND_PCIEHDR",
6693		"IDMA_FL_PUSH_CPL_FIFO",
6694		"IDMA_FL_SEND_CPL",
6695		"IDMA_FL_SEND_PAYLOAD_FIRST",
6696		"IDMA_FL_SEND_PAYLOAD",
6697		"IDMA_FL_REQ_NEXT_DATA_FL",
6698		"IDMA_FL_SEND_NEXT_PCIEHDR",
6699		"IDMA_FL_SEND_PADDING",
6700		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
6701	};
6702	static const char * const t6_decode[] = {
6703		"IDMA_IDLE",
6704		"IDMA_PUSH_MORE_CPL_FIFO",
6705		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6706		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6707		"IDMA_PHYSADDR_SEND_PCIEHDR",
6708		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6709		"IDMA_PHYSADDR_SEND_PAYLOAD",
6710		"IDMA_FL_REQ_DATA_FL",
6711		"IDMA_FL_DROP",
6712		"IDMA_FL_DROP_SEND_INC",
6713		"IDMA_FL_H_REQ_HEADER_FL",
6714		"IDMA_FL_H_SEND_PCIEHDR",
6715		"IDMA_FL_H_PUSH_CPL_FIFO",
6716		"IDMA_FL_H_SEND_CPL",
6717		"IDMA_FL_H_SEND_IP_HDR_FIRST",
6718		"IDMA_FL_H_SEND_IP_HDR",
6719		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
6720		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
6721		"IDMA_FL_H_SEND_IP_HDR_PADDING",
6722		"IDMA_FL_D_SEND_PCIEHDR",
6723		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6724		"IDMA_FL_D_REQ_NEXT_DATA_FL",
6725		"IDMA_FL_SEND_PCIEHDR",
6726		"IDMA_FL_PUSH_CPL_FIFO",
6727		"IDMA_FL_SEND_CPL",
6728		"IDMA_FL_SEND_PAYLOAD_FIRST",
6729		"IDMA_FL_SEND_PAYLOAD",
6730		"IDMA_FL_REQ_NEXT_DATA_FL",
6731		"IDMA_FL_SEND_NEXT_PCIEHDR",
6732		"IDMA_FL_SEND_PADDING",
6733		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
6734	};
6735	static const u32 sge_regs[] = {
6736		SGE_DEBUG_DATA_LOW_INDEX_2_A,
6737		SGE_DEBUG_DATA_LOW_INDEX_3_A,
6738		SGE_DEBUG_DATA_HIGH_INDEX_10_A,
6739	};
6740	const char **sge_idma_decode;
6741	int sge_idma_decode_nstates;
6742	int i;
6743	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6744
6745	/* Select the right set of decode strings to dump depending on the
6746	 * adapter chip type.
6747	 */
6748	switch (chip_version) {
6749	case CHELSIO_T4:
6750		sge_idma_decode = (const char **)t4_decode;
6751		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6752		break;
6753
6754	case CHELSIO_T5:
6755		sge_idma_decode = (const char **)t5_decode;
6756		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6757		break;
6758
6759	case CHELSIO_T6:
6760		sge_idma_decode = (const char **)t6_decode;
6761		sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
6762		break;
6763
6764	default:
6765		dev_err(adapter->pdev_dev,
6766			"Unsupported chip version %d\n", chip_version);
6767		return;
6768	}
6769
6770	if (is_t4(adapter->params.chip)) {
6771		sge_idma_decode = (const char **)t4_decode;
6772		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6773	} else {
6774		sge_idma_decode = (const char **)t5_decode;
6775		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6776	}
6777
6778	if (state < sge_idma_decode_nstates)
6779		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
6780	else
6781		CH_WARN(adapter, "idma state %d unknown\n", state);
6782
6783	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
6784		CH_WARN(adapter, "SGE register %#x value %#x\n",
6785			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
6786}
6787
6788/**
6789 *      t4_sge_ctxt_flush - flush the SGE context cache
6790 *      @adap: the adapter
6791 *      @mbox: mailbox to use for the FW command
6792 *      @ctxt_type: Egress or Ingress
6793 *
6794 *      Issues a FW command through the given mailbox to flush the
6795 *      SGE context cache.
6796 */
6797int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
6798{
6799	int ret;
6800	u32 ldst_addrspace;
6801	struct fw_ldst_cmd c;
6802
6803	memset(&c, 0, sizeof(c));
6804	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(ctxt_type == CTXT_EGRESS ?
6805						 FW_LDST_ADDRSPC_SGE_EGRC :
6806						 FW_LDST_ADDRSPC_SGE_INGC);
6807	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6808					FW_CMD_REQUEST_F | FW_CMD_READ_F |
6809					ldst_addrspace);
6810	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6811	c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
6812
6813	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6814	return ret;
6815}
6816
6817/**
6818 *	t4_read_sge_dbqtimers - read SGE Doorbell Queue Timer values
6819 *	@adap: the adapter
6820 *	@ndbqtimers: size of the provided SGE Doorbell Queue Timer table
6821 *	@dbqtimers: SGE Doorbell Queue Timer table
6822 *
6823 *	Reads the SGE Doorbell Queue Timer values into the provided table.
6824 *	Returns 0 on success (Firmware and Hardware support this feature),
6825 *	an error on failure.
6826 */
6827int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers,
6828			  u16 *dbqtimers)
6829{
6830	int ret, dbqtimerix;
6831
6832	ret = 0;
6833	dbqtimerix = 0;
6834	while (dbqtimerix < ndbqtimers) {
6835		int nparams, param;
6836		u32 params[7], vals[7];
6837
6838		nparams = ndbqtimers - dbqtimerix;
6839		if (nparams > ARRAY_SIZE(params))
6840			nparams = ARRAY_SIZE(params);
6841
6842		for (param = 0; param < nparams; param++)
6843			params[param] =
6844			  (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
6845			   FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMER) |
6846			   FW_PARAMS_PARAM_Y_V(dbqtimerix + param));
6847		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
6848				      nparams, params, vals);
6849		if (ret)
6850			break;
6851
6852		for (param = 0; param < nparams; param++)
6853			dbqtimers[dbqtimerix++] = vals[param];
6854	}
6855	return ret;
6856}
6857
/**
 *      t4_fw_hello - establish communication with FW
 *      @adap: the adapter
 *      @mbox: mailbox to use for the FW command
 *      @evt_mbox: mailbox to receive async FW events
 *      @master: specifies the caller's willingness to be the device master
 *	@state: returns the current device state (if non-NULL)
 *
 *	Issues a command to establish communication with FW.  Returns either
 *	an error (negative integer) or the mailbox of the Master PF.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

	/* Re-entered on "busy"/"timeout" mailbox failures and on HELLO
	 * wait-timeout, until @retries is exhausted.
	 */
retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	c.err_to_clearinit = cpu_to_be32(
		FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
		FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
		FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
					mbox : FW_HELLO_CMD_MBMASTER_M) |
		FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
		FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
		FW_HELLO_CMD_CLEARINIT_F);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so.
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret < 0) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
			t4_report_fw_error(adap);
		return ret;
	}

	/* Decode the reply: the Master PF's mailbox plus the device state. */
	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
	if (state) {
		if (v & FW_HELLO_CMD_ERR_F)
			*state = DEV_STATE_ERR;
		else if (v & FW_HELLO_CMD_INIT_F)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * PCIE_FW_MASTER_M so the test below will work ...
	 */
	if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			/* Poll PCIE_FW in 50ms steps. */
			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, PCIE_FW_A);
			if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & PCIE_FW_ERR_F)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & PCIE_FW_INIT_F)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == PCIE_FW_MASTER_M &&
			    (pcie_fw & PCIE_FW_MASTER_VLD_F))
				master_mbox = PCIE_FW_MASTER_G(pcie_fw);
			break;
		}
	}

	return master_mbox;
}
6987
6988/**
6989 *	t4_fw_bye - end communication with FW
6990 *	@adap: the adapter
6991 *	@mbox: mailbox to use for the FW command
6992 *
6993 *	Issues a command to terminate communication with FW.
6994 */
6995int t4_fw_bye(struct adapter *adap, unsigned int mbox)
6996{
6997	struct fw_bye_cmd c;
6998
6999	memset(&c, 0, sizeof(c));
7000	INIT_CMD(c, BYE, WRITE);
7001	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7002}
7003
7004/**
7005 *	t4_init_cmd - ask FW to initialize the device
7006 *	@adap: the adapter
7007 *	@mbox: mailbox to use for the FW command
7008 *
7009 *	Issues a command to FW to partially initialize the device.  This
7010 *	performs initialization that generally doesn't depend on user input.
7011 */
7012int t4_early_init(struct adapter *adap, unsigned int mbox)
7013{
7014	struct fw_initialize_cmd c;
7015
7016	memset(&c, 0, sizeof(c));
7017	INIT_CMD(c, INITIALIZE, WRITE);
7018	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7019}
7020
7021/**
7022 *	t4_fw_reset - issue a reset to FW
7023 *	@adap: the adapter
7024 *	@mbox: mailbox to use for the FW command
7025 *	@reset: specifies the type of reset to perform
7026 *
7027 *	Issues a reset command of the specified type to FW.
7028 */
7029int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
7030{
7031	struct fw_reset_cmd c;
7032
7033	memset(&c, 0, sizeof(c));
7034	INIT_CMD(c, RESET, WRITE);
7035	c.val = cpu_to_be32(reset);
7036	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7037}
7038
7039/**
7040 *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
7041 *	@adap: the adapter
7042 *	@mbox: mailbox to use for the FW RESET command (if desired)
7043 *	@force: force uP into RESET even if FW RESET command fails
7044 *
7045 *	Issues a RESET command to firmware (if desired) with a HALT indication
7046 *	and then puts the microprocessor into RESET state.  The RESET command
7047 *	will only be issued if a legitimate mailbox is provided (mbox <=
7048 *	PCIE_FW_MASTER_M).
7049 *
7050 *	This is generally used in order for the host to safely manipulate the
7051 *	adapter without fear of conflicting with whatever the firmware might
7052 *	be doing.  The only way out of this state is to RESTART the firmware
7053 *	...
7054 */
7055static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
7056{
7057	int ret = 0;
7058
7059	/*
7060	 * If a legitimate mailbox is provided, issue a RESET command
7061	 * with a HALT indication.
7062	 */
7063	if (mbox <= PCIE_FW_MASTER_M) {
7064		struct fw_reset_cmd c;
7065
7066		memset(&c, 0, sizeof(c));
7067		INIT_CMD(c, RESET, WRITE);
7068		c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
7069		c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
7070		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7071	}
7072
7073	/*
7074	 * Normally we won't complete the operation if the firmware RESET
7075	 * command fails but if our caller insists we'll go ahead and put the
7076	 * uP into RESET.  This can be useful if the firmware is hung or even
7077	 * missing ...  We'll have to take the risk of putting the uP into
7078	 * RESET without the cooperation of firmware in that case.
7079	 *
7080	 * We also force the firmware's HALT flag to be on in case we bypassed
7081	 * the firmware RESET command above or we're dealing with old firmware
7082	 * which doesn't have the HALT capability.  This will serve as a flag
7083	 * for the incoming firmware to know that it's coming out of a HALT
7084	 * rather than a RESET ... if it's new enough to understand that ...
7085	 */
7086	if (ret == 0 || force) {
7087		t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
7088		t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
7089				 PCIE_FW_HALT_F);
7090	}
7091
7092	/*
7093	 * And we always return the result of the firmware RESET command
7094	 * even when we force the uP into RESET ...
7095	 */
7096	return ret;
7097}
7098
7099/**
7100 *	t4_fw_restart - restart the firmware by taking the uP out of RESET
7101 *	@adap: the adapter
7102 *	@mbox: mailbox to use for the FW command
7103 *	@reset: if we want to do a RESET to restart things
7104 *
7105 *	Restart firmware previously halted by t4_fw_halt().  On successful
7106 *	return the previous PF Master remains as the new PF Master and there
7107 *	is no need to issue a new HELLO command, etc.
7108 *
7109 *	We do this in two ways:
7110 *
7111 *	 1. If we're dealing with newer firmware we'll simply want to take
7112 *	    the chip's microprocessor out of RESET.  This will cause the
7113 *	    firmware to start up from its start vector.  And then we'll loop
7114 *	    until the firmware indicates it's started again (PCIE_FW.HALT
7115 *	    reset to 0) or we timeout.
7116 *
7117 *	 2. If we're dealing with older firmware then we'll need to RESET
7118 *	    the chip since older firmware won't recognize the PCIE_FW.HALT
7119 *	    flag and automatically RESET itself on startup.
7120 */
7121static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
7122{
7123	if (reset) {
7124		/*
7125		 * Since we're directing the RESET instead of the firmware
7126		 * doing it automatically, we need to clear the PCIE_FW.HALT
7127		 * bit.
7128		 */
7129		t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
7130
7131		/*
7132		 * If we've been given a valid mailbox, first try to get the
7133		 * firmware to do the RESET.  If that works, great and we can
7134		 * return success.  Otherwise, if we haven't been given a
7135		 * valid mailbox or the RESET command failed, fall back to
7136		 * hitting the chip with a hammer.
7137		 */
7138		if (mbox <= PCIE_FW_MASTER_M) {
7139			t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
7140			msleep(100);
7141			if (t4_fw_reset(adap, mbox,
7142					PIORST_F | PIORSTMODE_F) == 0)
7143				return 0;
7144		}
7145
7146		t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
7147		msleep(2000);
7148	} else {
7149		int ms;
7150
7151		t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
7152		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
7153			if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
7154				return 0;
7155			msleep(100);
7156			ms += 100;
7157		}
7158		return -ETIMEDOUT;
7159	}
7160	return 0;
7161}
7162
7163/**
7164 *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
7165 *	@adap: the adapter
7166 *	@mbox: mailbox to use for the FW RESET command (if desired)
7167 *	@fw_data: the firmware image to write
7168 *	@size: image size
7169 *	@force: force upgrade even if firmware doesn't cooperate
7170 *
7171 *	Perform all of the steps necessary for upgrading an adapter's
7172 *	firmware image.  Normally this requires the cooperation of the
7173 *	existing firmware in order to halt all existing activities
7174 *	but if an invalid mailbox token is passed in we skip that step
7175 *	(though we'll still put the adapter microprocessor into RESET in
7176 *	that case).
7177 *
7178 *	On successful return the new firmware will have been loaded and
7179 *	the adapter will have been fully RESET losing all previous setup
7180 *	state.  On unsuccessful return the adapter may be completely hosed ...
7181 *	positive errno indicates that the adapter is ~probably~ intact, a
7182 *	negative errno indicates that things are looking bad ...
7183 */
7184int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
7185		  const u8 *fw_data, unsigned int size, int force)
7186{
7187	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
7188	int reset, ret;
7189
7190	if (!t4_fw_matches_chip(adap, fw_hdr))
7191		return -EINVAL;
7192
7193	/* Disable CXGB4_FW_OK flag so that mbox commands with CXGB4_FW_OK flag
7194	 * set wont be sent when we are flashing FW.
7195	 */
7196	adap->flags &= ~CXGB4_FW_OK;
7197
7198	ret = t4_fw_halt(adap, mbox, force);
7199	if (ret < 0 && !force)
7200		goto out;
7201
7202	ret = t4_load_fw(adap, fw_data, size);
7203	if (ret < 0)
7204		goto out;
7205
7206	/*
7207	 * If there was a Firmware Configuration File stored in FLASH,
7208	 * there's a good chance that it won't be compatible with the new
7209	 * Firmware.  In order to prevent difficult to diagnose adapter
7210	 * initialization issues, we clear out the Firmware Configuration File
7211	 * portion of the FLASH .  The user will need to re-FLASH a new
7212	 * Firmware Configuration File which is compatible with the new
7213	 * Firmware if that's desired.
7214	 */
7215	(void)t4_load_cfg(adap, NULL, 0);
7216
7217	/*
7218	 * Older versions of the firmware don't understand the new
7219	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
7220	 * restart.  So for newly loaded older firmware we'll have to do the
7221	 * RESET for it so it starts up on a clean slate.  We can tell if
7222	 * the newly loaded firmware will handle this right by checking
7223	 * its header flags to see if it advertises the capability.
7224	 */
7225	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
7226	ret = t4_fw_restart(adap, mbox, reset);
7227
7228	/* Grab potentially new Firmware Device Log parameters so we can see
7229	 * how healthy the new Firmware is.  It's okay to contact the new
7230	 * Firmware for these parameters even though, as far as it's
7231	 * concerned, we've never said "HELLO" to it ...
7232	 */
7233	(void)t4_init_devlog_params(adap);
7234out:
7235	adap->flags |= CXGB4_FW_OK;
7236	return ret;
7237}
7238
7239/**
7240 *	t4_fl_pkt_align - return the fl packet alignment
7241 *	@adap: the adapter
7242 *
7243 *	T4 has a single field to specify the packing and padding boundary.
7244 *	T5 onwards has separate fields for this and hence the alignment for
7245 *	next packet offset is maximum of these two.
7246 *
7247 */
7248int t4_fl_pkt_align(struct adapter *adap)
7249{
7250	u32 sge_control, sge_control2;
7251	unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
7252
7253	sge_control = t4_read_reg(adap, SGE_CONTROL_A);
7254
7255	/* T4 uses a single control field to specify both the PCIe Padding and
7256	 * Packing Boundary.  T5 introduced the ability to specify these
7257	 * separately.  The actual Ingress Packet Data alignment boundary
7258	 * within Packed Buffer Mode is the maximum of these two
7259	 * specifications.  (Note that it makes no real practical sense to
7260	 * have the Padding Boundary be larger than the Packing Boundary but you
7261	 * could set the chip up that way and, in fact, legacy T4 code would
7262	 * end doing this because it would initialize the Padding Boundary and
7263	 * leave the Packing Boundary initialized to 0 (16 bytes).)
7264	 * Padding Boundary values in T6 starts from 8B,
7265	 * where as it is 32B for T4 and T5.
7266	 */
7267	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
7268		ingpad_shift = INGPADBOUNDARY_SHIFT_X;
7269	else
7270		ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
7271
7272	ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
7273
7274	fl_align = ingpadboundary;
7275	if (!is_t4(adap->params.chip)) {
7276		/* T5 has a weird interpretation of one of the PCIe Packing
7277		 * Boundary values.  No idea why ...
7278		 */
7279		sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
7280		ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
7281		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
7282			ingpackboundary = 16;
7283		else
7284			ingpackboundary = 1 << (ingpackboundary +
7285						INGPACKBOUNDARY_SHIFT_X);
7286
7287		fl_align = max(ingpadboundary, ingpackboundary);
7288	}
7289	return fl_align;
7290}
7291
7292/**
7293 *	t4_fixup_host_params - fix up host-dependent parameters
7294 *	@adap: the adapter
7295 *	@page_size: the host's Base Page Size
7296 *	@cache_line_size: the host's Cache Line Size
7297 *
7298 *	Various registers in T4 contain values which are dependent on the
7299 *	host's Base Page and Cache Line Sizes.  This function will fix all of
7300 *	those registers with the appropriate values as passed in ...
7301 */
7302int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
7303			 unsigned int cache_line_size)
7304{
7305	unsigned int page_shift = fls(page_size) - 1;
7306	unsigned int sge_hps = page_shift - 10;
7307	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
7308	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
7309	unsigned int fl_align_log = fls(fl_align) - 1;
7310
7311	t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
7312		     HOSTPAGESIZEPF0_V(sge_hps) |
7313		     HOSTPAGESIZEPF1_V(sge_hps) |
7314		     HOSTPAGESIZEPF2_V(sge_hps) |
7315		     HOSTPAGESIZEPF3_V(sge_hps) |
7316		     HOSTPAGESIZEPF4_V(sge_hps) |
7317		     HOSTPAGESIZEPF5_V(sge_hps) |
7318		     HOSTPAGESIZEPF6_V(sge_hps) |
7319		     HOSTPAGESIZEPF7_V(sge_hps));
7320
7321	if (is_t4(adap->params.chip)) {
7322		t4_set_reg_field(adap, SGE_CONTROL_A,
7323				 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
7324				 EGRSTATUSPAGESIZE_F,
7325				 INGPADBOUNDARY_V(fl_align_log -
7326						  INGPADBOUNDARY_SHIFT_X) |
7327				 EGRSTATUSPAGESIZE_V(stat_len != 64));
7328	} else {
7329		unsigned int pack_align;
7330		unsigned int ingpad, ingpack;
7331
7332		/* T5 introduced the separation of the Free List Padding and
7333		 * Packing Boundaries.  Thus, we can select a smaller Padding
7334		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
7335		 * Bandwidth, and use a Packing Boundary which is large enough
7336		 * to avoid false sharing between CPUs, etc.
7337		 *
7338		 * For the PCI Link, the smaller the Padding Boundary the
7339		 * better.  For the Memory Controller, a smaller Padding
7340		 * Boundary is better until we cross under the Memory Line
7341		 * Size (the minimum unit of transfer to/from Memory).  If we
7342		 * have a Padding Boundary which is smaller than the Memory
7343		 * Line Size, that'll involve a Read-Modify-Write cycle on the
7344		 * Memory Controller which is never good.
7345		 */
7346
7347		/* We want the Packing Boundary to be based on the Cache Line
7348		 * Size in order to help avoid False Sharing performance
7349		 * issues between CPUs, etc.  We also want the Packing
7350		 * Boundary to incorporate the PCI-E Maximum Payload Size.  We
7351		 * get best performance when the Packing Boundary is a
7352		 * multiple of the Maximum Payload Size.
7353		 */
7354		pack_align = fl_align;
7355		if (pci_is_pcie(adap->pdev)) {
7356			unsigned int mps, mps_log;
7357			u16 devctl;
7358
7359			/* The PCIe Device Control Maximum Payload Size field
7360			 * [bits 7:5] encodes sizes as powers of 2 starting at
7361			 * 128 bytes.
7362			 */
7363			pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL,
7364						  &devctl);
7365			mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
7366			mps = 1 << mps_log;
7367			if (mps > pack_align)
7368				pack_align = mps;
7369		}
7370
7371		/* N.B. T5/T6 have a crazy special interpretation of the "0"
7372		 * value for the Packing Boundary.  This corresponds to 16
7373		 * bytes instead of the expected 32 bytes.  So if we want 32
7374		 * bytes, the best we can really do is 64 bytes ...
7375		 */
7376		if (pack_align <= 16) {
7377			ingpack = INGPACKBOUNDARY_16B_X;
7378			fl_align = 16;
7379		} else if (pack_align == 32) {
7380			ingpack = INGPACKBOUNDARY_64B_X;
7381			fl_align = 64;
7382		} else {
7383			unsigned int pack_align_log = fls(pack_align) - 1;
7384
7385			ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
7386			fl_align = pack_align;
7387		}
7388
7389		/* Use the smallest Ingress Padding which isn't smaller than
7390		 * the Memory Controller Read/Write Size.  We'll take that as
7391		 * being 8 bytes since we don't know of any system with a
7392		 * wider Memory Controller Bus Width.
7393		 */
7394		if (is_t5(adap->params.chip))
7395			ingpad = INGPADBOUNDARY_32B_X;
7396		else
7397			ingpad = T6_INGPADBOUNDARY_8B_X;
7398
7399		t4_set_reg_field(adap, SGE_CONTROL_A,
7400				 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
7401				 EGRSTATUSPAGESIZE_F,
7402				 INGPADBOUNDARY_V(ingpad) |
7403				 EGRSTATUSPAGESIZE_V(stat_len != 64));
7404		t4_set_reg_field(adap, SGE_CONTROL2_A,
7405				 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
7406				 INGPACKBOUNDARY_V(ingpack));
7407	}
7408	/*
7409	 * Adjust various SGE Free List Host Buffer Sizes.
7410	 *
7411	 * This is something of a crock since we're using fixed indices into
7412	 * the array which are also known by the sge.c code and the T4
7413	 * Firmware Configuration File.  We need to come up with a much better
7414	 * approach to managing this array.  For now, the first four entries
7415	 * are:
7416	 *
7417	 *   0: Host Page Size
7418	 *   1: 64KB
7419	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
7420	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
7421	 *
7422	 * For the single-MTU buffers in unpacked mode we need to include
7423	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
7424	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
7425	 * Padding boundary.  All of these are accommodated in the Factory
7426	 * Default Firmware Configuration File but we need to adjust it for
7427	 * this host's cache line size.
7428	 */
7429	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
7430	t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
7431		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
7432		     & ~(fl_align-1));
7433	t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
7434		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
7435		     & ~(fl_align-1));
7436
7437	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));
7438
7439	return 0;
7440}
7441
7442/**
7443 *	t4_fw_initialize - ask FW to initialize the device
7444 *	@adap: the adapter
7445 *	@mbox: mailbox to use for the FW command
7446 *
7447 *	Issues a command to FW to partially initialize the device.  This
7448 *	performs initialization that generally doesn't depend on user input.
7449 */
7450int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
7451{
7452	struct fw_initialize_cmd c;
7453
7454	memset(&c, 0, sizeof(c));
7455	INIT_CMD(c, INITIALIZE, WRITE);
7456	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7457}
7458
7459/**
7460 *	t4_query_params_rw - query FW or device parameters
7461 *	@adap: the adapter
7462 *	@mbox: mailbox to use for the FW command
7463 *	@pf: the PF
7464 *	@vf: the VF
7465 *	@nparams: the number of parameters
7466 *	@params: the parameter names
7467 *	@val: the parameter values
7468 *	@rw: Write and read flag
7469 *	@sleep_ok: if true, we may sleep awaiting mbox cmd completion
7470 *
7471 *	Reads the value of FW or device parameters.  Up to 7 parameters can be
7472 *	queried at once.
7473 */
7474int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
7475		       unsigned int vf, unsigned int nparams, const u32 *params,
7476		       u32 *val, int rw, bool sleep_ok)
7477{
7478	int i, ret;
7479	struct fw_params_cmd c;
7480	__be32 *p = &c.param[0].mnem;
7481
7482	if (nparams > 7)
7483		return -EINVAL;
7484
7485	memset(&c, 0, sizeof(c));
7486	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
7487				  FW_CMD_REQUEST_F | FW_CMD_READ_F |
7488				  FW_PARAMS_CMD_PFN_V(pf) |
7489				  FW_PARAMS_CMD_VFN_V(vf));
7490	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7491
7492	for (i = 0; i < nparams; i++) {
7493		*p++ = cpu_to_be32(*params++);
7494		if (rw)
7495			*p = cpu_to_be32(*(val + i));
7496		p++;
7497	}
7498
7499	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7500	if (ret == 0)
7501		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
7502			*val++ = be32_to_cpu(*p);
7503	return ret;
7504}
7505
/* Query FW/device parameters, allowing the mailbox code to sleep while
 * waiting for completion (sleep_ok = true).
 */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
				  true);
}
7513
/* Non-sleeping variant of t4_query_params() for contexts that cannot sleep
 * (sleep_ok = false).
 */
int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
		       unsigned int vf, unsigned int nparams, const u32 *params,
		       u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
				  false);
}
7521
7522/**
7523 *      t4_set_params_timeout - sets FW or device parameters
7524 *      @adap: the adapter
7525 *      @mbox: mailbox to use for the FW command
7526 *      @pf: the PF
7527 *      @vf: the VF
7528 *      @nparams: the number of parameters
7529 *      @params: the parameter names
7530 *      @val: the parameter values
7531 *      @timeout: the timeout time
7532 *
7533 *      Sets the value of FW or device parameters.  Up to 7 parameters can be
7534 *      specified at once.
7535 */
7536int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
7537			  unsigned int pf, unsigned int vf,
7538			  unsigned int nparams, const u32 *params,
7539			  const u32 *val, int timeout)
7540{
7541	struct fw_params_cmd c;
7542	__be32 *p = &c.param[0].mnem;
7543
7544	if (nparams > 7)
7545		return -EINVAL;
7546
7547	memset(&c, 0, sizeof(c));
7548	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
7549				  FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7550				  FW_PARAMS_CMD_PFN_V(pf) |
7551				  FW_PARAMS_CMD_VFN_V(vf));
7552	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7553
7554	while (nparams--) {
7555		*p++ = cpu_to_be32(*params++);
7556		*p++ = cpu_to_be32(*val++);
7557	}
7558
7559	return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
7560}
7561
7562/**
7563 *	t4_set_params - sets FW or device parameters
7564 *	@adap: the adapter
7565 *	@mbox: mailbox to use for the FW command
7566 *	@pf: the PF
7567 *	@vf: the VF
7568 *	@nparams: the number of parameters
7569 *	@params: the parameter names
7570 *	@val: the parameter values
7571 *
7572 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
7573 *	specified at once.
7574 */
7575int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7576		  unsigned int vf, unsigned int nparams, const u32 *params,
7577		  const u32 *val)
7578{
7579	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
7580				     FW_CMD_MAX_TIMEOUT);
7581}
7582
7583/**
7584 *	t4_cfg_pfvf - configure PF/VF resource limits
7585 *	@adap: the adapter
7586 *	@mbox: mailbox to use for the FW command
7587 *	@pf: the PF being configured
7588 *	@vf: the VF being configured
7589 *	@txq: the max number of egress queues
7590 *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
7591 *	@rxqi: the max number of interrupt-capable ingress queues
7592 *	@rxq: the max number of interruptless ingress queues
7593 *	@tc: the PCI traffic class
7594 *	@vi: the max number of virtual interfaces
7595 *	@cmask: the channel access rights mask for the PF/VF
7596 *	@pmask: the port access rights mask for the PF/VF
7597 *	@nexact: the maximum number of exact MPS filters
7598 *	@rcaps: read capabilities
7599 *	@wxcaps: write/execute capabilities
7600 *
7601 *	Configures resource limits and capabilities for a physical or virtual
7602 *	function.
7603 */
7604int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
7605		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
7606		unsigned int rxqi, unsigned int rxq, unsigned int tc,
7607		unsigned int vi, unsigned int cmask, unsigned int pmask,
7608		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
7609{
7610	struct fw_pfvf_cmd c;
7611
7612	memset(&c, 0, sizeof(c));
7613	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
7614				  FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
7615				  FW_PFVF_CMD_VFN_V(vf));
7616	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7617	c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
7618				     FW_PFVF_CMD_NIQ_V(rxq));
7619	c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
7620				    FW_PFVF_CMD_PMASK_V(pmask) |
7621				    FW_PFVF_CMD_NEQ_V(txq));
7622	c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
7623				      FW_PFVF_CMD_NVI_V(vi) |
7624				      FW_PFVF_CMD_NEXACTF_V(nexact));
7625	c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
7626					FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
7627					FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
7628	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7629}
7630
7631/**
7632 *	t4_alloc_vi - allocate a virtual interface
7633 *	@adap: the adapter
7634 *	@mbox: mailbox to use for the FW command
7635 *	@port: physical port associated with the VI
7636 *	@pf: the PF owning the VI
7637 *	@vf: the VF owning the VI
7638 *	@nmac: number of MAC addresses needed (1 to 5)
7639 *	@mac: the MAC addresses of the VI
7640 *	@rss_size: size of RSS table slice associated with this VI
7641 *	@vivld: the destination to store the VI Valid value.
7642 *	@vin: the destination to store the VIN value.
7643 *
7644 *	Allocates a virtual interface for the given physical port.  If @mac is
7645 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
7646 *	@mac should be large enough to hold @nmac Ethernet addresses, they are
7647 *	stored consecutively so the space needed is @nmac * 6 bytes.
7648 *	Returns a negative error number or the non-negative VI id.
7649 */
7650int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
7651		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
7652		unsigned int *rss_size, u8 *vivld, u8 *vin)
7653{
7654	int ret;
7655	struct fw_vi_cmd c;
7656
7657	memset(&c, 0, sizeof(c));
7658	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
7659				  FW_CMD_WRITE_F | FW_CMD_EXEC_F |
7660				  FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
7661	c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
7662	c.portid_pkd = FW_VI_CMD_PORTID_V(port);
7663	c.nmac = nmac - 1;
7664
7665	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7666	if (ret)
7667		return ret;
7668
7669	if (mac) {
7670		memcpy(mac, c.mac, sizeof(c.mac));
7671		switch (nmac) {
7672		case 5:
7673			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
7674			fallthrough;
7675		case 4:
7676			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
7677			fallthrough;
7678		case 3:
7679			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
7680			fallthrough;
7681		case 2:
7682			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
7683		}
7684	}
7685	if (rss_size)
7686		*rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
7687
7688	if (vivld)
7689		*vivld = FW_VI_CMD_VFVLD_G(be32_to_cpu(c.alloc_to_len16));
7690
7691	if (vin)
7692		*vin = FW_VI_CMD_VIN_G(be32_to_cpu(c.alloc_to_len16));
7693
7694	return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
7695}
7696
7697/**
7698 *	t4_free_vi - free a virtual interface
7699 *	@adap: the adapter
7700 *	@mbox: mailbox to use for the FW command
7701 *	@pf: the PF owning the VI
7702 *	@vf: the VF owning the VI
7703 *	@viid: virtual interface identifiler
7704 *
7705 *	Free a previously allocated virtual interface.
7706 */
7707int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
7708	       unsigned int vf, unsigned int viid)
7709{
7710	struct fw_vi_cmd c;
7711
7712	memset(&c, 0, sizeof(c));
7713	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
7714				  FW_CMD_REQUEST_F |
7715				  FW_CMD_EXEC_F |
7716				  FW_VI_CMD_PFN_V(pf) |
7717				  FW_VI_CMD_VFN_V(vf));
7718	c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
7719	c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
7720
7721	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7722}
7723
7724/**
7725 *	t4_set_rxmode - set Rx properties of a virtual interface
7726 *	@adap: the adapter
7727 *	@mbox: mailbox to use for the FW command
7728 *	@viid: the VI id
7729 *	@viid_mirror: the mirror VI id
7730 *	@mtu: the new MTU or -1
7731 *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
7732 *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
7733 *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
7734 *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
7735 *	@sleep_ok: if true we may sleep while awaiting command completion
7736 *
7737 *	Sets Rx properties of a virtual interface.
7738 */
7739int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
7740		  unsigned int viid_mirror, int mtu, int promisc, int all_multi,
7741		  int bcast, int vlanex, bool sleep_ok)
7742{
7743	struct fw_vi_rxmode_cmd c, c_mirror;
7744	int ret;
7745
7746	/* convert to FW values */
7747	if (mtu < 0)
7748		mtu = FW_RXMODE_MTU_NO_CHG;
7749	if (promisc < 0)
7750		promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
7751	if (all_multi < 0)
7752		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
7753	if (bcast < 0)
7754		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
7755	if (vlanex < 0)
7756		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
7757
7758	memset(&c, 0, sizeof(c));
7759	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
7760				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7761				   FW_VI_RXMODE_CMD_VIID_V(viid));
7762	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7763	c.mtu_to_vlanexen =
7764		cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
7765			    FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
7766			    FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
7767			    FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
7768			    FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
7769
7770	if (viid_mirror) {
7771		memcpy(&c_mirror, &c, sizeof(c_mirror));
7772		c_mirror.op_to_viid =
7773			cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
7774				    FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7775				    FW_VI_RXMODE_CMD_VIID_V(viid_mirror));
7776	}
7777
7778	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7779	if (ret)
7780		return ret;
7781
7782	if (viid_mirror)
7783		ret = t4_wr_mbox_meat(adap, mbox, &c_mirror, sizeof(c_mirror),
7784				      NULL, sleep_ok);
7785
7786	return ret;
7787}
7788
7789/**
7790 *      t4_free_encap_mac_filt - frees MPS entry at given index
7791 *      @adap: the adapter
7792 *      @viid: the VI id
7793 *      @idx: index of MPS entry to be freed
7794 *      @sleep_ok: call is allowed to sleep
7795 *
7796 *      Frees the MPS entry at supplied index
7797 *
7798 *      Returns a negative error number or zero on success
7799 */
7800int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
7801			   int idx, bool sleep_ok)
7802{
7803	struct fw_vi_mac_exact *p;
7804	u8 addr[] = {0, 0, 0, 0, 0, 0};
7805	struct fw_vi_mac_cmd c;
7806	int ret = 0;
7807	u32 exact;
7808
7809	memset(&c, 0, sizeof(c));
7810	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7811				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7812				   FW_CMD_EXEC_V(0) |
7813				   FW_VI_MAC_CMD_VIID_V(viid));
7814	exact = FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC);
7815	c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
7816					  exact |
7817					  FW_CMD_LEN16_V(1));
7818	p = c.u.exact;
7819	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
7820				      FW_VI_MAC_CMD_IDX_V(idx));
7821	memcpy(p->macaddr, addr, sizeof(p->macaddr));
7822	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
7823	return ret;
7824}
7825
7826/**
7827 *	t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
7828 *	@adap: the adapter
7829 *	@viid: the VI id
7830 *	@addr: the MAC address
7831 *	@mask: the mask
7832 *	@idx: index of the entry in mps tcam
7833 *	@lookup_type: MAC address for inner (1) or outer (0) header
7834 *	@port_id: the port index
7835 *	@sleep_ok: call is allowed to sleep
7836 *
7837 *	Removes the mac entry at the specified index using raw mac interface.
7838 *
7839 *	Returns a negative error number on failure.
7840 */
7841int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
7842			 const u8 *addr, const u8 *mask, unsigned int idx,
7843			 u8 lookup_type, u8 port_id, bool sleep_ok)
7844{
7845	struct fw_vi_mac_cmd c;
7846	struct fw_vi_mac_raw *p = &c.u.raw;
7847	u32 val;
7848
7849	memset(&c, 0, sizeof(c));
7850	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7851				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7852				   FW_CMD_EXEC_V(0) |
7853				   FW_VI_MAC_CMD_VIID_V(viid));
7854	val = FW_CMD_LEN16_V(1) |
7855	      FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
7856	c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
7857					  FW_CMD_LEN16_V(val));
7858
7859	p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx) |
7860				     FW_VI_MAC_ID_BASED_FREE);
7861
7862	/* Lookup Type. Outer header: 0, Inner header: 1 */
7863	p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
7864				   DATAPORTNUM_V(port_id));
7865	/* Lookup mask and port mask */
7866	p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
7867				    DATAPORTNUM_V(DATAPORTNUM_M));
7868
7869	/* Copy the address and the mask */
7870	memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
7871	memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
7872
7873	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
7874}
7875
7876/**
7877 *      t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
7878 *      @adap: the adapter
7879 *      @viid: the VI id
7880 *      @addr: the MAC address
7881 *      @mask: the mask
7882 *      @vni: the VNI id for the tunnel protocol
7883 *      @vni_mask: mask for the VNI id
7884 *      @dip_hit: to enable DIP match for the MPS entry
7885 *      @lookup_type: MAC address for inner (1) or outer (0) header
7886 *      @sleep_ok: call is allowed to sleep
7887 *
7888 *      Allocates an MPS entry with specified MAC address and VNI value.
7889 *
7890 *      Returns a negative error number or the allocated index for this mac.
7891 */
7892int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
7893			    const u8 *addr, const u8 *mask, unsigned int vni,
7894			    unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
7895			    bool sleep_ok)
7896{
7897	struct fw_vi_mac_cmd c;
7898	struct fw_vi_mac_vni *p = c.u.exact_vni;
7899	int ret = 0;
7900	u32 val;
7901
7902	memset(&c, 0, sizeof(c));
7903	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7904				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7905				   FW_VI_MAC_CMD_VIID_V(viid));
7906	val = FW_CMD_LEN16_V(1) |
7907	      FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC_VNI);
7908	c.freemacs_to_len16 = cpu_to_be32(val);
7909	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
7910				      FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
7911	memcpy(p->macaddr, addr, sizeof(p->macaddr));
7912	memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask));
7913
7914	p->lookup_type_to_vni =
7915		cpu_to_be32(FW_VI_MAC_CMD_VNI_V(vni) |
7916			    FW_VI_MAC_CMD_DIP_HIT_V(dip_hit) |
7917			    FW_VI_MAC_CMD_LOOKUP_TYPE_V(lookup_type));
7918	p->vni_mask_pkd = cpu_to_be32(FW_VI_MAC_CMD_VNI_MASK_V(vni_mask));
7919	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
7920	if (ret == 0)
7921		ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
7922	return ret;
7923}
7924
/**
 *	t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
 *	@adap: the adapter
 *	@viid: the VI id
 *	@addr: the MAC address
 *	@mask: the mask
 *	@idx: index at which to add this entry
 *	@lookup_type: MAC address for inner (1) or outer (0) header
 *	@port_id: the port index
 *	@sleep_ok: call is allowed to sleep
 *
 *	Adds the mac entry at the specified index using raw mac interface.
 *
 *	Returns a negative error number or the allocated index for this mac.
 */
int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
			  const u8 *addr, const u8 *mask, unsigned int idx,
			  u8 lookup_type, u8 port_id, bool sleep_ok)
{
	int ret = 0;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_raw *p = &c.u.raw;	/* RAW-entry view of the command payload */
	u32 val;

	/* Build a FW_VI_MAC_CMD write carrying a single RAW-type entry. */
	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
				   FW_VI_MAC_CMD_VIID_V(viid));
	val = FW_CMD_LEN16_V(1) |
	      FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
	c.freemacs_to_len16 = cpu_to_be32(val);

	/* Target raw index at which the new entry should be installed. */
	p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx));

	/* Lookup Type. Outer header: 0, Inner header: 1 */
	p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
				   DATAPORTNUM_V(port_id));
	/* Lookup mask and port mask: match every bit of the lookup-type and
	 * port-number fields.
	 */
	p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
				    DATAPORTNUM_V(DATAPORTNUM_M));

	/* Copy the address and the mask.  The 6-byte MAC occupies the low
	 * bytes of the 8-byte data1/data1m fields, hence the +2 offset.
	 */
	memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
	memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);

	/* The reply echoes the index actually used; if it differs from the
	 * one we asked for, the requested slot was not granted.
	 */
	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret == 0) {
		ret = FW_VI_MAC_CMD_RAW_IDX_G(be32_to_cpu(p->raw_idx_pkd));
		if (ret != idx)
			ret = -ENOMEM;
	}

	return ret;
}
7980
/**
 *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@free: if true any existing filters for this VI id are first removed
 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
 *	@addr: the MAC address(es)
 *	@idx: where to store the index of each allocated filter
 *	@hash: pointer to hash address filter bitmap
 *	@sleep_ok: call is allowed to sleep
 *
 *	Allocates an exact-match filter for each of the supplied addresses and
 *	sets it to the corresponding address.  If @idx is not %NULL it should
 *	have at least @naddr entries, each of which will be set to the index of
 *	the filter allocated for the corresponding MAC address.  If a filter
 *	could not be allocated for an address its index is set to 0xffff.
 *	If @hash is not %NULL addresses that fail to allocate an exact filter
 *	are hashed and update the hash filter bitmap pointed at by @hash.
 *
 *	Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;	/* count of exact-match filters stored */
	unsigned int max_naddr = adap->params.arch.mps_tcam_size;
	unsigned int rem = naddr;	/* addresses still to be submitted */

	if (naddr > max_naddr)
		return -EINVAL;

	/* One FW_VI_MAC_CMD holds at most ARRAY_SIZE(c.u.exact) entries, so
	 * walk the address list in chunks of that size.
	 */
	for (offset = 0; offset < naddr ; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
					 rem : ARRAY_SIZE(c.u.exact));
		/* Command length covers only the entries actually used. */
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
					   FW_CMD_REQUEST_F |
					   FW_CMD_WRITE_F |
					   FW_CMD_EXEC_V(free) |
					   FW_VI_MAC_CMD_VIID_V(viid));
		c.freemacs_to_len16 =
			cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
				    FW_CMD_LEN16_V(len16));

		/* FW_VI_MAC_ADD_MAC: let the firmware pick each index. */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx =
				cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
					    FW_VI_MAC_CMD_IDX_V(
						    FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset + i],
			       sizeof(p->macaddr));
		}

		/* It's okay if we run out of space in our MAC address arena.
		 * Some of the addresses we submit may get stored so we need
		 * to run through the reply to see what the results were ...
		 */
		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret && ret != -FW_ENOMEM)
			break;

		/* Parse the reply (same buffer): an index >= max_naddr means
		 * no exact-match filter was stored for that address.
		 */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = FW_VI_MAC_CMD_IDX_G(
					be16_to_cpu(p->valid_to_idx));

			if (idx)
				idx[offset + i] = (index >= max_naddr ?
						   0xffff : index);
			if (index < max_naddr)
				nfilters++;
			else if (hash)
				*hash |= (1ULL <<
					  hash_mac_addr(addr[offset + i]));
		}

		/* Only the first chunk may carry the EXEC/FREEMACS request
		 * to purge pre-existing filters.
		 */
		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	/* -FW_ENOMEM is a partial success: report how many were stored. */
	if (ret == 0 || ret == -FW_ENOMEM)
		ret = nfilters;
	return ret;
}
8074
/**
 *	t4_free_mac_filt - frees exact-match filters of given MAC addresses
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@naddr: the number of MAC addresses to free filters for (up to 7)
 *	@addr: the MAC address(es)
 *	@sleep_ok: call is allowed to sleep
 *
 *	Frees the exact-match filter for each of the supplied addresses
 *
 *	Returns a negative error number or the number of filters freed.
 */
int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
		     unsigned int viid, unsigned int naddr,
		     const u8 **addr, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;	/* count of filters successfully freed */
	/* MPS CLS SRAM depth differs between T4 and later chips. */
	unsigned int max_naddr = is_t4(adap->params.chip) ?
				       NUM_MPS_CLS_SRAM_L_INSTANCES :
				       NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
	unsigned int rem = naddr;	/* addresses still to be submitted */

	if (naddr > max_naddr)
		return -EINVAL;

	/* One FW_VI_MAC_CMD holds at most ARRAY_SIZE(c.u.exact) entries, so
	 * walk the address list in chunks of that size.
	 */
	for (offset = 0; offset < (int)naddr ; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
					 ? rem
					 : ARRAY_SIZE(c.u.exact));
		/* Command length covers only the entries actually used. */
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_CMD_EXEC_V(0) |
				     FW_VI_MAC_CMD_VIID_V(viid));
		c.freemacs_to_len16 =
				cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
					    FW_CMD_LEN16_V(len16));

		/* FW_VI_MAC_MAC_BASED_FREE: free each entry by its MAC. */
		for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
			p->valid_to_idx = cpu_to_be16(
				FW_VI_MAC_CMD_VALID_F |
				FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret)
			break;

		/* Parse the reply (same buffer): an in-range index means the
		 * corresponding filter was found and freed.
		 */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = FW_VI_MAC_CMD_IDX_G(
						be16_to_cpu(p->valid_to_idx));

			if (index < max_naddr)
				nfilters++;
		}

		offset += fw_naddr;
		rem -= fw_naddr;
	}

	if (ret == 0)
		ret = nfilters;
	return ret;
}
8149
/**
 *	t4_change_mac - modifies the exact-match filter for a MAC address
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@idx: index of existing filter for old value of MAC address, or -1
 *	@addr: the new MAC address value
 *	@persist: whether a new MAC allocation should be persistent
 *	@smt_idx: the destination to store the new SMT index.
 *
 *	Modifies an exact-match filter and sets it to the new MAC address.
 *	Note that in general it is not possible to modify the value of a given
 *	filter so the generic way to modify an address filter is to free the one
 *	being used by the old address value and allocate a new filter for the
 *	new address value.  @idx can be -1 if the address is a new addition.
 *
 *	Returns a negative error number or the index of the filter with the new
 *	MAC value.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, u8 *smt_idx)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;	/* single exact-match entry */
	unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;

	if (idx < 0)                             /* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	/* Request an SMT entry as well only when the caller wants it back. */
	mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
				   FW_VI_MAC_CMD_VIID_V(viid));
	c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
				      FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
				      FW_VI_MAC_CMD_IDX_V(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		/* The reply echoes the index the filter landed at; an index
		 * beyond the MPS TCAM size means no filter was stored.
		 */
		ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
		if (ret >= max_mac_addr)
			ret = -ENOMEM;
		if (smt_idx) {
			if (adap->params.viid_smt_extn_support) {
				/* Firmware reports the SMT index directly. */
				*smt_idx = FW_VI_MAC_CMD_SMTID_G
						    (be32_to_cpu(c.op_to_viid));
			} else {
				/* In T4/T5, SMT contains 256 SMAC entries
				 * organized in 128 rows of 2 entries each.
				 * In T6, SMT contains 256 SMAC entries in
				 * 256 rows.
				 */
				if (CHELSIO_CHIP_VERSION(adap->params.chip) <=
								     CHELSIO_T5)
					*smt_idx = (viid & FW_VIID_VIN_M) << 1;
				else
					*smt_idx = (viid & FW_VIID_VIN_M);
			}
		}
	}
	return ret;
}
8216
8217/**
8218 *	t4_set_addr_hash - program the MAC inexact-match hash filter
8219 *	@adap: the adapter
8220 *	@mbox: mailbox to use for the FW command
8221 *	@viid: the VI id
8222 *	@ucast: whether the hash filter should also match unicast addresses
8223 *	@vec: the value to be written to the hash filter
8224 *	@sleep_ok: call is allowed to sleep
8225 *
8226 *	Sets the 64-bit inexact-match hash filter for a virtual interface.
8227 */
8228int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
8229		     bool ucast, u64 vec, bool sleep_ok)
8230{
8231	struct fw_vi_mac_cmd c;
8232
8233	memset(&c, 0, sizeof(c));
8234	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
8235				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
8236				   FW_VI_ENABLE_CMD_VIID_V(viid));
8237	c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
8238					  FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
8239					  FW_CMD_LEN16_V(1));
8240	c.u.hash.hashvec = cpu_to_be64(vec);
8241	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
8242}
8243
8244/**
8245 *      t4_enable_vi_params - enable/disable a virtual interface
8246 *      @adap: the adapter
8247 *      @mbox: mailbox to use for the FW command
8248 *      @viid: the VI id
8249 *      @rx_en: 1=enable Rx, 0=disable Rx
8250 *      @tx_en: 1=enable Tx, 0=disable Tx
8251 *      @dcb_en: 1=enable delivery of Data Center Bridging messages.
8252 *
8253 *      Enables/disables a virtual interface.  Note that setting DCB Enable
8254 *      only makes sense when enabling a Virtual Interface ...
8255 */
8256int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
8257			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
8258{
8259	struct fw_vi_enable_cmd c;
8260
8261	memset(&c, 0, sizeof(c));
8262	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
8263				   FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8264				   FW_VI_ENABLE_CMD_VIID_V(viid));
8265	c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
8266				     FW_VI_ENABLE_CMD_EEN_V(tx_en) |
8267				     FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
8268				     FW_LEN16(c));
8269	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
8270}
8271
8272/**
8273 *	t4_enable_vi - enable/disable a virtual interface
8274 *	@adap: the adapter
8275 *	@mbox: mailbox to use for the FW command
8276 *	@viid: the VI id
8277 *	@rx_en: 1=enable Rx, 0=disable Rx
8278 *	@tx_en: 1=enable Tx, 0=disable Tx
8279 *
8280 *	Enables/disables a virtual interface.
8281 */
8282int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
8283		 bool rx_en, bool tx_en)
8284{
8285	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
8286}
8287
8288/**
8289 *	t4_enable_pi_params - enable/disable a Port's Virtual Interface
8290 *      @adap: the adapter
8291 *      @mbox: mailbox to use for the FW command
8292 *      @pi: the Port Information structure
8293 *      @rx_en: 1=enable Rx, 0=disable Rx
8294 *      @tx_en: 1=enable Tx, 0=disable Tx
8295 *      @dcb_en: 1=enable delivery of Data Center Bridging messages.
8296 *
8297 *      Enables/disables a Port's Virtual Interface.  Note that setting DCB
8298 *	Enable only makes sense when enabling a Virtual Interface ...
8299 *	If the Virtual Interface enable/disable operation is successful,
8300 *	we notify the OS-specific code of a potential Link Status change
8301 *	via the OS Contract API t4_os_link_changed().
8302 */
8303int t4_enable_pi_params(struct adapter *adap, unsigned int mbox,
8304			struct port_info *pi,
8305			bool rx_en, bool tx_en, bool dcb_en)
8306{
8307	int ret = t4_enable_vi_params(adap, mbox, pi->viid,
8308				      rx_en, tx_en, dcb_en);
8309	if (ret)
8310		return ret;
8311	t4_os_link_changed(adap, pi->port_id,
8312			   rx_en && tx_en && pi->link_cfg.link_ok);
8313	return 0;
8314}
8315
8316/**
8317 *	t4_identify_port - identify a VI's port by blinking its LED
8318 *	@adap: the adapter
8319 *	@mbox: mailbox to use for the FW command
8320 *	@viid: the VI id
8321 *	@nblinks: how many times to blink LED at 2.5 Hz
8322 *
8323 *	Identifies a VI's port by blinking its LED.
8324 */
8325int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
8326		     unsigned int nblinks)
8327{
8328	struct fw_vi_enable_cmd c;
8329
8330	memset(&c, 0, sizeof(c));
8331	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
8332				   FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8333				   FW_VI_ENABLE_CMD_VIID_V(viid));
8334	c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
8335	c.blinkdur = cpu_to_be16(nblinks);
8336	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8337}
8338
8339/**
8340 *	t4_iq_stop - stop an ingress queue and its FLs
8341 *	@adap: the adapter
8342 *	@mbox: mailbox to use for the FW command
8343 *	@pf: the PF owning the queues
8344 *	@vf: the VF owning the queues
8345 *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
8346 *	@iqid: ingress queue id
8347 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
8348 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
8349 *
8350 *	Stops an ingress queue and its associated FLs, if any.  This causes
8351 *	any current or future data/messages destined for these queues to be
8352 *	tossed.
8353 */
8354int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
8355	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
8356	       unsigned int fl0id, unsigned int fl1id)
8357{
8358	struct fw_iq_cmd c;
8359
8360	memset(&c, 0, sizeof(c));
8361	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
8362				  FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
8363				  FW_IQ_CMD_VFN_V(vf));
8364	c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
8365	c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
8366	c.iqid = cpu_to_be16(iqid);
8367	c.fl0id = cpu_to_be16(fl0id);
8368	c.fl1id = cpu_to_be16(fl1id);
8369	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8370}
8371
8372/**
8373 *	t4_iq_free - free an ingress queue and its FLs
8374 *	@adap: the adapter
8375 *	@mbox: mailbox to use for the FW command
8376 *	@pf: the PF owning the queues
8377 *	@vf: the VF owning the queues
8378 *	@iqtype: the ingress queue type
8379 *	@iqid: ingress queue id
8380 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
8381 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
8382 *
8383 *	Frees an ingress queue and its associated FLs, if any.
8384 */
8385int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8386	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
8387	       unsigned int fl0id, unsigned int fl1id)
8388{
8389	struct fw_iq_cmd c;
8390
8391	memset(&c, 0, sizeof(c));
8392	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
8393				  FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
8394				  FW_IQ_CMD_VFN_V(vf));
8395	c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
8396	c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
8397	c.iqid = cpu_to_be16(iqid);
8398	c.fl0id = cpu_to_be16(fl0id);
8399	c.fl1id = cpu_to_be16(fl1id);
8400	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8401}
8402
8403/**
8404 *	t4_eth_eq_free - free an Ethernet egress queue
8405 *	@adap: the adapter
8406 *	@mbox: mailbox to use for the FW command
8407 *	@pf: the PF owning the queue
8408 *	@vf: the VF owning the queue
8409 *	@eqid: egress queue id
8410 *
8411 *	Frees an Ethernet egress queue.
8412 */
8413int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8414		   unsigned int vf, unsigned int eqid)
8415{
8416	struct fw_eq_eth_cmd c;
8417
8418	memset(&c, 0, sizeof(c));
8419	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
8420				  FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8421				  FW_EQ_ETH_CMD_PFN_V(pf) |
8422				  FW_EQ_ETH_CMD_VFN_V(vf));
8423	c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
8424	c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
8425	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8426}
8427
8428/**
8429 *	t4_ctrl_eq_free - free a control egress queue
8430 *	@adap: the adapter
8431 *	@mbox: mailbox to use for the FW command
8432 *	@pf: the PF owning the queue
8433 *	@vf: the VF owning the queue
8434 *	@eqid: egress queue id
8435 *
8436 *	Frees a control egress queue.
8437 */
8438int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8439		    unsigned int vf, unsigned int eqid)
8440{
8441	struct fw_eq_ctrl_cmd c;
8442
8443	memset(&c, 0, sizeof(c));
8444	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
8445				  FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8446				  FW_EQ_CTRL_CMD_PFN_V(pf) |
8447				  FW_EQ_CTRL_CMD_VFN_V(vf));
8448	c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
8449	c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
8450	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8451}
8452
8453/**
8454 *	t4_ofld_eq_free - free an offload egress queue
8455 *	@adap: the adapter
8456 *	@mbox: mailbox to use for the FW command
8457 *	@pf: the PF owning the queue
8458 *	@vf: the VF owning the queue
8459 *	@eqid: egress queue id
8460 *
8461 *	Frees a control egress queue.
8462 */
8463int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8464		    unsigned int vf, unsigned int eqid)
8465{
8466	struct fw_eq_ofld_cmd c;
8467
8468	memset(&c, 0, sizeof(c));
8469	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
8470				  FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8471				  FW_EQ_OFLD_CMD_PFN_V(pf) |
8472				  FW_EQ_OFLD_CMD_VFN_V(vf));
8473	c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
8474	c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
8475	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8476}
8477
/**
 *	t4_link_down_rc_str - return a string for a Link Down Reason Code
 *	@link_down_rc: Link Down Reason Code
 *
 *	Returns a string representation of the Link Down Reason Code.
 */
static const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	/* Indexed directly by the firmware-reported reason code. */
	static const char * const reason[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved",
	};

	return (link_down_rc < sizeof(reason) / sizeof(reason[0]))
		? reason[link_down_rc]
		: "Bad Reason Code";
}
8502
8503/* Return the highest speed set in the port capabilities, in Mb/s. */
8504static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
8505{
8506	#define TEST_SPEED_RETURN(__caps_speed, __speed) \
8507		do { \
8508			if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8509				return __speed; \
8510		} while (0)
8511
8512	TEST_SPEED_RETURN(400G, 400000);
8513	TEST_SPEED_RETURN(200G, 200000);
8514	TEST_SPEED_RETURN(100G, 100000);
8515	TEST_SPEED_RETURN(50G,   50000);
8516	TEST_SPEED_RETURN(40G,   40000);
8517	TEST_SPEED_RETURN(25G,   25000);
8518	TEST_SPEED_RETURN(10G,   10000);
8519	TEST_SPEED_RETURN(1G,     1000);
8520	TEST_SPEED_RETURN(100M,    100);
8521
8522	#undef TEST_SPEED_RETURN
8523
8524	return 0;
8525}
8526
8527/**
8528 *	fwcap_to_fwspeed - return highest speed in Port Capabilities
8529 *	@acaps: advertised Port Capabilities
8530 *
8531 *	Get the highest speed for the port from the advertised Port
8532 *	Capabilities.  It will be either the highest speed from the list of
8533 *	speeds or whatever user has set using ethtool.
8534 */
8535static fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps)
8536{
8537	#define TEST_SPEED_RETURN(__caps_speed) \
8538		do { \
8539			if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8540				return FW_PORT_CAP32_SPEED_##__caps_speed; \
8541		} while (0)
8542
8543	TEST_SPEED_RETURN(400G);
8544	TEST_SPEED_RETURN(200G);
8545	TEST_SPEED_RETURN(100G);
8546	TEST_SPEED_RETURN(50G);
8547	TEST_SPEED_RETURN(40G);
8548	TEST_SPEED_RETURN(25G);
8549	TEST_SPEED_RETURN(10G);
8550	TEST_SPEED_RETURN(1G);
8551	TEST_SPEED_RETURN(100M);
8552
8553	#undef TEST_SPEED_RETURN
8554
8555	return 0;
8556}
8557
8558/**
8559 *	lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
8560 *	@lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
8561 *
8562 *	Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
8563 *	32-bit Port Capabilities value.
8564 */
8565static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
8566{
8567	fw_port_cap32_t linkattr = 0;
8568
8569	/* Unfortunately the format of the Link Status in the old
8570	 * 16-bit Port Information message isn't the same as the
8571	 * 16-bit Port Capabilities bitfield used everywhere else ...
8572	 */
8573	if (lstatus & FW_PORT_CMD_RXPAUSE_F)
8574		linkattr |= FW_PORT_CAP32_FC_RX;
8575	if (lstatus & FW_PORT_CMD_TXPAUSE_F)
8576		linkattr |= FW_PORT_CAP32_FC_TX;
8577	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
8578		linkattr |= FW_PORT_CAP32_SPEED_100M;
8579	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
8580		linkattr |= FW_PORT_CAP32_SPEED_1G;
8581	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
8582		linkattr |= FW_PORT_CAP32_SPEED_10G;
8583	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
8584		linkattr |= FW_PORT_CAP32_SPEED_25G;
8585	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
8586		linkattr |= FW_PORT_CAP32_SPEED_40G;
8587	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
8588		linkattr |= FW_PORT_CAP32_SPEED_100G;
8589
8590	return linkattr;
8591}
8592
8593/**
8594 *	t4_handle_get_port_info - process a FW reply message
8595 *	@pi: the port info
8596 *	@rpl: start of the FW message
8597 *
8598 *	Processes a GET_PORT_INFO FW reply message.
8599 */
8600void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
8601{
8602	const struct fw_port_cmd *cmd = (const void *)rpl;
8603	fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
8604	struct link_config *lc = &pi->link_cfg;
8605	struct adapter *adapter = pi->adapter;
8606	unsigned int speed, fc, fec, adv_fc;
8607	enum fw_port_module_type mod_type;
8608	int action, link_ok, linkdnrc;
8609	enum fw_port_type port_type;
8610
8611	/* Extract the various fields from the Port Information message.
8612	 */
8613	action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
8614	switch (action) {
8615	case FW_PORT_ACTION_GET_PORT_INFO: {
8616		u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
8617
8618		link_ok = (lstatus & FW_PORT_CMD_LSTATUS_F) != 0;
8619		linkdnrc = FW_PORT_CMD_LINKDNRC_G(lstatus);
8620		port_type = FW_PORT_CMD_PTYPE_G(lstatus);
8621		mod_type = FW_PORT_CMD_MODTYPE_G(lstatus);
8622		pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
8623		acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
8624		lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap));
8625		linkattr = lstatus_to_fwcap(lstatus);
8626		break;
8627	}
8628
8629	case FW_PORT_ACTION_GET_PORT_INFO32: {
8630		u32 lstatus32;
8631
8632		lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
8633		link_ok = (lstatus32 & FW_PORT_CMD_LSTATUS32_F) != 0;
8634		linkdnrc = FW_PORT_CMD_LINKDNRC32_G(lstatus32);
8635		port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
8636		mod_type = FW_PORT_CMD_MODTYPE32_G(lstatus32);
8637		pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
8638		acaps = be32_to_cpu(cmd->u.info32.acaps32);
8639		lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32);
8640		linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
8641		break;
8642	}
8643
8644	default:
8645		dev_err(adapter->pdev_dev, "Handle Port Information: Bad Command/Action %#x\n",
8646			be32_to_cpu(cmd->action_to_len16));
8647		return;
8648	}
8649
8650	fec = fwcap_to_cc_fec(acaps);
8651	adv_fc = fwcap_to_cc_pause(acaps);
8652	fc = fwcap_to_cc_pause(linkattr);
8653	speed = fwcap_to_speed(linkattr);
8654
8655	/* Reset state for communicating new Transceiver Module status and
8656	 * whether the OS-dependent layer wants us to redo the current
8657	 * "sticky" L1 Configure Link Parameters.
8658	 */
8659	lc->new_module = false;
8660	lc->redo_l1cfg = false;
8661
8662	if (mod_type != pi->mod_type) {
8663		/* With the newer SFP28 and QSFP28 Transceiver Module Types,
8664		 * various fundamental Port Capabilities which used to be
8665		 * immutable can now change radically.  We can now have
8666		 * Speeds, Auto-Negotiation, Forward Error Correction, etc.
8667		 * all change based on what Transceiver Module is inserted.
8668		 * So we need to record the Physical "Port" Capabilities on
8669		 * every Transceiver Module change.
8670		 */
8671		lc->pcaps = pcaps;
8672
8673		/* When a new Transceiver Module is inserted, the Firmware
8674		 * will examine its i2c EPROM to determine its type and
8675		 * general operating parameters including things like Forward
8676		 * Error Control, etc.  Various IEEE 802.3 standards dictate
8677		 * how to interpret these i2c values to determine default
8678		 * "sutomatic" settings.  We record these for future use when
8679		 * the user explicitly requests these standards-based values.
8680		 */
8681		lc->def_acaps = acaps;
8682
8683		/* Some versions of the early T6 Firmware "cheated" when
8684		 * handling different Transceiver Modules by changing the
8685		 * underlaying Port Type reported to the Host Drivers.  As
8686		 * such we need to capture whatever Port Type the Firmware
8687		 * sends us and record it in case it's different from what we
8688		 * were told earlier.  Unfortunately, since Firmware is
8689		 * forever, we'll need to keep this code here forever, but in
8690		 * later T6 Firmware it should just be an assignment of the
8691		 * same value already recorded.
8692		 */
8693		pi->port_type = port_type;
8694
8695		/* Record new Module Type information.
8696		 */
8697		pi->mod_type = mod_type;
8698
8699		/* Let the OS-dependent layer know if we have a new
8700		 * Transceiver Module inserted.
8701		 */
8702		lc->new_module = t4_is_inserted_mod_type(mod_type);
8703
8704		t4_os_portmod_changed(adapter, pi->port_id);
8705	}
8706
8707	if (link_ok != lc->link_ok || speed != lc->speed ||
8708	    fc != lc->fc || adv_fc != lc->advertised_fc ||
8709	    fec != lc->fec) {
8710		/* something changed */
8711		if (!link_ok && lc->link_ok) {
8712			lc->link_down_rc = linkdnrc;
8713			dev_warn_ratelimited(adapter->pdev_dev,
8714					     "Port %d link down, reason: %s\n",
8715					     pi->tx_chan,
8716					     t4_link_down_rc_str(linkdnrc));
8717		}
8718		lc->link_ok = link_ok;
8719		lc->speed = speed;
8720		lc->advertised_fc = adv_fc;
8721		lc->fc = fc;
8722		lc->fec = fec;
8723
8724		lc->lpacaps = lpacaps;
8725		lc->acaps = acaps & ADVERT_MASK;
8726
8727		/* If we're not physically capable of Auto-Negotiation, note
8728		 * this as Auto-Negotiation disabled.  Otherwise, we track
8729		 * what Auto-Negotiation settings we have.  Note parallel
8730		 * structure in t4_link_l1cfg_core() and init_link_config().
8731		 */
8732		if (!(lc->acaps & FW_PORT_CAP32_ANEG)) {
8733			lc->autoneg = AUTONEG_DISABLE;
8734		} else if (lc->acaps & FW_PORT_CAP32_ANEG) {
8735			lc->autoneg = AUTONEG_ENABLE;
8736		} else {
8737			/* When Autoneg is disabled, user needs to set
8738			 * single speed.
8739			 * Similar to cxgb4_ethtool.c: set_link_ksettings
8740			 */
8741			lc->acaps = 0;
8742			lc->speed_caps = fwcap_to_fwspeed(acaps);
8743			lc->autoneg = AUTONEG_DISABLE;
8744		}
8745
8746		t4_os_link_changed(adapter, pi->port_id, link_ok);
8747	}
8748
8749	/* If we have a new Transceiver Module and the OS-dependent code has
8750	 * told us that it wants us to redo whatever "sticky" L1 Configuration
8751	 * Link Parameters are set, do that now.
8752	 */
8753	if (lc->new_module && lc->redo_l1cfg) {
8754		struct link_config old_lc;
8755		int ret;
8756
8757		/* Save the current L1 Configuration and restore it if an
8758		 * error occurs.  We probably should fix the l1_cfg*()
8759		 * routines not to change the link_config when an error
8760		 * occurs ...
8761		 */
8762		old_lc = *lc;
8763		ret = t4_link_l1cfg_ns(adapter, adapter->mbox, pi->lport, lc);
8764		if (ret) {
8765			*lc = old_lc;
8766			dev_warn(adapter->pdev_dev,
8767				 "Attempt to update new Transceiver Module settings failed\n");
8768		}
8769	}
8770	lc->new_module = false;
8771	lc->redo_l1cfg = false;
8772}
8773
8774/**
8775 *	t4_update_port_info - retrieve and update port information if changed
8776 *	@pi: the port_info
8777 *
8778 *	We issue a Get Port Information Command to the Firmware and, if
8779 *	successful, we check to see if anything is different from what we
8780 *	last recorded and update things accordingly.
8781 */
8782int t4_update_port_info(struct port_info *pi)
8783{
8784	unsigned int fw_caps = pi->adapter->params.fw_caps_support;
8785	struct fw_port_cmd port_cmd;
8786	int ret;
8787
8788	memset(&port_cmd, 0, sizeof(port_cmd));
8789	port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
8790					    FW_CMD_REQUEST_F | FW_CMD_READ_F |
8791					    FW_PORT_CMD_PORTID_V(pi->tx_chan));
8792	port_cmd.action_to_len16 = cpu_to_be32(
8793		FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
8794				     ? FW_PORT_ACTION_GET_PORT_INFO
8795				     : FW_PORT_ACTION_GET_PORT_INFO32) |
8796		FW_LEN16(port_cmd));
8797	ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8798			 &port_cmd, sizeof(port_cmd), &port_cmd);
8799	if (ret)
8800		return ret;
8801
8802	t4_handle_get_port_info(pi, (__be64 *)&port_cmd);
8803	return 0;
8804}
8805
8806/**
8807 *	t4_get_link_params - retrieve basic link parameters for given port
8808 *	@pi: the port
8809 *	@link_okp: value return pointer for link up/down
8810 *	@speedp: value return pointer for speed (Mb/s)
8811 *	@mtup: value return pointer for mtu
8812 *
8813 *	Retrieves basic link parameters for a port: link up/down, speed (Mb/s),
8814 *	and MTU for a specified port.  A negative error is returned on
8815 *	failure; 0 on success.
8816 */
8817int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
8818		       unsigned int *speedp, unsigned int *mtup)
8819{
8820	unsigned int fw_caps = pi->adapter->params.fw_caps_support;
8821	unsigned int action, link_ok, mtu;
8822	struct fw_port_cmd port_cmd;
8823	fw_port_cap32_t linkattr;
8824	int ret;
8825
8826	memset(&port_cmd, 0, sizeof(port_cmd));
8827	port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
8828					    FW_CMD_REQUEST_F | FW_CMD_READ_F |
8829					    FW_PORT_CMD_PORTID_V(pi->tx_chan));
8830	action = (fw_caps == FW_CAPS16
8831		  ? FW_PORT_ACTION_GET_PORT_INFO
8832		  : FW_PORT_ACTION_GET_PORT_INFO32);
8833	port_cmd.action_to_len16 = cpu_to_be32(
8834		FW_PORT_CMD_ACTION_V(action) |
8835		FW_LEN16(port_cmd));
8836	ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8837			 &port_cmd, sizeof(port_cmd), &port_cmd);
8838	if (ret)
8839		return ret;
8840
8841	if (action == FW_PORT_ACTION_GET_PORT_INFO) {
8842		u32 lstatus = be32_to_cpu(port_cmd.u.info.lstatus_to_modtype);
8843
8844		link_ok = !!(lstatus & FW_PORT_CMD_LSTATUS_F);
8845		linkattr = lstatus_to_fwcap(lstatus);
8846		mtu = be16_to_cpu(port_cmd.u.info.mtu);
8847	} else {
8848		u32 lstatus32 =
8849			   be32_to_cpu(port_cmd.u.info32.lstatus32_to_cbllen32);
8850
8851		link_ok = !!(lstatus32 & FW_PORT_CMD_LSTATUS32_F);
8852		linkattr = be32_to_cpu(port_cmd.u.info32.linkattr32);
8853		mtu = FW_PORT_CMD_MTU32_G(
8854			be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
8855	}
8856
8857	if (link_okp)
8858		*link_okp = link_ok;
8859	if (speedp)
8860		*speedp = fwcap_to_speed(linkattr);
8861	if (mtup)
8862		*mtup = mtu;
8863
8864	return 0;
8865}
8866
8867/**
8868 *      t4_handle_fw_rpl - process a FW reply message
8869 *      @adap: the adapter
8870 *      @rpl: start of the FW message
8871 *
8872 *      Processes a FW message, such as link state change messages.
8873 */
8874int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
8875{
8876	u8 opcode = *(const u8 *)rpl;
8877
8878	/* This might be a port command ... this simplifies the following
8879	 * conditionals ...  We can get away with pre-dereferencing
8880	 * action_to_len16 because it's in the first 16 bytes and all messages
8881	 * will be at least that long.
8882	 */
8883	const struct fw_port_cmd *p = (const void *)rpl;
8884	unsigned int action =
8885		FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16));
8886
8887	if (opcode == FW_PORT_CMD &&
8888	    (action == FW_PORT_ACTION_GET_PORT_INFO ||
8889	     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
8890		int i;
8891		int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
8892		struct port_info *pi = NULL;
8893
8894		for_each_port(adap, i) {
8895			pi = adap2pinfo(adap, i);
8896			if (pi->tx_chan == chan)
8897				break;
8898		}
8899
8900		t4_handle_get_port_info(pi, rpl);
8901	} else {
8902		dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n",
8903			 opcode);
8904		return -EINVAL;
8905	}
8906	return 0;
8907}
8908
8909static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
8910{
8911	u16 val;
8912
8913	if (pci_is_pcie(adapter->pdev)) {
8914		pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
8915		p->speed = val & PCI_EXP_LNKSTA_CLS;
8916		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
8917	}
8918}
8919
8920/**
8921 *	init_link_config - initialize a link's SW state
8922 *	@lc: pointer to structure holding the link state
8923 *	@pcaps: link Port Capabilities
8924 *	@acaps: link current Advertised Port Capabilities
8925 *
8926 *	Initializes the SW state maintained for each link, including the link's
8927 *	capabilities and default speed/flow-control/autonegotiation settings.
8928 */
8929static void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
8930			     fw_port_cap32_t acaps)
8931{
8932	lc->pcaps = pcaps;
8933	lc->def_acaps = acaps;
8934	lc->lpacaps = 0;
8935	lc->speed_caps = 0;
8936	lc->speed = 0;
8937	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
8938
8939	/* For Forward Error Control, we default to whatever the Firmware
8940	 * tells us the Link is currently advertising.
8941	 */
8942	lc->requested_fec = FEC_AUTO;
8943	lc->fec = fwcap_to_cc_fec(lc->def_acaps);
8944
8945	/* If the Port is capable of Auto-Negtotiation, initialize it as
8946	 * "enabled" and copy over all of the Physical Port Capabilities
8947	 * to the Advertised Port Capabilities.  Otherwise mark it as
8948	 * Auto-Negotiate disabled and select the highest supported speed
8949	 * for the link.  Note parallel structure in t4_link_l1cfg_core()
8950	 * and t4_handle_get_port_info().
8951	 */
8952	if (lc->pcaps & FW_PORT_CAP32_ANEG) {
8953		lc->acaps = lc->pcaps & ADVERT_MASK;
8954		lc->autoneg = AUTONEG_ENABLE;
8955		lc->requested_fc |= PAUSE_AUTONEG;
8956	} else {
8957		lc->acaps = 0;
8958		lc->autoneg = AUTONEG_DISABLE;
8959		lc->speed_caps = fwcap_to_fwspeed(acaps);
8960	}
8961}
8962
8963#define CIM_PF_NOACCESS 0xeeeeeeee
8964
8965int t4_wait_dev_ready(void __iomem *regs)
8966{
8967	u32 whoami;
8968
8969	whoami = readl(regs + PL_WHOAMI_A);
8970	if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
8971		return 0;
8972
8973	msleep(500);
8974	whoami = readl(regs + PL_WHOAMI_A);
8975	return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
8976}
8977
/* Describes one supported SPI flash part: its 24-bit JEDEC Read ID value
 * and its total size.  NOTE(review): despite the name, size_mb appears to
 * hold the size in bytes (the one table entry stores 4 << 20) — confirm
 * against all users before relying on the units.
 */
struct flash_desc {
	u32 vendor_and_model_id;	/* JEDEC ID as returned by SF_RD_ID */
	u32 size_mb;			/* part size (see NOTE above) */
};
8982
/**
 *	t4_get_flash_params - probe the serial flash and record its geometry
 *	@adap: the adapter
 *
 *	Issues a JEDEC Read ID command to the flash part, decodes the
 *	manufacturer and density from the returned ID, and stores the part's
 *	size and 64KB-sector count in adap->params.  Unrecognized parts are
 *	assumed to be 4MB.  Returns 0 on success or a negative error from
 *	the serial-flash access routines.
 */
static int t4_get_flash_params(struct adapter *adap)
{
	/* Table for non-Numonix supported flash parts.  Numonix parts are left
	 * to the preexisting code.  All flash parts have 64KB sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x150201, 4 << 20 },       /* Spansion 4MB S25FL032P */
	};

	unsigned int part, manufacturer;
	unsigned int density, size = 0;
	u32 flashid = 0;
	int ret;

	/* Issue a Read ID Command to the Flash part.  We decode supported
	 * Flash parts and their sizes from this.  There's a newer Query
	 * Command which can retrieve detailed geometry information but many
	 * Flash parts don't support it.
	 */

	ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adap, 3, 0, 1, &flashid);
	t4_write_reg(adap, SF_OP_A, 0);                    /* unlock SF */
	if (ret)
		return ret;

	/* Check to see if it's one of our non-standard supported Flash parts.
	 */
	for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
		if (supported_flash[part].vendor_and_model_id == flashid) {
			adap->params.sf_size = supported_flash[part].size_mb;
			adap->params.sf_nsec =
				adap->params.sf_size / SF_SEC_SIZE;
			goto found;
		}

	/* Decode Flash part size.  The code below looks repetitive with
	 * common encodings, but that's not guaranteed in the JEDEC
	 * specification for the Read JEDEC ID command.  The only thing that
	 * we're guaranteed by the JEDEC specification is where the
	 * Manufacturer ID is in the returned result.  After that each
	 * Manufacturer ~could~ encode things completely differently.
	 * Note, all Flash parts must have 64KB sectors.
	 */
	manufacturer = flashid & 0xff;
	switch (manufacturer) {
	case 0x20: { /* Micron/Numonix */
		/* This Density -> Size decoding table is taken from Micron
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x14: /* 1MB */
			size = 1 << 20;
			break;
		case 0x15: /* 2MB */
			size = 1 << 21;
			break;
		case 0x16: /* 4MB */
			size = 1 << 22;
			break;
		case 0x17: /* 8MB */
			size = 1 << 23;
			break;
		case 0x18: /* 16MB */
			size = 1 << 24;
			break;
		case 0x19: /* 32MB */
			size = 1 << 25;
			break;
		case 0x20: /* 64MB */
			size = 1 << 26;
			break;
		case 0x21: /* 128MB */
			size = 1 << 27;
			break;
		case 0x22: /* 256MB */
			size = 1 << 28;
			break;
		}
		break;
	}
	case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
		/* This Density -> Size decoding table is taken from ISSI
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x16: /* 32 MB */
			size = 1 << 25;
			break;
		case 0x17: /* 64MB */
			size = 1 << 26;
			break;
		}
		break;
	}
	case 0xc2: { /* Macronix */
		/* This Density -> Size decoding table is taken from Macronix
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: /* 8MB */
			size = 1 << 23;
			break;
		case 0x18: /* 16MB */
			size = 1 << 24;
			break;
		}
		break;
	}
	case 0xef: { /* Winbond */
		/* This Density -> Size decoding table is taken from Winbond
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: /* 8MB */
			size = 1 << 23;
			break;
		case 0x18: /* 16MB */
			size = 1 << 24;
			break;
		}
		break;
	}
	}

	/* If we didn't recognize the FLASH part, that's no real issue: the
	 * Hardware/Software contract says that Hardware will _*ALWAYS*_
	 * use a FLASH part which is at least 4MB in size and has 64KB
	 * sectors.  The unrecognized FLASH part is likely to be much larger
	 * than 4MB, but that's all we really need.
	 */
	if (size == 0) {
		dev_warn(adap->pdev_dev, "Unknown Flash Part, ID = %#x, assuming 4MB\n",
			 flashid);
		size = 1 << 22;
	}

	/* Store decoded Flash size and fall through into vetting code. */
	adap->params.sf_size = size;
	adap->params.sf_nsec = size / SF_SEC_SIZE;

found:
	/* Vet the final size against the minimum the driver requires. */
	if (adap->params.sf_size < FLASH_MIN_SIZE)
		dev_warn(adap->pdev_dev, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
			 flashid, adap->params.sf_size, FLASH_MIN_SIZE);
	return 0;
}
9135
9136/**
9137 *	t4_prep_adapter - prepare SW and HW for operation
9138 *	@adapter: the adapter
9139 *
9140 *	Initialize adapter SW state for the various HW modules, set initial
9141 *	values for some adapter tunables, take PHYs out of reset, and
9142 *	initialize the MDIO interface.
9143 */
9144int t4_prep_adapter(struct adapter *adapter)
9145{
9146	int ret, ver;
9147	uint16_t device_id;
9148	u32 pl_rev;
9149
9150	get_pci_mode(adapter, &adapter->params.pci);
9151	pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));
9152
9153	ret = t4_get_flash_params(adapter);
9154	if (ret < 0) {
9155		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
9156		return ret;
9157	}
9158
9159	/* Retrieve adapter's device ID
9160	 */
9161	pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
9162	ver = device_id >> 12;
9163	adapter->params.chip = 0;
9164	switch (ver) {
9165	case CHELSIO_T4:
9166		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
9167		adapter->params.arch.sge_fl_db = DBPRIO_F;
9168		adapter->params.arch.mps_tcam_size =
9169				 NUM_MPS_CLS_SRAM_L_INSTANCES;
9170		adapter->params.arch.mps_rplc_size = 128;
9171		adapter->params.arch.nchan = NCHAN;
9172		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
9173		adapter->params.arch.vfcount = 128;
9174		/* Congestion map is for 4 channels so that
9175		 * MPS can have 4 priority per port.
9176		 */
9177		adapter->params.arch.cng_ch_bits_log = 2;
9178		break;
9179	case CHELSIO_T5:
9180		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
9181		adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
9182		adapter->params.arch.mps_tcam_size =
9183				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
9184		adapter->params.arch.mps_rplc_size = 128;
9185		adapter->params.arch.nchan = NCHAN;
9186		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
9187		adapter->params.arch.vfcount = 128;
9188		adapter->params.arch.cng_ch_bits_log = 2;
9189		break;
9190	case CHELSIO_T6:
9191		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
9192		adapter->params.arch.sge_fl_db = 0;
9193		adapter->params.arch.mps_tcam_size =
9194				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
9195		adapter->params.arch.mps_rplc_size = 256;
9196		adapter->params.arch.nchan = 2;
9197		adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
9198		adapter->params.arch.vfcount = 256;
9199		/* Congestion map will be for 2 channels so that
9200		 * MPS can have 8 priority per port.
9201		 */
9202		adapter->params.arch.cng_ch_bits_log = 3;
9203		break;
9204	default:
9205		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
9206			device_id);
9207		return -EINVAL;
9208	}
9209
9210	adapter->params.cim_la_size = CIMLA_SIZE;
9211	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
9212
9213	/*
9214	 * Default port for debugging in case we can't reach FW.
9215	 */
9216	adapter->params.nports = 1;
9217	adapter->params.portvec = 1;
9218	adapter->params.vpd.cclk = 50000;
9219
9220	/* Set PCIe completion timeout to 4 seconds. */
9221	pcie_capability_clear_and_set_word(adapter->pdev, PCI_EXP_DEVCTL2,
9222					   PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
9223	return 0;
9224}
9225
9226/**
9227 *	t4_shutdown_adapter - shut down adapter, host & wire
9228 *	@adapter: the adapter
9229 *
9230 *	Perform an emergency shutdown of the adapter and stop it from
9231 *	continuing any further communication on the ports or DMA to the
9232 *	host.  This is typically used when the adapter and/or firmware
9233 *	have crashed and we want to prevent any further accidental
9234 *	communication with the rest of the world.  This will also force
9235 *	the port Link Status to go down -- if register writes work --
9236 *	which should help our peers figure out that we're down.
9237 */
9238int t4_shutdown_adapter(struct adapter *adapter)
9239{
9240	int port;
9241
9242	t4_intr_disable(adapter);
9243	t4_write_reg(adapter, DBG_GPIO_EN_A, 0);
9244	for_each_port(adapter, port) {
9245		u32 a_port_cfg = is_t4(adapter->params.chip) ?
9246				       PORT_REG(port, XGMAC_PORT_CFG_A) :
9247				       T5_PORT_REG(port, MAC_PORT_CFG_A);
9248
9249		t4_write_reg(adapter, a_port_cfg,
9250			     t4_read_reg(adapter, a_port_cfg)
9251			     & ~SIGNAL_DET_V(1));
9252	}
9253	t4_set_reg_field(adapter, SGE_CONTROL_A, GLOBALENABLE_F, 0);
9254
9255	return 0;
9256}
9257
9258/**
9259 *	t4_bar2_sge_qregs - return BAR2 SGE Queue register information
9260 *	@adapter: the adapter
9261 *	@qid: the Queue ID
9262 *	@qtype: the Ingress or Egress type for @qid
9263 *	@user: true if this request is for a user mode queue
9264 *	@pbar2_qoffset: BAR2 Queue Offset
9265 *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
9266 *
9267 *	Returns the BAR2 SGE Queue Registers information associated with the
9268 *	indicated Absolute Queue ID.  These are passed back in return value
9269 *	pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
9270 *	and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
9271 *
9272 *	This may return an error which indicates that BAR2 SGE Queue
9273 *	registers aren't available.  If an error is not returned, then the
9274 *	following values are returned:
9275 *
9276 *	  *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
9277 *	  *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
9278 *
9279 *	If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
9280 *	require the "Inferred Queue ID" ability may be used.  E.g. the
9281 *	Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
9282 *	then these "Inferred Queue ID" register may not be used.
9283 */
9284int t4_bar2_sge_qregs(struct adapter *adapter,
9285		      unsigned int qid,
9286		      enum t4_bar2_qtype qtype,
9287		      int user,
9288		      u64 *pbar2_qoffset,
9289		      unsigned int *pbar2_qid)
9290{
9291	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
9292	u64 bar2_page_offset, bar2_qoffset;
9293	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
9294
9295	/* T4 doesn't support BAR2 SGE Queue registers for kernel mode queues */
9296	if (!user && is_t4(adapter->params.chip))
9297		return -EINVAL;
9298
9299	/* Get our SGE Page Size parameters.
9300	 */
9301	page_shift = adapter->params.sge.hps + 10;
9302	page_size = 1 << page_shift;
9303
9304	/* Get the right Queues per Page parameters for our Queue.
9305	 */
9306	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
9307		     ? adapter->params.sge.eq_qpp
9308		     : adapter->params.sge.iq_qpp);
9309	qpp_mask = (1 << qpp_shift) - 1;
9310
9311	/*  Calculate the basics of the BAR2 SGE Queue register area:
9312	 *  o The BAR2 page the Queue registers will be in.
9313	 *  o The BAR2 Queue ID.
9314	 *  o The BAR2 Queue ID Offset into the BAR2 page.
9315	 */
9316	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
9317	bar2_qid = qid & qpp_mask;
9318	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
9319
9320	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
9321	 * hardware will infer the Absolute Queue ID simply from the writes to
9322	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
9323	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
9324	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
9325	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
9326	 * from the BAR2 Page and BAR2 Queue ID.
9327	 *
9328	 * One important censequence of this is that some BAR2 SGE registers
9329	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
9330	 * there.  But other registers synthesize the SGE Queue ID purely
9331	 * from the writes to the registers -- the Write Combined Doorbell
9332	 * Buffer is a good example.  These BAR2 SGE Registers are only
9333	 * available for those BAR2 SGE Register areas where the SGE Absolute
9334	 * Queue ID can be inferred from simple writes.
9335	 */
9336	bar2_qoffset = bar2_page_offset;
9337	bar2_qinferred = (bar2_qid_offset < page_size);
9338	if (bar2_qinferred) {
9339		bar2_qoffset += bar2_qid_offset;
9340		bar2_qid = 0;
9341	}
9342
9343	*pbar2_qoffset = bar2_qoffset;
9344	*pbar2_qid = bar2_qid;
9345	return 0;
9346}
9347
9348/**
9349 *	t4_init_devlog_params - initialize adapter->params.devlog
9350 *	@adap: the adapter
9351 *
9352 *	Initialize various fields of the adapter's Firmware Device Log
9353 *	Parameters structure.
9354 */
9355int t4_init_devlog_params(struct adapter *adap)
9356{
9357	struct devlog_params *dparams = &adap->params.devlog;
9358	u32 pf_dparams;
9359	unsigned int devlog_meminfo;
9360	struct fw_devlog_cmd devlog_cmd;
9361	int ret;
9362
9363	/* If we're dealing with newer firmware, the Device Log Parameters
9364	 * are stored in a designated register which allows us to access the
9365	 * Device Log even if we can't talk to the firmware.
9366	 */
9367	pf_dparams =
9368		t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
9369	if (pf_dparams) {
9370		unsigned int nentries, nentries128;
9371
9372		dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
9373		dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
9374
9375		nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
9376		nentries = (nentries128 + 1) * 128;
9377		dparams->size = nentries * sizeof(struct fw_devlog_e);
9378
9379		return 0;
9380	}
9381
9382	/* Otherwise, ask the firmware for it's Device Log Parameters.
9383	 */
9384	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9385	devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
9386					     FW_CMD_REQUEST_F | FW_CMD_READ_F);
9387	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9388	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
9389			 &devlog_cmd);
9390	if (ret)
9391		return ret;
9392
9393	devlog_meminfo =
9394		be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
9395	dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
9396	dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
9397	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
9398
9399	return 0;
9400}
9401
9402/**
9403 *	t4_init_sge_params - initialize adap->params.sge
9404 *	@adapter: the adapter
9405 *
9406 *	Initialize various fields of the adapter's SGE Parameters structure.
9407 */
9408int t4_init_sge_params(struct adapter *adapter)
9409{
9410	struct sge_params *sge_params = &adapter->params.sge;
9411	u32 hps, qpp;
9412	unsigned int s_hps, s_qpp;
9413
9414	/* Extract the SGE Page Size for our PF.
9415	 */
9416	hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
9417	s_hps = (HOSTPAGESIZEPF0_S +
9418		 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
9419	sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
9420
9421	/* Extract the SGE Egress and Ingess Queues Per Page for our PF.
9422	 */
9423	s_qpp = (QUEUESPERPAGEPF0_S +
9424		(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
9425	qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
9426	sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
9427	qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
9428	sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
9429
9430	return 0;
9431}
9432
9433/**
9434 *      t4_init_tp_params - initialize adap->params.tp
9435 *      @adap: the adapter
9436 *      @sleep_ok: if true we may sleep while awaiting command completion
9437 *
9438 *      Initialize various fields of the adapter's TP Parameters structure.
9439 */
9440int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
9441{
9442	u32 param, val, v;
9443	int chan, ret;
9444
9445
9446	v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
9447	adap->params.tp.tre = TIMERRESOLUTION_G(v);
9448	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
9449
9450	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
9451	for (chan = 0; chan < NCHAN; chan++)
9452		adap->params.tp.tx_modq[chan] = chan;
9453
9454	/* Cache the adapter's Compressed Filter Mode/Mask and global Ingress
9455	 * Configuration.
9456	 */
9457	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
9458		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FILTER) |
9459		 FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_FILTER_MODE_MASK));
9460
9461	/* Read current value */
9462	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
9463			      &param, &val);
9464	if (ret == 0) {
9465		dev_info(adap->pdev_dev,
9466			 "Current filter mode/mask 0x%x:0x%x\n",
9467			 FW_PARAMS_PARAM_FILTER_MODE_G(val),
9468			 FW_PARAMS_PARAM_FILTER_MASK_G(val));
9469		adap->params.tp.vlan_pri_map =
9470			FW_PARAMS_PARAM_FILTER_MODE_G(val);
9471		adap->params.tp.filter_mask =
9472			FW_PARAMS_PARAM_FILTER_MASK_G(val);
9473	} else {
9474		dev_info(adap->pdev_dev,
9475			 "Failed to read filter mode/mask via fw api, using indirect-reg-read\n");
9476
9477		/* Incase of older-fw (which doesn't expose the api
9478		 * FW_PARAM_DEV_FILTER_MODE_MASK) and newer-driver (which uses
9479		 * the fw api) combination, fall-back to older method of reading
9480		 * the filter mode from indirect-register
9481		 */
9482		t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
9483			       TP_VLAN_PRI_MAP_A, sleep_ok);
9484
9485		/* With the older-fw and newer-driver combination we might run
9486		 * into an issue when user wants to use hash filter region but
9487		 * the filter_mask is zero, in this case filter_mask validation
9488		 * is tough. To avoid that we set the filter_mask same as filter
9489		 * mode, which will behave exactly as the older way of ignoring
9490		 * the filter mask validation.
9491		 */
9492		adap->params.tp.filter_mask = adap->params.tp.vlan_pri_map;
9493	}
9494
9495	t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
9496		       TP_INGRESS_CONFIG_A, sleep_ok);
9497
9498	/* For T6, cache the adapter's compressed error vector
9499	 * and passing outer header info for encapsulated packets.
9500	 */
9501	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
9502		v = t4_read_reg(adap, TP_OUT_CONFIG_A);
9503		adap->params.tp.rx_pkt_encap = (v & CRXPKTENC_F) ? 1 : 0;
9504	}
9505
9506	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
9507	 * shift positions of several elements of the Compressed Filter Tuple
9508	 * for this adapter which we need frequently ...
9509	 */
9510	adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, FCOE_F);
9511	adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
9512	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
9513	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
9514	adap->params.tp.tos_shift = t4_filter_field_shift(adap, TOS_F);
9515	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
9516							       PROTOCOL_F);
9517	adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
9518								ETHERTYPE_F);
9519	adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
9520							       MACMATCH_F);
9521	adap->params.tp.matchtype_shift = t4_filter_field_shift(adap,
9522								MPSHITTYPE_F);
9523	adap->params.tp.frag_shift = t4_filter_field_shift(adap,
9524							   FRAGMENTATION_F);
9525
9526	/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
9527	 * represents the presence of an Outer VLAN instead of a VNIC ID.
9528	 */
9529	if ((adap->params.tp.ingress_config & VNIC_F) == 0)
9530		adap->params.tp.vnic_shift = -1;
9531
9532	v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
9533	adap->params.tp.hash_filter_mask = v;
9534	v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
9535	adap->params.tp.hash_filter_mask |= ((u64)v << 32);
9536	return 0;
9537}
9538
9539/**
9540 *      t4_filter_field_shift - calculate filter field shift
9541 *      @adap: the adapter
9542 *      @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
9543 *
9544 *      Return the shift position of a filter field within the Compressed
9545 *      Filter Tuple.  The filter field is specified via its selection bit
9546 *      within TP_VLAN_PRI_MAL (filter mode).  E.g. F_VLAN.
9547 */
9548int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
9549{
9550	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
9551	unsigned int sel;
9552	int field_shift;
9553
9554	if ((filter_mode & filter_sel) == 0)
9555		return -1;
9556
9557	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
9558		switch (filter_mode & sel) {
9559		case FCOE_F:
9560			field_shift += FT_FCOE_W;
9561			break;
9562		case PORT_F:
9563			field_shift += FT_PORT_W;
9564			break;
9565		case VNIC_ID_F:
9566			field_shift += FT_VNIC_ID_W;
9567			break;
9568		case VLAN_F:
9569			field_shift += FT_VLAN_W;
9570			break;
9571		case TOS_F:
9572			field_shift += FT_TOS_W;
9573			break;
9574		case PROTOCOL_F:
9575			field_shift += FT_PROTOCOL_W;
9576			break;
9577		case ETHERTYPE_F:
9578			field_shift += FT_ETHERTYPE_W;
9579			break;
9580		case MACMATCH_F:
9581			field_shift += FT_MACMATCH_W;
9582			break;
9583		case MPSHITTYPE_F:
9584			field_shift += FT_MPSHITTYPE_W;
9585			break;
9586		case FRAGMENTATION_F:
9587			field_shift += FT_FRAGMENTATION_W;
9588			break;
9589		}
9590	}
9591	return field_shift;
9592}
9593
9594int t4_init_rss_mode(struct adapter *adap, int mbox)
9595{
9596	int i, ret;
9597	struct fw_rss_vi_config_cmd rvc;
9598
9599	memset(&rvc, 0, sizeof(rvc));
9600
9601	for_each_port(adap, i) {
9602		struct port_info *p = adap2pinfo(adap, i);
9603
9604		rvc.op_to_viid =
9605			cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
9606				    FW_CMD_REQUEST_F | FW_CMD_READ_F |
9607				    FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
9608		rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
9609		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
9610		if (ret)
9611			return ret;
9612		p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
9613	}
9614	return 0;
9615}
9616
9617/**
9618 *	t4_init_portinfo - allocate a virtual interface and initialize port_info
9619 *	@pi: the port_info
9620 *	@mbox: mailbox to use for the FW command
9621 *	@port: physical port associated with the VI
9622 *	@pf: the PF owning the VI
9623 *	@vf: the VF owning the VI
9624 *	@mac: the MAC address of the VI
9625 *
9626 *	Allocates a virtual interface for the given physical port.  If @mac is
9627 *	not %NULL it contains the MAC address of the VI as assigned by FW.
9628 *	@mac should be large enough to hold an Ethernet address.
9629 *	Returns < 0 on error.
9630 */
9631int t4_init_portinfo(struct port_info *pi, int mbox,
9632		     int port, int pf, int vf, u8 mac[])
9633{
9634	struct adapter *adapter = pi->adapter;
9635	unsigned int fw_caps = adapter->params.fw_caps_support;
9636	struct fw_port_cmd cmd;
9637	unsigned int rss_size;
9638	enum fw_port_type port_type;
9639	int mdio_addr;
9640	fw_port_cap32_t pcaps, acaps;
9641	u8 vivld = 0, vin = 0;
9642	int ret;
9643
9644	/* If we haven't yet determined whether we're talking to Firmware
9645	 * which knows the new 32-bit Port Capabilities, it's time to find
9646	 * out now.  This will also tell new Firmware to send us Port Status
9647	 * Updates using the new 32-bit Port Capabilities version of the
9648	 * Port Information message.
9649	 */
9650	if (fw_caps == FW_CAPS_UNKNOWN) {
9651		u32 param, val;
9652
9653		param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
9654			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
9655		val = 1;
9656		ret = t4_set_params(adapter, mbox, pf, vf, 1, &param, &val);
9657		fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
9658		adapter->params.fw_caps_support = fw_caps;
9659	}
9660
9661	memset(&cmd, 0, sizeof(cmd));
9662	cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
9663				       FW_CMD_REQUEST_F | FW_CMD_READ_F |
9664				       FW_PORT_CMD_PORTID_V(port));
9665	cmd.action_to_len16 = cpu_to_be32(
9666		FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
9667				     ? FW_PORT_ACTION_GET_PORT_INFO
9668				     : FW_PORT_ACTION_GET_PORT_INFO32) |
9669		FW_LEN16(cmd));
9670	ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
9671	if (ret)
9672		return ret;
9673
9674	/* Extract the various fields from the Port Information message.
9675	 */
9676	if (fw_caps == FW_CAPS16) {
9677		u32 lstatus = be32_to_cpu(cmd.u.info.lstatus_to_modtype);
9678
9679		port_type = FW_PORT_CMD_PTYPE_G(lstatus);
9680		mdio_addr = ((lstatus & FW_PORT_CMD_MDIOCAP_F)
9681			     ? FW_PORT_CMD_MDIOADDR_G(lstatus)
9682			     : -1);
9683		pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.pcap));
9684		acaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.acap));
9685	} else {
9686		u32 lstatus32 = be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);
9687
9688		port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
9689		mdio_addr = ((lstatus32 & FW_PORT_CMD_MDIOCAP32_F)
9690			     ? FW_PORT_CMD_MDIOADDR32_G(lstatus32)
9691			     : -1);
9692		pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
9693		acaps = be32_to_cpu(cmd.u.info32.acaps32);
9694	}
9695
9696	ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size,
9697			  &vivld, &vin);
9698	if (ret < 0)
9699		return ret;
9700
9701	pi->viid = ret;
9702	pi->tx_chan = port;
9703	pi->lport = port;
9704	pi->rss_size = rss_size;
9705	pi->rx_cchan = t4_get_tp_e2c_map(pi->adapter, port);
9706
9707	/* If fw supports returning the VIN as part of FW_VI_CMD,
9708	 * save the returned values.
9709	 */
9710	if (adapter->params.viid_smt_extn_support) {
9711		pi->vivld = vivld;
9712		pi->vin = vin;
9713	} else {
9714		/* Retrieve the values from VIID */
9715		pi->vivld = FW_VIID_VIVLD_G(pi->viid);
9716		pi->vin =  FW_VIID_VIN_G(pi->viid);
9717	}
9718
9719	pi->port_type = port_type;
9720	pi->mdio_addr = mdio_addr;
9721	pi->mod_type = FW_PORT_MOD_TYPE_NA;
9722
9723	init_link_config(&pi->link_cfg, pcaps, acaps);
9724	return 0;
9725}
9726
9727int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
9728{
9729	u8 addr[6];
9730	int ret, i, j = 0;
9731
9732	for_each_port(adap, i) {
9733		struct port_info *pi = adap2pinfo(adap, i);
9734
9735		while ((adap->params.portvec & (1 << j)) == 0)
9736			j++;
9737
9738		ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
9739		if (ret)
9740			return ret;
9741
9742		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
9743		j++;
9744	}
9745	return 0;
9746}
9747
9748int t4_init_port_mirror(struct port_info *pi, u8 mbox, u8 port, u8 pf, u8 vf,
9749			u16 *mirror_viid)
9750{
9751	int ret;
9752
9753	ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, NULL, NULL,
9754			  NULL, NULL);
9755	if (ret < 0)
9756		return ret;
9757
9758	if (mirror_viid)
9759		*mirror_viid = ret;
9760
9761	return 0;
9762}
9763
9764/**
9765 *	t4_read_cimq_cfg - read CIM queue configuration
9766 *	@adap: the adapter
9767 *	@base: holds the queue base addresses in bytes
9768 *	@size: holds the queue sizes in bytes
9769 *	@thres: holds the queue full thresholds in bytes
9770 *
9771 *	Returns the current configuration of the CIM queues, starting with
9772 *	the IBQs, then the OBQs.
9773 */
9774void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
9775{
9776	unsigned int i, v;
9777	int cim_num_obq = is_t4(adap->params.chip) ?
9778				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
9779
9780	for (i = 0; i < CIM_NUM_IBQ; i++) {
9781		t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
9782			     QUENUMSELECT_V(i));
9783		v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9784		/* value is in 256-byte units */
9785		*base++ = CIMQBASE_G(v) * 256;
9786		*size++ = CIMQSIZE_G(v) * 256;
9787		*thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
9788	}
9789	for (i = 0; i < cim_num_obq; i++) {
9790		t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
9791			     QUENUMSELECT_V(i));
9792		v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9793		/* value is in 256-byte units */
9794		*base++ = CIMQBASE_G(v) * 256;
9795		*size++ = CIMQSIZE_G(v) * 256;
9796	}
9797}
9798
9799/**
9800 *	t4_read_cim_ibq - read the contents of a CIM inbound queue
9801 *	@adap: the adapter
9802 *	@qid: the queue index
9803 *	@data: where to store the queue contents
9804 *	@n: capacity of @data in 32-bit words
9805 *
9806 *	Reads the contents of the selected CIM queue starting at address 0 up
9807 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
9808 *	error and the number of 32-bit words actually read on success.
9809 */
9810int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9811{
9812	int i, err, attempts;
9813	unsigned int addr;
9814	const unsigned int nwords = CIM_IBQ_SIZE * 4;
9815
9816	if (qid > 5 || (n & 3))
9817		return -EINVAL;
9818
9819	addr = qid * nwords;
9820	if (n > nwords)
9821		n = nwords;
9822
9823	/* It might take 3-10ms before the IBQ debug read access is allowed.
9824	 * Wait for 1 Sec with a delay of 1 usec.
9825	 */
9826	attempts = 1000000;
9827
9828	for (i = 0; i < n; i++, addr++) {
9829		t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
9830			     IBQDBGEN_F);
9831		err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
9832				      attempts, 1);
9833		if (err)
9834			return err;
9835		*data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
9836	}
9837	t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
9838	return i;
9839}
9840
9841/**
9842 *	t4_read_cim_obq - read the contents of a CIM outbound queue
9843 *	@adap: the adapter
9844 *	@qid: the queue index
9845 *	@data: where to store the queue contents
9846 *	@n: capacity of @data in 32-bit words
9847 *
9848 *	Reads the contents of the selected CIM queue starting at address 0 up
9849 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
9850 *	error and the number of 32-bit words actually read on success.
9851 */
9852int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9853{
9854	int i, err;
9855	unsigned int addr, v, nwords;
9856	int cim_num_obq = is_t4(adap->params.chip) ?
9857				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
9858
9859	if ((qid > (cim_num_obq - 1)) || (n & 3))
9860		return -EINVAL;
9861
9862	t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
9863		     QUENUMSELECT_V(qid));
9864	v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9865
9866	addr = CIMQBASE_G(v) * 64;    /* muliple of 256 -> muliple of 4 */
9867	nwords = CIMQSIZE_G(v) * 64;  /* same */
9868	if (n > nwords)
9869		n = nwords;
9870
9871	for (i = 0; i < n; i++, addr++) {
9872		t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
9873			     OBQDBGEN_F);
9874		err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
9875				      2, 1);
9876		if (err)
9877			return err;
9878		*data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
9879	}
9880	t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
9881	return i;
9882}
9883
9884/**
9885 *	t4_cim_read - read a block from CIM internal address space
9886 *	@adap: the adapter
9887 *	@addr: the start address within the CIM address space
9888 *	@n: number of words to read
9889 *	@valp: where to store the result
9890 *
 *	Reads a block of 4-byte words from the CIM internal address space.
9892 */
9893int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
9894		unsigned int *valp)
9895{
9896	int ret = 0;
9897
9898	if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
9899		return -EBUSY;
9900
9901	for ( ; !ret && n--; addr += 4) {
9902		t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
9903		ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
9904				      0, 5, 2);
9905		if (!ret)
9906			*valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
9907	}
9908	return ret;
9909}
9910
9911/**
9912 *	t4_cim_write - write a block into CIM internal address space
9913 *	@adap: the adapter
9914 *	@addr: the start address within the CIM address space
9915 *	@n: number of words to write
9916 *	@valp: set of values to write
9917 *
 *	Writes a block of 4-byte words into the CIM internal address space.
9919 */
9920int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
9921		 const unsigned int *valp)
9922{
9923	int ret = 0;
9924
9925	if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
9926		return -EBUSY;
9927
9928	for ( ; !ret && n--; addr += 4) {
9929		t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
9930		t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
9931		ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
9932				      0, 5, 2);
9933	}
9934	return ret;
9935}
9936
/* Convenience wrapper around t4_cim_write() for writing a single 32-bit
 * word @val to CIM internal address @addr.  Takes @val by value so callers
 * can pass constants directly.
 */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}
9942
9943/**
9944 *	t4_cim_read_la - read CIM LA capture buffer
9945 *	@adap: the adapter
9946 *	@la_buf: where to store the LA data
9947 *	@wrptr: the HW write pointer within the capture buffer
9948 *
9949 *	Reads the contents of the CIM LA buffer with the most recent entry at
9950 *	the end	of the returned data and with the entry at @wrptr first.
9951 *	We try to leave the LA in the running state we find it in.
9952 */
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	/* Snapshot the LA configuration so the running state (if any)
	 * can be restored on exit.
	 */
	ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & UPDBGLAEN_F) {	/* LA is running, freeze it */
		ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
		if (ret)
			return ret;
	}

	/* Fetch the hardware write pointer; reading starts there so the
	 * returned buffer ends with the most recent entry.
	 */
	ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
	if (ret)
		goto restart;

	idx = UPDBGLAWRPTR_G(val);
	if (wrptr)
		*wrptr = idx;

	for (i = 0; i < adap->params.cim_la_size; i++) {
		/* Post the read pointer and assert read-enable ... */
		ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
				    UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
		if (ret)
			break;
		/* ... then poll once: hardware clears the read-enable bit
		 * when the entry is ready.
		 */
		ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
		if (ret)
			break;
		if (val & UPDBGLARDEN_F) {
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
		if (ret)
			break;

		/* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
		 * identify the 32-bit portion of the full 312-bit data
		 */
		if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
			idx = (idx & 0xff0) + 0x10;
		else
			idx++;
		/* address can't exceed 0xfff */
		idx &= UPDBGLARDPTR_M;
	}
restart:
	/* Restore the LA to its original running state; preserve the first
	 * error encountered above, if any.
	 */
	if (cfg & UPDBGLAEN_F) {
		int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
				      cfg & ~UPDBGLARDEN_F);
		if (!ret)
			ret = r;
	}
	return ret;
}
10011
10012/**
10013 *	t4_tp_read_la - read TP LA capture buffer
10014 *	@adap: the adapter
10015 *	@la_buf: where to store the LA data
10016 *	@wrptr: the HW write pointer within the capture buffer
10017 *
10018 *	Reads the contents of the TP LA buffer with the most recent entry at
10019 *	the end	of the returned data and with the entry at @wrptr first.
10020 *	We leave the LA in the running state we find it in.
10021 */
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
{
	bool last_incomplete;
	unsigned int i, cfg, val, idx;

	/* Snapshot the low 16 configuration bits; if the LA is running,
	 * freeze it by toggling the enable bit off.
	 */
	cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
	if (cfg & DBGLAENABLE_F)			/* freeze LA */
		t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
			     adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));

	val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
	idx = DBGLAWPTR_G(val);
	/* In LA modes >= 2 with the half-flag clear, the entry at the write
	 * pointer is apparently incomplete (it gets wiped below), so start
	 * one slot later.
	 */
	last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
	if (last_incomplete)
		idx = (idx + 1) & DBGLARPTR_M;
	if (wrptr)
		*wrptr = idx;

	/* Keep the low config bits, clear the read-pointer field and
	 * reapply the adapter's LA mask before walking the buffer.
	 */
	val &= 0xffff;
	val &= ~DBGLARPTR_V(DBGLARPTR_M);
	val |= adap->params.tp.la_mask;

	/* Walk all TPLA_SIZE entries starting at @idx, wrapping at the
	 * read-pointer mask.
	 */
	for (i = 0; i < TPLA_SIZE; i++) {
		t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
		la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
		idx = (idx + 1) & DBGLARPTR_M;
	}

	/* Wipe out last entry if it isn't valid */
	if (last_incomplete)
		la_buf[TPLA_SIZE - 1] = ~0ULL;

	if (cfg & DBGLAENABLE_F)                    /* restore running state */
		t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
			     cfg | adap->params.tp.la_mask);
}
10058
10059/* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
10060 * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
10061 * state for more than the Warning Threshold then we'll issue a warning about
10062 * a potential hang.  We'll repeat the warning as the SGE Ingress DMA Channel
10063 * appears to be hung every Warning Repeat second till the situation clears.
10064 * If the situation clears, we'll note that as well.
10065 */
10066#define SGE_IDMA_WARN_THRESH 1
10067#define SGE_IDMA_WARN_REPEAT 300
10068
10069/**
10070 *	t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
10071 *	@adapter: the adapter
10072 *	@idma: the adapter IDMA Monitor state
10073 *
10074 *	Initialize the state of an SGE Ingress DMA Monitor.
10075 */
10076void t4_idma_monitor_init(struct adapter *adapter,
10077			  struct sge_idma_monitor_state *idma)
10078{
10079	/* Initialize the state variables for detecting an SGE Ingress DMA
10080	 * hang.  The SGE has internal counters which count up on each clock
10081	 * tick whenever the SGE finds its Ingress DMA State Engines in the
10082	 * same state they were on the previous clock tick.  The clock used is
10083	 * the Core Clock so we have a limit on the maximum "time" they can
10084	 * record; typically a very small number of seconds.  For instance,
10085	 * with a 600MHz Core Clock, we can only count up to a bit more than
10086	 * 7s.  So we'll synthesize a larger counter in order to not run the
10087	 * risk of having the "timers" overflow and give us the flexibility to
10088	 * maintain a Hung SGE State Machine of our own which operates across
10089	 * a longer time frame.
10090	 */
10091	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
10092	idma->idma_stalled[0] = 0;
10093	idma->idma_stalled[1] = 0;
10094}
10095
10096/**
10097 *	t4_idma_monitor - monitor SGE Ingress DMA state
10098 *	@adapter: the adapter
10099 *	@idma: the adapter IDMA Monitor state
10100 *	@hz: number of ticks/second
10101 *	@ticks: number of ticks since the last IDMA Monitor call
10102 */
void t4_idma_monitor(struct adapter *adapter,
		     struct sge_idma_monitor_state *idma,
		     int hz, int ticks)
{
	int i, idma_same_state_cnt[2];

	 /* Read the SGE Debug Ingress DMA Same State Count registers.  These
	  * are counters inside the SGE which count up on each clock when the
	  * SGE finds its Ingress DMA State Engines in the same states they
	  * were in the previous clock.  The counters will peg out at
	  * 0xffffffff without wrapping around so once they pass the 1s
	  * threshold they'll stay above that till the IDMA state changes.
	  */
	t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
	idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
	idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);

	/* Evaluate each of the two Ingress DMA channels independently. */
	for (i = 0; i < 2; i++) {
		u32 debug0, debug11;

		/* If the Ingress DMA Same State Counter ("timer") is less
		 * than 1s, then we can reset our synthesized Stall Timer and
		 * continue.  If we have previously emitted warnings about a
		 * potential stalled Ingress Queue, issue a note indicating
		 * that the Ingress Queue has resumed forward progress.
		 */
		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
				dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
					 "resumed after %d seconds\n",
					 i, idma->idma_qid[i],
					 idma->idma_stalled[i] / hz);
			idma->idma_stalled[i] = 0;
			continue;
		}

		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
		 * domain.  The first time we get here it'll be because we
		 * passed the 1s Threshold; each additional time it'll be
		 * because the RX Timer Callback is being fired on its regular
		 * schedule.
		 *
		 * If the stall is below our Potential Hung Ingress Queue
		 * Warning Threshold, continue.
		 */
		if (idma->idma_stalled[i] == 0) {
			/* First detection: seed the stall timer at 1s. */
			idma->idma_stalled[i] = hz;
			idma->idma_warn[i] = 0;
		} else {
			idma->idma_stalled[i] += ticks;
			idma->idma_warn[i] -= ticks;
		}

		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
			continue;

		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
		 */
		if (idma->idma_warn[i] > 0)
			continue;
		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;

		/* Read and save the SGE IDMA State and Queue ID information.
		 * We do this every time in case it changes across time ...
		 * can't be too careful ...
		 */
		t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
		debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;

		t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
		debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;

		dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
			 "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
			 i, idma->idma_qid[i], idma->idma_state[i],
			 idma->idma_stalled[i] / hz,
			 debug0, debug11);
		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
	}
}
10185
10186/**
10187 *	t4_load_cfg - download config file
10188 *	@adap: the adapter
10189 *	@cfg_data: the cfg text file to write
10190 *	@size: text file size
10191 *
10192 *	Write the supplied config text file to the card's serial flash.
10193 */
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	/* Locate the FLASH region reserved for the Firmware Config File. */
	cfg_addr = t4_flash_cfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	/* NOTE(review): the start sector is computed with the fixed
	 * SF_SEC_SIZE constant while the sector span below uses the
	 * adapter's actual sector size (sf_sec_size) — confirm these agree
	 * for all supported parts.
	 */
	addr = cfg_addr;
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_CFG_MAX_SIZE) {
		dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
			FLASH_CFG_MAX_SIZE);
		return -EFBIG;
	}

	/* Erase the whole config region regardless of @size so no stale
	 * data survives a smaller file.
	 */
	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);
	/* If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter Firmware Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) <  SF_PAGE_SIZE)
			n = size - i;
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data, true);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		dev_err(adap->pdev_dev, "config file %s failed %d\n",
			(size == 0 ? "clear" : "download"), ret);
	return ret;
}
10244
10245/**
10246 *	t4_set_vf_mac - Set MAC address for the specified VF
10247 *	@adapter: The adapter
10248 *	@vf: one of the VFs instantiated by the specified PF
10249 *	@naddr: the number of MAC addresses
10250 *	@addr: the MAC address(es) to be set to the specified VF
10251 */
10252int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
10253		      unsigned int naddr, u8 *addr)
10254{
10255	struct fw_acl_mac_cmd cmd;
10256
10257	memset(&cmd, 0, sizeof(cmd));
10258	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
10259				    FW_CMD_REQUEST_F |
10260				    FW_CMD_WRITE_F |
10261				    FW_ACL_MAC_CMD_PFN_V(adapter->pf) |
10262				    FW_ACL_MAC_CMD_VFN_V(vf));
10263
10264	/* Note: Do not enable the ACL */
10265	cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
10266	cmd.nmac = naddr;
10267
10268	switch (adapter->pf) {
10269	case 3:
10270		memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
10271		break;
10272	case 2:
10273		memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
10274		break;
10275	case 1:
10276		memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
10277		break;
10278	case 0:
10279		memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
10280		break;
10281	}
10282
10283	return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
10284}
10285
10286/**
10287 * t4_read_pace_tbl - read the pace table
10288 * @adap: the adapter
10289 * @pace_vals: holds the returned values
10290 *
10291 * Returns the values of TP's pace table in microseconds.
10292 */
10293void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
10294{
10295	unsigned int i, v;
10296
10297	for (i = 0; i < NTX_SCHED; i++) {
10298		t4_write_reg(adap, TP_PACE_TABLE_A, 0xffff0000 + i);
10299		v = t4_read_reg(adap, TP_PACE_TABLE_A);
10300		pace_vals[i] = dack_ticks_to_usec(adap, v);
10301	}
10302}
10303
10304/**
10305 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
10306 * @adap: the adapter
10307 * @sched: the scheduler index
10308 * @kbps: the byte rate in Kbps
10309 * @ipg: the interpacket delay in tenths of nanoseconds
10310 * @sleep_ok: if true we may sleep while awaiting command completion
10311 *
10312 * Return the current configuration of a HW Tx scheduler.
10313 */
void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
		     unsigned int *kbps, unsigned int *ipg, bool sleep_ok)
{
	unsigned int v, addr, bpt, cpt;

	if (kbps) {
		/* Two schedulers share each rate-limit word; odd-numbered
		 * schedulers occupy the upper 16 bits.
		 */
		addr = TP_TX_MOD_Q1_Q0_RATE_LIMIT_A - sched / 2;
		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
		if (sched & 1)
			v >>= 16;
		bpt = (v >> 8) & 0xff;	/* bytes per tick */
		cpt = v & 0xff;		/* core clocks per tick */
		if (!cpt) {
			*kbps = 0;	/* scheduler disabled */
		} else {
			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
			/* bytes/s -> Kbps: 125 bytes == 1 Kbit */
			*kbps = (v * bpt) / 125;
		}
	}
	if (ipg) {
		/* Same packing as the rate-limit word above. */
		addr = TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR_A - sched / 2;
		t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
		if (sched & 1)
			v >>= 16;
		v &= 0xffff;
		/* convert core-clock ticks to tenths of nanoseconds */
		*ipg = (10000 * v) / core_ticks_per_usec(adap);
	}
}
10342
10343/* t4_sge_ctxt_rd - read an SGE context through FW
10344 * @adap: the adapter
10345 * @mbox: mailbox to use for the FW command
10346 * @cid: the context id
10347 * @ctype: the context type
10348 * @data: where to store the context data
10349 *
10350 * Issues a FW command through the given mailbox to read an SGE context.
10351 */
10352int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
10353		   enum ctxt_type ctype, u32 *data)
10354{
10355	struct fw_ldst_cmd c;
10356	int ret;
10357
10358	if (ctype == CTXT_FLM)
10359		ret = FW_LDST_ADDRSPC_SGE_FLMC;
10360	else
10361		ret = FW_LDST_ADDRSPC_SGE_CONMC;
10362
10363	memset(&c, 0, sizeof(c));
10364	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
10365					FW_CMD_REQUEST_F | FW_CMD_READ_F |
10366					FW_LDST_CMD_ADDRSPACE_V(ret));
10367	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
10368	c.u.idctxt.physid = cpu_to_be32(cid);
10369
10370	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
10371	if (ret == 0) {
10372		data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
10373		data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
10374		data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
10375		data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
10376		data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
10377		data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
10378	}
10379	return ret;
10380}
10381
10382/**
10383 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
10384 * @adap: the adapter
10385 * @cid: the context id
10386 * @ctype: the context type
10387 * @data: where to store the context data
10388 *
10389 * Reads an SGE context directly, bypassing FW.  This is only for
10390 * debugging when FW is unavailable.
10391 */
10392int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
10393		      enum ctxt_type ctype, u32 *data)
10394{
10395	int i, ret;
10396
10397	t4_write_reg(adap, SGE_CTXT_CMD_A, CTXTQID_V(cid) | CTXTTYPE_V(ctype));
10398	ret = t4_wait_op_done(adap, SGE_CTXT_CMD_A, BUSY_F, 0, 3, 1);
10399	if (!ret)
10400		for (i = SGE_CTXT_DATA0_A; i <= SGE_CTXT_DATA5_A; i += 4)
10401			*data++ = t4_read_reg(adap, i);
10402	return ret;
10403}
10404
10405int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode,
10406		    u8 rateunit, u8 ratemode, u8 channel, u8 class,
10407		    u32 minrate, u32 maxrate, u16 weight, u16 pktsize,
10408		    u16 burstsize)
10409{
10410	struct fw_sched_cmd cmd;
10411
10412	memset(&cmd, 0, sizeof(cmd));
10413	cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_SCHED_CMD) |
10414				      FW_CMD_REQUEST_F |
10415				      FW_CMD_WRITE_F);
10416	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10417
10418	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
10419	cmd.u.params.type = type;
10420	cmd.u.params.level = level;
10421	cmd.u.params.mode = mode;
10422	cmd.u.params.ch = channel;
10423	cmd.u.params.cl = class;
10424	cmd.u.params.unit = rateunit;
10425	cmd.u.params.rate = ratemode;
10426	cmd.u.params.min = cpu_to_be32(minrate);
10427	cmd.u.params.max = cpu_to_be32(maxrate);
10428	cmd.u.params.weight = cpu_to_be16(weight);
10429	cmd.u.params.pktsize = cpu_to_be16(pktsize);
10430	cmd.u.params.burstsize = cpu_to_be16(burstsize);
10431
10432	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
10433			       NULL, 1);
10434}
10435
10436/**
10437 *	t4_i2c_rd - read I2C data from adapter
10438 *	@adap: the adapter
10439 *	@mbox: mailbox to use for the FW command
10440 *	@port: Port number if per-port device; <0 if not
10441 *	@devid: per-port device ID or absolute device ID
10442 *	@offset: byte offset into device I2C space
10443 *	@len: byte length of I2C space data
10444 *	@buf: buffer in which to return I2C data
10445 *
10446 *	Reads the I2C data from the indicated device and location.
10447 */
int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
	      unsigned int devid, unsigned int offset,
	      unsigned int len, u8 *buf)
{
	struct fw_ldst_cmd ldst_cmd, ldst_rpl;
	/* Maximum I2C payload per mailbox command. */
	unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
	int ret = 0;

	if (len > I2C_PAGE_SIZE)
		return -EINVAL;

	/* Don't allow reads that span multiple pages */
	if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
		return -EINVAL;

	memset(&ldst_cmd, 0, sizeof(ldst_cmd));
	ldst_cmd.op_to_addrspace =
		cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
			    FW_CMD_REQUEST_F |
			    FW_CMD_READ_F |
			    FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_I2C));
	ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
	/* 0xff signals a non-port-specific (absolute) device. */
	ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
	ldst_cmd.u.i2c.did = devid;

	/* Issue as many commands as needed, i2c_max bytes at a time,
	 * advancing the device offset and output buffer each round.
	 */
	while (len > 0) {
		unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;

		ldst_cmd.u.i2c.boffset = offset;
		ldst_cmd.u.i2c.blen = i2c_len;

		ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
				 &ldst_rpl);
		if (ret)
			break;

		memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
		offset += i2c_len;
		buf += i2c_len;
		len -= i2c_len;
	}

	return ret;
}
10492
10493/**
10494 *      t4_set_vlan_acl - Set a VLAN id for the specified VF
10495 *      @adap: the adapter
10496 *      @mbox: mailbox to use for the FW command
10497 *      @vf: one of the VFs instantiated by the specified PF
10498 *      @vlan: The vlanid to be set
10499 */
10500int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
10501		    u16 vlan)
10502{
10503	struct fw_acl_vlan_cmd vlan_cmd;
10504	unsigned int enable;
10505
10506	enable = (vlan ? FW_ACL_VLAN_CMD_EN_F : 0);
10507	memset(&vlan_cmd, 0, sizeof(vlan_cmd));
10508	vlan_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_VLAN_CMD) |
10509					 FW_CMD_REQUEST_F |
10510					 FW_CMD_WRITE_F |
10511					 FW_CMD_EXEC_F |
10512					 FW_ACL_VLAN_CMD_PFN_V(adap->pf) |
10513					 FW_ACL_VLAN_CMD_VFN_V(vf));
10514	vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
10515	/* Drop all packets that donot match vlan id */
10516	vlan_cmd.dropnovlan_fm = (enable
10517				  ? (FW_ACL_VLAN_CMD_DROPNOVLAN_F |
10518				     FW_ACL_VLAN_CMD_FM_F) : 0);
10519	if (enable != 0) {
10520		vlan_cmd.nvlan = 1;
10521		vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
10522	}
10523
10524	return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);
10525}
10526
10527/**
10528 *	modify_device_id - Modifies the device ID of the Boot BIOS image
10529 *	@device_id: the device ID to write.
10530 *	@boot_data: the boot image to modify.
10531 *
10532 *	Write the supplied device ID to the boot BIOS image.
10533 */
static void modify_device_id(int device_id, u8 *boot_data)
{
	struct cxgb4_pcir_data *pcir_header;
	struct legacy_pci_rom_hdr *header;
	u8 *cur_header = boot_data;
	u16 pcir_offset;

	 /* Loop through all chained images and change the device ID's */
	do {
		/* Each image starts with a legacy ROM header pointing at
		 * its PCIR data structure.
		 */
		header = (struct legacy_pci_rom_hdr *)cur_header;
		pcir_offset = le16_to_cpu(header->pcir_offset);
		pcir_header = (struct cxgb4_pcir_data *)(cur_header +
			      pcir_offset);

		/**
		 * Only modify the Device ID if code type is Legacy or HP.
		 * 0x00: Okay to modify
		 * 0x01: FCODE. Do not modify
		 * 0x03: Okay to modify
		 * 0x04-0xFF: Do not modify
		 */
		if (pcir_header->code_type == CXGB4_HDR_CODE1) {
			u8 csum = 0;
			int i;

			/**
			 * Modify Device ID to match current adapter
			 */
			pcir_header->device_id = cpu_to_le16(device_id);

			/**
			 * Set checksum temporarily to 0.
			 * We will recalculate it later.
			 */
			header->cksum = 0x0;

			/**
			 * Calculate and update checksum over the whole
			 * image (size512 is in 512-byte units)
			 */
			for (i = 0; i < (header->size512 * 512); i++)
				csum += cur_header[i];

			/**
			 * Invert summed value to create the checksum
			 * Writing new checksum value directly to the boot
			 * data (byte 7 presumably being the cksum field's
			 * offset in the legacy header — confirm against the
			 * struct layout)
			 */
			cur_header[7] = -csum;

		} else if (pcir_header->code_type == CXGB4_HDR_CODE2) {
			/**
			 * Modify Device ID to match current adapter
			 * (no image checksum maintained for this code type)
			 */
			pcir_header->device_id = cpu_to_le16(device_id);
		}

		/**
		 * Move header pointer up to the next image in the ROM.
		 */
		cur_header += header->size512 * 512;
	} while (!(pcir_header->indicator & CXGB4_HDR_INDI));
}
10595
10596/**
10597 *	t4_load_boot - download boot flash
10598 *	@adap: the adapter
10599 *	@boot_data: the boot image to write
10600 *	@boot_addr: offset in flash to write boot_data
10601 *	@size: image size
10602 *
10603 *	Write the supplied boot image to the card's serial flash.
10604 *	The boot image has the following sections: a 28-byte header and the
10605 *	boot image.
10606 */
10607int t4_load_boot(struct adapter *adap, u8 *boot_data,
10608		 unsigned int boot_addr, unsigned int size)
10609{
10610	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10611	unsigned int boot_sector = (boot_addr * 1024);
10612	struct cxgb4_pci_exp_rom_header *header;
10613	struct cxgb4_pcir_data *pcir_header;
10614	int pcir_offset;
10615	unsigned int i;
10616	u16 device_id;
10617	int ret, addr;
10618
10619	/**
10620	 * Make sure the boot image does not encroach on the firmware region
10621	 */
10622	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
10623		dev_err(adap->pdev_dev, "boot image encroaching on firmware region\n");
10624		return -EFBIG;
10625	}
10626
10627	/* Get boot header */
10628	header = (struct cxgb4_pci_exp_rom_header *)boot_data;
10629	pcir_offset = le16_to_cpu(header->pcir_offset);
10630	/* PCIR Data Structure */
10631	pcir_header = (struct cxgb4_pcir_data *)&boot_data[pcir_offset];
10632
10633	/**
10634	 * Perform some primitive sanity testing to avoid accidentally
10635	 * writing garbage over the boot sectors.  We ought to check for
10636	 * more but it's not worth it for now ...
10637	 */
10638	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
10639		dev_err(adap->pdev_dev, "boot image too small/large\n");
10640		return -EFBIG;
10641	}
10642
10643	if (le16_to_cpu(header->signature) != BOOT_SIGNATURE) {
10644		dev_err(adap->pdev_dev, "Boot image missing signature\n");
10645		return -EINVAL;
10646	}
10647
10648	/* Check PCI header signature */
10649	if (le32_to_cpu(pcir_header->signature) != PCIR_SIGNATURE) {
10650		dev_err(adap->pdev_dev, "PCI header missing signature\n");
10651		return -EINVAL;
10652	}
10653
10654	/* Check Vendor ID matches Chelsio ID*/
10655	if (le16_to_cpu(pcir_header->vendor_id) != PCI_VENDOR_ID_CHELSIO) {
10656		dev_err(adap->pdev_dev, "Vendor ID missing signature\n");
10657		return -EINVAL;
10658	}
10659
10660	/**
10661	 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
10662	 * and Boot configuration data sections. These 3 boot sections span
10663	 * sectors 0 to 7 in flash and live right before the FW image location.
10664	 */
10665	i = DIV_ROUND_UP(size ? size : FLASH_FW_START,  sf_sec_size);
10666	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
10667				     (boot_sector >> 16) + i - 1);
10668
10669	/**
10670	 * If size == 0 then we're simply erasing the FLASH sectors associated
10671	 * with the on-adapter option ROM file
10672	 */
10673	if (ret || size == 0)
10674		goto out;
10675	/* Retrieve adapter's device ID */
10676	pci_read_config_word(adap->pdev, PCI_DEVICE_ID, &device_id);
10677       /* Want to deal with PF 0 so I strip off PF 4 indicator */
10678	device_id = device_id & 0xf0ff;
10679
10680	 /* Check PCIE Device ID */
10681	if (le16_to_cpu(pcir_header->device_id) != device_id) {
10682		/**
10683		 * Change the device ID in the Boot BIOS image to match
10684		 * the Device ID of the current adapter.
10685		 */
10686		modify_device_id(device_id, boot_data);
10687	}
10688
10689	/**
10690	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
10691	 * we finish copying the rest of the boot image. This will ensure
10692	 * that the BIOS boot header will only be written if the boot image
10693	 * was written in full.
10694	 */
10695	addr = boot_sector;
10696	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
10697		addr += SF_PAGE_SIZE;
10698		boot_data += SF_PAGE_SIZE;
10699		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data,
10700				     false);
10701		if (ret)
10702			goto out;
10703	}
10704
10705	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
10706			     (const u8 *)header, false);
10707
10708out:
10709	if (ret)
10710		dev_err(adap->pdev_dev, "boot image load failed, error %d\n",
10711			ret);
10712	return ret;
10713}
10714
10715/**
10716 *	t4_flash_bootcfg_addr - return the address of the flash
10717 *	optionrom configuration
10718 *	@adapter: the adapter
10719 *
10720 *	Return the address within the flash where the OptionROM Configuration
10721 *	is stored, or an error if the device FLASH is too small to contain
10722 *	a OptionROM Configuration.
10723 */
10724static int t4_flash_bootcfg_addr(struct adapter *adapter)
10725{
10726	/**
10727	 * If the device FLASH isn't large enough to hold a Firmware
10728	 * Configuration File, return an error.
10729	 */
10730	if (adapter->params.sf_size <
10731	    FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
10732		return -ENOSPC;
10733
10734	return FLASH_BOOTCFG_START;
10735}
10736
10737int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
10738{
10739	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10740	struct cxgb4_bootcfg_data *header;
10741	unsigned int flash_cfg_start_sec;
10742	unsigned int addr, npad;
10743	int ret, i, n, cfg_addr;
10744
10745	cfg_addr = t4_flash_bootcfg_addr(adap);
10746	if (cfg_addr < 0)
10747		return cfg_addr;
10748
10749	addr = cfg_addr;
10750	flash_cfg_start_sec = addr / SF_SEC_SIZE;
10751
10752	if (size > FLASH_BOOTCFG_MAX_SIZE) {
10753		dev_err(adap->pdev_dev, "bootcfg file too large, max is %u bytes\n",
10754			FLASH_BOOTCFG_MAX_SIZE);
10755		return -EFBIG;
10756	}
10757
10758	header = (struct cxgb4_bootcfg_data *)cfg_data;
10759	if (le16_to_cpu(header->signature) != BOOT_CFG_SIG) {
10760		dev_err(adap->pdev_dev, "Wrong bootcfg signature\n");
10761		ret = -EINVAL;
10762		goto out;
10763	}
10764
10765	i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,
10766			 sf_sec_size);
10767	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
10768				     flash_cfg_start_sec + i - 1);
10769
10770	/**
10771	 * If size == 0 then we're simply erasing the FLASH sectors associated
10772	 * with the on-adapter OptionROM Configuration File.
10773	 */
10774	if (ret || size == 0)
10775		goto out;
10776
10777	/* this will write to the flash up to SF_PAGE_SIZE at a time */
10778	for (i = 0; i < size; i += SF_PAGE_SIZE) {
10779		n = min_t(u32, size - i, SF_PAGE_SIZE);
10780
10781		ret = t4_write_flash(adap, addr, n, cfg_data, false);
10782		if (ret)
10783			goto out;
10784
10785		addr += SF_PAGE_SIZE;
10786		cfg_data += SF_PAGE_SIZE;
10787	}
10788
10789	npad = ((size + 4 - 1) & ~3) - size;
10790	for (i = 0; i < npad; i++) {
10791		u8 data = 0;
10792
10793		ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data,
10794				     false);
10795		if (ret)
10796			goto out;
10797	}
10798
10799out:
10800	if (ret)
10801		dev_err(adap->pdev_dev, "boot config data %s failed %d\n",
10802			(size == 0 ? "clear" : "download"), ret);
10803	return ret;
10804}
10805