/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "firmware_exports.h"

static void t3_port_intr_clear(struct adapter *adapter, int idx);

/**
 *	t3_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t3_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
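
/*
 * Illustrative sketch (not part of the driver): a typical caller polls a
 * busy bit until it deasserts.  Here we would wait up to 5 iterations,
 * 10 usecs apart, for F_BUSY in A_SF_OP to clear, capturing the register
 * value observed at completion:
 *
 *	u32 val;
 *
 *	if (t3_wait_op_done_val(adapter, A_SF_OP, F_BUSY, 0, 5, 10, &val))
 *		return -EAGAIN;		// operation timed out
 */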

/**
 *	t3_write_regs - write a bunch of registers
 *	@adapter: the adapter to program
 *	@p: an array of register address/register value pairs
 *	@n: the number of address/value pairs
 *	@offset: register address offset
 *
 *	Takes an array of register address/register value pairs and writes each
 *	value to the corresponding register.  Register addresses are adjusted
 *	by the supplied offset.
 */
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
		   int n, unsigned int offset)
{
	while (n--) {
		t3_write_reg(adapter, p->reg_addr + offset, p->val);
		p++;
	}
}

/**
 *	t3_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t3_read_reg(adapter, addr) & ~mask;

	t3_write_reg(adapter, addr, v | val);
	t3_read_reg(adapter, addr);	/* flush */
}
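
/*
 * Illustrative sketch (not part of the driver): t3_set_reg_field is a
 * read-modify-write helper, so updating a multi-bit field means passing the
 * field's full mask and the new value already shifted into place, e.g.
 * setting the MI1 start-of-frame field to 1 as the MDIO code below does:
 *
 *	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
 *
 * Bits outside @mask are preserved, and the trailing read posts the write.
 */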

/**
 *	t3_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals,
			     unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
 *	@mc7: identifies MC7 to read from
 *	@start: index of first 64-bit word to read
 *	@n: number of 64-bit words to read
 *	@buf: where to store the read result
 *
 *	Read @n 64-bit words from MC7 starting at word @start, using backdoor
 *	accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
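
/*
 * A note on the shift[]/step[] tables above (a reading of the code, not an
 * authoritative hardware description): mc7->width selects how many backdoor
 * accesses make up one 64-bit word.  Width 0 reads DATA0/DATA1 once as a
 * full 64-bit value; width w > 0 performs (1 << w) accesses, each
 * contributing a (64 >> w)-bit subword taken from DATA1 after shifting by
 * shift[w] and placed at bit offset step[w] * i.  For example, with
 * width == 2, four accesses each yield 16 bits from DATA1 bits [31:16]
 * (shift 16), landing at bit offsets 48, 32, 16 and 0.
 */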

/*
 * Initialize MI1.
 */
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_CLKDIV(clkdiv);

	t3_write_reg(adap, A_MI1_CFG, val);
}
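
/*
 * Worked example of the divider arithmetic (illustrative values only):
 * cclk and mdc come from the VPD in the same unit, so only their ratio
 * matters.  With cclk/mdc == 50 the code programs
 *
 *	clkdiv = 50 / 2 - 1 = 24
 *
 * and the resulting MDIO clock is cclk / (2 * (clkdiv + 1)) = cclk / 50,
 * i.e. the requested mdc rate.
 */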

#define MDIO_ATTEMPTS 20

/*
 * MI1 read/write operations for clause 22 PHYs.
 */
static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
		       u16 reg_addr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret)
		ret = t3_read_reg(adapter, A_MI1_DATA);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
			u16 reg_addr, u16 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ops = {
	.read = t3_mi1_read,
	.write = t3_mi1_write,
	.mode_support = MDIO_SUPPORTS_C22
};

/*
 * Performs the address cycle for clause 45 PHYs.
 * Must be called with the mdio_lock mutex held.
 */
static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
		       int reg_addr)
{
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
			       MDIO_ATTEMPTS, 10);
}

/*
 * MI1 read/write operations for indirect-addressed PHYs.
 */
static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
			u16 reg_addr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
		if (!ret)
			ret = t3_read_reg(adapter, A_MI1_DATA);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
			 u16 reg_addr, u16 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ext_ops = {
	.read = mi1_ext_read,
	.write = mi1_ext_write,
	.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
};

/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Changes the value of a PHY register by applying a mask to its current
 *	value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = t3_mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = t3_mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}
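
/*
 * Illustrative sketch (not part of the driver): t3_mdio_change_bits is the
 * MDIO analogue of t3_set_reg_field.  t3_phy_reset below uses it to start a
 * reset while simultaneously clearing the low-power bit:
 *
 *	err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
 *				  MDIO_CTRL1_RESET);
 */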

/**
 *	t3_phy_reset - reset a PHY block
 *	@phy: the PHY to operate on
 *	@mmd: the device address of the PHY block to reset
 *	@wait: how long to wait for the reset to complete in 1ms increments
 *
 *	Resets a PHY block and optionally waits for the reset to complete.
 *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 *	for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
				  MDIO_CTRL1_RESET);
	if (err || !wait)
		return err;

	do {
		err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
		if (err)
			return err;
		ctl &= MDIO_CTRL1_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}

/**
 *	t3_phy_advertise - set the PHY advertisement registers for autoneg
 *	@phy: the PHY to operate on
 *	@advert: bitmap of capabilities the PHY should advertise
 *
 *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
 *	requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
	if (err)
		return err;

	val = 1;	/* ADVERTISE_CSMA, the only supported selector */
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
}

/**
 *	t3_phy_advertise_fiber - set fiber PHY advertisement register
 *	@phy: the PHY to operate on
 *	@advert: bitmap of capabilities the PHY should advertise
 *
 *	Sets a fiber PHY's advertisement register to advertise the
 *	requested capabilities.
 */
int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
{
	unsigned int val = 0;

	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000XHALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000XFULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_1000XPAUSE;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_1000XPSE_ASYM;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
}

/**
 *	t3_set_phy_speed_duplex - force PHY speed and duplex
 *	@phy: the PHY to operate on
 *	@speed: requested PHY speed
 *	@duplex: requested PHY duplex
 *
 *	Force a 10/100/1000 PHY's speed and duplex.  This also disables
 *	auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
}
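
/*
 * Illustrative sketch (not part of the driver): forcing 100 Mb/s
 * full-duplex on a copper PHY.  Since the speed is not SPEED_1000,
 * auto-negotiation stays disabled:
 *
 *	err = t3_set_phy_speed_duplex(phy, SPEED_100, DUPLEX_FULL);
 *
 * Passing a negative @speed or @duplex leaves that setting unchanged.
 */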

int t3_phy_lasi_intr_enable(struct cphy *phy)
{
	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
			     MDIO_PMA_LASI_LSALARM);
}

int t3_phy_lasi_intr_disable(struct cphy *phy)
{
	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
}

int t3_phy_lasi_intr_clear(struct cphy *phy)
{
	u32 val;

	return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
}

int t3_phy_lasi_intr_handler(struct cphy *phy)
{
	unsigned int status;
	int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT,
			       &status);

	if (err)
		return err;
	return (status & MDIO_PMA_LASI_LSALARM) ? cphy_cause_link_change : 0;
}

static const struct adapter_info t3_adap_info[] = {
	{1, 1, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{1, 1, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{1, 1, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
	{},
	{},
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310" },
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
};

/*
 * Return the adapter_info structure with a given index.  Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}

struct port_type_info {
	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *ops);
};

static const struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep },
	{ t3_vsc8211_phy_prep },
	{ NULL },
	{ t3_xaui_direct_phy_prep },
	{ t3_ael2005_phy_prep },
	{ t3_qt2045_phy_prep },
	{ t3_ael1006_phy_prep },
	{ NULL },
	{ t3_aq100x_phy_prep },
	{ t3_ael2020_phy_prep },
};

#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};
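
/*
 * For reference (a mechanical expansion of the macro above, with the "SN"
 * keyword shown as an assumed example of the on-EEPROM encoding): each
 * VPD_ENTRY(name, len) declares the raw layout of one VPD-R keyword, e.g.
 * VPD_ENTRY(sn, SERNUM_LEN) becomes
 *
 *	u8 sn_kword[2];			// 2-byte keyword, e.g. "SN"
 *	u8 sn_len;			// length of the data that follows
 *	u8 sn_data[SERNUM_LEN];		// the value itself
 *
 * which is why the parsing code below can refer to vpd.sn_data and
 * vpd.sn_len directly.
 */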

#define EEPROM_MAX_POLL   40
#define EEPROM_STAT_ADDR  0x4000
#define VPD_BASE          0xc00

/**
 *	t3_seeprom_read - read a VPD EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.  A zero is written to the flag bit when the
 *	address is written to the control register.  The hardware device will
 *	set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	u32 v;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
	do {
		udelay(10);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
	*data = cpu_to_le32(v);
	return 0;
}
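
/*
 * Illustrative sketch (not part of the driver): reading one 32-bit word of
 * the VPD area.  @addr must be 4-byte aligned, and the result comes back
 * little-endian, which get_vpd_params below relies on when it fills in
 * struct t3_vpd word by word:
 *
 *	__le32 word;
 *
 *	if (t3_seeprom_read(adapter, VPD_BASE, &word))
 *		return -EIO;	// poll for PCI_VPD_ADDR_F timed out
 */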

/**
 *	t3_seeprom_write - write a VPD EEPROM location
 *	@adapter: adapter to write
 *	@addr: EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.
 */
int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
			       le32_to_cpu(data));
	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
			      addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 *	t3_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: 1 to enable write protection, 0 to disable it
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

static int vpdstrtouint(char *s, u8 len, unsigned int base, unsigned int *val)
{
	char tok[256];

	memcpy(tok, s, len);
	tok[len] = 0;
	return kstrtouint(strim(tok), base, val);
}

static int vpdstrtou16(char *s, u8 len, unsigned int base, u16 *val)
{
	char tok[256];

	memcpy(tok, s, len);
	tok[len] = 0;
	return kstrtou16(strim(tok), base, val);
}

/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (__le32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	ret = vpdstrtouint(vpd.cclk_data, vpd.cclk_len, 10, &p->cclk);
	if (ret)
		return ret;
	ret = vpdstrtouint(vpd.mclk_data, vpd.mclk_len, 10, &p->mclk);
	if (ret)
		return ret;
	ret = vpdstrtouint(vpd.uclk_data, vpd.uclk_len, 10, &p->uclk);
	if (ret)
		return ret;
	ret = vpdstrtouint(vpd.mdc_data, vpd.mdc_len, 10, &p->mdc);
	if (ret)
		return ret;
	ret = vpdstrtouint(vpd.mt_data, vpd.mt_len, 10, &p->mem_timing);
	if (ret)
		return ret;
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* Old EEPROMs didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
		p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
		ret = vpdstrtou16(vpd.xaui0cfg_data, vpd.xaui0cfg_len, 16,
				  &p->xauicfg[0]);
		if (ret)
			return ret;
		ret = vpdstrtou16(vpd.xaui1cfg_data, vpd.xaui1cfg_len, 16,
				  &p->xauicfg[1]);
		if (ret)
			return ret;
	}

	ret = hex2bin(p->eth_base, vpd.na_data, 6);
	if (ret < 0)
		return -EINVAL;
	return 0;
}

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,    /* flash address holding FW version */
	FW_MIN_SIZE = 8            /* at least version and csum */
};
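
/*
 * Layout arithmetic implied by the constants above: with 64 KB sectors,
 * FW_FLASH_BOOT_ADDR (0x70000) is the start of sector 0x70000 >> 16 == 7,
 * which is the single fw_sector that t3_load_fw erases, and FW_VERS_ADDR
 * (0x7fffc) addresses the last 32-bit word of that same sector, where the
 * firmware version is kept.
 */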

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t3_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
static int t3_read_flash(struct adapter *adapter, unsigned int addr,
			 unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for (; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

/**
 *	t3_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *) buf + offset, n))
		return -EIO;
	return 0;
}
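
/*
 * Illustrative note (not part of the driver): because t3_write_flash
 * programs at most one 256-byte flash page, a write may not cross a page
 * boundary.  offset = addr & 0xff is the position within the page, so the
 * check offset + n > 256 rejects, e.g., a 16-byte write at addr 0x1f8
 * (offset 0xf8, and 0xf8 + 16 = 264 > 256); callers such as t3_load_fw
 * keep writes page-aligned in chunks of at most 256 bytes.
 */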

/**
 *	t3_get_tp_version - read the TP SRAM version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the protocol SRAM version from SRAM.
 */
int t3_get_tp_version(struct adapter *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}

/**
 *	t3_check_tpsram_version - check the TP SRAM version
 *	@adapter: the adapter
 *
 *	Checks that the protocol SRAM version loaded in the adapter matches
 *	the version the driver was compiled for.
 */
int t3_check_tpsram_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int major, minor;

	if (adapter->params.rev == T3_REV_A)
		return 0;

	ret = t3_get_tp_version(adapter, &vers);
	if (ret)
		return ret;

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;

	CH_ERR(adapter, "found wrong TP version (%u.%u), "
	       "driver compiled for version %u.%u\n", major, minor,
	       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	return -EINVAL;
}

/**
 *	t3_check_tpsram - check if provided protocol SRAM
 *			  is compatible with this driver
 *	@adapter: the adapter
 *	@tp_sram: the firmware image to write
 *	@size: image size
 *
 *	Checks if an adapter's TP SRAM image is compatible with the driver.
 *	Returns 0 if the versions are compatible, a negative error otherwise.
 */
int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
		    unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)tp_sram;

	/* Verify checksum */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	return 0;
}
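
/*
 * Worked example of the checksum rule used above and in t3_load_fw
 * (illustrative): an image is valid when the 32-bit sum of all its
 * big-endian words equals 0xffffffff, i.e. the trailing word is the one's
 * complement of the sum of everything before it.  For a toy 3-word image:
 *
 *	sum(0x00000001, 0x00000002) = 0x00000003
 *	trailer = ~0x00000003      = 0xfffffffc
 *	0x00000001 + 0x00000002 + 0xfffffffc = 0xffffffff
 */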

enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};

/**
 *	t3_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}

/**
 *	t3_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;
	else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
		CH_WARN(adapter, "found old FW minor version (%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	else {
		CH_WARN(adapter, "found newer FW version (%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
		return 0;
	}
	return -EINVAL;
}

/**
 *	t3_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	while (start <= end) {
		int ret;

		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
			return ret;
		start++;
	}
	return 0;
}

/**
 *	t3_load_fw - download firmware
 *	@adapter: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 *	The FW image has the following sections: @size - 8 bytes of code and
 *	data, followed by 4 bytes of FW version, followed by the 32-bit
 *	1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}

#define CIM_CTL_BASE 0x2000

/**
 *	t3_cim_ctl_blk_read - read a block from CIM control region
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}
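
/*
 * Illustrative sketch (not part of the driver): reading four words from the
 * start of the CIM control region.  @addr is an offset within the region;
 * the CIM_CTL_BASE bias is applied internally:
 *
 *	unsigned int vals[4];
 *
 *	if (t3_cim_ctl_blk_read(adap, 0, ARRAY_SIZE(vals), vals))
 *		return -EIO;	// interface busy or poll timed out
 */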

static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
			       u32 *rx_hash_high, u32 *rx_hash_low)
{
	/* stop Rx unicast traffic */
	t3_mac_disable_exact_filters(mac);

	/* stop broadcast, multicast, promiscuous mode traffic */
	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 F_DISBCAST);

	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);

	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);

	/* Leave time to drain max RX fifo */
	msleep(1);
}

static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
			       u32 rx_hash_high, u32 rx_hash_low)
{
	t3_mac_enable_exact_filters(mac);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 rx_cfg);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
}

/**
 *	t3_link_changed - handle interface link changes
 *	@adapter: the adapter
 *	@port_id: the port index that changed link state
 *
 *	Called when a port's link settings change to propagate the new values
 *	to the associated PHY and MAC.  After performing the common tasks it
 *	invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (!lc->link_ok && link_ok) {
		u32 rx_cfg, rx_hash_high, rx_hash_low;
		u32 status;

		t3_xgm_intr_enable(adapter, port_id);
		t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
		t3_mac_enable(mac, MAC_DIRECTION_RX);

		status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
		if (status & F_LINKFAULTCHANGE) {
			mac->stats.link_faults++;
			pi->link_fault = 1;
		}
		t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
	}

	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok == lc->link_ok && speed == lc->speed &&
	    duplex == lc->duplex && fc == lc->fc)
		return;                            /* nothing changed */

	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault,
			   speed, duplex, fc);
}

void t3_link_fault(struct adapter *adapter, int port_id)
{
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cmac *mac = &pi->mac;
	struct cphy *phy = &pi->phy;
	struct link_config *lc = &pi->link_config;
	int link_ok, speed, duplex, fc, link_fault;
	u32 rx_cfg, rx_hash_high, rx_hash_low;

	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);

	if (adapter->params.rev > 0 && uses_xaui(adapter))
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);

	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
	t3_mac_enable(mac, MAC_DIRECTION_RX);

	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);

	link_fault = t3_read_reg(adapter,
				 A_XGM_INT_STATUS + mac->offset);
	link_fault &= F_LINKFAULTCHANGE;

	link_ok = lc->link_ok;
	speed = lc->speed;
	duplex = lc->duplex;
	fc = lc->fc;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (link_fault) {
		lc->link_ok = 0;
		lc->speed = SPEED_INVALID;
		lc->duplex = DUPLEX_INVALID;

		t3_os_link_fault(adapter, port_id, 0);

		/* Account link faults only when the phy reports a link up */
		if (link_ok)
			mac->stats.link_faults++;
	} else {
		if (link_ok)
			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
				     F_TXACTENABLE | F_RXEN);

		pi->link_fault = 0;
		lc->link_ok = (unsigned char)link_ok;
		lc->speed = speed < 0 ? SPEED_INVALID : speed;
		lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
		t3_os_link_fault(adapter, port_id, link_ok);
	}
}

/**
 *	t3_link_start - apply link configuration to MAC/PHY
 *	@phy: the PHY to setup
 *	@mac: the MAC to setup
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}

/**
 *	t3_set_vlan_accel - control HW VLAN extraction
 *	@adapter: the adapter
 *	@ports: bitmap of adapter ports to operate on
 *	@on: enable (1) or disable (0) HW VLAN extraction
 *
 *	Enables or disables HW extraction of VLAN tags for the given ports.
 */
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
{
	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
			 ports << S_VLANEXTRACTIONENABLE,
			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}

struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
};

/**
 *	t3_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@mask: a mask to apply to the interrupt status
 *	@acts: table of interrupt actions
 *	@stats: statistics counters tracking interrupt occurrences
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally printing a warning or alert message, and optionally
 *	incrementing a stat counter.  The table is terminated by an entry
 *	specifying mask 0.  Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
			status &= ~acts->mask;
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status)		/* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
	return fatal;
}

#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
		       F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
		       F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
		       F_LOPIODRBDROPERR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)
/*
 * Interrupt handler for the PCIX1 module.
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
		 1},
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
		 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
		 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
		 1},
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
		{F_RXPARERR, "PCI Rx parity error", -1, 1},
		{F_TXPARERR, "PCI Tx parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{0xffffff, "TP parity error", -1, 1},
		{0x1000000, "TP out of Rx pages", -1, 1},
		{0x2000000, "TP out of Tx pages", -1, 1},
		{0}
	};

	static const struct intr_info tp_intr_info_t3c[] = {
		{0x1fffffff, "TP parity error", -1, 1},
		{F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
		{F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
				  adapter->params.rev < T3_REV_C ?
				  tp_intr_info : tp_intr_info_t3c, NULL))
		t3_fatal_err(adapter);
}

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
		 STAT_ULP_CH0_PBL_OOB, 0},
		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
		 STAT_ULP_CH1_PBL_OOB, 0},
		{0xfc, "ULP TX parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
				  ulptx_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		 "PMTX ispi parity error", -1, 1},
		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		 "PMTX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1720
1721#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1722	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1723	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1724	F_IESPI1_TX_FRAMING_ERROR)
1725#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1726	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1727	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1728	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1729
1730/*
1731 * PM RX interrupt handler.
1732 */
1733static void pmrx_intr_handler(struct adapter *adapter)
1734{
1735	static const struct intr_info pmrx_intr_info[] = {
1736		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1737		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1738		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1739		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1740		 "PMRX ispi parity error", -1, 1},
1741		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1742		 "PMRX ospi parity error", -1, 1},
1743		{0}
1744	};
1745
1746	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1747				  pmrx_intr_info, NULL))
1748		t3_fatal_err(adapter);
1749}
1750
1751/*
1752 * CPL switch interrupt handler.
1753 */
1754static void cplsw_intr_handler(struct adapter *adapter)
1755{
1756	static const struct intr_info cplsw_intr_info[] = {
1757		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
1758		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
1759		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1760		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1761		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1762		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1763		{0}
1764	};
1765
1766	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1767				  cplsw_intr_info, NULL))
1768		t3_fatal_err(adapter);
1769}
1770
1771/*
1772 * MPS interrupt handler.
1773 */
1774static void mps_intr_handler(struct adapter *adapter)
1775{
1776	static const struct intr_info mps_intr_info[] = {
1777		{0x1ff, "MPS parity error", -1, 1},
1778		{0}
1779	};
1780
1781	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1782				  mps_intr_info, NULL))
1783		t3_fatal_err(adapter);
1784}
1785
1786#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
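/*
 * Uncorrectable (F_UE), parity (V_PE) and address (F_AE) errors are treated
 * as fatal by the handler below; correctable ECC errors (F_CE) are only
 * counted and logged.
 */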

/*
 * MC7 interrupt handler.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	struct adapter *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	if (cause & F_CE) {
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
			 mc7->name, addr);
	}

	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);

	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}

#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
/*
 * XGMAC interrupt handler.
 */
static int mac_intr_handler(struct adapter *adap, unsigned int idx)
{
	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
	/*
	 * We mask out interrupt causes for which we're not taking interrupts.
	 * This allows us to use polling logic to monitor some of the other
	 * conditions when taking interrupts would impose too much load on the
	 * system.
	 */
	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
		    ~F_RXFIFO_OVERFLOW;

	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
		mac->stats.tx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
	}
	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
		mac->stats.rx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
	}
	if (cause & F_TXFIFO_UNDERRUN)
		mac->stats.tx_fifo_urun++;
	if (cause & F_RXFIFO_OVERFLOW)
		mac->stats.rx_fifo_ovfl++;
	if (cause & V_SERDES_LOS(M_SERDES_LOS))
		mac->stats.serdes_signal_loss++;
	if (cause & F_XAUIPCSCTCERR)
		mac->stats.xaui_pcs_ctc_err++;
	if (cause & F_XAUIPCSALIGNCHANGE)
		mac->stats.xaui_pcs_align_change++;
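	/*
	 * On a link fault, mask further link-fault interrupts until the OS
	 * handler has dealt with this one, then hand the event off.
	 */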
	if (cause & F_XGM_INT) {
		t3_set_reg_field(adap,
				 A_XGM_INT_ENABLE + mac->offset,
				 F_XGM_INT, 0);
		mac->stats.link_faults++;

		t3_os_link_fault_handler(adap, idx);
	}

	if (cause & XGM_INTR_FATAL)
		t3_fatal_err(adap);

	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
	return cause != 0;
}

/*
 * Interrupt handler for PHY events.
 */
int t3_phy_intr_handler(struct adapter *adapter)
{
	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);

	for_each_port(adapter, i) {
		struct port_info *p = adap2pinfo(adapter, i);

		if (!(p->phy.caps & SUPPORTED_IRQ))
			continue;

		if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
			int phy_cause = p->phy.ops->intr_handler(&p->phy);

			if (phy_cause & cphy_cause_link_change)
				t3_link_changed(adapter, i);
			if (phy_cause & cphy_cause_fifo_error)
				p->phy.fifo_errors++;
			if (phy_cause & cphy_cause_module_change)
				t3_os_phymod_changed(adapter, i);
		}
	}

	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
	return 0;
}

/*
 * T3 slow path (non-data) interrupt handler.
 */
int t3_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);

	cause &= adapter->slow_intr_mask;
	if (!cause)
		return 0;
	if (cause & F_PCIM0) {
		if (is_pcie(adapter))
			pcie_intr_handler(adapter);
		else
			pci_intr_handler(adapter);
	}
	if (cause & F_SGE3)
		t3_sge_err_intr_handler(adapter);
	if (cause & F_MC7_PMRX)
		mc7_intr_handler(&adapter->pmrx);
	if (cause & F_MC7_PMTX)
		mc7_intr_handler(&adapter->pmtx);
	if (cause & F_MC7_CM)
		mc7_intr_handler(&adapter->cm);
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_TP1)
		tp_intr_handler(adapter);
	if (cause & F_ULP2_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_ULP2_TX)
		ulptx_intr_handler(adapter);
	if (cause & F_PM1_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_PM1_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_MPS0)
		mps_intr_handler(adapter);
	if (cause & F_MC5A)
		t3_mc5_intr_handler(&adapter->mc5);
	if (cause & F_XGMAC0_0)
		mac_intr_handler(adapter, 0);
	if (cause & F_XGMAC0_1)
		mac_intr_handler(adapter, 1);
	if (cause & F_T3DBG)
		t3_os_ext_intr_handler(adapter);

	/* Clear the interrupts just processed. */
	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
	return 1;
}

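/*
 * Build a bitmap of the GPIO pins used for PHY interrupts by the ports
 * whose PHYs can signal interrupts; it is written to the T3DBG interrupt
 * enable register by t3_intr_enable() below.
 */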
static unsigned int calc_gpio_intr(struct adapter *adap)
{
	unsigned int i, gpi_intr = 0;

	for_each_port(adap, i)
		if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
		    adapter_info(adap)->gpio_intr[i])
			gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
	return gpi_intr;
}

/**
 *	t3_intr_enable - enable interrupts
 *	@adapter: the adapter whose interrupts should be enabled
 *
 *	Enable interrupts by setting the interrupt enable registers of the
 *	various HW modules and then enabling the top-level interrupt
 *	concentrator.
 */
void t3_intr_enable(struct adapter *adapter)
{
	static const struct addr_val_pair intr_en_avp[] = {
		{A_SG_INT_ENABLE, SGE_INTR_MASK},
		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
	};

	adapter->slow_intr_mask = PL_INTR_MASK;

	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
	t3_write_reg(adapter, A_TP_INT_ENABLE,
		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);

	if (adapter->params.rev > 0) {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
			     F_PBL_BOUND_ERR_CH1);
	} else {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
	}

	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));

	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
	else
		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
}

/**
 *	t3_intr_disable - disable a card's interrupts
 *	@adapter: the adapter whose interrupts should be disabled
 *
 *	Disable interrupts.  We only disable the top-level interrupt
 *	concentrator and the SGE data interrupts.
 */
void t3_intr_disable(struct adapter *adapter)
{
	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
	adapter->slow_intr_mask = 0;
}

/**
 *	t3_intr_clear - clear all interrupts
 *	@adapter: the adapter whose interrupts should be cleared
 *
 *	Clears all interrupts.
 */
void t3_intr_clear(struct adapter *adapter)
{
	static const unsigned int cause_reg_addr[] = {
		A_SG_INT_CAUSE,
		A_SG_RSPQ_FL_STATUS,
		A_PCIX_INT_CAUSE,
		A_MC7_INT_CAUSE,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		A_CIM_HOST_INT_CAUSE,
		A_TP_INT_CAUSE,
		A_MC5_DB_INT_CAUSE,
		A_ULPRX_INT_CAUSE,
		A_ULPTX_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		A_PM1_TX_INT_CAUSE,
		A_PM1_RX_INT_CAUSE,
		A_MPS_INT_CAUSE,
		A_T3DBG_INT_CAUSE,
	};
	unsigned int i;

	/* Clear PHY and MAC interrupts for each port. */
	for_each_port(adapter, i)
	    t3_port_intr_clear(adapter, i);

	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);

	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
}

void t3_xgm_intr_enable(struct adapter *adapter, int idx)
{
	struct port_info *pi = adap2pinfo(adapter, idx);

	t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
		     XGM_EXTRA_INTR_MASK);
}

void t3_xgm_intr_disable(struct adapter *adapter, int idx)
{
	struct port_info *pi = adap2pinfo(adapter, idx);

	t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
		     0x7ff);
}

/**
 *	t3_port_intr_enable - enable port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts should be enabled
 *
 *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.
 */
void t3_port_intr_enable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_enable(phy);
}

/**
 *	t3_port_intr_disable - disable port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts should be disabled
 *
 *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.
 */
void t3_port_intr_disable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_disable(phy);
}

/**
 *	t3_port_intr_clear - clear port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts to clear
 *
 *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.
 */
static void t3_port_intr_clear(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
	phy->ops->intr_clear(phy);
}

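/*
 * SGE context commands are polled for completion up to this many times at
 * 1 us intervals (see the t3_wait_op_done() calls below), giving a timeout
 * of roughly 100 us.
 */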
#define SG_CONTEXT_CMD_ATTEMPTS 100

/**
 *	t3_sge_write_context - write an SGE context
 *	@adapter: the adapter
 *	@id: the context id
 *	@type: the context type
 *
 *	Program an SGE context with the values already loaded in the
 *	CONTEXT_DATA? registers.
 */
static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
				unsigned int type)
{
	if (type == F_RESPONSEQ) {
		/*
		 * Can't write the Response Queue Context bits for
		 * Interrupt Armed or the Reserve bits after the chip
		 * has been initialized out of reset.  Writing to these
		 * bits can confuse the hardware.
		 */
		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	} else {
		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	}
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}

/**
 *	clear_sge_ctxt - completely clear an SGE context
 *	@adap: the adapter
 *	@id: the context id
 *	@type: the context type
 *
 *	Completely clear an SGE context.  Used predominantly at post-reset
 *	initialization.  Note in particular that we don't skip writing to any
 *	"sensitive bits" in the contexts the way that t3_sge_write_context()
 *	does ...
 */
static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
			  unsigned int type)
{
	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
	t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}

/**
 *	t3_sge_init_ecntxt - initialize an SGE egress context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@gts_enable: whether to enable GTS for the context
 *	@type: the egress context type
 *	@respq: associated response queue
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@token: uP token
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE egress context and make it ready for use.  If the
 *	platform allows concurrent context operations, the caller is
 *	responsible for appropriate locking.
 */
int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
		       enum sge_context_type type, int respq, u64 base_addr,
		       unsigned int size, unsigned int token, int gen,
		       unsigned int cidx)
{
	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
		     V_EC_BASE_LO(base_addr & 0xffff));
	base_addr >>= 16;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
		     F_EC_VALID);
	return t3_sge_write_context(adapter, id, F_EGRESS);
}
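
/*
 * Illustrative only: a typical egress context setup might look like
 *
 *	t3_sge_init_ecntxt(adap, qid, 1, SGE_CNTXT_ETH, rspq_id,
 *			   ring_dma_addr, nentries, token, 1, 0);
 *
 * where ring_dma_addr is the 4K-aligned bus address of the descriptor
 * ring.  qid, rspq_id, ring_dma_addr, nentries and token are placeholder
 * names, not values mandated by the hardware.
 */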

/**
 *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@gts_enable: whether to enable GTS for the context
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@bsize: size of each buffer for this queue
 *	@cong_thres: threshold to signal congestion to upstream producers
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE free list context and make it ready for use.  The
 *	caller is responsible for ensuring only one context operation occurs
 *	at a time.
 */
int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
			int gts_enable, u64 base_addr, unsigned int size,
			unsigned int bsize, unsigned int cong_thres, int gen,
			unsigned int cidx)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
		     V_FL_BASE_HI((u32) base_addr) |
		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
	return t3_sge_write_context(adapter, id, F_FREELIST);
}

/**
 *	t3_sge_init_rspcntxt - initialize an SGE response queue context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@fl_thres: threshold for selecting the normal or jumbo free list
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE response queue context and make it ready for use.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.
 */
int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
			 int irq_vec_idx, u64 base_addr, unsigned int size,
			 unsigned int fl_thres, int gen, unsigned int cidx)
{
	unsigned int intr = 0;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
		     V_CQ_INDEX(cidx));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	if (irq_vec_idx >= 0)
		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
}

/**
 *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@rspq: response queue for async notifications
 *	@ovfl_mode: CQ overflow mode
 *	@credits: completion queue credits
 *	@credit_thres: the credit threshold
 *
 *	Initialize an SGE completion queue context and make it ready for use.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.
 */
int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
			unsigned int size, int rspq, int ovfl_mode,
			unsigned int credits, unsigned int credit_thres)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
		     V_CQ_ERR(ovfl_mode));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
		     V_CQ_CREDIT_THRES(credit_thres));
	return t3_sge_write_context(adapter, id, F_CQ);
}

/**
 *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
 *	@adapter: the adapter
 *	@id: the egress context id
 *	@enable: enable (1) or disable (0) the context
 *
 *	Enable or disable an SGE egress context.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}

/**
 *	t3_sge_disable_fl - disable an SGE free-buffer list
 *	@adapter: the adapter
 *	@id: the free list context id
 *
 *	Disable an SGE free-buffer list.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}

/**
 *	t3_sge_disable_rspcntxt - disable an SGE response queue
 *	@adapter: the adapter
 *	@id: the response queue context id
 *
 *	Disable an SGE response queue.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}

/**
 *	t3_sge_disable_cqcntxt - disable an SGE completion queue
 *	@adapter: the adapter
 *	@id: the completion queue context id
 *
 *	Disable an SGE completion queue.  The caller is responsible for
 *	ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}

/**
 *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
 *	@adapter: the adapter
 *	@id: the context id
 *	@op: the operation to perform
 *	@credits: credit value to write
 *
 *	Perform the selected operation on an SGE completion queue context.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.
 */
int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
		      unsigned int credits)
{
	u32 val;

	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
		     V_CONTEXT(id) | F_CQ);
	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
		return -EIO;

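	/*
	 * Operations 2-6 return the current CQ index.  Rev > 0 parts report
	 * it directly in the command register; rev 0 requires an explicit
	 * read-back of the context to retrieve it.
	 */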
	if (op >= 2 && op < 7) {
		if (adapter->params.rev > 0)
			return G_CQ_INDEX(val);

		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
				    F_CONTEXT_CMD_BUSY, 0,
				    SG_CONTEXT_CMD_ATTEMPTS, 1))
			return -EIO;
		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
	}
	return 0;
}

/**
 *	t3_config_rss - configure Rx packet steering
 *	@adapter: the adapter
 *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
 *	@cpus: values for the CPU lookup table (0xff terminated)
 *	@rspq: values for the response queue lookup table (0xffff terminated)
 *
 *	Programs the receive packet steering logic.  @cpus and @rspq provide
 *	the values for the CPU and response queue lookup tables.  If they
 *	provide fewer values than the size of the tables the supplied values
 *	are used repeatedly until the tables are fully populated.
 */
void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
		   const u8 *cpus, const u16 *rspq)
{
	int i, j, cpu_idx = 0, q_idx = 0;

	if (cpus)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			u32 val = i << 16;

			for (j = 0; j < 2; ++j) {
				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
				if (cpus[cpu_idx] == 0xff)
					cpu_idx = 0;
			}
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
		}

	if (rspq)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     (i << 16) | rspq[q_idx++]);
			if (rspq[q_idx] == 0xffff)
				q_idx = 0;
		}

	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
}

/**
 *	t3_tp_set_offload_mode - put TP in NIC/offload mode
 *	@adap: the adapter
 *	@enable: 1 to select offload mode, 0 for regular NIC
 *
 *	Switches TP to NIC/offload mode.
 */
void t3_tp_set_offload_mode(struct adapter *adap, int enable)
{
	if (is_offload(adap) || !enable)
		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
				 V_NICMODE(!enable));
}

/**
 *	pm_num_pages - calculate the number of pages of the payload memory
 *	@mem_size: the size of the payload memory
 *	@pg_size: the size of each payload memory page
 *
 *	Calculate the number of pages, each of the given size, that fit in a
 *	memory of the specified size, respecting the HW requirement that the
 *	number of pages must be a multiple of 24.
 */
static inline unsigned int pm_num_pages(unsigned int mem_size,
					unsigned int pg_size)
{
	unsigned int n = mem_size / pg_size;

	return n - n % 24;
}
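
/*
 * Example: 64 MB of payload memory with 4 KB pages gives n = 16384, so
 * pm_num_pages() returns 16368 (16384 - 16384 % 24).
 */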

#define mem_region(adap, start, size, reg) \
	t3_write_reg((adap), A_ ## reg, (start)); \
	start += size
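/*
 * Note that mem_region() expands to two statements: it programs the base
 * register and then advances start past the region in the caller's scope,
 * so it is usable only at statement level.
 */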

/**
 *	partition_mem - partition memory and configure TP memory settings
 *	@adap: the adapter
 *	@p: the TP parameters
 *
 *	Partitions context and payload memory and configures TP's memory
 *	registers.
 */
static void partition_mem(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
	unsigned int timers = 0, timers_shift = 22;

	if (adap->params.rev > 0) {
		if (tids <= 16 * 1024) {
			timers = 1;
			timers_shift = 16;
		} else if (tids <= 64 * 1024) {
			timers = 2;
			timers_shift = 18;
		} else if (tids <= 256 * 1024) {
			timers = 3;
			timers_shift = 20;
		}
	}

	t3_write_reg(adap, A_TP_PMM_SIZE,
		     p->chan_rx_size | (p->chan_tx_size >> 16));

	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));

	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);

	pstructs = p->rx_num_pgs + p->tx_num_pgs;
	/* Add a bit of headroom and make multiple of 24 */
	pstructs += 48;
	pstructs -= pstructs % 24;
	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);

	m = tids * TCB_SIZE;
	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);

	m = (m + 4095) & ~0xfff;
	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);

	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
	if (tids < m)
		adap->params.mc5.nservers += m - tids;
}

static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
				  u32 val)
{
	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
	t3_write_reg(adap, A_TP_PIO_DATA, val);
}

static void tp_config(struct adapter *adap, const struct tp_params *p)
{
	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
		     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
			 F_IPV6ENABLE | F_NICMODE);
	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
			 adap->params.rev > 0 ? F_ENABLEESND :
			 F_T3A_ENABLEESND);

	t3_set_reg_field(adap, A_TP_PC_CONFIG,
			 F_ENABLEEPCMDAFULL,
			 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);

	if (adap->params.rev > 0) {
		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
				 F_TXPACEAUTO);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
	} else
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);

	if (adap->params.rev == T3_REV_C)
		t3_set_reg_field(adap, A_TP_PC_CONFIG,
				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
				 V_TABLELATENCYDELTA(4));

	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
}

/* Desired TP timer resolution in usec */
#define TP_TMR_RES 50

/* TCP timer values in ms */
#define TP_DACK_TIMER 50
#define TP_RTO_MIN    250

/**
 *	tp_set_timers - set TP timing parameters
 *	@adap: the adapter to set
 *	@core_clk: the core clock frequency in Hz
 *
 *	Set TP's timing parameters, such as the various timer resolutions and
 *	the TCP timer values.
 */
static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
{
	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
	unsigned int dack_re = fls(core_clk / 5000) - 1;	/* 200us */
	unsigned int tstamp_re = fls(core_clk / 1000);	/* 1ms, at least */
	unsigned int tps = core_clk >> tre;
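	/*
	 * Example: with a 200 MHz core clock, core_clk / 20000 = 10000, so
	 * tre = fls(10000) - 1 = 13 and tps = 200000000 >> 13 = 24414 timer
	 * ticks per second, i.e. a resolution of about 41 us, within the
	 * 50 us TP_TMR_RES target.
	 */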

	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
		     V_DELAYEDACKRESOLUTION(dack_re) |
		     V_TIMESTAMPRESOLUTION(tstamp_re));
	t3_write_reg(adap, A_TP_DACK_TIMER,
		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
		     V_KEEPALIVEMAX(9));

#define SECONDS * tps
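/*
 * "n SECONDS" expands to "n * tps", converting a time in seconds to TP
 * timer ticks for the register writes below.
 */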

	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);

#undef SECONDS
}

/**
 *	t3_tp_set_coalescing_size - set receive coalescing size
 *	@adap: the adapter
 *	@size: the receive coalescing size
 *	@psh: whether a set PSH bit should deliver coalesced data
 *
 *	Set the receive coalescing size and PSH bit handling.
 */
static int t3_tp_set_coalescing_size(struct adapter *adap,
				     unsigned int size, int psh)
{
	u32 val;

	if (size > MAX_RX_COALESCING_LEN)
		return -EINVAL;

	val = t3_read_reg(adap, A_TP_PARA_REG3);
	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);

	if (size) {
		val |= F_RXCOALESCEENABLE;
		if (psh)
			val |= F_RXCOALESCEPSHEN;
		size = min(MAX_RX_COALESCING_LEN, size);
		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
	}
	t3_write_reg(adap, A_TP_PARA_REG3, val);
	return 0;
}

/**
 *	t3_tp_set_max_rxsize - set the max receive size
 *	@adap: the adapter
 *	@size: the max receive size
 *
 *	Set TP's max receive size.  This is the limit that applies when
 *	receive coalescing is disabled.
 */
static void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
{
	t3_write_reg(adap, A_TP_PARA_REG7,
		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
}

static void init_mtus(unsigned short mtus[])
{
	/*
	 * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
	 * it can accommodate max size TCP/IP headers when SACK and timestamps
	 * are enabled and still have at least 8 bytes of payload.
	 */
	mtus[0] = 88;
	mtus[1] = 88;
	mtus[2] = 256;
	mtus[3] = 512;
	mtus[4] = 576;
	mtus[5] = 1024;
	mtus[6] = 1280;
	mtus[7] = 1492;
	mtus[8] = 1500;
	mtus[9] = 2002;
	mtus[10] = 2048;
	mtus[11] = 4096;
	mtus[12] = 4352;
	mtus[13] = 8192;
	mtus[14] = 9000;
	mtus[15] = 9600;
}

/*
 * Initial congestion control parameters.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
	a[9] = 2;
	a[10] = 3;
	a[11] = 4;
	a[12] = 5;
	a[13] = 6;
	a[14] = 7;
	a[15] = 8;
	a[16] = 9;
	a[17] = 10;
	a[18] = 14;
	a[19] = 17;
	a[20] = 21;
	a[21] = 25;
	a[22] = 30;
	a[23] = 35;
	a[24] = 45;
	a[25] = 60;
	a[26] = 80;
	a[27] = 100;
	a[28] = 200;
	a[29] = 300;
	a[30] = 400;
	a[31] = 500;

	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
	b[9] = b[10] = 1;
	b[11] = b[12] = 2;
	b[13] = b[14] = b[15] = b[16] = 3;
	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
	b[28] = b[29] = 6;
	b[30] = b[31] = 7;
}

/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 *	t3_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the unrestricted values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *	@mtu_cap: the maximum permitted effective MTU
 *
 *	Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
 *	Update the high-speed congestion control table with the supplied alpha,
 *	beta, and MTUs.
 */
void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
		  unsigned short alpha[NCCTRL_WIN],
		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
{
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = min(mtus[i], mtu_cap);
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))	/* round */
			log2--;
		t3_write_reg(adap, A_TP_MTU_TABLE,
			     (i << 24) | (log2 << 16) | mtu);

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}

/**
 *	t3_tp_get_mib_stats - read TP's MIB counters
 *	@adap: the adapter
 *	@tps: holds the returned counter values
 *
 *	Returns the values of TP's MIB counters.
 */
void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
{
	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
			 sizeof(*tps) / sizeof(u32), 0);
}

#define ulp_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1); \
	start += len

#define ulptx_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1)

static void ulp_config(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m = p->chan_rx_size;

	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
}

/**
 *	t3_set_proto_sram - set the contents of the protocol sram
 *	@adap: the adapter
 *	@data: the protocol image
 *
 *	Write the contents of the protocol SRAM.
 */
int t3_set_proto_sram(struct adapter *adap, const u8 *data)
{
	int i;
	const __be32 *buf = (const __be32 *)data;

	for (i = 0; i < PROTO_SRAM_LINES; i++) {
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));

		t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
		if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
			return -EIO;
	}
	t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);

	return 0;
}

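/**
 *	t3_config_trace_filter - configure one of the tracing filters
 *	@adapter: the adapter
 *	@tp: the desired trace filter parameters
 *	@filter_index: which filter to configure
 *	@invert: if set non-matching packets are traced instead of matching ones
 *	@enable: whether to enable the filter
 *
 *	Configures one of the tracing filters available in HW.
 */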
void t3_config_trace_filter(struct adapter *adapter,
			    const struct trace_params *tp, int filter_index,
			    int invert, int enable)
{
	u32 addr, key[4], mask[4];

	key[0] = tp->sport | (tp->sip << 16);
	key[1] = (tp->sip >> 16) | (tp->dport << 16);
	key[2] = tp->dip;
	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);

	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
	mask[2] = tp->dip_mask;
	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);

	if (invert)
		key[3] |= (1 << 29);
	if (enable)
		key[3] |= (1 << 28);

	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
	tp_wr_indirect(adapter, addr++, key[0]);
	tp_wr_indirect(adapter, addr++, mask[0]);
	tp_wr_indirect(adapter, addr++, key[1]);
	tp_wr_indirect(adapter, addr++, mask[1]);
	tp_wr_indirect(adapter, addr++, key[2]);
	tp_wr_indirect(adapter, addr++, mask[2]);
	tp_wr_indirect(adapter, addr++, key[3]);
	tp_wr_indirect(adapter, addr, mask[3]);
	t3_read_reg(adapter, A_TP_PIO_DATA);
}

/**
 *	t3_config_sched - configure a HW traffic scheduler
 *	@adap: the adapter
 *	@kbps: target rate in Kbps
 *	@sched: the scheduler index
 *
 *	Configure a HW scheduler for the target rate.
 */
int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
{
	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
	unsigned int clk = adap->params.vpd.cclk * 1000;
	unsigned int selected_cpt = 0, selected_bpt = 0;

	if (kbps > 0) {
		kbps *= 125;	/* -> bytes */
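		/*
		 * Scan all cycles-per-tick (cpt) values and pick the
		 * bytes-per-tick (bpt) pair whose rate, bpt * (clk / cpt),
		 * is closest to the target.  E.g. for 10 Mbps (1250000
		 * bytes/sec) on a 200 MHz clock, cpt = 160 gives
		 * tps = 1250000 ticks/sec and bpt = 1, an exact match.
		 */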
		for (cpt = 1; cpt <= 255; cpt++) {
			tps = clk / cpt;
			bpt = (kbps + tps / 2) / tps;
			if (bpt > 0 && bpt <= 255) {
				v = bpt * tps;
				delta = v >= kbps ? v - kbps : kbps - v;
				if (delta <= mindelta) {
					mindelta = delta;
					selected_cpt = cpt;
					selected_bpt = bpt;
				}
			} else if (selected_cpt)
				break;
		}
		if (!selected_cpt)
			return -EINVAL;
	}
	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
	else
		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
	return 0;
}

static int tp_init(struct adapter *adap, const struct tp_params *p)
{
	int busy = 0;

	tp_config(adap, p);
	t3_set_vlan_accel(adap, 3, 0);

	if (is_offload(adap)) {
		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
				       0, 1000, 5);
		if (busy)
			CH_ERR(adap, "TP initialization timed out\n");
	}

	if (!busy)
		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
	return busy;
}

/*
 * Perform the bits of HW initialization that are dependent on the Tx
 * channels being used.
 */
static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
{
	int i;

	if (chan_map != 3) {                                 /* one channel */
		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
					      F_TPTXPORT1EN | F_PORT1ACTIVE));
		t3_write_reg(adap, A_PM1_TX_CFG,
			     chan_map == 1 ? 0xffffffff : 0);
	} else {                                             /* two channels */
		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
			     F_ENFORCEPKT);
		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
		for (i = 0; i < 16; i++)
			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
				     (i << 16) | 0x1010);
	}
}

static int calibrate_xgm(struct adapter *adapter)
{
	if (uses_xaui(adapter)) {
		unsigned int v, i;

		for (i = 0; i < 5; ++i) {
			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
			t3_read_reg(adapter, A_XGM_XAUI_IMP);
			msleep(1);
			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
				t3_write_reg(adapter, A_XGM_XAUI_IMP,
					     V_XAUIIMP(G_CALIMP(v) >> 2));
				return 0;
			}
		}
		CH_ERR(adapter, "MAC calibration failed\n");
		return -1;
	} else {
		t3_write_reg(adapter, A_XGM_RGMII_IMP,
			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 F_XGM_IMPSETUPDATE);
	}
	return 0;
}

static void calibrate_xgm_t3b(struct adapter *adapter)
{
	if (!uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
				 F_XGM_IMPSETUPDATE);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
	}
}

struct mc7_timing_params {
	unsigned char ActToPreDly;
	unsigned char ActToRdWrDly;
	unsigned char PreCyc;
	unsigned char RefCyc[5];
	unsigned char BkCyc;
	unsigned char WrToRdDly;
	unsigned char RdToWrDly;
};

/*
 * Write a value to a register and check that the write completed.  These
 * writes normally complete in a cycle or two, so one read should suffice.
 * The very first read exists to flush the posted write to the device.
 */
static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
{
	t3_write_reg(adapter, addr, val);
	t3_read_reg(adapter, addr);	/* flush */
	if (!(t3_read_reg(adapter, addr) & F_BUSY))
		return 0;
	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
	return -EIO;
}

static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
{
	static const unsigned int mc7_mode[] = {
		0x632, 0x642, 0x652, 0x432, 0x442
	};
	static const struct mc7_timing_params mc7_timings[] = {
		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
	};

	u32 val;
	unsigned int width, density, slow, attempts;
	struct adapter *adapter = mc7->adapter;
	const struct mc7_timing_params *p = &mc7_timings[mem_type];

	if (!mc7->size)
		return 0;

	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	slow = val & F_SLOW;
	width = G_WIDTH(val);
	density = G_DEN(val);

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
	msleep(1);

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
		msleep(1);
		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
			CH_ERR(adapter, "%s MC7 calibration timed out\n",
			       mc7->name);
			goto out_fail;
		}
	}

	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
		     V_ACTTOPREDLY(p->ActToPreDly) |
		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
		     val | F_CLKEN | F_TERM150);
	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */

	if (!slow)
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
				 F_DLLENB);
	udelay(1);

	val = slow ? 3 : 6;
	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
		udelay(5);
	}

	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
		       mc7_mode[mem_type]) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	/*
	 * mc7_clock is in kHz.  Convert it to the number of memory clock
	 * cycles per 7.8125 us (7812.5 ns) refresh interval:
	 * cycles = kHz * 7812.5 / 10^6.
	 */
	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;
	mc7_clock /= 1000000;
3277
3278	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3279		     F_PERREFEN | V_PREREFDIV(mc7_clock));
	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */

	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
		     (mc7->size << width) - 1);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */

	attempts = 50;
	do {
		msleep(250);
		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
	} while ((val & F_BUSY) && --attempts);
	if (val & F_BUSY) {
		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
		goto out_fail;
	}

	/* Enable normal memory accesses. */
	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
	return 0;

out_fail:
	return -1;
}

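/*
 * Worked example (illustrative, not driver code) of the refresh-interval
 * arithmetic in mc7_init() above.  With mc7_clock initially in kHz,
 *
 *   cycles = mc7_clock * 7812.5 / 10^6
 *
 * is the number of memory clocks in one 7.8125 us interval, the usual DDR
 * average refresh period (64 ms / 8192 rows).  E.g. for a 200 MHz clock:
 * 200000 * 7812 + 200000 / 2 = 1562500000, divided by 10^6 gives 1562
 * clocks, and 1562 / 200 MHz is indeed ~7.81 us.
 */
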
static void config_pcie(struct adapter *adap)
{
	static const u16 ack_lat[4][6] = {
		{237, 416, 559, 1071, 2095, 4143},
		{128, 217, 289, 545, 1057, 2081},
		{73, 118, 154, 282, 538, 1050},
		{67, 107, 86, 150, 278, 534}
	};
	static const u16 rpl_tmr[4][6] = {
		{711, 1248, 1677, 3213, 6285, 12429},
		{384, 651, 867, 1635, 3171, 6243},
		{219, 354, 462, 846, 1614, 3150},
		{201, 321, 258, 450, 834, 1602}
	};

	u16 val, devid;
	unsigned int log2_width, pldsize;
	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;

	pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL, &val);
	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;

	pci_read_config_word(adap->pdev, 0x2, &devid);
	if (devid == 0x37) {
		pcie_capability_write_word(adap->pdev, PCI_EXP_DEVCTL,
					   val & ~PCI_EXP_DEVCTL_READRQ &
					   ~PCI_EXP_DEVCTL_PAYLOAD);
		pldsize = 0;
	}

	pcie_capability_read_word(adap->pdev, PCI_EXP_LNKCTL, &val);

	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
	log2_width = fls(adap->params.pci.width) - 1;
	acklat = ack_lat[log2_width][pldsize];
	if (val & PCI_EXP_LNKCTL_ASPM_L0S)	/* check L0sEnable */
		acklat += fst_trn_tx * 4;
	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

	if (adap->params.rev == 0)
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
				 V_T3A_ACKLAT(M_T3A_ACKLAT),
				 V_T3A_ACKLAT(acklat));
	else
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
				 V_ACKLAT(acklat));

	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
			 V_REPLAYLMT(rpllmt));

	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
	t3_set_reg_field(adap, A_PCIE_CFG, 0,
			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
}

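/*
 * Worked example (illustrative) of the table lookups in config_pcie():
 * the row index is log2 of the negotiated link width (x1/x2/x4/x8 ->
 * 0..3) and the column is the Max_Payload_Size code from PCI_EXP_DEVCTL
 * (0..5 for 128..4096 bytes).  The sketch below is not compilable on
 * its own since ack_lat[] is local to config_pcie(); values per the
 * table rows above.
 */
#if 0
	unsigned int log2_width = fls(8) - 1;	/* x8 link -> row 3 */
	unsigned int pldsize = 2;		/* 512-byte MPS -> column 2 */
	u16 acklat = ack_lat[log2_width][pldsize];	/* 86, before the
							 * L0s/fast-training
							 * adjustments */
#endif
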
/*
 * Initialize and configure T3 HW modules.  This performs the
 * initialization steps that need to be done once after a card is reset.
 * MAC and PHY initialization is handled separately whenever a port is enabled.
 *
 * fw_params are passed to FW and their value is platform dependent.  Only the
 * top 8 bits are available for use, the rest must be 0.
 */
int t3_init_hw(struct adapter *adapter, u32 fw_params)
{
	int err = -EIO, attempts, i;
	const struct vpd_params *vpd = &adapter->params.vpd;

	if (adapter->params.rev > 0)
		calibrate_xgm_t3b(adapter);
	else if (calibrate_xgm(adapter))
		goto out_err;

	if (vpd->mclk) {
		partition_mem(adapter, &adapter->params.tp);

		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
				adapter->params.mc5.nfilters,
				adapter->params.mc5.nroutes))
			goto out_err;

		for (i = 0; i < 32; i++)
			if (clear_sge_ctxt(adapter, i, F_CQ))
				goto out_err;
	}

	if (tp_init(adapter, &adapter->params.tp))
		goto out_err;

	t3_tp_set_coalescing_size(adapter,
				  min(adapter->params.sge.max_pkt_size,
				      MAX_RX_COALESCING_LEN), 1);
	t3_tp_set_max_rxsize(adapter,
			     min(adapter->params.sge.max_pkt_size, 16384U));
	ulp_config(adapter, &adapter->params.tp);

	if (is_pcie(adapter))
		config_pcie(adapter);
	else
		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
				 F_DMASTOPEN | F_CLIDECEN);

	if (adapter->params.rev == T3_REV_C)
		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
				 F_CFG_CQE_SOP_MASK);

	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
	chan_init_hw(adapter, adapter->params.chan_map);
	t3_sge_init(adapter, &adapter->params.sge);
	t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN);

	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));

	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
	t3_write_reg(adapter, A_CIM_BOOT_CFG,
		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */

	attempts = 100;
	do {			/* wait for uP to initialize */
		msleep(20);
	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
	if (!attempts) {
		CH_ERR(adapter, "uP initialization timed out\n");
		goto out_err;
	}

	err = 0;
out_err:
	return err;
}

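/*
 * Illustrative bring-up order (a sketch, not a complete probe routine):
 * t3_prep_adapter() establishes per-device SW state once, after which
 * t3_init_hw() programs the HW modules.  Error handling is elided and
 * "ai" is assumed to come from the PCI device-ID table.
 */
#if 0
static int example_bringup(struct adapter *adap, const struct adapter_info *ai)
{
	int err = t3_prep_adapter(adap, ai, 1);	/* reset + SW state */

	if (!err)
		err = t3_init_hw(adap, 0);	/* only top 8 fw_params bits usable */
	return err;
}
#endif
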
/**
 *	get_pci_mode - determine a card's PCI mode
 *	@adapter: the adapter
 *	@p: where to store the PCI settings
 *
 *	Determines a card's PCI mode and associated parameters, such as speed
 *	and width.
 */
static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
	static unsigned short speed_map[] = { 33, 66, 100, 133 };
	u32 pci_mode;

	if (pci_is_pcie(adapter->pdev)) {
		u16 val;

		p->variant = PCI_VARIANT_PCIE;
		pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
		p->width = (val >> 4) & 0x3f;
		return;
	}

	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
	p->width = (pci_mode & F_64BIT) ? 64 : 32;
	pci_mode = G_PCIXINITPAT(pci_mode);
	if (pci_mode == 0)
		p->variant = PCI_VARIANT_PCI;
	else if (pci_mode < 4)
		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
	else if (pci_mode < 8)
		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
	else
		p->variant = PCI_VARIANT_PCIX_266_MODE2;
}

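/*
 * Worked example (illustrative): PCI_EXP_LNKSTA carries the negotiated
 * link width in bits 9:4, which is what the shift-and-mask above
 * extracts.  A LNKSTA value of 0x0081 decodes as (0x0081 >> 4) & 0x3f
 * = 8, i.e. a x8 link.
 */
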
/**
 *	init_link_config - initialize a link's SW state
 *	@lc: structure holding the link state
 *	@caps: information about the current card
 *
 *	Initializes the SW state maintained for each link, including the link's
 *	capabilities and default speed/duplex/flow-control/autonegotiation
 *	settings.
 */
static void init_link_config(struct link_config *lc, unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = lc->speed = SPEED_INVALID;
	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising = lc->supported;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}

/**
 *	mc7_calc_size - calculate MC7 memory size
 *	@cfg: the MC7 configuration
 *
 *	Calculates the size of an MC7 memory in bytes from the value of its
 *	configuration register.
 */
static unsigned int mc7_calc_size(u32 cfg)
{
	unsigned int width = G_WIDTH(cfg);
	unsigned int banks = !!(cfg & F_BKS) + 1;
	unsigned int org = !!(cfg & F_ORG) + 1;
	unsigned int density = G_DEN(cfg);
	unsigned int MBs = ((256 << density) * banks) / (org << width);

	return MBs << 20;
}

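/*
 * Worked example (illustrative) for mc7_calc_size(): with density code
 * 1, two banks, org code 1 and width code 1, the formula gives
 * MBs = ((256 << 1) * 2) / (2 << 1) = 1024 / 4 = 256, i.e. a 256 MB
 * memory (256 << 20 bytes).  The codes come from the MC7 configuration
 * register; the particular values here are made up.
 */
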
static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
		     unsigned int base_addr, const char *name)
{
	u32 cfg;

	mc7->adapter = adapter;
	mc7->name = name;
	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
	mc7->width = G_WIDTH(cfg);
}

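/*
 * Note on the offset stored by mc7_prep(): the A_MC7_* constants are
 * addresses within the PMRX instance, so keeping base_addr -
 * MC7_PMRX_BASE_ADDR lets the same constants address PMRX, PMTX and CM
 * alike, as in t3_read_reg(adapter, mc7->offset + A_MC7_CFG).
 */
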
static void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
{
	u16 devid;

	mac->adapter = adapter;
	pci_read_config_word(adapter->pdev, 0x2, &devid);

	if (devid == 0x37 && !adapter->params.vpd.xauicfg[1])
		index = 0;
	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
	mac->nucast = 1;

	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
				 F_ENRGMII, 0);
	}
}

static void early_hw_init(struct adapter *adapter,
			  const struct adapter_info *ai)
{
	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);

	mi1_init(adapter, ai);
	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80KHz */
		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));

	if (adapter->params.rev == 0 || !uses_xaui(adapter))
		val |= F_ENRGMII;

	/* Enable MAC clocks so we can access the registers */
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);

	val |= F_CLKDIVRESET_;
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
}

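/*
 * Worked example (illustrative) for the I2C divider programmed in
 * early_hw_init(): SCL runs at cclk / (divider + 1), so divider =
 * cclk / 80 - 1 targets 80 kHz when cclk is in kHz.  E.g. a 125000 kHz
 * core clock gives divider 1561 and SCL = 125000 / 1562 ~= 80 kHz.
 * The 125 MHz figure is only an assumption for the arithmetic; the
 * actual units of vpd.cclk are whatever the VPD provides.
 */
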
/*
 * Reset the adapter.
 * Older PCIe cards lose their config space during reset, PCI-X
 * ones don't.
 */
int t3_reset_adapter(struct adapter *adapter)
{
	int i, save_and_restore_pcie =
	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
	uint16_t devid = 0;

	if (save_and_restore_pcie)
		pci_save_state(adapter->pdev);
	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);

	/*
	 * Delay to give the device time to reset fully.
	 * XXX The delay time should be tuned.
	 */
	for (i = 0; i < 10; i++) {
		msleep(50);
		pci_read_config_word(adapter->pdev, 0x00, &devid);
		if (devid == 0x1425)
			break;
	}

	if (devid != 0x1425)
		return -1;

	if (save_and_restore_pcie)
		pci_restore_state(adapter->pdev);
	return 0;
}

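/*
 * Note: 0x1425 above is Chelsio's PCI vendor ID, read from config
 * space offset 0x00, so the loop simply waits until the device
 * responds to config reads again after the warm reset.
 */
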
static int init_parity(struct adapter *adap)
{
	int i, err, addr;

	if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	for (err = i = 0; !err && i < 16; i++)
		err = clear_sge_ctxt(adap, i, F_EGRESS);
	for (i = 0xfff0; !err && i <= 0xffff; i++)
		err = clear_sge_ctxt(adap, i, F_EGRESS);
	for (i = 0; !err && i < SGE_QSETS; i++)
		err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
	if (err)
		return err;

	t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
	for (i = 0; i < 4; i++)
		for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
			t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
				     F_IBQDBGWR | V_IBQDBGQID(i) |
				     V_IBQDBGADDR(addr));
			err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
					      F_IBQDBGBUSY, 0, 2, 1);
			if (err)
				return err;
		}
	return 0;
}

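/*
 * Illustrative note on the t3_wait_op_done() call in init_parity(): it
 * polls A_CIM_IBQ_DBG_CFG with mask F_IBQDBGBUSY and polarity 0, i.e.
 * it succeeds once the busy bit reads back clear, retrying up to 2
 * times with a 1 us delay.  A sketch of the same pattern (the attempt
 * and delay values below are arbitrary):
 */
#if 0
static int example_wait_not_busy(struct adapter *adap)
{
	/* up to 10 reads, 5 us apart, waiting for F_IBQDBGBUSY == 0 */
	return t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY,
			       0, 10, 5);
}
#endif
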
/*
 * Initialize adapter SW state for the various HW modules, set initial values
 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
 * interface.
 */
int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
		    int reset)
{
	int ret;
	unsigned int i, j = -1;

	get_pci_mode(adapter, &adapter->params.pci);

	adapter->params.info = ai;
	adapter->params.nports = ai->nports0 + ai->nports1;
	adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
	/*
	 * We used to only run the "adapter check task" once a second if
	 * we had PHYs which didn't support interrupts (we would check
	 * their link status once a second).  Now we check other conditions
	 * in that routine which could potentially impose a very high
	 * interrupt load on the system.  As such, we now always scan the
	 * adapter state once a second ...
	 */
	adapter->params.linkpoll_period = 10;
	adapter->params.stats_update_period = is_10G(adapter) ?
	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
	adapter->params.pci.vpd_cap_addr =
	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
	if (!adapter->params.pci.vpd_cap_addr)
		return -ENODEV;
	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	if (reset && t3_reset_adapter(adapter))
		return -1;

	t3_sge_prep(adapter, &adapter->params.sge);

	if (adapter->params.vpd.mclk) {
		struct tp_params *p = &adapter->params.tp;

		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");

		p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
		p->cm_size = t3_mc7_size(&adapter->cm);
		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
		p->chan_tx_size = p->pmtx_size / p->nchan;
		p->rx_pg_size = 64 * 1024;
		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
		p->ntimer_qs = p->cm_size >= (128 << 20) ||
		    adapter->params.rev > 0 ? 12 : 6;
	}

	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
				  t3_mc7_size(&adapter->pmtx) &&
				  t3_mc7_size(&adapter->cm);

	if (is_offload(adapter)) {
		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
		    DEFAULT_NFILTERS : 0;
		adapter->params.mc5.nroutes = 0;
		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);

		init_mtus(adapter->params.mtus);
		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
	}

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	for_each_port(adapter, i) {
		u8 hw_addr[6];
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		while (!adapter->params.vpd.port_type[++j])
			;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		if (!pti->phy_prep) {
			CH_ALERT(adapter, "Invalid port type index %d\n",
				 adapter->params.vpd.port_type[j]);
			return -EINVAL;
		}

		p->phy.mdio.dev = adapter->port[i];
		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
				    ai->mdio_ops);
		if (ret)
			return ret;
		mac_prep(&p->mac, adapter, j);

		/*
		 * The VPD EEPROM stores the base Ethernet address for the
		 * card.  A port's address is derived from the base by adding
		 * the port's index to the base's low octet.
		 */
		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;

		memcpy(adapter->port[i]->dev_addr, hw_addr, ETH_ALEN);
		init_link_config(&p->link_config, p->phy.caps);
		p->phy.ops->power_down(&p->phy, 1);

		/*
		 * If the PHY doesn't support interrupts for link status
		 * changes, schedule a scan of the adapter links at least
		 * once a second.
		 */
		if (!(p->phy.caps & SUPPORTED_IRQ) &&
		    adapter->params.linkpoll_period > 10)
			adapter->params.linkpoll_period = 10;
	}

	return 0;
}

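/*
 * Worked example (illustrative) of the MAC address derivation in
 * t3_prep_adapter(): with a (made-up) VPD base address of
 * 00:07:43:ab:cd:10, port 0 gets 00:07:43:ab:cd:10 and port 1 gets
 * 00:07:43:ab:cd:11; only the low octet is incremented per port.
 */
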
void t3_led_ready(struct adapter *adapter)
{
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
}

int t3_replay_prep_adapter(struct adapter *adapter)
{
	const struct adapter_info *ai = adapter->params.info;
	unsigned int i, j = -1;
	int ret;

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	for_each_port(adapter, i) {
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		while (!adapter->params.vpd.port_type[++j])
			;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
		if (ret)
			return ret;
		p->phy.ops->power_down(&p->phy, 1);
	}

	return 0;
}