1/*
2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses.  You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 *     Redistribution and use in source and binary forms, with or
11 *     without modification, are permitted provided that the following
12 *     conditions are met:
13 *
14 *      - Redistributions of source code must retain the above
15 *        copyright notice, this list of conditions and the following
16 *        disclaimer.
17 *
18 *      - Redistributions in binary form must reproduce the above
19 *        copyright notice, this list of conditions and the following
20 *        disclaimer in the documentation and/or other materials
21 *        provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
35#include <linux/module.h>
36#include <linux/init.h>
37#include <linux/pci.h>
38#include <linux/dma-mapping.h>
39#include <linux/netdevice.h>
40#include <linux/etherdevice.h>
41#include <linux/if_vlan.h>
42#include <linux/mdio.h>
43#include <linux/sockios.h>
44#include <linux/workqueue.h>
45#include <linux/proc_fs.h>
46#include <linux/rtnetlink.h>
47#include <linux/firmware.h>
48#include <linux/log2.h>
49#include <linux/stringify.h>
50#include <linux/sched.h>
51#include <linux/slab.h>
52#include <linux/uaccess.h>
53#include <linux/nospec.h>
54
55#include "common.h"
56#include "cxgb3_ioctl.h"
57#include "regs.h"
58#include "cxgb3_offload.h"
59#include "version.h"
60
61#include "cxgb3_ctl_defs.h"
62#include "t3_cpl.h"
63#include "firmware_exports.h"
64
65enum {
66	MAX_TXQ_ENTRIES = 16384,
67	MAX_CTRL_TXQ_ENTRIES = 1024,
68	MAX_RSPQ_ENTRIES = 16384,
69	MAX_RX_BUFFERS = 16384,
70	MAX_RX_JUMBO_BUFFERS = 16384,
71	MIN_TXQ_ENTRIES = 4,
72	MIN_CTRL_TXQ_ENTRIES = 4,
73	MIN_RSPQ_ENTRIES = 32,
74	MIN_FL_ENTRIES = 32
75};
76
77#define PORT_MASK ((1 << MAX_NPORTS) - 1)
78
79#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
80			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
81			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
82
83#define EEPROM_MAGIC 0x38E2F10C
84
85#define CH_DEVICE(devid, idx) \
86	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
87
88static const struct pci_device_id cxgb3_pci_tbl[] = {
89	CH_DEVICE(0x20, 0),	/* PE9000 */
90	CH_DEVICE(0x21, 1),	/* T302E */
91	CH_DEVICE(0x22, 2),	/* T310E */
92	CH_DEVICE(0x23, 3),	/* T320X */
93	CH_DEVICE(0x24, 1),	/* T302X */
94	CH_DEVICE(0x25, 3),	/* T320E */
95	CH_DEVICE(0x26, 2),	/* T310X */
96	CH_DEVICE(0x30, 2),	/* T3B10 */
97	CH_DEVICE(0x31, 3),	/* T3B20 */
98	CH_DEVICE(0x32, 1),	/* T3B02 */
99	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
100	CH_DEVICE(0x36, 3),	/* S320E-CR */
101	CH_DEVICE(0x37, 7),	/* N320E-G2 */
102	{0,}
103};
104
105MODULE_DESCRIPTION(DRV_DESC);
106MODULE_AUTHOR("Chelsio Communications");
107MODULE_LICENSE("Dual BSD/GPL");
108MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
109
110static int dflt_msg_enable = DFLT_MSG_ENABLE;
111
112module_param(dflt_msg_enable, int, 0644);
113MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
114
115/*
116 * The driver uses the best interrupt scheme available on a platform in the
117 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
118 * of these schemes the driver may consider as follows:
119 *
120 * msi = 2: choose from among all three options
121 * msi = 1: only consider MSI and pin interrupts
122 * msi = 0: force pin interrupts
123 */
124static int msi = 2;
125
126module_param(msi, int, 0644);
127MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
128
129/*
130 * The driver enables offload as a default.
131 * To disable it, use ofld_disable = 1.
132 */
133
134static int ofld_disable = 0;
135
136module_param(ofld_disable, int, 0644);
137MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
138
139/*
140 * We have work elements that we need to cancel when an interface is taken
141 * down.  Normally the work elements would be executed by keventd but that
142 * can deadlock because of linkwatch.  If our close method takes the rtnl
143 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
144 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
145 * for our work to complete.  Get our own work queue to solve this.
146 */
147struct workqueue_struct *cxgb3_wq;
148
149/**
150 *	link_report - show link status and link speed/duplex
151 *	@dev: the port whose settings are to be reported
152 *
153 *	Shows the link status, speed, and duplex of a port.
154 */
155static void link_report(struct net_device *dev)
156{
157	if (!netif_carrier_ok(dev))
158		netdev_info(dev, "link down\n");
159	else {
160		const char *s = "10Mbps";
161		const struct port_info *p = netdev_priv(dev);
162
163		switch (p->link_config.speed) {
164		case SPEED_10000:
165			s = "10Gbps";
166			break;
167		case SPEED_1000:
168			s = "1000Mbps";
169			break;
170		case SPEED_100:
171			s = "100Mbps";
172			break;
173		}
174
175		netdev_info(dev, "link up, %s, %s-duplex\n",
176			    s, p->link_config.duplex == DUPLEX_FULL
177			    ? "full" : "half");
178	}
179}
180
181static void enable_tx_fifo_drain(struct adapter *adapter,
182				 struct port_info *pi)
183{
184	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
185			 F_ENDROPPKT);
186	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
187	t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
188	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
189}
190
191static void disable_tx_fifo_drain(struct adapter *adapter,
192				  struct port_info *pi)
193{
194	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
195			 F_ENDROPPKT, 0);
196}
197
198void t3_os_link_fault(struct adapter *adap, int port_id, int state)
199{
200	struct net_device *dev = adap->port[port_id];
201	struct port_info *pi = netdev_priv(dev);
202
203	if (state == netif_carrier_ok(dev))
204		return;
205
206	if (state) {
207		struct cmac *mac = &pi->mac;
208
209		netif_carrier_on(dev);
210
211		disable_tx_fifo_drain(adap, pi);
212
213		/* Clear local faults */
214		t3_xgm_intr_disable(adap, pi->port_id);
215		t3_read_reg(adap, A_XGM_INT_STATUS +
216				    pi->mac.offset);
217		t3_write_reg(adap,
218			     A_XGM_INT_CAUSE + pi->mac.offset,
219			     F_XGM_INT);
220
221		t3_set_reg_field(adap,
222				 A_XGM_INT_ENABLE +
223				 pi->mac.offset,
224				 F_XGM_INT, F_XGM_INT);
225		t3_xgm_intr_enable(adap, pi->port_id);
226
227		t3_mac_enable(mac, MAC_DIRECTION_TX);
228	} else {
229		netif_carrier_off(dev);
230
231		/* Flush TX FIFO */
232		enable_tx_fifo_drain(adap, pi);
233	}
234	link_report(dev);
235}
236
237/**
238 *	t3_os_link_changed - handle link status changes
239 *	@adapter: the adapter associated with the link change
240 *	@port_id: the port index whose limk status has changed
241 *	@link_stat: the new status of the link
242 *	@speed: the new speed setting
243 *	@duplex: the new duplex setting
244 *	@pause: the new flow-control setting
245 *
246 *	This is the OS-dependent handler for link status changes.  The OS
247 *	neutral handler takes care of most of the processing for these events,
248 *	then calls this handler for any OS-specific processing.
249 */
250void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
251			int speed, int duplex, int pause)
252{
253	struct net_device *dev = adapter->port[port_id];
254	struct port_info *pi = netdev_priv(dev);
255	struct cmac *mac = &pi->mac;
256
257	/* Skip changes from disabled ports. */
258	if (!netif_running(dev))
259		return;
260
261	if (link_stat != netif_carrier_ok(dev)) {
262		if (link_stat) {
263			disable_tx_fifo_drain(adapter, pi);
264
265			t3_mac_enable(mac, MAC_DIRECTION_RX);
266
267			/* Clear local faults */
268			t3_xgm_intr_disable(adapter, pi->port_id);
269			t3_read_reg(adapter, A_XGM_INT_STATUS +
270				    pi->mac.offset);
271			t3_write_reg(adapter,
272				     A_XGM_INT_CAUSE + pi->mac.offset,
273				     F_XGM_INT);
274
275			t3_set_reg_field(adapter,
276					 A_XGM_INT_ENABLE + pi->mac.offset,
277					 F_XGM_INT, F_XGM_INT);
278			t3_xgm_intr_enable(adapter, pi->port_id);
279
280			netif_carrier_on(dev);
281		} else {
282			netif_carrier_off(dev);
283
284			t3_xgm_intr_disable(adapter, pi->port_id);
285			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
286			t3_set_reg_field(adapter,
287					 A_XGM_INT_ENABLE + pi->mac.offset,
288					 F_XGM_INT, 0);
289
290			if (is_10G(adapter))
291				pi->phy.ops->power_down(&pi->phy, 1);
292
293			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
294			t3_mac_disable(mac, MAC_DIRECTION_RX);
295			t3_link_start(&pi->phy, mac, &pi->link_config);
296
297			/* Flush TX FIFO */
298			enable_tx_fifo_drain(adapter, pi);
299		}
300
301		link_report(dev);
302	}
303}
304
305/**
306 *	t3_os_phymod_changed - handle PHY module changes
307 *	@adap: the adapter associated with the link change
308 *	@port_id: the port index whose limk status has changed
309 *
310 *	This is the OS-dependent handler for PHY module changes.  It is
311 *	invoked when a PHY module is removed or inserted for any OS-specific
312 *	processing.
313 */
314void t3_os_phymod_changed(struct adapter *adap, int port_id)
315{
316	static const char *mod_str[] = {
317		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
318	};
319
320	const struct net_device *dev = adap->port[port_id];
321	const struct port_info *pi = netdev_priv(dev);
322
323	if (pi->phy.modtype == phy_modtype_none)
324		netdev_info(dev, "PHY module unplugged\n");
325	else
326		netdev_info(dev, "%s PHY module inserted\n",
327			    mod_str[pi->phy.modtype]);
328}
329
330static void cxgb_set_rxmode(struct net_device *dev)
331{
332	struct port_info *pi = netdev_priv(dev);
333
334	t3_mac_set_rx_mode(&pi->mac, dev);
335}
336
337/**
338 *	link_start - enable a port
339 *	@dev: the device to enable
340 *
341 *	Performs the MAC and PHY actions needed to enable a port.
342 */
343static void link_start(struct net_device *dev)
344{
345	struct port_info *pi = netdev_priv(dev);
346	struct cmac *mac = &pi->mac;
347
348	t3_mac_reset(mac);
349	t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
350	t3_mac_set_mtu(mac, dev->mtu);
351	t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
352	t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
353	t3_mac_set_rx_mode(mac, dev);
354	t3_link_start(&pi->phy, mac, &pi->link_config);
355	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
356}
357
358static inline void cxgb_disable_msi(struct adapter *adapter)
359{
360	if (adapter->flags & USING_MSIX) {
361		pci_disable_msix(adapter->pdev);
362		adapter->flags &= ~USING_MSIX;
363	} else if (adapter->flags & USING_MSI) {
364		pci_disable_msi(adapter->pdev);
365		adapter->flags &= ~USING_MSI;
366	}
367}
368
369/*
370 * Interrupt handler for asynchronous events used with MSI-X.
371 */
372static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
373{
374	t3_slow_intr_handler(cookie);
375	return IRQ_HANDLED;
376}
377
378/*
379 * Name the MSI-X interrupts.
380 */
381static void name_msix_vecs(struct adapter *adap)
382{
383	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
384
385	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
386	adap->msix_info[0].desc[n] = 0;
387
388	for_each_port(adap, j) {
389		struct net_device *d = adap->port[j];
390		const struct port_info *pi = netdev_priv(d);
391
392		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
393			snprintf(adap->msix_info[msi_idx].desc, n,
394				 "%s-%d", d->name, pi->first_qset + i);
395			adap->msix_info[msi_idx].desc[n] = 0;
396		}
397	}
398}
399
400static int request_msix_data_irqs(struct adapter *adap)
401{
402	int i, j, err, qidx = 0;
403
404	for_each_port(adap, i) {
405		int nqsets = adap2pinfo(adap, i)->nqsets;
406
407		for (j = 0; j < nqsets; ++j) {
408			err = request_irq(adap->msix_info[qidx + 1].vec,
409					  t3_intr_handler(adap,
410							  adap->sge.qs[qidx].
411							  rspq.polling), 0,
412					  adap->msix_info[qidx + 1].desc,
413					  &adap->sge.qs[qidx]);
414			if (err) {
415				while (--qidx >= 0)
416					free_irq(adap->msix_info[qidx + 1].vec,
417						 &adap->sge.qs[qidx]);
418				return err;
419			}
420			qidx++;
421		}
422	}
423	return 0;
424}
425
426static void free_irq_resources(struct adapter *adapter)
427{
428	if (adapter->flags & USING_MSIX) {
429		int i, n = 0;
430
431		free_irq(adapter->msix_info[0].vec, adapter);
432		for_each_port(adapter, i)
433			n += adap2pinfo(adapter, i)->nqsets;
434
435		for (i = 0; i < n; ++i)
436			free_irq(adapter->msix_info[i + 1].vec,
437				 &adapter->sge.qs[i]);
438	} else
439		free_irq(adapter->pdev->irq, adapter);
440}
441
442static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
443			      unsigned long n)
444{
445	int attempts = 10;
446
447	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
448		if (!--attempts)
449			return -ETIMEDOUT;
450		msleep(10);
451	}
452	return 0;
453}
454
455static int init_tp_parity(struct adapter *adap)
456{
457	int i;
458	struct sk_buff *skb;
459	struct cpl_set_tcb_field *greq;
460	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
461
462	t3_tp_set_offload_mode(adap, 1);
463
464	for (i = 0; i < 16; i++) {
465		struct cpl_smt_write_req *req;
466
467		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
468		if (!skb)
469			skb = adap->nofail_skb;
470		if (!skb)
471			goto alloc_skb_fail;
472
473		req = __skb_put_zero(skb, sizeof(*req));
474		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
475		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
476		req->mtu_idx = NMTUS - 1;
477		req->iff = i;
478		t3_mgmt_tx(adap, skb);
479		if (skb == adap->nofail_skb) {
480			await_mgmt_replies(adap, cnt, i + 1);
481			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
482			if (!adap->nofail_skb)
483				goto alloc_skb_fail;
484		}
485	}
486
487	for (i = 0; i < 2048; i++) {
488		struct cpl_l2t_write_req *req;
489
490		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
491		if (!skb)
492			skb = adap->nofail_skb;
493		if (!skb)
494			goto alloc_skb_fail;
495
496		req = __skb_put_zero(skb, sizeof(*req));
497		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
498		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
499		req->params = htonl(V_L2T_W_IDX(i));
500		t3_mgmt_tx(adap, skb);
501		if (skb == adap->nofail_skb) {
502			await_mgmt_replies(adap, cnt, 16 + i + 1);
503			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
504			if (!adap->nofail_skb)
505				goto alloc_skb_fail;
506		}
507	}
508
509	for (i = 0; i < 2048; i++) {
510		struct cpl_rte_write_req *req;
511
512		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
513		if (!skb)
514			skb = adap->nofail_skb;
515		if (!skb)
516			goto alloc_skb_fail;
517
518		req = __skb_put_zero(skb, sizeof(*req));
519		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
520		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
521		req->l2t_idx = htonl(V_L2T_W_IDX(i));
522		t3_mgmt_tx(adap, skb);
523		if (skb == adap->nofail_skb) {
524			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
525			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
526			if (!adap->nofail_skb)
527				goto alloc_skb_fail;
528		}
529	}
530
531	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
532	if (!skb)
533		skb = adap->nofail_skb;
534	if (!skb)
535		goto alloc_skb_fail;
536
537	greq = __skb_put_zero(skb, sizeof(*greq));
538	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
539	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
540	greq->mask = cpu_to_be64(1);
541	t3_mgmt_tx(adap, skb);
542
543	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
544	if (skb == adap->nofail_skb) {
545		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
546		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
547	}
548
549	t3_tp_set_offload_mode(adap, 0);
550	return i;
551
552alloc_skb_fail:
553	t3_tp_set_offload_mode(adap, 0);
554	return -ENOMEM;
555}
556
557/**
558 *	setup_rss - configure RSS
559 *	@adap: the adapter
560 *
561 *	Sets up RSS to distribute packets to multiple receive queues.  We
562 *	configure the RSS CPU lookup table to distribute to the number of HW
563 *	receive queues, and the response queue lookup table to narrow that
564 *	down to the response queues actually configured for each port.
565 *	We always configure the RSS mapping for two ports since the mapping
566 *	table has plenty of entries.
567 */
568static void setup_rss(struct adapter *adap)
569{
570	int i;
571	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
572	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
573	u8 cpus[SGE_QSETS + 1];
574	u16 rspq_map[RSS_TABLE_SIZE + 1];
575
576	for (i = 0; i < SGE_QSETS; ++i)
577		cpus[i] = i;
578	cpus[SGE_QSETS] = 0xff;	/* terminator */
579
580	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
581		rspq_map[i] = i % nq0;
582		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
583	}
584	rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */
585
586	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
587		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
588		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
589}
590
591static void ring_dbs(struct adapter *adap)
592{
593	int i, j;
594
595	for (i = 0; i < SGE_QSETS; i++) {
596		struct sge_qset *qs = &adap->sge.qs[i];
597
598		if (qs->adap)
599			for (j = 0; j < SGE_TXQ_PER_SET; j++)
600				t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
601	}
602}
603
604static void init_napi(struct adapter *adap)
605{
606	int i;
607
608	for (i = 0; i < SGE_QSETS; i++) {
609		struct sge_qset *qs = &adap->sge.qs[i];
610
611		if (qs->adap)
612			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
613				       64);
614	}
615
616	/*
617	 * netif_napi_add() can be called only once per napi_struct because it
618	 * adds each new napi_struct to a list.  Be careful not to call it a
619	 * second time, e.g., during EEH recovery, by making a note of it.
620	 */
621	adap->flags |= NAPI_INIT;
622}
623
624/*
625 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
626 * both netdevices representing interfaces and the dummy ones for the extra
627 * queues.
628 */
629static void quiesce_rx(struct adapter *adap)
630{
631	int i;
632
633	for (i = 0; i < SGE_QSETS; i++)
634		if (adap->sge.qs[i].adap)
635			napi_disable(&adap->sge.qs[i].napi);
636}
637
638static void enable_all_napi(struct adapter *adap)
639{
640	int i;
641	for (i = 0; i < SGE_QSETS; i++)
642		if (adap->sge.qs[i].adap)
643			napi_enable(&adap->sge.qs[i].napi);
644}
645
646/**
647 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
648 *	@adap: the adapter
649 *
650 *	Determines how many sets of SGE queues to use and initializes them.
651 *	We support multiple queue sets per port if we have MSI-X, otherwise
652 *	just one queue set per port.
653 */
654static int setup_sge_qsets(struct adapter *adap)
655{
656	int i, j, err, irq_idx = 0, qset_idx = 0;
657	unsigned int ntxq = SGE_TXQ_PER_SET;
658
659	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
660		irq_idx = -1;
661
662	for_each_port(adap, i) {
663		struct net_device *dev = adap->port[i];
664		struct port_info *pi = netdev_priv(dev);
665
666		pi->qs = &adap->sge.qs[pi->first_qset];
667		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
668			err = t3_sge_alloc_qset(adap, qset_idx, 1,
669				(adap->flags & USING_MSIX) ? qset_idx + 1 :
670							     irq_idx,
671				&adap->params.sge.qset[qset_idx], ntxq, dev,
672				netdev_get_tx_queue(dev, j));
673			if (err) {
674				t3_free_sge_resources(adap);
675				return err;
676			}
677		}
678	}
679
680	return 0;
681}
682
683static ssize_t attr_show(struct device *d, char *buf,
684			 ssize_t(*format) (struct net_device *, char *))
685{
686	ssize_t len;
687
688	/* Synchronize with ioctls that may shut down the device */
689	rtnl_lock();
690	len = (*format) (to_net_dev(d), buf);
691	rtnl_unlock();
692	return len;
693}
694
695static ssize_t attr_store(struct device *d,
696			  const char *buf, size_t len,
697			  ssize_t(*set) (struct net_device *, unsigned int),
698			  unsigned int min_val, unsigned int max_val)
699{
700	ssize_t ret;
701	unsigned int val;
702
703	if (!capable(CAP_NET_ADMIN))
704		return -EPERM;
705
706	ret = kstrtouint(buf, 0, &val);
707	if (ret)
708		return ret;
709	if (val < min_val || val > max_val)
710		return -EINVAL;
711
712	rtnl_lock();
713	ret = (*set) (to_net_dev(d), val);
714	if (!ret)
715		ret = len;
716	rtnl_unlock();
717	return ret;
718}
719
720#define CXGB3_SHOW(name, val_expr) \
721static ssize_t format_##name(struct net_device *dev, char *buf) \
722{ \
723	struct port_info *pi = netdev_priv(dev); \
724	struct adapter *adap = pi->adapter; \
725	return sprintf(buf, "%u\n", val_expr); \
726} \
727static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
728			   char *buf) \
729{ \
730	return attr_show(d, buf, format_##name); \
731}
732
733static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
734{
735	struct port_info *pi = netdev_priv(dev);
736	struct adapter *adap = pi->adapter;
737	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
738
739	if (adap->flags & FULL_INIT_DONE)
740		return -EBUSY;
741	if (val && adap->params.rev == 0)
742		return -EINVAL;
743	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
744	    min_tids)
745		return -EINVAL;
746	adap->params.mc5.nfilters = val;
747	return 0;
748}
749
750static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
751			      const char *buf, size_t len)
752{
753	return attr_store(d, buf, len, set_nfilters, 0, ~0);
754}
755
756static ssize_t set_nservers(struct net_device *dev, unsigned int val)
757{
758	struct port_info *pi = netdev_priv(dev);
759	struct adapter *adap = pi->adapter;
760
761	if (adap->flags & FULL_INIT_DONE)
762		return -EBUSY;
763	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
764	    MC5_MIN_TIDS)
765		return -EINVAL;
766	adap->params.mc5.nservers = val;
767	return 0;
768}
769
770static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
771			      const char *buf, size_t len)
772{
773	return attr_store(d, buf, len, set_nservers, 0, ~0);
774}
775
776#define CXGB3_ATTR_R(name, val_expr) \
777CXGB3_SHOW(name, val_expr) \
778static DEVICE_ATTR(name, 0444, show_##name, NULL)
779
780#define CXGB3_ATTR_RW(name, val_expr, store_method) \
781CXGB3_SHOW(name, val_expr) \
782static DEVICE_ATTR(name, 0644, show_##name, store_method)
783
784CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
785CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
786CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
787
788static struct attribute *cxgb3_attrs[] = {
789	&dev_attr_cam_size.attr,
790	&dev_attr_nfilters.attr,
791	&dev_attr_nservers.attr,
792	NULL
793};
794
795static const struct attribute_group cxgb3_attr_group = {
796	.attrs = cxgb3_attrs,
797};
798
799static ssize_t tm_attr_show(struct device *d,
800			    char *buf, int sched)
801{
802	struct port_info *pi = netdev_priv(to_net_dev(d));
803	struct adapter *adap = pi->adapter;
804	unsigned int v, addr, bpt, cpt;
805	ssize_t len;
806
807	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
808	rtnl_lock();
809	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
810	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
811	if (sched & 1)
812		v >>= 16;
813	bpt = (v >> 8) & 0xff;
814	cpt = v & 0xff;
815	if (!cpt)
816		len = sprintf(buf, "disabled\n");
817	else {
818		v = (adap->params.vpd.cclk * 1000) / cpt;
819		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
820	}
821	rtnl_unlock();
822	return len;
823}
824
825static ssize_t tm_attr_store(struct device *d,
826			     const char *buf, size_t len, int sched)
827{
828	struct port_info *pi = netdev_priv(to_net_dev(d));
829	struct adapter *adap = pi->adapter;
830	unsigned int val;
831	ssize_t ret;
832
833	if (!capable(CAP_NET_ADMIN))
834		return -EPERM;
835
836	ret = kstrtouint(buf, 0, &val);
837	if (ret)
838		return ret;
839	if (val > 10000000)
840		return -EINVAL;
841
842	rtnl_lock();
843	ret = t3_config_sched(adap, val, sched);
844	if (!ret)
845		ret = len;
846	rtnl_unlock();
847	return ret;
848}
849
850#define TM_ATTR(name, sched) \
851static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
852			   char *buf) \
853{ \
854	return tm_attr_show(d, buf, sched); \
855} \
856static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
857			    const char *buf, size_t len) \
858{ \
859	return tm_attr_store(d, buf, len, sched); \
860} \
861static DEVICE_ATTR(name, 0644, show_##name, store_##name)
862
863TM_ATTR(sched0, 0);
864TM_ATTR(sched1, 1);
865TM_ATTR(sched2, 2);
866TM_ATTR(sched3, 3);
867TM_ATTR(sched4, 4);
868TM_ATTR(sched5, 5);
869TM_ATTR(sched6, 6);
870TM_ATTR(sched7, 7);
871
872static struct attribute *offload_attrs[] = {
873	&dev_attr_sched0.attr,
874	&dev_attr_sched1.attr,
875	&dev_attr_sched2.attr,
876	&dev_attr_sched3.attr,
877	&dev_attr_sched4.attr,
878	&dev_attr_sched5.attr,
879	&dev_attr_sched6.attr,
880	&dev_attr_sched7.attr,
881	NULL
882};
883
884static const struct attribute_group offload_attr_group = {
885	.attrs = offload_attrs,
886};
887
888/*
889 * Sends an sk_buff to an offload queue driver
890 * after dealing with any active network taps.
891 */
892static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
893{
894	int ret;
895
896	local_bh_disable();
897	ret = t3_offload_tx(tdev, skb);
898	local_bh_enable();
899	return ret;
900}
901
902static int write_smt_entry(struct adapter *adapter, int idx)
903{
904	struct cpl_smt_write_req *req;
905	struct port_info *pi = netdev_priv(adapter->port[idx]);
906	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
907
908	if (!skb)
909		return -ENOMEM;
910
911	req = __skb_put(skb, sizeof(*req));
912	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
913	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
914	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
915	req->iff = idx;
916	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
917	memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
918	skb->priority = 1;
919	offload_tx(&adapter->tdev, skb);
920	return 0;
921}
922
923static int init_smt(struct adapter *adapter)
924{
925	int i;
926
927	for_each_port(adapter, i)
928	    write_smt_entry(adapter, i);
929	return 0;
930}
931
932static void init_port_mtus(struct adapter *adapter)
933{
934	unsigned int mtus = adapter->port[0]->mtu;
935
936	if (adapter->port[1])
937		mtus |= adapter->port[1]->mtu << 16;
938	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
939}
940
941static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
942			      int hi, int port)
943{
944	struct sk_buff *skb;
945	struct mngt_pktsched_wr *req;
946	int ret;
947
948	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
949	if (!skb)
950		skb = adap->nofail_skb;
951	if (!skb)
952		return -ENOMEM;
953
954	req = skb_put(skb, sizeof(*req));
955	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
956	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
957	req->sched = sched;
958	req->idx = qidx;
959	req->min = lo;
960	req->max = hi;
961	req->binding = port;
962	ret = t3_mgmt_tx(adap, skb);
963	if (skb == adap->nofail_skb) {
964		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
965					     GFP_KERNEL);
966		if (!adap->nofail_skb)
967			ret = -ENOMEM;
968	}
969
970	return ret;
971}
972
973static int bind_qsets(struct adapter *adap)
974{
975	int i, j, err = 0;
976
977	for_each_port(adap, i) {
978		const struct port_info *pi = adap2pinfo(adap, i);
979
980		for (j = 0; j < pi->nqsets; ++j) {
981			int ret = send_pktsched_cmd(adap, 1,
982						    pi->first_qset + j, -1,
983						    -1, i);
984			if (ret)
985				err = ret;
986		}
987	}
988
989	return err;
990}
991
992#define FW_VERSION __stringify(FW_VERSION_MAJOR) "."			\
993	__stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
994#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
995#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "."		\
996	__stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
997#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
998#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
999#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
1000#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
1001MODULE_FIRMWARE(FW_FNAME);
1002MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
1003MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
1004MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
1005MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
1006MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);
1007
1008static inline const char *get_edc_fw_name(int edc_idx)
1009{
1010	const char *fw_name = NULL;
1011
1012	switch (edc_idx) {
1013	case EDC_OPT_AEL2005:
1014		fw_name = AEL2005_OPT_EDC_NAME;
1015		break;
1016	case EDC_TWX_AEL2005:
1017		fw_name = AEL2005_TWX_EDC_NAME;
1018		break;
1019	case EDC_TWX_AEL2020:
1020		fw_name = AEL2020_TWX_EDC_NAME;
1021		break;
1022	}
1023	return fw_name;
1024}
1025
1026int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
1027{
1028	struct adapter *adapter = phy->adapter;
1029	const struct firmware *fw;
1030	const char *fw_name;
1031	u32 csum;
1032	const __be32 *p;
1033	u16 *cache = phy->phy_cache;
1034	int i, ret = -EINVAL;
1035
1036	fw_name = get_edc_fw_name(edc_idx);
1037	if (fw_name)
1038		ret = request_firmware(&fw, fw_name, &adapter->pdev->dev);
1039	if (ret < 0) {
1040		dev_err(&adapter->pdev->dev,
1041			"could not upgrade firmware: unable to load %s\n",
1042			fw_name);
1043		return ret;
1044	}
1045
1046	/* check size, take checksum in account */
1047	if (fw->size > size + 4) {
1048		CH_ERR(adapter, "firmware image too large %u, expected %d\n",
1049		       (unsigned int)fw->size, size + 4);
1050		ret = -EINVAL;
1051	}
1052
1053	/* compute checksum */
1054	p = (const __be32 *)fw->data;
1055	for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
1056		csum += ntohl(p[i]);
1057
1058	if (csum != 0xffffffff) {
1059		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1060		       csum);
1061		ret = -EINVAL;
1062	}
1063
1064	for (i = 0; i < size / 4 ; i++) {
1065		*cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
1066		*cache++ = be32_to_cpu(p[i]) & 0xffff;
1067	}
1068
1069	release_firmware(fw);
1070
1071	return ret;
1072}
1073
1074static int upgrade_fw(struct adapter *adap)
1075{
1076	int ret;
1077	const struct firmware *fw;
1078	struct device *dev = &adap->pdev->dev;
1079
1080	ret = request_firmware(&fw, FW_FNAME, dev);
1081	if (ret < 0) {
1082		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
1083			FW_FNAME);
1084		return ret;
1085	}
1086	ret = t3_load_fw(adap, fw->data, fw->size);
1087	release_firmware(fw);
1088
1089	if (ret == 0)
1090		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
1091			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
1092	else
1093		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
1094			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
1095
1096	return ret;
1097}
1098
1099static inline char t3rev2char(struct adapter *adapter)
1100{
1101	char rev = 0;
1102
1103	switch(adapter->params.rev) {
1104	case T3_REV_B:
1105	case T3_REV_B2:
1106		rev = 'b';
1107		break;
1108	case T3_REV_C:
1109		rev = 'c';
1110		break;
1111	}
1112	return rev;
1113}
1114
1115static int update_tpsram(struct adapter *adap)
1116{
1117	const struct firmware *tpsram;
1118	char buf[64];
1119	struct device *dev = &adap->pdev->dev;
1120	int ret;
1121	char rev;
1122
1123	rev = t3rev2char(adap);
1124	if (!rev)
1125		return 0;
1126
1127	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);
1128
1129	ret = request_firmware(&tpsram, buf, dev);
1130	if (ret < 0) {
1131		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
1132			buf);
1133		return ret;
1134	}
1135
1136	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
1137	if (ret)
1138		goto release_tpsram;
1139
1140	ret = t3_set_proto_sram(adap, tpsram->data);
1141	if (ret == 0)
1142		dev_info(dev,
1143			 "successful update of protocol engine "
1144			 "to %d.%d.%d\n",
1145			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1146	else
1147		dev_err(dev, "failed to update of protocol engine %d.%d.%d\n",
1148			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1149	if (ret)
1150		dev_err(dev, "loading protocol SRAM failed\n");
1151
1152release_tpsram:
1153	release_firmware(tpsram);
1154
1155	return ret;
1156}
1157
1158/**
1159 * t3_synchronize_rx - wait for current Rx processing on a port to complete
1160 * @adap: the adapter
1161 * @p: the port
1162 *
1163 * Ensures that current Rx processing on any of the queues associated with
1164 * the given port completes before returning.  We do this by acquiring and
1165 * releasing the locks of the response queues associated with the port.
1166 */
1167static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
1168{
1169	int i;
1170
1171	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
1172		struct sge_rspq *q = &adap->sge.qs[i].rspq;
1173
1174		spin_lock_irq(&q->lock);
1175		spin_unlock_irq(&q->lock);
1176	}
1177}
1178
1179static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
1180{
1181	struct port_info *pi = netdev_priv(dev);
1182	struct adapter *adapter = pi->adapter;
1183
1184	if (adapter->params.rev > 0) {
1185		t3_set_vlan_accel(adapter, 1 << pi->port_id,
1186				  features & NETIF_F_HW_VLAN_CTAG_RX);
1187	} else {
1188		/* single control for all ports */
1189		unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;
1190
1191		for_each_port(adapter, i)
1192			have_vlans |=
1193				adapter->port[i]->features &
1194				NETIF_F_HW_VLAN_CTAG_RX;
1195
1196		t3_set_vlan_accel(adapter, 1, have_vlans);
1197	}
1198	t3_synchronize_rx(adapter, pi);
1199}
1200
1201/**
1202 *	cxgb_up - enable the adapter
1203 *	@adap: adapter being enabled
1204 *
1205 *	Called when the first port is enabled, this function performs the
1206 *	actions necessary to make an adapter operational, such as completing
1207 *	the initialization of HW modules, and enabling interrupts.
1208 *
1209 *	Must be called with the rtnl lock held.
1210 */
1211static int cxgb_up(struct adapter *adap)
1212{
1213	int i, err;
1214
1215	if (!(adap->flags & FULL_INIT_DONE)) {
1216		err = t3_check_fw_version(adap);
1217		if (err == -EINVAL) {
1218			err = upgrade_fw(adap);
1219			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
1220				FW_VERSION_MAJOR, FW_VERSION_MINOR,
1221				FW_VERSION_MICRO, err ? "failed" : "succeeded");
1222		}
1223
1224		err = t3_check_tpsram_version(adap);
1225		if (err == -EINVAL) {
1226			err = update_tpsram(adap);
1227			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
1228				TP_VERSION_MAJOR, TP_VERSION_MINOR,
1229				TP_VERSION_MICRO, err ? "failed" : "succeeded");
1230		}
1231
1232		/*
1233		 * Clear interrupts now to catch errors if t3_init_hw fails.
1234		 * We clear them again later as initialization may trigger
1235		 * conditions that can interrupt.
1236		 */
1237		t3_intr_clear(adap);
1238
1239		err = t3_init_hw(adap, 0);
1240		if (err)
1241			goto out;
1242
1243		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
1244		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
1245
1246		err = setup_sge_qsets(adap);
1247		if (err)
1248			goto out;
1249
1250		for_each_port(adap, i)
1251			cxgb_vlan_mode(adap->port[i], adap->port[i]->features);
1252
1253		setup_rss(adap);
1254		if (!(adap->flags & NAPI_INIT))
1255			init_napi(adap);
1256
1257		t3_start_sge_timers(adap);
1258		adap->flags |= FULL_INIT_DONE;
1259	}
1260
1261	t3_intr_clear(adap);
1262
1263	if (adap->flags & USING_MSIX) {
1264		name_msix_vecs(adap);
1265		err = request_irq(adap->msix_info[0].vec,
1266				  t3_async_intr_handler, 0,
1267				  adap->msix_info[0].desc, adap);
1268		if (err)
1269			goto irq_err;
1270
1271		err = request_msix_data_irqs(adap);
1272		if (err) {
1273			free_irq(adap->msix_info[0].vec, adap);
1274			goto irq_err;
1275		}
1276	} else if ((err = request_irq(adap->pdev->irq,
1277				      t3_intr_handler(adap,
1278						      adap->sge.qs[0].rspq.
1279						      polling),
1280				      (adap->flags & USING_MSI) ?
1281				       0 : IRQF_SHARED,
1282				      adap->name, adap)))
1283		goto irq_err;
1284
1285	enable_all_napi(adap);
1286	t3_sge_start(adap);
1287	t3_intr_enable(adap);
1288
1289	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
1290	    is_offload(adap) && init_tp_parity(adap) == 0)
1291		adap->flags |= TP_PARITY_INIT;
1292
1293	if (adap->flags & TP_PARITY_INIT) {
1294		t3_write_reg(adap, A_TP_INT_CAUSE,
1295			     F_CMCACHEPERR | F_ARPLUTPERR);
1296		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
1297	}
1298
1299	if (!(adap->flags & QUEUES_BOUND)) {
1300		int ret = bind_qsets(adap);
1301
1302		if (ret < 0) {
1303			CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
1304			t3_intr_disable(adap);
1305			quiesce_rx(adap);
1306			free_irq_resources(adap);
1307			err = ret;
1308			goto out;
1309		}
1310		adap->flags |= QUEUES_BOUND;
1311	}
1312
1313out:
1314	return err;
1315irq_err:
1316	CH_ERR(adap, "request_irq failed, err %d\n", err);
1317	goto out;
1318}
1319
1320/*
1321 * Release resources when all the ports and offloading have been stopped.
1322 */
1323static void cxgb_down(struct adapter *adapter, int on_wq)
1324{
1325	t3_sge_stop(adapter);
1326	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
1327	t3_intr_disable(adapter);
1328	spin_unlock_irq(&adapter->work_lock);
1329
1330	free_irq_resources(adapter);
1331	quiesce_rx(adapter);
1332	t3_sge_stop(adapter);
1333	if (!on_wq)
1334		flush_workqueue(cxgb3_wq);/* wait for external IRQ handler */
1335}
1336
1337static void schedule_chk_task(struct adapter *adap)
1338{
1339	unsigned int timeo;
1340
1341	timeo = adap->params.linkpoll_period ?
1342	    (HZ * adap->params.linkpoll_period) / 10 :
1343	    adap->params.stats_update_period * HZ;
1344	if (timeo)
1345		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
1346}
1347
1348static int offload_open(struct net_device *dev)
1349{
1350	struct port_info *pi = netdev_priv(dev);
1351	struct adapter *adapter = pi->adapter;
1352	struct t3cdev *tdev = dev2t3cdev(dev);
1353	int adap_up = adapter->open_device_map & PORT_MASK;
1354	int err;
1355
1356	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1357		return 0;
1358
1359	if (!adap_up && (err = cxgb_up(adapter)) < 0)
1360		goto out;
1361
1362	t3_tp_set_offload_mode(adapter, 1);
1363	tdev->lldev = adapter->port[0];
1364	err = cxgb3_offload_activate(adapter);
1365	if (err)
1366		goto out;
1367
1368	init_port_mtus(adapter);
1369	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1370		     adapter->params.b_wnd,
1371		     adapter->params.rev == 0 ?
1372		     adapter->port[0]->mtu : 0xffff);
1373	init_smt(adapter);
1374
1375	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1376		dev_dbg(&dev->dev, "cannot create sysfs group\n");
1377
1378	/* Call back all registered clients */
1379	cxgb3_add_clients(tdev);
1380
1381out:
1382	/* restore them in case the offload module has changed them */
1383	if (err) {
1384		t3_tp_set_offload_mode(adapter, 0);
1385		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1386		cxgb3_set_dummy_ops(tdev);
1387	}
1388	return err;
1389}
1390
1391static int offload_close(struct t3cdev *tdev)
1392{
1393	struct adapter *adapter = tdev2adap(tdev);
1394	struct t3c_data *td = T3C_DATA(tdev);
1395
1396	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1397		return 0;
1398
1399	/* Call back all registered clients */
1400	cxgb3_remove_clients(tdev);
1401
1402	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1403
1404	/* Flush work scheduled while releasing TIDs */
1405	flush_work(&td->tid_release_task);
1406
1407	tdev->lldev = NULL;
1408	cxgb3_set_dummy_ops(tdev);
1409	t3_tp_set_offload_mode(adapter, 0);
1410	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1411
1412	if (!adapter->open_device_map)
1413		cxgb_down(adapter, 0);
1414
1415	cxgb3_offload_deactivate(adapter);
1416	return 0;
1417}
1418
1419static int cxgb_open(struct net_device *dev)
1420{
1421	struct port_info *pi = netdev_priv(dev);
1422	struct adapter *adapter = pi->adapter;
1423	int other_ports = adapter->open_device_map & PORT_MASK;
1424	int err;
1425
1426	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1427		return err;
1428
1429	set_bit(pi->port_id, &adapter->open_device_map);
1430	if (is_offload(adapter) && !ofld_disable) {
1431		err = offload_open(dev);
1432		if (err)
1433			pr_warn("Could not initialize offload capabilities\n");
1434	}
1435
1436	netif_set_real_num_tx_queues(dev, pi->nqsets);
1437	err = netif_set_real_num_rx_queues(dev, pi->nqsets);
1438	if (err)
1439		return err;
1440	link_start(dev);
1441	t3_port_intr_enable(adapter, pi->port_id);
1442	netif_tx_start_all_queues(dev);
1443	if (!other_ports)
1444		schedule_chk_task(adapter);
1445
1446	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
1447	return 0;
1448}
1449
1450static int __cxgb_close(struct net_device *dev, int on_wq)
1451{
1452	struct port_info *pi = netdev_priv(dev);
1453	struct adapter *adapter = pi->adapter;
1454
1455
1456	if (!adapter->open_device_map)
1457		return 0;
1458
1459	/* Stop link fault interrupts */
1460	t3_xgm_intr_disable(adapter, pi->port_id);
1461	t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
1462
1463	t3_port_intr_disable(adapter, pi->port_id);
1464	netif_tx_stop_all_queues(dev);
1465	pi->phy.ops->power_down(&pi->phy, 1);
1466	netif_carrier_off(dev);
1467	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1468
1469	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
1470	clear_bit(pi->port_id, &adapter->open_device_map);
1471	spin_unlock_irq(&adapter->work_lock);
1472
1473	if (!(adapter->open_device_map & PORT_MASK))
1474		cancel_delayed_work_sync(&adapter->adap_check_task);
1475
1476	if (!adapter->open_device_map)
1477		cxgb_down(adapter, on_wq);
1478
1479	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
1480	return 0;
1481}
1482
1483static int cxgb_close(struct net_device *dev)
1484{
1485	return __cxgb_close(dev, 0);
1486}
1487
1488static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1489{
1490	struct port_info *pi = netdev_priv(dev);
1491	struct adapter *adapter = pi->adapter;
1492	struct net_device_stats *ns = &dev->stats;
1493	const struct mac_stats *pstats;
1494
1495	spin_lock(&adapter->stats_lock);
1496	pstats = t3_mac_update_stats(&pi->mac);
1497	spin_unlock(&adapter->stats_lock);
1498
1499	ns->tx_bytes = pstats->tx_octets;
1500	ns->tx_packets = pstats->tx_frames;
1501	ns->rx_bytes = pstats->rx_octets;
1502	ns->rx_packets = pstats->rx_frames;
1503	ns->multicast = pstats->rx_mcast_frames;
1504
1505	ns->tx_errors = pstats->tx_underrun;
1506	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1507	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1508	    pstats->rx_fifo_ovfl;
1509
1510	/* detailed rx_errors */
1511	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1512	ns->rx_over_errors = 0;
1513	ns->rx_crc_errors = pstats->rx_fcs_errs;
1514	ns->rx_frame_errors = pstats->rx_symbol_errs;
1515	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1516	ns->rx_missed_errors = pstats->rx_cong_drops;
1517
1518	/* detailed tx_errors */
1519	ns->tx_aborted_errors = 0;
1520	ns->tx_carrier_errors = 0;
1521	ns->tx_fifo_errors = pstats->tx_underrun;
1522	ns->tx_heartbeat_errors = 0;
1523	ns->tx_window_errors = 0;
1524	return ns;
1525}
1526
1527static u32 get_msglevel(struct net_device *dev)
1528{
1529	struct port_info *pi = netdev_priv(dev);
1530	struct adapter *adapter = pi->adapter;
1531
1532	return adapter->msg_enable;
1533}
1534
1535static void set_msglevel(struct net_device *dev, u32 val)
1536{
1537	struct port_info *pi = netdev_priv(dev);
1538	struct adapter *adapter = pi->adapter;
1539
1540	adapter->msg_enable = val;
1541}
1542
1543static const char stats_strings[][ETH_GSTRING_LEN] = {
1544	"TxOctetsOK         ",
1545	"TxFramesOK         ",
1546	"TxMulticastFramesOK",
1547	"TxBroadcastFramesOK",
1548	"TxPauseFrames      ",
1549	"TxUnderrun         ",
1550	"TxExtUnderrun      ",
1551
1552	"TxFrames64         ",
1553	"TxFrames65To127    ",
1554	"TxFrames128To255   ",
1555	"TxFrames256To511   ",
1556	"TxFrames512To1023  ",
1557	"TxFrames1024To1518 ",
1558	"TxFrames1519ToMax  ",
1559
1560	"RxOctetsOK         ",
1561	"RxFramesOK         ",
1562	"RxMulticastFramesOK",
1563	"RxBroadcastFramesOK",
1564	"RxPauseFrames      ",
1565	"RxFCSErrors        ",
1566	"RxSymbolErrors     ",
1567	"RxShortErrors      ",
1568	"RxJabberErrors     ",
1569	"RxLengthErrors     ",
1570	"RxFIFOoverflow     ",
1571
1572	"RxFrames64         ",
1573	"RxFrames65To127    ",
1574	"RxFrames128To255   ",
1575	"RxFrames256To511   ",
1576	"RxFrames512To1023  ",
1577	"RxFrames1024To1518 ",
1578	"RxFrames1519ToMax  ",
1579
1580	"PhyFIFOErrors      ",
1581	"TSO                ",
1582	"VLANextractions    ",
1583	"VLANinsertions     ",
1584	"TxCsumOffload      ",
1585	"RxCsumGood         ",
1586	"LroAggregated      ",
1587	"LroFlushed         ",
1588	"LroNoDesc          ",
1589	"RxDrops            ",
1590
1591	"CheckTXEnToggled   ",
1592	"CheckResets        ",
1593
1594	"LinkFaults         ",
1595};
1596
1597static int get_sset_count(struct net_device *dev, int sset)
1598{
1599	switch (sset) {
1600	case ETH_SS_STATS:
1601		return ARRAY_SIZE(stats_strings);
1602	default:
1603		return -EOPNOTSUPP;
1604	}
1605}
1606
1607#define T3_REGMAP_SIZE (3 * 1024)
1608
1609static int get_regs_len(struct net_device *dev)
1610{
1611	return T3_REGMAP_SIZE;
1612}
1613
1614static int get_eeprom_len(struct net_device *dev)
1615{
1616	return EEPROMSIZE;
1617}
1618
1619static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1620{
1621	struct port_info *pi = netdev_priv(dev);
1622	struct adapter *adapter = pi->adapter;
1623	u32 fw_vers = 0;
1624	u32 tp_vers = 0;
1625
1626	spin_lock(&adapter->stats_lock);
1627	t3_get_fw_version(adapter, &fw_vers);
1628	t3_get_tp_version(adapter, &tp_vers);
1629	spin_unlock(&adapter->stats_lock);
1630
1631	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1632	strlcpy(info->bus_info, pci_name(adapter->pdev),
1633		sizeof(info->bus_info));
1634	if (fw_vers)
1635		snprintf(info->fw_version, sizeof(info->fw_version),
1636			 "%s %u.%u.%u TP %u.%u.%u",
1637			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1638			 G_FW_VERSION_MAJOR(fw_vers),
1639			 G_FW_VERSION_MINOR(fw_vers),
1640			 G_FW_VERSION_MICRO(fw_vers),
1641			 G_TP_VERSION_MAJOR(tp_vers),
1642			 G_TP_VERSION_MINOR(tp_vers),
1643			 G_TP_VERSION_MICRO(tp_vers));
1644}
1645
1646static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1647{
1648	if (stringset == ETH_SS_STATS)
1649		memcpy(data, stats_strings, sizeof(stats_strings));
1650}
1651
1652static unsigned long collect_sge_port_stats(struct adapter *adapter,
1653					    struct port_info *p, int idx)
1654{
1655	int i;
1656	unsigned long tot = 0;
1657
1658	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1659		tot += adapter->sge.qs[i].port_stats[idx];
1660	return tot;
1661}
1662
1663static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1664		      u64 *data)
1665{
1666	struct port_info *pi = netdev_priv(dev);
1667	struct adapter *adapter = pi->adapter;
1668	const struct mac_stats *s;
1669
1670	spin_lock(&adapter->stats_lock);
1671	s = t3_mac_update_stats(&pi->mac);
1672	spin_unlock(&adapter->stats_lock);
1673
1674	*data++ = s->tx_octets;
1675	*data++ = s->tx_frames;
1676	*data++ = s->tx_mcast_frames;
1677	*data++ = s->tx_bcast_frames;
1678	*data++ = s->tx_pause;
1679	*data++ = s->tx_underrun;
1680	*data++ = s->tx_fifo_urun;
1681
1682	*data++ = s->tx_frames_64;
1683	*data++ = s->tx_frames_65_127;
1684	*data++ = s->tx_frames_128_255;
1685	*data++ = s->tx_frames_256_511;
1686	*data++ = s->tx_frames_512_1023;
1687	*data++ = s->tx_frames_1024_1518;
1688	*data++ = s->tx_frames_1519_max;
1689
1690	*data++ = s->rx_octets;
1691	*data++ = s->rx_frames;
1692	*data++ = s->rx_mcast_frames;
1693	*data++ = s->rx_bcast_frames;
1694	*data++ = s->rx_pause;
1695	*data++ = s->rx_fcs_errs;
1696	*data++ = s->rx_symbol_errs;
1697	*data++ = s->rx_short;
1698	*data++ = s->rx_jabber;
1699	*data++ = s->rx_too_long;
1700	*data++ = s->rx_fifo_ovfl;
1701
1702	*data++ = s->rx_frames_64;
1703	*data++ = s->rx_frames_65_127;
1704	*data++ = s->rx_frames_128_255;
1705	*data++ = s->rx_frames_256_511;
1706	*data++ = s->rx_frames_512_1023;
1707	*data++ = s->rx_frames_1024_1518;
1708	*data++ = s->rx_frames_1519_max;
1709
1710	*data++ = pi->phy.fifo_errors;
1711
1712	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1713	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1714	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1715	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1716	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1717	*data++ = 0;
1718	*data++ = 0;
1719	*data++ = 0;
1720	*data++ = s->rx_cong_drops;
1721
1722	*data++ = s->num_toggled;
1723	*data++ = s->num_resets;
1724
1725	*data++ = s->link_faults;
1726}
1727
1728static inline void reg_block_dump(struct adapter *ap, void *buf,
1729				  unsigned int start, unsigned int end)
1730{
1731	u32 *p = buf + start;
1732
1733	for (; start <= end; start += sizeof(u32))
1734		*p++ = t3_read_reg(ap, start);
1735}
1736
1737static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1738		     void *buf)
1739{
1740	struct port_info *pi = netdev_priv(dev);
1741	struct adapter *ap = pi->adapter;
1742
1743	/*
1744	 * Version scheme:
1745	 * bits 0..9: chip version
1746	 * bits 10..15: chip revision
1747	 * bit 31: set for PCIe cards
1748	 */
1749	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1750
1751	/*
1752	 * We skip the MAC statistics registers because they are clear-on-read.
1753	 * Also reading multi-register stats would need to synchronize with the
1754	 * periodic mac stats accumulation.  Hard to justify the complexity.
1755	 */
1756	memset(buf, 0, T3_REGMAP_SIZE);
1757	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1758	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1759	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1760	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1761	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1762	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1763		       XGM_REG(A_XGM_SERDES_STAT3, 1));
1764	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1765		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1766}
1767
1768static int restart_autoneg(struct net_device *dev)
1769{
1770	struct port_info *p = netdev_priv(dev);
1771
1772	if (!netif_running(dev))
1773		return -EAGAIN;
1774	if (p->link_config.autoneg != AUTONEG_ENABLE)
1775		return -EINVAL;
1776	p->phy.ops->autoneg_restart(&p->phy);
1777	return 0;
1778}
1779
1780static int set_phys_id(struct net_device *dev,
1781		       enum ethtool_phys_id_state state)
1782{
1783	struct port_info *pi = netdev_priv(dev);
1784	struct adapter *adapter = pi->adapter;
1785
1786	switch (state) {
1787	case ETHTOOL_ID_ACTIVE:
1788		return 1;	/* cycle on/off once per second */
1789
1790	case ETHTOOL_ID_OFF:
1791		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
1792		break;
1793
1794	case ETHTOOL_ID_ON:
1795	case ETHTOOL_ID_INACTIVE:
1796		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1797			 F_GPIO0_OUT_VAL);
1798	}
1799
1800	return 0;
1801}
1802
1803static int get_link_ksettings(struct net_device *dev,
1804			      struct ethtool_link_ksettings *cmd)
1805{
1806	struct port_info *p = netdev_priv(dev);
1807	u32 supported;
1808
1809	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1810						p->link_config.supported);
1811	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1812						p->link_config.advertising);
1813
1814	if (netif_carrier_ok(dev)) {
1815		cmd->base.speed = p->link_config.speed;
1816		cmd->base.duplex = p->link_config.duplex;
1817	} else {
1818		cmd->base.speed = SPEED_UNKNOWN;
1819		cmd->base.duplex = DUPLEX_UNKNOWN;
1820	}
1821
1822	ethtool_convert_link_mode_to_legacy_u32(&supported,
1823						cmd->link_modes.supported);
1824
1825	cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1826	cmd->base.phy_address = p->phy.mdio.prtad;
1827	cmd->base.autoneg = p->link_config.autoneg;
1828	return 0;
1829}
1830
1831static int speed_duplex_to_caps(int speed, int duplex)
1832{
1833	int cap = 0;
1834
1835	switch (speed) {
1836	case SPEED_10:
1837		if (duplex == DUPLEX_FULL)
1838			cap = SUPPORTED_10baseT_Full;
1839		else
1840			cap = SUPPORTED_10baseT_Half;
1841		break;
1842	case SPEED_100:
1843		if (duplex == DUPLEX_FULL)
1844			cap = SUPPORTED_100baseT_Full;
1845		else
1846			cap = SUPPORTED_100baseT_Half;
1847		break;
1848	case SPEED_1000:
1849		if (duplex == DUPLEX_FULL)
1850			cap = SUPPORTED_1000baseT_Full;
1851		else
1852			cap = SUPPORTED_1000baseT_Half;
1853		break;
1854	case SPEED_10000:
1855		if (duplex == DUPLEX_FULL)
1856			cap = SUPPORTED_10000baseT_Full;
1857	}
1858	return cap;
1859}
1860
1861#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1862		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1863		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1864		      ADVERTISED_10000baseT_Full)
1865
1866static int set_link_ksettings(struct net_device *dev,
1867			      const struct ethtool_link_ksettings *cmd)
1868{
1869	struct port_info *p = netdev_priv(dev);
1870	struct link_config *lc = &p->link_config;
1871	u32 advertising;
1872
1873	ethtool_convert_link_mode_to_legacy_u32(&advertising,
1874						cmd->link_modes.advertising);
1875
1876	if (!(lc->supported & SUPPORTED_Autoneg)) {
1877		/*
1878		 * PHY offers a single speed/duplex.  See if that's what's
1879		 * being requested.
1880		 */
1881		if (cmd->base.autoneg == AUTONEG_DISABLE) {
1882			u32 speed = cmd->base.speed;
1883			int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1884			if (lc->supported & cap)
1885				return 0;
1886		}
1887		return -EINVAL;
1888	}
1889
1890	if (cmd->base.autoneg == AUTONEG_DISABLE) {
1891		u32 speed = cmd->base.speed;
1892		int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1893
1894		if (!(lc->supported & cap) || (speed == SPEED_1000))
1895			return -EINVAL;
1896		lc->requested_speed = speed;
1897		lc->requested_duplex = cmd->base.duplex;
1898		lc->advertising = 0;
1899	} else {
1900		advertising &= ADVERTISED_MASK;
1901		advertising &= lc->supported;
1902		if (!advertising)
1903			return -EINVAL;
1904		lc->requested_speed = SPEED_INVALID;
1905		lc->requested_duplex = DUPLEX_INVALID;
1906		lc->advertising = advertising | ADVERTISED_Autoneg;
1907	}
1908	lc->autoneg = cmd->base.autoneg;
1909	if (netif_running(dev))
1910		t3_link_start(&p->phy, &p->mac, lc);
1911	return 0;
1912}
1913
1914static void get_pauseparam(struct net_device *dev,
1915			   struct ethtool_pauseparam *epause)
1916{
1917	struct port_info *p = netdev_priv(dev);
1918
1919	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1920	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1921	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1922}
1923
1924static int set_pauseparam(struct net_device *dev,
1925			  struct ethtool_pauseparam *epause)
1926{
1927	struct port_info *p = netdev_priv(dev);
1928	struct link_config *lc = &p->link_config;
1929
1930	if (epause->autoneg == AUTONEG_DISABLE)
1931		lc->requested_fc = 0;
1932	else if (lc->supported & SUPPORTED_Autoneg)
1933		lc->requested_fc = PAUSE_AUTONEG;
1934	else
1935		return -EINVAL;
1936
1937	if (epause->rx_pause)
1938		lc->requested_fc |= PAUSE_RX;
1939	if (epause->tx_pause)
1940		lc->requested_fc |= PAUSE_TX;
1941	if (lc->autoneg == AUTONEG_ENABLE) {
1942		if (netif_running(dev))
1943			t3_link_start(&p->phy, &p->mac, lc);
1944	} else {
1945		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1946		if (netif_running(dev))
1947			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1948	}
1949	return 0;
1950}
1951
1952static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1953{
1954	struct port_info *pi = netdev_priv(dev);
1955	struct adapter *adapter = pi->adapter;
1956	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1957
1958	e->rx_max_pending = MAX_RX_BUFFERS;
1959	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1960	e->tx_max_pending = MAX_TXQ_ENTRIES;
1961
1962	e->rx_pending = q->fl_size;
1963	e->rx_mini_pending = q->rspq_size;
1964	e->rx_jumbo_pending = q->jumbo_size;
1965	e->tx_pending = q->txq_size[0];
1966}
1967
1968static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1969{
1970	struct port_info *pi = netdev_priv(dev);
1971	struct adapter *adapter = pi->adapter;
1972	struct qset_params *q;
1973	int i;
1974
1975	if (e->rx_pending > MAX_RX_BUFFERS ||
1976	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1977	    e->tx_pending > MAX_TXQ_ENTRIES ||
1978	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1979	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1980	    e->rx_pending < MIN_FL_ENTRIES ||
1981	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1982	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1983		return -EINVAL;
1984
1985	if (adapter->flags & FULL_INIT_DONE)
1986		return -EBUSY;
1987
1988	q = &adapter->params.sge.qset[pi->first_qset];
1989	for (i = 0; i < pi->nqsets; ++i, ++q) {
1990		q->rspq_size = e->rx_mini_pending;
1991		q->fl_size = e->rx_pending;
1992		q->jumbo_size = e->rx_jumbo_pending;
1993		q->txq_size[0] = e->tx_pending;
1994		q->txq_size[1] = e->tx_pending;
1995		q->txq_size[2] = e->tx_pending;
1996	}
1997	return 0;
1998}
1999
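/*
 * Set the rx interrupt holdoff time.  The value is programmed into the
 * response queue holdoff timer as usecs * 10, hence the range check
 * against M_NEWTIMER.
 */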
2000static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2001{
2002	struct port_info *pi = netdev_priv(dev);
2003	struct adapter *adapter = pi->adapter;
2004	struct qset_params *qsp;
2005	struct sge_qset *qs;
2006	int i;
2007
2008	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
2009		return -EINVAL;
2010
2011	for (i = 0; i < pi->nqsets; i++) {
2012		qsp = &adapter->params.sge.qset[i];
2013		qs = &adapter->sge.qs[i];
2014		qsp->coalesce_usecs = c->rx_coalesce_usecs;
2015		t3_update_qset_coalesce(qs, qsp);
2016	}
2017
2018	return 0;
2019}
2020
2021static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2022{
2023	struct port_info *pi = netdev_priv(dev);
2024	struct adapter *adapter = pi->adapter;
2025	struct qset_params *q = adapter->params.sge.qset;
2026
2027	c->rx_coalesce_usecs = q->coalesce_usecs;
2028	return 0;
2029}
2030
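/*
 * Read a byte range from the serial EEPROM.  The EEPROM is accessed in
 * aligned 32-bit words, so the covering words are read into a bounce
 * buffer and the requested bytes are copied out of it.
 */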
2031static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
2033{
2034	struct port_info *pi = netdev_priv(dev);
2035	struct adapter *adapter = pi->adapter;
2036	int i, err = 0;
2037
2038	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2039	if (!buf)
2040		return -ENOMEM;
2041
2042	e->magic = EEPROM_MAGIC;
2043	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
2045
2046	if (!err)
2047		memcpy(data, buf + e->offset, e->len);
2048	kfree(buf);
2049	return err;
2050}
2051
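/*
 * Write a byte range to the serial EEPROM.  Unaligned requests are handled
 * by reading the partially covered words first and merging in the new
 * bytes; write protection is lifted only for the duration of the update.
 */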
2052static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
2054{
2055	struct port_info *pi = netdev_priv(dev);
2056	struct adapter *adapter = pi->adapter;
2057	u32 aligned_offset, aligned_len;
2058	__le32 *p;
2059	u8 *buf;
2060	int err;
2061
2062	if (eeprom->magic != EEPROM_MAGIC)
2063		return -EINVAL;
2064
2065	aligned_offset = eeprom->offset & ~3;
2066	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2067
2068	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2069		buf = kmalloc(aligned_len, GFP_KERNEL);
2070		if (!buf)
2071			return -ENOMEM;
2072		err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
2073		if (!err && aligned_len > 4)
2074			err = t3_seeprom_read(adapter,
2075					      aligned_offset + aligned_len - 4,
					      (__le32 *)&buf[aligned_len - 4]);
2077		if (err)
2078			goto out;
2079		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2080	} else
2081		buf = data;
2082
2083	err = t3_seeprom_wp(adapter, 0);
2084	if (err)
2085		goto out;
2086
2087	for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
2088		err = t3_seeprom_write(adapter, aligned_offset, *p);
2089		aligned_offset += 4;
2090	}
2091
2092	if (!err)
2093		err = t3_seeprom_wp(adapter, 1);
2094out:
2095	if (buf != data)
2096		kfree(buf);
2097	return err;
2098}
2099
2100static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2101{
2102	wol->supported = 0;
2103	wol->wolopts = 0;
2104	memset(&wol->sopass, 0, sizeof(wol->sopass));
2105}
2106
2107static const struct ethtool_ops cxgb_ethtool_ops = {
2108	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
2109	.get_drvinfo = get_drvinfo,
2110	.get_msglevel = get_msglevel,
2111	.set_msglevel = set_msglevel,
2112	.get_ringparam = get_sge_param,
2113	.set_ringparam = set_sge_param,
2114	.get_coalesce = get_coalesce,
2115	.set_coalesce = set_coalesce,
2116	.get_eeprom_len = get_eeprom_len,
2117	.get_eeprom = get_eeprom,
2118	.set_eeprom = set_eeprom,
2119	.get_pauseparam = get_pauseparam,
2120	.set_pauseparam = set_pauseparam,
2121	.get_link = ethtool_op_get_link,
2122	.get_strings = get_strings,
2123	.set_phys_id = set_phys_id,
2124	.nway_reset = restart_autoneg,
2125	.get_sset_count = get_sset_count,
2126	.get_ethtool_stats = get_stats,
2127	.get_regs_len = get_regs_len,
2128	.get_regs = get_regs,
2129	.get_wol = get_wol,
2130	.get_link_ksettings = get_link_ksettings,
2131	.set_link_ksettings = set_link_ksettings,
2132};
2133
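/*
 * Range check for ioctl arguments.  Negative values are "leave unchanged"
 * sentinels and are always accepted.
 */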
2134static int in_range(int val, int lo, int hi)
2135{
2136	return val < 0 || (val <= hi && val >= lo);
2137}
2138
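/*
 * Handler for the driver-private SIOCCHIOCTL commands.  The leading u32 of
 * the user buffer selects the sub-command; each case copies in and
 * validates its own argument structure before touching adapter state.
 */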
2139static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2140{
2141	struct port_info *pi = netdev_priv(dev);
2142	struct adapter *adapter = pi->adapter;
2143	u32 cmd;
2144	int ret;
2145
2146	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
2147		return -EFAULT;
2148
2149	switch (cmd) {
2150	case CHELSIO_SET_QSET_PARAMS:{
2151		int i;
2152		struct qset_params *q;
2153		struct ch_qset_params t;
2154		int q1 = pi->first_qset;
2155		int nqsets = pi->nqsets;
2156
2157		if (!capable(CAP_NET_ADMIN))
2158			return -EPERM;
2159		if (copy_from_user(&t, useraddr, sizeof(t)))
2160			return -EFAULT;
2161		if (t.cmd != CHELSIO_SET_QSET_PARAMS)
2162			return -EINVAL;
2163		if (t.qset_idx >= SGE_QSETS)
2164			return -EINVAL;
2165		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2166		    !in_range(t.cong_thres, 0, 255) ||
2167		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2168			      MAX_TXQ_ENTRIES) ||
2169		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2170			      MAX_TXQ_ENTRIES) ||
2171		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2172			      MAX_CTRL_TXQ_ENTRIES) ||
2173		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2174			      MAX_RX_BUFFERS) ||
2175		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2176			      MAX_RX_JUMBO_BUFFERS) ||
2177		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2178			      MAX_RSPQ_ENTRIES))
2179			return -EINVAL;
2180
2181		if ((adapter->flags & FULL_INIT_DONE) &&
2182			(t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2183			t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2184			t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2185			t.polling >= 0 || t.cong_thres >= 0))
2186			return -EBUSY;
2187
2188		/* Allow setting of any available qset when offload enabled */
2189		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2190			q1 = 0;
2191			for_each_port(adapter, i) {
2192				pi = adap2pinfo(adapter, i);
2193				nqsets += pi->first_qset + pi->nqsets;
2194			}
2195		}
2196
2197		if (t.qset_idx < q1)
2198			return -EINVAL;
2199		if (t.qset_idx > q1 + nqsets - 1)
2200			return -EINVAL;
2201
2202		q = &adapter->params.sge.qset[t.qset_idx];
2203
2204		if (t.rspq_size >= 0)
2205			q->rspq_size = t.rspq_size;
2206		if (t.fl_size[0] >= 0)
2207			q->fl_size = t.fl_size[0];
2208		if (t.fl_size[1] >= 0)
2209			q->jumbo_size = t.fl_size[1];
2210		if (t.txq_size[0] >= 0)
2211			q->txq_size[0] = t.txq_size[0];
2212		if (t.txq_size[1] >= 0)
2213			q->txq_size[1] = t.txq_size[1];
2214		if (t.txq_size[2] >= 0)
2215			q->txq_size[2] = t.txq_size[2];
2216		if (t.cong_thres >= 0)
2217			q->cong_thres = t.cong_thres;
2218		if (t.intr_lat >= 0) {
2219			struct sge_qset *qs =
2220				&adapter->sge.qs[t.qset_idx];
2221
2222			q->coalesce_usecs = t.intr_lat;
2223			t3_update_qset_coalesce(qs, q);
2224		}
2225		if (t.polling >= 0) {
2226			if (adapter->flags & USING_MSIX)
2227				q->polling = t.polling;
2228			else {
2229				/* No polling with INTx for T3A */
2230				if (adapter->params.rev == 0 &&
2231					!(adapter->flags & USING_MSI))
2232					t.polling = 0;
2233
2234				for (i = 0; i < SGE_QSETS; i++) {
				q = &adapter->params.sge.qset[i];
2237					q->polling = t.polling;
2238				}
2239			}
2240		}
2241
2242		if (t.lro >= 0) {
2243			if (t.lro)
2244				dev->wanted_features |= NETIF_F_GRO;
2245			else
2246				dev->wanted_features &= ~NETIF_F_GRO;
2247			netdev_update_features(dev);
2248		}
2249
2250		break;
2251	}
2252	case CHELSIO_GET_QSET_PARAMS:{
2253		struct qset_params *q;
2254		struct ch_qset_params t;
2255		int q1 = pi->first_qset;
2256		int nqsets = pi->nqsets;
2257		int i;
2258
2259		if (copy_from_user(&t, useraddr, sizeof(t)))
2260			return -EFAULT;
2261
2262		if (t.cmd != CHELSIO_GET_QSET_PARAMS)
2263			return -EINVAL;
2264
2265		/* Display qsets for all ports when offload enabled */
2266		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2267			q1 = 0;
2268			for_each_port(adapter, i) {
2269				pi = adap2pinfo(adapter, i);
2270				nqsets = pi->first_qset + pi->nqsets;
2271			}
2272		}
2273
2274		if (t.qset_idx >= nqsets)
2275			return -EINVAL;
2276		t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
2277
2278		q = &adapter->params.sge.qset[q1 + t.qset_idx];
2279		t.rspq_size = q->rspq_size;
2280		t.txq_size[0] = q->txq_size[0];
2281		t.txq_size[1] = q->txq_size[1];
2282		t.txq_size[2] = q->txq_size[2];
2283		t.fl_size[0] = q->fl_size;
2284		t.fl_size[1] = q->jumbo_size;
2285		t.polling = q->polling;
2286		t.lro = !!(dev->features & NETIF_F_GRO);
2287		t.intr_lat = q->coalesce_usecs;
2288		t.cong_thres = q->cong_thres;
2289		t.qnum = q1;
2290
2291		if (adapter->flags & USING_MSIX)
2292			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2293		else
2294			t.vector = adapter->pdev->irq;
2295
2296		if (copy_to_user(useraddr, &t, sizeof(t)))
2297			return -EFAULT;
2298		break;
2299	}
2300	case CHELSIO_SET_QSET_NUM:{
2301		struct ch_reg edata;
2302		unsigned int i, first_qset = 0, other_qsets = 0;
2303
2304		if (!capable(CAP_NET_ADMIN))
2305			return -EPERM;
2306		if (adapter->flags & FULL_INIT_DONE)
2307			return -EBUSY;
2308		if (copy_from_user(&edata, useraddr, sizeof(edata)))
2309			return -EFAULT;
2310		if (edata.cmd != CHELSIO_SET_QSET_NUM)
2311			return -EINVAL;
2312		if (edata.val < 1 ||
2313			(edata.val > 1 && !(adapter->flags & USING_MSIX)))
2314			return -EINVAL;
2315
2316		for_each_port(adapter, i)
2317			if (adapter->port[i] && adapter->port[i] != dev)
2318				other_qsets += adap2pinfo(adapter, i)->nqsets;
2319
2320		if (edata.val + other_qsets > SGE_QSETS)
2321			return -EINVAL;
2322
2323		pi->nqsets = edata.val;
2324
2325		for_each_port(adapter, i)
2326			if (adapter->port[i]) {
2327				pi = adap2pinfo(adapter, i);
2328				pi->first_qset = first_qset;
2329				first_qset += pi->nqsets;
2330			}
2331		break;
2332	}
2333	case CHELSIO_GET_QSET_NUM:{
2334		struct ch_reg edata;
2335
2336		memset(&edata, 0, sizeof(struct ch_reg));
2337
2338		edata.cmd = CHELSIO_GET_QSET_NUM;
2339		edata.val = pi->nqsets;
2340		if (copy_to_user(useraddr, &edata, sizeof(edata)))
2341			return -EFAULT;
2342		break;
2343	}
2344	case CHELSIO_LOAD_FW:{
2345		u8 *fw_data;
2346		struct ch_mem_range t;
2347
2348		if (!capable(CAP_SYS_RAWIO))
2349			return -EPERM;
2350		if (copy_from_user(&t, useraddr, sizeof(t)))
2351			return -EFAULT;
2352		if (t.cmd != CHELSIO_LOAD_FW)
2353			return -EINVAL;
2354		/* Check t.len sanity ? */
2355		fw_data = memdup_user(useraddr + sizeof(t), t.len);
2356		if (IS_ERR(fw_data))
2357			return PTR_ERR(fw_data);
2358
2359		ret = t3_load_fw(adapter, fw_data, t.len);
2360		kfree(fw_data);
2361		if (ret)
2362			return ret;
2363		break;
2364	}
2365	case CHELSIO_SETMTUTAB:{
2366		struct ch_mtus m;
2367		int i;
2368
2369		if (!is_offload(adapter))
2370			return -EOPNOTSUPP;
2371		if (!capable(CAP_NET_ADMIN))
2372			return -EPERM;
2373		if (offload_running(adapter))
2374			return -EBUSY;
2375		if (copy_from_user(&m, useraddr, sizeof(m)))
2376			return -EFAULT;
2377		if (m.cmd != CHELSIO_SETMTUTAB)
2378			return -EINVAL;
2379		if (m.nmtus != NMTUS)
2380			return -EINVAL;
2381		if (m.mtus[0] < 81)	/* accommodate SACK */
2382			return -EINVAL;
2383
2384		/* MTUs must be in ascending order */
2385		for (i = 1; i < NMTUS; ++i)
2386			if (m.mtus[i] < m.mtus[i - 1])
2387				return -EINVAL;
2388
2389		memcpy(adapter->params.mtus, m.mtus,
2390			sizeof(adapter->params.mtus));
2391		break;
2392	}
2393	case CHELSIO_GET_PM:{
2394		struct tp_params *p = &adapter->params.tp;
2395		struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2396
2397		if (!is_offload(adapter))
2398			return -EOPNOTSUPP;
2399		m.tx_pg_sz = p->tx_pg_size;
2400		m.tx_num_pg = p->tx_num_pgs;
2401		m.rx_pg_sz = p->rx_pg_size;
2402		m.rx_num_pg = p->rx_num_pgs;
2403		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2404		if (copy_to_user(useraddr, &m, sizeof(m)))
2405			return -EFAULT;
2406		break;
2407	}
2408	case CHELSIO_SET_PM:{
2409		struct ch_pm m;
2410		struct tp_params *p = &adapter->params.tp;
2411
2412		if (!is_offload(adapter))
2413			return -EOPNOTSUPP;
2414		if (!capable(CAP_NET_ADMIN))
2415			return -EPERM;
2416		if (adapter->flags & FULL_INIT_DONE)
2417			return -EBUSY;
2418		if (copy_from_user(&m, useraddr, sizeof(m)))
2419			return -EFAULT;
2420		if (m.cmd != CHELSIO_SET_PM)
2421			return -EINVAL;
2422		if (!is_power_of_2(m.rx_pg_sz) ||
2423			!is_power_of_2(m.tx_pg_sz))
2424			return -EINVAL;	/* not power of 2 */
2425		if (!(m.rx_pg_sz & 0x14000))
2426			return -EINVAL;	/* not 16KB or 64KB */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;	/* not 16KB..16MB in powers of 4 */
2429		if (m.tx_num_pg == -1)
2430			m.tx_num_pg = p->tx_num_pgs;
2431		if (m.rx_num_pg == -1)
2432			m.rx_num_pg = p->rx_num_pgs;
2433		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2434			return -EINVAL;
2435		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2436			m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2437			return -EINVAL;
2438		p->rx_pg_size = m.rx_pg_sz;
2439		p->tx_pg_size = m.tx_pg_sz;
2440		p->rx_num_pgs = m.rx_num_pg;
2441		p->tx_num_pgs = m.tx_num_pg;
2442		break;
2443	}
2444	case CHELSIO_GET_MEM:{
2445		struct ch_mem_range t;
2446		struct mc7 *mem;
2447		u64 buf[32];
2448
2449		if (!is_offload(adapter))
2450			return -EOPNOTSUPP;
2451		if (!capable(CAP_NET_ADMIN))
2452			return -EPERM;
2453		if (!(adapter->flags & FULL_INIT_DONE))
2454			return -EIO;	/* need the memory controllers */
2455		if (copy_from_user(&t, useraddr, sizeof(t)))
2456			return -EFAULT;
2457		if (t.cmd != CHELSIO_GET_MEM)
2458			return -EINVAL;
2459		if ((t.addr & 7) || (t.len & 7))
2460			return -EINVAL;
2461		if (t.mem_id == MEM_CM)
2462			mem = &adapter->cm;
2463		else if (t.mem_id == MEM_PMRX)
2464			mem = &adapter->pmrx;
2465		else if (t.mem_id == MEM_PMTX)
2466			mem = &adapter->pmtx;
2467		else
2468			return -EINVAL;
2469
2470		/*
2471		 * Version scheme:
2472		 * bits 0..9: chip version
2473		 * bits 10..15: chip revision
2474		 */
2475		t.version = 3 | (adapter->params.rev << 10);
2476		if (copy_to_user(useraddr, &t, sizeof(t)))
2477			return -EFAULT;
2478
2479		/*
2480		 * Read 256 bytes at a time as len can be large and we don't
2481		 * want to use huge intermediate buffers.
2482		 */
2483		useraddr += sizeof(t);	/* advance to start of buffer */
2484		while (t.len) {
2485			unsigned int chunk =
2486				min_t(unsigned int, t.len, sizeof(buf));
2487
			ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
2491			if (ret)
2492				return ret;
2493			if (copy_to_user(useraddr, buf, chunk))
2494				return -EFAULT;
2495			useraddr += chunk;
2496			t.addr += chunk;
2497			t.len -= chunk;
2498		}
2499		break;
2500	}
2501	case CHELSIO_SET_TRACE_FILTER:{
2502		struct ch_trace t;
2503		const struct trace_params *tp;
2504
2505		if (!capable(CAP_NET_ADMIN))
2506			return -EPERM;
2507		if (!offload_running(adapter))
2508			return -EAGAIN;
2509		if (copy_from_user(&t, useraddr, sizeof(t)))
2510			return -EFAULT;
2511		if (t.cmd != CHELSIO_SET_TRACE_FILTER)
2512			return -EINVAL;
2513
2514		tp = (const struct trace_params *)&t.sip;
2515		if (t.config_tx)
2516			t3_config_trace_filter(adapter, tp, 0,
2517						t.invert_match,
2518						t.trace_tx);
2519		if (t.config_rx)
2520			t3_config_trace_filter(adapter, tp, 1,
2521						t.invert_match,
2522						t.trace_rx);
2523		break;
2524	}
2525	default:
2526		return -EOPNOTSUPP;
2527	}
2528	return 0;
2529}
2530
2531static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2532{
2533	struct mii_ioctl_data *data = if_mii(req);
2534	struct port_info *pi = netdev_priv(dev);
2535	struct adapter *adapter = pi->adapter;
2536
2537	switch (cmd) {
2538	case SIOCGMIIREG:
2539	case SIOCSMIIREG:
2540		/* Convert phy_id from older PRTAD/DEVAD format */
2541		if (is_10G(adapter) &&
2542		    !mdio_phy_id_is_c45(data->phy_id) &&
2543		    (data->phy_id & 0x1f00) &&
2544		    !(data->phy_id & 0xe0e0))
2545			data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2546						       data->phy_id & 0x1f);
2547		fallthrough;
2548	case SIOCGMIIPHY:
2549		return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2550	case SIOCCHIOCTL:
2551		return cxgb_extension_ioctl(dev, req->ifr_data);
2552	default:
2553		return -EOPNOTSUPP;
2554	}
2555}
2556
2557static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2558{
2559	struct port_info *pi = netdev_priv(dev);
2560	struct adapter *adapter = pi->adapter;
2561	int ret;
2562
	ret = t3_mac_set_mtu(&pi->mac, new_mtu);
	if (ret)
		return ret;
2565	dev->mtu = new_mtu;
2566	init_port_mtus(adapter);
2567	if (adapter->params.rev == 0 && offload_running(adapter))
2568		t3_load_mtus(adapter, adapter->params.mtus,
2569			     adapter->params.a_wnd, adapter->params.b_wnd,
2570			     adapter->port[0]->mtu);
2571	return 0;
2572}
2573
2574static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2575{
2576	struct port_info *pi = netdev_priv(dev);
2577	struct adapter *adapter = pi->adapter;
2578	struct sockaddr *addr = p;
2579
2580	if (!is_valid_ether_addr(addr->sa_data))
2581		return -EADDRNOTAVAIL;
2582
2583	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2584	t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2585	if (offload_running(adapter))
2586		write_smt_entry(adapter, pi->port_id);
2587	return 0;
2588}
2589
2590static netdev_features_t cxgb_fix_features(struct net_device *dev,
2591	netdev_features_t features)
2592{
	/*
	 * There is no support for enabling/disabling rx and tx VLAN
	 * acceleration separately, so keep the tx flag in the same state as
	 * the rx flag.
	 */
2597	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2598		features |= NETIF_F_HW_VLAN_CTAG_TX;
2599	else
2600		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2601
2602	return features;
2603}
2604
2605static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2606{
2607	netdev_features_t changed = dev->features ^ features;
2608
2609	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2610		cxgb_vlan_mode(dev, features);
2611
2612	return 0;
2613}
2614
2615#ifdef CONFIG_NET_POLL_CONTROLLER
2616static void cxgb_netpoll(struct net_device *dev)
2617{
2618	struct port_info *pi = netdev_priv(dev);
2619	struct adapter *adapter = pi->adapter;
2620	int qidx;
2621
2622	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2623		struct sge_qset *qs = &adapter->sge.qs[qidx];
2624		void *source;
2625
2626		if (adapter->flags & USING_MSIX)
2627			source = qs;
2628		else
2629			source = adapter;
2630
2631		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2632	}
2633}
2634#endif
2635
2636/*
2637 * Periodic accumulation of MAC statistics.
2638 */
2639static void mac_stats_update(struct adapter *adapter)
2640{
2641	int i;
2642
2643	for_each_port(adapter, i) {
2644		struct net_device *dev = adapter->port[i];
2645		struct port_info *p = netdev_priv(dev);
2646
2647		if (netif_running(dev)) {
2648			spin_lock(&adapter->stats_lock);
2649			t3_mac_update_stats(&p->mac);
2650			spin_unlock(&adapter->stats_lock);
2651		}
2652	}
2653}
2654
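/*
 * Service pending link faults and poll the link state of ports whose PHYs
 * cannot signal link changes through an interrupt.
 */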
2655static void check_link_status(struct adapter *adapter)
2656{
2657	int i;
2658
2659	for_each_port(adapter, i) {
2660		struct net_device *dev = adapter->port[i];
2661		struct port_info *p = netdev_priv(dev);
2662		int link_fault;
2663
2664		spin_lock_irq(&adapter->work_lock);
2665		link_fault = p->link_fault;
2666		spin_unlock_irq(&adapter->work_lock);
2667
2668		if (link_fault) {
2669			t3_link_fault(adapter, i);
2670			continue;
2671		}
2672
2673		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2674			t3_xgm_intr_disable(adapter, i);
2675			t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2676
2677			t3_link_changed(adapter, i);
2678			t3_xgm_intr_enable(adapter, i);
2679		}
2680	}
2681}
2682
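/*
 * T3B2 workaround: run the MAC watchdog on each active port and, if it
 * reports a hung MAC, reinitialize the MAC with its current settings.
 */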
2683static void check_t3b2_mac(struct adapter *adapter)
2684{
2685	int i;
2686
2687	if (!rtnl_trylock())	/* synchronize with ifdown */
2688		return;
2689
2690	for_each_port(adapter, i) {
2691		struct net_device *dev = adapter->port[i];
2692		struct port_info *p = netdev_priv(dev);
2693		int status;
2694
2695		if (!netif_running(dev))
2696			continue;
2697
2698		status = 0;
2699		if (netif_running(dev) && netif_carrier_ok(dev))
2700			status = t3b2_mac_watchdog_task(&p->mac);
2701		if (status == 1)
2702			p->mac.stats.num_toggled++;
2703		else if (status == 2) {
2704			struct cmac *mac = &p->mac;
2705
2706			t3_mac_set_mtu(mac, dev->mtu);
2707			t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2708			cxgb_set_rxmode(dev);
2709			t3_link_start(&p->phy, mac, &p->link_config);
2710			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2711			t3_port_intr_enable(adapter, p->port_id);
2712			p->mac.stats.num_resets++;
2713		}
2714	}
2715	rtnl_unlock();
2716}
2717
2718
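/*
 * Periodic maintenance work: poll link state, accumulate MAC statistics,
 * run the T3B2 MAC watchdog, and count conditions (XGMAC rx FIFO overflows,
 * empty free lists) that are monitored by polling rather than interrupts.
 * Reschedules itself while any port is up.
 */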
2719static void t3_adap_check_task(struct work_struct *work)
2720{
2721	struct adapter *adapter = container_of(work, struct adapter,
2722					       adap_check_task.work);
2723	const struct adapter_params *p = &adapter->params;
2724	int port;
2725	unsigned int v, status, reset;
2726
2727	adapter->check_task_cnt++;
2728
2729	check_link_status(adapter);
2730
2731	/* Accumulate MAC stats if needed */
2732	if (!p->linkpoll_period ||
2733	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2734	    p->stats_update_period) {
2735		mac_stats_update(adapter);
2736		adapter->check_task_cnt = 0;
2737	}
2738
2739	if (p->rev == T3_REV_B2)
2740		check_t3b2_mac(adapter);
2741
	/*
	 * Scan the XGMACs for conditions we want to monitor by periodic
	 * polling rather than via interrupts: they would otherwise flood the
	 * system with interrupts, and all we really need to know is that
	 * they are occurring.  Each condition is counted and then cleared
	 * for the next polling loop.
	 */
2750	for_each_port(adapter, port) {
2751		struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
2752		u32 cause;
2753
2754		cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2755		reset = 0;
2756		if (cause & F_RXFIFO_OVERFLOW) {
2757			mac->stats.rx_fifo_ovfl++;
2758			reset |= F_RXFIFO_OVERFLOW;
2759		}
2760
2761		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2762	}
2763
2764	/*
2765	 * We do the same as above for FL_EMPTY interrupts.
2766	 */
2767	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2768	reset = 0;
2769
2770	if (status & F_FLEMPTY) {
2771		struct sge_qset *qs = &adapter->sge.qs[0];
2772		int i = 0;
2773
2774		reset |= F_FLEMPTY;
2775
2776		v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2777		    0xffff;
2778
2779		while (v) {
2780			qs->fl[i].empty += (v & 1);
2781			if (i)
2782				qs++;
2783			i ^= 1;
2784			v >>= 1;
2785		}
2786	}
2787
2788	t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2789
2790	/* Schedule the next check update if any port is active. */
2791	spin_lock_irq(&adapter->work_lock);
2792	if (adapter->open_device_map & PORT_MASK)
2793		schedule_chk_task(adapter);
2794	spin_unlock_irq(&adapter->work_lock);
2795}
2796
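/*
 * Workers that forward SGE doorbell FIFO events (full, empty, dropped) to
 * the offload subsystem.
 */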
2797static void db_full_task(struct work_struct *work)
2798{
2799	struct adapter *adapter = container_of(work, struct adapter,
2800					       db_full_task);
2801
2802	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2803}
2804
2805static void db_empty_task(struct work_struct *work)
2806{
2807	struct adapter *adapter = container_of(work, struct adapter,
2808					       db_empty_task);
2809
2810	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2811}
2812
2813static void db_drop_task(struct work_struct *work)
2814{
2815	struct adapter *adapter = container_of(work, struct adapter,
2816					       db_drop_task);
2817	unsigned long delay = 1000;
2818	unsigned short r;
2819
2820	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2821
	/*
	 * Sleep a while before ringing the driver qset doorbells.
	 * The delay is between 1000 and 2023 usecs.
	 */
2826	get_random_bytes(&r, 2);
2827	delay += r & 1023;
2828	set_current_state(TASK_UNINTERRUPTIBLE);
2829	schedule_timeout(usecs_to_jiffies(delay));
2830	ring_dbs(adapter);
2831}
2832
2833/*
2834 * Processes external (PHY) interrupts in process context.
2835 */
2836static void ext_intr_task(struct work_struct *work)
2837{
2838	struct adapter *adapter = container_of(work, struct adapter,
2839					       ext_intr_handler_task);
2840	int i;
2841
2842	/* Disable link fault interrupts */
2843	for_each_port(adapter, i) {
2844		struct net_device *dev = adapter->port[i];
2845		struct port_info *p = netdev_priv(dev);
2846
2847		t3_xgm_intr_disable(adapter, i);
2848		t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2849	}
2850
2851	/* Re-enable link fault interrupts */
2852	t3_phy_intr_handler(adapter);
2853
2854	for_each_port(adapter, i)
2855		t3_xgm_intr_enable(adapter, i);
2856
2857	/* Now reenable external interrupts */
2858	spin_lock_irq(&adapter->work_lock);
2859	if (adapter->slow_intr_mask) {
2860		adapter->slow_intr_mask |= F_T3DBG;
2861		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2862		t3_write_reg(adapter, A_PL_INT_ENABLE0,
2863			     adapter->slow_intr_mask);
2864	}
2865	spin_unlock_irq(&adapter->work_lock);
2866}
2867
2868/*
2869 * Interrupt-context handler for external (PHY) interrupts.
2870 */
2871void t3_os_ext_intr_handler(struct adapter *adapter)
2872{
2873	/*
2874	 * Schedule a task to handle external interrupts as they may be slow
2875	 * and we use a mutex to protect MDIO registers.  We disable PHY
2876	 * interrupts in the meantime and let the task reenable them when
2877	 * it's done.
2878	 */
2879	spin_lock(&adapter->work_lock);
2880	if (adapter->slow_intr_mask) {
2881		adapter->slow_intr_mask &= ~F_T3DBG;
2882		t3_write_reg(adapter, A_PL_INT_ENABLE0,
2883			     adapter->slow_intr_mask);
2884		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2885	}
2886	spin_unlock(&adapter->work_lock);
2887}
2888
2889void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2890{
2891	struct net_device *netdev = adapter->port[port_id];
2892	struct port_info *pi = netdev_priv(netdev);
2893
2894	spin_lock(&adapter->work_lock);
2895	pi->link_fault = 1;
2896	spin_unlock(&adapter->work_lock);
2897}
2898
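/*
 * Quiesce the adapter after a fatal or PCI error: notify the offload
 * subsystem, close all ports, stop the SGE timers, optionally reset the
 * chip, and finally disable the PCI device.
 */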
2899static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2900{
2901	int i, ret = 0;
2902
2903	if (is_offload(adapter) &&
2904	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2905		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2906		offload_close(&adapter->tdev);
2907	}
2908
2909	/* Stop all ports */
2910	for_each_port(adapter, i) {
2911		struct net_device *netdev = adapter->port[i];
2912
2913		if (netif_running(netdev))
2914			__cxgb_close(netdev, on_wq);
2915	}
2916
2917	/* Stop SGE timers */
2918	t3_stop_sge_timers(adapter);
2919
2920	adapter->flags &= ~FULL_INIT_DONE;
2921
2922	if (reset)
2923		ret = t3_reset_adapter(adapter);
2924
2925	pci_disable_device(adapter->pdev);
2926
2927	return ret;
2928}
2929
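/*
 * Bring the PCI function back up after a reset: re-enable the device,
 * restore its config space, free the old SGE resources, and replay the
 * adapter preparation done at probe time.
 */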
2930static int t3_reenable_adapter(struct adapter *adapter)
2931{
2932	if (pci_enable_device(adapter->pdev)) {
2933		dev_err(&adapter->pdev->dev,
2934			"Cannot re-enable PCI device after reset.\n");
2935		goto err;
2936	}
2937	pci_set_master(adapter->pdev);
2938	pci_restore_state(adapter->pdev);
2939	pci_save_state(adapter->pdev);
2940
2941	/* Free sge resources */
2942	t3_free_sge_resources(adapter);
2943
2944	if (t3_replay_prep_adapter(adapter))
2945		goto err;
2946
2947	return 0;
2948err:
2949	return -1;
2950}
2951
2952static void t3_resume_ports(struct adapter *adapter)
2953{
2954	int i;
2955
2956	/* Restart the ports */
2957	for_each_port(adapter, i) {
2958		struct net_device *netdev = adapter->port[i];
2959
2960		if (netif_running(netdev)) {
2961			if (cxgb_open(netdev)) {
				dev_err(&adapter->pdev->dev,
					"can't bring device back up after reset\n");
2965				continue;
2966			}
2967		}
2968	}
2969
2970	if (is_offload(adapter) && !ofld_disable)
2971		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2972}
2973
/*
 * Process a fatal error: bring the ports down, reset the chip, and bring
 * the ports back up.
 */
2978static void fatal_error_task(struct work_struct *work)
2979{
2980	struct adapter *adapter = container_of(work, struct adapter,
2981					       fatal_error_handler_task);
2982	int err = 0;
2983
2984	rtnl_lock();
2985	err = t3_adapter_error(adapter, 1, 1);
2986	if (!err)
2987		err = t3_reenable_adapter(adapter);
2988	if (!err)
2989		t3_resume_ports(adapter);
2990
2991	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2992	rtnl_unlock();
2993}
2994
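/*
 * Handle a fatal hardware error: if the adapter is up, stop SGE DMA and the
 * XGMACs, disable interrupts and schedule the reset worker.  The firmware
 * status words are logged to help post-mortem analysis.
 */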
2995void t3_fatal_err(struct adapter *adapter)
2996{
2997	unsigned int fw_status[4];
2998
2999	if (adapter->flags & FULL_INIT_DONE) {
3000		t3_sge_stop_dma(adapter);
3001		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
3002		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
3003		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
3004		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
3005
3006		spin_lock(&adapter->work_lock);
3007		t3_intr_disable(adapter);
3008		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
3009		spin_unlock(&adapter->work_lock);
3010	}
3011	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
3012	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
3013		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
3014			 fw_status[0], fw_status[1],
3015			 fw_status[2], fw_status[3]);
3016}
3017
3018/**
3019 * t3_io_error_detected - called when PCI error is detected
3020 * @pdev: Pointer to PCI device
3021 * @state: The current pci connection state
3022 *
3023 * This function is called after a PCI bus error affecting
3024 * this device has been detected.
3025 */
3026static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
3027					     pci_channel_state_t state)
3028{
3029	struct adapter *adapter = pci_get_drvdata(pdev);
3030
3031	if (state == pci_channel_io_perm_failure)
3032		return PCI_ERS_RESULT_DISCONNECT;
3033
3034	t3_adapter_error(adapter, 0, 0);
3035
3036	/* Request a slot reset. */
3037	return PCI_ERS_RESULT_NEED_RESET;
3038}
3039
3040/**
3041 * t3_io_slot_reset - called after the pci bus has been reset.
3042 * @pdev: Pointer to PCI device
3043 *
3044 * Restart the card from scratch, as if from a cold-boot.
3045 */
3046static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
3047{
3048	struct adapter *adapter = pci_get_drvdata(pdev);
3049
3050	if (!t3_reenable_adapter(adapter))
3051		return PCI_ERS_RESULT_RECOVERED;
3052
3053	return PCI_ERS_RESULT_DISCONNECT;
3054}
3055
3056/**
3057 * t3_io_resume - called when traffic can start flowing again.
3058 * @pdev: Pointer to PCI device
3059 *
3060 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
3062 */
3063static void t3_io_resume(struct pci_dev *pdev)
3064{
3065	struct adapter *adapter = pci_get_drvdata(pdev);
3066
3067	CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
3068		 t3_read_reg(adapter, A_PCIE_PEX_ERR));
3069
3070	rtnl_lock();
3071	t3_resume_ports(adapter);
3072	rtnl_unlock();
3073}
3074
3075static const struct pci_error_handlers t3_err_handler = {
3076	.error_detected = t3_io_error_detected,
3077	.slot_reset = t3_io_slot_reset,
3078	.resume = t3_io_resume,
3079};
3080
3081/*
3082 * Set the number of qsets based on the number of CPUs and the number of ports,
3083 * not to exceed the number of available qsets, assuming there are enough qsets
3084 * per port in HW.
3085 */
3086static void set_nqsets(struct adapter *adap)
3087{
3088	int i, j = 0;
3089	int num_cpus = netif_get_num_default_rss_queues();
3090	int hwports = adap->params.nports;
3091	int nqsets = adap->msix_nvectors - 1;
3092
3093	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
3094		if (hwports == 2 &&
3095		    (hwports * nqsets > SGE_QSETS ||
3096		     num_cpus >= nqsets / hwports))
3097			nqsets /= hwports;
3098		if (nqsets > num_cpus)
3099			nqsets = num_cpus;
3100		if (nqsets < 1 || hwports == 4)
3101			nqsets = 1;
	} else {
		nqsets = 1;
	}
3104
3105	for_each_port(adap, i) {
3106		struct port_info *pi = adap2pinfo(adap, i);
3107
3108		pi->first_qset = j;
3109		pi->nqsets = nqsets;
3110		j = pi->first_qset + nqsets;
3111
3112		dev_info(&adap->pdev->dev,
3113			 "Port %d using %d queue sets.\n", i, nqsets);
3114	}
3115}
3116
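/*
 * Try to allocate MSI-X vectors: ideally one per queue set plus one for
 * slow-path interrupts, accepting as few as nports + 1.
 */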
3117static int cxgb_enable_msix(struct adapter *adap)
3118{
3119	struct msix_entry entries[SGE_QSETS + 1];
3120	int vectors;
3121	int i;
3122
3123	vectors = ARRAY_SIZE(entries);
3124	for (i = 0; i < vectors; ++i)
3125		entries[i].entry = i;
3126
3127	vectors = pci_enable_msix_range(adap->pdev, entries,
3128					adap->params.nports + 1, vectors);
3129	if (vectors < 0)
3130		return vectors;
3131
3132	for (i = 0; i < vectors; ++i)
3133		adap->msix_info[i].vec = entries[i].vector;
3134	adap->msix_nvectors = vectors;
3135
3136	return 0;
3137}
3138
3139static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
3140{
3141	static const char *pci_variant[] = {
3142		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
3143	};
3144
3145	int i;
3146	char buf[80];
3147
3148	if (is_pcie(adap))
3149		snprintf(buf, sizeof(buf), "%s x%d",
3150			 pci_variant[adap->params.pci.variant],
3151			 adap->params.pci.width);
3152	else
3153		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
3154			 pci_variant[adap->params.pci.variant],
3155			 adap->params.pci.speed, adap->params.pci.width);
3156
3157	for_each_port(adap, i) {
3158		struct net_device *dev = adap->port[i];
3159		const struct port_info *pi = netdev_priv(dev);
3160
3161		if (!test_bit(i, &adap->registered_device_map))
3162			continue;
3163		netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
3164			    ai->desc, pi->phy.desc,
3165			    is_offload(adap) ? "R" : "", adap->params.rev, buf,
3166			    (adap->flags & USING_MSIX) ? " MSI-X" :
3167			    (adap->flags & USING_MSI) ? " MSI" : "");
3168		if (adap->name == dev->name && adap->params.vpd.mclk)
3169			pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
3170			       adap->name, t3_mc7_size(&adap->cm) >> 20,
3171			       t3_mc7_size(&adap->pmtx) >> 20,
3172			       t3_mc7_size(&adap->pmrx) >> 20,
3173			       adap->params.vpd.sn);
3174	}
3175}
3176
3177static const struct net_device_ops cxgb_netdev_ops = {
3178	.ndo_open		= cxgb_open,
3179	.ndo_stop		= cxgb_close,
3180	.ndo_start_xmit		= t3_eth_xmit,
3181	.ndo_get_stats		= cxgb_get_stats,
3182	.ndo_validate_addr	= eth_validate_addr,
3183	.ndo_set_rx_mode	= cxgb_set_rxmode,
3184	.ndo_do_ioctl		= cxgb_ioctl,
3185	.ndo_change_mtu		= cxgb_change_mtu,
3186	.ndo_set_mac_address	= cxgb_set_mac_addr,
3187	.ndo_fix_features	= cxgb_fix_features,
3188	.ndo_set_features	= cxgb_set_features,
3189#ifdef CONFIG_NET_POLL_CONTROLLER
3190	.ndo_poll_controller	= cxgb_netpoll,
3191#endif
3192};
3193
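/*
 * Derive the port's iSCSI MAC address from its Ethernet address by setting
 * bit 7 of the fourth address byte, keeping the two addresses distinct.
 */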
3194static void cxgb3_init_iscsi_mac(struct net_device *dev)
3195{
3196	struct port_info *pi = netdev_priv(dev);
3197
3198	memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
3199	pi->iscsic.mac_addr[3] |= 0x80;
3200}
3201
3202#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
3203#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
3204			NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
3205static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3206{
3207	int i, err, pci_using_dac = 0;
3208	resource_size_t mmio_start, mmio_len;
3209	const struct adapter_info *ai;
3210	struct adapter *adapter = NULL;
3211	struct port_info *pi;
3212
3213	if (!cxgb3_wq) {
3214		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3215		if (!cxgb3_wq) {
3216			pr_err("cannot initialize work queue\n");
3217			return -ENOMEM;
3218		}
3219	}
3220
3221	err = pci_enable_device(pdev);
3222	if (err) {
3223		dev_err(&pdev->dev, "cannot enable PCI device\n");
3224		goto out;
3225	}
3226
3227	err = pci_request_regions(pdev, DRV_NAME);
3228	if (err) {
3229		/* Just info, some other driver may have claimed the device. */
3230		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3231		goto out_disable_device;
3232	}
3233
3234	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3235		pci_using_dac = 1;
3236		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3237		if (err) {
3238			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3239			       "coherent allocations\n");
3240			goto out_release_regions;
3241		}
3242	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3243		dev_err(&pdev->dev, "no usable DMA configuration\n");
3244		goto out_release_regions;
3245	}
3246
3247	pci_set_master(pdev);
3248	pci_save_state(pdev);
3249
3250	mmio_start = pci_resource_start(pdev, 0);
3251	mmio_len = pci_resource_len(pdev, 0);
3252	ai = t3_get_adapter_info(ent->driver_data);
3253
3254	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3255	if (!adapter) {
3256		err = -ENOMEM;
3257		goto out_release_regions;
3258	}
3259
3260	adapter->nofail_skb =
3261		alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3262	if (!adapter->nofail_skb) {
3263		dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3264		err = -ENOMEM;
3265		goto out_free_adapter;
3266	}
3267
3268	adapter->regs = ioremap(mmio_start, mmio_len);
3269	if (!adapter->regs) {
3270		dev_err(&pdev->dev, "cannot map device registers\n");
3271		err = -ENOMEM;
3272		goto out_free_adapter_nofail;
3273	}
3274
3275	adapter->pdev = pdev;
3276	adapter->name = pci_name(pdev);
3277	adapter->msg_enable = dflt_msg_enable;
3278	adapter->mmio_len = mmio_len;
3279
3280	mutex_init(&adapter->mdio_lock);
3281	spin_lock_init(&adapter->work_lock);
3282	spin_lock_init(&adapter->stats_lock);
3283
3284	INIT_LIST_HEAD(&adapter->adapter_list);
3285	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3286	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3287
3288	INIT_WORK(&adapter->db_full_task, db_full_task);
3289	INIT_WORK(&adapter->db_empty_task, db_empty_task);
3290	INIT_WORK(&adapter->db_drop_task, db_drop_task);
3291
3292	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3293
3294	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3295		struct net_device *netdev;
3296
3297		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3298		if (!netdev) {
3299			err = -ENOMEM;
3300			goto out_free_dev;
3301		}
3302
3303		SET_NETDEV_DEV(netdev, &pdev->dev);
3304
3305		adapter->port[i] = netdev;
3306		pi = netdev_priv(netdev);
3307		pi->adapter = adapter;
3308		pi->port_id = i;
3309		netif_carrier_off(netdev);
3310		netdev->irq = pdev->irq;
3311		netdev->mem_start = mmio_start;
3312		netdev->mem_end = mmio_start + mmio_len - 1;
3313		netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
3314			NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
3315		netdev->features |= netdev->hw_features |
3316				    NETIF_F_HW_VLAN_CTAG_TX;
3317		netdev->vlan_features |= netdev->features & VLAN_FEAT;
3318		if (pci_using_dac)
3319			netdev->features |= NETIF_F_HIGHDMA;
3320
3321		netdev->netdev_ops = &cxgb_netdev_ops;
3322		netdev->ethtool_ops = &cxgb_ethtool_ops;
3323		netdev->min_mtu = 81;
3324		netdev->max_mtu = ETH_MAX_MTU;
3325		netdev->dev_port = pi->port_id;
3326	}
3327
3328	pci_set_drvdata(pdev, adapter);
3329	if (t3_prep_adapter(adapter, ai, 1) < 0) {
3330		err = -ENODEV;
3331		goto out_free_dev;
3332	}
3333
3334	/*
3335	 * The card is now ready to go.  If any errors occur during device
3336	 * registration we do not fail the whole card but rather proceed only
3337	 * with the ports we manage to register successfully.  However we must
3338	 * register at least one net device.
3339	 */
3340	for_each_port(adapter, i) {
3341		err = register_netdev(adapter->port[i]);
3342		if (err)
3343			dev_warn(&pdev->dev,
3344				 "cannot register net device %s, skipping\n",
3345				 adapter->port[i]->name);
3346		else {
3347			/*
3348			 * Change the name we use for messages to the name of
3349			 * the first successfully registered interface.
3350			 */
3351			if (!adapter->registered_device_map)
3352				adapter->name = adapter->port[i]->name;
3353
3354			__set_bit(i, &adapter->registered_device_map);
3355		}
3356	}
3357	if (!adapter->registered_device_map) {
3358		dev_err(&pdev->dev, "could not register any net devices\n");
3359		goto out_free_dev;
3360	}
3361
3362	for_each_port(adapter, i)
3363		cxgb3_init_iscsi_mac(adapter->port[i]);
3364
3365	/* Driver's ready. Reflect it on LEDs */
3366	t3_led_ready(adapter);
3367
3368	if (is_offload(adapter)) {
3369		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3370		cxgb3_adapter_ofld(adapter);
3371	}
3372
3373	/* See what interrupts we'll be using */
3374	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3375		adapter->flags |= USING_MSIX;
3376	else if (msi > 0 && pci_enable_msi(pdev) == 0)
3377		adapter->flags |= USING_MSI;
3378
3379	set_nqsets(adapter);
3380
3381	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3382				 &cxgb3_attr_group);
3383	if (err) {
3384		dev_err(&pdev->dev, "cannot create sysfs group\n");
3385		goto out_close_led;
3386	}
3387
3388	print_port_info(adapter, ai);
3389	return 0;
3390
3391out_close_led:
3392	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
3393
3394out_free_dev:
3395	iounmap(adapter->regs);
3396	for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3397		if (adapter->port[i])
3398			free_netdev(adapter->port[i]);
3399
3400out_free_adapter_nofail:
3401	kfree_skb(adapter->nofail_skb);
3402
3403out_free_adapter:
3404	kfree(adapter);
3405
3406out_release_regions:
3407	pci_release_regions(pdev);
3408out_disable_device:
3409	pci_disable_device(pdev);
3410out:
3411	return err;
3412}
3413
3414static void remove_one(struct pci_dev *pdev)
3415{
3416	struct adapter *adapter = pci_get_drvdata(pdev);
3417
3418	if (adapter) {
3419		int i;
3420
3421		t3_sge_stop(adapter);
3422		sysfs_remove_group(&adapter->port[0]->dev.kobj,
3423				   &cxgb3_attr_group);
3424
3425		if (is_offload(adapter)) {
3426			cxgb3_adapter_unofld(adapter);
3427			if (test_bit(OFFLOAD_DEVMAP_BIT,
3428				     &adapter->open_device_map))
3429				offload_close(&adapter->tdev);
3430		}
3431
		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);
3435
3436		t3_stop_sge_timers(adapter);
3437		t3_free_sge_resources(adapter);
3438		cxgb_disable_msi(adapter);
3439
3440		for_each_port(adapter, i)
3441			if (adapter->port[i])
3442				free_netdev(adapter->port[i]);
3443
3444		iounmap(adapter->regs);
3445		kfree_skb(adapter->nofail_skb);
3446		kfree(adapter);
3447		pci_release_regions(pdev);
3448		pci_disable_device(pdev);
3449	}
3450}
3451
3452static struct pci_driver driver = {
3453	.name = DRV_NAME,
3454	.id_table = cxgb3_pci_tbl,
3455	.probe = init_one,
3456	.remove = remove_one,
3457	.err_handler = &t3_err_handler,
3458};
3459
3460static int __init cxgb3_init_module(void)
3461{
3462	int ret;
3463
3464	cxgb3_offload_init();
3465
3466	ret = pci_register_driver(&driver);
3467	return ret;
3468}
3469
3470static void __exit cxgb3_cleanup_module(void)
3471{
3472	pci_unregister_driver(&driver);
3473	if (cxgb3_wq)
3474		destroy_workqueue(cxgb3_wq);
3475}
3476
3477module_init(cxgb3_init_module);
3478module_exit(cxgb3_cleanup_module);
3479