// SPDX-License-Identifier: GPL-2.0
/* niu.c: Neptune ethernet driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/log2.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/io.h>
#include <linux/of_device.h>

#include "niu.h"

#define DRV_MODULE_NAME		"niu"
#define DRV_MODULE_VERSION	"1.1"
#define DRV_MODULE_RELDATE	"Apr 22, 2010"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("NIU ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

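/* On platforms that lack native 64-bit MMIO accessors, emulate readq and
 * writeq with two 32-bit accesses.  Note that the two halves are not
 * read or written atomically.
 */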
#ifndef readq
static u64 readq(void __iomem *reg)
{
	return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
}

static void writeq(u64 val, void __iomem *reg)
{
	writel(val & 0xffffffff, reg);
	writel(val >> 32, reg + 0x4UL);
}
#endif

static const struct pci_device_id niu_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
	{}
};

MODULE_DEVICE_TABLE(pci, niu_pci_tbl);

#define NIU_TX_TIMEOUT			(5 * HZ)

#define nr64(reg)		readq(np->regs + (reg))
#define nw64(reg, val)		writeq((val), np->regs + (reg))

#define nr64_mac(reg)		readq(np->mac_regs + (reg))
#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))

#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))

#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))

#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))

#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int niu_debug;
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");

#define niu_lock_parent(np, flags) \
	spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
	spin_unlock_irqrestore(&np->parent->lock, flags)

static int serdes_init_10g_serdes(struct niu *np);

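/* Helpers that poll a register until the given bits clear.  The
 * niu_set_and_wait_clear*() wrappers write the bits first, then poll
 * for them to self-clear, logging an error on timeout.
 */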
static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_mac(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;

	nw64_mac(reg, bits);
	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_mac(reg));
	return err;
}

#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_ipp(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;
	u64 val;

	val = nr64_ipp(reg);
	val |= bits;
	nw64_ipp(reg, val);

	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64_ipp(reg));
	return err;
}

#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
				 u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
})

static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
				    u64 bits, int limit, int delay,
				    const char *reg_name)
{
	int err;

	nw64(reg, bits);
	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
	if (err)
		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
			   (unsigned long long)bits, reg_name,
			   (unsigned long long)nr64(reg));
	return err;
}

#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})

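/* Arm (or disarm) a logical device group's interrupt, reloading the
 * group's interrupt timer value in the same register write.
 */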
static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
{
	u64 val = (u64) lp->timer;

	if (on)
		val |= LDG_IMGMT_ARM;

	nw64(LDG_IMGMT(lp->ldg_num), val);
}

static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
{
	unsigned long mask_reg, bits;
	u64 val;

	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	if (ldn < 64) {
		mask_reg = LD_IM0(ldn);
		bits = LD_IM0_MASK;
	} else {
		mask_reg = LD_IM1(ldn - 64);
		bits = LD_IM1_MASK;
	}

	val = nr64(mask_reg);
	if (on)
		val &= ~bits;
	else
		val |= bits;
	nw64(mask_reg, val);

	return 0;
}

static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
{
	struct niu_parent *parent = np->parent;
	int i;

	for (i = 0; i <= LDN_MAX; i++) {
		int err;

		if (parent->ldg_map[i] != lp->ldg_num)
			continue;

		err = niu_ldn_irq_enable(np, i, on);
		if (err)
			return err;
	}
	return 0;
}

static int niu_enable_interrupts(struct niu *np, int on)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];
		int err;

		err = niu_enable_ldn_in_ldg(np, lp, on);
		if (err)
			return err;
	}
	for (i = 0; i < np->num_ldg; i++)
		niu_ldg_rearm(np, &np->ldg[i], on);

	return 0;
}

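/* The parent's port_phy map packs a 2-bit PHY type per port. */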
static u32 phy_encode(u32 type, int port)
{
	return type << (port * 2);
}

static u32 phy_decode(u32 val, int port)
{
	return (val >> (port * 2)) & PORT_TYPE_MASK;
}

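/* Poll the MIF frame output register until the turnaround bit is set,
 * then return the 16-bit data field of the completed frame.
 */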
static int mdio_wait(struct niu *np)
{
	int limit = 1000;
	u64 val;

	while (--limit > 0) {
		val = nr64(MIF_FRAME_OUTPUT);
		if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
			return val & MIF_FRAME_OUTPUT_DATA;

		udelay(10);
	}

	return -ENODEV;
}

static int mdio_read(struct niu *np, int port, int dev, int reg)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
	return mdio_wait(np);
}

static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

static int mii_read(struct niu *np, int port, int reg)
{
	nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
	return mdio_wait(np);
}

static int mii_write(struct niu *np, int port, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

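/* The ESR2 PLL TX/RX config values are 32 bits wide but are programmed
 * through separate low/high 16-bit MDIO registers.
 */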
static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_TX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_TX_CFG_H(channel),
				 val >> 16);
	return err;
}

static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_RX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_RX_CFG_H(channel),
				 val >> 16);
	return err;
}

/* Mode is always 10G fiber.  */
static int serdes_init_niu_10g_fiber(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg;
	unsigned long i;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		int err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		int err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	return 0;
}

static int serdes_init_niu_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 pll_cfg, pll_sts;
	int max_retry = 100;
	u64 sig, mask, val;
	u32 tx_cfg, rx_cfg;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
		  PLL_TX_CFG_RATE_HALF);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_RATE_HALF);

	if (np->port == 0)
		rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 1G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
			   np->port, __func__);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
			   np->port, __func__);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}

	return 0;
}

static int serdes_init_niu_10g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
	int max_retry = 100;
	u64 sig, mask, val;
	unsigned long i;
	int err;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize PLL for 10G */
	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
			   np->port, __func__);
		return err;
	}

	pll_sts = PLL_CFG_ENPLL;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
	if (err) {
		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
			   np->port, __func__);
		return err;
	}

	udelay(200);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	/* check if serdes is ready */

	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	while (max_retry--) {
		sig = nr64(ESR_INT_SIGNALS);
		if ((sig & mask) == val)
			break;

		mdelay(500);
	}

	if ((sig & mask) != val) {
		pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
			np->port, (int)(sig & mask), (int)val);

		/* 10G failed, try initializing at 1G */
		err = serdes_init_niu_1g_serdes(np);
		if (!err) {
			np->flags &= ~NIU_FLAGS_10G;
			np->mac_xcvr = MAC_XCVR_PCS;
		} else {
			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
				   np->port);
			return -ENODEV;
		}
	}
	return 0;
}

static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_CTRL_H(chan));
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_GLUE_CTRL0_H(chan));
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_read_reset(struct niu *np, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_RXTX_RESET_CTRL_L);
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_RESET_CTRL_H);
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_CTRL_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_RXTX_CTRL_H(chan), (val >> 16));
	return err;
}

static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_GLUE_CTRL0_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_GLUE_CTRL0_H(chan), (val >> 16));
	return err;
}

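/* Pulse the SERDES RX/TX reset controls, then verify via readback that
 * all reset bits have cleared.
 */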
static int esr_reset(struct niu *np)
{
	u32 reset;
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0x0000);
	if (err)
		return err;
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0x0000);
	if (err)
		return err;
	udelay(200);

	err = esr_read_reset(np, &reset);
	if (err)
		return err;
	if (reset != 0) {
		netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
			   np->port, reset);
		return -ENODEV;
	}

	return 0;
}

static int serdes_init_10g(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;

	switch (np->port) {
	case 0:
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		break;
	case 1:
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	err = esr_reset(np);
	if (err)
		return err;

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
			np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
			return 0;
		}
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}
	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
		np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
	return 0;
}

static int serdes_init_1g(struct niu *np)
{
	u64 val;

	val = nr64(ENET_SERDES_1_PLL_CFG);
	val &= ~ENET_SERDES_PLL_FBDIV2;
	switch (np->port) {
	case 0:
		val |= ENET_SERDES_PLL_HRATE0;
		break;
	case 1:
		val |= ENET_SERDES_PLL_HRATE1;
		break;
	case 2:
		val |= ENET_SERDES_PLL_HRATE2;
		break;
	case 3:
		val |= ENET_SERDES_PLL_HRATE3;
		break;
	default:
		return -EINVAL;
	}
	nw64(ENET_SERDES_1_PLL_CFG, val);

	return 0;
}

static int serdes_init_1g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;
	int err;
	u64 reset_val, val_rd;

	val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
		ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
		ENET_SERDES_PLL_FBDIV0;
	switch (np->port) {
	case 0:
		reset_val = ENET_SERDES_RESET_0;
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		pll_cfg = ENET_SERDES_0_PLL_CFG;
		break;
	case 1:
		reset_val = ENET_SERDES_RESET_1;
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		pll_cfg = ENET_SERDES_1_PLL_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	nw64(ENET_SERDES_RESET, reset_val);
	mdelay(20);
	val_rd = nr64(ENET_SERDES_RESET);
	val_rd &= ~reset_val;
	nw64(pll_cfg, val);
	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);
	nw64(ENET_SERDES_RESET, val_rd);
	mdelay(2000);

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES <<
			   ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
		mask = val;
		break;

	case 1:
		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
		mask = val;
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
			   np->port, (int)(sig & mask), (int)val);
		return -ENODEV;
	}

	return 0;
}

static int link_status_1g_serdes(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int link_up;
	u64 val;
	u16 current_speed;
	unsigned long flags;
	u8 current_duplex;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	val = nr64_pcs(PCS_MII_STAT);

	if (val & PCS_MII_STAT_LINK_STATUS) {
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}

	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return 0;
}

static int link_status_10g_serdes(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	struct niu_link_config *lp = &np->link_config;
	int link_up = 0;
	int link_ok = 1;
	u64 val, val2;
	u16 current_speed;
	u8 current_duplex;

	if (!(np->flags & NIU_FLAGS_10G))
		return link_status_1g_serdes(np, link_up_p);

	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	spin_lock_irqsave(&np->lock, flags);

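	/* Declare the link up only when the XPCS status bit is set and
	 * the XMAC is not reporting a fault (bit 24 of XMAC_INTER2).
	 */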
	val = nr64_xpcs(XPCS_STATUS(0));
	val2 = nr64_mac(XMAC_INTER2);
	if (val2 & 0x01000000)
		link_ok = 0;

	if ((val & 0x1000ULL) && link_ok) {
		link_up = 1;
		current_speed = SPEED_10000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	spin_unlock_irqrestore(&np->lock, flags);
	*link_up_p = link_up;
	return 0;
}

static int link_status_mii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	int err;
	int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus;
	int supported, advertising, active_speed, active_duplex;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (unlikely(err < 0))
		return err;
	bmcr = err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (unlikely(err < 0))
		return err;
	bmsr = err;

	err = mii_read(np, np->phy_addr, MII_ADVERTISE);
	if (unlikely(err < 0))
		return err;
	advert = err;

	err = mii_read(np, np->phy_addr, MII_LPA);
	if (unlikely(err < 0))
		return err;
	lpa = err;

	if (likely(bmsr & BMSR_ESTATEN)) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (unlikely(err < 0))
			return err;
		estatus = err;

		err = mii_read(np, np->phy_addr, MII_CTRL1000);
		if (unlikely(err < 0))
			return err;
		ctrl1000 = err;

		err = mii_read(np, np->phy_addr, MII_STAT1000);
		if (unlikely(err < 0))
			return err;
		stat1000 = err;
	} else
		estatus = ctrl1000 = stat1000 = 0;

	supported = 0;
	if (bmsr & BMSR_ANEGCAPABLE)
		supported |= SUPPORTED_Autoneg;
	if (bmsr & BMSR_10HALF)
		supported |= SUPPORTED_10baseT_Half;
	if (bmsr & BMSR_10FULL)
		supported |= SUPPORTED_10baseT_Full;
	if (bmsr & BMSR_100HALF)
		supported |= SUPPORTED_100baseT_Half;
	if (bmsr & BMSR_100FULL)
		supported |= SUPPORTED_100baseT_Full;
	if (estatus & ESTATUS_1000_THALF)
		supported |= SUPPORTED_1000baseT_Half;
	if (estatus & ESTATUS_1000_TFULL)
		supported |= SUPPORTED_1000baseT_Full;
	lp->supported = supported;

	advertising = mii_adv_to_ethtool_adv_t(advert);
	advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);

	if (bmcr & BMCR_ANENABLE) {
		int neg, neg1000;

		lp->active_autoneg = 1;
		advertising |= ADVERTISED_Autoneg;

		neg = advert & lpa;
		neg1000 = (ctrl1000 << 2) & stat1000;

		if (neg1000 & (LPA_1000FULL | LPA_1000HALF))
			active_speed = SPEED_1000;
		else if (neg & LPA_100)
			active_speed = SPEED_100;
		else if (neg & (LPA_10HALF | LPA_10FULL))
			active_speed = SPEED_10;
		else
			active_speed = SPEED_INVALID;

		if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX))
			active_duplex = DUPLEX_FULL;
		else if (active_speed != SPEED_INVALID)
			active_duplex = DUPLEX_HALF;
		else
			active_duplex = DUPLEX_INVALID;
	} else {
		lp->active_autoneg = 0;

		if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100))
			active_speed = SPEED_1000;
		else if (bmcr & BMCR_SPEED100)
			active_speed = SPEED_100;
		else
			active_speed = SPEED_10;

		if (bmcr & BMCR_FULLDPLX)
			active_duplex = DUPLEX_FULL;
		else
			active_duplex = DUPLEX_HALF;
	}

	lp->active_advertising = advertising;
	lp->active_speed = active_speed;
	lp->active_duplex = active_duplex;
	*link_up_p = !!(bmsr & BMSR_LSTATUS);

	return 0;
}

static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	u16 current_speed, bmsr;
	unsigned long flags;
	u8 current_duplex;
	int err, link_up;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		goto out;

	bmsr = err;
	if (bmsr & BMSR_LSTATUS) {
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	err = 0;

out:
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return err;
}

static int link_status_1g(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&np->lock, flags);

	err = link_status_mii(np, link_up_p);
	lp->supported |= SUPPORTED_TP;
	lp->active_advertising |= ADVERTISED_TP;

	spin_unlock_irqrestore(&np->lock, flags);
	return err;
}

static int bcm8704_reset(struct niu *np)
{
	int err, limit;

	err = mdio_read(np, np->phy_addr,
			BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
	if (err < 0 || err == 0xffff)
		return err;
	err |= BMCR_RESET;
	err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		err = mdio_read(np, np->phy_addr,
				BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
			   np->port, (err & 0xffff));
		return -ENODEV;
	}
	return 0;
}

/* When written, certain PHY registers need to be read back twice
 * in order for the bits to settle properly.
 */
static int bcm8704_user_dev3_readback(struct niu *np, int reg)
{
	int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	return 0;
}

static int bcm8706_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err |= USER_ODIG_CTRL_RESV2;
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}

static int bcm8704_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
			 (USER_CONTROL_OPTXRST_LVL |
			  USER_CONTROL_OPBIASFLT_LVL |
			  USER_CONTROL_OBTMPFLT_LVL |
			  USER_CONTROL_OPPRFLT_LVL |
			  USER_CONTROL_OPTXFLT_LVL |
			  USER_CONTROL_OPRXLOS_LVL |
			  USER_CONTROL_OPRXFLT_LVL |
			  USER_CONTROL_OPTXON_LVL |
			  (0x3f << USER_CONTROL_RES1_SHIFT)));
	if (err)
		return err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
			 (USER_PMD_TX_CTL_XFP_CLKEN |
			  (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
			  (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
			  USER_PMD_TX_CTL_TSCK_LPWREN));
	if (err)
		return err;

	err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
	if (err)
		return err;
	err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}

static int mrvl88x2011_act_led(struct niu *np, int val)
{
	int	err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_8_TO_11_CTL);
	if (err < 0)
		return err;

	err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT, MRVL88X2011_LED_CTL_MASK);
	err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT, val);

	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			  MRVL88X2011_LED_8_TO_11_CTL, err);
}

static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
{
	int	err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_BLINK_CTL);
	if (err >= 0) {
		err &= ~MRVL88X2011_LED_BLKRATE_MASK;
		err |= (rate << 4);

		err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
				 MRVL88X2011_LED_BLINK_CTL, err);
	}

	return err;
}

static int xcvr_init_10g_mrvl88x2011(struct niu *np)
{
	int	err;

	/* Set LED functions */
	err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
	if (err)
		return err;

	/* led activity */
	err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_GENERAL_CTL);
	if (err < 0)
		return err;

	err |= MRVL88X2011_ENA_XFPREFCLK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			 MRVL88X2011_GENERAL_CTL, err);
	if (err < 0)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_CTL_1);
	if (err < 0)
		return err;

	if (np->link_config.loopback_mode == LOOPBACK_MAC)
		err |= MRVL88X2011_LOOPBACK;
	else
		err &= ~MRVL88X2011_LOOPBACK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			 MRVL88X2011_PMA_PMD_CTL_1, err);
	if (err < 0)
		return err;

	/* Enable PMD */
	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			  MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
}

static int xcvr_diag_bcm870x(struct niu *np)
{
	u16 analog_stat0, tx_alarm_status;
	int err = 0;

#if 1
	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			MII_STAT1000);
	if (err < 0)
		return err;
	pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
	if (err < 0)
		return err;
	pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			MII_NWAYTEST);
	if (err < 0)
		return err;
	pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err);
#endif

	/* XXX dig this out it might not be so useful XXX */
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	analog_stat0 = err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	tx_alarm_status = err;

	if (analog_stat0 != 0x03fc) {
		if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
			pr_info("Port %u cable not connected or bad cable\n",
				np->port);
		} else if (analog_stat0 == 0x639c) {
			pr_info("Port %u optical module is bad or missing\n",
				np->port);
		}
	}

	return 0;
}

static int xcvr_10g_set_lb_bcm870x(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			MII_BMCR);
	if (err < 0)
		return err;

	err &= ~BMCR_LOOPBACK;

	if (lp->loopback_mode == LOOPBACK_MAC)
		err |= BMCR_LOOPBACK;

	err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g_bcm8706(struct niu *np)
{
	int err = 0;
	u64 val;

	if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
	    (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
		return err;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = bcm8706_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g_bcm8704(struct niu *np)
{
	int err;

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = bcm8704_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}

static int xcvr_init_10g(struct niu *np)
{
	int phy_id, err;
	u64 val;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	phy_id = phy_decode(np->parent->port_phy, np->port);
	phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

	/* handle different phy types */
	switch (phy_id & NIU_PHY_ID_MASK) {
	case NIU_PHY_ID_MRVL88X2011:
		err = xcvr_init_10g_mrvl88x2011(np);
		break;

	default: /* bcom 8704 */
		err = xcvr_init_10g_bcm8704(np);
		break;
	}

	return err;
}

static int mii_reset(struct niu *np)
{
	int limit, err;

	err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		udelay(500);
		err = mii_read(np, np->phy_addr, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n",
			   np->port, err);
		return -ENODEV;
	}

	return 0;
}

static int xcvr_init_1g_rgmii(struct niu *np)
{
	int err;
	u64 val;
	u16 bmcr, bmsr, estat;

	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (bmsr & BMSR_ESTATEN) {
		u16 ctrl1000 = 0;

		if (estat & ESTATUS_1000_TFULL)
			ctrl1000 |= ADVERTISE_1000FULL;
		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
		if (err)
			return err;
	}

	bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	bmcr = mii_read(np, np->phy_addr, MII_BMCR);

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;

	return 0;
}

static int mii_init_common(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 bmcr, bmsr, adv, estat;
	int err;

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (lp->loopback_mode == LOOPBACK_MAC) {
		bmcr |= BMCR_LOOPBACK;
		if (lp->active_speed == SPEED_1000)
			bmcr |= BMCR_SPEED1000;
		if (lp->active_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 aux;

		aux = (BCM5464R_AUX_CTL_EXT_LB |
		       BCM5464R_AUX_CTL_WRITE_1);
		err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
		if (err)
			return err;
	}

	if (lp->autoneg) {
		u16 ctrl1000;

		adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
		if ((bmsr & BMSR_10HALF) &&
			(lp->advertising & ADVERTISED_10baseT_Half))
			adv |= ADVERTISE_10HALF;
		if ((bmsr & BMSR_10FULL) &&
			(lp->advertising & ADVERTISED_10baseT_Full))
			adv |= ADVERTISE_10FULL;
		if ((bmsr & BMSR_100HALF) &&
			(lp->advertising & ADVERTISED_100baseT_Half))
			adv |= ADVERTISE_100HALF;
		if ((bmsr & BMSR_100FULL) &&
			(lp->advertising & ADVERTISED_100baseT_Full))
			adv |= ADVERTISE_100FULL;
		err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
		if (err)
			return err;

		if (likely(bmsr & BMSR_ESTATEN)) {
			ctrl1000 = 0;
			if ((estat & ESTATUS_1000_THALF) &&
				(lp->advertising & ADVERTISED_1000baseT_Half))
				ctrl1000 |= ADVERTISE_1000HALF;
			if ((estat & ESTATUS_1000_TFULL) &&
				(lp->advertising & ADVERTISED_1000baseT_Full))
				ctrl1000 |= ADVERTISE_1000FULL;
			err = mii_write(np, np->phy_addr,
					MII_CTRL1000, ctrl1000);
			if (err)
				return err;
		}

		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		/* !lp->autoneg */
		int fulldpx;

		if (lp->duplex == DUPLEX_FULL) {
			bmcr |= BMCR_FULLDPLX;
			fulldpx = 1;
		} else if (lp->duplex == DUPLEX_HALF)
			fulldpx = 0;
		else
			return -EINVAL;

		if (lp->speed == SPEED_1000) {
			/* if X-full requested while not supported, or
			   X-half requested while not supported... */
			if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) ||
				(!fulldpx && !(estat & ESTATUS_1000_THALF)))
				return -EINVAL;
			bmcr |= BMCR_SPEED1000;
		} else if (lp->speed == SPEED_100) {
			if ((fulldpx && !(bmsr & BMSR_100FULL)) ||
				(!fulldpx && !(bmsr & BMSR_100HALF)))
				return -EINVAL;
			bmcr |= BMCR_SPEED100;
		} else if (lp->speed == SPEED_10) {
			if ((fulldpx && !(bmsr & BMSR_10FULL)) ||
				(!fulldpx && !(bmsr & BMSR_10HALF)))
				return -EINVAL;
		} else
			return -EINVAL;
	}

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

#if 0
	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	bmcr = err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
		np->port, bmcr, bmsr);
#endif

	return 0;
}

static int xcvr_init_1g(struct niu *np)
{
	u64 val;

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	return mii_init_common(np);
}

static int niu_xcvr_init(struct niu *np)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->xcvr_init)
		err = ops->xcvr_init(np);

	return err;
}

static int niu_serdes_init(struct niu *np)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->serdes_init)
		err = ops->serdes_init(np);

	return err;
}

static void niu_init_xif(struct niu *);
static void niu_handle_led(struct niu *, int status);

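/* Propagate a link state change to the networking core: on link-up,
 * log the negotiated speed/duplex, reprogram the XIF and LED, and set
 * the carrier; on link-down, turn the LED off and clear the carrier.
 */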
static int niu_link_status_common(struct niu *np, int link_up)
{
	struct niu_link_config *lp = &np->link_config;
	struct net_device *dev = np->dev;
	unsigned long flags;

	if (!netif_carrier_ok(dev) && link_up) {
		netif_info(np, link, dev, "Link is up at %s, %s duplex\n",
			   lp->active_speed == SPEED_10000 ? "10Gb/sec" :
			   lp->active_speed == SPEED_1000 ? "1Gb/sec" :
			   lp->active_speed == SPEED_100 ? "100Mbit/sec" :
			   "10Mbit/sec",
			   lp->active_duplex == DUPLEX_FULL ? "full" : "half");

		spin_lock_irqsave(&np->lock, flags);
		niu_init_xif(np);
		niu_handle_led(np, 1);
		spin_unlock_irqrestore(&np->lock, flags);

		netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev) && !link_up) {
		netif_warn(np, link, dev, "Link is down\n");
		spin_lock_irqsave(&np->lock, flags);
		niu_handle_led(np, 0);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_carrier_off(dev);
	}

	return 0;
}

static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
{
	int err, link_up, pma_status, pcs_status;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_10G_PMD_STATUS_2);
	if (err < 0)
		goto out;

	/* Check PMA/PMD Register: 1.0001.2 == 1 */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check PMC Register : 3.0001.2 == 1: read twice */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check XGXS Register : 4.0018.[0-3,12] */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
			MRVL88X2011_10G_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
		    0x800))
		link_up = (pma_status && pcs_status) ? 1 : 0;

	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;
out:
	mrvl88x2011_act_led(np, (link_up ?
				 MRVL88X2011_LED_CTL_PCS_ACT :
				 MRVL88X2011_LED_CTL_OFF));

	*link_up_p = link_up;
	return err;
}

static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
{
	int err, link_up;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0 || err == 0xffff)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;

	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;
	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_PATTEST |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		np->link_config.active_speed = SPEED_INVALID;
		np->link_config.active_duplex = DUPLEX_INVALID;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	return err;
}

static int link_status_10g_bcom(struct niu *np, int *link_up_p)
{
	int err, link_up;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;
	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	return err;
}

static int link_status_10g(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	int err = -EINVAL;

	spin_lock_irqsave(&np->lock, flags);

	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
		int phy_id;

		phy_id = phy_decode(np->parent->port_phy, np->port);
		phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

		/* handle different phy types */
		switch (phy_id & NIU_PHY_ID_MASK) {
		case NIU_PHY_ID_MRVL88X2011:
			err = link_status_10g_mrvl(np, link_up_p);
			break;

		default: /* bcom 8704 */
			err = link_status_10g_bcom(np, link_up_p);
			break;
		}
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return err;
}

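/* Infer hotplug PHY presence from the port's SERDES signal-detect and
 * ready bits.
 */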
static int niu_10g_phy_present(struct niu *np)
{
	u64 sig, mask, val;

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return 0;
	}

	if ((sig & mask) != val)
		return 0;
	return 1;
}

static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	int err = 0;
	int phy_present;
	int phy_present_prev;

	spin_lock_irqsave(&np->lock, flags);

	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
		phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
			1 : 0;
		phy_present = niu_10g_phy_present(np);
		if (phy_present != phy_present_prev) {
			/* state change */
			if (phy_present) {
				/* A NEM was just plugged in */
				np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				if (np->phy_ops->xcvr_init)
					err = np->phy_ops->xcvr_init(np);
				if (err) {
					err = mdio_read(np, np->phy_addr,
						BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
					if (err == 0xffff) {
						/* No mdio, back-to-back XAUI */
						goto out;
					}
					/* debounce */
					np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				}
			} else {
				np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				*link_up_p = 0;
				netif_warn(np, link, np->dev,
					   "Hotplug PHY Removed\n");
			}
		}
out:
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) {
			err = link_status_10g_bcm8706(np, link_up_p);
			if (err == 0xffff) {
				/* No mdio, back-to-back XAUI: it is C10NEM */
				*link_up_p = 1;
				np->link_config.active_speed = SPEED_10000;
				np->link_config.active_duplex = DUPLEX_FULL;
			}
		}
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return 0;
}

static int niu_link_status(struct niu *np, int *link_up_p)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->link_status)
		err = ops->link_status(np, link_up_p);

	return err;
}

2207static void niu_timer(struct timer_list *t)
2208{
2209	struct niu *np = from_timer(np, t, timer);
2210	unsigned long off;
2211	int err, link_up;
2212
2213	err = niu_link_status(np, &link_up);
2214	if (!err)
2215		niu_link_status_common(np, link_up);
2216
2217	if (netif_carrier_ok(np->dev))
2218		off = 5 * HZ;
2219	else
2220		off = 1 * HZ;
2221	np->timer.expires = jiffies + off;
2222
2223	add_timer(&np->timer);
2224}
2225
2226static const struct niu_phy_ops phy_ops_10g_serdes = {
2227	.serdes_init		= serdes_init_10g_serdes,
2228	.link_status		= link_status_10g_serdes,
2229};
2230
2231static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
2232	.serdes_init		= serdes_init_niu_10g_serdes,
2233	.link_status		= link_status_10g_serdes,
2234};
2235
2236static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
2237	.serdes_init		= serdes_init_niu_1g_serdes,
2238	.link_status		= link_status_1g_serdes,
2239};
2240
2241static const struct niu_phy_ops phy_ops_1g_rgmii = {
2242	.xcvr_init		= xcvr_init_1g_rgmii,
2243	.link_status		= link_status_1g_rgmii,
2244};
2245
2246static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
2247	.serdes_init		= serdes_init_niu_10g_fiber,
2248	.xcvr_init		= xcvr_init_10g,
2249	.link_status		= link_status_10g,
2250};
2251
2252static const struct niu_phy_ops phy_ops_10g_fiber = {
2253	.serdes_init		= serdes_init_10g,
2254	.xcvr_init		= xcvr_init_10g,
2255	.link_status		= link_status_10g,
2256};
2257
2258static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
2259	.serdes_init		= serdes_init_10g,
2260	.xcvr_init		= xcvr_init_10g_bcm8706,
2261	.link_status		= link_status_10g_hotplug,
2262};
2263
2264static const struct niu_phy_ops phy_ops_niu_10g_hotplug = {
2265	.serdes_init		= serdes_init_niu_10g_fiber,
2266	.xcvr_init		= xcvr_init_10g_bcm8706,
2267	.link_status		= link_status_10g_hotplug,
2268};
2269
2270static const struct niu_phy_ops phy_ops_10g_copper = {
2271	.serdes_init		= serdes_init_10g,
2272	.link_status		= link_status_10g, /* XXX */
2273};
2274
2275static const struct niu_phy_ops phy_ops_1g_fiber = {
2276	.serdes_init		= serdes_init_1g,
2277	.xcvr_init		= xcvr_init_1g,
2278	.link_status		= link_status_1g,
2279};
2280
2281static const struct niu_phy_ops phy_ops_1g_copper = {
2282	.xcvr_init		= xcvr_init_1g,
2283	.link_status		= link_status_1g,
2284};
2285
2286struct niu_phy_template {
2287	const struct niu_phy_ops	*ops;
2288	u32				phy_addr_base;
2289};
2290
2291static const struct niu_phy_template phy_template_niu_10g_fiber = {
2292	.ops		= &phy_ops_10g_fiber_niu,
2293	.phy_addr_base	= 16,
2294};
2295
2296static const struct niu_phy_template phy_template_niu_10g_serdes = {
2297	.ops		= &phy_ops_10g_serdes_niu,
2298	.phy_addr_base	= 0,
2299};
2300
2301static const struct niu_phy_template phy_template_niu_1g_serdes = {
2302	.ops		= &phy_ops_1g_serdes_niu,
2303	.phy_addr_base	= 0,
2304};
2305
2306static const struct niu_phy_template phy_template_10g_fiber = {
2307	.ops		= &phy_ops_10g_fiber,
2308	.phy_addr_base	= 8,
2309};
2310
2311static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
2312	.ops		= &phy_ops_10g_fiber_hotplug,
2313	.phy_addr_base	= 8,
2314};
2315
2316static const struct niu_phy_template phy_template_niu_10g_hotplug = {
2317	.ops		= &phy_ops_niu_10g_hotplug,
2318	.phy_addr_base	= 8,
2319};
2320
2321static const struct niu_phy_template phy_template_10g_copper = {
2322	.ops		= &phy_ops_10g_copper,
2323	.phy_addr_base	= 10,
2324};
2325
2326static const struct niu_phy_template phy_template_1g_fiber = {
2327	.ops		= &phy_ops_1g_fiber,
2328	.phy_addr_base	= 0,
2329};
2330
2331static const struct niu_phy_template phy_template_1g_copper = {
2332	.ops		= &phy_ops_1g_copper,
2333	.phy_addr_base	= 0,
2334};
2335
2336static const struct niu_phy_template phy_template_1g_rgmii = {
2337	.ops		= &phy_ops_1g_rgmii,
2338	.phy_addr_base	= 0,
2339};
2340
2341static const struct niu_phy_template phy_template_10g_serdes = {
2342	.ops		= &phy_ops_10g_serdes,
2343	.phy_addr_base	= 0,
2344};
2345
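/* Per-port PHY address offsets used on the ATCA (XCVR_SERDES)
 * configurations, indexed by NIU port.  Ports 0 and 1 run over the
 * 10G serdes and need no offset; ports 2 and 3 reach their 1G RGMII
 * PHYs at MDIO addresses 11 and 10.
 */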
2346static int niu_atca_port_num[4] = {
	0, 0, 11, 10
2348};
2349
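/* Bring up this port's ENET serdes for 10G operation: program the
 * PLL, control, and (loopback) test registers, condition all four
 * lanes, then check the ESR interrupt signals for lane readiness.
 * If the expected ready/detect bits are not all up, fall back to 1G
 * serdes mode and switch the MAC over to the PCS transceiver.
 */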
2350static int serdes_init_10g_serdes(struct niu *np)
2351{
2352	struct niu_link_config *lp = &np->link_config;
2353	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
2354	u64 ctrl_val, test_cfg_val, sig, mask, val;
2355
2356	switch (np->port) {
2357	case 0:
2358		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
2359		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
2360		pll_cfg = ENET_SERDES_0_PLL_CFG;
2361		break;
2362	case 1:
2363		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
2364		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
2365		pll_cfg = ENET_SERDES_1_PLL_CFG;
2366		break;
2367
2368	default:
2369		return -EINVAL;
2370	}
2371	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
2372		    ENET_SERDES_CTRL_SDET_1 |
2373		    ENET_SERDES_CTRL_SDET_2 |
2374		    ENET_SERDES_CTRL_SDET_3 |
2375		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
2376		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
2377		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
2378		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
2379		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
2380		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
2381		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
2382		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
2383	test_cfg_val = 0;
2384
2385	if (lp->loopback_mode == LOOPBACK_PHY) {
2386		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
2387				  ENET_SERDES_TEST_MD_0_SHIFT) |
2388				 (ENET_TEST_MD_PAD_LOOPBACK <<
2389				  ENET_SERDES_TEST_MD_1_SHIFT) |
2390				 (ENET_TEST_MD_PAD_LOOPBACK <<
2391				  ENET_SERDES_TEST_MD_2_SHIFT) |
2392				 (ENET_TEST_MD_PAD_LOOPBACK <<
2393				  ENET_SERDES_TEST_MD_3_SHIFT));
2394	}
2395
2396	esr_reset(np);
2397	nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
2398	nw64(ctrl_reg, ctrl_val);
2399	nw64(test_cfg_reg, test_cfg_val);
2400
2401	/* Initialize all 4 lanes of the SERDES.  */
2402	for (i = 0; i < 4; i++) {
2403		u32 rxtx_ctrl, glue0;
2404		int err;
2405
2406		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
2407		if (err)
2408			return err;
2409		err = esr_read_glue0(np, i, &glue0);
2410		if (err)
2411			return err;
2412
2413		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
2414		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
2415			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
2416
2417		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
2418			   ESR_GLUE_CTRL0_THCNT |
2419			   ESR_GLUE_CTRL0_BLTIME);
2420		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
2421			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
2422			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
2423			  (BLTIME_300_CYCLES <<
2424			   ESR_GLUE_CTRL0_BLTIME_SHIFT));
2425
2426		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
2427		if (err)
2428			return err;
2429		err = esr_write_glue0(np, i, glue0);
2430		if (err)
2431			return err;
	}

2435	sig = nr64(ESR_INT_SIGNALS);
2436	switch (np->port) {
2437	case 0:
2438		mask = ESR_INT_SIGNALS_P0_BITS;
2439		val = (ESR_INT_SRDY0_P0 |
2440		       ESR_INT_DET0_P0 |
2441		       ESR_INT_XSRDY_P0 |
2442		       ESR_INT_XDP_P0_CH3 |
2443		       ESR_INT_XDP_P0_CH2 |
2444		       ESR_INT_XDP_P0_CH1 |
2445		       ESR_INT_XDP_P0_CH0);
2446		break;
2447
2448	case 1:
2449		mask = ESR_INT_SIGNALS_P1_BITS;
2450		val = (ESR_INT_SRDY0_P1 |
2451		       ESR_INT_DET0_P1 |
2452		       ESR_INT_XSRDY_P1 |
2453		       ESR_INT_XDP_P1_CH3 |
2454		       ESR_INT_XDP_P1_CH2 |
2455		       ESR_INT_XDP_P1_CH1 |
2456		       ESR_INT_XDP_P1_CH0);
2457		break;
2458
2459	default:
2460		return -EINVAL;
2461	}
2462
2463	if ((sig & mask) != val) {
2464		int err;
2465		err = serdes_init_1g_serdes(np);
2466		if (!err) {
2467			np->flags &= ~NIU_FLAGS_10G;
2468			np->mac_xcvr = MAC_XCVR_PCS;
		} else {
2470			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
2471				   np->port);
2472			return -ENODEV;
2473		}
2474	}
2475
2476	return 0;
2477}
2478
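/* Pick the PHY template (ops vector plus MDIO base address) for this
 * port from the platform type and the 10G/FIBER/XCVR_SERDES flag
 * combination; the final MDIO address is the template's base plus a
 * per-port offset.
 */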
2479static int niu_determine_phy_disposition(struct niu *np)
2480{
2481	struct niu_parent *parent = np->parent;
2482	u8 plat_type = parent->plat_type;
2483	const struct niu_phy_template *tp;
2484	u32 phy_addr_off = 0;
2485
2486	if (plat_type == PLAT_TYPE_NIU) {
2487		switch (np->flags &
2488			(NIU_FLAGS_10G |
2489			 NIU_FLAGS_FIBER |
2490			 NIU_FLAGS_XCVR_SERDES)) {
2491		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
2492			/* 10G Serdes */
2493			tp = &phy_template_niu_10g_serdes;
2494			break;
2495		case NIU_FLAGS_XCVR_SERDES:
2496			/* 1G Serdes */
2497			tp = &phy_template_niu_1g_serdes;
2498			break;
2499		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
2500			/* 10G Fiber */
2501		default:
2502			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
2503				tp = &phy_template_niu_10g_hotplug;
2504				if (np->port == 0)
2505					phy_addr_off = 8;
2506				if (np->port == 1)
2507					phy_addr_off = 12;
2508			} else {
2509				tp = &phy_template_niu_10g_fiber;
2510				phy_addr_off += np->port;
2511			}
2512			break;
2513		}
2514	} else {
2515		switch (np->flags &
2516			(NIU_FLAGS_10G |
2517			 NIU_FLAGS_FIBER |
2518			 NIU_FLAGS_XCVR_SERDES)) {
2519		case 0:
2520			/* 1G copper */
2521			tp = &phy_template_1g_copper;
2522			if (plat_type == PLAT_TYPE_VF_P0)
2523				phy_addr_off = 10;
2524			else if (plat_type == PLAT_TYPE_VF_P1)
2525				phy_addr_off = 26;
2526
2527			phy_addr_off += (np->port ^ 0x3);
2528			break;
2529
2530		case NIU_FLAGS_10G:
2531			/* 10G copper */
2532			tp = &phy_template_10g_copper;
2533			break;
2534
2535		case NIU_FLAGS_FIBER:
2536			/* 1G fiber */
2537			tp = &phy_template_1g_fiber;
2538			break;
2539
2540		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
2541			/* 10G fiber */
2542			tp = &phy_template_10g_fiber;
2543			if (plat_type == PLAT_TYPE_VF_P0 ||
2544			    plat_type == PLAT_TYPE_VF_P1)
2545				phy_addr_off = 8;
2546			phy_addr_off += np->port;
2547			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
2548				tp = &phy_template_10g_fiber_hotplug;
2549				if (np->port == 0)
2550					phy_addr_off = 8;
2551				if (np->port == 1)
2552					phy_addr_off = 12;
2553			}
2554			break;
2555
2556		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
2557		case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
2558		case NIU_FLAGS_XCVR_SERDES:
			switch (np->port) {
2560			case 0:
2561			case 1:
2562				tp = &phy_template_10g_serdes;
2563				break;
2564			case 2:
2565			case 3:
2566				tp = &phy_template_1g_rgmii;
2567				break;
2568			default:
2569				return -EINVAL;
2570			}
2571			phy_addr_off = niu_atca_port_num[np->port];
2572			break;
2573
2574		default:
2575			return -EINVAL;
2576		}
2577	}
2578
2579	np->phy_ops = tp->ops;
2580	np->phy_addr = tp->phy_addr_base + phy_addr_off;
2581
2582	return 0;
2583}
2584
2585static int niu_init_link(struct niu *np)
2586{
2587	struct niu_parent *parent = np->parent;
2588	int err, ignore;
2589
2590	if (parent->plat_type == PLAT_TYPE_NIU) {
2591		err = niu_xcvr_init(np);
2592		if (err)
2593			return err;
2594		msleep(200);
2595	}
2596	err = niu_serdes_init(np);
2597	if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY))
2598		return err;
2599	msleep(200);
2600	err = niu_xcvr_init(np);
2601	if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY))
2602		niu_link_status(np, &ignore);
2603	return 0;
2604}
2605
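/* The unicast MAC address is programmed as three 16-bit registers,
 * with ADDR0 holding the two least significant bytes and ADDR2 the
 * two most significant.
 */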
2606static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
2607{
2608	u16 reg0 = addr[4] << 8 | addr[5];
2609	u16 reg1 = addr[2] << 8 | addr[3];
2610	u16 reg2 = addr[0] << 8 | addr[1];
2611
2612	if (np->flags & NIU_FLAGS_XMAC) {
2613		nw64_mac(XMAC_ADDR0, reg0);
2614		nw64_mac(XMAC_ADDR1, reg1);
2615		nw64_mac(XMAC_ADDR2, reg2);
2616	} else {
2617		nw64_mac(BMAC_ADDR0, reg0);
2618		nw64_mac(BMAC_ADDR1, reg1);
2619		nw64_mac(BMAC_ADDR2, reg2);
2620	}
2621}
2622
2623static int niu_num_alt_addr(struct niu *np)
2624{
2625	if (np->flags & NIU_FLAGS_XMAC)
2626		return XMAC_NUM_ALT_ADDR;
2627	else
2628		return BMAC_NUM_ALT_ADDR;
2629}
2630
2631static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
2632{
2633	u16 reg0 = addr[4] << 8 | addr[5];
2634	u16 reg1 = addr[2] << 8 | addr[3];
2635	u16 reg2 = addr[0] << 8 | addr[1];
2636
2637	if (index >= niu_num_alt_addr(np))
2638		return -EINVAL;
2639
2640	if (np->flags & NIU_FLAGS_XMAC) {
2641		nw64_mac(XMAC_ALT_ADDR0(index), reg0);
2642		nw64_mac(XMAC_ALT_ADDR1(index), reg1);
2643		nw64_mac(XMAC_ALT_ADDR2(index), reg2);
2644	} else {
2645		nw64_mac(BMAC_ALT_ADDR0(index), reg0);
2646		nw64_mac(BMAC_ALT_ADDR1(index), reg1);
2647		nw64_mac(BMAC_ALT_ADDR2(index), reg2);
2648	}
2649
2650	return 0;
2651}
2652
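/* Toggle the compare-enable bit for one alternate MAC address.  On
 * the XMAC, alternate entry N maps to bit N of XMAC_ADDR_CMPEN; on
 * the BMAC the alternate bits start at bit 1, presumably because
 * bit 0 enables the primary address compare.
 */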
2653static int niu_enable_alt_mac(struct niu *np, int index, int on)
2654{
2655	unsigned long reg;
2656	u64 val, mask;
2657
2658	if (index >= niu_num_alt_addr(np))
2659		return -EINVAL;
2660
2661	if (np->flags & NIU_FLAGS_XMAC) {
2662		reg = XMAC_ADDR_CMPEN;
2663		mask = 1 << index;
2664	} else {
2665		reg = BMAC_ADDR_CMPEN;
2666		mask = 1 << (index + 1);
2667	}
2668
2669	val = nr64_mac(reg);
2670	if (on)
2671		val |= mask;
2672	else
2673		val &= ~mask;
2674	nw64_mac(reg, val);
2675
2676	return 0;
2677}
2678
2679static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
2680				   int num, int mac_pref)
2681{
2682	u64 val = nr64_mac(reg);
2683	val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
2684	val |= num;
2685	if (mac_pref)
2686		val |= HOST_INFO_MPR;
2687	nw64_mac(reg, val);
2688}
2689
2690static int __set_rdc_table_num(struct niu *np,
2691			       int xmac_index, int bmac_index,
2692			       int rdc_table_num, int mac_pref)
2693{
2694	unsigned long reg;
2695
2696	if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
2697		return -EINVAL;
2698	if (np->flags & NIU_FLAGS_XMAC)
2699		reg = XMAC_HOST_INFO(xmac_index);
2700	else
2701		reg = BMAC_HOST_INFO(bmac_index);
2702	__set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
2703	return 0;
2704}
2705
2706static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
2707					 int mac_pref)
2708{
2709	return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
2710}
2711
2712static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
2713					   int mac_pref)
2714{
2715	return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
2716}
2717
2718static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
2719				     int table_num, int mac_pref)
2720{
2721	if (idx >= niu_num_alt_addr(np))
2722		return -EINVAL;
2723	return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
2724}
2725
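/* Each VLAN table entry carries two parity bits: PARITY0 covers the
 * port 0/1 fields in bits 7:0 and PARITY1 the port 2/3 fields in
 * bits 15:8.  Each bit is set so that the covered byte plus the
 * parity bit has even weight, and both are recomputed on every
 * rewrite of an entry.
 */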
2726static u64 vlan_entry_set_parity(u64 reg_val)
2727{
2728	u64 port01_mask;
2729	u64 port23_mask;
2730
2731	port01_mask = 0x00ff;
2732	port23_mask = 0xff00;
2733
2734	if (hweight64(reg_val & port01_mask) & 1)
2735		reg_val |= ENET_VLAN_TBL_PARITY0;
2736	else
2737		reg_val &= ~ENET_VLAN_TBL_PARITY0;
2738
2739	if (hweight64(reg_val & port23_mask) & 1)
2740		reg_val |= ENET_VLAN_TBL_PARITY1;
2741	else
2742		reg_val &= ~ENET_VLAN_TBL_PARITY1;
2743
2744	return reg_val;
2745}
2746
2747static void vlan_tbl_write(struct niu *np, unsigned long index,
2748			   int port, int vpr, int rdc_table)
2749{
2750	u64 reg_val = nr64(ENET_VLAN_TBL(index));
2751
2752	reg_val &= ~((ENET_VLAN_TBL_VPR |
2753		      ENET_VLAN_TBL_VLANRDCTBLN) <<
2754		     ENET_VLAN_TBL_SHIFT(port));
2755	if (vpr)
2756		reg_val |= (ENET_VLAN_TBL_VPR <<
2757			    ENET_VLAN_TBL_SHIFT(port));
2758	reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));
2759
2760	reg_val = vlan_entry_set_parity(reg_val);
2761
2762	nw64(ENET_VLAN_TBL(index), reg_val);
2763}
2764
2765static void vlan_tbl_clear(struct niu *np)
2766{
2767	int i;
2768
2769	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++)
2770		nw64(ENET_VLAN_TBL(i), 0);
2771}
2772
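/* All TCAM accesses follow the same protocol: load the TCAM_KEY /
 * TCAM_KEY_MASK (or associated-data) registers, start the operation
 * by writing an RWC opcode plus the entry index to TCAM_CTL, then
 * poll TCAM_CTL until the STAT bit signals completion.
 */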
2773static int tcam_wait_bit(struct niu *np, u64 bit)
2774{
2775	int limit = 1000;
2776
2777	while (--limit > 0) {
2778		if (nr64(TCAM_CTL) & bit)
2779			break;
2780		udelay(1);
2781	}
2782	if (limit <= 0)
2783		return -ENODEV;
2784
2785	return 0;
2786}
2787
2788static int tcam_flush(struct niu *np, int index)
2789{
2790	nw64(TCAM_KEY_0, 0x00);
2791	nw64(TCAM_KEY_MASK_0, 0xff);
2792	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
2793
2794	return tcam_wait_bit(np, TCAM_CTL_STAT);
2795}
2796
2797#if 0
2798static int tcam_read(struct niu *np, int index,
2799		     u64 *key, u64 *mask)
2800{
2801	int err;
2802
2803	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
2804	err = tcam_wait_bit(np, TCAM_CTL_STAT);
2805	if (!err) {
2806		key[0] = nr64(TCAM_KEY_0);
2807		key[1] = nr64(TCAM_KEY_1);
2808		key[2] = nr64(TCAM_KEY_2);
2809		key[3] = nr64(TCAM_KEY_3);
2810		mask[0] = nr64(TCAM_KEY_MASK_0);
2811		mask[1] = nr64(TCAM_KEY_MASK_1);
2812		mask[2] = nr64(TCAM_KEY_MASK_2);
2813		mask[3] = nr64(TCAM_KEY_MASK_3);
2814	}
2815	return err;
2816}
2817#endif
2818
2819static int tcam_write(struct niu *np, int index,
2820		      u64 *key, u64 *mask)
2821{
2822	nw64(TCAM_KEY_0, key[0]);
2823	nw64(TCAM_KEY_1, key[1]);
2824	nw64(TCAM_KEY_2, key[2]);
2825	nw64(TCAM_KEY_3, key[3]);
2826	nw64(TCAM_KEY_MASK_0, mask[0]);
2827	nw64(TCAM_KEY_MASK_1, mask[1]);
2828	nw64(TCAM_KEY_MASK_2, mask[2]);
2829	nw64(TCAM_KEY_MASK_3, mask[3]);
2830	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
2831
2832	return tcam_wait_bit(np, TCAM_CTL_STAT);
2833}
2834
2835#if 0
2836static int tcam_assoc_read(struct niu *np, int index, u64 *data)
2837{
2838	int err;
2839
2840	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
2841	err = tcam_wait_bit(np, TCAM_CTL_STAT);
2842	if (!err)
2843		*data = nr64(TCAM_KEY_1);
2844
2845	return err;
2846}
2847#endif
2848
2849static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
2850{
2851	nw64(TCAM_KEY_1, assoc_data);
2852	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));
2853
2854	return tcam_wait_bit(np, TCAM_CTL_STAT);
2855}
2856
2857static void tcam_enable(struct niu *np, int on)
2858{
2859	u64 val = nr64(FFLP_CFG_1);
2860
2861	if (on)
2862		val &= ~FFLP_CFG_1_TCAM_DIS;
2863	else
2864		val |= FFLP_CFG_1_TCAM_DIS;
2865	nw64(FFLP_CFG_1, val);
2866}
2867
2868static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
2869{
2870	u64 val = nr64(FFLP_CFG_1);
2871
2872	val &= ~(FFLP_CFG_1_FFLPINITDONE |
2873		 FFLP_CFG_1_CAMLAT |
2874		 FFLP_CFG_1_CAMRATIO);
2875	val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
2876	val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
2877	nw64(FFLP_CFG_1, val);
2878
2879	val = nr64(FFLP_CFG_1);
2880	val |= FFLP_CFG_1_FFLPINITDONE;
2881	nw64(FFLP_CFG_1, val);
2882}
2883
2884static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
2885				      int on)
2886{
2887	unsigned long reg;
2888	u64 val;
2889
2890	if (class < CLASS_CODE_ETHERTYPE1 ||
2891	    class > CLASS_CODE_ETHERTYPE2)
2892		return -EINVAL;
2893
2894	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
2895	val = nr64(reg);
2896	if (on)
2897		val |= L2_CLS_VLD;
2898	else
2899		val &= ~L2_CLS_VLD;
2900	nw64(reg, val);
2901
2902	return 0;
2903}
2904
2905#if 0
2906static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
2907				   u64 ether_type)
2908{
2909	unsigned long reg;
2910	u64 val;
2911
2912	if (class < CLASS_CODE_ETHERTYPE1 ||
2913	    class > CLASS_CODE_ETHERTYPE2 ||
2914	    (ether_type & ~(u64)0xffff) != 0)
2915		return -EINVAL;
2916
2917	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
2918	val = nr64(reg);
2919	val &= ~L2_CLS_ETYPE;
2920	val |= (ether_type << L2_CLS_ETYPE_SHIFT);
2921	nw64(reg, val);
2922
2923	return 0;
2924}
2925#endif
2926
2927static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
2928				     int on)
2929{
2930	unsigned long reg;
2931	u64 val;
2932
2933	if (class < CLASS_CODE_USER_PROG1 ||
2934	    class > CLASS_CODE_USER_PROG4)
2935		return -EINVAL;
2936
2937	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
2938	val = nr64(reg);
2939	if (on)
2940		val |= L3_CLS_VALID;
2941	else
2942		val &= ~L3_CLS_VALID;
2943	nw64(reg, val);
2944
2945	return 0;
2946}
2947
2948static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
2949				  int ipv6, u64 protocol_id,
2950				  u64 tos_mask, u64 tos_val)
2951{
2952	unsigned long reg;
2953	u64 val;
2954
2955	if (class < CLASS_CODE_USER_PROG1 ||
2956	    class > CLASS_CODE_USER_PROG4 ||
2957	    (protocol_id & ~(u64)0xff) != 0 ||
2958	    (tos_mask & ~(u64)0xff) != 0 ||
2959	    (tos_val & ~(u64)0xff) != 0)
2960		return -EINVAL;
2961
2962	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
2963	val = nr64(reg);
2964	val &= ~(L3_CLS_IPVER | L3_CLS_PID |
2965		 L3_CLS_TOSMASK | L3_CLS_TOS);
2966	if (ipv6)
2967		val |= L3_CLS_IPVER;
2968	val |= (protocol_id << L3_CLS_PID_SHIFT);
2969	val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
2970	val |= (tos_val << L3_CLS_TOS_SHIFT);
2971	nw64(reg, val);
2972
2973	return 0;
2974}
2975
2976static int tcam_early_init(struct niu *np)
2977{
2978	unsigned long i;
2979	int err;
2980
2981	tcam_enable(np, 0);
2982	tcam_set_lat_and_ratio(np,
2983			       DEFAULT_TCAM_LATENCY,
2984			       DEFAULT_TCAM_ACCESS_RATIO);
2985	for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
2986		err = tcam_user_eth_class_enable(np, i, 0);
2987		if (err)
2988			return err;
2989	}
2990	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
2991		err = tcam_user_ip_class_enable(np, i, 0);
2992		if (err)
2993			return err;
2994	}
2995
2996	return 0;
2997}
2998
2999static int tcam_flush_all(struct niu *np)
3000{
3001	unsigned long i;
3002
3003	for (i = 0; i < np->parent->tcam_num_entries; i++) {
3004		int err = tcam_flush(np, i);
3005		if (err)
3006			return err;
3007	}
3008	return 0;
3009}
3010
3011static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
3012{
3013	return (u64)index | (num_entries == 1 ? HASH_TBL_ADDR_AUTOINC : 0);
3014}
3015
3016#if 0
3017static int hash_read(struct niu *np, unsigned long partition,
3018		     unsigned long index, unsigned long num_entries,
3019		     u64 *data)
3020{
3021	u64 val = hash_addr_regval(index, num_entries);
3022	unsigned long i;
3023
3024	if (partition >= FCRAM_NUM_PARTITIONS ||
	    index + (num_entries * 8) > FCRAM_SIZE)
3026		return -EINVAL;
3027
3028	nw64(HASH_TBL_ADDR(partition), val);
3029	for (i = 0; i < num_entries; i++)
3030		data[i] = nr64(HASH_TBL_DATA(partition));
3031
3032	return 0;
3033}
3034#endif
3035
3036static int hash_write(struct niu *np, unsigned long partition,
3037		      unsigned long index, unsigned long num_entries,
3038		      u64 *data)
3039{
3040	u64 val = hash_addr_regval(index, num_entries);
3041	unsigned long i;
3042
3043	if (partition >= FCRAM_NUM_PARTITIONS ||
3044	    index + (num_entries * 8) > FCRAM_SIZE)
3045		return -EINVAL;
3046
3047	nw64(HASH_TBL_ADDR(partition), val);
3048	for (i = 0; i < num_entries; i++)
3049		nw64(HASH_TBL_DATA(partition), data[i]);
3050
3051	return 0;
3052}
3053
3054static void fflp_reset(struct niu *np)
3055{
3056	u64 val;
3057
3058	nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
3059	udelay(10);
3060	nw64(FFLP_CFG_1, 0);
3061
3062	val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
3063	nw64(FFLP_CFG_1, val);
3064}
3065
3066static void fflp_set_timings(struct niu *np)
3067{
3068	u64 val = nr64(FFLP_CFG_1);
3069
3070	val &= ~FFLP_CFG_1_FFLPINITDONE;
3071	val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
3072	nw64(FFLP_CFG_1, val);
3073
3074	val = nr64(FFLP_CFG_1);
3075	val |= FFLP_CFG_1_FFLPINITDONE;
3076	nw64(FFLP_CFG_1, val);
3077
3078	val = nr64(FCRAM_REF_TMR);
3079	val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
3080	val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
3081	val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
3082	nw64(FCRAM_REF_TMR, val);
3083}
3084
3085static int fflp_set_partition(struct niu *np, u64 partition,
3086			      u64 mask, u64 base, int enable)
3087{
3088	unsigned long reg;
3089	u64 val;
3090
3091	if (partition >= FCRAM_NUM_PARTITIONS ||
3092	    (mask & ~(u64)0x1f) != 0 ||
3093	    (base & ~(u64)0x1f) != 0)
3094		return -EINVAL;
3095
3096	reg = FLW_PRT_SEL(partition);
3097
3098	val = nr64(reg);
3099	val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
3100	val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
3101	val |= (base << FLW_PRT_SEL_BASE_SHIFT);
3102	if (enable)
3103		val |= FLW_PRT_SEL_EXT;
3104	nw64(reg, val);
3105
3106	return 0;
3107}
3108
3109static int fflp_disable_all_partitions(struct niu *np)
3110{
3111	unsigned long i;
3112
3113	for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
		int err = fflp_set_partition(np, i, 0, 0, 0);
3115		if (err)
3116			return err;
3117	}
3118	return 0;
3119}
3120
3121static void fflp_llcsnap_enable(struct niu *np, int on)
3122{
3123	u64 val = nr64(FFLP_CFG_1);
3124
3125	if (on)
3126		val |= FFLP_CFG_1_LLCSNAP;
3127	else
3128		val &= ~FFLP_CFG_1_LLCSNAP;
3129	nw64(FFLP_CFG_1, val);
3130}
3131
3132static void fflp_errors_enable(struct niu *np, int on)
3133{
3134	u64 val = nr64(FFLP_CFG_1);
3135
3136	if (on)
3137		val &= ~FFLP_CFG_1_ERRORDIS;
3138	else
3139		val |= FFLP_CFG_1_ERRORDIS;
3140	nw64(FFLP_CFG_1, val);
3141}
3142
3143static int fflp_hash_clear(struct niu *np)
3144{
3145	struct fcram_hash_ipv4 ent;
3146	unsigned long i;
3147
3148	/* IPV4 hash entry with valid bit clear, rest is don't care.  */
3149	memset(&ent, 0, sizeof(ent));
3150	ent.header = HASH_HEADER_EXT;
3151
3152	for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
3153		int err = hash_write(np, 0, i, 1, (u64 *) &ent);
3154		if (err)
3155			return err;
3156	}
3157	return 0;
3158}
3159
3160static int fflp_early_init(struct niu *np)
3161{
3162	struct niu_parent *parent;
3163	unsigned long flags;
3164	int err;
3165
3166	niu_lock_parent(np, flags);
3167
3168	parent = np->parent;
3169	err = 0;
3170	if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
3171		if (np->parent->plat_type != PLAT_TYPE_NIU) {
3172			fflp_reset(np);
3173			fflp_set_timings(np);
3174			err = fflp_disable_all_partitions(np);
3175			if (err) {
3176				netif_printk(np, probe, KERN_DEBUG, np->dev,
3177					     "fflp_disable_all_partitions failed, err=%d\n",
3178					     err);
3179				goto out;
3180			}
3181		}
3182
3183		err = tcam_early_init(np);
3184		if (err) {
3185			netif_printk(np, probe, KERN_DEBUG, np->dev,
3186				     "tcam_early_init failed, err=%d\n", err);
3187			goto out;
3188		}
3189		fflp_llcsnap_enable(np, 1);
3190		fflp_errors_enable(np, 0);
3191		nw64(H1POLY, 0);
3192		nw64(H2POLY, 0);
3193
3194		err = tcam_flush_all(np);
3195		if (err) {
3196			netif_printk(np, probe, KERN_DEBUG, np->dev,
3197				     "tcam_flush_all failed, err=%d\n", err);
3198			goto out;
3199		}
3200		if (np->parent->plat_type != PLAT_TYPE_NIU) {
3201			err = fflp_hash_clear(np);
3202			if (err) {
3203				netif_printk(np, probe, KERN_DEBUG, np->dev,
3204					     "fflp_hash_clear failed, err=%d\n",
3205					     err);
3206				goto out;
3207			}
3208		}
3209
3210		vlan_tbl_clear(np);
3211
3212		parent->flags |= PARENT_FLGS_CLS_HWINIT;
3213	}
3214out:
3215	niu_unlock_parent(np, flags);
3216	return err;
3217}
3218
3219static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
3220{
3221	if (class_code < CLASS_CODE_USER_PROG1 ||
3222	    class_code > CLASS_CODE_SCTP_IPV6)
3223		return -EINVAL;
3224
3225	nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
3226	return 0;
3227}
3228
3229static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
3230{
3231	if (class_code < CLASS_CODE_USER_PROG1 ||
3232	    class_code > CLASS_CODE_SCTP_IPV6)
3233		return -EINVAL;
3234
3235	nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
3236	return 0;
3237}
3238
3239/* Entries for the ports are interleaved in the TCAM */
3240static u16 tcam_get_index(struct niu *np, u16 idx)
3241{
3242	/* One entry reserved for IP fragment rule */
3243	if (idx >= (np->clas.tcam_sz - 1))
3244		idx = 0;
3245	return np->clas.tcam_top + ((idx+1) * np->parent->num_ports);
3246}
3247
3248static u16 tcam_get_size(struct niu *np)
3249{
3250	/* One entry reserved for IP fragment rule */
3251	return np->clas.tcam_sz - 1;
3252}
3253
3254static u16 tcam_get_valid_entry_cnt(struct niu *np)
3255{
3256	/* One entry reserved for IP fragment rule */
3257	return np->clas.tcam_valid_entries - 1;
3258}
3259
3260static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
3261			      u32 offset, u32 size, u32 truesize)
3262{
3263	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, size);
3264
3265	skb->len += size;
3266	skb->data_len += size;
3267	skb->truesize += truesize;
3268}
3269
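/* RX buffer pages are tracked in a small hash table keyed on the
 * hashed DMA address.  The page's base DMA address is kept in
 * page->index and the hash-chain link is stashed in page->mapping,
 * so no separate per-page bookkeeping has to be allocated.
 */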
3270static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
3271{
3272	a >>= PAGE_SHIFT;
3273	a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
3274
3275	return a & (MAX_RBR_RING_SIZE - 1);
3276}
3277
3278static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
3279				    struct page ***link)
3280{
3281	unsigned int h = niu_hash_rxaddr(rp, addr);
3282	struct page *p, **pp;
3283
3284	addr &= PAGE_MASK;
3285	pp = &rp->rxhash[h];
3286	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
3287		if (p->index == addr) {
3288			*link = pp;
3289			goto found;
3290		}
3291	}
3292	BUG();
3293
3294found:
3295	return p;
3296}
3297
3298static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
3299{
3300	unsigned int h = niu_hash_rxaddr(rp, base);
3301
3302	page->index = base;
3303	page->mapping = (struct address_space *) rp->rxhash[h];
3304	rp->rxhash[h] = page;
3305}
3306
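/* Map a fresh page and slice it into rbr_blocks_per_page receive
 * buffers, writing one RBR descriptor per block.  The page reference
 * count is raised up front so that each block effectively owns its
 * own reference to the page.
 */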
3307static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
3308			    gfp_t mask, int start_index)
3309{
3310	struct page *page;
3311	u64 addr;
3312	int i;
3313
3314	page = alloc_page(mask);
3315	if (!page)
3316		return -ENOMEM;
3317
3318	addr = np->ops->map_page(np->device, page, 0,
3319				 PAGE_SIZE, DMA_FROM_DEVICE);
3320	if (!addr) {
3321		__free_page(page);
3322		return -ENOMEM;
3323	}
3324
3325	niu_hash_page(rp, page, addr);
3326	if (rp->rbr_blocks_per_page > 1)
3327		page_ref_add(page, rp->rbr_blocks_per_page - 1);
3328
3329	for (i = 0; i < rp->rbr_blocks_per_page; i++) {
3330		__le32 *rbr = &rp->rbr[start_index + i];
3331
3332		*rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
3333		addr += rp->rbr_block_size;
3334	}
3335
3336	return 0;
3337}
3338
3339static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
3340{
3341	int index = rp->rbr_index;
3342
3343	rp->rbr_pending++;
3344	if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
3345		int err = niu_rbr_add_page(np, rp, mask, index);
3346
3347		if (unlikely(err)) {
3348			rp->rbr_pending--;
3349			return;
3350		}
3351
3352		rp->rbr_index += rp->rbr_blocks_per_page;
3353		BUG_ON(rp->rbr_index > rp->rbr_table_size);
3354		if (rp->rbr_index == rp->rbr_table_size)
3355			rp->rbr_index = 0;
3356
3357		if (rp->rbr_pending >= rp->rbr_kick_thresh) {
3358			nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
3359			rp->rbr_pending = 0;
3360		}
3361	}
3362}
3363
3364static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
3365{
3366	unsigned int index = rp->rcr_index;
3367	int num_rcr = 0;
3368
3369	rp->rx_dropped++;
3370	while (1) {
3371		struct page *page, **link;
3372		u64 addr, val;
3373		u32 rcr_size;
3374
3375		num_rcr++;
3376
3377		val = le64_to_cpup(&rp->rcr[index]);
3378		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
3379			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
3380		page = niu_find_rxpage(rp, addr, &link);
3381
3382		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
3383					 RCR_ENTRY_PKTBUFSZ_SHIFT];
3384		if ((page->index + PAGE_SIZE) - rcr_size == addr) {
3385			*link = (struct page *) page->mapping;
3386			np->ops->unmap_page(np->device, page->index,
3387					    PAGE_SIZE, DMA_FROM_DEVICE);
3388			page->index = 0;
3389			page->mapping = NULL;
3390			__free_page(page);
3391			rp->rbr_refill_pending++;
3392		}
3393
3394		index = NEXT_RCR(rp, index);
3395		if (!(val & RCR_ENTRY_MULTI))
3396			break;
	}
3399	rp->rcr_index = index;
3400
3401	return num_rcr;
3402}
3403
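/* Build an skb for the packet at the current RCR index.  A single
 * packet may span several RCR entries (flagged by RCR_ENTRY_MULTI);
 * each entry's buffer is attached as a page fragment, after which
 * the packet header is pulled into the linear area and stripped.
 */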
3404static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3405			      struct rx_ring_info *rp)
3406{
3407	unsigned int index = rp->rcr_index;
3408	struct rx_pkt_hdr1 *rh;
3409	struct sk_buff *skb;
3410	int len, num_rcr;
3411
3412	skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
3413	if (unlikely(!skb))
3414		return niu_rx_pkt_ignore(np, rp);
3415
3416	num_rcr = 0;
3417	while (1) {
3418		struct page *page, **link;
3419		u32 rcr_size, append_size;
3420		u64 addr, val, off;
3421
3422		num_rcr++;
3423
3424		val = le64_to_cpup(&rp->rcr[index]);
3425
3426		len = (val & RCR_ENTRY_L2_LEN) >>
3427			RCR_ENTRY_L2_LEN_SHIFT;
3428		append_size = len + ETH_HLEN + ETH_FCS_LEN;
3429
3430		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
3431			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
3432		page = niu_find_rxpage(rp, addr, &link);
3433
3434		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
3435					 RCR_ENTRY_PKTBUFSZ_SHIFT];
3436
3437		off = addr & ~PAGE_MASK;
3438		if (num_rcr == 1) {
3439			int ptype;
3440
3441			ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
3442			if ((ptype == RCR_PKT_TYPE_TCP ||
3443			     ptype == RCR_PKT_TYPE_UDP) &&
3444			    !(val & (RCR_ENTRY_NOPORT |
3445				     RCR_ENTRY_ERROR)))
3446				skb->ip_summed = CHECKSUM_UNNECESSARY;
3447			else
3448				skb_checksum_none_assert(skb);
3449		} else if (!(val & RCR_ENTRY_MULTI))
3450			append_size = append_size - skb->len;
3451
3452		niu_rx_skb_append(skb, page, off, append_size, rcr_size);
3453		if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
3454			*link = (struct page *) page->mapping;
3455			np->ops->unmap_page(np->device, page->index,
3456					    PAGE_SIZE, DMA_FROM_DEVICE);
3457			page->index = 0;
3458			page->mapping = NULL;
3459			rp->rbr_refill_pending++;
3460		} else
3461			get_page(page);
3462
3463		index = NEXT_RCR(rp, index);
3464		if (!(val & RCR_ENTRY_MULTI))
3465			break;
	}
3468	rp->rcr_index = index;
3469
3470	len += sizeof(*rh);
3471	len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN);
3472	__pskb_pull_tail(skb, len);
3473
3474	rh = (struct rx_pkt_hdr1 *) skb->data;
3475	if (np->dev->features & NETIF_F_RXHASH)
3476		skb_set_hash(skb,
3477			     ((u32)rh->hashval2_0 << 24 |
3478			      (u32)rh->hashval2_1 << 16 |
3479			      (u32)rh->hashval1_1 << 8 |
3480			      (u32)rh->hashval1_2 << 0),
3481			     PKT_HASH_TYPE_L3);
3482	skb_pull(skb, sizeof(*rh));
3483
3484	rp->rx_packets++;
3485	rp->rx_bytes += skb->len;
3486
3487	skb->protocol = eth_type_trans(skb, np->dev);
3488	skb_record_rx_queue(skb, rp->rx_channel);
3489	napi_gro_receive(napi, skb);
3490
3491	return num_rcr;
3492}
3493
3494static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
3495{
3496	int blocks_per_page = rp->rbr_blocks_per_page;
3497	int err, index = rp->rbr_index;
3498
3499	err = 0;
3500	while (index < (rp->rbr_table_size - blocks_per_page)) {
3501		err = niu_rbr_add_page(np, rp, mask, index);
3502		if (unlikely(err))
3503			break;
3504
3505		index += blocks_per_page;
3506	}
3507
3508	rp->rbr_index = index;
3509	return err;
3510}
3511
3512static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
3513{
3514	int i;
3515
3516	for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
3517		struct page *page;
3518
3519		page = rp->rxhash[i];
3520		while (page) {
3521			struct page *next = (struct page *) page->mapping;
3522			u64 base = page->index;
3523
3524			np->ops->unmap_page(np->device, base, PAGE_SIZE,
3525					    DMA_FROM_DEVICE);
3526			page->index = 0;
3527			page->mapping = NULL;
3528
3529			__free_page(page);
3530
3531			page = next;
3532		}
3533	}
3534
3535	for (i = 0; i < rp->rbr_table_size; i++)
3536		rp->rbr[i] = cpu_to_le32(0);
3537	rp->rbr_index = 0;
3538}
3539
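/* Unmap and free one completed TX skb: credit its byte count from
 * the packet header flags, unmap the linear area and all fragments,
 * and step the consumer index past every descriptor the packet
 * consumed, returning the new index.
 */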
3540static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
3541{
3542	struct tx_buff_info *tb = &rp->tx_buffs[idx];
3543	struct sk_buff *skb = tb->skb;
3544	struct tx_pkt_hdr *tp;
3545	u64 tx_flags;
3546	int i, len;
3547
3548	tp = (struct tx_pkt_hdr *) skb->data;
3549	tx_flags = le64_to_cpup(&tp->flags);
3550
3551	rp->tx_packets++;
3552	rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
3553			 ((tx_flags & TXHDR_PAD) / 2));
3554
3555	len = skb_headlen(skb);
3556	np->ops->unmap_single(np->device, tb->mapping,
3557			      len, DMA_TO_DEVICE);
3558
3559	if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
3560		rp->mark_pending--;
3561
3562	tb->skb = NULL;
3563	do {
3564		idx = NEXT_TX(rp, idx);
3565		len -= MAX_TX_DESC_LEN;
3566	} while (len > 0);
3567
3568	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3569		tb = &rp->tx_buffs[idx];
3570		BUG_ON(tb->skb != NULL);
3571		np->ops->unmap_page(np->device, tb->mapping,
3572				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
3573				    DMA_TO_DEVICE);
3574		idx = NEXT_TX(rp, idx);
3575	}
3576
3577	dev_kfree_skb(skb);
3578
3579	return idx;
3580}
3581
3582#define NIU_TX_WAKEUP_THRESH(rp)		((rp)->pending / 4)
3583
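/* Reclaim completed TX packets on one ring.  Completion is tracked
 * via the hardware packet counter snapshot in TX_CS; the delta from
 * the previously seen count is taken modulo the counter width, so
 * 16-bit wraparound is handled naturally.  The queue is woken once
 * enough descriptors are free again.
 */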
3584static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
3585{
3586	struct netdev_queue *txq;
3587	u16 pkt_cnt, tmp;
3588	int cons, index;
3589	u64 cs;
3590
3591	index = (rp - np->tx_rings);
3592	txq = netdev_get_tx_queue(np->dev, index);
3593
3594	cs = rp->tx_cs;
3595	if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
3596		goto out;
3597
3598	tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
3599	pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
3600		(TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);
3601
3602	rp->last_pkt_cnt = tmp;
3603
3604	cons = rp->cons;
3605
3606	netif_printk(np, tx_done, KERN_DEBUG, np->dev,
3607		     "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
3608
3609	while (pkt_cnt--)
3610		cons = release_tx_packet(np, rp, cons);
3611
3612	rp->cons = cons;
3613	smp_mb();
3614
3615out:
3616	if (unlikely(netif_tx_queue_stopped(txq) &&
3617		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
3618		__netif_tx_lock(txq, smp_processor_id());
3619		if (netif_tx_queue_stopped(txq) &&
3620		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
3621			netif_tx_wake_queue(txq);
3622		__netif_tx_unlock(txq);
3623	}
3624}
3625
3626static inline void niu_sync_rx_discard_stats(struct niu *np,
3627					     struct rx_ring_info *rp,
3628					     const int limit)
3629{
	/* This elaborate scheme is needed for reading the RX discard
	 * counters, as they are only 16-bit and can overflow quickly.
	 * The overflow indication bit is of no use, because the
	 * counter value does not wrap around but instead saturates at
	 * the maximum, 0xFFFF.
	 *
	 * In theory and in practice, counts can be lost between the
	 * nr64() read and the nw64() clear of a counter.  For this
	 * reason, the number of counter clearings via nw64() is
	 * limited/reduced through the limit parameter.
	 */
3641	int rx_channel = rp->rx_channel;
3642	u32 misc, wred;
3643
	/* RXMISC (Receive Miscellaneous Discard Count) covers the
	 * following discard events: IPP (Input Port Process) drops,
	 * FFLP/TCAM drops, a full RCR (Receive Completion Ring), and
	 * an empty RBR (Receive Block Ring) prefetch buffer.
	 */
3649	misc = nr64(RXMISC(rx_channel));
3650	if (unlikely((misc & RXMISC_COUNT) > limit)) {
3651		nw64(RXMISC(rx_channel), 0);
3652		rp->rx_errors += misc & RXMISC_COUNT;
3653
3654		if (unlikely(misc & RXMISC_OFLOW))
3655			dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n",
3656				rx_channel);
3657
3658		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
3659			     "rx-%d: MISC drop=%u over=%u\n",
			     rx_channel, misc, misc - limit);
3661	}
3662
3663	/* WRED (Weighted Random Early Discard) by hardware */
3664	wred = nr64(RED_DIS_CNT(rx_channel));
3665	if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) {
3666		nw64(RED_DIS_CNT(rx_channel), 0);
3667		rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
3668
3669		if (unlikely(wred & RED_DIS_CNT_OFLOW))
3670			dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel);
3671
3672		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
3673			     "rx-%d: WRED drop=%u over=%u\n",
			     rx_channel, wred, wred - limit);
3675	}
3676}
3677
3678static int niu_rx_work(struct napi_struct *napi, struct niu *np,
3679		       struct rx_ring_info *rp, int budget)
3680{
3681	int qlen, rcr_done = 0, work_done = 0;
3682	struct rxdma_mailbox *mbox = rp->mbox;
3683	u64 stat;
3684
3685#if 1
3686	stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
3687	qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
3688#else
3689	stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
3690	qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
3691#endif
3692	mbox->rx_dma_ctl_stat = 0;
3693	mbox->rcrstat_a = 0;
3694
3695	netif_printk(np, rx_status, KERN_DEBUG, np->dev,
3696		     "%s(chan[%d]), stat[%llx] qlen=%d\n",
3697		     __func__, rp->rx_channel, (unsigned long long)stat, qlen);
3698
3699	rcr_done = work_done = 0;
3700	qlen = min(qlen, budget);
3701	while (work_done < qlen) {
3702		rcr_done += niu_process_rx_pkt(napi, np, rp);
3703		work_done++;
3704	}
3705
3706	if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
3707		unsigned int i;
3708
3709		for (i = 0; i < rp->rbr_refill_pending; i++)
3710			niu_rbr_refill(np, rp, GFP_ATOMIC);
3711		rp->rbr_refill_pending = 0;
3712	}
3713
3714	stat = (RX_DMA_CTL_STAT_MEX |
3715		((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
3716		((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));
3717
3718	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);
3719
	/* Only sync discard stats when qlen indicates potential for drops */
3721	if (qlen > 10)
3722		niu_sync_rx_discard_stats(np, rp, 0x7FFF);
3723
3724	return work_done;
3725}
3726
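/* NAPI poll core.  The latched LDSV0 value packs the TX channel
 * vector into the upper 32 bits and the RX channel vector into the
 * lower 32; service every flagged channel, sharing the budget
 * across the RX rings.
 */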
3727static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
3728{
3729	u64 v0 = lp->v0;
3730	u32 tx_vec = (v0 >> 32);
3731	u32 rx_vec = (v0 & 0xffffffff);
3732	int i, work_done = 0;
3733
3734	netif_printk(np, intr, KERN_DEBUG, np->dev,
3735		     "%s() v0[%016llx]\n", __func__, (unsigned long long)v0);
3736
3737	for (i = 0; i < np->num_tx_rings; i++) {
3738		struct tx_ring_info *rp = &np->tx_rings[i];
3739		if (tx_vec & (1 << rp->tx_channel))
3740			niu_tx_work(np, rp);
3741		nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
3742	}
3743
3744	for (i = 0; i < np->num_rx_rings; i++) {
3745		struct rx_ring_info *rp = &np->rx_rings[i];
3746
3747		if (rx_vec & (1 << rp->rx_channel)) {
3748			int this_work_done;
3749
3750			this_work_done = niu_rx_work(&lp->napi, np, rp,
3751						     budget);
3752
3753			budget -= this_work_done;
3754			work_done += this_work_done;
3755		}
3756		nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
3757	}
3758
3759	return work_done;
3760}
3761
3762static int niu_poll(struct napi_struct *napi, int budget)
3763{
3764	struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
3765	struct niu *np = lp->np;
3766	int work_done;
3767
3768	work_done = niu_poll_core(np, lp, budget);
3769
3770	if (work_done < budget) {
3771		napi_complete_done(napi, work_done);
3772		niu_ldg_rearm(np, lp, 1);
3773	}
3774	return work_done;
3775}
3776
3777static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
3778				  u64 stat)
3779{
3780	netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel);
3781
3782	if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
3783		pr_cont("RBR_TMOUT ");
3784	if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
3785		pr_cont("RSP_CNT ");
3786	if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
3787		pr_cont("BYTE_EN_BUS ");
3788	if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
3789		pr_cont("RSP_DAT ");
3790	if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
3791		pr_cont("RCR_ACK ");
3792	if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
3793		pr_cont("RCR_SHA_PAR ");
3794	if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
3795		pr_cont("RBR_PRE_PAR ");
3796	if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
3797		pr_cont("CONFIG ");
3798	if (stat & RX_DMA_CTL_STAT_RCRINCON)
3799		pr_cont("RCRINCON ");
3800	if (stat & RX_DMA_CTL_STAT_RCRFULL)
3801		pr_cont("RCRFULL ");
3802	if (stat & RX_DMA_CTL_STAT_RBRFULL)
3803		pr_cont("RBRFULL ");
3804	if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
3805		pr_cont("RBRLOGPAGE ");
3806	if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
3807		pr_cont("CFIGLOGPAGE ");
3808	if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
		pr_cont("DC_FIFO ");
3810
3811	pr_cont(")\n");
3812}
3813
3814static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
3815{
3816	u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
	int err = 0;

3820	if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
3821		    RX_DMA_CTL_STAT_PORT_FATAL))
3822		err = -EINVAL;
3823
3824	if (err) {
3825		netdev_err(np->dev, "RX channel %u error, stat[%llx]\n",
3826			   rp->rx_channel,
3827			   (unsigned long long) stat);
3828
3829		niu_log_rxchan_errors(np, rp, stat);
3830	}
3831
3832	nw64(RX_DMA_CTL_STAT(rp->rx_channel),
3833	     stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);
3834
3835	return err;
3836}
3837
3838static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
3839				  u64 cs)
3840{
3841	netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel);
3842
3843	if (cs & TX_CS_MBOX_ERR)
3844		pr_cont("MBOX ");
3845	if (cs & TX_CS_PKT_SIZE_ERR)
3846		pr_cont("PKT_SIZE ");
3847	if (cs & TX_CS_TX_RING_OFLOW)
3848		pr_cont("TX_RING_OFLOW ");
3849	if (cs & TX_CS_PREF_BUF_PAR_ERR)
3850		pr_cont("PREF_BUF_PAR ");
3851	if (cs & TX_CS_NACK_PREF)
3852		pr_cont("NACK_PREF ");
3853	if (cs & TX_CS_NACK_PKT_RD)
3854		pr_cont("NACK_PKT_RD ");
3855	if (cs & TX_CS_CONF_PART_ERR)
3856		pr_cont("CONF_PART ");
3857	if (cs & TX_CS_PKT_PRT_ERR)
3858		pr_cont("PKT_PTR ");
3859
3860	pr_cont(")\n");
3861}
3862
3863static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
3864{
3865	u64 cs, logh, logl;
3866
3867	cs = nr64(TX_CS(rp->tx_channel));
3868	logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
3869	logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
3870
3871	netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
3872		   rp->tx_channel,
3873		   (unsigned long long)cs,
3874		   (unsigned long long)logh,
3875		   (unsigned long long)logl);
3876
3877	niu_log_txchan_errors(np, rp, cs);
3878
3879	return -ENODEV;
3880}
3881
3882static int niu_mif_interrupt(struct niu *np)
3883{
3884	u64 mif_status = nr64(MIF_STATUS);
3885	int phy_mdint = 0;
3886
3887	if (np->flags & NIU_FLAGS_XMAC) {
3888		u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);
3889
3890		if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
3891			phy_mdint = 1;
3892	}
3893
3894	netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n",
3895		   (unsigned long long)mif_status, phy_mdint);
3896
3897	return -ENODEV;
3898}
3899
3900static void niu_xmac_interrupt(struct niu *np)
3901{
3902	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
3903	u64 val;
3904
3905	val = nr64_mac(XTXMAC_STATUS);
3906	if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
3907		mp->tx_frames += TXMAC_FRM_CNT_COUNT;
3908	if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
3909		mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
3910	if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
3911		mp->tx_fifo_errors++;
3912	if (val & XTXMAC_STATUS_TXMAC_OFLOW)
3913		mp->tx_overflow_errors++;
3914	if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
3915		mp->tx_max_pkt_size_errors++;
3916	if (val & XTXMAC_STATUS_TXMAC_UFLOW)
3917		mp->tx_underflow_errors++;
3918
3919	val = nr64_mac(XRXMAC_STATUS);
3920	if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
3921		mp->rx_local_faults++;
3922	if (val & XRXMAC_STATUS_RFLT_DET)
3923		mp->rx_remote_faults++;
3924	if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
3925		mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
3926	if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
3927		mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
3928	if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
3929		mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
3930	if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
3931		mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
3932	if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
3933		mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
3934	if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
3935		mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
3936	if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
3937		mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
3938	if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
3939		mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
3940	if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
3941		mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
3942	if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
3943		mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
3944	if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
3945		mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
3946	if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
3947		mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
3948	if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP)
3949		mp->rx_octets += RXMAC_BT_CNT_COUNT;
3950	if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
3951		mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
3952	if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
3953		mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
3954	if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
3955		mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
3956	if (val & XRXMAC_STATUS_RXUFLOW)
3957		mp->rx_underflows++;
3958	if (val & XRXMAC_STATUS_RXOFLOW)
3959		mp->rx_overflows++;
3960
3961	val = nr64_mac(XMAC_FC_STAT);
3962	if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
3963		mp->pause_off_state++;
3964	if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
3965		mp->pause_on_state++;
3966	if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
3967		mp->pause_received++;
3968}
3969
3970static void niu_bmac_interrupt(struct niu *np)
3971{
3972	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
3973	u64 val;
3974
3975	val = nr64_mac(BTXMAC_STATUS);
3976	if (val & BTXMAC_STATUS_UNDERRUN)
3977		mp->tx_underflow_errors++;
3978	if (val & BTXMAC_STATUS_MAX_PKT_ERR)
3979		mp->tx_max_pkt_size_errors++;
3980	if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
3981		mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
3982	if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
3983		mp->tx_frames += BTXMAC_FRM_CNT_COUNT;
3984
3985	val = nr64_mac(BRXMAC_STATUS);
3986	if (val & BRXMAC_STATUS_OVERFLOW)
3987		mp->rx_overflows++;
3988	if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
3989		mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
3990	if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
3991		mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
3992	if (val & BRXMAC_STATUS_CRC_ERR_EXP)
3993		mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
3994	if (val & BRXMAC_STATUS_LEN_ERR_EXP)
3995		mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;
3996
3997	val = nr64_mac(BMAC_CTRL_STATUS);
3998	if (val & BMAC_CTRL_STATUS_NOPAUSE)
3999		mp->pause_off_state++;
4000	if (val & BMAC_CTRL_STATUS_PAUSE)
4001		mp->pause_on_state++;
4002	if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
4003		mp->pause_received++;
4004}
4005
4006static int niu_mac_interrupt(struct niu *np)
4007{
4008	if (np->flags & NIU_FLAGS_XMAC)
4009		niu_xmac_interrupt(np);
4010	else
4011		niu_bmac_interrupt(np);
4012
4013	return 0;
4014}
4015
4016static void niu_log_device_error(struct niu *np, u64 stat)
4017{
4018	netdev_err(np->dev, "Core device errors ( ");
4019
4020	if (stat & SYS_ERR_MASK_META2)
4021		pr_cont("META2 ");
4022	if (stat & SYS_ERR_MASK_META1)
4023		pr_cont("META1 ");
4024	if (stat & SYS_ERR_MASK_PEU)
4025		pr_cont("PEU ");
4026	if (stat & SYS_ERR_MASK_TXC)
4027		pr_cont("TXC ");
4028	if (stat & SYS_ERR_MASK_RDMC)
4029		pr_cont("RDMC ");
4030	if (stat & SYS_ERR_MASK_TDMC)
4031		pr_cont("TDMC ");
4032	if (stat & SYS_ERR_MASK_ZCP)
4033		pr_cont("ZCP ");
4034	if (stat & SYS_ERR_MASK_FFLP)
4035		pr_cont("FFLP ");
4036	if (stat & SYS_ERR_MASK_IPP)
4037		pr_cont("IPP ");
4038	if (stat & SYS_ERR_MASK_MAC)
4039		pr_cont("MAC ");
4040	if (stat & SYS_ERR_MASK_SMX)
4041		pr_cont("SMX ");
4042
4043	pr_cont(")\n");
4044}
4045
4046static int niu_device_error(struct niu *np)
4047{
4048	u64 stat = nr64(SYS_ERR_STAT);
4049
4050	netdev_err(np->dev, "Core device error, stat[%llx]\n",
4051		   (unsigned long long)stat);
4052
4053	niu_log_device_error(np, stat);
4054
4055	return -ENODEV;
4056}
4057
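/* Slow path for error and management interrupts.  v1 carries the
 * per-channel error bits (RX channels in its low 32 bits, TX
 * channels above them), bit 63 of v0/v1 flags a MIF event, and v2
 * carries the MAC and core device error summaries.
 */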
4058static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
				  u64 v0, u64 v1, u64 v2)
{
4062	int i, err = 0;
4063
4064	lp->v0 = v0;
4065	lp->v1 = v1;
4066	lp->v2 = v2;
4067
4068	if (v1 & 0x00000000ffffffffULL) {
4069		u32 rx_vec = (v1 & 0xffffffff);
4070
4071		for (i = 0; i < np->num_rx_rings; i++) {
4072			struct rx_ring_info *rp = &np->rx_rings[i];
4073
4074			if (rx_vec & (1 << rp->rx_channel)) {
4075				int r = niu_rx_error(np, rp);
4076				if (r) {
4077					err = r;
4078				} else {
4079					if (!v0)
4080						nw64(RX_DMA_CTL_STAT(rp->rx_channel),
4081						     RX_DMA_CTL_STAT_MEX);
4082				}
4083			}
4084		}
4085	}
4086	if (v1 & 0x7fffffff00000000ULL) {
4087		u32 tx_vec = (v1 >> 32) & 0x7fffffff;
4088
4089		for (i = 0; i < np->num_tx_rings; i++) {
4090			struct tx_ring_info *rp = &np->tx_rings[i];
4091
4092			if (tx_vec & (1 << rp->tx_channel)) {
4093				int r = niu_tx_error(np, rp);
4094				if (r)
4095					err = r;
4096			}
4097		}
4098	}
4099	if ((v0 | v1) & 0x8000000000000000ULL) {
4100		int r = niu_mif_interrupt(np);
4101		if (r)
4102			err = r;
4103	}
4104	if (v2) {
4105		if (v2 & 0x01ef) {
4106			int r = niu_mac_interrupt(np);
4107			if (r)
4108				err = r;
4109		}
4110		if (v2 & 0x0210) {
4111			int r = niu_device_error(np);
4112			if (r)
4113				err = r;
4114		}
4115	}
4116
4117	if (err)
4118		niu_enable_interrupts(np, 0);
4119
4120	return err;
4121}
4122
4123static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
4124			    int ldn)
4125{
4126	struct rxdma_mailbox *mbox = rp->mbox;
4127	u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
4128
4129	stat_write = (RX_DMA_CTL_STAT_RCRTHRES |
4130		      RX_DMA_CTL_STAT_RCRTO);
4131	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
4132
4133	netif_printk(np, intr, KERN_DEBUG, np->dev,
4134		     "%s() stat[%llx]\n", __func__, (unsigned long long)stat);
4135}
4136
4137static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
4138			    int ldn)
4139{
4140	rp->tx_cs = nr64(TX_CS(rp->tx_channel));
4141
4142	netif_printk(np, intr, KERN_DEBUG, np->dev,
4143		     "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs);
4144}
4145
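/* Pre-NAPI fast path: mask the interrupts of every logical device
 * mapped to this group and latch the per-channel status registers
 * so the poll loop can process them with device interrupts held
 * off.
 */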
4146static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
4147{
4148	struct niu_parent *parent = np->parent;
4149	u32 rx_vec, tx_vec;
4150	int i;
4151
4152	tx_vec = (v0 >> 32);
4153	rx_vec = (v0 & 0xffffffff);
4154
4155	for (i = 0; i < np->num_rx_rings; i++) {
4156		struct rx_ring_info *rp = &np->rx_rings[i];
4157		int ldn = LDN_RXDMA(rp->rx_channel);
4158
4159		if (parent->ldg_map[ldn] != ldg)
4160			continue;
4161
4162		nw64(LD_IM0(ldn), LD_IM0_MASK);
4163		if (rx_vec & (1 << rp->rx_channel))
4164			niu_rxchan_intr(np, rp, ldn);
4165	}
4166
4167	for (i = 0; i < np->num_tx_rings; i++) {
4168		struct tx_ring_info *rp = &np->tx_rings[i];
4169		int ldn = LDN_TXDMA(rp->tx_channel);
4170
4171		if (parent->ldg_map[ldn] != ldg)
4172			continue;
4173
4174		nw64(LD_IM0(ldn), LD_IM0_MASK);
4175		if (tx_vec & (1 << rp->tx_channel))
4176			niu_txchan_intr(np, rp, ldn);
4177	}
4178}
4179
4180static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
4181			      u64 v0, u64 v1, u64 v2)
4182{
4183	if (likely(napi_schedule_prep(&lp->napi))) {
4184		lp->v0 = v0;
4185		lp->v1 = v1;
4186		lp->v2 = v2;
4187		__niu_fastpath_interrupt(np, lp->ldg_num, v0);
4188		__napi_schedule(&lp->napi);
4189	}
4190}
4191
4192static irqreturn_t niu_interrupt(int irq, void *dev_id)
4193{
4194	struct niu_ldg *lp = dev_id;
4195	struct niu *np = lp->np;
4196	int ldg = lp->ldg_num;
4197	unsigned long flags;
4198	u64 v0, v1, v2;
4199
4200	if (netif_msg_intr(np))
4201		printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)",
4202		       __func__, lp, ldg);
4203
4204	spin_lock_irqsave(&np->lock, flags);
4205
4206	v0 = nr64(LDSV0(ldg));
4207	v1 = nr64(LDSV1(ldg));
4208	v2 = nr64(LDSV2(ldg));
4209
4210	if (netif_msg_intr(np))
4211		pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n",
4212		       (unsigned long long) v0,
4213		       (unsigned long long) v1,
4214		       (unsigned long long) v2);
4215
4216	if (unlikely(!v0 && !v1 && !v2)) {
4217		spin_unlock_irqrestore(&np->lock, flags);
4218		return IRQ_NONE;
4219	}
4220
4221	if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
4222		int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
4223		if (err)
4224			goto out;
4225	}
4226	if (likely(v0 & ~((u64)1 << LDN_MIF)))
4227		niu_schedule_napi(np, lp, v0, v1, v2);
4228	else
4229		niu_ldg_rearm(np, lp, 1);
4230out:
4231	spin_unlock_irqrestore(&np->lock, flags);
4232
4233	return IRQ_HANDLED;
4234}
4235
4236static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
4237{
4238	if (rp->mbox) {
4239		np->ops->free_coherent(np->device,
4240				       sizeof(struct rxdma_mailbox),
4241				       rp->mbox, rp->mbox_dma);
4242		rp->mbox = NULL;
4243	}
4244	if (rp->rcr) {
4245		np->ops->free_coherent(np->device,
4246				       MAX_RCR_RING_SIZE * sizeof(__le64),
4247				       rp->rcr, rp->rcr_dma);
4248		rp->rcr = NULL;
4249		rp->rcr_table_size = 0;
4250		rp->rcr_index = 0;
4251	}
4252	if (rp->rbr) {
4253		niu_rbr_free(np, rp);
4254
4255		np->ops->free_coherent(np->device,
4256				       MAX_RBR_RING_SIZE * sizeof(__le32),
4257				       rp->rbr, rp->rbr_dma);
4258		rp->rbr = NULL;
4259		rp->rbr_table_size = 0;
4260		rp->rbr_index = 0;
4261	}
4262	kfree(rp->rxhash);
4263	rp->rxhash = NULL;
4264}
4265
4266static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
4267{
4268	if (rp->mbox) {
4269		np->ops->free_coherent(np->device,
4270				       sizeof(struct txdma_mailbox),
4271				       rp->mbox, rp->mbox_dma);
4272		rp->mbox = NULL;
4273	}
4274	if (rp->descr) {
4275		int i;
4276
4277		for (i = 0; i < MAX_TX_RING_SIZE; i++) {
4278			if (rp->tx_buffs[i].skb)
4279				(void) release_tx_packet(np, rp, i);
4280		}
4281
4282		np->ops->free_coherent(np->device,
4283				       MAX_TX_RING_SIZE * sizeof(__le64),
4284				       rp->descr, rp->descr_dma);
4285		rp->descr = NULL;
4286		rp->pending = 0;
4287		rp->prod = 0;
4288		rp->cons = 0;
4289		rp->wrap_bit = 0;
4290	}
4291}
4292
4293static void niu_free_channels(struct niu *np)
4294{
4295	int i;
4296
4297	if (np->rx_rings) {
4298		for (i = 0; i < np->num_rx_rings; i++) {
4299			struct rx_ring_info *rp = &np->rx_rings[i];
4300
4301			niu_free_rx_ring_info(np, rp);
4302		}
4303		kfree(np->rx_rings);
4304		np->rx_rings = NULL;
4305		np->num_rx_rings = 0;
4306	}
4307
4308	if (np->tx_rings) {
4309		for (i = 0; i < np->num_tx_rings; i++) {
4310			struct tx_ring_info *rp = &np->tx_rings[i];
4311
4312			niu_free_tx_ring_info(np, rp);
4313		}
4314		kfree(np->tx_rings);
4315		np->tx_rings = NULL;
4316		np->num_tx_rings = 0;
4317	}
4318}
4319
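/* The hardware requires the mailbox, RCR and RBR bases to be 64-byte
 * aligned, hence the alignment check after each coherent allocation
 * below.  Allocations left behind on the error paths are reclaimed by
 * niu_free_channels() in the caller.
 */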
4320static int niu_alloc_rx_ring_info(struct niu *np,
4321				  struct rx_ring_info *rp)
4322{
4323	BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);
4324
4325	rp->rxhash = kcalloc(MAX_RBR_RING_SIZE, sizeof(struct page *),
4326			     GFP_KERNEL);
4327	if (!rp->rxhash)
4328		return -ENOMEM;
4329
4330	rp->mbox = np->ops->alloc_coherent(np->device,
4331					   sizeof(struct rxdma_mailbox),
4332					   &rp->mbox_dma, GFP_KERNEL);
4333	if (!rp->mbox)
4334		return -ENOMEM;
4335	if ((unsigned long)rp->mbox & (64UL - 1)) {
4336		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
4337			   rp->mbox);
4338		return -EINVAL;
4339	}
4340
4341	rp->rcr = np->ops->alloc_coherent(np->device,
4342					  MAX_RCR_RING_SIZE * sizeof(__le64),
4343					  &rp->rcr_dma, GFP_KERNEL);
4344	if (!rp->rcr)
4345		return -ENOMEM;
4346	if ((unsigned long)rp->rcr & (64UL - 1)) {
4347		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
4348			   rp->rcr);
4349		return -EINVAL;
4350	}
4351	rp->rcr_table_size = MAX_RCR_RING_SIZE;
4352	rp->rcr_index = 0;
4353
4354	rp->rbr = np->ops->alloc_coherent(np->device,
4355					  MAX_RBR_RING_SIZE * sizeof(__le32),
4356					  &rp->rbr_dma, GFP_KERNEL);
4357	if (!rp->rbr)
4358		return -ENOMEM;
4359	if ((unsigned long)rp->rbr & (64UL - 1)) {
4360		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
4361			   rp->rbr);
4362		return -EINVAL;
4363	}
4364	rp->rbr_table_size = MAX_RBR_RING_SIZE;
4365	rp->rbr_index = 0;
4366	rp->rbr_pending = 0;
4367
4368	return 0;
4369}
4370
4371static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
4372{
4373	int mtu = np->dev->mtu;
4374
4375	/* These values are recommended by the HW designers for fair
4376	 * utilization of DRR amongst the rings.
4377	 */
4378	rp->max_burst = mtu + 32;
4379	if (rp->max_burst > 4096)
4380		rp->max_burst = 4096;
4381}
4382
4383static int niu_alloc_tx_ring_info(struct niu *np,
4384				  struct tx_ring_info *rp)
4385{
4386	BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);
4387
4388	rp->mbox = np->ops->alloc_coherent(np->device,
4389					   sizeof(struct txdma_mailbox),
4390					   &rp->mbox_dma, GFP_KERNEL);
4391	if (!rp->mbox)
4392		return -ENOMEM;
4393	if ((unsigned long)rp->mbox & (64UL - 1)) {
4394		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
4395			   rp->mbox);
4396		return -EINVAL;
4397	}
4398
4399	rp->descr = np->ops->alloc_coherent(np->device,
4400					    MAX_TX_RING_SIZE * sizeof(__le64),
4401					    &rp->descr_dma, GFP_KERNEL);
4402	if (!rp->descr)
4403		return -ENOMEM;
4404	if ((unsigned long)rp->descr & (64UL - 1)) {
4405		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n",
4406			   rp->descr);
4407		return -EINVAL;
4408	}
4409
4410	rp->pending = MAX_TX_RING_SIZE;
4411	rp->prod = 0;
4412	rp->cons = 0;
4413	rp->wrap_bit = 0;
4414
4415	/* XXX make these configurable... XXX */
4416	rp->mark_freq = rp->pending / 4;
4417
4418	niu_set_max_burst(np, rp);
4419
4420	return 0;
4421}
4422
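/* Pick the RBR block size and the ladder of buffer sizes the channel
 * may carve out of a block.  Index 3 is always the full block size;
 * indices 0-2 provide 256 byte, 1K and (MTU- and page-size-dependent)
 * 2K/4K/8K buffers for smaller frames.
 */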
4423static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
4424{
4425	u16 bss;
4426
4427	bss = min(PAGE_SHIFT, 15);
4428
4429	rp->rbr_block_size = 1 << bss;
4430	rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
4431
4432	rp->rbr_sizes[0] = 256;
4433	rp->rbr_sizes[1] = 1024;
4434	if (np->dev->mtu > ETH_DATA_LEN) {
4435		switch (PAGE_SIZE) {
4436		case 4 * 1024:
4437			rp->rbr_sizes[2] = 4096;
4438			break;
4439
4440		default:
4441			rp->rbr_sizes[2] = 8192;
4442			break;
4443		}
4444	} else {
4445		rp->rbr_sizes[2] = 2048;
4446	}
4447	rp->rbr_sizes[3] = rp->rbr_block_size;
4448}
4449
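/* Each port owns a contiguous slice of the chip's RX and TX DMA
 * channels; its first channel index is the sum of the per-port channel
 * counts of the lower-numbered ports.  The ring counts are published
 * before the ring array pointers (with smp_wmb()) so that the lockless
 * stats readers, which fetch the pointers with READ_ONCE(), observe a
 * valid count whenever they observe a non-NULL ring array.
 */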
4450static int niu_alloc_channels(struct niu *np)
4451{
4452	struct niu_parent *parent = np->parent;
4453	int first_rx_channel, first_tx_channel;
4454	int num_rx_rings, num_tx_rings;
4455	struct rx_ring_info *rx_rings;
4456	struct tx_ring_info *tx_rings;
4457	int i, port, err;
4458
4459	port = np->port;
4460	first_rx_channel = first_tx_channel = 0;
4461	for (i = 0; i < port; i++) {
4462		first_rx_channel += parent->rxchan_per_port[i];
4463		first_tx_channel += parent->txchan_per_port[i];
4464	}
4465
4466	num_rx_rings = parent->rxchan_per_port[port];
4467	num_tx_rings = parent->txchan_per_port[port];
4468
4469	rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
4470			   GFP_KERNEL);
4471	err = -ENOMEM;
4472	if (!rx_rings)
4473		goto out_err;
4474
4475	np->num_rx_rings = num_rx_rings;
4476	smp_wmb();
4477	np->rx_rings = rx_rings;
4478
4479	netif_set_real_num_rx_queues(np->dev, num_rx_rings);
4480
4481	for (i = 0; i < np->num_rx_rings; i++) {
4482		struct rx_ring_info *rp = &np->rx_rings[i];
4483
4484		rp->np = np;
4485		rp->rx_channel = first_rx_channel + i;
4486
4487		err = niu_alloc_rx_ring_info(np, rp);
4488		if (err)
4489			goto out_err;
4490
4491		niu_size_rbr(np, rp);
4492
4493		/* XXX better defaults, configurable, etc... XXX */
4494		rp->nonsyn_window = 64;
4495		rp->nonsyn_threshold = rp->rcr_table_size - 64;
4496		rp->syn_window = 64;
4497		rp->syn_threshold = rp->rcr_table_size - 64;
4498		rp->rcr_pkt_threshold = 16;
4499		rp->rcr_timeout = 8;
4500		rp->rbr_kick_thresh = RBR_REFILL_MIN;
4501		if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
4502			rp->rbr_kick_thresh = rp->rbr_blocks_per_page;
4503
4504		err = niu_rbr_fill(np, rp, GFP_KERNEL);
4505		if (err)
4506			goto out_err;
4507	}
4508
4509	tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
4510			   GFP_KERNEL);
4511	err = -ENOMEM;
4512	if (!tx_rings)
4513		goto out_err;
4514
4515	np->num_tx_rings = num_tx_rings;
4516	smp_wmb();
4517	np->tx_rings = tx_rings;
4518
4519	netif_set_real_num_tx_queues(np->dev, num_tx_rings);
4520
4521	for (i = 0; i < np->num_tx_rings; i++) {
4522		struct tx_ring_info *rp = &np->tx_rings[i];
4523
4524		rp->np = np;
4525		rp->tx_channel = first_tx_channel + i;
4526
4527		err = niu_alloc_tx_ring_info(np, rp);
4528		if (err)
4529			goto out_err;
4530	}
4531
4532	return 0;
4533
4534out_err:
4535	niu_free_channels(np);
4536	return err;
4537}
4538
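/* Poll for TX_CS_SNG_STATE (the channel acknowledging the stop
 * request) after TX_CS_STOP_N_GO is set.  Note that this poller and
 * niu_tx_cs_reset_poll() below busy-read the register with no delay
 * between iterations, unlike the MAC reset pollers which udelay()
 * between reads.
 */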
4539static int niu_tx_cs_sng_poll(struct niu *np, int channel)
4540{
4541	int limit = 1000;
4542
4543	while (--limit > 0) {
4544		u64 val = nr64(TX_CS(channel));
4545		if (val & TX_CS_SNG_STATE)
4546			return 0;
4547	}
4548	return -ENODEV;
4549}
4550
4551static int niu_tx_channel_stop(struct niu *np, int channel)
4552{
4553	u64 val = nr64(TX_CS(channel));
4554
4555	val |= TX_CS_STOP_N_GO;
4556	nw64(TX_CS(channel), val);
4557
4558	return niu_tx_cs_sng_poll(np, channel);
4559}
4560
4561static int niu_tx_cs_reset_poll(struct niu *np, int channel)
4562{
4563	int limit = 1000;
4564
4565	while (--limit > 0) {
4566		u64 val = nr64(TX_CS(channel));
4567		if (!(val & TX_CS_RST))
4568			return 0;
4569	}
4570	return -ENODEV;
4571}
4572
4573static int niu_tx_channel_reset(struct niu *np, int channel)
4574{
4575	u64 val = nr64(TX_CS(channel));
4576	int err;
4577
4578	val |= TX_CS_RST;
4579	nw64(TX_CS(channel), val);
4580
4581	err = niu_tx_cs_reset_poll(np, channel);
4582	if (!err)
4583		nw64(TX_RING_KICK(channel), 0);
4584
4585	return err;
4586}
4587
4588static int niu_tx_channel_lpage_init(struct niu *np, int channel)
4589{
4590	u64 val;
4591
4592	nw64(TX_LOG_MASK1(channel), 0);
4593	nw64(TX_LOG_VAL1(channel), 0);
4594	nw64(TX_LOG_MASK2(channel), 0);
4595	nw64(TX_LOG_VAL2(channel), 0);
4596	nw64(TX_LOG_PAGE_RELO1(channel), 0);
4597	nw64(TX_LOG_PAGE_RELO2(channel), 0);
4598	nw64(TX_LOG_PAGE_HDL(channel), 0);
4599
4600	val  = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
4601	val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
4602	nw64(TX_LOG_PAGE_VLD(channel), val);
4603
4604	/* XXX TXDMA 32bit mode? XXX */
4605
4606	return 0;
4607}
4608
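/* TXC_CONTROL holds one global enable bit plus a per-port enable mask
 * shared by all ports, hence the parent lock.  The global enable is
 * only dropped once the last port's bit goes away.
 */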
4609static void niu_txc_enable_port(struct niu *np, int on)
4610{
4611	unsigned long flags;
4612	u64 val, mask;
4613
4614	niu_lock_parent(np, flags);
4615	val = nr64(TXC_CONTROL);
4616	mask = (u64)1 << np->port;
4617	if (on) {
4618		val |= TXC_CONTROL_ENABLE | mask;
4619	} else {
4620		val &= ~mask;
4621		if ((val & ~TXC_CONTROL_ENABLE) == 0)
4622			val &= ~TXC_CONTROL_ENABLE;
4623	}
4624	nw64(TXC_CONTROL, val);
4625	niu_unlock_parent(np, flags);
4626}
4627
4628static void niu_txc_set_imask(struct niu *np, u64 imask)
4629{
4630	unsigned long flags;
4631	u64 val;
4632
4633	niu_lock_parent(np, flags);
4634	val = nr64(TXC_INT_MASK);
4635	val &= ~TXC_INT_MASK_VAL(np->port);
4636	val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
	/* Write the updated mask back to the hardware; without this
	 * store the read-modify-write above has no effect.
	 */
	nw64(TXC_INT_MASK, val);
4637	niu_unlock_parent(np, flags);
4638}
4639
4640static void niu_txc_port_dma_enable(struct niu *np, int on)
4641{
4642	u64 val = 0;
4643
4644	if (on) {
4645		int i;
4646
4647		for (i = 0; i < np->num_tx_rings; i++)
4648			val |= (1 << np->tx_rings[i].tx_channel);
4649	}
4650	nw64(TXC_PORT_DMA(np->port), val);
4651}
4652
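/* Bring up one TX DMA channel: stop and reset it, clear its logical
 * page state, program the descriptor ring base/length and the mailbox
 * address, and leave it with a clean control/status word.  The ring
 * base must honor the TX_RNG_CFIG address mask, checked below.
 */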
4653static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
4654{
4655	int err, channel = rp->tx_channel;
4656	u64 val, ring_len;
4657
4658	err = niu_tx_channel_stop(np, channel);
4659	if (err)
4660		return err;
4661
4662	err = niu_tx_channel_reset(np, channel);
4663	if (err)
4664		return err;
4665
4666	err = niu_tx_channel_lpage_init(np, channel);
4667	if (err)
4668		return err;
4669
4670	nw64(TXC_DMA_MAX(channel), rp->max_burst);
4671	nw64(TX_ENT_MSK(channel), 0);
4672
4673	if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
4674			      TX_RNG_CFIG_STADDR)) {
4675		netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n",
4676			   channel, (unsigned long long)rp->descr_dma);
4677		return -EINVAL;
4678	}
4679
4680	/* The length field in TX_RNG_CFIG is measured in 64-byte
4681	 * blocks.  rp->pending counts the TX descriptors in our ring,
4682	 * and each descriptor is 8 bytes, so dividing the descriptor
4683	 * count by 8 yields the ring length the chip wants.
4684	 */
4685	ring_len = (rp->pending / 8);
4686
4687	val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
4688	       rp->descr_dma);
4689	nw64(TX_RNG_CFIG(channel), val);
4690
4691	if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
4692	    ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
4693		netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
4694			    channel, (unsigned long long)rp->mbox_dma);
4695		return -EINVAL;
4696	}
4697	nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
4698	nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);
4699
4700	nw64(TX_CS(channel), 0);
4701
4702	rp->last_pkt_cnt = 0;
4703
4704	return 0;
4705}
4706
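/* Program this port's RDC (receive DMA channel) group tables: each
 * table slot names the RX channel that classified traffic indexed to
 * that slot should land on, and DEF_RDC supplies the port's default
 * channel.
 */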
4707static void niu_init_rdc_groups(struct niu *np)
4708{
4709	struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
4710	int i, first_table_num = tp->first_table_num;
4711
4712	for (i = 0; i < tp->num_tables; i++) {
4713		struct rdc_table *tbl = &tp->tables[i];
4714		int this_table = first_table_num + i;
4715		int slot;
4716
4717		for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
4718			nw64(RDC_TBL(this_table, slot),
4719			     tbl->rxdma_channel[slot]);
4720	}
4721
4722	nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
4723}
4724
4725static void niu_init_drr_weight(struct niu *np)
4726{
4727	int type = phy_decode(np->parent->port_phy, np->port);
4728	u64 val;
4729
4730	switch (type) {
4731	case PORT_TYPE_10G:
4732		val = PT_DRR_WEIGHT_DEFAULT_10G;
4733		break;
4734
4735	case PORT_TYPE_1G:
4736	default:
4737		val = PT_DRR_WEIGHT_DEFAULT_1G;
4738		break;
4739	}
4740	nw64(PT_DRR_WT(np->port), val);
4741}
4742
4743static int niu_init_hostinfo(struct niu *np)
4744{
4745	struct niu_parent *parent = np->parent;
4746	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
4747	int i, err, num_alt = niu_num_alt_addr(np);
4748	int first_rdc_table = tp->first_table_num;
4749
4750	err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
4751	if (err)
4752		return err;
4753
4754	err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
4755	if (err)
4756		return err;
4757
4758	for (i = 0; i < num_alt; i++) {
4759		err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
4760		if (err)
4761			return err;
4762	}
4763
4764	return 0;
4765}
4766
4767static int niu_rx_channel_reset(struct niu *np, int channel)
4768{
4769	return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
4770				      RXDMA_CFIG1_RST, 1000, 10,
4771				      "RXDMA_CFIG1");
4772}
4773
4774static int niu_rx_channel_lpage_init(struct niu *np, int channel)
4775{
4776	u64 val;
4777
4778	nw64(RX_LOG_MASK1(channel), 0);
4779	nw64(RX_LOG_VAL1(channel), 0);
4780	nw64(RX_LOG_MASK2(channel), 0);
4781	nw64(RX_LOG_VAL2(channel), 0);
4782	nw64(RX_LOG_PAGE_RELO1(channel), 0);
4783	nw64(RX_LOG_PAGE_RELO2(channel), 0);
4784	nw64(RX_LOG_PAGE_HDL(channel), 0);
4785
4786	val  = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
4787	val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
4788	nw64(RX_LOG_PAGE_VLD(channel), val);
4789
4790	return 0;
4791}
4792
4793static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
4794{
4795	u64 val;
4796
4797	val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
4798	       ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
4799	       ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
4800	       ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
4801	nw64(RDC_RED_PARA(rp->rx_channel), val);
4802}
4803
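/* Encode the RBR block size and the three partial buffer sizes into an
 * RBR_CFIG_B value, marking each configured size valid.  Sizes the
 * hardware cannot express yield -EINVAL.
 */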
4804static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
4805{
4806	u64 val = 0;
4807
4808	*ret = 0;
4809	switch (rp->rbr_block_size) {
4810	case 4 * 1024:
4811		val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
4812		break;
4813	case 8 * 1024:
4814		val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
4815		break;
4816	case 16 * 1024:
4817		val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
4818		break;
4819	case 32 * 1024:
4820		val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
4821		break;
4822	default:
4823		return -EINVAL;
4824	}
4825	val |= RBR_CFIG_B_VLD2;
4826	switch (rp->rbr_sizes[2]) {
4827	case 2 * 1024:
4828		val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
4829		break;
4830	case 4 * 1024:
4831		val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
4832		break;
4833	case 8 * 1024:
4834		val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
4835		break;
4836	case 16 * 1024:
4837		val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
4838		break;
4839
4840	default:
4841		return -EINVAL;
4842	}
4843	val |= RBR_CFIG_B_VLD1;
4844	switch (rp->rbr_sizes[1]) {
4845	case 1 * 1024:
4846		val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
4847		break;
4848	case 2 * 1024:
4849		val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
4850		break;
4851	case 4 * 1024:
4852		val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
4853		break;
4854	case 8 * 1024:
4855		val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
4856		break;
4857
4858	default:
4859		return -EINVAL;
4860	}
4861	val |= RBR_CFIG_B_VLD0;
4862	switch (rp->rbr_sizes[0]) {
4863	case 256:
4864		val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
4865		break;
4866	case 512:
4867		val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
4868		break;
4869	case 1 * 1024:
4870		val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
4871		break;
4872	case 2 * 1024:
4873		val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
4874		break;
4875
4876	default:
4877		return -EINVAL;
4878	}
4879
4880	*ret = val;
4881	return 0;
4882}
4883
4884static int niu_enable_rx_channel(struct niu *np, int channel, int on)
4885{
4886	u64 val = nr64(RXDMA_CFIG1(channel));
4887	int limit;
4888
4889	if (on)
4890		val |= RXDMA_CFIG1_EN;
4891	else
4892		val &= ~RXDMA_CFIG1_EN;
4893	nw64(RXDMA_CFIG1(channel), val);
4894
4895	limit = 1000;
4896	while (--limit > 0) {
4897		if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
4898			break;
4899		udelay(10);
4900	}
4901	if (limit <= 0)
4902		return -ENODEV;
4903	return 0;
4904}
4905
4906static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
4907{
4908	int err, channel = rp->rx_channel;
4909	u64 val;
4910
4911	err = niu_rx_channel_reset(np, channel);
4912	if (err)
4913		return err;
4914
4915	err = niu_rx_channel_lpage_init(np, channel);
4916	if (err)
4917		return err;
4918
4919	niu_rx_channel_wred_init(np, rp);
4920
4921	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
4922	nw64(RX_DMA_CTL_STAT(channel),
4923	     (RX_DMA_CTL_STAT_MEX |
4924	      RX_DMA_CTL_STAT_RCRTHRES |
4925	      RX_DMA_CTL_STAT_RCRTO |
4926	      RX_DMA_CTL_STAT_RBR_EMPTY));
4927	nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
4928	nw64(RXDMA_CFIG2(channel),
4929	     ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) |
4930	      RXDMA_CFIG2_FULL_HDR));
4931	nw64(RBR_CFIG_A(channel),
4932	     ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
4933	     (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
4934	err = niu_compute_rbr_cfig_b(rp, &val);
4935	if (err)
4936		return err;
4937	nw64(RBR_CFIG_B(channel), val);
4938	nw64(RCRCFIG_A(channel),
4939	     ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
4940	     (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
4941	nw64(RCRCFIG_B(channel),
4942	     ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
4943	     RCRCFIG_B_ENTOUT |
4944	     ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));
4945
4946	err = niu_enable_rx_channel(np, channel, 1);
4947	if (err)
4948		return err;
4949
4950	nw64(RBR_KICK(channel), rp->rbr_index);
4951
4952	val = nr64(RX_DMA_CTL_STAT(channel));
4953	val |= RX_DMA_CTL_STAT_RBR_EMPTY;
4954	nw64(RX_DMA_CTL_STAT(channel), val);
4955
4956	return 0;
4957}
4958
4959static int niu_init_rx_channels(struct niu *np)
4960{
4961	unsigned long flags;
4962	u64 seed = jiffies_64;
4963	int err, i;
4964
4965	niu_lock_parent(np, flags);
4966	nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
4967	nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
4968	niu_unlock_parent(np, flags);
4969
4970	/* XXX RXDMA 32bit mode? XXX */
4971
4972	niu_init_rdc_groups(np);
4973	niu_init_drr_weight(np);
4974
4975	err = niu_init_hostinfo(np);
4976	if (err)
4977		return err;
4978
4979	for (i = 0; i < np->num_rx_rings; i++) {
4980		struct rx_ring_info *rp = &np->rx_rings[i];
4981
4982		err = niu_init_one_rx_channel(np, rp);
4983		if (err)
4984			return err;
4985	}
4986
4987	return 0;
4988}
4989
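/* Install a catch-all classifier rule for fragmented IP packets.
 * Fragments carry no usable L4 port information, so they are matched
 * by a TCAM entry keyed only on the "no port" bit and steered with a
 * zero RDC table offset.
 */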
4990static int niu_set_ip_frag_rule(struct niu *np)
4991{
4992	struct niu_parent *parent = np->parent;
4993	struct niu_classifier *cp = &np->clas;
4994	struct niu_tcam_entry *tp;
4995	int index, err;
4996
4997	index = cp->tcam_top;
4998	tp = &parent->tcam[index];
4999
5000	/* Note that the noport bit is the same in both ipv4 and
5001	 * ipv6 format TCAM entries.
5002	 */
5003	memset(tp, 0, sizeof(*tp));
5004	tp->key[1] = TCAM_V4KEY1_NOPORT;
5005	tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
5006	tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
5007			  ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
5008	err = tcam_write(np, index, tp->key, tp->key_mask);
5009	if (err)
5010		return err;
5011	err = tcam_assoc_write(np, index, tp->assoc_data);
5012	if (err)
5013		return err;
5014	tp->valid = 1;
5015	cp->tcam_valid_entries++;
5016
5017	return 0;
5018}
5019
5020static int niu_init_classifier_hw(struct niu *np)
5021{
5022	struct niu_parent *parent = np->parent;
5023	struct niu_classifier *cp = &np->clas;
5024	int i, err;
5025
5026	nw64(H1POLY, cp->h1_init);
5027	nw64(H2POLY, cp->h2_init);
5028
5029	err = niu_init_hostinfo(np);
5030	if (err)
5031		return err;
5032
5033	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
5034		struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];
5035
5036		vlan_tbl_write(np, i, np->port,
5037			       vp->vlan_pref, vp->rdc_num);
5038	}
5039
5040	for (i = 0; i < cp->num_alt_mac_mappings; i++) {
5041		struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];
5042
5043		err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
5044						ap->rdc_num, ap->mac_pref);
5045		if (err)
5046			return err;
5047	}
5048
5049	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
5050		int index = i - CLASS_CODE_USER_PROG1;
5051
5052		err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
5053		if (err)
5054			return err;
5055		err = niu_set_flow_key(np, i, parent->flow_key[index]);
5056		if (err)
5057			return err;
5058	}
5059
5060	err = niu_set_ip_frag_rule(np);
5061	if (err)
5062		return err;
5063
5064	tcam_enable(np, 1);
5065
5066	return 0;
5067}
5068
5069static int niu_zcp_write(struct niu *np, int index, u64 *data)
5070{
5071	nw64(ZCP_RAM_DATA0, data[0]);
5072	nw64(ZCP_RAM_DATA1, data[1]);
5073	nw64(ZCP_RAM_DATA2, data[2]);
5074	nw64(ZCP_RAM_DATA3, data[3]);
5075	nw64(ZCP_RAM_DATA4, data[4]);
5076	nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
5077	nw64(ZCP_RAM_ACC,
5078	     (ZCP_RAM_ACC_WRITE |
5079	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
5080	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
5081
5082	return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
5083				   1000, 100);
5084}
5085
5086static int niu_zcp_read(struct niu *np, int index, u64 *data)
5087{
5088	int err;
5089
5090	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
5091				  1000, 100);
5092	if (err) {
5093		netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
5094			   (unsigned long long)nr64(ZCP_RAM_ACC));
5095		return err;
5096	}
5097
5098	nw64(ZCP_RAM_ACC,
5099	     (ZCP_RAM_ACC_READ |
5100	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
5101	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
5102
5103	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
5104				  1000, 100);
5105	if (err) {
5106		netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
5107			   (unsigned long long)nr64(ZCP_RAM_ACC));
5108		return err;
5109	}
5110
5111	data[0] = nr64(ZCP_RAM_DATA0);
5112	data[1] = nr64(ZCP_RAM_DATA1);
5113	data[2] = nr64(ZCP_RAM_DATA2);
5114	data[3] = nr64(ZCP_RAM_DATA3);
5115	data[4] = nr64(ZCP_RAM_DATA4);
5116
5117	return 0;
5118}
5119
5120static void niu_zcp_cfifo_reset(struct niu *np)
5121{
5122	u64 val = nr64(RESET_CFIFO);
5123
5124	val |= RESET_CFIFO_RST(np->port);
5125	nw64(RESET_CFIFO, val);
5126	udelay(10);
5127
5128	val &= ~RESET_CFIFO_RST(np->port);
5129	nw64(RESET_CFIFO, val);
5130}
5131
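/* Initialize the ZCP by writing zeros through every CFIFO entry and
 * reading them back (which presumably seeds the RAM and its check bits
 * with known-good values), then reset the CFIFO and clear the latched
 * interrupt status before programming the interrupt mask.
 */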
5132static int niu_init_zcp(struct niu *np)
5133{
5134	u64 data[5], rbuf[5];
5135	int i, max, err;
5136
5137	if (np->parent->plat_type != PLAT_TYPE_NIU) {
5138		if (np->port == 0 || np->port == 1)
5139			max = ATLAS_P0_P1_CFIFO_ENTRIES;
5140		else
5141			max = ATLAS_P2_P3_CFIFO_ENTRIES;
5142	} else {
5143		max = NIU_CFIFO_ENTRIES;
	}
5144
5145	data[0] = 0;
5146	data[1] = 0;
5147	data[2] = 0;
5148	data[3] = 0;
5149	data[4] = 0;
5150
5151	for (i = 0; i < max; i++) {
5152		err = niu_zcp_write(np, i, data);
5153		if (err)
5154			return err;
5155		err = niu_zcp_read(np, i, rbuf);
5156		if (err)
5157			return err;
5158	}
5159
5160	niu_zcp_cfifo_reset(np);
5161	nw64(CFIFO_ECC(np->port), 0);
5162	nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
5163	(void) nr64(ZCP_INT_STAT);
5164	nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);
5165
5166	return 0;
5167}
5168
5169static void niu_ipp_write(struct niu *np, int index, u64 *data)
5170{
5171	u64 val = nr64_ipp(IPP_CFIG);
5172
5173	nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
5174	nw64_ipp(IPP_DFIFO_WR_PTR, index);
5175	nw64_ipp(IPP_DFIFO_WR0, data[0]);
5176	nw64_ipp(IPP_DFIFO_WR1, data[1]);
5177	nw64_ipp(IPP_DFIFO_WR2, data[2]);
5178	nw64_ipp(IPP_DFIFO_WR3, data[3]);
5179	nw64_ipp(IPP_DFIFO_WR4, data[4]);
5180	nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
5181}
5182
5183static void niu_ipp_read(struct niu *np, int index, u64 *data)
5184{
5185	nw64_ipp(IPP_DFIFO_RD_PTR, index);
5186	data[0] = nr64_ipp(IPP_DFIFO_RD0);
5187	data[1] = nr64_ipp(IPP_DFIFO_RD1);
5188	data[2] = nr64_ipp(IPP_DFIFO_RD2);
5189	data[3] = nr64_ipp(IPP_DFIFO_RD3);
5190	data[4] = nr64_ipp(IPP_DFIFO_RD4);
5191}
5192
5193static int niu_ipp_reset(struct niu *np)
5194{
5195	return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
5196					  1000, 100, "IPP_CFIG");
5197}
5198
5199static int niu_init_ipp(struct niu *np)
5200{
5201	u64 data[5], rbuf[5], val;
5202	int i, max, err;
5203
5204	if (np->parent->plat_type != PLAT_TYPE_NIU) {
5205		if (np->port == 0 || np->port == 1)
5206			max = ATLAS_P0_P1_DFIFO_ENTRIES;
5207		else
5208			max = ATLAS_P2_P3_DFIFO_ENTRIES;
5209	} else {
5210		max = NIU_DFIFO_ENTRIES;
	}
5211
5212	data[0] = 0;
5213	data[1] = 0;
5214	data[2] = 0;
5215	data[3] = 0;
5216	data[4] = 0;
5217
5218	for (i = 0; i < max; i++) {
5219		niu_ipp_write(np, i, data);
5220		niu_ipp_read(np, i, rbuf);
5221	}
5222
5223	(void) nr64_ipp(IPP_INT_STAT);
5224	(void) nr64_ipp(IPP_INT_STAT);
5225
5226	err = niu_ipp_reset(np);
5227	if (err)
5228		return err;
5229
5230	(void) nr64_ipp(IPP_PKT_DIS);
5231	(void) nr64_ipp(IPP_BAD_CS_CNT);
5232	(void) nr64_ipp(IPP_ECC);
5233
5234	(void) nr64_ipp(IPP_INT_STAT);
5235
5236	nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);
5237
5238	val = nr64_ipp(IPP_CFIG);
5239	val &= ~IPP_CFIG_IP_MAX_PKT;
5240	val |= (IPP_CFIG_IPP_ENABLE |
5241		IPP_CFIG_DFIFO_ECC_EN |
5242		IPP_CFIG_DROP_BAD_CRC |
5243		IPP_CFIG_CKSUM_EN |
5244		(0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
5245	nw64_ipp(IPP_CFIG, val);
5246
5247	return 0;
5248}
5249
5250static void niu_handle_led(struct niu *np, int status)
5251{
5252	u64 val;
5253	val = nr64_mac(XMAC_CONFIG);
5254
5255	if ((np->flags & NIU_FLAGS_10G) != 0 &&
5256	    (np->flags & NIU_FLAGS_FIBER) != 0) {
5257		if (status) {
5258			val |= XMAC_CONFIG_LED_POLARITY;
5259			val &= ~XMAC_CONFIG_FORCE_LED_ON;
5260		} else {
5261			val |= XMAC_CONFIG_FORCE_LED_ON;
5262			val &= ~XMAC_CONFIG_LED_POLARITY;
5263		}
5264	}
5265
5266	nw64_mac(XMAC_CONFIG, val);
5267}
5268
5269static void niu_init_xif_xmac(struct niu *np)
5270{
5271	struct niu_link_config *lp = &np->link_config;
5272	u64 val;
5273
5274	if (np->flags & NIU_FLAGS_XCVR_SERDES) {
5275		val = nr64(MIF_CONFIG);
5276		val |= MIF_CONFIG_ATCA_GE;
5277		nw64(MIF_CONFIG, val);
5278	}
5279
5280	val = nr64_mac(XMAC_CONFIG);
5281	val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
5282
5283	val |= XMAC_CONFIG_TX_OUTPUT_EN;
5284
5285	if (lp->loopback_mode == LOOPBACK_MAC) {
5286		val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
5287		val |= XMAC_CONFIG_LOOPBACK;
5288	} else {
5289		val &= ~XMAC_CONFIG_LOOPBACK;
5290	}
5291
5292	if (np->flags & NIU_FLAGS_10G) {
5293		val &= ~XMAC_CONFIG_LFS_DISABLE;
5294	} else {
5295		val |= XMAC_CONFIG_LFS_DISABLE;
5296		if (!(np->flags & NIU_FLAGS_FIBER) &&
5297		    !(np->flags & NIU_FLAGS_XCVR_SERDES))
5298			val |= XMAC_CONFIG_1G_PCS_BYPASS;
5299		else
5300			val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
5301	}
5302
5303	val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
5304
5305	if (lp->active_speed == SPEED_100)
5306		val |= XMAC_CONFIG_SEL_CLK_25MHZ;
5307	else
5308		val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;
5309
5310	nw64_mac(XMAC_CONFIG, val);
5311
5312	val = nr64_mac(XMAC_CONFIG);
5313	val &= ~XMAC_CONFIG_MODE_MASK;
5314	if (np->flags & NIU_FLAGS_10G) {
5315		val |= XMAC_CONFIG_MODE_XGMII;
5316	} else {
5317		if (lp->active_speed == SPEED_1000)
5318			val |= XMAC_CONFIG_MODE_GMII;
5319		else
5320			val |= XMAC_CONFIG_MODE_MII;
5321	}
5322
5323	nw64_mac(XMAC_CONFIG, val);
5324}
5325
5326static void niu_init_xif_bmac(struct niu *np)
5327{
5328	struct niu_link_config *lp = &np->link_config;
5329	u64 val;
5330
5331	val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;
5332
5333	if (lp->loopback_mode == LOOPBACK_MAC)
5334		val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
5335	else
5336		val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;
5337
5338	if (lp->active_speed == SPEED_1000)
5339		val |= BMAC_XIF_CONFIG_GMII_MODE;
5340	else
5341		val &= ~BMAC_XIF_CONFIG_GMII_MODE;
5342
5343	val &= ~(BMAC_XIF_CONFIG_LINK_LED |
5344		 BMAC_XIF_CONFIG_LED_POLARITY);
5345
5346	if (!(np->flags & NIU_FLAGS_10G) &&
5347	    !(np->flags & NIU_FLAGS_FIBER) &&
5348	    lp->active_speed == SPEED_100)
5349		val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
5350	else
5351		val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;
5352
5353	nw64_mac(BMAC_XIF_CONFIG, val);
5354}
5355
5356static void niu_init_xif(struct niu *np)
5357{
5358	if (np->flags & NIU_FLAGS_XMAC)
5359		niu_init_xif_xmac(np);
5360	else
5361		niu_init_xif_bmac(np);
5362}
5363
5364static void niu_pcs_mii_reset(struct niu *np)
5365{
5366	int limit = 1000;
5367	u64 val = nr64_pcs(PCS_MII_CTL);
5368	val |= PCS_MII_CTL_RST;
5369	nw64_pcs(PCS_MII_CTL, val);
5370	while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
5371		udelay(100);
5372		val = nr64_pcs(PCS_MII_CTL);
5373	}
5374}
5375
5376static void niu_xpcs_reset(struct niu *np)
5377{
5378	int limit = 1000;
5379	u64 val = nr64_xpcs(XPCS_CONTROL1);
5380	val |= XPCS_CONTROL1_RESET;
5381	nw64_xpcs(XPCS_CONTROL1, val);
5382	while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
5383		udelay(100);
5384		val = nr64_xpcs(XPCS_CONTROL1);
5385	}
5386}
5387
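/* Select and initialize the PCS datapath from the port type flags:
 * plain 1G fiber and 1G SERDES use the MII PCS, the 10G variants use
 * the XPCS (XMAC only, hence the -EINVAL), and 1G copper / 1G RGMII
 * fiber select the MII datapath mode instead.
 */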
5388static int niu_init_pcs(struct niu *np)
5389{
5390	struct niu_link_config *lp = &np->link_config;
5391	u64 val;
5392
5393	switch (np->flags & (NIU_FLAGS_10G |
5394			     NIU_FLAGS_FIBER |
5395			     NIU_FLAGS_XCVR_SERDES)) {
5396	case NIU_FLAGS_FIBER:
5397		/* 1G fiber */
5398		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
5399		nw64_pcs(PCS_DPATH_MODE, 0);
5400		niu_pcs_mii_reset(np);
5401		break;
5402
5403	case NIU_FLAGS_10G:
5404	case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
5405	case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
5406		/* 10G SERDES */
5407		if (!(np->flags & NIU_FLAGS_XMAC))
5408			return -EINVAL;
5409
5410		/* 10G copper or fiber */
5411		val = nr64_mac(XMAC_CONFIG);
5412		val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
5413		nw64_mac(XMAC_CONFIG, val);
5414
5415		niu_xpcs_reset(np);
5416
5417		val = nr64_xpcs(XPCS_CONTROL1);
5418		if (lp->loopback_mode == LOOPBACK_PHY)
5419			val |= XPCS_CONTROL1_LOOPBACK;
5420		else
5421			val &= ~XPCS_CONTROL1_LOOPBACK;
5422		nw64_xpcs(XPCS_CONTROL1, val);
5423
5424		nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
5425		(void) nr64_xpcs(XPCS_SYMERR_CNT01);
5426		(void) nr64_xpcs(XPCS_SYMERR_CNT23);
5427		break;
5428
5430	case NIU_FLAGS_XCVR_SERDES:
5431		/* 1G SERDES */
5432		niu_pcs_mii_reset(np);
5433		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
5434		nw64_pcs(PCS_DPATH_MODE, 0);
5435		break;
5436
5437	case 0:
5438		/* 1G copper */
5439	case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
5440		/* 1G RGMII FIBER */
5441		nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
5442		niu_pcs_mii_reset(np);
5443		break;
5444
5445	default:
5446		return -EINVAL;
5447	}
5448
5449	return 0;
5450}
5451
5452static int niu_reset_tx_xmac(struct niu *np)
5453{
5454	return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
5455					  (XTXMAC_SW_RST_REG_RS |
5456					   XTXMAC_SW_RST_SOFT_RST),
5457					  1000, 100, "XTXMAC_SW_RST");
5458}
5459
5460static int niu_reset_tx_bmac(struct niu *np)
5461{
5462	int limit;
5463
5464	nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
5465	limit = 1000;
5466	while (--limit >= 0) {
5467		if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
5468			break;
5469		udelay(100);
5470	}
5471	if (limit < 0) {
5472		dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
5473			np->port,
5474			(unsigned long long) nr64_mac(BTXMAC_SW_RST));
5475		return -ENODEV;
5476	}
5477
5478	return 0;
5479}
5480
5481static int niu_reset_tx_mac(struct niu *np)
5482{
5483	if (np->flags & NIU_FLAGS_XMAC)
5484		return niu_reset_tx_xmac(np);
5485	else
5486		return niu_reset_tx_bmac(np);
5487}
5488
5489static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
5490{
5491	u64 val;
5492
5493	val = nr64_mac(XMAC_MIN);
5494	val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
5495		 XMAC_MIN_RX_MIN_PKT_SIZE);
5496	val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
5497	val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
5498	nw64_mac(XMAC_MIN, val);
5499
5500	nw64_mac(XMAC_MAX, max);
5501
5502	nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);
5503
5504	val = nr64_mac(XMAC_IPG);
5505	if (np->flags & NIU_FLAGS_10G) {
5506		val &= ~XMAC_IPG_IPG_XGMII;
5507		val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
5508	} else {
5509		val &= ~XMAC_IPG_IPG_MII_GMII;
5510		val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
5511	}
5512	nw64_mac(XMAC_IPG, val);
5513
5514	val = nr64_mac(XMAC_CONFIG);
5515	val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
5516		 XMAC_CONFIG_STRETCH_MODE |
5517		 XMAC_CONFIG_VAR_MIN_IPG_EN |
5518		 XMAC_CONFIG_TX_ENABLE);
5519	nw64_mac(XMAC_CONFIG, val);
5520
5521	nw64_mac(TXMAC_FRM_CNT, 0);
5522	nw64_mac(TXMAC_BYTE_CNT, 0);
5523}
5524
5525static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
5526{
5527	u64 val;
5528
5529	nw64_mac(BMAC_MIN_FRAME, min);
5530	nw64_mac(BMAC_MAX_FRAME, max);
5531
5532	nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
5533	nw64_mac(BMAC_CTRL_TYPE, 0x8808);
5534	nw64_mac(BMAC_PREAMBLE_SIZE, 7);
5535
5536	val = nr64_mac(BTXMAC_CONFIG);
5537	val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
5538		 BTXMAC_CONFIG_ENABLE);
5539	nw64_mac(BTXMAC_CONFIG, val);
5540}
5541
5542static void niu_init_tx_mac(struct niu *np)
5543{
5544	u64 min, max;
5545
5546	min = 64;
5547	if (np->dev->mtu > ETH_DATA_LEN)
5548		max = 9216;
5549	else
5550		max = 1522;
5551
5552	/* The XMAC_MIN register only accepts values for TX min which
5553	 * have the low 3 bits cleared.
5554	 */
5555	BUG_ON(min & 0x7);
5556
5557	if (np->flags & NIU_FLAGS_XMAC)
5558		niu_init_tx_xmac(np, min, max);
5559	else
5560		niu_init_tx_bmac(np, min, max);
5561}
5562
5563static int niu_reset_rx_xmac(struct niu *np)
5564{
5565	int limit;
5566
5567	nw64_mac(XRXMAC_SW_RST,
5568		 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
5569	limit = 1000;
5570	while (--limit >= 0) {
5571		if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
5572						 XRXMAC_SW_RST_SOFT_RST)))
5573			break;
5574		udelay(100);
5575	}
5576	if (limit < 0) {
5577		dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
5578			np->port,
5579			(unsigned long long) nr64_mac(XRXMAC_SW_RST));
5580		return -ENODEV;
5581	}
5582
5583	return 0;
5584}
5585
5586static int niu_reset_rx_bmac(struct niu *np)
5587{
5588	int limit;
5589
5590	nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
5591	limit = 1000;
5592	while (--limit >= 0) {
5593		if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
5594			break;
5595		udelay(100);
5596	}
5597	if (limit < 0) {
5598		dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
5599			np->port,
5600			(unsigned long long) nr64_mac(BRXMAC_SW_RST));
5601		return -ENODEV;
5602	}
5603
5604	return 0;
5605}
5606
5607static int niu_reset_rx_mac(struct niu *np)
5608{
5609	if (np->flags & NIU_FLAGS_XMAC)
5610		return niu_reset_rx_xmac(np);
5611	else
5612		return niu_reset_rx_bmac(np);
5613}
5614
5615static void niu_init_rx_xmac(struct niu *np)
5616{
5617	struct niu_parent *parent = np->parent;
5618	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
5619	int first_rdc_table = tp->first_table_num;
5620	unsigned long i;
5621	u64 val;
5622
5623	nw64_mac(XMAC_ADD_FILT0, 0);
5624	nw64_mac(XMAC_ADD_FILT1, 0);
5625	nw64_mac(XMAC_ADD_FILT2, 0);
5626	nw64_mac(XMAC_ADD_FILT12_MASK, 0);
5627	nw64_mac(XMAC_ADD_FILT00_MASK, 0);
5628	for (i = 0; i < MAC_NUM_HASH; i++)
5629		nw64_mac(XMAC_HASH_TBL(i), 0);
5630	nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
5631	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
5632	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
5633
5634	val = nr64_mac(XMAC_CONFIG);
5635	val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
5636		 XMAC_CONFIG_PROMISCUOUS |
5637		 XMAC_CONFIG_PROMISC_GROUP |
5638		 XMAC_CONFIG_ERR_CHK_DIS |
5639		 XMAC_CONFIG_RX_CRC_CHK_DIS |
5640		 XMAC_CONFIG_RESERVED_MULTICAST |
5641		 XMAC_CONFIG_RX_CODEV_CHK_DIS |
5642		 XMAC_CONFIG_ADDR_FILTER_EN |
5643		 XMAC_CONFIG_RCV_PAUSE_ENABLE |
5644		 XMAC_CONFIG_STRIP_CRC |
5645		 XMAC_CONFIG_PASS_FLOW_CTRL |
5646		 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
5647	val |= (XMAC_CONFIG_HASH_FILTER_EN);
5648	nw64_mac(XMAC_CONFIG, val);
5649
5650	nw64_mac(RXMAC_BT_CNT, 0);
5651	nw64_mac(RXMAC_BC_FRM_CNT, 0);
5652	nw64_mac(RXMAC_MC_FRM_CNT, 0);
5653	nw64_mac(RXMAC_FRAG_CNT, 0);
5654	nw64_mac(RXMAC_HIST_CNT1, 0);
5655	nw64_mac(RXMAC_HIST_CNT2, 0);
5656	nw64_mac(RXMAC_HIST_CNT3, 0);
5657	nw64_mac(RXMAC_HIST_CNT4, 0);
5658	nw64_mac(RXMAC_HIST_CNT5, 0);
5659	nw64_mac(RXMAC_HIST_CNT6, 0);
5660	nw64_mac(RXMAC_HIST_CNT7, 0);
5661	nw64_mac(RXMAC_MPSZER_CNT, 0);
5662	nw64_mac(RXMAC_CRC_ER_CNT, 0);
5663	nw64_mac(RXMAC_CD_VIO_CNT, 0);
5664	nw64_mac(LINK_FAULT_CNT, 0);
5665}
5666
5667static void niu_init_rx_bmac(struct niu *np)
5668{
5669	struct niu_parent *parent = np->parent;
5670	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
5671	int first_rdc_table = tp->first_table_num;
5672	unsigned long i;
5673	u64 val;
5674
5675	nw64_mac(BMAC_ADD_FILT0, 0);
5676	nw64_mac(BMAC_ADD_FILT1, 0);
5677	nw64_mac(BMAC_ADD_FILT2, 0);
5678	nw64_mac(BMAC_ADD_FILT12_MASK, 0);
5679	nw64_mac(BMAC_ADD_FILT00_MASK, 0);
5680	for (i = 0; i < MAC_NUM_HASH; i++)
5681		nw64_mac(BMAC_HASH_TBL(i), 0);
5682	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
5683	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
5684	nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);
5685
5686	val = nr64_mac(BRXMAC_CONFIG);
5687	val &= ~(BRXMAC_CONFIG_ENABLE |
5688		 BRXMAC_CONFIG_STRIP_PAD |
5689		 BRXMAC_CONFIG_STRIP_FCS |
5690		 BRXMAC_CONFIG_PROMISC |
5691		 BRXMAC_CONFIG_PROMISC_GRP |
5692		 BRXMAC_CONFIG_ADDR_FILT_EN |
5693		 BRXMAC_CONFIG_DISCARD_DIS);
5694	val |= (BRXMAC_CONFIG_HASH_FILT_EN);
5695	nw64_mac(BRXMAC_CONFIG, val);
5696
5697	val = nr64_mac(BMAC_ADDR_CMPEN);
5698	val |= BMAC_ADDR_CMPEN_EN0;
5699	nw64_mac(BMAC_ADDR_CMPEN, val);
5700}
5701
5702static void niu_init_rx_mac(struct niu *np)
5703{
5704	niu_set_primary_mac(np, np->dev->dev_addr);
5705
5706	if (np->flags & NIU_FLAGS_XMAC)
5707		niu_init_rx_xmac(np);
5708	else
5709		niu_init_rx_bmac(np);
5710}
5711
5712static void niu_enable_tx_xmac(struct niu *np, int on)
5713{
5714	u64 val = nr64_mac(XMAC_CONFIG);
5715
5716	if (on)
5717		val |= XMAC_CONFIG_TX_ENABLE;
5718	else
5719		val &= ~XMAC_CONFIG_TX_ENABLE;
5720	nw64_mac(XMAC_CONFIG, val);
5721}
5722
5723static void niu_enable_tx_bmac(struct niu *np, int on)
5724{
5725	u64 val = nr64_mac(BTXMAC_CONFIG);
5726
5727	if (on)
5728		val |= BTXMAC_CONFIG_ENABLE;
5729	else
5730		val &= ~BTXMAC_CONFIG_ENABLE;
5731	nw64_mac(BTXMAC_CONFIG, val);
5732}
5733
5734static void niu_enable_tx_mac(struct niu *np, int on)
5735{
5736	if (np->flags & NIU_FLAGS_XMAC)
5737		niu_enable_tx_xmac(np, on);
5738	else
5739		niu_enable_tx_bmac(np, on);
5740}
5741
5742static void niu_enable_rx_xmac(struct niu *np, int on)
5743{
5744	u64 val = nr64_mac(XMAC_CONFIG);
5745
5746	val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
5747		 XMAC_CONFIG_PROMISCUOUS);
5748
5749	if (np->flags & NIU_FLAGS_MCAST)
5750		val |= XMAC_CONFIG_HASH_FILTER_EN;
5751	if (np->flags & NIU_FLAGS_PROMISC)
5752		val |= XMAC_CONFIG_PROMISCUOUS;
5753
5754	if (on)
5755		val |= XMAC_CONFIG_RX_MAC_ENABLE;
5756	else
5757		val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
5758	nw64_mac(XMAC_CONFIG, val);
5759}
5760
5761static void niu_enable_rx_bmac(struct niu *np, int on)
5762{
5763	u64 val = nr64_mac(BRXMAC_CONFIG);
5764
5765	val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
5766		 BRXMAC_CONFIG_PROMISC);
5767
5768	if (np->flags & NIU_FLAGS_MCAST)
5769		val |= BRXMAC_CONFIG_HASH_FILT_EN;
5770	if (np->flags & NIU_FLAGS_PROMISC)
5771		val |= BRXMAC_CONFIG_PROMISC;
5772
5773	if (on)
5774		val |= BRXMAC_CONFIG_ENABLE;
5775	else
5776		val &= ~BRXMAC_CONFIG_ENABLE;
5777	nw64_mac(BRXMAC_CONFIG, val);
5778}
5779
5780static void niu_enable_rx_mac(struct niu *np, int on)
5781{
5782	if (np->flags & NIU_FLAGS_XMAC)
5783		niu_enable_rx_xmac(np, on);
5784	else
5785		niu_enable_rx_bmac(np, on);
5786}
5787
5788static int niu_init_mac(struct niu *np)
5789{
5790	int err;
5791
5792	niu_init_xif(np);
5793	err = niu_init_pcs(np);
5794	if (err)
5795		return err;
5796
5797	err = niu_reset_tx_mac(np);
5798	if (err)
5799		return err;
5800	niu_init_tx_mac(np);
5801	err = niu_reset_rx_mac(np);
5802	if (err)
5803		return err;
5804	niu_init_rx_mac(np);
5805
5806	/* This looks hokey, but the RX MAC reset we just did will
5807	 * undo some of the state we set up in niu_init_tx_mac(), so we
5808	 * have to call it again.  In particular, the RX MAC reset will
5809	 * set the XMAC_MAX register back to its default value.
5810	 */
5811	niu_init_tx_mac(np);
5812	niu_enable_tx_mac(np, 1);
5813
5814	niu_enable_rx_mac(np, 1);
5815
5816	return 0;
5817}
5818
5819static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
5820{
5821	(void) niu_tx_channel_stop(np, rp->tx_channel);
5822}
5823
5824static void niu_stop_tx_channels(struct niu *np)
5825{
5826	int i;
5827
5828	for (i = 0; i < np->num_tx_rings; i++) {
5829		struct tx_ring_info *rp = &np->tx_rings[i];
5830
5831		niu_stop_one_tx_channel(np, rp);
5832	}
5833}
5834
5835static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
5836{
5837	(void) niu_tx_channel_reset(np, rp->tx_channel);
5838}
5839
5840static void niu_reset_tx_channels(struct niu *np)
5841{
5842	int i;
5843
5844	for (i = 0; i < np->num_tx_rings; i++) {
5845		struct tx_ring_info *rp = &np->tx_rings[i];
5846
5847		niu_reset_one_tx_channel(np, rp);
5848	}
5849}
5850
5851static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
5852{
5853	(void) niu_enable_rx_channel(np, rp->rx_channel, 0);
5854}
5855
5856static void niu_stop_rx_channels(struct niu *np)
5857{
5858	int i;
5859
5860	for (i = 0; i < np->num_rx_rings; i++) {
5861		struct rx_ring_info *rp = &np->rx_rings[i];
5862
5863		niu_stop_one_rx_channel(np, rp);
5864	}
5865}
5866
5867static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
5868{
5869	int channel = rp->rx_channel;
5870
5871	(void) niu_rx_channel_reset(np, channel);
5872	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
5873	nw64(RX_DMA_CTL_STAT(channel), 0);
5874	(void) niu_enable_rx_channel(np, channel, 0);
5875}
5876
5877static void niu_reset_rx_channels(struct niu *np)
5878{
5879	int i;
5880
5881	for (i = 0; i < np->num_rx_rings; i++) {
5882		struct rx_ring_info *rp = &np->rx_rings[i];
5883
5884		niu_reset_one_rx_channel(np, rp);
5885	}
5886}
5887
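/* Quiesce the IPP before disabling it: wait for the DFIFO read and
 * write pointers to catch up with each other, then clear the enable
 * bits and soft-reset the block.
 */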
5888static void niu_disable_ipp(struct niu *np)
5889{
5890	u64 rd, wr, val;
5891	int limit;
5892
5893	rd = nr64_ipp(IPP_DFIFO_RD_PTR);
5894	wr = nr64_ipp(IPP_DFIFO_WR_PTR);
5895	limit = 100;
5896	while (--limit >= 0 && (rd != wr)) {
5897		rd = nr64_ipp(IPP_DFIFO_RD_PTR);
5898		wr = nr64_ipp(IPP_DFIFO_WR_PTR);
5899	}
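	/* An idle, freshly drained FIFO apparently parks the pointers
	 * at rd == 0 / wr == 1, so only other rd/wr combinations are
	 * reported as a failure to quiesce.
	 */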
5900	if (limit < 0 &&
5901	    (rd != 0 && wr != 1)) {
5902		netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
5903			   (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
5904			   (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
5905	}
5906
5907	val = nr64_ipp(IPP_CFIG);
5908	val &= ~(IPP_CFIG_IPP_ENABLE |
5909		 IPP_CFIG_DFIFO_ECC_EN |
5910		 IPP_CFIG_DROP_BAD_CRC |
5911		 IPP_CFIG_CKSUM_EN);
5912	nw64_ipp(IPP_CFIG, val);
5913
5914	(void) niu_ipp_reset(np);
5915}
5916
5917static int niu_init_hw(struct niu *np)
5918{
5919	int i, err;
5920
5921	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
5922	niu_txc_enable_port(np, 1);
5923	niu_txc_port_dma_enable(np, 1);
5924	niu_txc_set_imask(np, 0);
5925
5926	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
5927	for (i = 0; i < np->num_tx_rings; i++) {
5928		struct tx_ring_info *rp = &np->tx_rings[i];
5929
5930		err = niu_init_one_tx_channel(np, rp);
5931		if (err)
5932			return err;
5933	}
5934
5935	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
5936	err = niu_init_rx_channels(np);
5937	if (err)
5938		goto out_uninit_tx_channels;
5939
5940	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
5941	err = niu_init_classifier_hw(np);
5942	if (err)
5943		goto out_uninit_rx_channels;
5944
5945	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
5946	err = niu_init_zcp(np);
5947	if (err)
5948		goto out_uninit_rx_channels;
5949
5950	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
5951	err = niu_init_ipp(np);
5952	if (err)
5953		goto out_uninit_rx_channels;
5954
5955	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
5956	err = niu_init_mac(np);
5957	if (err)
5958		goto out_uninit_ipp;
5959
5960	return 0;
5961
5962out_uninit_ipp:
5963	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
5964	niu_disable_ipp(np);
5965
5966out_uninit_rx_channels:
5967	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
5968	niu_stop_rx_channels(np);
5969	niu_reset_rx_channels(np);
5970
5971out_uninit_tx_channels:
5972	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
5973	niu_stop_tx_channels(np);
5974	niu_reset_tx_channels(np);
5975
5976	return err;
5977}
5978
5979static void niu_stop_hw(struct niu *np)
5980{
5981	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
5982	niu_enable_interrupts(np, 0);
5983
5984	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
5985	niu_enable_rx_mac(np, 0);
5986
5987	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
5988	niu_disable_ipp(np);
5989
5990	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
5991	niu_stop_tx_channels(np);
5992
5993	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
5994	niu_stop_rx_channels(np);
5995
5996	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
5997	niu_reset_tx_channels(np);
5998
5999	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
6000	niu_reset_rx_channels(np);
6001}
6002
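/* Build the per-vector IRQ names.  Vector 0 always carries the MAC;
 * on port 0 the next two vectors are MIF and SYSERR, and the remaining
 * vectors map to the RX rings first, then the TX rings.
 */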
6003static void niu_set_irq_name(struct niu *np)
6004{
6005	int port = np->port;
6006	int i, j = 1;
6007
6008	sprintf(np->irq_name[0], "%s:MAC", np->dev->name);
6009
6010	if (port == 0) {
6011		sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
6012		sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
6013		j = 3;
6014	}
6015
6016	for (i = 0; i < np->num_ldg - j; i++) {
6017		if (i < np->num_rx_rings)
6018			sprintf(np->irq_name[i+j], "%s-rx-%d",
6019				np->dev->name, i);
6020		else if (i < np->num_tx_rings + np->num_rx_rings)
6021			sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
6022				i - np->num_rx_rings);
6023	}
6024}
6025
6026static int niu_request_irq(struct niu *np)
6027{
6028	int i, j, err;
6029
6030	niu_set_irq_name(np);
6031
6032	err = 0;
6033	for (i = 0; i < np->num_ldg; i++) {
6034		struct niu_ldg *lp = &np->ldg[i];
6035
6036		err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED,
6037				  np->irq_name[i], lp);
6038		if (err)
6039			goto out_free_irqs;
6040
6041	}
6042
6043	return 0;
6044
6045out_free_irqs:
6046	for (j = 0; j < i; j++) {
6047		struct niu_ldg *lp = &np->ldg[j];
6048
6049		free_irq(lp->irq, lp);
6050	}
6051	return err;
6052}
6053
6054static void niu_free_irq(struct niu *np)
6055{
6056	int i;
6057
6058	for (i = 0; i < np->num_ldg; i++) {
6059		struct niu_ldg *lp = &np->ldg[i];
6060
6061		free_irq(lp->irq, lp);
6062	}
6063}
6064
6065static void niu_enable_napi(struct niu *np)
6066{
6067	int i;
6068
6069	for (i = 0; i < np->num_ldg; i++)
6070		napi_enable(&np->ldg[i].napi);
6071}
6072
6073static void niu_disable_napi(struct niu *np)
6074{
6075	int i;
6076
6077	for (i = 0; i < np->num_ldg; i++)
6078		napi_disable(&np->ldg[i].napi);
6079}
6080
6081static int niu_open(struct net_device *dev)
6082{
6083	struct niu *np = netdev_priv(dev);
6084	int err;
6085
6086	netif_carrier_off(dev);
6087
6088	err = niu_alloc_channels(np);
6089	if (err)
6090		goto out_err;
6091
6092	err = niu_enable_interrupts(np, 0);
6093	if (err)
6094		goto out_free_channels;
6095
6096	err = niu_request_irq(np);
6097	if (err)
6098		goto out_free_channels;
6099
6100	niu_enable_napi(np);
6101
6102	spin_lock_irq(&np->lock);
6103
6104	err = niu_init_hw(np);
6105	if (!err) {
6106		timer_setup(&np->timer, niu_timer, 0);
6107		np->timer.expires = jiffies + HZ;
6108
6109		err = niu_enable_interrupts(np, 1);
6110		if (err)
6111			niu_stop_hw(np);
6112	}
6113
6114	spin_unlock_irq(&np->lock);
6115
6116	if (err) {
6117		niu_disable_napi(np);
6118		goto out_free_irq;
6119	}
6120
6121	netif_tx_start_all_queues(dev);
6122
6123	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
6124		netif_carrier_on(dev);
6125
6126	add_timer(&np->timer);
6127
6128	return 0;
6129
6130out_free_irq:
6131	niu_free_irq(np);
6132
6133out_free_channels:
6134	niu_free_channels(np);
6135
6136out_err:
6137	return err;
6138}
6139
6140static void niu_full_shutdown(struct niu *np, struct net_device *dev)
6141{
6142	cancel_work_sync(&np->reset_task);
6143
6144	niu_disable_napi(np);
6145	netif_tx_stop_all_queues(dev);
6146
6147	del_timer_sync(&np->timer);
6148
6149	spin_lock_irq(&np->lock);
6150
6151	niu_stop_hw(np);
6152
6153	spin_unlock_irq(&np->lock);
6154}
6155
6156static int niu_close(struct net_device *dev)
6157{
6158	struct niu *np = netdev_priv(dev);
6159
6160	niu_full_shutdown(np, dev);
6161
6162	niu_free_irq(np);
6163
6164	niu_free_channels(np);
6165
6166	niu_handle_led(np, 0);
6167
6168	return 0;
6169}
6170
6171static void niu_sync_xmac_stats(struct niu *np)
6172{
6173	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
6174
6175	mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
6176	mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);
6177
6178	mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
6179	mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
6180	mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
6181	mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
6182	mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
6183	mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
6184	mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
6185	mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
6186	mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
6187	mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
6188	mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
6189	mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
6190	mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
6191	mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
6192	mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
6193	mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
6194}
6195
6196static void niu_sync_bmac_stats(struct niu *np)
6197{
6198	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
6199
6200	mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
6201	mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);
6202
6203	mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
6204	mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
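	/* XXX rx_crc_errors is accumulated from the alignment error
	 * counter here, which looks like a copy-and-paste slip (the
	 * BMAC's CRC error counter would be expected); rx_len_errors
	 * similarly reads the code violation counter. XXX
	 */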
6205	mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
6206	mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
6207}
6208
6209static void niu_sync_mac_stats(struct niu *np)
6210{
6211	if (np->flags & NIU_FLAGS_XMAC)
6212		niu_sync_xmac_stats(np);
6213	else
6214		niu_sync_bmac_stats(np);
6215}
6216
6217static void niu_get_rx_stats(struct niu *np,
6218			     struct rtnl_link_stats64 *stats)
6219{
6220	u64 pkts, dropped, errors, bytes;
6221	struct rx_ring_info *rx_rings;
6222	int i;
6223
6224	pkts = dropped = errors = bytes = 0;
6225
6226	rx_rings = READ_ONCE(np->rx_rings);
6227	if (!rx_rings)
6228		goto no_rings;
6229
6230	for (i = 0; i < np->num_rx_rings; i++) {
6231		struct rx_ring_info *rp = &rx_rings[i];
6232
6233		niu_sync_rx_discard_stats(np, rp, 0);
6234
6235		pkts += rp->rx_packets;
6236		bytes += rp->rx_bytes;
6237		dropped += rp->rx_dropped;
6238		errors += rp->rx_errors;
6239	}
6240
6241no_rings:
6242	stats->rx_packets = pkts;
6243	stats->rx_bytes = bytes;
6244	stats->rx_dropped = dropped;
6245	stats->rx_errors = errors;
6246}
6247
6248static void niu_get_tx_stats(struct niu *np,
6249			     struct rtnl_link_stats64 *stats)
6250{
6251	u64 pkts, errors, bytes;
6252	struct tx_ring_info *tx_rings;
6253	int i;
6254
6255	pkts = errors = bytes = 0;
6256
6257	tx_rings = READ_ONCE(np->tx_rings);
6258	if (!tx_rings)
6259		goto no_rings;
6260
6261	for (i = 0; i < np->num_tx_rings; i++) {
6262		struct tx_ring_info *rp = &tx_rings[i];
6263
6264		pkts += rp->tx_packets;
6265		bytes += rp->tx_bytes;
6266		errors += rp->tx_errors;
6267	}
6268
6269no_rings:
6270	stats->tx_packets = pkts;
6271	stats->tx_bytes = bytes;
6272	stats->tx_errors = errors;
6273}
6274
6275static void niu_get_stats(struct net_device *dev,
6276			  struct rtnl_link_stats64 *stats)
6277{
6278	struct niu *np = netdev_priv(dev);
6279
6280	if (netif_running(dev)) {
6281		niu_get_rx_stats(np, stats);
6282		niu_get_tx_stats(np, stats);
6283	}
6284}
6285
6286static void niu_load_hash_xmac(struct niu *np, u16 *hash)
6287{
6288	int i;
6289
6290	for (i = 0; i < 16; i++)
6291		nw64_mac(XMAC_HASH_TBL(i), hash[i]);
6292}
6293
6294static void niu_load_hash_bmac(struct niu *np, u16 *hash)
6295{
6296	int i;
6297
6298	for (i = 0; i < 16; i++)
6299		nw64_mac(BMAC_HASH_TBL(i), hash[i]);
6300}
6301
6302static void niu_load_hash(struct niu *np, u16 *hash)
6303{
6304	if (np->flags & NIU_FLAGS_XMAC)
6305		niu_load_hash_xmac(np, hash);
6306	else
6307		niu_load_hash_bmac(np, hash);
6308}
6309
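/* Rebuild the RX filtering state from the device flags and address
 * lists.  If there are more unicast addresses than alternate MAC slots
 * we fall back to promiscuous mode.  The multicast hash uses the top
 * 8 bits of the little-endian CRC of each address: the high nibble
 * picks one of the 16 hash registers and the low nibble picks a bit
 * within it, MSB first.
 */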
6310static void niu_set_rx_mode(struct net_device *dev)
6311{
6312	struct niu *np = netdev_priv(dev);
6313	int i, alt_cnt, err;
6314	struct netdev_hw_addr *ha;
6315	unsigned long flags;
6316	u16 hash[16] = { 0, };
6317
6318	spin_lock_irqsave(&np->lock, flags);
6319	niu_enable_rx_mac(np, 0);
6320
6321	np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
6322	if (dev->flags & IFF_PROMISC)
6323		np->flags |= NIU_FLAGS_PROMISC;
6324	if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
6325		np->flags |= NIU_FLAGS_MCAST;
6326
6327	alt_cnt = netdev_uc_count(dev);
6328	if (alt_cnt > niu_num_alt_addr(np)) {
6329		alt_cnt = 0;
6330		np->flags |= NIU_FLAGS_PROMISC;
6331	}
6332
6333	if (alt_cnt) {
6334		int index = 0;
6335
6336		netdev_for_each_uc_addr(ha, dev) {
6337			err = niu_set_alt_mac(np, index, ha->addr);
6338			if (err)
6339				netdev_warn(dev, "Error %d adding alt mac %d\n",
6340					    err, index);
6341			err = niu_enable_alt_mac(np, index, 1);
6342			if (err)
6343				netdev_warn(dev, "Error %d enabling alt mac %d\n",
6344					    err, index);
6345
6346			index++;
6347		}
6348	} else {
6349		int alt_start;
6350		if (np->flags & NIU_FLAGS_XMAC)
6351			alt_start = 0;
6352		else
6353			alt_start = 1;
6354		for (i = alt_start; i < niu_num_alt_addr(np); i++) {
6355			err = niu_enable_alt_mac(np, i, 0);
6356			if (err)
6357				netdev_warn(dev, "Error %d disabling alt mac %d\n",
6358					    err, i);
6359		}
6360	}
6361	if (dev->flags & IFF_ALLMULTI) {
6362		for (i = 0; i < 16; i++)
6363			hash[i] = 0xffff;
6364	} else if (!netdev_mc_empty(dev)) {
6365		netdev_for_each_mc_addr(ha, dev) {
6366			u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
6367
6368			crc >>= 24;
6369			hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
6370		}
6371	}
6372
6373	if (np->flags & NIU_FLAGS_MCAST)
6374		niu_load_hash(np, hash);
6375
6376	niu_enable_rx_mac(np, 1);
6377	spin_unlock_irqrestore(&np->lock, flags);
6378}
6379
6380static int niu_set_mac_addr(struct net_device *dev, void *p)
6381{
6382	struct niu *np = netdev_priv(dev);
6383	struct sockaddr *addr = p;
6384	unsigned long flags;
6385
6386	if (!is_valid_ether_addr(addr->sa_data))
6387		return -EADDRNOTAVAIL;
6388
6389	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
6390
6391	if (!netif_running(dev))
6392		return 0;
6393
6394	spin_lock_irqsave(&np->lock, flags);
6395	niu_enable_rx_mac(np, 0);
6396	niu_set_primary_mac(np, dev->dev_addr);
6397	niu_enable_rx_mac(np, 1);
6398	spin_unlock_irqrestore(&np->lock, flags);
6399
6400	return 0;
6401}
6402
6403static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6404{
6405	return -EOPNOTSUPP;
6406}
6407
6408static void niu_netif_stop(struct niu *np)
6409{
6410	netif_trans_update(np->dev);	/* prevent tx timeout */
6411
6412	niu_disable_napi(np);
6413
6414	netif_tx_disable(np->dev);
6415}
6416
6417static void niu_netif_start(struct niu *np)
6418{
	/* NOTE: unconditionally waking all tx queues is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after niu_init_hw).
	 */
6423	netif_tx_wake_all_queues(np->dev);
6424
6425	niu_enable_napi(np);
6426
6427	niu_enable_interrupts(np, 1);
6428}
6429
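/* Rebuild ring state after a reset without a full re-allocation:
 * pages still chained in the rxhash table are reposted into the RBR,
 * the rest of the ring is topped up with fresh pages, and any
 * in-flight Tx packets are released before the indices are rewound.
 */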
6430static void niu_reset_buffers(struct niu *np)
6431{
6432	int i, j, k, err;
6433
6434	if (np->rx_rings) {
6435		for (i = 0; i < np->num_rx_rings; i++) {
6436			struct rx_ring_info *rp = &np->rx_rings[i];
6437
6438			for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
6439				struct page *page;
6440
6441				page = rp->rxhash[j];
6442				while (page) {
6443					struct page *next =
6444						(struct page *) page->mapping;
6445					u64 base = page->index;
6446					base = base >> RBR_DESCR_ADDR_SHIFT;
6447					rp->rbr[k++] = cpu_to_le32(base);
6448					page = next;
6449				}
6450			}
6451			for (; k < MAX_RBR_RING_SIZE; k++) {
6452				err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
6453				if (unlikely(err))
6454					break;
6455			}
6456
6457			rp->rbr_index = rp->rbr_table_size - 1;
6458			rp->rcr_index = 0;
6459			rp->rbr_pending = 0;
6460			rp->rbr_refill_pending = 0;
6461		}
6462	}
6463	if (np->tx_rings) {
6464		for (i = 0; i < np->num_tx_rings; i++) {
6465			struct tx_ring_info *rp = &np->tx_rings[i];
6466
6467			for (j = 0; j < MAX_TX_RING_SIZE; j++) {
6468				if (rp->tx_buffs[j].skb)
6469					(void) release_tx_packet(np, rp, j);
6470			}
6471
6472			rp->pending = MAX_TX_RING_SIZE;
6473			rp->prod = 0;
6474			rp->cons = 0;
6475			rp->wrap_bit = 0;
6476		}
6477	}
6478}
6479
6480static void niu_reset_task(struct work_struct *work)
6481{
6482	struct niu *np = container_of(work, struct niu, reset_task);
6483	unsigned long flags;
6484	int err;
6485
6486	spin_lock_irqsave(&np->lock, flags);
6487	if (!netif_running(np->dev)) {
6488		spin_unlock_irqrestore(&np->lock, flags);
6489		return;
6490	}
6491
6492	spin_unlock_irqrestore(&np->lock, flags);
6493
6494	del_timer_sync(&np->timer);
6495
6496	niu_netif_stop(np);
6497
6498	spin_lock_irqsave(&np->lock, flags);
6499
6500	niu_stop_hw(np);
6501
6502	spin_unlock_irqrestore(&np->lock, flags);
6503
6504	niu_reset_buffers(np);
6505
6506	spin_lock_irqsave(&np->lock, flags);
6507
6508	err = niu_init_hw(np);
6509	if (!err) {
6510		np->timer.expires = jiffies + HZ;
6511		add_timer(&np->timer);
6512		niu_netif_start(np);
6513	}
6514
6515	spin_unlock_irqrestore(&np->lock, flags);
6516}
6517
6518static void niu_tx_timeout(struct net_device *dev, unsigned int txqueue)
6519{
6520	struct niu *np = netdev_priv(dev);
6521
6522	dev_err(np->device, "%s: Transmit timed out, resetting\n",
6523		dev->name);
6524
6525	schedule_work(&np->reset_task);
6526}
6527
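/* Pack one Tx descriptor: the SOP/mark bits, the descriptor count for
 * the whole packet (only meaningful on the SOP descriptor), the
 * transfer length, and the DMA address, per the TX_DESC_* masks.
 */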
6528static void niu_set_txd(struct tx_ring_info *rp, int index,
6529			u64 mapping, u64 len, u64 mark,
6530			u64 n_frags)
6531{
6532	__le64 *desc = &rp->descr[index];
6533
6534	*desc = cpu_to_le64(mark |
6535			    (n_frags << TX_DESC_NUM_PTR_SHIFT) |
6536			    (len << TX_DESC_TR_LEN_SHIFT) |
6537			    (mapping & TX_DESC_SAD));
6538}
6539
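/* Build the flags word of the software tx_pkt_hdr prepended to every
 * frame.  The L3 start, L4 start/stuff and pad offsets are expressed
 * in 16-bit units (hence the divisions by two) and are measured from
 * the start of the Ethernet header, which is why pad_bytes plus
 * sizeof(struct tx_pkt_hdr) is subtracted from the skb offsets.
 */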
6540static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
6541				u64 pad_bytes, u64 len)
6542{
6543	u16 eth_proto, eth_proto_inner;
6544	u64 csum_bits, l3off, ihl, ret;
6545	u8 ip_proto;
6546	int ipv6;
6547
6548	eth_proto = be16_to_cpu(ehdr->h_proto);
6549	eth_proto_inner = eth_proto;
6550	if (eth_proto == ETH_P_8021Q) {
6551		struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
6552		__be16 val = vp->h_vlan_encapsulated_proto;
6553
6554		eth_proto_inner = be16_to_cpu(val);
6555	}
6556
6557	ipv6 = ihl = 0;
6558	switch (skb->protocol) {
6559	case cpu_to_be16(ETH_P_IP):
6560		ip_proto = ip_hdr(skb)->protocol;
6561		ihl = ip_hdr(skb)->ihl;
6562		break;
6563	case cpu_to_be16(ETH_P_IPV6):
6564		ip_proto = ipv6_hdr(skb)->nexthdr;
6565		ihl = (40 >> 2);
6566		ipv6 = 1;
6567		break;
6568	default:
6569		ip_proto = ihl = 0;
6570		break;
6571	}
6572
6573	csum_bits = TXHDR_CSUM_NONE;
6574	if (skb->ip_summed == CHECKSUM_PARTIAL) {
6575		u64 start, stuff;
6576
6577		csum_bits = (ip_proto == IPPROTO_TCP ?
6578			     TXHDR_CSUM_TCP :
6579			     (ip_proto == IPPROTO_UDP ?
6580			      TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
6581
6582		start = skb_checksum_start_offset(skb) -
6583			(pad_bytes + sizeof(struct tx_pkt_hdr));
6584		stuff = start + skb->csum_offset;
6585
6586		csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
6587		csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
6588	}
6589
6590	l3off = skb_network_offset(skb) -
6591		(pad_bytes + sizeof(struct tx_pkt_hdr));
6592
6593	ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
6594	       (len << TXHDR_LEN_SHIFT) |
6595	       ((l3off / 2) << TXHDR_L3START_SHIFT) |
6596	       (ihl << TXHDR_IHL_SHIFT) |
6597	       ((eth_proto_inner < ETH_P_802_3_MIN) ? TXHDR_LLC : 0) |
6598	       ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
6599	       (ipv6 ? TXHDR_IP_VER : 0) |
6600	       csum_bits);
6601
6602	return ret;
6603}
6604
6605static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
6606				  struct net_device *dev)
6607{
6608	struct niu *np = netdev_priv(dev);
6609	unsigned long align, headroom;
6610	struct netdev_queue *txq;
6611	struct tx_ring_info *rp;
6612	struct tx_pkt_hdr *tp;
6613	unsigned int len, nfg;
6614	struct ethhdr *ehdr;
6615	int prod, i, tlen;
6616	u64 mapping, mrk;
6617
6618	i = skb_get_queue_mapping(skb);
6619	rp = &np->tx_rings[i];
6620	txq = netdev_get_tx_queue(dev, i);
6621
6622	if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
6623		netif_tx_stop_queue(txq);
6624		dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name);
6625		rp->tx_errors++;
6626		return NETDEV_TX_BUSY;
6627	}
6628
6629	if (eth_skb_pad(skb))
6630		goto out;
6631
6632	len = sizeof(struct tx_pkt_hdr) + 15;
6633	if (skb_headroom(skb) < len) {
6634		struct sk_buff *skb_new;
6635
6636		skb_new = skb_realloc_headroom(skb, len);
6637		if (!skb_new)
6638			goto out_drop;
6639		kfree_skb(skb);
6640		skb = skb_new;
6641	} else
6642		skb_orphan(skb);
6643
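	/* skb->data may sit anywhere within a 16-byte line; the pad
	 * inserted between the prepended header and the frame (and
	 * reported via the header's PAD field) brings the DMA start
	 * back to a 16-byte boundary, tx_pkt_hdr itself being 16 bytes.
	 */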
6644	align = ((unsigned long) skb->data & (16 - 1));
6645	headroom = align + sizeof(struct tx_pkt_hdr);
6646
6647	ehdr = (struct ethhdr *) skb->data;
6648	tp = skb_push(skb, headroom);
6649
6650	len = skb->len - sizeof(struct tx_pkt_hdr);
6651	tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
6652	tp->resv = 0;
6653
6654	len = skb_headlen(skb);
6655	mapping = np->ops->map_single(np->device, skb->data,
6656				      len, DMA_TO_DEVICE);
6657
6658	prod = rp->prod;
6659
6660	rp->tx_buffs[prod].skb = skb;
6661	rp->tx_buffs[prod].mapping = mapping;
6662
6663	mrk = TX_DESC_SOP;
6664	if (++rp->mark_counter == rp->mark_freq) {
6665		rp->mark_counter = 0;
6666		mrk |= TX_DESC_MARK;
6667		rp->mark_pending++;
6668	}
6669
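	/* Count the descriptors this packet will use: one per page
	 * fragment plus one per MAX_TX_DESC_LEN chunk of the linear
	 * area.  Only the SOP descriptor carries this count (and the
	 * mark), so both are zeroed after the first niu_set_txd().
	 */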
6670	tlen = len;
6671	nfg = skb_shinfo(skb)->nr_frags;
6672	while (tlen > 0) {
6673		tlen -= MAX_TX_DESC_LEN;
6674		nfg++;
6675	}
6676
6677	while (len > 0) {
6678		unsigned int this_len = len;
6679
6680		if (this_len > MAX_TX_DESC_LEN)
6681			this_len = MAX_TX_DESC_LEN;
6682
6683		niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
6684		mrk = nfg = 0;
6685
6686		prod = NEXT_TX(rp, prod);
6687		mapping += this_len;
6688		len -= this_len;
6689	}
6690
6691	for (i = 0; i <  skb_shinfo(skb)->nr_frags; i++) {
6692		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6693
6694		len = skb_frag_size(frag);
6695		mapping = np->ops->map_page(np->device, skb_frag_page(frag),
6696					    skb_frag_off(frag), len,
6697					    DMA_TO_DEVICE);
6698
6699		rp->tx_buffs[prod].skb = NULL;
6700		rp->tx_buffs[prod].mapping = mapping;
6701
6702		niu_set_txd(rp, prod, mapping, len, 0, 0);
6703
6704		prod = NEXT_TX(rp, prod);
6705	}
6706
6707	if (prod < rp->prod)
6708		rp->wrap_bit ^= TX_RING_KICK_WRAP;
6709	rp->prod = prod;
6710
6711	nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));
6712
6713	if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
6714		netif_tx_stop_queue(txq);
6715		if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
6716			netif_tx_wake_queue(txq);
6717	}
6718
6719out:
6720	return NETDEV_TX_OK;
6721
6722out_drop:
6723	rp->tx_errors++;
6724	kfree_skb(skb);
6725	goto out;
6726}
6727
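/* Crossing the standard/jumbo frame boundary requires a full channel
 * teardown and re-allocation, presumably because receive buffer sizing
 * depends on the MTU class; an MTU change within the same class takes
 * effect without restarting the hardware.
 */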
6728static int niu_change_mtu(struct net_device *dev, int new_mtu)
6729{
6730	struct niu *np = netdev_priv(dev);
6731	int err, orig_jumbo, new_jumbo;
6732
6733	orig_jumbo = (dev->mtu > ETH_DATA_LEN);
6734	new_jumbo = (new_mtu > ETH_DATA_LEN);
6735
6736	dev->mtu = new_mtu;
6737
6738	if (!netif_running(dev) ||
6739	    (orig_jumbo == new_jumbo))
6740		return 0;
6741
6742	niu_full_shutdown(np, dev);
6743
6744	niu_free_channels(np);
6745
6746	niu_enable_napi(np);
6747
6748	err = niu_alloc_channels(np);
6749	if (err)
6750		return err;
6751
6752	spin_lock_irq(&np->lock);
6753
6754	err = niu_init_hw(np);
6755	if (!err) {
6756		timer_setup(&np->timer, niu_timer, 0);
6757		np->timer.expires = jiffies + HZ;
6758
6759		err = niu_enable_interrupts(np, 1);
6760		if (err)
6761			niu_stop_hw(np);
6762	}
6763
6764	spin_unlock_irq(&np->lock);
6765
6766	if (!err) {
6767		netif_tx_start_all_queues(dev);
6768		if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
6769			netif_carrier_on(dev);
6770
6771		add_timer(&np->timer);
6772	}
6773
6774	return err;
6775}
6776
6777static void niu_get_drvinfo(struct net_device *dev,
6778			    struct ethtool_drvinfo *info)
6779{
6780	struct niu *np = netdev_priv(dev);
6781	struct niu_vpd *vpd = &np->vpd;
6782
6783	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
6784	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
6785	snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d",
6786		vpd->fcode_major, vpd->fcode_minor);
6787	if (np->parent->plat_type != PLAT_TYPE_NIU)
6788		strlcpy(info->bus_info, pci_name(np->pdev),
6789			sizeof(info->bus_info));
6790}
6791
6792static int niu_get_link_ksettings(struct net_device *dev,
6793				  struct ethtool_link_ksettings *cmd)
6794{
6795	struct niu *np = netdev_priv(dev);
6796	struct niu_link_config *lp;
6797
6798	lp = &np->link_config;
6799
6800	memset(cmd, 0, sizeof(*cmd));
6801	cmd->base.phy_address = np->phy_addr;
6802	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
6803						lp->supported);
6804	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
6805						lp->active_advertising);
6806	cmd->base.autoneg = lp->active_autoneg;
6807	cmd->base.speed = lp->active_speed;
6808	cmd->base.duplex = lp->active_duplex;
6809	cmd->base.port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
6810
6811	return 0;
6812}
6813
6814static int niu_set_link_ksettings(struct net_device *dev,
6815				  const struct ethtool_link_ksettings *cmd)
6816{
6817	struct niu *np = netdev_priv(dev);
6818	struct niu_link_config *lp = &np->link_config;
6819
6820	ethtool_convert_link_mode_to_legacy_u32(&lp->advertising,
6821						cmd->link_modes.advertising);
6822	lp->speed = cmd->base.speed;
6823	lp->duplex = cmd->base.duplex;
6824	lp->autoneg = cmd->base.autoneg;
6825	return niu_init_link(np);
6826}
6827
6828static u32 niu_get_msglevel(struct net_device *dev)
6829{
6830	struct niu *np = netdev_priv(dev);
6831	return np->msg_enable;
6832}
6833
6834static void niu_set_msglevel(struct net_device *dev, u32 value)
6835{
6836	struct niu *np = netdev_priv(dev);
6837	np->msg_enable = value;
6838}
6839
6840static int niu_nway_reset(struct net_device *dev)
6841{
6842	struct niu *np = netdev_priv(dev);
6843
6844	if (np->link_config.autoneg)
6845		return niu_init_link(np);
6846
6847	return 0;
6848}
6849
6850static int niu_get_eeprom_len(struct net_device *dev)
6851{
6852	struct niu *np = netdev_priv(dev);
6853
6854	return np->eeprom_len;
6855}
6856
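/* The EEPROM is exposed through the 32-bit ESPC_NCR window, so a read
 * is split into an unaligned head, whole 4-byte words, and a tail.
 */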
6857static int niu_get_eeprom(struct net_device *dev,
6858			  struct ethtool_eeprom *eeprom, u8 *data)
6859{
6860	struct niu *np = netdev_priv(dev);
6861	u32 offset, len, val;
6862
6863	offset = eeprom->offset;
6864	len = eeprom->len;
6865
6866	if (offset + len < offset)
6867		return -EINVAL;
6868	if (offset >= np->eeprom_len)
6869		return -EINVAL;
6870	if (offset + len > np->eeprom_len)
6871		len = eeprom->len = np->eeprom_len - offset;
6872
6873	if (offset & 3) {
6874		u32 b_offset, b_count;
6875
6876		b_offset = offset & 3;
6877		b_count = 4 - b_offset;
6878		if (b_count > len)
6879			b_count = len;
6880
6881		val = nr64(ESPC_NCR((offset - b_offset) / 4));
6882		memcpy(data, ((char *)&val) + b_offset, b_count);
6883		data += b_count;
6884		len -= b_count;
6885		offset += b_count;
6886	}
6887	while (len >= 4) {
6888		val = nr64(ESPC_NCR(offset / 4));
6889		memcpy(data, &val, 4);
6890		data += 4;
6891		len -= 4;
6892		offset += 4;
6893	}
6894	if (len) {
6895		val = nr64(ESPC_NCR(offset / 4));
6896		memcpy(data, &val, len);
6897	}
6898	return 0;
6899}
6900
6901static void niu_ethflow_to_l3proto(int flow_type, u8 *pid)
6902{
6903	switch (flow_type) {
6904	case TCP_V4_FLOW:
6905	case TCP_V6_FLOW:
6906		*pid = IPPROTO_TCP;
6907		break;
6908	case UDP_V4_FLOW:
6909	case UDP_V6_FLOW:
6910		*pid = IPPROTO_UDP;
6911		break;
6912	case SCTP_V4_FLOW:
6913	case SCTP_V6_FLOW:
6914		*pid = IPPROTO_SCTP;
6915		break;
6916	case AH_V4_FLOW:
6917	case AH_V6_FLOW:
6918		*pid = IPPROTO_AH;
6919		break;
6920	case ESP_V4_FLOW:
6921	case ESP_V6_FLOW:
6922		*pid = IPPROTO_ESP;
6923		break;
6924	default:
6925		*pid = 0;
6926		break;
6927	}
6928}
6929
6930static int niu_class_to_ethflow(u64 class, int *flow_type)
6931{
6932	switch (class) {
6933	case CLASS_CODE_TCP_IPV4:
6934		*flow_type = TCP_V4_FLOW;
6935		break;
6936	case CLASS_CODE_UDP_IPV4:
6937		*flow_type = UDP_V4_FLOW;
6938		break;
6939	case CLASS_CODE_AH_ESP_IPV4:
6940		*flow_type = AH_V4_FLOW;
6941		break;
6942	case CLASS_CODE_SCTP_IPV4:
6943		*flow_type = SCTP_V4_FLOW;
6944		break;
6945	case CLASS_CODE_TCP_IPV6:
6946		*flow_type = TCP_V6_FLOW;
6947		break;
6948	case CLASS_CODE_UDP_IPV6:
6949		*flow_type = UDP_V6_FLOW;
6950		break;
6951	case CLASS_CODE_AH_ESP_IPV6:
6952		*flow_type = AH_V6_FLOW;
6953		break;
6954	case CLASS_CODE_SCTP_IPV6:
6955		*flow_type = SCTP_V6_FLOW;
6956		break;
6957	case CLASS_CODE_USER_PROG1:
6958	case CLASS_CODE_USER_PROG2:
6959	case CLASS_CODE_USER_PROG3:
6960	case CLASS_CODE_USER_PROG4:
6961		*flow_type = IP_USER_FLOW;
6962		break;
6963	default:
6964		return -EINVAL;
6965	}
6966
6967	return 0;
6968}
6969
6970static int niu_ethflow_to_class(int flow_type, u64 *class)
6971{
6972	switch (flow_type) {
6973	case TCP_V4_FLOW:
6974		*class = CLASS_CODE_TCP_IPV4;
6975		break;
6976	case UDP_V4_FLOW:
6977		*class = CLASS_CODE_UDP_IPV4;
6978		break;
6979	case AH_ESP_V4_FLOW:
6980	case AH_V4_FLOW:
6981	case ESP_V4_FLOW:
6982		*class = CLASS_CODE_AH_ESP_IPV4;
6983		break;
6984	case SCTP_V4_FLOW:
6985		*class = CLASS_CODE_SCTP_IPV4;
6986		break;
6987	case TCP_V6_FLOW:
6988		*class = CLASS_CODE_TCP_IPV6;
6989		break;
6990	case UDP_V6_FLOW:
6991		*class = CLASS_CODE_UDP_IPV6;
6992		break;
6993	case AH_ESP_V6_FLOW:
6994	case AH_V6_FLOW:
6995	case ESP_V6_FLOW:
6996		*class = CLASS_CODE_AH_ESP_IPV6;
6997		break;
6998	case SCTP_V6_FLOW:
6999		*class = CLASS_CODE_SCTP_IPV6;
7000		break;
7001	default:
7002		return 0;
7003	}
7004
7005	return 1;
7006}
7007
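/* Translate between the hardware FLOW_KEY register bits and the
 * ethtool RXH_* hash-field flags; the two L4 byte-pair fields map to
 * the first and second 16-bit words of the L4 header (the source and
 * destination ports for TCP/UDP).
 */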
7008static u64 niu_flowkey_to_ethflow(u64 flow_key)
7009{
7010	u64 ethflow = 0;
7011
7012	if (flow_key & FLOW_KEY_L2DA)
7013		ethflow |= RXH_L2DA;
7014	if (flow_key & FLOW_KEY_VLAN)
7015		ethflow |= RXH_VLAN;
7016	if (flow_key & FLOW_KEY_IPSA)
7017		ethflow |= RXH_IP_SRC;
7018	if (flow_key & FLOW_KEY_IPDA)
7019		ethflow |= RXH_IP_DST;
7020	if (flow_key & FLOW_KEY_PROTO)
7021		ethflow |= RXH_L3_PROTO;
7022	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT))
7023		ethflow |= RXH_L4_B_0_1;
7024	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT))
7025		ethflow |= RXH_L4_B_2_3;
7026
	return ethflow;
}
7030
7031static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
7032{
7033	u64 key = 0;
7034
7035	if (ethflow & RXH_L2DA)
7036		key |= FLOW_KEY_L2DA;
7037	if (ethflow & RXH_VLAN)
7038		key |= FLOW_KEY_VLAN;
7039	if (ethflow & RXH_IP_SRC)
7040		key |= FLOW_KEY_IPSA;
7041	if (ethflow & RXH_IP_DST)
7042		key |= FLOW_KEY_IPDA;
7043	if (ethflow & RXH_L3_PROTO)
7044		key |= FLOW_KEY_PROTO;
7045	if (ethflow & RXH_L4_B_0_1)
7046		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT);
7047	if (ethflow & RXH_L4_B_2_3)
7048		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT);
7049
7050	*flow_key = key;
7051
	return 1;
}
7055
7056static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
7057{
7058	u64 class;
7059
7060	nfc->data = 0;
7061
7062	if (!niu_ethflow_to_class(nfc->flow_type, &class))
7063		return -EINVAL;
7064
7065	if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
7066	    TCAM_KEY_DISC)
7067		nfc->data = RXH_DISCARD;
7068	else
7069		nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
7070						      CLASS_CODE_USER_PROG1]);
7071	return 0;
7072}
7073
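/* Decode an IPv4 TCAM entry back into an ethtool flow spec: key[3]
 * holds the source and destination addresses, key[2] the TOS,
 * protocol, and ports (or SPI, or raw L4 bytes, depending on the
 * flow type).
 */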
7074static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp,
7075					struct ethtool_rx_flow_spec *fsp)
7076{
7077	u32 tmp;
7078	u16 prt;
7079
7080	tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
7081	fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
7082
7083	tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
7084	fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
7085
7086	tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
7087	fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
7088
7089	tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
7090	fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
7091
7092	fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >>
7093		TCAM_V4KEY2_TOS_SHIFT;
7094	fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >>
7095		TCAM_V4KEY2_TOS_SHIFT;
7096
7097	switch (fsp->flow_type) {
7098	case TCP_V4_FLOW:
7099	case UDP_V4_FLOW:
7100	case SCTP_V4_FLOW:
7101		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
7102			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
7103		fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
7104
7105		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
7106			TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
7107		fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
7108
7109		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
7110			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
7111		fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
7112
7113		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
7114			 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
7115		fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
7116		break;
7117	case AH_V4_FLOW:
7118	case ESP_V4_FLOW:
7119		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
7120			TCAM_V4KEY2_PORT_SPI_SHIFT;
7121		fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
7122
7123		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
7124			TCAM_V4KEY2_PORT_SPI_SHIFT;
7125		fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
7126		break;
7127	case IP_USER_FLOW:
7128		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
7129			TCAM_V4KEY2_PORT_SPI_SHIFT;
7130		fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
7131
7132		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
7133			TCAM_V4KEY2_PORT_SPI_SHIFT;
7134		fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
7135
7136		fsp->h_u.usr_ip4_spec.proto =
7137			(tp->key[2] & TCAM_V4KEY2_PROTO) >>
7138			TCAM_V4KEY2_PROTO_SHIFT;
7139		fsp->m_u.usr_ip4_spec.proto =
7140			(tp->key_mask[2] & TCAM_V4KEY2_PROTO) >>
7141			TCAM_V4KEY2_PROTO_SHIFT;
7142
7143		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
7144		break;
7145	default:
7146		break;
7147	}
7148}
7149
7150static int niu_get_ethtool_tcam_entry(struct niu *np,
7151				      struct ethtool_rxnfc *nfc)
7152{
7153	struct niu_parent *parent = np->parent;
7154	struct niu_tcam_entry *tp;
7155	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
7156	u16 idx;
7157	u64 class;
7158	int ret = 0;
7159
7160	idx = tcam_get_index(np, (u16)nfc->fs.location);
7161
7162	tp = &parent->tcam[idx];
7163	if (!tp->valid) {
7164		netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n",
7165			    parent->index, (u16)nfc->fs.location, idx);
7166		return -EINVAL;
7167	}
7168
7169	/* fill the flow spec entry */
7170	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
7171		TCAM_V4KEY0_CLASS_CODE_SHIFT;
7172	ret = niu_class_to_ethflow(class, &fsp->flow_type);
7173	if (ret < 0) {
7174		netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
7175			    parent->index);
7176		goto out;
7177	}
7178
7179	if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) {
7180		u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >>
7181			TCAM_V4KEY2_PROTO_SHIFT;
7182		if (proto == IPPROTO_ESP) {
7183			if (fsp->flow_type == AH_V4_FLOW)
7184				fsp->flow_type = ESP_V4_FLOW;
7185			else
7186				fsp->flow_type = ESP_V6_FLOW;
7187		}
7188	}
7189
7190	switch (fsp->flow_type) {
7191	case TCP_V4_FLOW:
7192	case UDP_V4_FLOW:
7193	case SCTP_V4_FLOW:
7194	case AH_V4_FLOW:
7195	case ESP_V4_FLOW:
7196		niu_get_ip4fs_from_tcam_key(tp, fsp);
7197		break;
7198	case TCP_V6_FLOW:
7199	case UDP_V6_FLOW:
7200	case SCTP_V6_FLOW:
7201	case AH_V6_FLOW:
7202	case ESP_V6_FLOW:
7203		/* Not yet implemented */
7204		ret = -EINVAL;
7205		break;
7206	case IP_USER_FLOW:
7207		niu_get_ip4fs_from_tcam_key(tp, fsp);
7208		break;
7209	default:
7210		ret = -EINVAL;
7211		break;
7212	}
7213
7214	if (ret < 0)
7215		goto out;
7216
7217	if (tp->assoc_data & TCAM_ASSOCDATA_DISC)
7218		fsp->ring_cookie = RX_CLS_FLOW_DISC;
7219	else
7220		fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >>
7221			TCAM_ASSOCDATA_OFFSET_SHIFT;
7222
	/* report the total TCAM size in nfc->data */
7224	nfc->data = tcam_get_size(np);
7225out:
7226	return ret;
7227}
7228
7229static int niu_get_ethtool_tcam_all(struct niu *np,
7230				    struct ethtool_rxnfc *nfc,
7231				    u32 *rule_locs)
7232{
7233	struct niu_parent *parent = np->parent;
7234	struct niu_tcam_entry *tp;
7235	int i, idx, cnt;
7236	unsigned long flags;
7237	int ret = 0;
7238
	/* report the total TCAM size in nfc->data */
7240	nfc->data = tcam_get_size(np);
7241
7242	niu_lock_parent(np, flags);
7243	for (cnt = 0, i = 0; i < nfc->data; i++) {
7244		idx = tcam_get_index(np, i);
7245		tp = &parent->tcam[idx];
7246		if (!tp->valid)
7247			continue;
7248		if (cnt == nfc->rule_cnt) {
7249			ret = -EMSGSIZE;
7250			break;
7251		}
7252		rule_locs[cnt] = i;
7253		cnt++;
7254	}
7255	niu_unlock_parent(np, flags);
7256
7257	nfc->rule_cnt = cnt;
7258
7259	return ret;
7260}
7261
7262static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
7263		       u32 *rule_locs)
7264{
7265	struct niu *np = netdev_priv(dev);
7266	int ret = 0;
7267
7268	switch (cmd->cmd) {
7269	case ETHTOOL_GRXFH:
7270		ret = niu_get_hash_opts(np, cmd);
7271		break;
7272	case ETHTOOL_GRXRINGS:
7273		cmd->data = np->num_rx_rings;
7274		break;
7275	case ETHTOOL_GRXCLSRLCNT:
7276		cmd->rule_cnt = tcam_get_valid_entry_cnt(np);
7277		break;
7278	case ETHTOOL_GRXCLSRULE:
7279		ret = niu_get_ethtool_tcam_entry(np, cmd);
7280		break;
7281	case ETHTOOL_GRXCLSRLALL:
7282		ret = niu_get_ethtool_tcam_all(np, cmd, rule_locs);
7283		break;
7284	default:
7285		ret = -EINVAL;
7286		break;
7287	}
7288
7289	return ret;
7290}
7291
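/* RXH_DISCARD is implemented with the TCAM_KEY_DISC bit rather than
 * via the flow key, so toggling it only touches the TCAM_KEY register;
 * the requested hash fields are then programmed into FLOW_KEY for the
 * class.
 */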
7292static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
7293{
7294	u64 class;
7295	u64 flow_key = 0;
7296	unsigned long flags;
7297
7298	if (!niu_ethflow_to_class(nfc->flow_type, &class))
7299		return -EINVAL;
7300
7301	if (class < CLASS_CODE_USER_PROG1 ||
7302	    class > CLASS_CODE_SCTP_IPV6)
7303		return -EINVAL;
7304
7305	if (nfc->data & RXH_DISCARD) {
7306		niu_lock_parent(np, flags);
7307		flow_key = np->parent->tcam_key[class -
7308					       CLASS_CODE_USER_PROG1];
7309		flow_key |= TCAM_KEY_DISC;
7310		nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
7311		np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key;
7312		niu_unlock_parent(np, flags);
7313		return 0;
7314	} else {
7315		/* Discard was set before, but is not set now */
7316		if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
7317		    TCAM_KEY_DISC) {
7318			niu_lock_parent(np, flags);
7319			flow_key = np->parent->tcam_key[class -
7320					       CLASS_CODE_USER_PROG1];
7321			flow_key &= ~TCAM_KEY_DISC;
7322			nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1),
7323			     flow_key);
7324			np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] =
7325				flow_key;
7326			niu_unlock_parent(np, flags);
7327		}
7328	}
7329
7330	if (!niu_ethflow_to_flowkey(nfc->data, &flow_key))
7331		return -EINVAL;
7332
7333	niu_lock_parent(np, flags);
7334	nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
7335	np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key;
7336	niu_unlock_parent(np, flags);
7337
7338	return 0;
7339}
7340
7341static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp,
7342				       struct niu_tcam_entry *tp,
7343				       int l2_rdc_tab, u64 class)
7344{
7345	u8 pid = 0;
7346	u32 sip, dip, sipm, dipm, spi, spim;
7347	u16 sport, dport, spm, dpm;
7348
7349	sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src);
7350	sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src);
7351	dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst);
7352	dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst);
7353
7354	tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT;
7355	tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE;
7356	tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT;
7357	tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM;
7358
7359	tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT;
7360	tp->key[3] |= dip;
7361
7362	tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT;
7363	tp->key_mask[3] |= dipm;
7364
7365	tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos <<
7366		       TCAM_V4KEY2_TOS_SHIFT);
7367	tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos <<
7368			    TCAM_V4KEY2_TOS_SHIFT);
7369	switch (fsp->flow_type) {
7370	case TCP_V4_FLOW:
7371	case UDP_V4_FLOW:
7372	case SCTP_V4_FLOW:
7373		sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
7374		spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
7375		dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
7376		dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
7377
7378		tp->key[2] |= (((u64)sport << 16) | dport);
7379		tp->key_mask[2] |= (((u64)spm << 16) | dpm);
7380		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
7381		break;
7382	case AH_V4_FLOW:
7383	case ESP_V4_FLOW:
7384		spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi);
7385		spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi);
7386
7387		tp->key[2] |= spi;
7388		tp->key_mask[2] |= spim;
7389		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
7390		break;
7391	case IP_USER_FLOW:
7392		spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes);
7393		spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes);
7394
7395		tp->key[2] |= spi;
7396		tp->key_mask[2] |= spim;
7397		pid = fsp->h_u.usr_ip4_spec.proto;
7398		break;
7399	default:
7400		break;
7401	}
7402
7403	tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT);
	if (pid)
		tp->key_mask[2] |= TCAM_V4KEY2_PROTO;
7407}
7408
7409static int niu_add_ethtool_tcam_entry(struct niu *np,
7410				      struct ethtool_rxnfc *nfc)
7411{
7412	struct niu_parent *parent = np->parent;
7413	struct niu_tcam_entry *tp;
7414	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
7415	struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port];
7416	int l2_rdc_table = rdc_table->first_table_num;
7417	u16 idx;
7418	u64 class;
7419	unsigned long flags;
7420	int err, ret;
7421
7422	ret = 0;
7423
7424	idx = nfc->fs.location;
7425	if (idx >= tcam_get_size(np))
7426		return -EINVAL;
7427
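	/* User-defined IP flows are matched by programming one of four
	 * user classes with the requested L3 protocol and TOS.  The
	 * classes are refcounted, so rules with the same protocol share
	 * a single class.
	 */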
7428	if (fsp->flow_type == IP_USER_FLOW) {
7429		int i;
7430		int add_usr_cls = 0;
7431		struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec;
7432		struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec;
7433
7434		if (uspec->ip_ver != ETH_RX_NFC_IP4)
7435			return -EINVAL;
7436
7437		niu_lock_parent(np, flags);
7438
7439		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
7440			if (parent->l3_cls[i]) {
7441				if (uspec->proto == parent->l3_cls_pid[i]) {
7442					class = parent->l3_cls[i];
7443					parent->l3_cls_refcnt[i]++;
7444					add_usr_cls = 1;
7445					break;
7446				}
7447			} else {
7448				/* Program new user IP class */
7449				switch (i) {
7450				case 0:
7451					class = CLASS_CODE_USER_PROG1;
7452					break;
7453				case 1:
7454					class = CLASS_CODE_USER_PROG2;
7455					break;
7456				case 2:
7457					class = CLASS_CODE_USER_PROG3;
7458					break;
7459				case 3:
7460					class = CLASS_CODE_USER_PROG4;
7461					break;
7462				default:
7463					class = CLASS_CODE_UNRECOG;
7464					break;
7465				}
7466				ret = tcam_user_ip_class_set(np, class, 0,
7467							     uspec->proto,
7468							     uspec->tos,
7469							     umask->tos);
7470				if (ret)
7471					goto out;
7472
7473				ret = tcam_user_ip_class_enable(np, class, 1);
7474				if (ret)
7475					goto out;
7476				parent->l3_cls[i] = class;
7477				parent->l3_cls_pid[i] = uspec->proto;
7478				parent->l3_cls_refcnt[i]++;
7479				add_usr_cls = 1;
7480				break;
7481			}
7482		}
7483		if (!add_usr_cls) {
7484			netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n",
7485				    parent->index, __func__, uspec->proto);
7486			ret = -EINVAL;
7487			goto out;
7488		}
7489		niu_unlock_parent(np, flags);
7490	} else {
		if (!niu_ethflow_to_class(fsp->flow_type, &class))
			return -EINVAL;
7494	}
7495
7496	niu_lock_parent(np, flags);
7497
7498	idx = tcam_get_index(np, idx);
7499	tp = &parent->tcam[idx];
7500
7501	memset(tp, 0, sizeof(*tp));
7502
7503	/* fill in the tcam key and mask */
7504	switch (fsp->flow_type) {
7505	case TCP_V4_FLOW:
7506	case UDP_V4_FLOW:
7507	case SCTP_V4_FLOW:
7508	case AH_V4_FLOW:
7509	case ESP_V4_FLOW:
7510		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
7511		break;
7512	case TCP_V6_FLOW:
7513	case UDP_V6_FLOW:
7514	case SCTP_V6_FLOW:
7515	case AH_V6_FLOW:
7516	case ESP_V6_FLOW:
7517		/* Not yet implemented */
7518		netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
7519			    parent->index, __func__, fsp->flow_type);
7520		ret = -EINVAL;
7521		goto out;
7522	case IP_USER_FLOW:
7523		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
7524		break;
7525	default:
7526		netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
7527			    parent->index, __func__, fsp->flow_type);
7528		ret = -EINVAL;
7529		goto out;
7530	}
7531
7532	/* fill in the assoc data */
7533	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
7534		tp->assoc_data = TCAM_ASSOCDATA_DISC;
7535	} else {
7536		if (fsp->ring_cookie >= np->num_rx_rings) {
7537			netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n",
7538				    parent->index, __func__,
7539				    (long long)fsp->ring_cookie);
7540			ret = -EINVAL;
7541			goto out;
7542		}
7543		tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
7544				  (fsp->ring_cookie <<
7545				   TCAM_ASSOCDATA_OFFSET_SHIFT));
7546	}
7547
7548	err = tcam_write(np, idx, tp->key, tp->key_mask);
7549	if (err) {
7550		ret = -EINVAL;
7551		goto out;
7552	}
7553	err = tcam_assoc_write(np, idx, tp->assoc_data);
7554	if (err) {
7555		ret = -EINVAL;
7556		goto out;
7557	}
7558
	/* mark the entry valid */
7560	tp->valid = 1;
7561	np->clas.tcam_valid_entries++;
7562out:
7563	niu_unlock_parent(np, flags);
7564
7565	return ret;
7566}
7567
7568static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
7569{
7570	struct niu_parent *parent = np->parent;
7571	struct niu_tcam_entry *tp;
7572	u16 idx;
7573	unsigned long flags;
7574	u64 class;
7575	int ret = 0;
7576
7577	if (loc >= tcam_get_size(np))
7578		return -EINVAL;
7579
7580	niu_lock_parent(np, flags);
7581
7582	idx = tcam_get_index(np, loc);
7583	tp = &parent->tcam[idx];
7584
	/* if the entry uses a user-defined class, drop that class's reference */
7586	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
7587		TCAM_V4KEY0_CLASS_CODE_SHIFT;
7588
7589	if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) {
7590		int i;
7591		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
7592			if (parent->l3_cls[i] == class) {
7593				parent->l3_cls_refcnt[i]--;
7594				if (!parent->l3_cls_refcnt[i]) {
7595					/* disable class */
7596					ret = tcam_user_ip_class_enable(np,
7597									class,
7598									0);
7599					if (ret)
7600						goto out;
7601					parent->l3_cls[i] = 0;
7602					parent->l3_cls_pid[i] = 0;
7603				}
7604				break;
7605			}
7606		}
7607		if (i == NIU_L3_PROG_CLS) {
7608			netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n",
7609				    parent->index, __func__,
7610				    (unsigned long long)class);
7611			ret = -EINVAL;
7612			goto out;
7613		}
7614	}
7615
7616	ret = tcam_flush(np, idx);
7617	if (ret)
7618		goto out;
7619
7620	/* invalidate the entry */
7621	tp->valid = 0;
7622	np->clas.tcam_valid_entries--;
7623out:
7624	niu_unlock_parent(np, flags);
7625
7626	return ret;
7627}
7628
7629static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
7630{
7631	struct niu *np = netdev_priv(dev);
7632	int ret = 0;
7633
7634	switch (cmd->cmd) {
7635	case ETHTOOL_SRXFH:
7636		ret = niu_set_hash_opts(np, cmd);
7637		break;
7638	case ETHTOOL_SRXCLSRLINS:
7639		ret = niu_add_ethtool_tcam_entry(np, cmd);
7640		break;
7641	case ETHTOOL_SRXCLSRLDEL:
7642		ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location);
7643		break;
7644	default:
7645		ret = -EINVAL;
7646		break;
7647	}
7648
7649	return ret;
7650}
7651
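/* These string tables must stay in the same order as the fields of
 * struct niu_xmac_stats and struct niu_bmac_stats, because
 * niu_get_ethtool_stats() copies those structures out verbatim.
 */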
7652static const struct {
7653	const char string[ETH_GSTRING_LEN];
7654} niu_xmac_stat_keys[] = {
7655	{ "tx_frames" },
7656	{ "tx_bytes" },
7657	{ "tx_fifo_errors" },
7658	{ "tx_overflow_errors" },
7659	{ "tx_max_pkt_size_errors" },
7660	{ "tx_underflow_errors" },
7661	{ "rx_local_faults" },
7662	{ "rx_remote_faults" },
7663	{ "rx_link_faults" },
7664	{ "rx_align_errors" },
7665	{ "rx_frags" },
7666	{ "rx_mcasts" },
7667	{ "rx_bcasts" },
7668	{ "rx_hist_cnt1" },
7669	{ "rx_hist_cnt2" },
7670	{ "rx_hist_cnt3" },
7671	{ "rx_hist_cnt4" },
7672	{ "rx_hist_cnt5" },
7673	{ "rx_hist_cnt6" },
7674	{ "rx_hist_cnt7" },
7675	{ "rx_octets" },
7676	{ "rx_code_violations" },
7677	{ "rx_len_errors" },
7678	{ "rx_crc_errors" },
7679	{ "rx_underflows" },
7680	{ "rx_overflows" },
7681	{ "pause_off_state" },
7682	{ "pause_on_state" },
7683	{ "pause_received" },
7684};
7685
7686#define NUM_XMAC_STAT_KEYS	ARRAY_SIZE(niu_xmac_stat_keys)
7687
7688static const struct {
7689	const char string[ETH_GSTRING_LEN];
7690} niu_bmac_stat_keys[] = {
7691	{ "tx_underflow_errors" },
7692	{ "tx_max_pkt_size_errors" },
7693	{ "tx_bytes" },
7694	{ "tx_frames" },
7695	{ "rx_overflows" },
7696	{ "rx_frames" },
7697	{ "rx_align_errors" },
7698	{ "rx_crc_errors" },
7699	{ "rx_len_errors" },
7700	{ "pause_off_state" },
7701	{ "pause_on_state" },
7702	{ "pause_received" },
7703};
7704
7705#define NUM_BMAC_STAT_KEYS	ARRAY_SIZE(niu_bmac_stat_keys)
7706
7707static const struct {
7708	const char string[ETH_GSTRING_LEN];
7709} niu_rxchan_stat_keys[] = {
7710	{ "rx_channel" },
7711	{ "rx_packets" },
7712	{ "rx_bytes" },
7713	{ "rx_dropped" },
7714	{ "rx_errors" },
7715};
7716
7717#define NUM_RXCHAN_STAT_KEYS	ARRAY_SIZE(niu_rxchan_stat_keys)
7718
7719static const struct {
7720	const char string[ETH_GSTRING_LEN];
7721} niu_txchan_stat_keys[] = {
7722	{ "tx_channel" },
7723	{ "tx_packets" },
7724	{ "tx_bytes" },
7725	{ "tx_errors" },
7726};
7727
7728#define NUM_TXCHAN_STAT_KEYS	ARRAY_SIZE(niu_txchan_stat_keys)
7729
7730static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data)
7731{
7732	struct niu *np = netdev_priv(dev);
7733	int i;
7734
7735	if (stringset != ETH_SS_STATS)
7736		return;
7737
7738	if (np->flags & NIU_FLAGS_XMAC) {
7739		memcpy(data, niu_xmac_stat_keys,
7740		       sizeof(niu_xmac_stat_keys));
7741		data += sizeof(niu_xmac_stat_keys);
7742	} else {
7743		memcpy(data, niu_bmac_stat_keys,
7744		       sizeof(niu_bmac_stat_keys));
7745		data += sizeof(niu_bmac_stat_keys);
7746	}
7747	for (i = 0; i < np->num_rx_rings; i++) {
7748		memcpy(data, niu_rxchan_stat_keys,
7749		       sizeof(niu_rxchan_stat_keys));
7750		data += sizeof(niu_rxchan_stat_keys);
7751	}
7752	for (i = 0; i < np->num_tx_rings; i++) {
7753		memcpy(data, niu_txchan_stat_keys,
7754		       sizeof(niu_txchan_stat_keys));
7755		data += sizeof(niu_txchan_stat_keys);
7756	}
7757}
7758
7759static int niu_get_sset_count(struct net_device *dev, int stringset)
7760{
7761	struct niu *np = netdev_priv(dev);
7762
7763	if (stringset != ETH_SS_STATS)
7764		return -EINVAL;
7765
7766	return (np->flags & NIU_FLAGS_XMAC ?
7767		 NUM_XMAC_STAT_KEYS :
7768		 NUM_BMAC_STAT_KEYS) +
7769		(np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
7770		(np->num_tx_rings * NUM_TXCHAN_STAT_KEYS);
7771}
7772
7773static void niu_get_ethtool_stats(struct net_device *dev,
7774				  struct ethtool_stats *stats, u64 *data)
7775{
7776	struct niu *np = netdev_priv(dev);
7777	int i;
7778
7779	niu_sync_mac_stats(np);
7780	if (np->flags & NIU_FLAGS_XMAC) {
7781		memcpy(data, &np->mac_stats.xmac,
7782		       sizeof(struct niu_xmac_stats));
7783		data += (sizeof(struct niu_xmac_stats) / sizeof(u64));
7784	} else {
7785		memcpy(data, &np->mac_stats.bmac,
7786		       sizeof(struct niu_bmac_stats));
7787		data += (sizeof(struct niu_bmac_stats) / sizeof(u64));
7788	}
7789	for (i = 0; i < np->num_rx_rings; i++) {
7790		struct rx_ring_info *rp = &np->rx_rings[i];
7791
7792		niu_sync_rx_discard_stats(np, rp, 0);
7793
7794		data[0] = rp->rx_channel;
7795		data[1] = rp->rx_packets;
7796		data[2] = rp->rx_bytes;
7797		data[3] = rp->rx_dropped;
7798		data[4] = rp->rx_errors;
7799		data += 5;
7800	}
7801	for (i = 0; i < np->num_tx_rings; i++) {
7802		struct tx_ring_info *rp = &np->tx_rings[i];
7803
7804		data[0] = rp->tx_channel;
7805		data[1] = rp->tx_packets;
7806		data[2] = rp->tx_bytes;
7807		data[3] = rp->tx_errors;
7808		data += 4;
7809	}
7810}
7811
7812static u64 niu_led_state_save(struct niu *np)
7813{
7814	if (np->flags & NIU_FLAGS_XMAC)
7815		return nr64_mac(XMAC_CONFIG);
7816	else
7817		return nr64_mac(BMAC_XIF_CONFIG);
7818}
7819
7820static void niu_led_state_restore(struct niu *np, u64 val)
7821{
7822	if (np->flags & NIU_FLAGS_XMAC)
7823		nw64_mac(XMAC_CONFIG, val);
7824	else
7825		nw64_mac(BMAC_XIF_CONFIG, val);
7826}
7827
7828static void niu_force_led(struct niu *np, int on)
7829{
7830	u64 val, reg, bit;
7831
7832	if (np->flags & NIU_FLAGS_XMAC) {
7833		reg = XMAC_CONFIG;
7834		bit = XMAC_CONFIG_FORCE_LED_ON;
7835	} else {
7836		reg = BMAC_XIF_CONFIG;
7837		bit = BMAC_XIF_CONFIG_LINK_LED;
7838	}
7839
7840	val = nr64_mac(reg);
7841	if (on)
7842		val |= bit;
7843	else
7844		val &= ~bit;
7845	nw64_mac(reg, val);
7846}
7847
static int niu_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
7852	struct niu *np = netdev_priv(dev);
7853
7854	if (!netif_running(dev))
7855		return -EAGAIN;
7856
7857	switch (state) {
7858	case ETHTOOL_ID_ACTIVE:
7859		np->orig_led_state = niu_led_state_save(np);
7860		return 1;	/* cycle on/off once per second */
7861
7862	case ETHTOOL_ID_ON:
7863		niu_force_led(np, 1);
7864		break;
7865
7866	case ETHTOOL_ID_OFF:
7867		niu_force_led(np, 0);
7868		break;
7869
7870	case ETHTOOL_ID_INACTIVE:
7871		niu_led_state_restore(np, np->orig_led_state);
7872	}
7873
7874	return 0;
7875}
7876
7877static const struct ethtool_ops niu_ethtool_ops = {
7878	.get_drvinfo		= niu_get_drvinfo,
7879	.get_link		= ethtool_op_get_link,
7880	.get_msglevel		= niu_get_msglevel,
7881	.set_msglevel		= niu_set_msglevel,
7882	.nway_reset		= niu_nway_reset,
7883	.get_eeprom_len		= niu_get_eeprom_len,
7884	.get_eeprom		= niu_get_eeprom,
7885	.get_strings		= niu_get_strings,
7886	.get_sset_count		= niu_get_sset_count,
7887	.get_ethtool_stats	= niu_get_ethtool_stats,
7888	.set_phys_id		= niu_set_phys_id,
7889	.get_rxnfc		= niu_get_nfc,
7890	.set_rxnfc		= niu_set_nfc,
7891	.get_link_ksettings	= niu_get_link_ksettings,
7892	.set_link_ksettings	= niu_set_link_ksettings,
7893};
7894
7895static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
7896			      int ldg, int ldn)
7897{
7898	if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX)
7899		return -EINVAL;
7900	if (ldn < 0 || ldn > LDN_MAX)
7901		return -EINVAL;
7902
7903	parent->ldg_map[ldn] = ldg;
7904
7905	if (np->parent->plat_type == PLAT_TYPE_NIU) {
7906		/* On N2 NIU, the ldn-->ldg assignments are setup and fixed by
7907		 * the firmware, and we're not supposed to change them.
7908		 * Validate the mapping, because if it's wrong we probably
7909		 * won't get any interrupts and that's painful to debug.
7910		 */
7911		if (nr64(LDG_NUM(ldn)) != ldg) {
7912			dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
7913				np->port, ldn, ldg,
7914				(unsigned long long) nr64(LDG_NUM(ldn)));
7915			return -EINVAL;
7916		}
7917	} else
7918		nw64(LDG_NUM(ldn), ldg);
7919
7920	return 0;
7921}
7922
7923static int niu_set_ldg_timer_res(struct niu *np, int res)
7924{
7925	if (res < 0 || res > LDG_TIMER_RES_VAL)
		return -EINVAL;

	nw64(LDG_TIMER_RES, res);
7930
7931	return 0;
7932}
7933
7934static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector)
7935{
7936	if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) ||
7937	    (func < 0 || func > 3) ||
7938	    (vector < 0 || vector > 0x1f))
7939		return -EINVAL;
7940
7941	nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector);
7942
7943	return 0;
7944}
7945
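/* Read one byte of the expansion ROM through the ESPC PIO interface:
 * write the address with READ_START set, then poll for READ_END.  The
 * sequence is issued twice before the data is sampled, which appears
 * to be needed to get a stable result out of the part.
 */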
7946static int niu_pci_eeprom_read(struct niu *np, u32 addr)
7947{
7948	u64 frame, frame_base = (ESPC_PIO_STAT_READ_START |
7949				 (addr << ESPC_PIO_STAT_ADDR_SHIFT));
7950	int limit;
7951
7952	if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT))
7953		return -EINVAL;
7954
7955	frame = frame_base;
7956	nw64(ESPC_PIO_STAT, frame);
7957	limit = 64;
7958	do {
7959		udelay(5);
7960		frame = nr64(ESPC_PIO_STAT);
7961		if (frame & ESPC_PIO_STAT_READ_END)
7962			break;
7963	} while (limit--);
7964	if (!(frame & ESPC_PIO_STAT_READ_END)) {
7965		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
7966			(unsigned long long) frame);
7967		return -ENODEV;
7968	}
7969
7970	frame = frame_base;
7971	nw64(ESPC_PIO_STAT, frame);
7972	limit = 64;
7973	do {
7974		udelay(5);
7975		frame = nr64(ESPC_PIO_STAT);
7976		if (frame & ESPC_PIO_STAT_READ_END)
7977			break;
7978	} while (limit--);
7979	if (!(frame & ESPC_PIO_STAT_READ_END)) {
7980		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
7981			(unsigned long long) frame);
7982		return -ENODEV;
7983	}
7984
7985	frame = nr64(ESPC_PIO_STAT);
7986	return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT;
7987}
7988
7989static int niu_pci_eeprom_read16(struct niu *np, u32 off)
7990{
7991	int err = niu_pci_eeprom_read(np, off);
7992	u16 val;
7993
7994	if (err < 0)
7995		return err;
7996	val = (err << 8);
7997	err = niu_pci_eeprom_read(np, off + 1);
7998	if (err < 0)
7999		return err;
8000	val |= (err & 0xff);
8001
8002	return val;
8003}
8004
8005static int niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
8006{
8007	int err = niu_pci_eeprom_read(np, off);
8008	u16 val;
8009
8010	if (err < 0)
8011		return err;
8012
8013	val = (err & 0xff);
8014	err = niu_pci_eeprom_read(np, off + 1);
8015	if (err < 0)
8016		return err;
8017
8018	val |= (err & 0xff) << 8;
8019
8020	return val;
8021}
8022
8023static int niu_pci_vpd_get_propname(struct niu *np, u32 off, char *namebuf,
8024				    int namebuf_len)
8025{
8026	int i;
8027
8028	for (i = 0; i < namebuf_len; i++) {
8029		int err = niu_pci_eeprom_read(np, off + i);
8030		if (err < 0)
8031			return err;
8032		*namebuf++ = err;
8033		if (!err)
8034			break;
8035	}
8036	if (i >= namebuf_len)
8037		return -EINVAL;
8038
8039	return i + 1;
8040}
8041
8042static void niu_vpd_parse_version(struct niu *np)
8043{
8044	struct niu_vpd *vpd = &np->vpd;
8045	int len = strlen(vpd->version) + 1;
8046	const char *s = vpd->version;
8047	int i;
8048
8049	for (i = 0; i < len - 5; i++) {
8050		if (!strncmp(s + i, "FCode ", 6))
8051			break;
8052	}
8053	if (i >= len - 5)
8054		return;
8055
8056	s += i + 5;
8057	sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);
8058
8059	netif_printk(np, probe, KERN_DEBUG, np->dev,
8060		     "VPD_SCAN: FCODE major(%d) minor(%d)\n",
8061		     vpd->fcode_major, vpd->fcode_minor);
8062	if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
8063	    (vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
8064	     vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
8065		np->flags |= NIU_FLAGS_VPD_VALID;
8066}
8067
8068/* ESPC_PIO_EN_ENABLE must be set */
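/* Walk the VPD property list: each record encodes its own length, a
 * property length, and a NUL-terminated property name followed by the
 * property data, as decoded below.  Known names are copied into
 * struct niu_vpd, and the scan stops early once all of them are found.
 */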
8069static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
8070{
8071	unsigned int found_mask = 0;
8072#define FOUND_MASK_MODEL	0x00000001
8073#define FOUND_MASK_BMODEL	0x00000002
8074#define FOUND_MASK_VERS		0x00000004
8075#define FOUND_MASK_MAC		0x00000008
8076#define FOUND_MASK_NMAC		0x00000010
8077#define FOUND_MASK_PHY		0x00000020
8078#define FOUND_MASK_ALL		0x0000003f
8079
8080	netif_printk(np, probe, KERN_DEBUG, np->dev,
8081		     "VPD_SCAN: start[%x] end[%x]\n", start, end);
8082	while (start < end) {
8083		int len, err, prop_len;
8084		char namebuf[64];
8085		u8 *prop_buf;
8086		int max_len;
8087
8088		if (found_mask == FOUND_MASK_ALL) {
8089			niu_vpd_parse_version(np);
8090			return 1;
8091		}
8092
8093		err = niu_pci_eeprom_read(np, start + 2);
8094		if (err < 0)
8095			return err;
8096		len = err;
8097		start += 3;
8098
8099		prop_len = niu_pci_eeprom_read(np, start + 4);
8100		if (prop_len < 0)
8101			return prop_len;
8102		err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
8103		if (err < 0)
8104			return err;
8105
8106		prop_buf = NULL;
8107		max_len = 0;
8108		if (!strcmp(namebuf, "model")) {
8109			prop_buf = np->vpd.model;
8110			max_len = NIU_VPD_MODEL_MAX;
8111			found_mask |= FOUND_MASK_MODEL;
8112		} else if (!strcmp(namebuf, "board-model")) {
8113			prop_buf = np->vpd.board_model;
8114			max_len = NIU_VPD_BD_MODEL_MAX;
8115			found_mask |= FOUND_MASK_BMODEL;
8116		} else if (!strcmp(namebuf, "version")) {
8117			prop_buf = np->vpd.version;
8118			max_len = NIU_VPD_VERSION_MAX;
8119			found_mask |= FOUND_MASK_VERS;
8120		} else if (!strcmp(namebuf, "local-mac-address")) {
8121			prop_buf = np->vpd.local_mac;
8122			max_len = ETH_ALEN;
8123			found_mask |= FOUND_MASK_MAC;
8124		} else if (!strcmp(namebuf, "num-mac-addresses")) {
8125			prop_buf = &np->vpd.mac_num;
8126			max_len = 1;
8127			found_mask |= FOUND_MASK_NMAC;
8128		} else if (!strcmp(namebuf, "phy-type")) {
8129			prop_buf = np->vpd.phy_type;
8130			max_len = NIU_VPD_PHY_TYPE_MAX;
8131			found_mask |= FOUND_MASK_PHY;
8132		}
8133
8134		if (max_len && prop_len > max_len) {
8135			dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len);
8136			return -EINVAL;
8137		}
8138
8139		if (prop_buf) {
8140			u32 off = start + 5 + err;
8141			int i;
8142
8143			netif_printk(np, probe, KERN_DEBUG, np->dev,
8144				     "VPD_SCAN: Reading in property [%s] len[%d]\n",
8145				     namebuf, prop_len);
8146			for (i = 0; i < prop_len; i++) {
8147				err =  niu_pci_eeprom_read(np, off + i);
8148				if (err < 0)
8149					return err;
8150				*prop_buf++ = err;
8151			}
8152		}
8153
8154		start += len;
8155	}
8156
8157	return 0;
8158}
8159
8160/* ESPC_PIO_EN_ENABLE must be set */
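/* 0x90 is the PCI VPD large-resource tag for the read-only (VPD-R)
 * section; its 16-bit little-endian length follows the tag byte.
 */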
8161static int niu_pci_vpd_fetch(struct niu *np, u32 start)
8162{
8163	u32 offset;
8164	int err;
8165
8166	err = niu_pci_eeprom_read16_swp(np, start + 1);
8167	if (err < 0)
8168		return err;
8169
8170	offset = err + 3;
8171
8172	while (start + offset < ESPC_EEPROM_SIZE) {
8173		u32 here = start + offset;
8174		u32 end;
8175
8176		err = niu_pci_eeprom_read(np, here);
8177		if (err < 0)
8178			return err;
8179		if (err != 0x90)
8180			return -EINVAL;
8181
8182		err = niu_pci_eeprom_read16_swp(np, here + 1);
8183		if (err < 0)
8184			return err;
8185
8186		here = start + offset + 3;
8187		end = start + offset + err;
8188
8189		offset += err;
8190
8191		err = niu_pci_vpd_scan_props(np, here, end);
8192		if (err < 0)
8193			return err;
		/* a return of 1 means all properties were found; not an error */
8195		if (err == 1)
8196			return 0;
8197	}
8198	return 0;
8199}
8200
8201/* ESPC_PIO_EN_ENABLE must be set */
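/* Locate the VPD inside the expansion ROM: walk the chain of ROM
 * images (0x55AA signature, "PCIR" data structure) until the OBP
 * image (code type 0x01) is found, then follow its VPD pointer, which
 * must lead to a 0x82 identifier-string tag.
 */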
8202static u32 niu_pci_vpd_offset(struct niu *np)
8203{
8204	u32 start = 0, end = ESPC_EEPROM_SIZE, ret;
8205	int err;
8206
8207	while (start < end) {
8208		ret = start;
8209
8210		/* ROM header signature?  */
8211		err = niu_pci_eeprom_read16(np, start +  0);
8212		if (err != 0x55aa)
8213			return 0;
8214
8215		/* Apply offset to PCI data structure.  */
8216		err = niu_pci_eeprom_read16(np, start + 23);
8217		if (err < 0)
8218			return 0;
8219		start += err;
8220
8221		/* Check for "PCIR" signature.  */
8222		err = niu_pci_eeprom_read16(np, start +  0);
8223		if (err != 0x5043)
8224			return 0;
8225		err = niu_pci_eeprom_read16(np, start +  2);
8226		if (err != 0x4952)
8227			return 0;
8228
8229		/* Check for OBP image type.  */
8230		err = niu_pci_eeprom_read(np, start + 20);
8231		if (err < 0)
8232			return 0;
8233		if (err != 0x01) {
8234			err = niu_pci_eeprom_read(np, ret + 2);
8235			if (err < 0)
8236				return 0;
8237
8238			start = ret + (err * 512);
8239			continue;
8240		}
8241
8242		err = niu_pci_eeprom_read16_swp(np, start + 8);
8243		if (err < 0)
8244			return err;
8245		ret += err;
8246
8247		err = niu_pci_eeprom_read(np, ret + 0);
8248		if (err != 0x82)
8249			return 0;
8250
8251		return ret;
8252	}
8253
8254	return 0;
8255}
8256
8257static int niu_phy_type_prop_decode(struct niu *np, const char *phy_prop)
8258{
8259	if (!strcmp(phy_prop, "mif")) {
8260		/* 1G copper, MII */
8261		np->flags &= ~(NIU_FLAGS_FIBER |
8262			       NIU_FLAGS_10G);
8263		np->mac_xcvr = MAC_XCVR_MII;
8264	} else if (!strcmp(phy_prop, "xgf")) {
8265		/* 10G fiber, XPCS */
8266		np->flags |= (NIU_FLAGS_10G |
8267			      NIU_FLAGS_FIBER);
8268		np->mac_xcvr = MAC_XCVR_XPCS;
8269	} else if (!strcmp(phy_prop, "pcs")) {
8270		/* 1G fiber, PCS */
8271		np->flags &= ~NIU_FLAGS_10G;
8272		np->flags |= NIU_FLAGS_FIBER;
8273		np->mac_xcvr = MAC_XCVR_PCS;
8274	} else if (!strcmp(phy_prop, "xgc")) {
8275		/* 10G copper, XPCS */
8276		np->flags |= NIU_FLAGS_10G;
8277		np->flags &= ~NIU_FLAGS_FIBER;
8278		np->mac_xcvr = MAC_XCVR_XPCS;
8279	} else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
8280		/* 10G Serdes or 1G Serdes, default to 10G */
8281		np->flags |= NIU_FLAGS_10G;
8282		np->flags &= ~NIU_FLAGS_FIBER;
8283		np->flags |= NIU_FLAGS_XCVR_SERDES;
8284		np->mac_xcvr = MAC_XCVR_XPCS;
8285	} else {
8286		return -EINVAL;
8287	}
8288	return 0;
8289}
8290
8291static int niu_pci_vpd_get_nports(struct niu *np)
8292{
8293	int ports = 0;
8294
8295	if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
8296	    (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
8297	    (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
8298	    (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
8299	    (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
8300		ports = 4;
8301	} else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
8302		   (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
8303		   (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
8304		   (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
8305		ports = 2;
8306	}
8307
8308	return ports;
8309}
8310
8311static void niu_pci_vpd_validate(struct niu *np)
8312{
8313	struct net_device *dev = np->dev;
8314	struct niu_vpd *vpd = &np->vpd;
8315	u8 val8;
8316
8317	if (!is_valid_ether_addr(&vpd->local_mac[0])) {
8318		dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");
8319
8320		np->flags &= ~NIU_FLAGS_VPD_VALID;
8321		return;
8322	}
8323
8324	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
8325	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
8326		np->flags |= NIU_FLAGS_10G;
8327		np->flags &= ~NIU_FLAGS_FIBER;
8328		np->flags |= NIU_FLAGS_XCVR_SERDES;
8329		np->mac_xcvr = MAC_XCVR_PCS;
8330		if (np->port > 1) {
8331			np->flags |= NIU_FLAGS_FIBER;
8332			np->flags &= ~NIU_FLAGS_10G;
8333		}
8334		if (np->flags & NIU_FLAGS_10G)
8335			np->mac_xcvr = MAC_XCVR_XPCS;
8336	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
8337		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
8338			      NIU_FLAGS_HOTPLUG_PHY);
8339	} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
8340		dev_err(np->device, "Illegal phy string [%s]\n",
8341			np->vpd.phy_type);
8342		dev_err(np->device, "Falling back to SPROM\n");
8343		np->flags &= ~NIU_FLAGS_VPD_VALID;
8344		return;
8345	}
8346
8347	memcpy(dev->dev_addr, vpd->local_mac, ETH_ALEN);
8348
8349	val8 = dev->dev_addr[5];
8350	dev->dev_addr[5] += np->port;
8351	if (dev->dev_addr[5] < val8)
8352		dev->dev_addr[4]++;
8353}
8354
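/* Probe the fallback SPROM image: the byte sum of the image must come
 * to 0xab, and the per-port MAC address is derived by adding the port
 * number to the base address, with carry into byte 4.
 */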
8355static int niu_pci_probe_sprom(struct niu *np)
8356{
8357	struct net_device *dev = np->dev;
8358	int len, i;
8359	u64 val, sum;
8360	u8 val8;
8361
8362	val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ);
8363	val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT;
8364	len = val / 4;
8365
8366	np->eeprom_len = len;
8367
8368	netif_printk(np, probe, KERN_DEBUG, np->dev,
8369		     "SPROM: Image size %llu\n", (unsigned long long)val);
8370
8371	sum = 0;
8372	for (i = 0; i < len; i++) {
8373		val = nr64(ESPC_NCR(i));
8374		sum += (val >>  0) & 0xff;
8375		sum += (val >>  8) & 0xff;
8376		sum += (val >> 16) & 0xff;
8377		sum += (val >> 24) & 0xff;
8378	}
8379	netif_printk(np, probe, KERN_DEBUG, np->dev,
8380		     "SPROM: Checksum %x\n", (int)(sum & 0xff));
8381	if ((sum & 0xff) != 0xab) {
8382		dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
8383		return -EINVAL;
8384	}
8385
8386	val = nr64(ESPC_PHY_TYPE);
8387	switch (np->port) {
8388	case 0:
8389		val8 = (val & ESPC_PHY_TYPE_PORT0) >>
8390			ESPC_PHY_TYPE_PORT0_SHIFT;
8391		break;
8392	case 1:
8393		val8 = (val & ESPC_PHY_TYPE_PORT1) >>
8394			ESPC_PHY_TYPE_PORT1_SHIFT;
8395		break;
8396	case 2:
8397		val8 = (val & ESPC_PHY_TYPE_PORT2) >>
8398			ESPC_PHY_TYPE_PORT2_SHIFT;
8399		break;
8400	case 3:
8401		val8 = (val & ESPC_PHY_TYPE_PORT3) >>
8402			ESPC_PHY_TYPE_PORT3_SHIFT;
8403		break;
8404	default:
8405		dev_err(np->device, "Bogus port number %u\n",
8406			np->port);
8407		return -EINVAL;
8408	}
8409	netif_printk(np, probe, KERN_DEBUG, np->dev,
8410		     "SPROM: PHY type %x\n", val8);
8411
8412	switch (val8) {
8413	case ESPC_PHY_TYPE_1G_COPPER:
8414		/* 1G copper, MII */
8415		np->flags &= ~(NIU_FLAGS_FIBER |
8416			       NIU_FLAGS_10G);
8417		np->mac_xcvr = MAC_XCVR_MII;
8418		break;
8419
8420	case ESPC_PHY_TYPE_1G_FIBER:
8421		/* 1G fiber, PCS */
8422		np->flags &= ~NIU_FLAGS_10G;
8423		np->flags |= NIU_FLAGS_FIBER;
8424		np->mac_xcvr = MAC_XCVR_PCS;
8425		break;
8426
8427	case ESPC_PHY_TYPE_10G_COPPER:
8428		/* 10G copper, XPCS */
8429		np->flags |= NIU_FLAGS_10G;
8430		np->flags &= ~NIU_FLAGS_FIBER;
8431		np->mac_xcvr = MAC_XCVR_XPCS;
8432		break;
8433
8434	case ESPC_PHY_TYPE_10G_FIBER:
8435		/* 10G fiber, XPCS */
8436		np->flags |= (NIU_FLAGS_10G |
8437			      NIU_FLAGS_FIBER);
8438		np->mac_xcvr = MAC_XCVR_XPCS;
8439		break;
8440
8441	default:
8442		dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
8443		return -EINVAL;
8444	}
8445
8446	val = nr64(ESPC_MAC_ADDR0);
8447	netif_printk(np, probe, KERN_DEBUG, np->dev,
8448		     "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
8449	dev->dev_addr[0] = (val >>  0) & 0xff;
8450	dev->dev_addr[1] = (val >>  8) & 0xff;
8451	dev->dev_addr[2] = (val >> 16) & 0xff;
8452	dev->dev_addr[3] = (val >> 24) & 0xff;
8453
8454	val = nr64(ESPC_MAC_ADDR1);
8455	netif_printk(np, probe, KERN_DEBUG, np->dev,
8456		     "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
8457	dev->dev_addr[4] = (val >>  0) & 0xff;
8458	dev->dev_addr[5] = (val >>  8) & 0xff;
8459
8460	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
8461		dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
8462			dev->dev_addr);
8463		return -EINVAL;
8464	}
8465
8466	val8 = dev->dev_addr[5];
8467	dev->dev_addr[5] += np->port;
8468	if (dev->dev_addr[5] < val8)
8469		dev->dev_addr[4]++;
8470
8471	val = nr64(ESPC_MOD_STR_LEN);
8472	netif_printk(np, probe, KERN_DEBUG, np->dev,
8473		     "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val);
8474	if (val >= 8 * 4)
8475		return -EINVAL;
8476
8477	for (i = 0; i < val; i += 4) {
8478		u64 tmp = nr64(ESPC_NCR(5 + (i / 4)));
8479
8480		np->vpd.model[i + 3] = (tmp >>  0) & 0xff;
8481		np->vpd.model[i + 2] = (tmp >>  8) & 0xff;
8482		np->vpd.model[i + 1] = (tmp >> 16) & 0xff;
8483		np->vpd.model[i + 0] = (tmp >> 24) & 0xff;
8484	}
8485	np->vpd.model[val] = '\0';
8486
8487	val = nr64(ESPC_BD_MOD_STR_LEN);
8488	netif_printk(np, probe, KERN_DEBUG, np->dev,
8489		     "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val);
8490	if (val >= 4 * 4)
8491		return -EINVAL;
8492
8493	for (i = 0; i < val; i += 4) {
8494		u64 tmp = nr64(ESPC_NCR(14 + (i / 4)));
8495
8496		np->vpd.board_model[i + 3] = (tmp >>  0) & 0xff;
8497		np->vpd.board_model[i + 2] = (tmp >>  8) & 0xff;
8498		np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff;
8499		np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff;
8500	}
8501	np->vpd.board_model[val] = '\0';
8502
8503	np->vpd.mac_num =
8504		nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
8505	netif_printk(np, probe, KERN_DEBUG, np->dev,
8506		     "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);
8507
8508	return 0;
8509}
8510
8511static int niu_get_and_validate_port(struct niu *np)
8512{
8513	struct niu_parent *parent = np->parent;
8514
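	/* Ports 0 and 1 are driven by the XMAC, ports 2 and 3 by the
	 * BMAC (see niu_init_mac_ipp_pcs_base).
	 */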
8515	if (np->port <= 1)
8516		np->flags |= NIU_FLAGS_XMAC;
8517
8518	if (!parent->num_ports) {
8519		if (parent->plat_type == PLAT_TYPE_NIU) {
8520			parent->num_ports = 2;
8521		} else {
8522			parent->num_ports = niu_pci_vpd_get_nports(np);
8523			if (!parent->num_ports) {
8524				/* Fall back to SPROM as last resort.
8525				 * This will fail on most cards.
8526				 */
8527				parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
8528					ESPC_NUM_PORTS_MACS_VAL;
8529
8530				/* All of the current probing methods fail on
8531				 * Maramba on-board parts.
8532				 */
8533				if (!parent->num_ports)
8534					parent->num_ports = 4;
8535			}
8536		}
8537	}
8538
8539	if (np->port >= parent->num_ports)
8540		return -ENODEV;
8541
8542	return 0;
8543}
8544
8545static int phy_record(struct niu_parent *parent, struct phy_probe_info *p,
8546		      int dev_id_1, int dev_id_2, u8 phy_port, int type)
8547{
8548	u32 id = (dev_id_1 << 16) | dev_id_2;
8549	u8 idx;
8550
8551	if (dev_id_1 < 0 || dev_id_2 < 0)
8552		return 0;
8553	if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
8554		/* Because of the NIU_PHY_ID_MASK being applied, the 8704
8555		 * test covers the 8706 as well.
8556		 */
8557		if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
8558		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011))
8559			return 0;
8560	} else {
8561		if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
8562			return 0;
8563	}
8564
8565	pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
8566		parent->index, id,
8567		type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
8568		type == PHY_TYPE_PCS ? "PCS" : "MII",
8569		phy_port);
8570
8571	if (p->cur[type] >= NIU_MAX_PORTS) {
8572		pr_err("Too many PHY ports\n");
8573		return -EINVAL;
8574	}
8575	idx = p->cur[type];
8576	p->phy_id[type][idx] = id;
8577	p->phy_port[type][idx] = phy_port;
8578	p->cur[type] = idx + 1;
8579	return 0;
8580}
8581
8582static int port_has_10g(struct phy_probe_info *p, int port)
8583{
8584	int i;
8585
8586	for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) {
8587		if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port)
8588			return 1;
8589	}
8590	for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) {
8591		if (p->phy_port[PHY_TYPE_PCS][i] == port)
8592			return 1;
8593	}
8594
8595	return 0;
8596}
8597
8598static int count_10g_ports(struct phy_probe_info *p, int *lowest)
8599{
8600	int port, cnt;
8601
8602	cnt = 0;
8603	*lowest = 32;
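	/* PHY ports 0-7 are reserved for the on-board serdes and are
	 * never probed (see fill_phy_probe_info).
	 */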
8604	for (port = 8; port < 32; port++) {
8605		if (port_has_10g(p, port)) {
8606			if (!cnt)
8607				*lowest = port;
8608			cnt++;
8609		}
8610	}
8611
8612	return cnt;
8613}
8614
8615static int count_1g_ports(struct phy_probe_info *p, int *lowest)
8616{
8617	*lowest = 32;
8618	if (p->cur[PHY_TYPE_MII])
8619		*lowest = p->phy_port[PHY_TYPE_MII][0];
8620
8621	return p->cur[PHY_TYPE_MII];
8622}
8623
8624static void niu_n2_divide_channels(struct niu_parent *parent)
8625{
8626	int num_ports = parent->num_ports;
8627	int i;
8628
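	/* Split the 16 RX and 16 TX DMA channels evenly across the
	 * ports.
	 */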
8629	for (i = 0; i < num_ports; i++) {
8630		parent->rxchan_per_port[i] = (16 / num_ports);
8631		parent->txchan_per_port[i] = (16 / num_ports);
8632
8633		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
8634			parent->index, i,
8635			parent->rxchan_per_port[i],
8636			parent->txchan_per_port[i]);
8637	}
8638}
8639
8640static void niu_divide_channels(struct niu_parent *parent,
8641				int num_10g, int num_1g)
8642{
8643	int num_ports = parent->num_ports;
8644	int rx_chans_per_10g, rx_chans_per_1g;
8645	int tx_chans_per_10g, tx_chans_per_1g;
8646	int i, tot_rx, tot_tx;
8647
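	/* With a homogeneous set of ports, split the channels evenly.
	 * Otherwise give each 1G port a fixed share (1/8th of the RX
	 * channels, 1/6th of the TX channels) and divide the remainder
	 * among the 10G ports.
	 */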
8648	if (!num_10g || !num_1g) {
8649		rx_chans_per_10g = rx_chans_per_1g =
8650			(NIU_NUM_RXCHAN / num_ports);
8651		tx_chans_per_10g = tx_chans_per_1g =
8652			(NIU_NUM_TXCHAN / num_ports);
8653	} else {
8654		rx_chans_per_1g = NIU_NUM_RXCHAN / 8;
8655		rx_chans_per_10g = (NIU_NUM_RXCHAN -
8656				    (rx_chans_per_1g * num_1g)) /
8657			num_10g;
8658
8659		tx_chans_per_1g = NIU_NUM_TXCHAN / 6;
8660		tx_chans_per_10g = (NIU_NUM_TXCHAN -
8661				    (tx_chans_per_1g * num_1g)) /
8662			num_10g;
8663	}
8664
8665	tot_rx = tot_tx = 0;
8666	for (i = 0; i < num_ports; i++) {
8667		int type = phy_decode(parent->port_phy, i);
8668
8669		if (type == PORT_TYPE_10G) {
8670			parent->rxchan_per_port[i] = rx_chans_per_10g;
8671			parent->txchan_per_port[i] = tx_chans_per_10g;
8672		} else {
8673			parent->rxchan_per_port[i] = rx_chans_per_1g;
8674			parent->txchan_per_port[i] = tx_chans_per_1g;
8675		}
8676		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
8677			parent->index, i,
8678			parent->rxchan_per_port[i],
8679			parent->txchan_per_port[i]);
8680		tot_rx += parent->rxchan_per_port[i];
8681		tot_tx += parent->txchan_per_port[i];
8682	}
8683
8684	if (tot_rx > NIU_NUM_RXCHAN) {
8685		pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
8686		       parent->index, tot_rx);
8687		for (i = 0; i < num_ports; i++)
8688			parent->rxchan_per_port[i] = 1;
8689	}
8690	if (tot_tx > NIU_NUM_TXCHAN) {
8691		pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
8692		       parent->index, tot_tx);
8693		for (i = 0; i < num_ports; i++)
8694			parent->txchan_per_port[i] = 1;
8695	}
8696	if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
8697		pr_warn("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
8698			parent->index, tot_rx, tot_tx);
8699	}
8700}
8701
8702static void niu_divide_rdc_groups(struct niu_parent *parent,
8703				  int num_10g, int num_1g)
8704{
8705	int i, num_ports = parent->num_ports;
8706	int rdc_group, rdc_groups_per_port;
8707	int rdc_channel_base;
8708
8709	rdc_group = 0;
8710	rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports;
8711
8712	rdc_channel_base = 0;
8713
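	/* Each port gets an equal share of the RDC tables, and every
	 * slot of each table is filled round-robin from that port's
	 * RX channels.
	 */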
8714	for (i = 0; i < num_ports; i++) {
8715		struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i];
8716		int grp, num_channels = parent->rxchan_per_port[i];
8717		int this_channel_offset;
8718
8719		tp->first_table_num = rdc_group;
8720		tp->num_tables = rdc_groups_per_port;
8721		this_channel_offset = 0;
8722		for (grp = 0; grp < tp->num_tables; grp++) {
8723			struct rdc_table *rt = &tp->tables[grp];
8724			int slot;
8725
8726			pr_info("niu%d: Port %d RDC tbl(%d) [ ",
8727				parent->index, i, tp->first_table_num + grp);
8728			for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
8729				rt->rxdma_channel[slot] =
8730					rdc_channel_base + this_channel_offset;
8731
8732				pr_cont("%d ", rt->rxdma_channel[slot]);
8733
8734				if (++this_channel_offset == num_channels)
8735					this_channel_offset = 0;
8736			}
8737			pr_cont("]\n");
8738		}
8739
8740		parent->rdc_default[i] = rdc_channel_base;
8741
8742		rdc_channel_base += num_channels;
8743		rdc_group += rdc_groups_per_port;
8744	}
8745}
8746
8747static int fill_phy_probe_info(struct niu *np, struct niu_parent *parent,
8748			       struct phy_probe_info *info)
8749{
8750	unsigned long flags;
8751	int port, err;
8752
8753	memset(info, 0, sizeof(*info));
8754
	/* Ports 0 to 7 are reserved for the on-board serdes; probe the rest.  */
8756	niu_lock_parent(np, flags);
8757	err = 0;
8758	for (port = 8; port < 32; port++) {
8759		int dev_id_1, dev_id_2;
8760
8761		dev_id_1 = mdio_read(np, port,
8762				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1);
8763		dev_id_2 = mdio_read(np, port,
8764				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2);
8765		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
8766				 PHY_TYPE_PMA_PMD);
8767		if (err)
8768			break;
8769		dev_id_1 = mdio_read(np, port,
8770				     NIU_PCS_DEV_ADDR, MII_PHYSID1);
8771		dev_id_2 = mdio_read(np, port,
8772				     NIU_PCS_DEV_ADDR, MII_PHYSID2);
8773		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
8774				 PHY_TYPE_PCS);
8775		if (err)
8776			break;
8777		dev_id_1 = mii_read(np, port, MII_PHYSID1);
8778		dev_id_2 = mii_read(np, port, MII_PHYSID2);
8779		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
8780				 PHY_TYPE_MII);
8781		if (err)
8782			break;
8783	}
8784	niu_unlock_parent(np, flags);
8785
8786	return err;
8787}
8788
8789static int walk_phys(struct niu *np, struct niu_parent *parent)
8790{
8791	struct phy_probe_info *info = &parent->phy_probe_info;
8792	int lowest_10g, lowest_1g;
8793	int num_10g, num_1g;
8794	u32 val;
8795	int err;
8796
8797	num_10g = num_1g = 0;
8798
8799	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
8800	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
8801		num_10g = 0;
8802		num_1g = 2;
8803		parent->plat_type = PLAT_TYPE_ATCA_CP3220;
8804		parent->num_ports = 4;
8805		val = (phy_encode(PORT_TYPE_1G, 0) |
8806		       phy_encode(PORT_TYPE_1G, 1) |
8807		       phy_encode(PORT_TYPE_1G, 2) |
8808		       phy_encode(PORT_TYPE_1G, 3));
8809	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
8810		num_10g = 2;
8811		num_1g = 0;
8812		parent->num_ports = 2;
8813		val = (phy_encode(PORT_TYPE_10G, 0) |
8814		       phy_encode(PORT_TYPE_10G, 1));
8815	} else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
8816		   (parent->plat_type == PLAT_TYPE_NIU)) {
8817		/* this is the Monza case */
8818		if (np->flags & NIU_FLAGS_10G) {
8819			val = (phy_encode(PORT_TYPE_10G, 0) |
8820			       phy_encode(PORT_TYPE_10G, 1));
8821		} else {
8822			val = (phy_encode(PORT_TYPE_1G, 0) |
8823			       phy_encode(PORT_TYPE_1G, 1));
8824		}
8825	} else {
8826		err = fill_phy_probe_info(np, parent, info);
8827		if (err)
8828			return err;
8829
8830		num_10g = count_10g_ports(info, &lowest_10g);
8831		num_1g = count_1g_ports(info, &lowest_1g);
8832
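		/* The switch key packs the port counts as
		 * (num_10g << 4) | num_1g, so e.g. 0x24 means two 10G
		 * ports plus four 1G ports.
		 */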
8833		switch ((num_10g << 4) | num_1g) {
8834		case 0x24:
8835			if (lowest_1g == 10)
8836				parent->plat_type = PLAT_TYPE_VF_P0;
8837			else if (lowest_1g == 26)
8838				parent->plat_type = PLAT_TYPE_VF_P1;
8839			else
8840				goto unknown_vg_1g_port;
8841
8842			fallthrough;
8843		case 0x22:
8844			val = (phy_encode(PORT_TYPE_10G, 0) |
8845			       phy_encode(PORT_TYPE_10G, 1) |
8846			       phy_encode(PORT_TYPE_1G, 2) |
8847			       phy_encode(PORT_TYPE_1G, 3));
8848			break;
8849
8850		case 0x20:
8851			val = (phy_encode(PORT_TYPE_10G, 0) |
8852			       phy_encode(PORT_TYPE_10G, 1));
8853			break;
8854
8855		case 0x10:
8856			val = phy_encode(PORT_TYPE_10G, np->port);
8857			break;
8858
8859		case 0x14:
8860			if (lowest_1g == 10)
8861				parent->plat_type = PLAT_TYPE_VF_P0;
8862			else if (lowest_1g == 26)
8863				parent->plat_type = PLAT_TYPE_VF_P1;
8864			else
8865				goto unknown_vg_1g_port;
8866
8867			fallthrough;
8868		case 0x13:
8869			if ((lowest_10g & 0x7) == 0)
8870				val = (phy_encode(PORT_TYPE_10G, 0) |
8871				       phy_encode(PORT_TYPE_1G, 1) |
8872				       phy_encode(PORT_TYPE_1G, 2) |
8873				       phy_encode(PORT_TYPE_1G, 3));
8874			else
8875				val = (phy_encode(PORT_TYPE_1G, 0) |
8876				       phy_encode(PORT_TYPE_10G, 1) |
8877				       phy_encode(PORT_TYPE_1G, 2) |
8878				       phy_encode(PORT_TYPE_1G, 3));
8879			break;
8880
8881		case 0x04:
8882			if (lowest_1g == 10)
8883				parent->plat_type = PLAT_TYPE_VF_P0;
8884			else if (lowest_1g == 26)
8885				parent->plat_type = PLAT_TYPE_VF_P1;
8886			else
8887				goto unknown_vg_1g_port;
8888
8889			val = (phy_encode(PORT_TYPE_1G, 0) |
8890			       phy_encode(PORT_TYPE_1G, 1) |
8891			       phy_encode(PORT_TYPE_1G, 2) |
8892			       phy_encode(PORT_TYPE_1G, 3));
8893			break;
8894
8895		default:
8896			pr_err("Unsupported port config 10G[%d] 1G[%d]\n",
8897			       num_10g, num_1g);
8898			return -EINVAL;
8899		}
8900	}
8901
8902	parent->port_phy = val;
8903
8904	if (parent->plat_type == PLAT_TYPE_NIU)
8905		niu_n2_divide_channels(parent);
8906	else
8907		niu_divide_channels(parent, num_10g, num_1g);
8908
8909	niu_divide_rdc_groups(parent, num_10g, num_1g);
8910
8911	return 0;
8912
8913unknown_vg_1g_port:
8914	pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g);
8915	return -EINVAL;
8916}
8917
8918static int niu_probe_ports(struct niu *np)
8919{
8920	struct niu_parent *parent = np->parent;
8921	int err, i;
8922
8923	if (parent->port_phy == PORT_PHY_UNKNOWN) {
8924		err = walk_phys(np, parent);
8925		if (err)
8926			return err;
8927
8928		niu_set_ldg_timer_res(np, 2);
8929		for (i = 0; i <= LDN_MAX; i++)
8930			niu_ldn_irq_enable(np, i, 0);
8931	}
8932
8933	if (parent->port_phy == PORT_PHY_INVALID)
8934		return -EINVAL;
8935
8936	return 0;
8937}
8938
8939static int niu_classifier_swstate_init(struct niu *np)
8940{
8941	struct niu_classifier *cp = &np->clas;
8942
8943	cp->tcam_top = (u16) np->port;
8944	cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
8945	cp->h1_init = 0xffffffff;
8946	cp->h2_init = 0xffff;
8947
8948	return fflp_early_init(np);
8949}
8950
8951static void niu_link_config_init(struct niu *np)
8952{
8953	struct niu_link_config *lp = &np->link_config;
8954
8955	lp->advertising = (ADVERTISED_10baseT_Half |
8956			   ADVERTISED_10baseT_Full |
8957			   ADVERTISED_100baseT_Half |
8958			   ADVERTISED_100baseT_Full |
8959			   ADVERTISED_1000baseT_Half |
8960			   ADVERTISED_1000baseT_Full |
8961			   ADVERTISED_10000baseT_Full |
8962			   ADVERTISED_Autoneg);
8963	lp->speed = lp->active_speed = SPEED_INVALID;
8964	lp->duplex = DUPLEX_FULL;
8965	lp->active_duplex = DUPLEX_INVALID;
8966	lp->autoneg = 1;
8967#if 0
8968	lp->loopback_mode = LOOPBACK_MAC;
8969	lp->active_speed = SPEED_10000;
8970	lp->active_duplex = DUPLEX_FULL;
8971#else
8972	lp->loopback_mode = LOOPBACK_DISABLED;
8973#endif
8974}
8975
8976static int niu_init_mac_ipp_pcs_base(struct niu *np)
8977{
8978	switch (np->port) {
8979	case 0:
8980		np->mac_regs = np->regs + XMAC_PORT0_OFF;
8981		np->ipp_off  = 0x00000;
8982		np->pcs_off  = 0x04000;
8983		np->xpcs_off = 0x02000;
8984		break;
8985
8986	case 1:
8987		np->mac_regs = np->regs + XMAC_PORT1_OFF;
8988		np->ipp_off  = 0x08000;
8989		np->pcs_off  = 0x0a000;
8990		np->xpcs_off = 0x08000;
8991		break;
8992
8993	case 2:
8994		np->mac_regs = np->regs + BMAC_PORT2_OFF;
8995		np->ipp_off  = 0x04000;
8996		np->pcs_off  = 0x0e000;
8997		np->xpcs_off = ~0UL;
8998		break;
8999
9000	case 3:
9001		np->mac_regs = np->regs + BMAC_PORT3_OFF;
9002		np->ipp_off  = 0x0c000;
9003		np->pcs_off  = 0x12000;
9004		np->xpcs_off = ~0UL;
9005		break;
9006
9007	default:
9008		dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port);
9009		return -EINVAL;
9010	}
9011
9012	return 0;
9013}
9014
9015static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
9016{
9017	struct msix_entry msi_vec[NIU_NUM_LDG];
9018	struct niu_parent *parent = np->parent;
9019	struct pci_dev *pdev = np->pdev;
9020	int i, num_irqs;
9021	u8 first_ldg;
9022
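	/* Each port owns an equal, contiguous slice of the NIU_NUM_LDG
	 * logical device groups.
	 */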
9023	first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
9024	for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
9025		ldg_num_map[i] = first_ldg + i;
9026
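	/* One vector per RX and TX channel plus one for the MAC; port 0
	 * additionally carries the MIF and SYSERR interrupts (see the
	 * LDG ordering comment in niu_ldg_init).
	 */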
9027	num_irqs = (parent->rxchan_per_port[np->port] +
9028		    parent->txchan_per_port[np->port] +
9029		    (np->port == 0 ? 3 : 1));
9030	BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
9031
9032	for (i = 0; i < num_irqs; i++) {
9033		msi_vec[i].vector = 0;
9034		msi_vec[i].entry = i;
9035	}
9036
9037	num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
9038	if (num_irqs < 0) {
9039		np->flags &= ~NIU_FLAGS_MSIX;
9040		return;
9041	}
9042
9043	np->flags |= NIU_FLAGS_MSIX;
9044	for (i = 0; i < num_irqs; i++)
9045		np->ldg[i].irq = msi_vec[i].vector;
9046	np->num_ldg = num_irqs;
9047}
9048
9049static int niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
9050{
9051#ifdef CONFIG_SPARC64
9052	struct platform_device *op = np->op;
9053	const u32 *int_prop;
9054	int i;
9055
9056	int_prop = of_get_property(op->dev.of_node, "interrupts", NULL);
9057	if (!int_prop)
9058		return -ENODEV;
9059
9060	for (i = 0; i < op->archdata.num_irqs; i++) {
9061		ldg_num_map[i] = int_prop[i];
9062		np->ldg[i].irq = op->archdata.irqs[i];
9063	}
9064
9065	np->num_ldg = op->archdata.num_irqs;
9066
9067	return 0;
9068#else
9069	return -EINVAL;
9070#endif
9071}
9072
9073static int niu_ldg_init(struct niu *np)
9074{
9075	struct niu_parent *parent = np->parent;
9076	u8 ldg_num_map[NIU_NUM_LDG];
9077	int first_chan, num_chan;
9078	int i, err, ldg_rotor;
9079	u8 port;
9080
9081	np->num_ldg = 1;
9082	np->ldg[0].irq = np->dev->irq;
9083	if (parent->plat_type == PLAT_TYPE_NIU) {
9084		err = niu_n2_irq_init(np, ldg_num_map);
9085		if (err)
9086			return err;
	} else {
		niu_try_msix(np, ldg_num_map);
	}
9089
9090	port = np->port;
9091	for (i = 0; i < np->num_ldg; i++) {
9092		struct niu_ldg *lp = &np->ldg[i];
9093
9094		netif_napi_add(np->dev, &lp->napi, niu_poll, 64);
9095
9096		lp->np = np;
9097		lp->ldg_num = ldg_num_map[i];
9098		lp->timer = 2; /* XXX */
9099
		/* On the N2 NIU the firmware has already set up the SID
		 * mappings, so they route each LDG to the proper interrupt
		 * in the NCU interrupt table.
		 */
9104		if (np->parent->plat_type != PLAT_TYPE_NIU) {
9105			err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
9106			if (err)
9107				return err;
9108		}
9109	}
9110
9111	/* We adopt the LDG assignment ordering used by the N2 NIU
9112	 * 'interrupt' properties because that simplifies a lot of
9113	 * things.  This ordering is:
9114	 *
9115	 *	MAC
9116	 *	MIF	(if port zero)
9117	 *	SYSERR	(if port zero)
9118	 *	RX channels
9119	 *	TX channels
9120	 */
9121
9122	ldg_rotor = 0;
9123
9124	err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
9125				  LDN_MAC(port));
9126	if (err)
9127		return err;
9128
9129	ldg_rotor++;
9130	if (ldg_rotor == np->num_ldg)
9131		ldg_rotor = 0;
9132
9133	if (port == 0) {
9134		err = niu_ldg_assign_ldn(np, parent,
9135					 ldg_num_map[ldg_rotor],
9136					 LDN_MIF);
9137		if (err)
9138			return err;
9139
9140		ldg_rotor++;
9141		if (ldg_rotor == np->num_ldg)
9142			ldg_rotor = 0;
9143
9144		err = niu_ldg_assign_ldn(np, parent,
9145					 ldg_num_map[ldg_rotor],
9146					 LDN_DEVICE_ERROR);
9147		if (err)
9148			return err;
9149
9150		ldg_rotor++;
9151		if (ldg_rotor == np->num_ldg)
9152			ldg_rotor = 0;
9153
9154	}
9155
9156	first_chan = 0;
9157	for (i = 0; i < port; i++)
9158		first_chan += parent->rxchan_per_port[i];
9159	num_chan = parent->rxchan_per_port[port];
9160
9161	for (i = first_chan; i < (first_chan + num_chan); i++) {
9162		err = niu_ldg_assign_ldn(np, parent,
9163					 ldg_num_map[ldg_rotor],
9164					 LDN_RXDMA(i));
9165		if (err)
9166			return err;
9167		ldg_rotor++;
9168		if (ldg_rotor == np->num_ldg)
9169			ldg_rotor = 0;
9170	}
9171
9172	first_chan = 0;
9173	for (i = 0; i < port; i++)
9174		first_chan += parent->txchan_per_port[i];
9175	num_chan = parent->txchan_per_port[port];
9176	for (i = first_chan; i < (first_chan + num_chan); i++) {
9177		err = niu_ldg_assign_ldn(np, parent,
9178					 ldg_num_map[ldg_rotor],
9179					 LDN_TXDMA(i));
9180		if (err)
9181			return err;
9182		ldg_rotor++;
9183		if (ldg_rotor == np->num_ldg)
9184			ldg_rotor = 0;
9185	}
9186
9187	return 0;
9188}
9189
9190static void niu_ldg_free(struct niu *np)
9191{
9192	if (np->flags & NIU_FLAGS_MSIX)
9193		pci_disable_msix(np->pdev);
9194}
9195
9196static int niu_get_of_props(struct niu *np)
9197{
9198#ifdef CONFIG_SPARC64
9199	struct net_device *dev = np->dev;
9200	struct device_node *dp;
9201	const char *phy_type;
9202	const u8 *mac_addr;
9203	const char *model;
9204	int prop_len;
9205
9206	if (np->parent->plat_type == PLAT_TYPE_NIU)
9207		dp = np->op->dev.of_node;
9208	else
9209		dp = pci_device_to_OF_node(np->pdev);
9210
9211	phy_type = of_get_property(dp, "phy-type", &prop_len);
9212	if (!phy_type) {
9213		netdev_err(dev, "%pOF: OF node lacks phy-type property\n", dp);
9214		return -EINVAL;
9215	}
9216
9217	if (!strcmp(phy_type, "none"))
9218		return -ENODEV;
9219
9220	strcpy(np->vpd.phy_type, phy_type);
9221
9222	if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
9223		netdev_err(dev, "%pOF: Illegal phy string [%s]\n",
9224			   dp, np->vpd.phy_type);
9225		return -EINVAL;
9226	}
9227
9228	mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
9229	if (!mac_addr) {
9230		netdev_err(dev, "%pOF: OF node lacks local-mac-address property\n",
9231			   dp);
9232		return -EINVAL;
9233	}
9234	if (prop_len != dev->addr_len) {
9235		netdev_err(dev, "%pOF: OF MAC address prop len (%d) is wrong\n",
9236			   dp, prop_len);
9237	}
9238	memcpy(dev->dev_addr, mac_addr, dev->addr_len);
9239	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
9240		netdev_err(dev, "%pOF: OF MAC address is invalid\n", dp);
9241		netdev_err(dev, "%pOF: [ %pM ]\n", dp, dev->dev_addr);
9242		return -EINVAL;
9243	}
9244
9245	model = of_get_property(dp, "model", &prop_len);
9246
9247	if (model)
9248		strcpy(np->vpd.model, model);
9249
9250	if (of_find_property(dp, "hot-swappable-phy", &prop_len)) {
9251		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
9252			NIU_FLAGS_HOTPLUG_PHY);
9253	}
9254
9255	return 0;
9256#else
9257	return -EINVAL;
9258#endif
9259}
9260
9261static int niu_get_invariants(struct niu *np)
9262{
9263	int err, have_props;
9264	u32 offset;
9265
9266	err = niu_get_of_props(np);
9267	if (err == -ENODEV)
9268		return err;
9269
9270	have_props = !err;
9271
9272	err = niu_init_mac_ipp_pcs_base(np);
9273	if (err)
9274		return err;
9275
9276	if (have_props) {
9277		err = niu_get_and_validate_port(np);
9278		if (err)
9279			return err;
9280
	} else {
9282		if (np->parent->plat_type == PLAT_TYPE_NIU)
9283			return -EINVAL;
9284
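		/* No usable OF properties; open the expansion ROM PIO
		 * window, read the VPD through it, then close it again.
		 */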
9285		nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
9286		offset = niu_pci_vpd_offset(np);
9287		netif_printk(np, probe, KERN_DEBUG, np->dev,
9288			     "%s() VPD offset [%08x]\n", __func__, offset);
9289		if (offset) {
9290			err = niu_pci_vpd_fetch(np, offset);
9291			if (err < 0)
9292				return err;
9293		}
9294		nw64(ESPC_PIO_EN, 0);
9295
9296		if (np->flags & NIU_FLAGS_VPD_VALID) {
9297			niu_pci_vpd_validate(np);
9298			err = niu_get_and_validate_port(np);
9299			if (err)
9300				return err;
9301		}
9302
9303		if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
9304			err = niu_get_and_validate_port(np);
9305			if (err)
9306				return err;
9307			err = niu_pci_probe_sprom(np);
9308			if (err)
9309				return err;
9310		}
9311	}
9312
9313	err = niu_probe_ports(np);
9314	if (err)
9315		return err;
9316
9317	niu_ldg_init(np);
9318
9319	niu_classifier_swstate_init(np);
9320	niu_link_config_init(np);
9321
9322	err = niu_determine_phy_disposition(np);
9323	if (!err)
9324		err = niu_init_link(np);
9325
9326	return err;
9327}
9328
9329static LIST_HEAD(niu_parent_list);
9330static DEFINE_MUTEX(niu_parent_lock);
9331static int niu_parent_index;
9332
9333static ssize_t show_port_phy(struct device *dev,
9334			     struct device_attribute *attr, char *buf)
9335{
9336	struct platform_device *plat_dev = to_platform_device(dev);
9337	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
9338	u32 port_phy = p->port_phy;
9339	char *orig_buf = buf;
9340	int i;
9341
9342	if (port_phy == PORT_PHY_UNKNOWN ||
9343	    port_phy == PORT_PHY_INVALID)
9344		return 0;
9345
9346	for (i = 0; i < p->num_ports; i++) {
9347		const char *type_str;
9348		int type;
9349
9350		type = phy_decode(port_phy, i);
9351		if (type == PORT_TYPE_10G)
9352			type_str = "10G";
9353		else
9354			type_str = "1G";
9355		buf += sprintf(buf,
9356			       (i == 0) ? "%s" : " %s",
9357			       type_str);
9358	}
9359	buf += sprintf(buf, "\n");
9360	return buf - orig_buf;
9361}
9362
9363static ssize_t show_plat_type(struct device *dev,
9364			      struct device_attribute *attr, char *buf)
9365{
9366	struct platform_device *plat_dev = to_platform_device(dev);
9367	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
9368	const char *type_str;
9369
9370	switch (p->plat_type) {
9371	case PLAT_TYPE_ATLAS:
9372		type_str = "atlas";
9373		break;
9374	case PLAT_TYPE_NIU:
9375		type_str = "niu";
9376		break;
9377	case PLAT_TYPE_VF_P0:
9378		type_str = "vf_p0";
9379		break;
9380	case PLAT_TYPE_VF_P1:
9381		type_str = "vf_p1";
9382		break;
9383	default:
9384		type_str = "unknown";
9385		break;
9386	}
9387
9388	return sprintf(buf, "%s\n", type_str);
9389}
9390
9391static ssize_t __show_chan_per_port(struct device *dev,
9392				    struct device_attribute *attr, char *buf,
9393				    int rx)
9394{
9395	struct platform_device *plat_dev = to_platform_device(dev);
9396	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
9397	char *orig_buf = buf;
9398	u8 *arr;
9399	int i;
9400
9401	arr = (rx ? p->rxchan_per_port : p->txchan_per_port);
9402
9403	for (i = 0; i < p->num_ports; i++) {
9404		buf += sprintf(buf,
9405			       (i == 0) ? "%d" : " %d",
9406			       arr[i]);
9407	}
9408	buf += sprintf(buf, "\n");
9409
9410	return buf - orig_buf;
9411}
9412
9413static ssize_t show_rxchan_per_port(struct device *dev,
9414				    struct device_attribute *attr, char *buf)
9415{
9416	return __show_chan_per_port(dev, attr, buf, 1);
9417}
9418
9419static ssize_t show_txchan_per_port(struct device *dev,
9420				    struct device_attribute *attr, char *buf)
9421{
	return __show_chan_per_port(dev, attr, buf, 0);
9423}
9424
9425static ssize_t show_num_ports(struct device *dev,
9426			      struct device_attribute *attr, char *buf)
9427{
9428	struct platform_device *plat_dev = to_platform_device(dev);
9429	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
9430
9431	return sprintf(buf, "%d\n", p->num_ports);
9432}
9433
9434static struct device_attribute niu_parent_attributes[] = {
9435	__ATTR(port_phy, 0444, show_port_phy, NULL),
9436	__ATTR(plat_type, 0444, show_plat_type, NULL),
9437	__ATTR(rxchan_per_port, 0444, show_rxchan_per_port, NULL),
9438	__ATTR(txchan_per_port, 0444, show_txchan_per_port, NULL),
9439	__ATTR(num_ports, 0444, show_num_ports, NULL),
9440	{}
9441};
9442
9443static struct niu_parent *niu_new_parent(struct niu *np,
9444					 union niu_parent_id *id, u8 ptype)
9445{
9446	struct platform_device *plat_dev;
9447	struct niu_parent *p;
9448	int i;
9449
9450	plat_dev = platform_device_register_simple("niu-board", niu_parent_index,
9451						   NULL, 0);
9452	if (IS_ERR(plat_dev))
9453		return NULL;
9454
9455	for (i = 0; niu_parent_attributes[i].attr.name; i++) {
9456		int err = device_create_file(&plat_dev->dev,
9457					     &niu_parent_attributes[i]);
9458		if (err)
9459			goto fail_unregister;
9460	}
9461
9462	p = kzalloc(sizeof(*p), GFP_KERNEL);
9463	if (!p)
9464		goto fail_unregister;
9465
9466	p->index = niu_parent_index++;
9467
9468	plat_dev->dev.platform_data = p;
9469	p->plat_dev = plat_dev;
9470
9471	memcpy(&p->id, id, sizeof(*id));
9472	p->plat_type = ptype;
9473	INIT_LIST_HEAD(&p->list);
9474	atomic_set(&p->refcnt, 0);
9475	list_add(&p->list, &niu_parent_list);
9476	spin_lock_init(&p->lock);
9477
9478	p->rxdma_clock_divider = 7500;
9479
9480	p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
9481	if (p->plat_type == PLAT_TYPE_NIU)
9482		p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;
9483
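	/* Default classifier state: every class uses TCAM key TSEL and
	 * a flow key built from the IP source/destination addresses,
	 * the protocol, and bytes 1-2 of both configurable L4 fields.
	 */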
9484	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
9485		int index = i - CLASS_CODE_USER_PROG1;
9486
9487		p->tcam_key[index] = TCAM_KEY_TSEL;
9488		p->flow_key[index] = (FLOW_KEY_IPSA |
9489				      FLOW_KEY_IPDA |
9490				      FLOW_KEY_PROTO |
9491				      (FLOW_KEY_L4_BYTE12 <<
9492				       FLOW_KEY_L4_0_SHIFT) |
9493				      (FLOW_KEY_L4_BYTE12 <<
9494				       FLOW_KEY_L4_1_SHIFT));
9495	}
9496
9497	for (i = 0; i < LDN_MAX + 1; i++)
9498		p->ldg_map[i] = LDG_INVALID;
9499
9500	return p;
9501
9502fail_unregister:
9503	platform_device_unregister(plat_dev);
9504	return NULL;
9505}
9506
9507static struct niu_parent *niu_get_parent(struct niu *np,
9508					 union niu_parent_id *id, u8 ptype)
9509{
9510	struct niu_parent *p, *tmp;
9511	int port = np->port;
9512
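	/* Ports on the same board share a single niu_parent, matched
	 * by parent id; only the first port to probe creates one.
	 */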
9513	mutex_lock(&niu_parent_lock);
9514	p = NULL;
9515	list_for_each_entry(tmp, &niu_parent_list, list) {
9516		if (!memcmp(id, &tmp->id, sizeof(*id))) {
9517			p = tmp;
9518			break;
9519		}
9520	}
9521	if (!p)
9522		p = niu_new_parent(np, id, ptype);
9523
9524	if (p) {
9525		char port_name[8];
9526		int err;
9527
9528		sprintf(port_name, "port%d", port);
9529		err = sysfs_create_link(&p->plat_dev->dev.kobj,
9530					&np->device->kobj,
9531					port_name);
9532		if (!err) {
9533			p->ports[port] = np;
9534			atomic_inc(&p->refcnt);
9535		}
9536	}
9537	mutex_unlock(&niu_parent_lock);
9538
9539	return p;
9540}
9541
9542static void niu_put_parent(struct niu *np)
9543{
9544	struct niu_parent *p = np->parent;
9545	u8 port = np->port;
9546	char port_name[8];
9547
9548	BUG_ON(!p || p->ports[port] != np);
9549
9550	netif_printk(np, probe, KERN_DEBUG, np->dev,
9551		     "%s() port[%u]\n", __func__, port);
9552
9553	sprintf(port_name, "port%d", port);
9554
9555	mutex_lock(&niu_parent_lock);
9556
9557	sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);
9558
9559	p->ports[port] = NULL;
9560	np->parent = NULL;
9561
9562	if (atomic_dec_and_test(&p->refcnt)) {
9563		list_del(&p->list);
9564		platform_device_unregister(p->plat_dev);
9565	}
9566
9567	mutex_unlock(&niu_parent_lock);
9568}
9569
9570static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
9571				    u64 *handle, gfp_t flag)
9572{
9573	dma_addr_t dh;
9574	void *ret;
9575
9576	ret = dma_alloc_coherent(dev, size, &dh, flag);
9577	if (ret)
9578		*handle = dh;
9579	return ret;
9580}
9581
9582static void niu_pci_free_coherent(struct device *dev, size_t size,
9583				  void *cpu_addr, u64 handle)
9584{
9585	dma_free_coherent(dev, size, cpu_addr, handle);
9586}
9587
9588static u64 niu_pci_map_page(struct device *dev, struct page *page,
9589			    unsigned long offset, size_t size,
9590			    enum dma_data_direction direction)
9591{
9592	return dma_map_page(dev, page, offset, size, direction);
9593}
9594
9595static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
9596			       size_t size, enum dma_data_direction direction)
9597{
9598	dma_unmap_page(dev, dma_address, size, direction);
9599}
9600
9601static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
9602			      size_t size,
9603			      enum dma_data_direction direction)
9604{
9605	return dma_map_single(dev, cpu_addr, size, direction);
9606}
9607
9608static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
9609				 size_t size,
9610				 enum dma_data_direction direction)
9611{
9612	dma_unmap_single(dev, dma_address, size, direction);
9613}
9614
9615static const struct niu_ops niu_pci_ops = {
9616	.alloc_coherent	= niu_pci_alloc_coherent,
9617	.free_coherent	= niu_pci_free_coherent,
9618	.map_page	= niu_pci_map_page,
9619	.unmap_page	= niu_pci_unmap_page,
9620	.map_single	= niu_pci_map_single,
9621	.unmap_single	= niu_pci_unmap_single,
9622};
9623
9624static void niu_driver_version(void)
9625{
9626	static int niu_version_printed;
9627
9628	if (niu_version_printed++ == 0)
9629		pr_info("%s", version);
9630}
9631
9632static struct net_device *niu_alloc_and_init(struct device *gen_dev,
9633					     struct pci_dev *pdev,
9634					     struct platform_device *op,
9635					     const struct niu_ops *ops, u8 port)
9636{
9637	struct net_device *dev;
9638	struct niu *np;
9639
9640	dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
9641	if (!dev)
9642		return NULL;
9643
9644	SET_NETDEV_DEV(dev, gen_dev);
9645
9646	np = netdev_priv(dev);
9647	np->dev = dev;
9648	np->pdev = pdev;
9649	np->op = op;
9650	np->device = gen_dev;
9651	np->ops = ops;
9652
9653	np->msg_enable = niu_debug;
9654
9655	spin_lock_init(&np->lock);
9656	INIT_WORK(&np->reset_task, niu_reset_task);
9657
9658	np->port = port;
9659
9660	return dev;
9661}
9662
9663static const struct net_device_ops niu_netdev_ops = {
9664	.ndo_open		= niu_open,
9665	.ndo_stop		= niu_close,
9666	.ndo_start_xmit		= niu_start_xmit,
9667	.ndo_get_stats64	= niu_get_stats,
9668	.ndo_set_rx_mode	= niu_set_rx_mode,
9669	.ndo_validate_addr	= eth_validate_addr,
9670	.ndo_set_mac_address	= niu_set_mac_addr,
9671	.ndo_do_ioctl		= niu_ioctl,
9672	.ndo_tx_timeout		= niu_tx_timeout,
9673	.ndo_change_mtu		= niu_change_mtu,
9674};
9675
9676static void niu_assign_netdev_ops(struct net_device *dev)
9677{
9678	dev->netdev_ops = &niu_netdev_ops;
9679	dev->ethtool_ops = &niu_ethtool_ops;
9680	dev->watchdog_timeo = NIU_TX_TIMEOUT;
9681}
9682
9683static void niu_device_announce(struct niu *np)
9684{
9685	struct net_device *dev = np->dev;
9686
9687	pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);
9688
9689	if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
9690		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
9691				dev->name,
9692				(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
9693				(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
9694				(np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
9695				(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
9696				 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
9697				np->vpd.phy_type);
9698	} else {
9699		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
9700				dev->name,
9701				(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
9702				(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
9703				(np->flags & NIU_FLAGS_FIBER ? "FIBER" :
9704				 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
9705				  "COPPER")),
9706				(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
9707				 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
9708				np->vpd.phy_type);
9709	}
9710}
9711
9712static void niu_set_basic_features(struct net_device *dev)
9713{
9714	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH;
9715	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
9716}
9717
9718static int niu_pci_init_one(struct pci_dev *pdev,
9719			    const struct pci_device_id *ent)
9720{
9721	union niu_parent_id parent_id;
9722	struct net_device *dev;
9723	struct niu *np;
9724	int err;
9725	u64 dma_mask;
9726
9727	niu_driver_version();
9728
9729	err = pci_enable_device(pdev);
9730	if (err) {
9731		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
9732		return err;
9733	}
9734
9735	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
9736	    !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9737		dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
9738		err = -ENODEV;
9739		goto err_out_disable_pdev;
9740	}
9741
9742	err = pci_request_regions(pdev, DRV_MODULE_NAME);
9743	if (err) {
9744		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
9745		goto err_out_disable_pdev;
9746	}
9747
9748	if (!pci_is_pcie(pdev)) {
9749		dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
9750		err = -ENODEV;
9751		goto err_out_free_res;
9752	}
9753
9754	dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
9755				 &niu_pci_ops, PCI_FUNC(pdev->devfn));
9756	if (!dev) {
9757		err = -ENOMEM;
9758		goto err_out_free_res;
9759	}
9760	np = netdev_priv(dev);
9761
9762	memset(&parent_id, 0, sizeof(parent_id));
9763	parent_id.pci.domain = pci_domain_nr(pdev->bus);
9764	parent_id.pci.bus = pdev->bus->number;
9765	parent_id.pci.device = PCI_SLOT(pdev->devfn);
9766
9767	np->parent = niu_get_parent(np, &parent_id,
9768				    PLAT_TYPE_ATLAS);
9769	if (!np->parent) {
9770		err = -ENOMEM;
9771		goto err_out_free_dev;
9772	}
9773
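	/* Enable error reporting and relaxed ordering, and turn off
	 * no-snoop transactions.
	 */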
9774	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
9775		PCI_EXP_DEVCTL_NOSNOOP_EN,
9776		PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE |
9777		PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE |
9778		PCI_EXP_DEVCTL_RELAX_EN);
9779
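	/* The chip can generate 44-bit DMA addresses; fall back to a
	 * 32-bit mask if the platform cannot honor that.
	 */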
9780	dma_mask = DMA_BIT_MASK(44);
9781	err = pci_set_dma_mask(pdev, dma_mask);
9782	if (!err) {
9783		dev->features |= NETIF_F_HIGHDMA;
9784		err = pci_set_consistent_dma_mask(pdev, dma_mask);
9785		if (err) {
9786			dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n");
9787			goto err_out_release_parent;
9788		}
9789	}
9790	if (err) {
9791		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9792		if (err) {
9793			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
9794			goto err_out_release_parent;
9795		}
9796	}
9797
9798	niu_set_basic_features(dev);
9799
9800	dev->priv_flags |= IFF_UNICAST_FLT;
9801
9802	np->regs = pci_ioremap_bar(pdev, 0);
9803	if (!np->regs) {
9804		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
9805		err = -ENOMEM;
9806		goto err_out_release_parent;
9807	}
9808
9809	pci_set_master(pdev);
9810	pci_save_state(pdev);
9811
9812	dev->irq = pdev->irq;
9813
9814	/* MTU range: 68 - 9216 */
9815	dev->min_mtu = ETH_MIN_MTU;
9816	dev->max_mtu = NIU_MAX_MTU;
9817
9818	niu_assign_netdev_ops(dev);
9819
9820	err = niu_get_invariants(np);
9821	if (err) {
9822		if (err != -ENODEV)
9823			dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
9824		goto err_out_iounmap;
9825	}
9826
9827	err = register_netdev(dev);
9828	if (err) {
9829		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
9830		goto err_out_iounmap;
9831	}
9832
9833	pci_set_drvdata(pdev, dev);
9834
9835	niu_device_announce(np);
9836
9837	return 0;
9838
9839err_out_iounmap:
9840	if (np->regs) {
9841		iounmap(np->regs);
9842		np->regs = NULL;
9843	}
9844
9845err_out_release_parent:
9846	niu_put_parent(np);
9847
9848err_out_free_dev:
9849	free_netdev(dev);
9850
9851err_out_free_res:
9852	pci_release_regions(pdev);
9853
9854err_out_disable_pdev:
9855	pci_disable_device(pdev);
9856
9857	return err;
9858}
9859
9860static void niu_pci_remove_one(struct pci_dev *pdev)
9861{
9862	struct net_device *dev = pci_get_drvdata(pdev);
9863
9864	if (dev) {
9865		struct niu *np = netdev_priv(dev);
9866
9867		unregister_netdev(dev);
9868		if (np->regs) {
9869			iounmap(np->regs);
9870			np->regs = NULL;
9871		}
9872
9873		niu_ldg_free(np);
9874
9875		niu_put_parent(np);
9876
9877		free_netdev(dev);
9878		pci_release_regions(pdev);
9879		pci_disable_device(pdev);
9880	}
9881}
9882
9883static int __maybe_unused niu_suspend(struct device *dev_d)
9884{
9885	struct net_device *dev = dev_get_drvdata(dev_d);
9886	struct niu *np = netdev_priv(dev);
9887	unsigned long flags;
9888
9889	if (!netif_running(dev))
9890		return 0;
9891
9892	flush_work(&np->reset_task);
9893	niu_netif_stop(np);
9894
9895	del_timer_sync(&np->timer);
9896
9897	spin_lock_irqsave(&np->lock, flags);
9898	niu_enable_interrupts(np, 0);
9899	spin_unlock_irqrestore(&np->lock, flags);
9900
9901	netif_device_detach(dev);
9902
9903	spin_lock_irqsave(&np->lock, flags);
9904	niu_stop_hw(np);
9905	spin_unlock_irqrestore(&np->lock, flags);
9906
9907	return 0;
9908}
9909
9910static int __maybe_unused niu_resume(struct device *dev_d)
9911{
9912	struct net_device *dev = dev_get_drvdata(dev_d);
9913	struct niu *np = netdev_priv(dev);
9914	unsigned long flags;
9915	int err;
9916
9917	if (!netif_running(dev))
9918		return 0;
9919
9920	netif_device_attach(dev);
9921
9922	spin_lock_irqsave(&np->lock, flags);
9923
9924	err = niu_init_hw(np);
9925	if (!err) {
9926		np->timer.expires = jiffies + HZ;
9927		add_timer(&np->timer);
9928		niu_netif_start(np);
9929	}
9930
9931	spin_unlock_irqrestore(&np->lock, flags);
9932
9933	return err;
9934}
9935
9936static SIMPLE_DEV_PM_OPS(niu_pm_ops, niu_suspend, niu_resume);
9937
9938static struct pci_driver niu_pci_driver = {
9939	.name		= DRV_MODULE_NAME,
9940	.id_table	= niu_pci_tbl,
9941	.probe		= niu_pci_init_one,
9942	.remove		= niu_pci_remove_one,
9943	.driver.pm	= &niu_pm_ops,
9944};
9945
9946#ifdef CONFIG_SPARC64
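/* On the N2 NIU the device operates on physical addresses directly,
 * so these "DMA" ops simply translate with __pa()/page_to_phys() and
 * the unmap operations are no-ops.
 */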
9947static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
9948				     u64 *dma_addr, gfp_t flag)
9949{
9950	unsigned long order = get_order(size);
9951	unsigned long page = __get_free_pages(flag, order);
9952
9953	if (page == 0UL)
9954		return NULL;
9955	memset((char *)page, 0, PAGE_SIZE << order);
9956	*dma_addr = __pa(page);
9957
9958	return (void *) page;
9959}
9960
9961static void niu_phys_free_coherent(struct device *dev, size_t size,
9962				   void *cpu_addr, u64 handle)
9963{
9964	unsigned long order = get_order(size);
9965
9966	free_pages((unsigned long) cpu_addr, order);
9967}
9968
9969static u64 niu_phys_map_page(struct device *dev, struct page *page,
9970			     unsigned long offset, size_t size,
9971			     enum dma_data_direction direction)
9972{
9973	return page_to_phys(page) + offset;
9974}
9975
9976static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
9977				size_t size, enum dma_data_direction direction)
9978{
9979	/* Nothing to do.  */
9980}
9981
9982static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
9983			       size_t size,
9984			       enum dma_data_direction direction)
9985{
9986	return __pa(cpu_addr);
9987}
9988
9989static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
9990				  size_t size,
9991				  enum dma_data_direction direction)
9992{
9993	/* Nothing to do.  */
9994}
9995
9996static const struct niu_ops niu_phys_ops = {
9997	.alloc_coherent	= niu_phys_alloc_coherent,
9998	.free_coherent	= niu_phys_free_coherent,
9999	.map_page	= niu_phys_map_page,
10000	.unmap_page	= niu_phys_unmap_page,
10001	.map_single	= niu_phys_map_single,
10002	.unmap_single	= niu_phys_unmap_single,
10003};
10004
10005static int niu_of_probe(struct platform_device *op)
10006{
10007	union niu_parent_id parent_id;
10008	struct net_device *dev;
10009	struct niu *np;
10010	const u32 *reg;
10011	int err;
10012
10013	niu_driver_version();
10014
10015	reg = of_get_property(op->dev.of_node, "reg", NULL);
10016	if (!reg) {
10017		dev_err(&op->dev, "%pOF: No 'reg' property, aborting\n",
10018			op->dev.of_node);
10019		return -ENODEV;
10020	}
10021
10022	dev = niu_alloc_and_init(&op->dev, NULL, op,
10023				 &niu_phys_ops, reg[0] & 0x1);
10024	if (!dev) {
10025		err = -ENOMEM;
10026		goto err_out;
10027	}
10028	np = netdev_priv(dev);
10029
10030	memset(&parent_id, 0, sizeof(parent_id));
10031	parent_id.of = of_get_parent(op->dev.of_node);
10032
10033	np->parent = niu_get_parent(np, &parent_id,
10034				    PLAT_TYPE_NIU);
10035	if (!np->parent) {
10036		err = -ENOMEM;
10037		goto err_out_free_dev;
10038	}
10039
10040	niu_set_basic_features(dev);
10041
10042	np->regs = of_ioremap(&op->resource[1], 0,
10043			      resource_size(&op->resource[1]),
10044			      "niu regs");
10045	if (!np->regs) {
10046		dev_err(&op->dev, "Cannot map device registers, aborting\n");
10047		err = -ENOMEM;
10048		goto err_out_release_parent;
10049	}
10050
10051	np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
10052				    resource_size(&op->resource[2]),
10053				    "niu vregs-1");
10054	if (!np->vir_regs_1) {
10055		dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
10056		err = -ENOMEM;
10057		goto err_out_iounmap;
10058	}
10059
10060	np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
10061				    resource_size(&op->resource[3]),
10062				    "niu vregs-2");
10063	if (!np->vir_regs_2) {
10064		dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
10065		err = -ENOMEM;
10066		goto err_out_iounmap;
10067	}
10068
10069	niu_assign_netdev_ops(dev);
10070
10071	err = niu_get_invariants(np);
10072	if (err) {
10073		if (err != -ENODEV)
10074			dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
10075		goto err_out_iounmap;
10076	}
10077
10078	err = register_netdev(dev);
10079	if (err) {
10080		dev_err(&op->dev, "Cannot register net device, aborting\n");
10081		goto err_out_iounmap;
10082	}
10083
10084	platform_set_drvdata(op, dev);
10085
10086	niu_device_announce(np);
10087
10088	return 0;
10089
10090err_out_iounmap:
10091	if (np->vir_regs_1) {
10092		of_iounmap(&op->resource[2], np->vir_regs_1,
10093			   resource_size(&op->resource[2]));
10094		np->vir_regs_1 = NULL;
10095	}
10096
10097	if (np->vir_regs_2) {
10098		of_iounmap(&op->resource[3], np->vir_regs_2,
10099			   resource_size(&op->resource[3]));
10100		np->vir_regs_2 = NULL;
10101	}
10102
10103	if (np->regs) {
10104		of_iounmap(&op->resource[1], np->regs,
10105			   resource_size(&op->resource[1]));
10106		np->regs = NULL;
10107	}
10108
10109err_out_release_parent:
10110	niu_put_parent(np);
10111
10112err_out_free_dev:
10113	free_netdev(dev);
10114
10115err_out:
10116	return err;
10117}
10118
10119static int niu_of_remove(struct platform_device *op)
10120{
10121	struct net_device *dev = platform_get_drvdata(op);
10122
10123	if (dev) {
10124		struct niu *np = netdev_priv(dev);
10125
10126		unregister_netdev(dev);
10127
10128		if (np->vir_regs_1) {
10129			of_iounmap(&op->resource[2], np->vir_regs_1,
10130				   resource_size(&op->resource[2]));
10131			np->vir_regs_1 = NULL;
10132		}
10133
10134		if (np->vir_regs_2) {
10135			of_iounmap(&op->resource[3], np->vir_regs_2,
10136				   resource_size(&op->resource[3]));
10137			np->vir_regs_2 = NULL;
10138		}
10139
10140		if (np->regs) {
10141			of_iounmap(&op->resource[1], np->regs,
10142				   resource_size(&op->resource[1]));
10143			np->regs = NULL;
10144		}
10145
10146		niu_ldg_free(np);
10147
10148		niu_put_parent(np);
10149
10150		free_netdev(dev);
10151	}
10152	return 0;
10153}
10154
10155static const struct of_device_id niu_match[] = {
10156	{
10157		.name = "network",
10158		.compatible = "SUNW,niusl",
10159	},
10160	{},
10161};
10162MODULE_DEVICE_TABLE(of, niu_match);
10163
10164static struct platform_driver niu_of_driver = {
10165	.driver = {
10166		.name = "niu",
10167		.of_match_table = niu_match,
10168	},
10169	.probe		= niu_of_probe,
10170	.remove		= niu_of_remove,
10171};
10172
10173#endif /* CONFIG_SPARC64 */
10174
10175static int __init niu_init(void)
10176{
10177	int err = 0;
10178
10179	BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);
10180
10181	niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
10182
10183#ifdef CONFIG_SPARC64
10184	err = platform_driver_register(&niu_of_driver);
10185#endif
10186
10187	if (!err) {
10188		err = pci_register_driver(&niu_pci_driver);
10189#ifdef CONFIG_SPARC64
10190		if (err)
10191			platform_driver_unregister(&niu_of_driver);
10192#endif
10193	}
10194
10195	return err;
10196}
10197
10198static void __exit niu_exit(void)
10199{
10200	pci_unregister_driver(&niu_pci_driver);
10201#ifdef CONFIG_SPARC64
10202	platform_driver_unregister(&niu_of_driver);
10203#endif
10204}
10205
10206module_init(niu_init);
10207module_exit(niu_exit);
10208