// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for BCM963xx builtin Ethernet mac
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>

#include <bcm63xx_dev_enet.h>
#include "bcm63xx_enet.h"

static char bcm_enet_driver_name[] = "bcm63xx_enet";

static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");

/* io registers memory shared between all devices */
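/* [0]: ENETDMA global configuration, [1]: ENETDMAC per-channel
 * configuration, [2]: ENETDMAS per-channel state ram */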
static void __iomem *bcm_enet_shared_base[3];

/*
 * io helpers to access mac registers
 */
static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(priv->base + off);
}

static inline void enet_writel(struct bcm_enet_priv *priv,
			       u32 val, u32 off)
{
	bcm_writel(val, priv->base + off);
}

/*
 * io helpers to access switch registers
 */
static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(priv->base + off);
}

static inline void enetsw_writel(struct bcm_enet_priv *priv,
				 u32 val, u32 off)
{
	bcm_writel(val, priv->base + off);
}

static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readw(priv->base + off);
}

static inline void enetsw_writew(struct bcm_enet_priv *priv,
				 u16 val, u32 off)
{
	bcm_writew(val, priv->base + off);
}

static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readb(priv->base + off);
}

static inline void enetsw_writeb(struct bcm_enet_priv *priv,
				 u8 val, u32 off)
{
	bcm_writeb(val, priv->base + off);
}

/* io helpers to access shared registers */
static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(bcm_enet_shared_base[0] + off);
}

static inline void enet_dma_writel(struct bcm_enet_priv *priv,
				   u32 val, u32 off)
{
	bcm_writel(val, bcm_enet_shared_base[0] + off);
}

static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
	return bcm_readl(bcm_enet_shared_base[1] +
		bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}

static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
				    u32 val, u32 off, int chan)
{
	bcm_writel(val, bcm_enet_shared_base[1] +
		bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}

static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
	return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
}

static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
				    u32 val, u32 off, int chan)
{
	bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
}

/*
 * write given data into mii register and wait for transfer to end
 * with timeout (average measured transfer time is 25us)
 */
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
	int limit;

	/* make sure mii interrupt status is cleared */
	enet_writel(priv, ENET_IR_MII, ENET_IR_REG);

	enet_writel(priv, data, ENET_MIIDATA_REG);
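	/* make sure the command reaches the data register before we
	 * poll the interrupt status */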
	wmb();

	/* busy wait on mii interrupt bit, with timeout */
	limit = 1000;
	do {
		if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
			break;
		udelay(1);
	} while (limit-- > 0);

	return (limit < 0) ? 1 : 0;
}

/*
 * MII internal read callback
 */
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
			      int regnum)
{
	u32 tmp, val;

	tmp = regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_READ_MASK;

	if (do_mdio_op(priv, tmp))
		return -1;

	val = enet_readl(priv, ENET_MIIDATA_REG);
	val &= 0xffff;
	return val;
}

/*
 * MII internal write callback
 */
static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
			       int regnum, u16 value)
{
	u32 tmp;

	tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_WRITE_MASK;

	(void)do_mdio_op(priv, tmp);
	return 0;
}

/*
 * MII read callback from phylib
 */
static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
				     int regnum)
{
	return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
}

/*
 * MII write callback from phylib
 */
static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
				      int regnum, u16 value)
{
	return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
}

/*
 * MII read callback from mii core
 */
static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
				  int regnum)
{
	return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
}

/*
 * MII write callback from mii core
 */
static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
				    int regnum, int value)
{
	bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
}

/*
 * refill rx queue
 */
static int bcm_enet_refill_rx(struct net_device *dev, bool napi_mode)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	while (priv->rx_desc_count < priv->rx_ring_size) {
		struct bcm_enet_desc *desc;
		int desc_idx;
		u32 len_stat;

		desc_idx = priv->rx_dirty_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		if (!priv->rx_buf[desc_idx]) {
			void *buf;

			if (likely(napi_mode))
				buf = napi_alloc_frag(priv->rx_frag_size);
			else
				buf = netdev_alloc_frag(priv->rx_frag_size);
			if (unlikely(!buf))
				break;
			priv->rx_buf[desc_idx] = buf;
			desc->address = dma_map_single(&priv->pdev->dev,
						       buf + priv->rx_buf_offset,
						       priv->rx_buf_size,
						       DMA_FROM_DEVICE);
		}

		len_stat = priv->rx_buf_size << DMADESC_LENGTH_SHIFT;
		len_stat |= DMADESC_OWNER_MASK;
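		/* the last descriptor carries the wrap bit so the dma
		 * engine loops back to the head of the ring */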
		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
			len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
			priv->rx_dirty_desc = 0;
		} else {
			priv->rx_dirty_desc++;
		}
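		/* ensure the buffer address is written before ownership
		 * is handed back to hardware */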
		wmb();
		desc->len_stat = len_stat;

		priv->rx_desc_count++;

		/* tell dma engine we allocated one buffer */
		if (priv->dma_has_sram)
			enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
		else
			enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan);
	}

	/* If rx ring is still empty, set a timer to try allocating
	 * again at a later time. */
	if (priv->rx_desc_count == 0 && netif_running(dev)) {
		dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
		priv->rx_timeout.expires = jiffies + HZ;
		add_timer(&priv->rx_timeout);
	}

	return 0;
}

/*
 * timer callback to defer refill rx queue in case we're OOM
 */
static void bcm_enet_refill_rx_timer(struct timer_list *t)
{
	struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
	struct net_device *dev = priv->net_dev;

	spin_lock(&priv->rx_lock);
	bcm_enet_refill_rx(dev, false);
	spin_unlock(&priv->rx_lock);
}

/*
 * extract packet from rx queue
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
	struct bcm_enet_priv *priv;
	struct list_head rx_list;
	struct device *kdev;
	int processed;

	priv = netdev_priv(dev);
	INIT_LIST_HEAD(&rx_list);
	kdev = &priv->pdev->dev;
	processed = 0;

	/* don't scan the ring further than the number of refilled
	 * descriptors */
	if (budget > priv->rx_desc_count)
		budget = priv->rx_desc_count;

	do {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		int desc_idx;
		u32 len_stat;
		unsigned int len;
		void *buf;

		desc_idx = priv->rx_curr_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		/* make sure we actually read the descriptor status on
		 * each loop iteration */
		rmb();

		len_stat = desc->len_stat;

		/* break if dma ownership belongs to hw */
		if (len_stat & DMADESC_OWNER_MASK)
			break;

		processed++;
		priv->rx_curr_desc++;
		if (priv->rx_curr_desc == priv->rx_ring_size)
			priv->rx_curr_desc = 0;

		/* if the packet does not have the start of packet _and_
		 * end of packet flags set, just recycle it */
		if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
			(DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
			dev->stats.rx_dropped++;
			continue;
		}

		/* recycle packet if it's marked as bad */
		if (!priv->enet_is_sw &&
		    unlikely(len_stat & DMADESC_ERR_MASK)) {
			dev->stats.rx_errors++;

			if (len_stat & DMADESC_OVSIZE_MASK)
				dev->stats.rx_length_errors++;
			if (len_stat & DMADESC_CRC_MASK)
				dev->stats.rx_crc_errors++;
			if (len_stat & DMADESC_UNDER_MASK)
				dev->stats.rx_frame_errors++;
			if (len_stat & DMADESC_OV_MASK)
				dev->stats.rx_fifo_errors++;
			continue;
		}

		/* valid packet */
		buf = priv->rx_buf[desc_idx];
		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= 4;

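		/* small packet: copy it into a fresh skb and leave the
		 * dma buffer mapped in place for reuse */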
		if (len < copybreak) {
			skb = napi_alloc_skb(&priv->napi, len);
			if (unlikely(!skb)) {
				/* forget packet, just rearm desc */
				dev->stats.rx_dropped++;
				continue;
			}

			dma_sync_single_for_cpu(kdev, desc->address,
						len, DMA_FROM_DEVICE);
			memcpy(skb->data, buf + priv->rx_buf_offset, len);
			dma_sync_single_for_device(kdev, desc->address,
						   len, DMA_FROM_DEVICE);
		} else {
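			/* large packet: unmap the buffer and hand it to
			 * the stack; the refill path will allocate a
			 * replacement */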
			dma_unmap_single(kdev, desc->address,
					 priv->rx_buf_size, DMA_FROM_DEVICE);
			priv->rx_buf[desc_idx] = NULL;

			skb = napi_build_skb(buf, priv->rx_frag_size);
			if (unlikely(!skb)) {
				skb_free_frag(buf);
				dev->stats.rx_dropped++;
				continue;
			}
			skb_reserve(skb, priv->rx_buf_offset);
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		list_add_tail(&skb->list, &rx_list);

	} while (processed < budget);

	netif_receive_skb_list(&rx_list);
	priv->rx_desc_count -= processed;

	if (processed || !priv->rx_desc_count) {
		bcm_enet_refill_rx(dev, true);

		/* kick rx dma */
		enet_dmac_writel(priv, priv->dma_chan_en_mask,
				 ENETDMAC_CHANCFG, priv->rx_chan);
	}

	return processed;
}

/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm_enet_tx_reclaim(struct net_device *dev, int force, int budget)
{
	struct bcm_enet_priv *priv;
	unsigned int bytes;
	int released;

	priv = netdev_priv(dev);
	bytes = 0;
	released = 0;

	while (priv->tx_desc_count < priv->tx_ring_size) {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;

		/* We run in a bh context and race against start_xmit,
		 * which is called with bh disabled */
		spin_lock(&priv->tx_lock);

		desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

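		/* stop at the first descriptor still owned by hardware,
		 * unless reclaim is forced (device is being stopped) */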
		if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
			spin_unlock(&priv->tx_lock);
			break;
		}

		/* ensure the other fields of the descriptor were not
		 * read before we checked ownership */
		rmb();

		skb = priv->tx_skb[priv->tx_dirty_desc];
		priv->tx_skb[priv->tx_dirty_desc] = NULL;
		dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
				 DMA_TO_DEVICE);

		priv->tx_dirty_desc++;
		if (priv->tx_dirty_desc == priv->tx_ring_size)
			priv->tx_dirty_desc = 0;
		priv->tx_desc_count++;

		spin_unlock(&priv->tx_lock);

		if (desc->len_stat & DMADESC_UNDER_MASK)
			dev->stats.tx_errors++;

		bytes += skb->len;
		napi_consume_skb(skb, budget);
		released++;
	}

	netdev_completed_queue(dev, released, bytes);

	if (netif_queue_stopped(dev) && released)
		netif_wake_queue(dev);

	return released;
}

/*
 * poll func, called by network core
 */
static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	int rx_work_done;

	priv = container_of(napi, struct bcm_enet_priv, napi);
	dev = priv->net_dev;

	/* ack interrupts */
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->tx_chan);

	/* reclaim sent skb */
	bcm_enet_tx_reclaim(dev, 0, budget);

	spin_lock(&priv->rx_lock);
	rx_work_done = bcm_enet_receive_queue(dev, budget);
	spin_unlock(&priv->rx_lock);

	if (rx_work_done >= budget) {
		/* rx queue is not yet empty/clean */
		return rx_work_done;
	}

	/* no more packets in rx/tx queue, remove device from poll
	 * queue */
	napi_complete_done(napi, rx_work_done);

	/* restore rx/tx interrupt */
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->tx_chan);

	return rx_work_done;
}

/*
 * mac interrupt handler
 */
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;
	u32 stat;

	dev = dev_id;
	priv = netdev_priv(dev);

	stat = enet_readl(priv, ENET_IR_REG);
	if (!(stat & ENET_IR_MIB))
		return IRQ_NONE;

	/* clear & mask interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, 0, ENET_IRMASK_REG);

	/* read mib registers in workqueue */
	schedule_work(&priv->mib_update_task);

	return IRQ_HANDLED;
}

/*
 * rx/tx dma interrupt handler
 */
static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;

	dev = dev_id;
	priv = netdev_priv(dev);

	/* mask rx/tx interrupts */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}

/*
 * tx request callback
 */
static netdev_tx_t
bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct bcm_enet_desc *desc;
	u32 len_stat;
	netdev_tx_t ret;

	priv = netdev_priv(dev);

	/* lock against tx reclaim */
	spin_lock(&priv->tx_lock);

	/* make sure the tx hw queue is not full; this should not
	 * happen since we stop the queue before it fills up */
	if (unlikely(!priv->tx_desc_count)) {
		netif_stop_queue(dev);
		dev_err(&priv->pdev->dev, "xmit called with no tx desc available?\n");
		ret = NETDEV_TX_BUSY;
		goto out_unlock;
	}

	/* pad small packets sent on a switch device */
	if (priv->enet_is_sw && skb->len < 64) {
		int needed = 64 - skb->len;

		if (unlikely(skb_tailroom(skb) < needed)) {
			struct sk_buff *nskb;

			nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
			if (!nskb) {
				ret = NETDEV_TX_BUSY;
				goto out_unlock;
			}
			dev_kfree_skb(skb);
			skb = nskb;
		}
		skb_put_zero(skb, needed);
	}

	/* point to the next available desc */
	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
	priv->tx_skb[priv->tx_curr_desc] = skb;

	/* fill descriptor */
	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);

	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
		DMADESC_APPEND_CRC |
		DMADESC_OWNER_MASK;

	priv->tx_curr_desc++;
	if (priv->tx_curr_desc == priv->tx_ring_size) {
		priv->tx_curr_desc = 0;
		len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
	}
	priv->tx_desc_count--;

	/* dma might already be polling the ring; make sure we update
	 * the descriptor fields in the correct order */
	wmb();
	desc->len_stat = len_stat;
	wmb();

	netdev_sent_queue(dev, skb->len);

	/* kick tx dma, but defer the doorbell while the stack has more
	 * frames queued, unless the ring just became full */
	if (!netdev_xmit_more() || !priv->tx_desc_count)
		enet_dmac_writel(priv, priv->dma_chan_en_mask,
				 ENETDMAC_CHANCFG, priv->tx_chan);

	/* stop queue if no more desc available */
	if (!priv->tx_desc_count)
		netif_stop_queue(dev);

	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ret = NETDEV_TX_OK;

out_unlock:
	spin_unlock(&priv->tx_lock);
	return ret;
}
662
663/*
664 * Change the interface's mac address.
665 */
666static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
667{
668	struct bcm_enet_priv *priv;
669	struct sockaddr *addr = p;
670	u32 val;
671
672	priv = netdev_priv(dev);
673	eth_hw_addr_set(dev, addr->sa_data);
674
675	/* use perfect match register 0 to store my mac address */
676	val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
677		(dev->dev_addr[4] << 8) | dev->dev_addr[5];
678	enet_writel(priv, val, ENET_PML_REG(0));
679
680	val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
681	val |= ENET_PMH_DATAVALID_MASK;
682	enet_writel(priv, val, ENET_PMH_REG(0));
683
684	return 0;
685}
686
687/*
688 * Change rx mode (promiscuous/allmulti) and update multicast list
689 */
690static void bcm_enet_set_multicast_list(struct net_device *dev)
691{
692	struct bcm_enet_priv *priv;
693	struct netdev_hw_addr *ha;
694	u32 val;
695	int i;
696
697	priv = netdev_priv(dev);
698
699	val = enet_readl(priv, ENET_RXCFG_REG);
700
701	if (dev->flags & IFF_PROMISC)
702		val |= ENET_RXCFG_PROMISC_MASK;
703	else
704		val &= ~ENET_RXCFG_PROMISC_MASK;
705
706	/* only 3 perfect match registers left, first one is used for
707	 * own mac address */
708	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
709		val |= ENET_RXCFG_ALLMCAST_MASK;
710	else
711		val &= ~ENET_RXCFG_ALLMCAST_MASK;
712
713	/* no need to set perfect match registers if we catch all
714	 * multicast */
715	if (val & ENET_RXCFG_ALLMCAST_MASK) {
716		enet_writel(priv, val, ENET_RXCFG_REG);
717		return;
718	}
719
720	i = 0;
721	netdev_for_each_mc_addr(ha, dev) {
722		u8 *dmi_addr;
723		u32 tmp;
724
725		if (i == 3)
726			break;
727		/* update perfect match registers */
728		dmi_addr = ha->addr;
729		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
730			(dmi_addr[4] << 8) | dmi_addr[5];
731		enet_writel(priv, tmp, ENET_PML_REG(i + 1));
732
733		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
734		tmp |= ENET_PMH_DATAVALID_MASK;
735		enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
736	}
737
738	for (; i < 3; i++) {
739		enet_writel(priv, 0, ENET_PML_REG(i + 1));
740		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
741	}
742
743	enet_writel(priv, val, ENET_RXCFG_REG);
744}
745
746/*
747 * set mac duplex parameters
748 */
749static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
750{
751	u32 val;
752
753	val = enet_readl(priv, ENET_TXCTL_REG);
754	if (fullduplex)
755		val |= ENET_TXCTL_FD_MASK;
756	else
757		val &= ~ENET_TXCTL_FD_MASK;
758	enet_writel(priv, val, ENET_TXCTL_REG);
759}
760
761/*
762 * set mac flow control parameters
763 */
764static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
765{
766	u32 val;
767
768	/* rx flow control (pause frame handling) */
769	val = enet_readl(priv, ENET_RXCFG_REG);
770	if (rx_en)
771		val |= ENET_RXCFG_ENFLOW_MASK;
772	else
773		val &= ~ENET_RXCFG_ENFLOW_MASK;
774	enet_writel(priv, val, ENET_RXCFG_REG);
775
776	if (!priv->dma_has_sram)
777		return;
778
779	/* tx flow control (pause frame generation) */
780	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
781	if (tx_en)
782		val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
783	else
784		val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
785	enet_dma_writel(priv, val, ENETDMA_CFG_REG);
786}
787
788/*
789 * link changed callback (from phylib)
790 */
791static void bcm_enet_adjust_phy_link(struct net_device *dev)
792{
793	struct bcm_enet_priv *priv;
794	struct phy_device *phydev;
795	int status_changed;
796
797	priv = netdev_priv(dev);
798	phydev = dev->phydev;
799	status_changed = 0;
800
801	if (priv->old_link != phydev->link) {
802		status_changed = 1;
803		priv->old_link = phydev->link;
804	}
805
806	/* reflect duplex change in mac configuration */
807	if (phydev->link && phydev->duplex != priv->old_duplex) {
808		bcm_enet_set_duplex(priv,
809				    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
810		status_changed = 1;
811		priv->old_duplex = phydev->duplex;
812	}
813
814	/* enable flow control if remote advertise it (trust phylib to
815	 * check that duplex is full */
816	if (phydev->link && phydev->pause != priv->old_pause) {
817		int rx_pause_en, tx_pause_en;
818
819		if (phydev->pause) {
820			/* pause was advertised by lpa and us */
821			rx_pause_en = 1;
822			tx_pause_en = 1;
823		} else if (!priv->pause_auto) {
824			/* pause setting overridden by user */
825			rx_pause_en = priv->pause_rx;
826			tx_pause_en = priv->pause_tx;
827		} else {
828			rx_pause_en = 0;
829			tx_pause_en = 0;
830		}
831
832		bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
833		status_changed = 1;
834		priv->old_pause = phydev->pause;
835	}
836
837	if (status_changed) {
838		pr_info("%s: link %s", dev->name, phydev->link ?
839			"UP" : "DOWN");
840		if (phydev->link)
841			pr_cont(" - %d/%s - flow control %s", phydev->speed,
842			       DUPLEX_FULL == phydev->duplex ? "full" : "half",
843			       phydev->pause == 1 ? "rx&tx" : "off");
844
845		pr_cont("\n");
846	}
847}

/*
 * link changed callback (if phylib is not used)
 */
static void bcm_enet_adjust_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcm_enet_set_duplex(priv, priv->force_duplex_full);
	bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
	netif_carrier_on(dev);

	pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
		dev->name,
		priv->force_speed_100 ? 100 : 10,
		priv->force_duplex_full ? "full" : "half",
		priv->pause_rx ? "rx" : "off",
		priv->pause_tx ? "tx" : "off");
}

static void bcm_enet_free_rx_buf_ring(struct device *kdev, struct bcm_enet_priv *priv)
{
	int i;

	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_buf[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_buf_size,
				 DMA_FROM_DEVICE);
		skb_free_frag(priv->rx_buf[i]);
	}
	kfree(priv->rx_buf);
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enet_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct sockaddr addr;
	struct device *kdev;
	struct phy_device *phydev;
	int i, ret;
	unsigned int size;
	char phy_id[MII_BUS_ID_SIZE + 3];
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	if (priv->has_phy) {
		/* connect to PHY */
		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 priv->mii_bus->id, priv->phy_id);

		phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link,
				     PHY_INTERFACE_MODE_MII);

		if (IS_ERR(phydev)) {
			dev_err(kdev, "could not attach to PHY\n");
			return PTR_ERR(phydev);
		}

		/* mask with MAC supported features */
		phy_support_sym_pause(phydev);
		phy_set_max_speed(phydev, SPEED_100);
		phy_set_sym_pause(phydev, priv->pause_rx, priv->pause_rx,
				  priv->pause_auto);

		phy_attached_info(phydev);

		priv->old_link = 0;
		priv->old_duplex = -1;
		priv->old_pause = -1;
	} else {
		phydev = NULL;
	}

	/* mask all interrupts and request them */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
	if (ret)
		goto out_phy_disconnect;

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0,
			  dev->name, dev);
	if (ret)
		goto out_freeirq;

	ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
			  0, dev->name, dev);
	if (ret)
		goto out_freeirq_rx;

	/* initialize perfect match registers */
	for (i = 0; i < 4; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i));
		enet_writel(priv, 0, ENET_PMH_REG(i));
	}

	/* write device mac address */
	memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
	bcm_enet_set_mac_address(dev, &addr);

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with buffers */
	priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
			       GFP_KERNEL);
	if (!priv->rx_buf) {
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* initialize flow control buffer allocation */
	if (priv->dma_has_sram)
		enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
				ENETDMA_BUFALLOC_REG(priv->rx_chan));
	else
		enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
				 ENETDMAC_BUFALLOC, priv->rx_chan);

	if (bcm_enet_refill_rx(dev, false)) {
		dev_err(kdev, "cannot allocate rx buffer queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	if (priv->dma_has_sram) {
		enet_dmas_writel(priv, priv->rx_desc_dma,
				 ENETDMAS_RSTART_REG, priv->rx_chan);
		enet_dmas_writel(priv, priv->tx_desc_dma,
				 ENETDMAS_RSTART_REG, priv->tx_chan);
	} else {
		enet_dmac_writel(priv, priv->rx_desc_dma,
				 ENETDMAC_RSTART, priv->rx_chan);
		enet_dmac_writel(priv, priv->tx_desc_dma,
				 ENETDMAC_RSTART, priv->tx_chan);
	}

	/* clear remaining state ram for rx & tx channel */
	if (priv->dma_has_sram) {
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
	} else {
		enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan);
		enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan);
	}

	/* set max rx/tx length */
	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

	/* set dma maximum burst len */
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->tx_chan);

	/* set correct transmit fifo watermark */
	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	if (priv->dma_has_sram) {
		val = priv->rx_ring_size / 3;
		enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
		val = (priv->rx_ring_size * 2) / 3;
		enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
	} else {
		enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
		enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan);
		enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan);
	}

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel */
	wmb();
	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_ENABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	if (priv->dma_has_sram)
		enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dmac_writel(priv, priv->dma_chan_en_mask,
			 ENETDMAC_CHANCFG, priv->rx_chan);

	/* watch "mib counters about to overflow" interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->tx_chan);

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->tx_chan);

	if (phydev)
		phy_start(phydev);
	else
		bcm_enet_adjust_link(dev);

	netif_start_queue(dev);
	return 0;

out:
	bcm_enet_free_rx_buf_ring(kdev, priv);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	free_irq(dev->irq, dev);

out_phy_disconnect:
	if (phydev)
		phy_disconnect(phydev);

	return ret;
}

/*
 * disable mac
 */
static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
{
	int limit;
	u32 val;

	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_DISABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	limit = 1000;
	do {
		u32 val;

		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_DISABLE_MASK))
			break;
		udelay(1);
	} while (limit--);
}

/*
 * disable dma in given channel
 */
static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
{
	int limit;

	enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan);

	limit = 1000;
	do {
		u32 val;

		val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan);
		if (!(val & ENETDMAC_CHANCFG_EN_MASK))
			break;
		udelay(1);
	} while (limit--);
}

/*
 * stop callback
 */
static int bcm_enet_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	if (priv->has_phy)
		phy_stop(dev->phydev);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	/* make sure no mib update is scheduled */
	cancel_work_sync(&priv->mib_update_task);

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);
	bcm_enet_disable_mac(priv);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1, 0);

	/* free the rx buffer ring */
	bcm_enet_free_rx_buf_ring(kdev, priv);

	/* free remaining allocated memory */
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);
	free_irq(dev->irq, dev);

	/* release phy */
	if (priv->has_phy)
		phy_disconnect(dev->phydev);

	/* reset BQL after forced tx reclaim to prevent kernel panic */
	netdev_reset_queue(dev);

	return 0;
}

/*
 * ethtool callbacks
 */
struct bcm_enet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
	int mib_reg;
};

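/* expand to the sizeof_stat and stat_offset initializers of a stats
 * table entry */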
#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),		\
		     offsetof(struct bcm_enet_priv, m)
#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m),		\
		     offsetof(struct net_device_stats, m)

static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
	{ "rx_packets", DEV_STAT(rx_packets), -1 },
	{ "tx_packets", DEV_STAT(tx_packets), -1 },
	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
	{ "rx_errors", DEV_STAT(rx_errors), -1 },
	{ "tx_errors", DEV_STAT(tx_errors), -1 },
	{ "rx_dropped", DEV_STAT(rx_dropped), -1 },
	{ "tx_dropped", DEV_STAT(tx_dropped), -1 },

	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS },
	{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
	{ "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
	{ "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
	{ "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
	{ "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
	{ "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
	{ "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
	{ "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
	{ "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
	{ "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
	{ "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
	{ "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
	{ "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
	{ "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
	{ "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
	{ "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },

	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
	{ "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
	{ "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023 },
	{ "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
	{ "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
	{ "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
	{ "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
	{ "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
	{ "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
	{ "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
	{ "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
	{ "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
	{ "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
	{ "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
};

#define BCM_ENET_STATS_LEN	ARRAY_SIZE(bcm_enet_gstrings_stats)

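/* counters that are not exported through ethtool but must still be
 * read so the "about to overflow" interrupt is acked */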
static const u32 unused_mib_regs[] = {
	ETH_MIB_TX_ALL_OCTETS,
	ETH_MIB_TX_ALL_PKTS,
	ETH_MIB_RX_ALL_OCTETS,
	ETH_MIB_RX_ALL_PKTS,
};

static void bcm_enet_get_drvinfo(struct net_device *netdev,
				 struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
	strscpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}

static int bcm_enet_get_sset_count(struct net_device *netdev,
				   int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_ENET_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void bcm_enet_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_enet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static void update_mib_counters(struct bcm_enet_priv *priv)
{
	int i;

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		u32 val;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		if (s->mib_reg == -1)
			continue;

		val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
		p = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64))
			*(u64 *)p += val;
		else
			*(u32 *)p += val;
	}

	/* also empty unused mib counters to make sure mib counter
	 * overflow interrupt is cleared */
	for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
		(void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
}

static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
{
	struct bcm_enet_priv *priv;

	priv = container_of(t, struct bcm_enet_priv, mib_update_task);
	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);
	mutex_unlock(&priv->mib_update_lock);

	/* reenable mib interrupt */
	if (netif_running(priv->net_dev))
		enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
}

static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcm_enet_priv *priv;
	int i;

	priv = netdev_priv(netdev);

	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		if (s->mib_reg == -1)
			p = (char *)&netdev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = (s->sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
	}
	mutex_unlock(&priv->mib_update_lock);
}

static int bcm_enet_nway_reset(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy)
		return phy_ethtool_nway_reset(dev);

	return -EOPNOTSUPP;
}

static int bcm_enet_get_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	struct bcm_enet_priv *priv;
	u32 supported, advertising;

	priv = netdev_priv(dev);

	if (priv->has_phy) {
		if (!dev->phydev)
			return -ENODEV;

		phy_ethtool_ksettings_get(dev->phydev, cmd);

		return 0;
	} else {
		cmd->base.autoneg = 0;
		cmd->base.speed = (priv->force_speed_100) ?
			SPEED_100 : SPEED_10;
		cmd->base.duplex = (priv->force_duplex_full) ?
			DUPLEX_FULL : DUPLEX_HALF;
		supported = ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full;
		advertising = 0;
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.supported, supported);
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.advertising, advertising);
		cmd->base.port = PORT_MII;
	}
	return 0;
}

static int bcm_enet_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!dev->phydev)
			return -ENODEV;
		return phy_ethtool_ksettings_set(dev->phydev, cmd);
	} else {
		if (cmd->base.autoneg ||
		    (cmd->base.speed != SPEED_100 &&
		     cmd->base.speed != SPEED_10) ||
		    cmd->base.port != PORT_MII)
			return -EINVAL;

		priv->force_speed_100 =
			(cmd->base.speed == SPEED_100) ? 1 : 0;
		priv->force_duplex_full =
			(cmd->base.duplex == DUPLEX_FULL) ? 1 : 0;

		if (netif_running(dev))
			bcm_enet_adjust_link(dev);
		return 0;
	}
}

static void
bcm_enet_get_ringparam(struct net_device *dev,
		       struct ethtool_ringparam *ering,
		       struct kernel_ethtool_ringparam *kernel_ering,
		       struct netlink_ext_ack *extack)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	/* rx/tx ring is actually only limited by memory */
	ering->rx_max_pending = 8192;
	ering->tx_max_pending = 8192;
	ering->rx_pending = priv->rx_ring_size;
	ering->tx_pending = priv->tx_ring_size;
}

static int bcm_enet_set_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ering,
				  struct kernel_ethtool_ringparam *kernel_ering,
				  struct netlink_ext_ack *extack)
{
	struct bcm_enet_priv *priv;
	int was_running;

	priv = netdev_priv(dev);

	was_running = 0;
	if (netif_running(dev)) {
		bcm_enet_stop(dev);
		was_running = 1;
	}

	priv->rx_ring_size = ering->rx_pending;
	priv->tx_ring_size = ering->tx_pending;

	if (was_running) {
		int err;

		err = bcm_enet_open(dev);
		if (err)
			dev_close(dev);
		else
			bcm_enet_set_multicast_list(dev);
	}
	return 0;
}

static void bcm_enet_get_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *ecmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	ecmd->autoneg = priv->pause_auto;
	ecmd->rx_pause = priv->pause_rx;
	ecmd->tx_pause = priv->pause_tx;
}

static int bcm_enet_set_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *ecmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	if (priv->has_phy) {
		if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
			/* asymmetric pause mode is not supported;
			 * actually possible, but the integrated PHY has
			 * a read-only asym_pause bit */
			return -EINVAL;
		}
	} else {
		/* no pause autoneg on direct mii connection */
		if (ecmd->autoneg)
			return -EINVAL;
	}

	priv->pause_auto = ecmd->autoneg;
	priv->pause_rx = ecmd->rx_pause;
	priv->pause_tx = ecmd->tx_pause;

	return 0;
}

static const struct ethtool_ops bcm_enet_ethtool_ops = {
	.get_strings		= bcm_enet_get_strings,
	.get_sset_count		= bcm_enet_get_sset_count,
	.get_ethtool_stats      = bcm_enet_get_ethtool_stats,
	.nway_reset		= bcm_enet_nway_reset,
	.get_drvinfo		= bcm_enet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= bcm_enet_get_ringparam,
	.set_ringparam		= bcm_enet_set_ringparam,
	.get_pauseparam		= bcm_enet_get_pauseparam,
	.set_pauseparam		= bcm_enet_set_pauseparam,
	.get_link_ksettings	= bcm_enet_get_link_ksettings,
	.set_link_ksettings	= bcm_enet_set_link_ksettings,
};

static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!dev->phydev)
			return -ENODEV;
		return phy_mii_ioctl(dev->phydev, rq, cmd);
	} else {
		struct mii_if_info mii;

		mii.dev = dev;
		mii.mdio_read = bcm_enet_mdio_read_mii;
		mii.mdio_write = bcm_enet_mdio_write_mii;
		mii.phy_id = 0;
		mii.phy_id_mask = 0x3f;
		mii.reg_num_mask = 0x1f;
		return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
	}
}

/*
 * adjust mtu, can't be called while device is running
 */
static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bcm_enet_priv *priv = netdev_priv(dev);
	int actual_mtu = new_mtu;

	if (netif_running(dev))
		return -EBUSY;

	/* add ethernet header + vlan tag size */
	actual_mtu += VLAN_ETH_HLEN;

	/*
	 * setup maximum size before we get overflow mark in
	 * descriptor, note that this will not prevent reception of
	 * big frames, they will be split into multiple buffers
	 * anyway
	 */
	priv->hw_mtu = actual_mtu;

	/*
	 * align rx buffer size to dma burst len, account for the FCS
	 * since it's appended
	 */
	priv->rx_buf_size = ALIGN(actual_mtu + ETH_FCS_LEN,
				  priv->dma_maxburst * 4);

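	/* the frag must also hold struct skb_shared_info, which
	 * napi_build_skb() places at the end of the buffer */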
	priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) +
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	dev->mtu = new_mtu;
	return 0;
}

/*
 * preinit hardware to allow mii operation while device is down
 */
static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
{
	u32 val;
	int limit;

	/* make sure mac is disabled */
	bcm_enet_disable_mac(priv);

	/* soft reset mac */
	val = ENET_CTL_SRESET_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	wmb();

	limit = 1000;
	do {
		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_SRESET_MASK))
			break;
		udelay(1);
	} while (limit--);

	/* select correct mii interface */
	val = enet_readl(priv, ENET_CTL_REG);
	if (priv->use_external_mii)
		val |= ENET_CTL_EPHYSEL_MASK;
	else
		val &= ~ENET_CTL_EPHYSEL_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	/* turn on mdc clock */
	enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
		    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);

	/* set mib counters to self-clear when read */
	val = enet_readl(priv, ENET_MIBCTL_REG);
	val |= ENET_MIBCTL_RDCLEAR_MASK;
	enet_writel(priv, val, ENET_MIBCTL_REG);
}

static const struct net_device_ops bcm_enet_ops = {
	.ndo_open		= bcm_enet_open,
	.ndo_stop		= bcm_enet_stop,
	.ndo_start_xmit		= bcm_enet_start_xmit,
	.ndo_set_mac_address	= bcm_enet_set_mac_address,
	.ndo_set_rx_mode	= bcm_enet_set_multicast_list,
	.ndo_eth_ioctl		= bcm_enet_ioctl,
	.ndo_change_mtu		= bcm_enet_change_mtu,
};

/*
 * allocate netdevice, request register memory and register device.
 */
static int bcm_enet_probe(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct bcm63xx_enet_platform_data *pd;
	int irq, irq_rx, irq_tx;
	struct mii_bus *bus;
	int i, ret;

	if (!bcm_enet_shared_base[0])
		return -EPROBE_DEFER;

	irq = platform_get_irq(pdev, 0);
	irq_rx = platform_get_irq(pdev, 1);
	irq_tx = platform_get_irq(pdev, 2);
	if (irq < 0 || irq_rx < 0 || irq_tx < 0)
		return -ENODEV;

	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;
	priv = netdev_priv(dev);

	priv->enet_is_sw = false;
	priv->dma_maxburst = BCMENET_DMA_MAXBURST;
	priv->rx_buf_offset = NET_SKB_PAD;

	ret = bcm_enet_change_mtu(dev, dev->mtu);
	if (ret)
		goto out;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto out;
	}

	dev->irq = priv->irq = irq;
	priv->irq_rx = irq_rx;
	priv->irq_tx = irq_tx;

	priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out;
	}
	ret = clk_prepare_enable(priv->mac_clk);
	if (ret)
		goto out;

	/* initialize default and fetch platform data */
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;

	pd = dev_get_platdata(&pdev->dev);
	if (pd) {
		eth_hw_addr_set(dev, pd->mac_addr);
		priv->has_phy = pd->has_phy;
		priv->phy_id = pd->phy_id;
		priv->has_phy_interrupt = pd->has_phy_interrupt;
		priv->phy_interrupt = pd->phy_interrupt;
		priv->use_external_mii = !pd->use_internal_phy;
		priv->pause_auto = pd->pause_auto;
		priv->pause_rx = pd->pause_rx;
		priv->pause_tx = pd->pause_tx;
		priv->force_duplex_full = pd->force_duplex_full;
		priv->force_speed_100 = pd->force_speed_100;
		priv->dma_chan_en_mask = pd->dma_chan_en_mask;
		priv->dma_chan_int_mask = pd->dma_chan_int_mask;
		priv->dma_chan_width = pd->dma_chan_width;
		priv->dma_has_sram = pd->dma_has_sram;
		priv->dma_desc_shift = pd->dma_desc_shift;
		priv->rx_chan = pd->rx_chan;
		priv->tx_chan = pd->tx_chan;
	}

	if (priv->has_phy && !priv->use_external_mii) {
		/* using internal PHY, enable clock */
		priv->phy_clk = devm_clk_get(&pdev->dev, "ephy");
		if (IS_ERR(priv->phy_clk)) {
			ret = PTR_ERR(priv->phy_clk);
			priv->phy_clk = NULL;
			goto out_disable_clk_mac;
		}
		ret = clk_prepare_enable(priv->phy_clk);
		if (ret)
			goto out_disable_clk_mac;
	}

	/* do minimal hardware init to be able to probe mii bus */
	bcm_enet_hw_preinit(priv);

	/* MII bus registration */
	if (priv->has_phy) {
		priv->mii_bus = mdiobus_alloc();
		if (!priv->mii_bus) {
			ret = -ENOMEM;
			goto out_uninit_hw;
		}

		bus = priv->mii_bus;
		bus->name = "bcm63xx_enet MII bus";
		bus->parent = &pdev->dev;
		bus->priv = priv;
		bus->read = bcm_enet_mdio_read_phylib;
		bus->write = bcm_enet_mdio_write_phylib;
		snprintf(bus->id, sizeof(bus->id), "%s-%d", pdev->name, pdev->id);

		/* only probe bus where we think the PHY is, because
		 * the mdio read operation returns 0 instead of 0xffff
		 * if a slave is not present on hw */
		bus->phy_mask = ~(1 << priv->phy_id);

		if (priv->has_phy_interrupt)
			bus->irq[priv->phy_id] = priv->phy_interrupt;

		ret = mdiobus_register(bus);
		if (ret) {
			dev_err(&pdev->dev, "unable to register mdio bus\n");
			goto out_free_mdio;
		}
	} else {
		/* run platform code to initialize PHY device */
		if (pd && pd->mii_config &&
		    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
				   bcm_enet_mdio_write_mii)) {
			dev_err(&pdev->dev, "unable to configure mdio bus\n");
			goto out_uninit_hw;
		}
	}

	spin_lock_init(&priv->rx_lock);

	/* init rx timeout (used for oom) */
	timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);

	/* init the mib update lock&work */
	mutex_init(&priv->mib_update_lock);
	INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);

	/* zero mib counters */
	for (i = 0; i < ENET_MIB_REG_COUNT; i++)
		enet_writel(priv, 0, ENET_MIB_REG(i));

	/* register netdevice */
	dev->netdev_ops = &bcm_enet_ops;
	netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16);

	dev->ethtool_ops = &bcm_enet_ethtool_ops;
	/* MTU range: 46 - 2028 */
	dev->min_mtu = ETH_ZLEN - ETH_HLEN;
	dev->max_mtu = BCMENET_MAX_MTU - VLAN_ETH_HLEN;
	SET_NETDEV_DEV(dev, &pdev->dev);

	ret = register_netdev(dev);
	if (ret)
		goto out_unregister_mdio;

	netif_carrier_off(dev);
	platform_set_drvdata(pdev, dev);
	priv->pdev = pdev;
	priv->net_dev = dev;

	return 0;

out_unregister_mdio:
	if (priv->mii_bus)
		mdiobus_unregister(priv->mii_bus);

out_free_mdio:
	if (priv->mii_bus)
		mdiobus_free(priv->mii_bus);

out_uninit_hw:
	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);
	clk_disable_unprepare(priv->phy_clk);

out_disable_clk_mac:
	clk_disable_unprepare(priv->mac_clk);
out:
	free_netdev(dev);
	return ret;
}

/*
 * exit func, stops hardware and unregisters netdevice
 */
static int bcm_enet_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
	unregister_netdev(dev);

	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);

	if (priv->has_phy) {
		mdiobus_unregister(priv->mii_bus);
		mdiobus_free(priv->mii_bus);
	} else {
		struct bcm63xx_enet_platform_data *pd;

		pd = dev_get_platdata(&pdev->dev);
		if (pd && pd->mii_config)
			pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
				       bcm_enet_mdio_write_mii);
	}

	/* disable hw block clocks */
	clk_disable_unprepare(priv->phy_clk);
	clk_disable_unprepare(priv->mac_clk);

	free_netdev(dev);
	return 0;
}

static struct platform_driver bcm63xx_enet_driver = {
	.probe	= bcm_enet_probe,
	.remove	= bcm_enet_remove,
	.driver	= {
		.name	= "bcm63xx_enet",
	},
};

/*
 * switch mii access callbacks
 */
static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
				int ext, int phy_id, int location)
{
	u32 reg;
	int ret;

	spin_lock_bh(&priv->enetsw_mdio_lock);
	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);

	reg = ENETSW_MDIOC_RD_MASK |
		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
		(location << ENETSW_MDIOC_REG_SHIFT);

	if (ext)
		reg |= ENETSW_MDIOC_EXT_MASK;

	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
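	/* no completion bit is polled here; a fixed delay is assumed
	 * to be long enough for the transaction to finish */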
	udelay(50);
	ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
	spin_unlock_bh(&priv->enetsw_mdio_lock);
	return ret;
}

static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
				  int ext, int phy_id, int location,
				  uint16_t data)
{
	u32 reg;

	spin_lock_bh(&priv->enetsw_mdio_lock);
	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);

	reg = ENETSW_MDIOC_WR_MASK |
		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
		(location << ENETSW_MDIOC_REG_SHIFT);

	if (ext)
		reg |= ENETSW_MDIOC_EXT_MASK;

	reg |= data;

	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
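	/* same fixed-delay completion assumption as the read path */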
	udelay(50);
	spin_unlock_bh(&priv->enetsw_mdio_lock);
}

static inline int bcm_enet_port_is_rgmii(int portid)
{
	return portid >= ENETSW_RGMII_PORT0;
}

/*
 * enet sw PHY polling
 */
static void swphy_poll_timer(struct timer_list *t)
{
	struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll);
	unsigned int i;

	for (i = 0; i < priv->num_ports; i++) {
		struct bcm63xx_enetsw_port *port;
		int val, j, up, advertise, lpa, speed, duplex, media;
		int external_phy = bcm_enet_port_is_rgmii(i);
		u8 override;

		port = &priv->used_ports[i];
		if (!port->used)
			continue;

		if (port->bypass_link)
			continue;

		/* read BMSR twice: the link status bit is latched low, so
		 * the first read only clears a stale link-down indication
		 */
		for (j = 0; j < 2; j++)
			val = bcmenet_sw_mdio_read(priv, external_phy,
						   port->phy_id, MII_BMSR);

		if (val == 0xffff)
			continue;

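		/* only react when the link state actually changed */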
		up = (val & BMSR_LSTATUS) ? 1 : 0;
		if (!(up ^ priv->sw_port_link[i]))
			continue;

		priv->sw_port_link[i] = up;

		/* link changed */
		if (!up) {
			dev_info(&priv->pdev->dev, "link DOWN on %s\n",
				 port->name);
			enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
				      ENETSW_PORTOV_REG(i));
			enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
				      ENETSW_PTCTRL_TXDIS_MASK,
				      ENETSW_PTCTRL_REG(i));
			continue;
		}

		advertise = bcmenet_sw_mdio_read(priv, external_phy,
						 port->phy_id, MII_ADVERTISE);

		lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
					   MII_LPA);

		/* figure out media and duplex from advertise and LPA values */
		media = mii_nway_result(lpa & advertise);
		duplex = (media & ADVERTISE_FULL) ? 1 : 0;

		if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
			speed = 100;
		else
			speed = 10;

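		/* BMSR_ESTATEN means the PHY implements the extended status
		 * register, i.e. it may be gigabit capable: check the
		 * 1000BASE-T advertisement and link partner registers
		 */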
		if (val & BMSR_ESTATEN) {
			advertise = bcmenet_sw_mdio_read(priv, external_phy,
						port->phy_id, MII_CTRL1000);

			lpa = bcmenet_sw_mdio_read(priv, external_phy,
						port->phy_id, MII_STAT1000);

			if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
					&& lpa & (LPA_1000FULL | LPA_1000HALF)) {
				speed = 1000;
				duplex = (lpa & LPA_1000FULL);
			}
		}

		dev_info(&priv->pdev->dev,
			 "link UP on %s, %dMbps, %s-duplex\n",
			 port->name, speed, duplex ? "full" : "half");

		override = ENETSW_PORTOV_ENABLE_MASK |
			ENETSW_PORTOV_LINKUP_MASK;

		if (speed == 1000)
			override |= ENETSW_IMPOV_1000_MASK;
		else if (speed == 100)
			override |= ENETSW_IMPOV_100_MASK;
		if (duplex)
			override |= ENETSW_IMPOV_FDX_MASK;

		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
	}

	priv->swphy_poll.expires = jiffies + HZ;
	add_timer(&priv->swphy_poll);
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enetsw_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i, ret;
	unsigned int size;
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	/* mask all interrupts and request them */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
			  0, dev->name, dev);
	if (ret)
		goto out_freeirq;

	if (priv->irq_tx != -1) {
		ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
				  0, dev->name, dev);
		if (ret)
			goto out_freeirq_rx;
	}

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate tx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with buffers */
	priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
			       GFP_KERNEL);
	if (!priv->rx_buf) {
		dev_err(kdev, "cannot allocate rx buffer queue\n");
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* disable all ports */
	for (i = 0; i < priv->num_ports; i++) {
		enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
			      ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
			      ENETSW_PTCTRL_TXDIS_MASK,
			      ENETSW_PTCTRL_REG(i));

		priv->sw_port_link[i] = 0;
	}

	/* reset mib */
	val = enetsw_readb(priv, ENETSW_GMCR_REG);
	val |= ENETSW_GMCR_RST_MIB_MASK;
	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
	mdelay(1);
	val &= ~ENETSW_GMCR_RST_MIB_MASK;
	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
	mdelay(1);

	/* force CPU port state */
	val = enetsw_readb(priv, ENETSW_IMPOV_REG);
	val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK;
	enetsw_writeb(priv, val, ENETSW_IMPOV_REG);

	/* enable switch forward engine */
	val = enetsw_readb(priv, ENETSW_SWMODE_REG);
	val |= ENETSW_SWMODE_FWD_EN_MASK;
	enetsw_writeb(priv, val, ENETSW_SWMODE_REG);

	/* enable jumbo on all ports */
	enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
	enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);

	/* initialize flow control buffer allocation */
	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
			ENETDMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm_enet_refill_rx(dev, false)) {
		dev_err(kdev, "cannot allocate rx buffer queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	enet_dmas_writel(priv, priv->rx_desc_dma,
			 ENETDMAS_RSTART_REG, priv->rx_chan);
	enet_dmas_writel(priv, priv->tx_desc_dma,
			 ENETDMAS_RSTART_REG, priv->tx_chan);

	/* clear remaining state ram for rx & tx channel */
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);

	/* set dma maximum burst len */
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->tx_chan);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel
	 */
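	/* ensure the descriptor ring writes above are visible to the DMA
	 * engine before it is enabled
	 */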
	wmb();
	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
			 ENETDMAC_CHANCFG, priv->rx_chan);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR, priv->tx_chan);

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IRMASK, priv->tx_chan);

	netif_carrier_on(dev);
	netif_start_queue(dev);

	/* apply override config for bypass_link ports here. */
	for (i = 0; i < priv->num_ports; i++) {
		struct bcm63xx_enetsw_port *port;
		u8 override;

		port = &priv->used_ports[i];
		if (!port->used)
			continue;

		if (!port->bypass_link)
			continue;

		override = ENETSW_PORTOV_ENABLE_MASK |
			ENETSW_PORTOV_LINKUP_MASK;

		switch (port->force_speed) {
		case 1000:
			override |= ENETSW_IMPOV_1000_MASK;
			break;
		case 100:
			override |= ENETSW_IMPOV_100_MASK;
			break;
		case 10:
			break;
		default:
			pr_warn("invalid forced speed on port %s: assume 10\n",
				port->name);
			break;
		}

		if (port->force_duplex_full)
			override |= ENETSW_IMPOV_FDX_MASK;

		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
	}

	/* start phy polling timer: it fires immediately, then re-arms
	 * itself once per second
	 */
	timer_setup(&priv->swphy_poll, swphy_poll_timer, 0);
	mod_timer(&priv->swphy_poll, jiffies);
	return 0;

out:
	bcm_enet_free_rx_buf_ring(kdev, priv);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	return ret;
}

/* stop callback */
static int bcm_enetsw_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	del_timer_sync(&priv->swphy_poll);
	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1, 0);

	/* free the rx buffer ring */
	bcm_enet_free_rx_buf_ring(kdev, priv);

	/* free remaining allocated memory */
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);

	/* reset BQL after forced tx reclaim to prevent kernel panic */
	netdev_reset_queue(dev);

	return 0;
}

/* try to sort out phy external status by walking the used_port field
 * in the bcm_enet_priv structure. in case the phy address is not
 * assigned to any physical port on the switch, assume it is external
 * (and yell at the user).
 */
static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
{
	int i;

	for (i = 0; i < priv->num_ports; ++i) {
		if (!priv->used_ports[i].used)
			continue;
		if (priv->used_ports[i].phy_id == phy_id)
			return bcm_enet_port_is_rgmii(i);
	}

	printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
		    phy_id);
	return 1;
}

/* can't use bcmenet_sw_mdio_read directly as we need to sort out
 * external/internal status of the given phy_id first.
 */
static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id,
				    int location)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	return bcmenet_sw_mdio_read(priv,
				    bcm_enetsw_phy_is_external(priv, phy_id),
				    phy_id, location);
}

/* can't use bcmenet_sw_mdio_write directly as we need to sort out
 * external/internal status of the given phy_id first.
 */
static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id,
				      int location,
				      int val)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
			      phy_id, location, val);
}

static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_if_info mii;

	mii.dev = dev;
	mii.mdio_read = bcm_enetsw_mii_mdio_read;
	mii.mdio_write = bcm_enetsw_mii_mdio_write;
	mii.phy_id = 0;
	mii.phy_id_mask = 0x3f;
	mii.reg_num_mask = 0x1f;
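	/* phy_id above is only the default reported by SIOCGMIIPHY;
	 * register accesses use the phy address carried in the request
	 */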
	return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
}

static const struct net_device_ops bcm_enetsw_ops = {
	.ndo_open		= bcm_enetsw_open,
	.ndo_stop		= bcm_enetsw_stop,
	.ndo_start_xmit		= bcm_enet_start_xmit,
	.ndo_change_mtu		= bcm_enet_change_mtu,
	.ndo_eth_ioctl		= bcm_enetsw_ioctl,
};

static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
	{ "rx_packets", DEV_STAT(rx_packets), -1 },
	{ "tx_packets",	DEV_STAT(tx_packets), -1 },
	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
	{ "rx_errors", DEV_STAT(rx_errors), -1 },
	{ "tx_errors", DEV_STAT(tx_errors), -1 },
	{ "rx_dropped",	DEV_STAT(rx_dropped), -1 },
	{ "tx_dropped",	DEV_STAT(tx_dropped), -1 },

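	/* the switch MIB counts from the switch's point of view: frames the
	 * host transmits show up on the IMP port's RX counters and vice
	 * versa, which is why the tx_* strings below read ETHSW_MIB_RX_*
	 * registers (our reading of the register layout)
	 */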
	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT },
	{ "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT },
	{ "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023},
	{ "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max),
	  ETHSW_MIB_RX_1024_1522 },
	{ "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047),
	  ETHSW_MIB_RX_1523_2047 },
	{ "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095),
	  ETHSW_MIB_RX_2048_4095 },
	{ "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191),
	  ETHSW_MIB_RX_4096_8191 },
	{ "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728),
	  ETHSW_MIB_RX_8192_9728 },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR },
	{ "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC },
	{ "tx_dropped",	GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP },
	{ "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE },

	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT },
	{ "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE },
	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS },
};

#define BCM_ENETSW_STATS_LEN	\
	(sizeof(bcm_enetsw_gstrings_stats) / sizeof(struct bcm_enet_stats))

static void bcm_enetsw_get_strings(struct net_device *netdev,
				   u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_enetsw_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static int bcm_enetsw_get_sset_count(struct net_device *netdev,
				     int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_ENETSW_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
				   struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
	strscpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}

static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
					 struct ethtool_stats *stats,
					 u64 *data)
{
	struct bcm_enet_priv *priv;
	int i;

	priv = netdev_priv(netdev);

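	/* first pass: snapshot the hardware MIB counters into the private
	 * mib copy
	 */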
	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		u32 lo, hi;
		char *p;
		int reg;

		s = &bcm_enetsw_gstrings_stats[i];

		reg = s->mib_reg;
		if (reg == -1)
			continue;

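		/* 64-bit MIB counters span two consecutive 32-bit
		 * registers, low word first
		 */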
		lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
		p = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64)) {
			hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
			*(u64 *)p = ((u64)hi << 32 | lo);
		} else {
			*(u32 *)p = lo;
		}
	}

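	/* second pass: emit device stats and the snapshotted MIB values in
	 * string order
	 */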
	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		char *p;

		s = &bcm_enetsw_gstrings_stats[i];

		if (s->mib_reg == -1)
			p = (char *)&netdev->stats + s->stat_offset;
		else
			p = (char *)priv + s->stat_offset;

		data[i] = (s->sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
	}
}

static void
bcm_enetsw_get_ringparam(struct net_device *dev,
			 struct ethtool_ringparam *ering,
			 struct kernel_ethtool_ringparam *kernel_ering,
			 struct netlink_ext_ack *extack)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	/* rx/tx ring is actually only limited by memory */
	ering->rx_max_pending = 8192;
	ering->tx_max_pending = 8192;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->rx_pending = priv->rx_ring_size;
	ering->tx_pending = priv->tx_ring_size;
}

static int
bcm_enetsw_set_ringparam(struct net_device *dev,
			 struct ethtool_ringparam *ering,
			 struct kernel_ethtool_ringparam *kernel_ering,
			 struct netlink_ext_ack *extack)
{
	struct bcm_enet_priv *priv;
	int was_running;

	priv = netdev_priv(dev);

	was_running = 0;
	if (netif_running(dev)) {
		bcm_enetsw_stop(dev);
		was_running = 1;
	}

	priv->rx_ring_size = ering->rx_pending;
	priv->tx_ring_size = ering->tx_pending;

	if (was_running) {
		int err;

		err = bcm_enetsw_open(dev);
		if (err)
			dev_close(dev);
	}
	return 0;
}

static const struct ethtool_ops bcm_enetsw_ethtool_ops = {
	.get_strings		= bcm_enetsw_get_strings,
	.get_sset_count		= bcm_enetsw_get_sset_count,
	.get_ethtool_stats      = bcm_enetsw_get_ethtool_stats,
	.get_drvinfo		= bcm_enetsw_get_drvinfo,
	.get_ringparam		= bcm_enetsw_get_ringparam,
	.set_ringparam		= bcm_enetsw_set_ringparam,
};

/* allocate netdevice, request register memory and register device. */
static int bcm_enetsw_probe(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct bcm63xx_enetsw_platform_data *pd;
	struct resource *res_mem;
	int ret, irq_rx, irq_tx;

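	/* the shared DMA register space is mapped by bcm_enet_shared_probe;
	 * defer until it has run
	 */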
	if (!bcm_enet_shared_base[0])
		return -EPROBE_DEFER;

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq_rx = platform_get_irq(pdev, 0);
	irq_tx = platform_get_irq(pdev, 1);
	if (!res_mem || irq_rx < 0)
		return -ENODEV;

	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;
	priv = netdev_priv(dev);

	/* initialize default and fetch platform data */
	priv->enet_is_sw = true;
	priv->irq_rx = irq_rx;
	priv->irq_tx = irq_tx;
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
	priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
	priv->rx_buf_offset = NET_SKB_PAD + NET_IP_ALIGN;

	pd = dev_get_platdata(&pdev->dev);
	if (pd) {
		eth_hw_addr_set(dev, pd->mac_addr);
		memcpy(priv->used_ports, pd->used_ports,
		       sizeof(pd->used_ports));
		priv->num_ports = pd->num_ports;
		priv->dma_has_sram = pd->dma_has_sram;
		priv->dma_chan_en_mask = pd->dma_chan_en_mask;
		priv->dma_chan_int_mask = pd->dma_chan_int_mask;
		priv->dma_chan_width = pd->dma_chan_width;
	}

	ret = bcm_enet_change_mtu(dev, dev->mtu);
	if (ret)
		goto out;

	priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto out;
	}

	priv->mac_clk = devm_clk_get(&pdev->dev, "enetsw");
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out;
	}
	ret = clk_prepare_enable(priv->mac_clk);
	if (ret)
		goto out;

	priv->rx_chan = 0;
	priv->tx_chan = 1;
	spin_lock_init(&priv->rx_lock);

	/* init rx timeout (used for oom) */
	timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);

	/* register netdevice */
	dev->netdev_ops = &bcm_enetsw_ops;
	netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16);
	dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
	SET_NETDEV_DEV(dev, &pdev->dev);

	spin_lock_init(&priv->enetsw_mdio_lock);

	ret = register_netdev(dev);
	if (ret)
		goto out_disable_clk;

	netif_carrier_off(dev);
	platform_set_drvdata(pdev, dev);
	priv->pdev = pdev;
	priv->net_dev = dev;

	return 0;

out_disable_clk:
	clk_disable_unprepare(priv->mac_clk);
out:
	free_netdev(dev);
	return ret;
}

/* exit func, stops hardware and unregisters netdevice */
static int bcm_enetsw_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
	unregister_netdev(dev);

	clk_disable_unprepare(priv->mac_clk);

	free_netdev(dev);
	return 0;
}

static struct platform_driver bcm63xx_enetsw_driver = {
	.probe	= bcm_enetsw_probe,
	.remove	= bcm_enetsw_remove,
	.driver	= {
		.name	= "bcm63xx_enetsw",
	},
};

/* reserve & remap memory space shared between all macs */
static int bcm_enet_shared_probe(struct platform_device *pdev)
{
	void __iomem *p[3];
	unsigned int i;

	memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));

	for (i = 0; i < 3; i++) {
		p[i] = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(p[i]))
			return PTR_ERR(p[i]);
	}

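	/* publish the mappings only once all three ioremaps have succeeded,
	 * so a partial failure leaves every base pointer NULL
	 */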
	memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base));

	return 0;
}

/* this "shared" driver is needed because both macs share a single
 * address space
 */
struct platform_driver bcm63xx_enet_shared_driver = {
	.probe	= bcm_enet_shared_probe,
	.driver	= {
		.name	= "bcm63xx_enet_shared",
	},
};

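/* keep the shared driver first: the mac and switch probes defer until it
 * has mapped the common DMA registers
 */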
static struct platform_driver * const drivers[] = {
	&bcm63xx_enet_shared_driver,
	&bcm63xx_enet_driver,
	&bcm63xx_enetsw_driver,
};

/* entry point */
static int __init bcm_enet_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}

static void __exit bcm_enet_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}

module_init(bcm_enet_init);
module_exit(bcm_enet_exit);

MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
MODULE_LICENSE("GPL");