// SPDX-License-Identifier: GPL-2.0-only
/*
 * ks8842.c timberdale KS8842 ethernet driver
 * Copyright (c) 2009 Intel Corporation
 */

/* Supports:
 * The Micrel KS8842 behind the timberdale FPGA
 * The genuine Micrel KS8841/42 device with ISA 16/32bit bus interface
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/ks8842.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#define DRV_NAME "ks8842"

/* Timberdale specific Registers */
#define REG_TIMB_RST		0x1c
#define REG_TIMB_FIFO		0x20
#define REG_TIMB_ISR		0x24
#define REG_TIMB_IER		0x28
#define REG_TIMB_IAR		0x2C
#define REQ_TIMB_DMA_RESUME	0x30

/* KS8842 registers */

#define REG_SELECT_BANK 0x0e

/* bank 0 registers */
#define REG_QRFCR	0x04

/* bank 2 registers */
#define REG_MARL	0x00
#define REG_MARM	0x02
#define REG_MARH	0x04

/* bank 3 registers */
#define REG_GRR		0x06

/* bank 16 registers */
#define REG_TXCR	0x00
#define REG_TXSR	0x02
#define REG_RXCR	0x04
#define REG_TXMIR	0x08
#define REG_RXMIR	0x0A

/* bank 17 registers */
#define REG_TXQCR	0x00
#define REG_RXQCR	0x02
#define REG_TXFDPR	0x04
#define REG_RXFDPR	0x06
#define REG_QMU_DATA_LO 0x08
#define REG_QMU_DATA_HI 0x0A

/* bank 18 registers */
#define REG_IER		0x00
#define IRQ_LINK_CHANGE	0x8000
#define IRQ_TX		0x4000
#define IRQ_RX		0x2000
#define IRQ_RX_OVERRUN	0x0800
#define IRQ_TX_STOPPED	0x0200
#define IRQ_RX_STOPPED	0x0100
#define IRQ_RX_ERROR	0x0080
#define ENABLED_IRQS	(IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \
		IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
/* When running via timberdale in DMA mode, the RX interrupt should be
   enabled in the KS8842, but not in the FPGA IP, since the IP handles
   RX DMA internally.
   TX interrupts are not needed; TX is handled by the FPGA and the driver
   is notified via DMA callbacks.
*/
#define ENABLED_IRQS_DMA_IP	(IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \
	IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
#define ENABLED_IRQS_DMA	(ENABLED_IRQS_DMA_IP | IRQ_RX)
#define REG_ISR		0x02
#define REG_RXSR	0x04
#define RXSR_VALID	0x8000
#define RXSR_BROADCAST	0x80
#define RXSR_MULTICAST	0x40
#define RXSR_UNICAST	0x20
#define RXSR_FRAMETYPE	0x08
#define RXSR_TOO_LONG	0x04
#define RXSR_RUNT	0x02
#define RXSR_CRC_ERROR	0x01
#define RXSR_ERROR	(RXSR_TOO_LONG | RXSR_RUNT | RXSR_CRC_ERROR)

/* bank 32 registers */
#define REG_SW_ID_AND_ENABLE	0x00
#define REG_SGCR1		0x02
#define REG_SGCR2		0x04
#define REG_SGCR3		0x06

/* bank 39 registers */
#define REG_MACAR1		0x00
#define REG_MACAR2		0x02
#define REG_MACAR3		0x04

/* bank 45 registers */
#define REG_P1MBCR		0x00
#define REG_P1MBSR		0x02

/* bank 46 registers */
#define REG_P2MBCR		0x00
#define REG_P2MBSR		0x02

/* bank 48 registers */
#define REG_P1CR2		0x02

/* bank 49 registers */
#define REG_P1CR4		0x02
#define REG_P1SR		0x04

/* flags passed by platform_device for configuration */
#define	MICREL_KS884X		0x01	/* 0=Timberdale(FPGA), 1=Micrel */
#define	KS884X_16BIT		0x02	/* 1=16bit, 0=32bit */

#define DMA_BUFFER_SIZE		2048

struct ks8842_tx_dma_ctl {
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *adesc;
	void *buf;
	struct scatterlist sg;
	int channel;
};

struct ks8842_rx_dma_ctl {
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *adesc;
	struct sk_buff  *skb;
	struct scatterlist sg;
	struct tasklet_struct tasklet;
	int channel;
};

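/* DMA is used only when the platform assigned both a TX and an RX DMA
 * channel; otherwise the driver falls back to PIO.
 */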
#define KS8842_USE_DMA(adapter) (((adapter)->dma_tx.channel != -1) && \
	 ((adapter)->dma_rx.channel != -1))

struct ks8842_adapter {
	void __iomem	*hw_addr;
	int		irq;
	unsigned long	conf_flags;	/* copy of platform_device config */
	struct tasklet_struct	tasklet;
	spinlock_t	lock; /* spinlock to be interrupt safe */
	struct work_struct timeout_work;
	struct net_device *netdev;
	struct device *dev;
	struct ks8842_tx_dma_ctl	dma_tx;
	struct ks8842_rx_dma_ctl	dma_rx;
};

static void ks8842_dma_rx_cb(void *data);
static void ks8842_dma_tx_cb(void *data);

static inline void ks8842_resume_dma(struct ks8842_adapter *adapter)
{
	iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME);
}

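/* The KS8842 register map is banked: each accessor below first writes the
 * bank number to the bank select register and then touches the offset
 * within that bank.  Accesses are therefore not atomic; the IRQ paths save
 * and restore the currently selected bank around their register accesses.
 */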
static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank)
{
	iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK);
}

static inline void ks8842_write8(struct ks8842_adapter *adapter, u16 bank,
	u8 value, int offset)
{
	ks8842_select_bank(adapter, bank);
	iowrite8(value, adapter->hw_addr + offset);
}

static inline void ks8842_write16(struct ks8842_adapter *adapter, u16 bank,
	u16 value, int offset)
{
	ks8842_select_bank(adapter, bank);
	iowrite16(value, adapter->hw_addr + offset);
}

static inline void ks8842_enable_bits(struct ks8842_adapter *adapter, u16 bank,
	u16 bits, int offset)
{
	u16 reg;
	ks8842_select_bank(adapter, bank);
	reg = ioread16(adapter->hw_addr + offset);
	reg |= bits;
	iowrite16(reg, adapter->hw_addr + offset);
}

static inline void ks8842_clear_bits(struct ks8842_adapter *adapter, u16 bank,
	u16 bits, int offset)
{
	u16 reg;
	ks8842_select_bank(adapter, bank);
	reg = ioread16(adapter->hw_addr + offset);
	reg &= ~bits;
	iowrite16(reg, adapter->hw_addr + offset);
}

static inline void ks8842_write32(struct ks8842_adapter *adapter, u16 bank,
	u32 value, int offset)
{
	ks8842_select_bank(adapter, bank);
	iowrite32(value, adapter->hw_addr + offset);
}

static inline u8 ks8842_read8(struct ks8842_adapter *adapter, u16 bank,
	int offset)
{
	ks8842_select_bank(adapter, bank);
	return ioread8(adapter->hw_addr + offset);
}

static inline u16 ks8842_read16(struct ks8842_adapter *adapter, u16 bank,
	int offset)
{
	ks8842_select_bank(adapter, bank);
	return ioread16(adapter->hw_addr + offset);
}

static inline u32 ks8842_read32(struct ks8842_adapter *adapter, u16 bank,
	int offset)
{
	ks8842_select_bank(adapter, bank);
	return ioread32(adapter->hw_addr + offset);
}

static void ks8842_reset(struct ks8842_adapter *adapter)
{
	if (adapter->conf_flags & MICREL_KS884X) {
		ks8842_write16(adapter, 3, 1, REG_GRR);
		msleep(10);
		iowrite16(0, adapter->hw_addr + REG_GRR);
	} else {
		/* The KS8842 goes haywire when doing a software reset;
		 * a workaround in the timberdale IP is implemented to
		 * do a hardware reset instead:
		 * ks8842_write16(adapter, 3, 1, REG_GRR);
		 * msleep(10);
		 * iowrite16(0, adapter->hw_addr + REG_GRR);
		 */
		iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST);
		msleep(20);
	}
}

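/* Bit 2 of the port 1 MII basic status register is the link-status bit. */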
static void ks8842_update_link_status(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	/* check the status of the link */
	if (ks8842_read16(adapter, 45, REG_P1MBSR) & 0x4) {
		netif_carrier_on(netdev);
		netif_wake_queue(netdev);
	} else {
		netif_stop_queue(netdev);
		netif_carrier_off(netdev);
	}
}

static void ks8842_enable_tx(struct ks8842_adapter *adapter)
{
	ks8842_enable_bits(adapter, 16, 0x01, REG_TXCR);
}

static void ks8842_disable_tx(struct ks8842_adapter *adapter)
{
	ks8842_clear_bits(adapter, 16, 0x01, REG_TXCR);
}

static void ks8842_enable_rx(struct ks8842_adapter *adapter)
{
	ks8842_enable_bits(adapter, 16, 0x01, REG_RXCR);
}

static void ks8842_disable_rx(struct ks8842_adapter *adapter)
{
	ks8842_clear_bits(adapter, 16, 0x01, REG_RXCR);
}

static void ks8842_reset_hw(struct ks8842_adapter *adapter)
{
	/* reset the HW */
	ks8842_reset(adapter);

	/* Enable QMU Transmit flow control / transmit padding / Transmit CRC */
	ks8842_write16(adapter, 16, 0x000E, REG_TXCR);

	/* enable the receiver, uni + multi + broadcast + flow ctrl
		+ crc strip */
	ks8842_write16(adapter, 16, 0x8 | 0x20 | 0x40 | 0x80 | 0x400,
		REG_RXCR);

	/* TX frame pointer autoincrement */
	ks8842_write16(adapter, 17, 0x4000, REG_TXFDPR);

	/* RX frame pointer autoincrement */
	ks8842_write16(adapter, 17, 0x4000, REG_RXFDPR);

	/* RX 2 kb high watermark */
	ks8842_write16(adapter, 0, 0x1000, REG_QRFCR);

	/* aggressive back off in half duplex */
	ks8842_enable_bits(adapter, 32, 1 << 8, REG_SGCR1);

	/* enable no excessive collision drop */
	ks8842_enable_bits(adapter, 32, 1 << 3, REG_SGCR2);

	/* Enable port 1 force flow control / back pressure / transmit / recv */
	ks8842_write16(adapter, 48, 0x1E07, REG_P1CR2);

	/* restart port auto-negotiation */
	ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4);

	/* Enable the transmitter */
	ks8842_enable_tx(adapter);

	/* Enable the receiver */
	ks8842_enable_rx(adapter);

	/* clear all interrupts */
	ks8842_write16(adapter, 18, 0xffff, REG_ISR);

	/* enable interrupts */
	if (KS8842_USE_DMA(adapter)) {
		/* When running in DMA mode the RX interrupt is not enabled in
		   timberdale because RX data is received via DMA callbacks;
		   it must still be enabled in the KS8842 because it indicates
		   to timberdale when there is RX data for its DMA FIFOs */
		iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER);
		ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
	} else {
		if (!(adapter->conf_flags & MICREL_KS884X))
			iowrite16(ENABLED_IRQS,
				adapter->hw_addr + REG_TIMB_IER);
		ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
	}
	/* enable the switch */
	ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE);
}

static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
{
	int i;
	u16 mac;

	for (i = 0; i < ETH_ALEN; i++)
		dest[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i);

	if (adapter->conf_flags & MICREL_KS884X) {
		/*
		 * the sequence for saving the MAC address differs between
		 * the MAC and the switch.
		 */

		mac = ks8842_read16(adapter, 2, REG_MARL);
		ks8842_write16(adapter, 39, mac, REG_MACAR3);
		mac = ks8842_read16(adapter, 2, REG_MARM);
		ks8842_write16(adapter, 39, mac, REG_MACAR2);
		mac = ks8842_read16(adapter, 2, REG_MARH);
		ks8842_write16(adapter, 39, mac, REG_MACAR1);
	} else {

		/* make sure the switch port uses the same MAC as the QMU */
		mac = ks8842_read16(adapter, 2, REG_MARL);
		ks8842_write16(adapter, 39, mac, REG_MACAR1);
		mac = ks8842_read16(adapter, 2, REG_MARM);
		ks8842_write16(adapter, 39, mac, REG_MACAR2);
		mac = ks8842_read16(adapter, 2, REG_MARH);
		ks8842_write16(adapter, 39, mac, REG_MACAR3);
	}
}

static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
{
	unsigned long flags;
	unsigned i;

	spin_lock_irqsave(&adapter->lock, flags);
	for (i = 0; i < ETH_ALEN; i++) {
		ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
		if (!(adapter->conf_flags & MICREL_KS884X))
			ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
				REG_MACAR1 + i);
	}

	if (adapter->conf_flags & MICREL_KS884X) {
		/*
		 * the sequence for saving the MAC address differs between
		 * the MAC and the switch.
		 */

		u16 mac;

		mac = ks8842_read16(adapter, 2, REG_MARL);
		ks8842_write16(adapter, 39, mac, REG_MACAR3);
		mac = ks8842_read16(adapter, 2, REG_MARM);
		ks8842_write16(adapter, 39, mac, REG_MACAR2);
		mac = ks8842_read16(adapter, 2, REG_MARH);
		ks8842_write16(adapter, 39, mac, REG_MACAR1);
	}
	spin_unlock_irqrestore(&adapter->lock, flags);
}

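/* TXMIR reports the available space in the TX FIFO; only the low 13 bits
 * of the register are valid.
 */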
static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter)
{
	return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
}

static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx;
	u8 *buf = ctl->buf;

	if (ctl->adesc) {
		netdev_dbg(netdev, "%s: TX ongoing\n", __func__);
		/* transfer ongoing */
		return NETDEV_TX_BUSY;
	}

	sg_dma_len(&ctl->sg) = skb->len + sizeof(u32);

	/* copy data to the TX buffer */
	/* the control word, enable IRQ, port 1 and the length */
	*buf++ = 0x00;
	*buf++ = 0x01; /* Port 1 */
	*buf++ = skb->len & 0xff;
	*buf++ = (skb->len >> 8) & 0xff;
	skb_copy_from_linear_data(skb, buf, skb->len);

	dma_sync_single_range_for_device(adapter->dev,
		sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg),
		DMA_TO_DEVICE);

	/* make sure the length is a multiple of 4 */
	if (sg_dma_len(&ctl->sg) % 4)
		sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;

	ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
		&ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!ctl->adesc)
		return NETDEV_TX_BUSY;

	ctl->adesc->callback_param = netdev;
	ctl->adesc->callback = ks8842_dma_tx_cb;
	ctl->adesc->tx_submit(ctl->adesc);

	netdev->stats.tx_bytes += skb->len;

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

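/* PIO transmit: write the control word (interrupt-on-completion, port 1)
 * and the frame length to the QMU data register(s), copy the frame a word
 * at a time, then set TXQCR to enqueue the packet.
 */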
static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	int len = skb->len;

	netdev_dbg(netdev, "%s: len %u head %p data %p tail %p end %p\n",
		__func__, skb->len, skb->head, skb->data,
		skb_tail_pointer(skb), skb_end_pointer(skb));

	/* check FIFO buffer space, we need space for CRC and command bits */
	if (ks8842_tx_fifo_space(adapter) < len + 8)
		return NETDEV_TX_BUSY;

	if (adapter->conf_flags & KS884X_16BIT) {
		u16 *ptr16 = (u16 *)skb->data;
		ks8842_write16(adapter, 17, 0x8000 | 0x100, REG_QMU_DATA_LO);
		ks8842_write16(adapter, 17, (u16)len, REG_QMU_DATA_HI);
		netdev->stats.tx_bytes += len;

		/* copy buffer */
		while (len > 0) {
			iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_LO);
			iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_HI);
			len -= sizeof(u32);
		}
	} else {

		u32 *ptr = (u32 *)skb->data;
		u32 ctrl;
		/* the control word, enable IRQ, port 1 and the length */
		ctrl = 0x8000 | 0x100 | (len << 16);
		ks8842_write32(adapter, 17, ctrl, REG_QMU_DATA_LO);

		netdev->stats.tx_bytes += len;

		/* copy buffer */
		while (len > 0) {
			iowrite32(*ptr, adapter->hw_addr + REG_QMU_DATA_LO);
			len -= sizeof(u32);
			ptr++;
		}
	}

	/* enqueue packet */
	ks8842_write16(adapter, 17, 1, REG_TXQCR);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static void ks8842_update_rx_err_counters(struct net_device *netdev, u32 status)
{
	netdev_dbg(netdev, "RX error, status: %x\n", status);

	netdev->stats.rx_errors++;
	if (status & RXSR_TOO_LONG)
		netdev->stats.rx_length_errors++;
	if (status & RXSR_CRC_ERROR)
		netdev->stats.rx_crc_errors++;
	if (status & RXSR_RUNT)
		netdev->stats.rx_frame_errors++;
}

static void ks8842_update_rx_counters(struct net_device *netdev, u32 status,
	int len)
{
	netdev_dbg(netdev, "RX packet, len: %d\n", len);

	netdev->stats.rx_packets++;
	netdev->stats.rx_bytes += len;
	if (status & RXSR_MULTICAST)
		netdev->stats.multicast++;
}

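/* Allocate a fresh RX skb, map it for DMA and submit a new slave transfer;
 * called to (re)arm reception after each completed RX DMA.
 */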
static int __ks8842_start_new_rx_dma(struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
	struct scatterlist *sg = &ctl->sg;
	int err;

	ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE);
	if (ctl->skb) {
		sg_init_table(sg, 1);
		sg_dma_address(sg) = dma_map_single(adapter->dev,
			ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(adapter->dev, sg_dma_address(sg))) {
			err = -ENOMEM;
			sg_dma_address(sg) = 0;
			goto out;
		}

		sg_dma_len(sg) = DMA_BUFFER_SIZE;

		ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
			sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);

		if (!ctl->adesc) {
			err = -ENOMEM;
			goto out;
		}

		ctl->adesc->callback_param = netdev;
		ctl->adesc->callback = ks8842_dma_rx_cb;
		ctl->adesc->tx_submit(ctl->adesc);
	} else {
		err = -ENOMEM;
		sg_dma_address(sg) = 0;
		goto out;
	}

	return 0;
out:
	if (sg_dma_address(sg))
		dma_unmap_single(adapter->dev, sg_dma_address(sg),
			DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
	sg_dma_address(sg) = 0;
	dev_kfree_skb(ctl->skb);
	ctl->skb = NULL;

	printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err);
	return err;
}

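/* Runs after an RX DMA completion: re-arms the next transfer, then parses
 * the 4-byte status word that precedes the received frame in the buffer.
 */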
static void ks8842_rx_frame_dma_tasklet(struct tasklet_struct *t)
{
	struct ks8842_adapter *adapter = from_tasklet(adapter, t, dma_rx.tasklet);
	struct net_device *netdev = adapter->netdev;
	struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
	struct sk_buff *skb = ctl->skb;
	dma_addr_t addr = sg_dma_address(&ctl->sg);
	u32 status;

	ctl->adesc = NULL;

	/* kick next transfer going */
	__ks8842_start_new_rx_dma(netdev);

	/* now handle the data we got */
	dma_unmap_single(adapter->dev, addr, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);

	status = *((u32 *)skb->data);

	netdev_dbg(netdev, "%s - rx_data: status: %x\n",
		__func__, status & 0xffff);

	/* check the status */
	if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
		int len = (status >> 16) & 0x7ff;

		ks8842_update_rx_counters(netdev, status, len);

		/* reserve 4 bytes which is the status word */
		skb_reserve(skb, 4);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, netdev);
		netif_rx(skb);
	} else {
		ks8842_update_rx_err_counters(netdev, status);
		dev_kfree_skb(skb);
	}
}

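/* PIO receive: the RX status word and the frame length are read from the
 * QMU data register(s) ahead of the frame data; only the low 11 bits of
 * the length are valid.
 */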
static void ks8842_rx_frame(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	u32 status;
	int len;

	if (adapter->conf_flags & KS884X_16BIT) {
		status = ks8842_read16(adapter, 17, REG_QMU_DATA_LO);
		len = ks8842_read16(adapter, 17, REG_QMU_DATA_HI);
		netdev_dbg(netdev, "%s - rx_data: status: %x\n",
			   __func__, status);
	} else {
		status = ks8842_read32(adapter, 17, REG_QMU_DATA_LO);
		len = (status >> 16) & 0x7ff;
		status &= 0xffff;
		netdev_dbg(netdev, "%s - rx_data: status: %x\n",
			   __func__, status);
	}

	/* check the status */
	if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
		struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len + 3);

		if (skb) {

			ks8842_update_rx_counters(netdev, status, len);

			if (adapter->conf_flags & KS884X_16BIT) {
				u16 *data16 = skb_put(skb, len);
				ks8842_select_bank(adapter, 17);
				while (len > 0) {
					*data16++ = ioread16(adapter->hw_addr +
						REG_QMU_DATA_LO);
					*data16++ = ioread16(adapter->hw_addr +
						REG_QMU_DATA_HI);
					len -= sizeof(u32);
				}
			} else {
				u32 *data = skb_put(skb, len);

				ks8842_select_bank(adapter, 17);
				while (len > 0) {
					*data++ = ioread32(adapter->hw_addr +
						REG_QMU_DATA_LO);
					len -= sizeof(u32);
				}
			}
			skb->protocol = eth_type_trans(skb, netdev);
			netif_rx(skb);
		} else
			netdev->stats.rx_dropped++;
	} else
		ks8842_update_rx_err_counters(netdev, status);

	/* set high watermark to 3K */
	ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR);

	/* release the frame */
	ks8842_write16(adapter, 17, 0x01, REG_RXQCR);

	/* set high watermark to 2K */
	ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR);
}

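/* Drain the RX FIFO: the low 13 bits of RXMIR stay non-zero as long as
 * received data is pending.
 */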
static void ks8842_handle_rx(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
	netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data);
	while (rx_data) {
		ks8842_rx_frame(netdev, adapter);
		rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
	}
}

static void ks8842_handle_tx(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	u16 sr = ks8842_read16(adapter, 16, REG_TXSR);
	netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr);
	netdev->stats.tx_packets++;
	if (netif_queue_stopped(netdev))
		netif_wake_queue(netdev);
}

static void ks8842_handle_rx_overrun(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	netdev_dbg(netdev, "%s: entry\n", __func__);
	netdev->stats.rx_errors++;
	netdev->stats.rx_fifo_errors++;
}

static void ks8842_tasklet(struct tasklet_struct *t)
{
	struct ks8842_adapter *adapter = from_tasklet(adapter, t, tasklet);
	struct net_device *netdev = adapter->netdev;
	u16 isr;
	unsigned long flags;
	u16 entry_bank;

	/* read current bank to be able to set it back */
	spin_lock_irqsave(&adapter->lock, flags);
	entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK);
	spin_unlock_irqrestore(&adapter->lock, flags);

	isr = ks8842_read16(adapter, 18, REG_ISR);
	netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);

	/* when running in DMA mode, do not ack RX interrupts; they are handled
	   internally by timberdale, otherwise its DMA FIFOs would stop
	*/
	if (KS8842_USE_DMA(adapter))
		isr &= ~IRQ_RX;

	/* Ack */
	ks8842_write16(adapter, 18, isr, REG_ISR);

	if (!(adapter->conf_flags & MICREL_KS884X))
		/* Ack in the timberdale IP as well */
		iowrite32(0x1, adapter->hw_addr + REG_TIMB_IAR);

	if (!netif_running(netdev))
		return;

	if (isr & IRQ_LINK_CHANGE)
		ks8842_update_link_status(netdev, adapter);

	/* should not get IRQ_RX when running DMA mode */
	if (isr & (IRQ_RX | IRQ_RX_ERROR) && !KS8842_USE_DMA(adapter))
		ks8842_handle_rx(netdev, adapter);

	/* should only happen when in PIO mode */
	if (isr & IRQ_TX)
		ks8842_handle_tx(netdev, adapter);

	if (isr & IRQ_RX_OVERRUN)
		ks8842_handle_rx_overrun(netdev, adapter);

	if (isr & IRQ_TX_STOPPED) {
		ks8842_disable_tx(adapter);
		ks8842_enable_tx(adapter);
	}

	if (isr & IRQ_RX_STOPPED) {
		ks8842_disable_rx(adapter);
		ks8842_enable_rx(adapter);
	}

	/* re-enable interrupts, put back the bank selection register */
	spin_lock_irqsave(&adapter->lock, flags);
	if (KS8842_USE_DMA(adapter))
		ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
	else
		ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
	iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);

	/* Make sure timberdale continues DMA operations; they are stopped while
	   we are handling the ks8842 because we might change the bank */
	if (KS8842_USE_DMA(adapter))
		ks8842_resume_dma(adapter);

	spin_unlock_irqrestore(&adapter->lock, flags);
}

static irqreturn_t ks8842_irq(int irq, void *devid)
{
	struct net_device *netdev = devid;
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	u16 isr;
	u16 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK);
	irqreturn_t ret = IRQ_NONE;

	isr = ks8842_read16(adapter, 18, REG_ISR);
	netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);

	if (isr) {
		if (KS8842_USE_DMA(adapter))
			/* disable all but RX IRQ, since the FPGA relies on it */
			ks8842_write16(adapter, 18, IRQ_RX, REG_IER);
		else
			/* disable IRQ */
			ks8842_write16(adapter, 18, 0x00, REG_IER);

		/* schedule tasklet */
		tasklet_schedule(&adapter->tasklet);

		ret = IRQ_HANDLED;
	}

	iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);

	/* After an interrupt, tell timberdale to continue DMA operations.
	   DMA is disabled while we are handling the ks8842 because we might
	   change bank */
	ks8842_resume_dma(adapter);

	return ret;
}

static void ks8842_dma_rx_cb(void *data)
{
	struct net_device	*netdev = data;
	struct ks8842_adapter	*adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "RX DMA finished\n");
	/* schedule tasklet */
	if (adapter->dma_rx.adesc)
		tasklet_schedule(&adapter->dma_rx.tasklet);
}

static void ks8842_dma_tx_cb(void *data)
{
	struct net_device		*netdev = data;
	struct ks8842_adapter		*adapter = netdev_priv(netdev);
	struct ks8842_tx_dma_ctl	*ctl = &adapter->dma_tx;

	netdev_dbg(netdev, "TX DMA finished\n");

	if (!ctl->adesc)
		return;

	netdev->stats.tx_packets++;
	ctl->adesc = NULL;

	if (netif_queue_stopped(netdev))
		netif_wake_queue(netdev);
}

static void ks8842_stop_dma(struct ks8842_adapter *adapter)
{
	struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
	struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;

	tx_ctl->adesc = NULL;
	if (tx_ctl->chan)
		dmaengine_terminate_all(tx_ctl->chan);

	rx_ctl->adesc = NULL;
	if (rx_ctl->chan)
		dmaengine_terminate_all(rx_ctl->chan);

	if (sg_dma_address(&rx_ctl->sg))
		dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg),
			DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
	sg_dma_address(&rx_ctl->sg) = 0;

	dev_kfree_skb(rx_ctl->skb);
	rx_ctl->skb = NULL;
}

static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter)
{
	struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
	struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;

	ks8842_stop_dma(adapter);

	if (tx_ctl->chan)
		dma_release_channel(tx_ctl->chan);
	tx_ctl->chan = NULL;

	if (rx_ctl->chan)
		dma_release_channel(rx_ctl->chan);
	rx_ctl->chan = NULL;

	tasklet_kill(&rx_ctl->tasklet);

	if (sg_dma_address(&tx_ctl->sg))
		dma_unmap_single(adapter->dev, sg_dma_address(&tx_ctl->sg),
			DMA_BUFFER_SIZE, DMA_TO_DEVICE);
	sg_dma_address(&tx_ctl->sg) = 0;

	kfree(tx_ctl->buf);
	tx_ctl->buf = NULL;
}

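/* dma_request_channel() filter: match the channel whose id equals the
 * channel number supplied via platform data.
 */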
static bool ks8842_dma_filter_fn(struct dma_chan *chan, void *filter_param)
{
	return chan->chan_id == (long)filter_param;
}

static int ks8842_alloc_dma_bufs(struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
	struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
	int err;

	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_PRIVATE, mask);

	sg_init_table(&tx_ctl->sg, 1);

	tx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
					   (void *)(long)tx_ctl->channel);
	if (!tx_ctl->chan) {
		err = -ENODEV;
		goto err;
	}

	/* allocate DMA buffer */
	tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
	if (!tx_ctl->buf) {
		err = -ENOMEM;
		goto err;
	}

	sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
		tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) {
		err = -ENOMEM;
		sg_dma_address(&tx_ctl->sg) = 0;
		goto err;
	}

	rx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
					   (void *)(long)rx_ctl->channel);
	if (!rx_ctl->chan) {
		err = -ENODEV;
		goto err;
	}

	tasklet_setup(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet);

	return 0;
err:
	ks8842_dealloc_dma_bufs(adapter);
	return err;
}

/* Netdevice operations */

static int ks8842_open(struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	int err;

	netdev_dbg(netdev, "%s - entry\n", __func__);

	if (KS8842_USE_DMA(adapter)) {
		err = ks8842_alloc_dma_bufs(netdev);

		if (!err) {
			/* start RX dma */
			err = __ks8842_start_new_rx_dma(netdev);
			if (err)
				ks8842_dealloc_dma_bufs(adapter);
		}

		if (err) {
			printk(KERN_WARNING DRV_NAME
				": Failed to initiate DMA, running PIO\n");
			ks8842_dealloc_dma_bufs(adapter);
			adapter->dma_rx.channel = -1;
			adapter->dma_tx.channel = -1;
		}
	}

	/* reset the HW */
	ks8842_reset_hw(adapter);

	ks8842_write_mac_addr(adapter, netdev->dev_addr);

	ks8842_update_link_status(netdev, adapter);

	err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME,
		netdev);
	if (err) {
		pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err);
		return err;
	}

	return 0;
}

static int ks8842_close(struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "%s - entry\n", __func__);

	cancel_work_sync(&adapter->timeout_work);

	if (KS8842_USE_DMA(adapter))
		ks8842_dealloc_dma_bufs(adapter);

	/* free the irq */
	free_irq(adapter->irq, netdev);

	/* disable the switch */
	ks8842_write16(adapter, 32, 0x0, REG_SW_ID_AND_ENABLE);

	return 0;
}

static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb,
				     struct net_device *netdev)
{
	int ret;
	struct ks8842_adapter *adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "%s: entry\n", __func__);

	if (KS8842_USE_DMA(adapter)) {
		unsigned long flags;
		ret = ks8842_tx_frame_dma(skb, netdev);
		/* for now only allow one transfer at a time */
		spin_lock_irqsave(&adapter->lock, flags);
		if (adapter->dma_tx.adesc)
			netif_stop_queue(netdev);
		spin_unlock_irqrestore(&adapter->lock, flags);
		return ret;
	}

	ret = ks8842_tx_frame(skb, netdev);

	if (ks8842_tx_fifo_space(adapter) < netdev->mtu + 8)
		netif_stop_queue(netdev);

	return ret;
}

static int ks8842_set_mac(struct net_device *netdev, void *p)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	char *mac = (u8 *)addr->sa_data;

	netdev_dbg(netdev, "%s: entry\n", __func__);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, mac, netdev->addr_len);

	ks8842_write_mac_addr(adapter, mac);
	return 0;
}

static void ks8842_tx_timeout_work(struct work_struct *work)
{
	struct ks8842_adapter *adapter =
		container_of(work, struct ks8842_adapter, timeout_work);
	struct net_device *netdev = adapter->netdev;
	unsigned long flags;

	netdev_dbg(netdev, "%s: entry\n", __func__);

	spin_lock_irqsave(&adapter->lock, flags);

	if (KS8842_USE_DMA(adapter))
		ks8842_stop_dma(adapter);

	/* disable interrupts */
	ks8842_write16(adapter, 18, 0, REG_IER);
	ks8842_write16(adapter, 18, 0xFFFF, REG_ISR);

	netif_stop_queue(netdev);

	spin_unlock_irqrestore(&adapter->lock, flags);

	ks8842_reset_hw(adapter);

	ks8842_write_mac_addr(adapter, netdev->dev_addr);

	ks8842_update_link_status(netdev, adapter);

	if (KS8842_USE_DMA(adapter))
		__ks8842_start_new_rx_dma(netdev);
}

static void ks8842_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "%s: entry\n", __func__);

	schedule_work(&adapter->timeout_work);
}

static const struct net_device_ops ks8842_netdev_ops = {
	.ndo_open		= ks8842_open,
	.ndo_stop		= ks8842_close,
	.ndo_start_xmit		= ks8842_xmit_frame,
	.ndo_set_mac_address	= ks8842_set_mac,
	.ndo_tx_timeout		= ks8842_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr
};

static const struct ethtool_ops ks8842_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
};

static int ks8842_probe(struct platform_device *pdev)
{
	int err = -ENOMEM;
	struct resource *iomem;
	struct net_device *netdev;
	struct ks8842_adapter *adapter;
	struct ks8842_platform_data *pdata = dev_get_platdata(&pdev->dev);
	u16 id;
	unsigned i;

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem) {
		dev_err(&pdev->dev, "Invalid resource\n");
		return -EINVAL;
	}
	if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME))
		goto err_mem_region;

	netdev = alloc_etherdev(sizeof(struct ks8842_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	INIT_WORK(&adapter->timeout_work, ks8842_tx_timeout_work);
	adapter->hw_addr = ioremap(iomem->start, resource_size(iomem));
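	/* the platform carries the configuration bits (MICREL_KS884X,
	 * KS884X_16BIT) in the low, bus-specific bits of the memory
	 * resource flags
	 */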
	adapter->conf_flags = iomem->flags;

	if (!adapter->hw_addr)
		goto err_ioremap;

	adapter->irq = platform_get_irq(pdev, 0);
	if (adapter->irq < 0) {
		err = adapter->irq;
		goto err_get_irq;
	}

	adapter->dev = (pdev->dev.parent) ? pdev->dev.parent : &pdev->dev;

	/* DMA is only supported when accessed via timberdale */
	if (!(adapter->conf_flags & MICREL_KS884X) && pdata &&
		(pdata->tx_dma_channel != -1) &&
		(pdata->rx_dma_channel != -1)) {
		adapter->dma_rx.channel = pdata->rx_dma_channel;
		adapter->dma_tx.channel = pdata->tx_dma_channel;
	} else {
		adapter->dma_rx.channel = -1;
		adapter->dma_tx.channel = -1;
	}

	tasklet_setup(&adapter->tasklet, ks8842_tasklet);
	spin_lock_init(&adapter->lock);

	netdev->netdev_ops = &ks8842_netdev_ops;
	netdev->ethtool_ops = &ks8842_ethtool_ops;

	/* Check if a mac address was given */
	i = netdev->addr_len;
	if (pdata) {
		for (i = 0; i < netdev->addr_len; i++)
			if (pdata->macaddr[i] != 0)
				break;

		if (i < netdev->addr_len)
			/* an address was passed, use it */
			memcpy(netdev->dev_addr, pdata->macaddr,
				netdev->addr_len);
	}

	if (i == netdev->addr_len) {
		ks8842_read_mac_addr(adapter, netdev->dev_addr);

		if (!is_valid_ether_addr(netdev->dev_addr))
			eth_hw_addr_random(netdev);
	}

	id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	platform_set_drvdata(pdev, netdev);

	pr_info("Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
		(id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);

	return 0;

err_register:
err_get_irq:
	iounmap(adapter->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	release_mem_region(iomem->start, resource_size(iomem));
err_mem_region:
	return err;
}

static int ks8842_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	unregister_netdev(netdev);
	tasklet_kill(&adapter->tasklet);
	iounmap(adapter->hw_addr);
	free_netdev(netdev);
	release_mem_region(iomem->start, resource_size(iomem));
	return 0;
}

static struct platform_driver ks8842_platform_driver = {
	.driver = {
		.name	= DRV_NAME,
	},
	.probe		= ks8842_probe,
	.remove		= ks8842_remove,
};

module_platform_driver(ks8842_platform_driver);

MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver");
MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ks8842");