// SPDX-License-Identifier: GPL-2.0-only
/*
 * ks8842.c timberdale KS8842 ethernet driver
 * Copyright (c) 2009 Intel Corporation
 */

/* Supports:
 * The Micrel KS8842 behind the timberdale FPGA
 * The genuine Micrel KS8841/42 device with ISA 16/32bit bus interface
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/ks8842.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#define DRV_NAME "ks8842"

/* Timberdale specific Registers */
#define REG_TIMB_RST		0x1c
#define REG_TIMB_FIFO		0x20
#define REG_TIMB_ISR		0x24
#define REG_TIMB_IER		0x28
#define REG_TIMB_IAR		0x2C
#define REQ_TIMB_DMA_RESUME	0x30

/* KS8842 registers */

#define REG_SELECT_BANK 0x0e

/* bank 0 registers */
#define REG_QRFCR	0x04

/* bank 2 registers */
#define REG_MARL	0x00
#define REG_MARM	0x02
#define REG_MARH	0x04

/* bank 3 registers */
#define REG_GRR		0x06

/* bank 16 registers */
#define REG_TXCR	0x00
#define REG_TXSR	0x02
#define REG_RXCR	0x04
#define REG_TXMIR	0x08
#define REG_RXMIR	0x0A

/* bank 17 registers */
#define REG_TXQCR	0x00
#define REG_RXQCR	0x02
#define REG_TXFDPR	0x04
#define REG_RXFDPR	0x06
#define REG_QMU_DATA_LO 0x08
#define REG_QMU_DATA_HI 0x0A

/* bank 18 registers */
#define REG_IER		0x00
#define IRQ_LINK_CHANGE	0x8000
#define IRQ_TX		0x4000
#define IRQ_RX		0x2000
#define IRQ_RX_OVERRUN	0x0800
#define IRQ_TX_STOPPED	0x0200
#define IRQ_RX_STOPPED	0x0100
#define IRQ_RX_ERROR	0x0080
#define ENABLED_IRQS	(IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \
		IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
/* When running via timberdale in DMA mode, the RX interrupt should be
 * enabled in the KS8842, but not in the FPGA IP, since the IP handles
 * RX DMA internally.
 * TX interrupts are not needed; TX is handled by the FPGA and the driver
 * is notified via DMA callbacks.
 */
#define ENABLED_IRQS_DMA_IP	(IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \
	IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
#define ENABLED_IRQS_DMA	(ENABLED_IRQS_DMA_IP | IRQ_RX)
#define REG_ISR		0x02
#define REG_RXSR	0x04
#define RXSR_VALID	0x8000
#define RXSR_BROADCAST	0x80
#define RXSR_MULTICAST	0x40
#define RXSR_UNICAST	0x20
#define RXSR_FRAMETYPE	0x08
#define RXSR_TOO_LONG	0x04
#define RXSR_RUNT	0x02
#define RXSR_CRC_ERROR	0x01
#define RXSR_ERROR	(RXSR_TOO_LONG | RXSR_RUNT | RXSR_CRC_ERROR)

/* bank 32 registers */
#define REG_SW_ID_AND_ENABLE	0x00
#define REG_SGCR1		0x02
#define REG_SGCR2		0x04
#define REG_SGCR3		0x06

/* bank 39 registers */
#define REG_MACAR1		0x00
#define REG_MACAR2		0x02
#define REG_MACAR3		0x04

/* bank 45 registers */
#define REG_P1MBCR		0x00
#define REG_P1MBSR		0x02

/* bank 46 registers */
#define REG_P2MBCR		0x00
#define REG_P2MBSR		0x02

/* bank 48 registers */
#define REG_P1CR2		0x02

/* bank 49 registers */
#define REG_P1CR4		0x02
#define REG_P1SR		0x04

/* flags passed by platform_device for configuration */
#define	MICREL_KS884X		0x01	/* 0=Timberdale(FPGA), 1=Micrel */
#define	KS884X_16BIT		0x02	/* 1=16bit, 0=32bit */

#define DMA_BUFFER_SIZE		2048

struct ks8842_tx_dma_ctl {
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *adesc;
	void *buf;
	struct scatterlist sg;
	int channel;
};

struct ks8842_rx_dma_ctl {
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *adesc;
	struct sk_buff  *skb;
	struct scatterlist sg;
	struct tasklet_struct tasklet;
	int channel;
};

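/* DMA is used only when the platform data supplied both a TX and an RX
 * channel (see ks8842_probe()); otherwise both channel numbers are -1 and
 * the driver falls back to PIO.
 */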
#define KS8842_USE_DMA(adapter) (((adapter)->dma_tx.channel != -1) && \
	 ((adapter)->dma_rx.channel != -1))

struct ks8842_adapter {
	void __iomem	*hw_addr;
	int		irq;
	unsigned long	conf_flags;	/* copy of platform_device config */
	struct tasklet_struct	tasklet;
	spinlock_t	lock; /* spinlock to be interrupt safe */
	struct work_struct timeout_work;
	struct net_device *netdev;
	struct device *dev;
	struct ks8842_tx_dma_ctl	dma_tx;
	struct ks8842_rx_dma_ctl	dma_rx;
};

static void ks8842_dma_rx_cb(void *data);
static void ks8842_dma_tx_cb(void *data);

static inline void ks8842_resume_dma(struct ks8842_adapter *adapter)
{
	iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME);
}

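/* All KS8842 registers are banked: REG_SELECT_BANK picks the active bank,
 * so every accessor below selects the bank before touching the offset.
 * The interrupt handler and the tasklet save and restore the current bank
 * selection around their accesses.
 */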
static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank)
{
	iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK);
}

static inline void ks8842_write8(struct ks8842_adapter *adapter, u16 bank,
	u8 value, int offset)
{
	ks8842_select_bank(adapter, bank);
	iowrite8(value, adapter->hw_addr + offset);
}

static inline void ks8842_write16(struct ks8842_adapter *adapter, u16 bank,
	u16 value, int offset)
{
	ks8842_select_bank(adapter, bank);
	iowrite16(value, adapter->hw_addr + offset);
}

static inline void ks8842_enable_bits(struct ks8842_adapter *adapter, u16 bank,
	u16 bits, int offset)
{
	u16 reg;
	ks8842_select_bank(adapter, bank);
	reg = ioread16(adapter->hw_addr + offset);
	reg |= bits;
	iowrite16(reg, adapter->hw_addr + offset);
}

static inline void ks8842_clear_bits(struct ks8842_adapter *adapter, u16 bank,
	u16 bits, int offset)
{
	u16 reg;
	ks8842_select_bank(adapter, bank);
	reg = ioread16(adapter->hw_addr + offset);
	reg &= ~bits;
	iowrite16(reg, adapter->hw_addr + offset);
}

static inline void ks8842_write32(struct ks8842_adapter *adapter, u16 bank,
	u32 value, int offset)
{
	ks8842_select_bank(adapter, bank);
	iowrite32(value, adapter->hw_addr + offset);
}

static inline u8 ks8842_read8(struct ks8842_adapter *adapter, u16 bank,
	int offset)
{
	ks8842_select_bank(adapter, bank);
	return ioread8(adapter->hw_addr + offset);
}

static inline u16 ks8842_read16(struct ks8842_adapter *adapter, u16 bank,
	int offset)
{
	ks8842_select_bank(adapter, bank);
	return ioread16(adapter->hw_addr + offset);
}

static inline u32 ks8842_read32(struct ks8842_adapter *adapter, u16 bank,
	int offset)
{
	ks8842_select_bank(adapter, bank);
	return ioread32(adapter->hw_addr + offset);
}

static void ks8842_reset(struct ks8842_adapter *adapter)
{
	if (adapter->conf_flags & MICREL_KS884X) {
		ks8842_write16(adapter, 3, 1, REG_GRR);
		msleep(10);
		iowrite16(0, adapter->hw_addr + REG_GRR);
	} else {
		/* The KS8842 goes haywire when doing a software reset,
		 * so a workaround in the timberdale IP is implemented to
		 * do a hardware reset instead
		ks8842_write16(adapter, 3, 1, REG_GRR);
		msleep(10);
		iowrite16(0, adapter->hw_addr + REG_GRR);
		*/
		iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST);
		msleep(20);
	}
}

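/* Bit 2 of the port 1 MII status register (REG_P1MBSR) is the standard
 * MII link-status bit; it drives the carrier and queue state below.
 */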
static void ks8842_update_link_status(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	/* check the status of the link */
	if (ks8842_read16(adapter, 45, REG_P1MBSR) & 0x4) {
		netif_carrier_on(netdev);
		netif_wake_queue(netdev);
	} else {
		netif_stop_queue(netdev);
		netif_carrier_off(netdev);
	}
}

static void ks8842_enable_tx(struct ks8842_adapter *adapter)
{
	ks8842_enable_bits(adapter, 16, 0x01, REG_TXCR);
}

static void ks8842_disable_tx(struct ks8842_adapter *adapter)
{
	ks8842_clear_bits(adapter, 16, 0x01, REG_TXCR);
}

static void ks8842_enable_rx(struct ks8842_adapter *adapter)
{
	ks8842_enable_bits(adapter, 16, 0x01, REG_RXCR);
}

static void ks8842_disable_rx(struct ks8842_adapter *adapter)
{
	ks8842_clear_bits(adapter, 16, 0x01, REG_RXCR);
}

static void ks8842_reset_hw(struct ks8842_adapter *adapter)
{
	/* reset the HW */
	ks8842_reset(adapter);

	/* Enable QMU Transmit flow control / transmit padding / Transmit CRC */
	ks8842_write16(adapter, 16, 0x000E, REG_TXCR);

	/* enable the receiver, uni + multi + broadcast + flow ctrl
	 * + crc strip */
	ks8842_write16(adapter, 16, 0x8 | 0x20 | 0x40 | 0x80 | 0x400,
		REG_RXCR);

	/* TX frame pointer autoincrement */
	ks8842_write16(adapter, 17, 0x4000, REG_TXFDPR);

	/* RX frame pointer autoincrement */
	ks8842_write16(adapter, 17, 0x4000, REG_RXFDPR);

	/* RX 2 kb high watermark */
	ks8842_write16(adapter, 0, 0x1000, REG_QRFCR);

	/* aggressive back off in half duplex */
	ks8842_enable_bits(adapter, 32, 1 << 8, REG_SGCR1);

	/* enable no excessive collision drop */
	ks8842_enable_bits(adapter, 32, 1 << 3, REG_SGCR2);

	/* Enable port 1 force flow control / back pressure / transmit / recv */
	ks8842_write16(adapter, 48, 0x1E07, REG_P1CR2);

	/* restart port auto-negotiation */
	ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4);

	/* Enable the transmitter */
	ks8842_enable_tx(adapter);

	/* Enable the receiver */
	ks8842_enable_rx(adapter);

	/* clear all interrupts */
	ks8842_write16(adapter, 18, 0xffff, REG_ISR);

	/* enable interrupts */
	if (KS8842_USE_DMA(adapter)) {
		/* When running in DMA mode the RX interrupt is not enabled in
		 * timberdale because RX data is received via DMA callbacks;
		 * it must still be enabled in the KS8842 because it indicates
		 * to timberdale when there is RX data for its DMA FIFOs */
		iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER);
		ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
	} else {
		if (!(adapter->conf_flags & MICREL_KS884X))
			iowrite16(ENABLED_IRQS,
				adapter->hw_addr + REG_TIMB_IER);
		ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
	}
	/* enable the switch */
	ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE);
}

static void ks8842_init_mac_addr(struct ks8842_adapter *adapter)
{
	u8 addr[ETH_ALEN];
	int i;
	u16 mac;

	for (i = 0; i < ETH_ALEN; i++)
		addr[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i);
	eth_hw_addr_set(adapter->netdev, addr);

	if (adapter->conf_flags & MICREL_KS884X) {
		/*
		 * On the Micrel KS884X the switch's MAC address registers are
		 * laid out in the opposite word order to the QMU's, so copy
		 * them reversed.
		 */

		mac = ks8842_read16(adapter, 2, REG_MARL);
		ks8842_write16(adapter, 39, mac, REG_MACAR3);
		mac = ks8842_read16(adapter, 2, REG_MARM);
		ks8842_write16(adapter, 39, mac, REG_MACAR2);
		mac = ks8842_read16(adapter, 2, REG_MARH);
		ks8842_write16(adapter, 39, mac, REG_MACAR1);
	} else {

		/* make sure the switch port uses the same MAC as the QMU */
		mac = ks8842_read16(adapter, 2, REG_MARL);
		ks8842_write16(adapter, 39, mac, REG_MACAR1);
		mac = ks8842_read16(adapter, 2, REG_MARM);
		ks8842_write16(adapter, 39, mac, REG_MACAR2);
		mac = ks8842_read16(adapter, 2, REG_MARH);
		ks8842_write16(adapter, 39, mac, REG_MACAR3);
	}
}

static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, const u8 *mac)
{
	unsigned long flags;
	unsigned i;

	spin_lock_irqsave(&adapter->lock, flags);
	for (i = 0; i < ETH_ALEN; i++) {
		ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
		if (!(adapter->conf_flags & MICREL_KS884X))
			ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
				REG_MACAR1 + i);
	}

	if (adapter->conf_flags & MICREL_KS884X) {
		/*
		 * On the Micrel KS884X the switch's MAC address registers are
		 * laid out in the opposite word order to the QMU's.
		 */

		u16 mac;

		mac = ks8842_read16(adapter, 2, REG_MARL);
		ks8842_write16(adapter, 39, mac, REG_MACAR3);
		mac = ks8842_read16(adapter, 2, REG_MARM);
		ks8842_write16(adapter, 39, mac, REG_MACAR2);
		mac = ks8842_read16(adapter, 2, REG_MARH);
		ks8842_write16(adapter, 39, mac, REG_MACAR1);
	}
	spin_unlock_irqrestore(&adapter->lock, flags);
}

static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter)
{
	return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
}

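/* DMA TX path: the frame is copied into a bounce buffer prefixed with a
 * 4-byte control word (control byte, destination port, 16-bit length),
 * the transfer length is rounded up to a multiple of 4 and a slave DMA
 * descriptor is submitted; completion is signalled via ks8842_dma_tx_cb().
 */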
static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx;
	u8 *buf = ctl->buf;

	if (ctl->adesc) {
		netdev_dbg(netdev, "%s: TX ongoing\n", __func__);
		/* transfer ongoing */
		return NETDEV_TX_BUSY;
	}

	sg_dma_len(&ctl->sg) = skb->len + sizeof(u32);

	/* copy data to the TX buffer */
	/* the control word, enable IRQ, port 1 and the length */
	*buf++ = 0x00;
	*buf++ = 0x01; /* Port 1 */
	*buf++ = skb->len & 0xff;
	*buf++ = (skb->len >> 8) & 0xff;
	skb_copy_from_linear_data(skb, buf, skb->len);

	dma_sync_single_range_for_device(adapter->dev,
		sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg),
		DMA_TO_DEVICE);

	/* make sure the length is a multiple of 4 */
	if (sg_dma_len(&ctl->sg) % 4)
		sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;

	ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
		&ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!ctl->adesc)
		return NETDEV_TX_BUSY;

	ctl->adesc->callback_param = netdev;
	ctl->adesc->callback = ks8842_dma_tx_cb;
	ctl->adesc->tx_submit(ctl->adesc);

	netdev->stats.tx_bytes += skb->len;

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

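/* PIO TX path: write the control word and length to the QMU data registers,
 * copy the frame 16 or 32 bits at a time depending on the bus width, then
 * set bit 0 of REG_TXQCR to enqueue the frame.
 */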
static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	int len = skb->len;

	netdev_dbg(netdev, "%s: len %u head %p data %p tail %p end %p\n",
		__func__, skb->len, skb->head, skb->data,
		skb_tail_pointer(skb), skb_end_pointer(skb));

	/* check FIFO buffer space, we need space for CRC and command bits */
	if (ks8842_tx_fifo_space(adapter) < len + 8)
		return NETDEV_TX_BUSY;

	if (adapter->conf_flags & KS884X_16BIT) {
		u16 *ptr16 = (u16 *)skb->data;
		ks8842_write16(adapter, 17, 0x8000 | 0x100, REG_QMU_DATA_LO);
		ks8842_write16(adapter, 17, (u16)len, REG_QMU_DATA_HI);
		netdev->stats.tx_bytes += len;

		/* copy buffer */
		while (len > 0) {
			iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_LO);
			iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_HI);
			len -= sizeof(u32);
		}
	} else {

		u32 *ptr = (u32 *)skb->data;
		u32 ctrl;
		/* the control word, enable IRQ, port 1 and the length */
		ctrl = 0x8000 | 0x100 | (len << 16);
		ks8842_write32(adapter, 17, ctrl, REG_QMU_DATA_LO);

		netdev->stats.tx_bytes += len;

		/* copy buffer */
		while (len > 0) {
			iowrite32(*ptr, adapter->hw_addr + REG_QMU_DATA_LO);
			len -= sizeof(u32);
			ptr++;
		}
	}

	/* enqueue packet */
	ks8842_write16(adapter, 17, 1, REG_TXQCR);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static void ks8842_update_rx_err_counters(struct net_device *netdev, u32 status)
{
	netdev_dbg(netdev, "RX error, status: %x\n", status);

	netdev->stats.rx_errors++;
	if (status & RXSR_TOO_LONG)
		netdev->stats.rx_length_errors++;
	if (status & RXSR_CRC_ERROR)
		netdev->stats.rx_crc_errors++;
	if (status & RXSR_RUNT)
		netdev->stats.rx_frame_errors++;
}

static void ks8842_update_rx_counters(struct net_device *netdev, u32 status,
	int len)
{
	netdev_dbg(netdev, "RX packet, len: %d\n", len);

	netdev->stats.rx_packets++;
	netdev->stats.rx_bytes += len;
	if (status & RXSR_MULTICAST)
		netdev->stats.multicast++;
}

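/* Allocate a 2 kB skb, map it for DMA and submit a slave RX descriptor;
 * completion is signalled via ks8842_dma_rx_cb(), which schedules the RX
 * tasklet to hand the frame to the stack and start the next transfer.
 */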
static int __ks8842_start_new_rx_dma(struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
	struct scatterlist *sg = &ctl->sg;
	int err;

	ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE);
	if (ctl->skb) {
		sg_init_table(sg, 1);
		sg_dma_address(sg) = dma_map_single(adapter->dev,
			ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(adapter->dev, sg_dma_address(sg))) {
			err = -ENOMEM;
			sg_dma_address(sg) = 0;
			goto out;
		}

		sg_dma_len(sg) = DMA_BUFFER_SIZE;

		ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
			sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);

		if (!ctl->adesc) {
			err = -ENOMEM;
			goto out;
		}

		ctl->adesc->callback_param = netdev;
		ctl->adesc->callback = ks8842_dma_rx_cb;
		ctl->adesc->tx_submit(ctl->adesc);
	} else {
		err = -ENOMEM;
		sg_dma_address(sg) = 0;
		goto out;
	}

	return 0;
out:
	if (sg_dma_address(sg))
		dma_unmap_single(adapter->dev, sg_dma_address(sg),
			DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
	sg_dma_address(sg) = 0;
	dev_kfree_skb(ctl->skb);
	ctl->skb = NULL;

	printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err);
	return err;
}

static void ks8842_rx_frame_dma_tasklet(struct tasklet_struct *t)
{
	struct ks8842_adapter *adapter = from_tasklet(adapter, t, dma_rx.tasklet);
	struct net_device *netdev = adapter->netdev;
	struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
	struct sk_buff *skb = ctl->skb;
	dma_addr_t addr = sg_dma_address(&ctl->sg);
	u32 status;

	ctl->adesc = NULL;

	/* kick next transfer going */
	__ks8842_start_new_rx_dma(netdev);

	/* now handle the data we got */
	dma_unmap_single(adapter->dev, addr, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);

	status = *((u32 *)skb->data);

	netdev_dbg(netdev, "%s - rx_data: status: %x\n",
		__func__, status & 0xffff);

	/* check the status */
	if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
		int len = (status >> 16) & 0x7ff;

		ks8842_update_rx_counters(netdev, status, len);

		/* reserve 4 bytes which is the status word */
		skb_reserve(skb, 4);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, netdev);
		netif_rx(skb);
	} else {
		ks8842_update_rx_err_counters(netdev, status);
		dev_kfree_skb(skb);
	}
}

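/* PIO RX path: read the status word and frame length from the QMU data
 * registers, copy the frame out 16 or 32 bits at a time, then release the
 * frame via REG_RXQCR; the RX high watermark is briefly raised to 3 kB
 * while the frame is released.
 */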
static void ks8842_rx_frame(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	u32 status;
	int len;

	if (adapter->conf_flags & KS884X_16BIT) {
		status = ks8842_read16(adapter, 17, REG_QMU_DATA_LO);
		len = ks8842_read16(adapter, 17, REG_QMU_DATA_HI);
		netdev_dbg(netdev, "%s - rx_data: status: %x\n",
			   __func__, status);
	} else {
		status = ks8842_read32(adapter, 17, REG_QMU_DATA_LO);
		len = (status >> 16) & 0x7ff;
		status &= 0xffff;
		netdev_dbg(netdev, "%s - rx_data: status: %x\n",
			   __func__, status);
	}

	/* check the status */
	if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
		struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len + 3);

		if (skb) {

			ks8842_update_rx_counters(netdev, status, len);

			if (adapter->conf_flags & KS884X_16BIT) {
				u16 *data16 = skb_put(skb, len);
				ks8842_select_bank(adapter, 17);
				while (len > 0) {
					*data16++ = ioread16(adapter->hw_addr +
						REG_QMU_DATA_LO);
					*data16++ = ioread16(adapter->hw_addr +
						REG_QMU_DATA_HI);
					len -= sizeof(u32);
				}
			} else {
				u32 *data = skb_put(skb, len);

				ks8842_select_bank(adapter, 17);
				while (len > 0) {
					*data++ = ioread32(adapter->hw_addr +
						REG_QMU_DATA_LO);
					len -= sizeof(u32);
				}
			}
			skb->protocol = eth_type_trans(skb, netdev);
			netif_rx(skb);
		} else
			netdev->stats.rx_dropped++;
	} else
		ks8842_update_rx_err_counters(netdev, status);

	/* set high watermark to 3K */
	ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR);

	/* release the frame */
	ks8842_write16(adapter, 17, 0x01, REG_RXQCR);

	/* set high watermark to 2K */
	ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR);
}

static void ks8842_handle_rx(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
	netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data);
	while (rx_data) {
		ks8842_rx_frame(netdev, adapter);
		rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
	}
}

static void ks8842_handle_tx(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	u16 sr = ks8842_read16(adapter, 16, REG_TXSR);
	netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr);
	netdev->stats.tx_packets++;
	if (netif_queue_stopped(netdev))
		netif_wake_queue(netdev);
}

static void ks8842_handle_rx_overrun(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	netdev_dbg(netdev, "%s: entry\n", __func__);
	netdev->stats.rx_errors++;
	netdev->stats.rx_fifo_errors++;
}

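/* Deferred interrupt handling: the hard IRQ handler masks the chip
 * interrupts and schedules this tasklet, which acks the ISR, dispatches
 * the individual events, re-enables interrupts and restores the bank
 * selection that was active when it was entered.
 */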
static void ks8842_tasklet(struct tasklet_struct *t)
{
	struct ks8842_adapter *adapter = from_tasklet(adapter, t, tasklet);
	struct net_device *netdev = adapter->netdev;
	u16 isr;
	unsigned long flags;
	u16 entry_bank;

	/* read current bank to be able to set it back */
	spin_lock_irqsave(&adapter->lock, flags);
	entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK);
	spin_unlock_irqrestore(&adapter->lock, flags);

	isr = ks8842_read16(adapter, 18, REG_ISR);
	netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);

	/* when running in DMA mode, do not ack RX interrupts; they are handled
	 * internally by timberdale, otherwise its DMA FIFOs would stop
	 */
	if (KS8842_USE_DMA(adapter))
		isr &= ~IRQ_RX;

	/* Ack */
	ks8842_write16(adapter, 18, isr, REG_ISR);

	if (!(adapter->conf_flags & MICREL_KS884X))
		/* Ack in the timberdale IP as well */
		iowrite32(0x1, adapter->hw_addr + REG_TIMB_IAR);

	if (!netif_running(netdev))
		return;

	if (isr & IRQ_LINK_CHANGE)
		ks8842_update_link_status(netdev, adapter);

	/* should not get IRQ_RX when running DMA mode */
	if (isr & (IRQ_RX | IRQ_RX_ERROR) && !KS8842_USE_DMA(adapter))
		ks8842_handle_rx(netdev, adapter);

	/* should only happen when in PIO mode */
	if (isr & IRQ_TX)
		ks8842_handle_tx(netdev, adapter);

	if (isr & IRQ_RX_OVERRUN)
		ks8842_handle_rx_overrun(netdev, adapter);

	if (isr & IRQ_TX_STOPPED) {
		ks8842_disable_tx(adapter);
		ks8842_enable_tx(adapter);
	}

	if (isr & IRQ_RX_STOPPED) {
		ks8842_disable_rx(adapter);
		ks8842_enable_rx(adapter);
	}

	/* re-enable interrupts, put back the bank selection register */
	spin_lock_irqsave(&adapter->lock, flags);
	if (KS8842_USE_DMA(adapter))
		ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
	else
		ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
	iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);

	/* Make sure timberdale continues DMA operations; they are stopped
	 * while we are handling the ks8842 because we might change bank */
	if (KS8842_USE_DMA(adapter))
		ks8842_resume_dma(adapter);

	spin_unlock_irqrestore(&adapter->lock, flags);
}

static irqreturn_t ks8842_irq(int irq, void *devid)
{
	struct net_device *netdev = devid;
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	u16 isr;
	u16 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK);
	irqreturn_t ret = IRQ_NONE;

	isr = ks8842_read16(adapter, 18, REG_ISR);
	netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);

	if (isr) {
		if (KS8842_USE_DMA(adapter))
			/* disable all but RX IRQ, since the FPGA relies on it */
			ks8842_write16(adapter, 18, IRQ_RX, REG_IER);
		else
			/* disable IRQ */
			ks8842_write16(adapter, 18, 0x00, REG_IER);

		/* schedule tasklet */
		tasklet_schedule(&adapter->tasklet);

		ret = IRQ_HANDLED;
	}

	iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);

	/* After an interrupt, tell timberdale to continue DMA operations.
	 * DMA is disabled while we are handling the ks8842 because we might
	 * change bank */
	ks8842_resume_dma(adapter);

	return ret;
}

static void ks8842_dma_rx_cb(void *data)
{
	struct net_device	*netdev = data;
	struct ks8842_adapter	*adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "RX DMA finished\n");
	/* schedule tasklet */
	if (adapter->dma_rx.adesc)
		tasklet_schedule(&adapter->dma_rx.tasklet);
}

static void ks8842_dma_tx_cb(void *data)
{
	struct net_device		*netdev = data;
	struct ks8842_adapter		*adapter = netdev_priv(netdev);
	struct ks8842_tx_dma_ctl	*ctl = &adapter->dma_tx;

	netdev_dbg(netdev, "TX DMA finished\n");

	if (!ctl->adesc)
		return;

	netdev->stats.tx_packets++;
	ctl->adesc = NULL;

	if (netif_queue_stopped(netdev))
		netif_wake_queue(netdev);
}

static void ks8842_stop_dma(struct ks8842_adapter *adapter)
{
	struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
	struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;

	tx_ctl->adesc = NULL;
	if (tx_ctl->chan)
		dmaengine_terminate_all(tx_ctl->chan);

	rx_ctl->adesc = NULL;
	if (rx_ctl->chan)
		dmaengine_terminate_all(rx_ctl->chan);

	if (sg_dma_address(&rx_ctl->sg))
		dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg),
			DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
	sg_dma_address(&rx_ctl->sg) = 0;

	dev_kfree_skb(rx_ctl->skb);
	rx_ctl->skb = NULL;
}

static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter)
{
	struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
	struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;

	ks8842_stop_dma(adapter);

	if (tx_ctl->chan)
		dma_release_channel(tx_ctl->chan);
	tx_ctl->chan = NULL;

	if (rx_ctl->chan)
		dma_release_channel(rx_ctl->chan);
	rx_ctl->chan = NULL;

	tasklet_kill(&rx_ctl->tasklet);

	if (sg_dma_address(&tx_ctl->sg))
		dma_unmap_single(adapter->dev, sg_dma_address(&tx_ctl->sg),
			DMA_BUFFER_SIZE, DMA_TO_DEVICE);
	sg_dma_address(&tx_ctl->sg) = 0;

	kfree(tx_ctl->buf);
	tx_ctl->buf = NULL;
}

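/* The DMA channel filter matches purely by chan_id against the channel
 * number passed in the platform data.
 */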
static bool ks8842_dma_filter_fn(struct dma_chan *chan, void *filter_param)
{
	return chan->chan_id == (long)filter_param;
}

static int ks8842_alloc_dma_bufs(struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
	struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
	int err;

	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_PRIVATE, mask);

	sg_init_table(&tx_ctl->sg, 1);

	tx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
					   (void *)(long)tx_ctl->channel);
	if (!tx_ctl->chan) {
		err = -ENODEV;
		goto err;
	}

	/* allocate DMA buffer */
	tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
	if (!tx_ctl->buf) {
		err = -ENOMEM;
		goto err;
	}

	sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
		tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) {
		err = -ENOMEM;
		sg_dma_address(&tx_ctl->sg) = 0;
		goto err;
	}

	rx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
					   (void *)(long)rx_ctl->channel);
	if (!rx_ctl->chan) {
		err = -ENODEV;
		goto err;
	}

	tasklet_setup(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet);

	return 0;
err:
	ks8842_dealloc_dma_bufs(adapter);
	return err;
}

/* Netdevice operations */

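/* ndo_open: try to set up DMA first; if channel request, buffer allocation
 * or the first RX transfer fails, fall back to PIO by marking both channels
 * as -1 before the hardware is (re)initialised.
 */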
static int ks8842_open(struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	int err;

	netdev_dbg(netdev, "%s - entry\n", __func__);

	if (KS8842_USE_DMA(adapter)) {
		err = ks8842_alloc_dma_bufs(netdev);

		if (!err) {
			/* start RX dma */
			err = __ks8842_start_new_rx_dma(netdev);
			if (err)
				ks8842_dealloc_dma_bufs(adapter);
		}

		if (err) {
			printk(KERN_WARNING DRV_NAME
				": Failed to initiate DMA, running PIO\n");
			ks8842_dealloc_dma_bufs(adapter);
			adapter->dma_rx.channel = -1;
			adapter->dma_tx.channel = -1;
		}
	}

	/* reset the HW */
	ks8842_reset_hw(adapter);

	ks8842_write_mac_addr(adapter, netdev->dev_addr);

	ks8842_update_link_status(netdev, adapter);

	err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME,
		netdev);
	if (err) {
		pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err);
		return err;
	}

	return 0;
}

static int ks8842_close(struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "%s - entry\n", __func__);

	cancel_work_sync(&adapter->timeout_work);

	if (KS8842_USE_DMA(adapter))
		ks8842_dealloc_dma_bufs(adapter);

	/* free the irq */
	free_irq(adapter->irq, netdev);

	/* disable the switch */
	ks8842_write16(adapter, 32, 0x0, REG_SW_ID_AND_ENABLE);

	return 0;
}

static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb,
				     struct net_device *netdev)
{
	int ret;
	struct ks8842_adapter *adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "%s: entry\n", __func__);

	if (KS8842_USE_DMA(adapter)) {
		unsigned long flags;
		ret = ks8842_tx_frame_dma(skb, netdev);
		/* for now only allow one transfer at a time */
		spin_lock_irqsave(&adapter->lock, flags);
		if (adapter->dma_tx.adesc)
			netif_stop_queue(netdev);
		spin_unlock_irqrestore(&adapter->lock, flags);
		return ret;
	}

	ret = ks8842_tx_frame(skb, netdev);

	if (ks8842_tx_fifo_space(adapter) < netdev->mtu + 8)
		netif_stop_queue(netdev);

	return ret;
}

static int ks8842_set_mac(struct net_device *netdev, void *p)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	char *mac = (u8 *)addr->sa_data;

	netdev_dbg(netdev, "%s: entry\n", __func__);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(netdev, mac);

	ks8842_write_mac_addr(adapter, mac);
	return 0;
}

static void ks8842_tx_timeout_work(struct work_struct *work)
{
	struct ks8842_adapter *adapter =
		container_of(work, struct ks8842_adapter, timeout_work);
	struct net_device *netdev = adapter->netdev;
	unsigned long flags;

	netdev_dbg(netdev, "%s: entry\n", __func__);

	spin_lock_irqsave(&adapter->lock, flags);

	if (KS8842_USE_DMA(adapter))
		ks8842_stop_dma(adapter);

	/* disable interrupts */
	ks8842_write16(adapter, 18, 0, REG_IER);
	ks8842_write16(adapter, 18, 0xFFFF, REG_ISR);

	netif_stop_queue(netdev);

	spin_unlock_irqrestore(&adapter->lock, flags);

	ks8842_reset_hw(adapter);

	ks8842_write_mac_addr(adapter, netdev->dev_addr);

	ks8842_update_link_status(netdev, adapter);

	if (KS8842_USE_DMA(adapter))
		__ks8842_start_new_rx_dma(netdev);
}

static void ks8842_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "%s: entry\n", __func__);

	schedule_work(&adapter->timeout_work);
}

static const struct net_device_ops ks8842_netdev_ops = {
	.ndo_open		= ks8842_open,
	.ndo_stop		= ks8842_close,
	.ndo_start_xmit		= ks8842_xmit_frame,
	.ndo_set_mac_address	= ks8842_set_mac,
	.ndo_tx_timeout		= ks8842_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr
};

static const struct ethtool_ops ks8842_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
};

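/* A board would typically hook this driver up through a platform device.
 * A minimal, hypothetical example (PIO mode; -1 disables the DMA channels;
 * only the tx_dma_channel/rx_dma_channel/macaddr fields used below are
 * assumed here):
 *
 *	static struct ks8842_platform_data ks8842_pdata = {
 *		.rx_dma_channel	= -1,
 *		.tx_dma_channel	= -1,
 *	};
 *
 *	platform_device_register_data(NULL, "ks8842", -1,
 *				      &ks8842_pdata, sizeof(ks8842_pdata));
 *
 * Note that the MEM resource flags double as configuration bits
 * (MICREL_KS884X, KS884X_16BIT); see the conf_flags assignment below.
 */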
static int ks8842_probe(struct platform_device *pdev)
{
	int err = -ENOMEM;
	struct resource *iomem;
	struct net_device *netdev;
	struct ks8842_adapter *adapter;
	struct ks8842_platform_data *pdata = dev_get_platdata(&pdev->dev);
	u16 id;
	unsigned i;

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem) {
		dev_err(&pdev->dev, "Invalid resource\n");
		return -EINVAL;
	}
	if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME))
		goto err_mem_region;

	netdev = alloc_etherdev(sizeof(struct ks8842_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	INIT_WORK(&adapter->timeout_work, ks8842_tx_timeout_work);
	adapter->hw_addr = ioremap(iomem->start, resource_size(iomem));
	adapter->conf_flags = iomem->flags;

	if (!adapter->hw_addr)
		goto err_ioremap;

	adapter->irq = platform_get_irq(pdev, 0);
	if (adapter->irq < 0) {
		err = adapter->irq;
		goto err_get_irq;
	}

	adapter->dev = (pdev->dev.parent) ? pdev->dev.parent : &pdev->dev;

	/* DMA is only supported when accessed via timberdale */
	if (!(adapter->conf_flags & MICREL_KS884X) && pdata &&
		(pdata->tx_dma_channel != -1) &&
		(pdata->rx_dma_channel != -1)) {
		adapter->dma_rx.channel = pdata->rx_dma_channel;
		adapter->dma_tx.channel = pdata->tx_dma_channel;
	} else {
		adapter->dma_rx.channel = -1;
		adapter->dma_tx.channel = -1;
	}

	tasklet_setup(&adapter->tasklet, ks8842_tasklet);
	spin_lock_init(&adapter->lock);

	netdev->netdev_ops = &ks8842_netdev_ops;
	netdev->ethtool_ops = &ks8842_ethtool_ops;

	/* Check if a mac address was given */
	i = netdev->addr_len;
	if (pdata) {
		for (i = 0; i < netdev->addr_len; i++)
			if (pdata->macaddr[i] != 0)
				break;

		if (i < netdev->addr_len)
			/* an address was passed, use it */
			eth_hw_addr_set(netdev, pdata->macaddr);
	}

	if (i == netdev->addr_len) {
		ks8842_init_mac_addr(adapter);

		if (!is_valid_ether_addr(netdev->dev_addr))
			eth_hw_addr_random(netdev);
	}

	id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	platform_set_drvdata(pdev, netdev);

	pr_info("Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
		(id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);

	return 0;

err_register:
err_get_irq:
	iounmap(adapter->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	release_mem_region(iomem->start, resource_size(iomem));
err_mem_region:
	return err;
}

static int ks8842_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	unregister_netdev(netdev);
	tasklet_kill(&adapter->tasklet);
	iounmap(adapter->hw_addr);
	free_netdev(netdev);
	release_mem_region(iomem->start, resource_size(iomem));
	return 0;
}

static struct platform_driver ks8842_platform_driver = {
	.driver = {
		.name	= DRV_NAME,
	},
	.probe		= ks8842_probe,
	.remove		= ks8842_remove,
};

module_platform_driver(ks8842_platform_driver);

MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver");
MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ks8842");