// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Driver for the Macintosh 68K onboard MACE controller with PSC
 *	driven DMA. The MACE driver code is derived from mace.c. The
 *	Mac68k theory of operation is courtesy of the MacBSD wizards.
 *
 *	Copyright (C) 1996 Paul Mackerras.
 *	Copyright (C) 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 *	Modified heavily by Joshua M. Thompson based on Dave Huang's NetBSD driver
 *
 *	Copyright (C) 2007 Finn Thain
 *
 *	Converted to DMA API, converted to unified driver model,
 *	sync'd some routines with mace.c and fixed various bugs.
 */


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/crc32.h>
#include <linux/bitrev.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/macints.h>
#include <asm/mac_psc.h>
#include <asm/page.h>
#include "mace.h"

static char mac_mace_string[] = "macmace";

#define N_TX_BUFF_ORDER	0
#define N_TX_RING	(1 << N_TX_BUFF_ORDER)
#define N_RX_BUFF_ORDER	3
#define N_RX_RING	(1 << N_RX_BUFF_ORDER)

#define TX_TIMEOUT	HZ

#define MACE_BUFF_SIZE	0x800

/* Chip rev needs workaround on HW & multicast addr change */
#define BROKEN_ADDRCHG_REV	0x0941

/* The MACE is simply wired down on a Mac68K box */

#define MACE_BASE	(void *)(0x50F1C000)
#define MACE_PROM	(void *)(0x50F08001)

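/*
 * Per-device state. tx_count is the number of free transmit buffers;
 * tx_slot and tx_sloti hold the PSC register set offset (0x00 or 0x10)
 * used for the next submission and the next completion respectively,
 * while rx_slot and rx_tail track the active receive set and the next
 * ring index to process.
 */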
struct mace_data {
	volatile struct mace *mace;
	unsigned char *tx_ring;
	dma_addr_t tx_ring_phys;
	unsigned char *rx_ring;
	dma_addr_t rx_ring_phys;
	int dma_intr;
	int rx_slot, rx_tail;
	int tx_slot, tx_sloti, tx_count;
	int chipid;
	struct device *device;
};

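/*
 * A received frame as the PSC DMA writes it into the receive ring: the
 * MACE receive status bytes come first, then the frame data. rcvcnt
 * holds the low 8 bits of the frame length and the low nibble of
 * rcvsts the upper 4 bits (see mace_dma_rx_frame()).
 */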
struct mace_frame {
	u8	rcvcnt;
	u8	pad1;
	u8	rcvsts;
	u8	pad2;
	u8	rntpc;
	u8	pad3;
	u8	rcvcc;
	u8	pad4;
	u32	pad5;
	u32	pad6;
	u8	data[1];
	/* And frame continues.. */
};

#define PRIV_BYTES	sizeof(struct mace_data)

static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static void mace_reset(struct net_device *dev);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_dma_intr(int irq, void *dev_id);
static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void __mace_set_address(struct net_device *dev, void *addr);

/*
 * Load a receive DMA channel with a base address and ring length
 */

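/*
 * The set argument selects which PSC register set to program:
 * 0x00 (PSC_SET0) or 0x10 (PSC_SET1).
 */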
static void mace_load_rxdma_base(struct net_device *dev, int set)
{
	struct mace_data *mp = netdev_priv(dev);

	psc_write_word(PSC_ENETRD_CMD + set, 0x0100);
	psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);
	psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
	psc_write_word(PSC_ENETRD_CMD + set, 0x9800);
	mp->rx_tail = 0;
}
111
112/*
113 * Reset the receive DMA subsystem
114 */
115
116static void mace_rxdma_reset(struct net_device *dev)
117{
118	struct mace_data *mp = netdev_priv(dev);
119	volatile struct mace *mace = mp->mace;
120	u8 maccc = mace->maccc;
121
122	mace->maccc = maccc & ~ENRCV;
123
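	/* Load both PSC register sets with the same ring base and re-arm them */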
	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x00);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x10);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace->maccc = maccc;
	mp->rx_slot = 0;

	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x9800);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x9800);
}

/*
 * Reset the transmit DMA subsystem
 */

static void mace_txdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc;

	psc_write_word(PSC_ENETWR_CTL, 0x8800);

	maccc = mace->maccc;
	mace->maccc = maccc & ~ENXMT;

	mp->tx_slot = mp->tx_sloti = 0;
	mp->tx_count = N_TX_RING;

	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	mace->maccc = maccc;
}

/*
 * Disable DMA
 */

static void mace_dma_off(struct net_device *dev)
{
	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	psc_write_word(PSC_ENETRD_CTL, 0x1000);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x1100);

	psc_write_word(PSC_ENETWR_CTL, 0x8800);
	psc_write_word(PSC_ENETWR_CTL, 0x1000);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100);
}

static const struct net_device_ops mace_netdev_ops = {
	.ndo_open		= mace_open,
	.ndo_stop		= mace_close,
	.ndo_start_xmit		= mace_xmit_start,
	.ndo_tx_timeout		= mace_tx_timeout,
	.ndo_set_rx_mode	= mace_set_multicast,
	.ndo_set_mac_address	= mace_set_address,
	.ndo_validate_addr	= eth_validate_addr,
};

/*
 * Not really much of a probe. The hardware table tells us if this
 * model of Macintrash has a MACE (AV macintoshes)
 */

192
193static int mace_probe(struct platform_device *pdev)
194{
195	int j;
196	struct mace_data *mp;
197	unsigned char *addr;
198	struct net_device *dev;
199	unsigned char checksum = 0;
200	int err;
201
202	dev = alloc_etherdev(PRIV_BYTES);
203	if (!dev)
204		return -ENOMEM;
205
206	mp = netdev_priv(dev);
207
208	mp->device = &pdev->dev;
209	platform_set_drvdata(pdev, dev);
210	SET_NETDEV_DEV(dev, &pdev->dev);
211
212	dev->base_addr = (u32)MACE_BASE;
213	mp->mace = MACE_BASE;
214
215	dev->irq = IRQ_MAC_MACE;
216	mp->dma_intr = IRQ_MAC_MACE_DMA;
217
218	mp->chipid = mp->mace->chipid_hi << 8 | mp->mace->chipid_lo;
219
	/*
	 * The PROM contains 8 bytes which total 0xFF when XOR'd
	 * together. Due to the usual peculiar Apple brain damage
	 * the bytes are spaced 16 bytes apart and the bits are
	 * reversed.
	 */

	addr = MACE_PROM;

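	/* Bytes 0-5 form the station address; bytes 6 and 7 only feed the checksum */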
	for (j = 0; j < 6; ++j) {
		u8 v = bitrev8(addr[j<<4]);
		checksum ^= v;
		dev->dev_addr[j] = v;
	}
	for (; j < 8; ++j) {
		checksum ^= bitrev8(addr[j<<4]);
	}

	if (checksum != 0xFF) {
		free_netdev(dev);
		return -ENODEV;
	}

	dev->netdev_ops		= &mace_netdev_ops;
	dev->watchdog_timeo	= TX_TIMEOUT;

	pr_info("Onboard MACE, hardware address %pM, chip revision 0x%04X\n",
		dev->dev_addr, mp->chipid);

	err = register_netdev(dev);
	if (!err)
		return 0;

	free_netdev(dev);
	return err;
}

/*
 * Reset the chip.
 */

static void mace_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;

	/* soft-reset the chip */
	i = 200;
	while (--i) {
		mb->biucc = SWRST;
		if (mb->biucc & SWRST) {
			udelay(10);
			continue;
		}
		break;
	}
	if (!i) {
		printk(KERN_ERR "macmace: cannot reset chip!\n");
		return;
	}

	mb->maccc = 0;	/* turn off tx, rx */
	mb->imr = 0xFF;	/* disable all intrs for now */
	i = mb->ir;

	mb->biucc = XMTSP_64;
	mb->utr = RTRD;
	mb->fifocc = XMTFW_8 | RCVFW_64 | XMTFWU | RCVFWU;

	mb->xmtfc = AUTO_PAD_XMIT; /* auto-pad short frames */
	mb->rcvfc = 0;

	/* load up the hardware address */
	__mace_set_address(dev, dev->dev_addr);

	/* clear the multicast filter */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = LOGADDR;
	else {
		mb->iac = ADDRCHG | LOGADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 8; ++i)
		mb->ladrf = 0;

	/* done changing address */
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;

	mb->plscc = PORTSEL_AUI;
}

/*
 * Load the address on a mace controller.
 */

static void __mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned char *p = addr;
	int i;

	/* load up the hardware address */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = PHYADDR;
	else {
		mb->iac = ADDRCHG | PHYADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 6; ++i)
		mb->padr = dev->dev_addr[i] = p[i];
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;
}

static int mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;
	u8 maccc;

	local_irq_save(flags);

	maccc = mb->maccc;

	__mace_set_address(dev, addr);

	mb->maccc = maccc;

	local_irq_restore(flags);

	return 0;
}

/*
 * Open the Macintosh MACE. Most of this is playing with the DMA
 * engine. The ethernet chip is quite friendly.
 */

static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	/* reset the chip */
	mace_reset(dev);

	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* Allocate the DMA ring buffers */

	mp->tx_ring = dma_alloc_coherent(mp->device,
					 N_TX_RING * MACE_BUFF_SIZE,
					 &mp->tx_ring_phys, GFP_KERNEL);
	if (mp->tx_ring == NULL)
		goto out1;

	mp->rx_ring = dma_alloc_coherent(mp->device,
					 N_RX_RING * MACE_BUFF_SIZE,
					 &mp->rx_ring_phys, GFP_KERNEL);
	if (mp->rx_ring == NULL)
		goto out2;

	mace_dma_off(dev);

	/* Not sure what these do */

	psc_write_word(PSC_ENETWR_CTL, 0x9000);
	psc_write_word(PSC_ENETRD_CTL, 0x9000);
	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace_rxdma_reset(dev);
	mace_txdma_reset(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;
	return 0;

out2:
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
	                  mp->tx_ring, mp->tx_ring_phys);
out1:
	free_irq(dev->irq, dev);
	free_irq(mp->dma_intr, dev);
	return -ENOMEM;
}

/*
 * Shut down the mace and its interrupt channel
 */

static int mace_close(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	mb->maccc = 0;		/* disable rx and tx	 */
	mb->imr = 0xFF;		/* disable all irqs	 */
	mace_dma_off(dev);	/* disable rx and tx dma */

	return 0;
}

/*
 * Transmit a frame
 */


static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	unsigned long flags;

	/* Stop the queue since there's only the one buffer */

	local_irq_save(flags);
	netif_stop_queue(dev);
	if (!mp->tx_count) {
		printk(KERN_ERR "macmace: tx queue running but no free buffers.\n");
		local_irq_restore(flags);
		return NETDEV_TX_BUSY;
	}
	mp->tx_count--;
	local_irq_restore(flags);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* We need to copy into our xmit buffer to take care of alignment and caching issues */
	skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);

	/* load the Tx DMA and fire it off */

	psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32) mp->tx_ring_phys);
	psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
	psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);

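	/* Alternate between the two PSC command sets for the next frame */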
	mp->tx_slot ^= 0x10;

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static void mace_set_multicast(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;
	u32 crc;
	u8 maccc;
	unsigned long flags;

	local_irq_save(flags);
	maccc = mb->maccc;
	mb->maccc &= ~PROM;

	if (dev->flags & IFF_PROMISC) {
		mb->maccc |= PROM;
	} else {
		unsigned char multicast_filter[8];
		struct netdev_hw_addr *ha;

		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 8; i++) {
				multicast_filter[i] = 0xFF;
			}
		} else {
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0;
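			/*
			 * Hash each address: the top 6 bits of the little-endian
			 * CRC select one of the 64 bits in the logical address
			 * filter.
			 */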
			netdev_for_each_mc_addr(ha, dev) {
				crc = ether_crc_le(6, ha->addr);
				/* bit number in multicast_filter */
				i = crc >> 26;
				multicast_filter[i >> 3] |= 1 << (i & 7);
			}
		}

		if (mp->chipid == BROKEN_ADDRCHG_REV)
			mb->iac = LOGADDR;
		else {
			mb->iac = ADDRCHG | LOGADDR;
			while ((mb->iac & ADDRCHG) != 0)
				;
		}
		for (i = 0; i < 8; ++i)
			mb->ladrf = multicast_filter[i];
		if (mp->chipid != BROKEN_ADDRCHG_REV)
			mb->iac = 0;
	}

	mb->maccc = maccc;
	local_irq_restore(flags);
}

static void mace_handle_misc_intrs(struct net_device *dev, int intr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	static int mace_babbles, mace_jabbers;

	if (intr & MPCO)
		dev->stats.rx_missed_errors += 256;
	dev->stats.rx_missed_errors += mb->mpc;   /* reading clears it */
	if (intr & RNTPCO)
		dev->stats.rx_length_errors += 256;
	dev->stats.rx_length_errors += mb->rntpc; /* reading clears it */
	if (intr & CERR)
		++dev->stats.tx_heartbeat_errors;
	if (intr & BABBLE)
		if (mace_babbles++ < 4)
			printk(KERN_DEBUG "macmace: babbling transmitter\n");
	if (intr & JABBER)
		if (mace_jabbers++ < 4)
			printk(KERN_DEBUG "macmace: jabbering transceiver\n");
}

static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int intr, fs;
	unsigned long flags;

	/* don't want the dma interrupt handler to fire */
	local_irq_save(flags);

	intr = mb->ir; /* read interrupt register */
	mace_handle_misc_intrs(dev, intr);

	if (intr & XMTINT) {
		fs = mb->xmtfs;
		if ((fs & XMTSV) == 0) {
			printk(KERN_ERR "macmace: xmtfs not valid! (fs=%x)\n", fs);
			mace_reset(dev);
			/*
			 * XXX mace likes to hang the machine after a xmtfs error.
			 * This is hard to reproduce, resetting *may* help
			 */
		}
		/* dma should have finished */
		if (!mp->tx_count) {
			printk(KERN_DEBUG "macmace: tx ring ran out? (fs=%x)\n", fs);
		}
		/* Update stats */
		if (fs & (UFLO|LCOL|LCAR|RTRY)) {
			++dev->stats.tx_errors;
			if (fs & LCAR)
				++dev->stats.tx_carrier_errors;
			else if (fs & (UFLO|LCOL|RTRY)) {
				++dev->stats.tx_aborted_errors;
				if (mb->xmtfs & UFLO) {
					dev->stats.tx_fifo_errors++;
					mace_txdma_reset(dev);
				}
			}
		}
	}

	if (mp->tx_count)
		netif_wake_queue(dev);

	local_irq_restore(flags);

	return IRQ_HANDLED;
}

static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;

	local_irq_save(flags);

	/* turn off both tx and rx and reset the chip */
	mb->maccc = 0;
	printk(KERN_ERR "macmace: transmit timeout - resetting\n");
	mace_txdma_reset(dev);
	mace_reset(dev);

	/* restart rx dma */
	mace_rxdma_reset(dev);

	mp->tx_count = N_TX_RING;
	netif_wake_queue(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;

	local_irq_restore(flags);
}

/*
 * Handle a newly arrived frame
 */

static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
{
	struct sk_buff *skb;
	unsigned int frame_status = mf->rcvsts;

	if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) {
		dev->stats.rx_errors++;
		if (frame_status & RS_OFLO)
			dev->stats.rx_fifo_errors++;
		if (frame_status & RS_CLSN)
			dev->stats.collisions++;
		if (frame_status & RS_FRAMERR)
			dev->stats.rx_frame_errors++;
		if (frame_status & RS_FCSERR)
			dev->stats.rx_crc_errors++;
	} else {
		unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8);

		skb = netdev_alloc_skb(dev, frame_length + 2);
		if (!skb) {
			dev->stats.rx_dropped++;
			return;
		}
		skb_reserve(skb, 2);
		skb_put_data(skb, mf->data, frame_length);

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += frame_length;
	}
}

/*
 * The PSC has passed us a DMA interrupt event.
 */

static irqreturn_t mace_dma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	int left, head;
	u16 status;
	u32 baka;

	/* Not sure what this does */

	while ((baka = psc_read_long(PSC_MYSTERY)) != psc_read_long(PSC_MYSTERY));
	if (!(baka & 0x60000000)) return IRQ_NONE;

	/*
	 * Process the read queue
	 */

	status = psc_read_word(PSC_ENETRD_CTL);

	if (status & 0x2000) {
		mace_rxdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x1100);

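		/*
		 * The length register was loaded with N_RX_RING and counts down
		 * as buffers fill, so head is the index just past the last
		 * completed receive buffer.
		 */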
		left = psc_read_long(PSC_ENETRD_LEN + mp->rx_slot);
		head = N_RX_RING - left;

		/* Loop through the ring buffer and process new packets */

		while (mp->rx_tail < head) {
			mace_dma_rx_frame(dev, (struct mace_frame*) (mp->rx_ring
				+ (mp->rx_tail * MACE_BUFF_SIZE)));
			mp->rx_tail++;
		}

		/* If we're out of buffers in this ring then switch to */
		/* the other set, otherwise just reactivate this one.  */

		if (!left) {
			mace_load_rxdma_base(dev, mp->rx_slot);
			mp->rx_slot ^= 0x10;
		} else {
			psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x9800);
		}
	}

	/*
	 * Process the write queue
	 */

	status = psc_read_word(PSC_ENETWR_CTL);

	if (status & 0x2000) {
		mace_txdma_reset(dev);
	} else if (status & 0x0100) {
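		/* A transmit completed: switch to the other command set and
		 * return the buffer to the free count so the queue can wake. */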
		psc_write_word(PSC_ENETWR_CMD + mp->tx_sloti, 0x0100);
		mp->tx_sloti ^= 0x10;
		mp->tx_count++;
	}
	return IRQ_HANDLED;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Macintosh MACE ethernet driver");
MODULE_ALIAS("platform:macmace");

static int mac_mace_device_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mace_data *mp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(IRQ_MAC_MACE_DMA, dev);

	dma_free_coherent(mp->device, N_RX_RING * MACE_BUFF_SIZE,
	                  mp->rx_ring, mp->rx_ring_phys);
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
	                  mp->tx_ring, mp->tx_ring_phys);

	free_netdev(dev);

	return 0;
}

static struct platform_driver mac_mace_driver = {
	.probe  = mace_probe,
	.remove = mac_mace_device_remove,
	.driver	= {
		.name	= mac_mace_string,
	},
};

module_platform_driver(mac_mace_driver);