xref: /kernel/linux/linux-5.10/drivers/net/wan/wanxl.c (revision 8c2ecf20)
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * wanXL serial card driver for Linux
4 * host part
5 *
6 * Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl>
7 *
8 * Status:
9 *   - Only DTE (external clock) support with NRZ and NRZI encodings
10 *   - wanXL100 will require minor driver modifications, no access to hw
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/slab.h>
18#include <linux/sched.h>
19#include <linux/types.h>
20#include <linux/fcntl.h>
21#include <linux/string.h>
22#include <linux/errno.h>
23#include <linux/init.h>
24#include <linux/interrupt.h>
25#include <linux/ioport.h>
26#include <linux/netdevice.h>
27#include <linux/hdlc.h>
28#include <linux/pci.h>
29#include <linux/dma-mapping.h>
30#include <linux/delay.h>
31#include <asm/io.h>
32
33#include "wanxl.h"
34
/* Driver banner printed at load time; * binds to the name per kernel style */
static const char *version = "wanXL serial card driver version: 0.48";
36
37#define PLX_CTL_RESET   0x40000000 /* adapter reset */
38
39#undef DEBUG_PKT
40#undef DEBUG_PCI
41
42/* MAILBOX #1 - PUTS COMMANDS */
43#define MBX1_CMD_ABORTJ 0x85000000 /* Abort and Jump */
44#ifdef __LITTLE_ENDIAN
45#define MBX1_CMD_BSWAP  0x8C000001 /* little-endian Byte Swap Mode */
46#else
47#define MBX1_CMD_BSWAP  0x8C000000 /* big-endian Byte Swap Mode */
48#endif
49
50/* MAILBOX #2 - DRAM SIZE */
51#define MBX2_MEMSZ_MASK 0xFFFF0000 /* PUTS Memory Size Register mask */
52
53
/* Host-side state for one physical serial port. */
struct port {
	struct net_device *dev;	/* HDLC network device bound to this port */
	struct card *card;	/* owning adapter */
	spinlock_t lock;	/* for wanxl_xmit */
	int node;		/* physical port #0 - 3 */
	unsigned int clock_type;	/* NOTE(review): appears unused here;
					 * clocking lives in port_status_t —
					 * verify before removing */
	int tx_in, tx_out;	/* TX ring: next to reclaim / next to fill */
	struct sk_buff *tx_skbs[TX_BUFFERS];	/* skbs owned by in-flight TX descriptors */
};
63
64
/* DMA-coherent block shared between host and card firmware —
 * field order and layout must match the firmware; do not reorder. */
struct card_status {
	desc_t rx_descs[RX_QUEUE_LENGTH];	/* common RX descriptor ring */
	port_status_t port_status[4];		/* per-port status/control area */
};
69
70
/* Per-adapter state. */
struct card {
	int n_ports;		/* 1, 2 or 4 ports */
	u8 irq;			/* assigned IRQ, 0 until request_irq() succeeds */

	u8 __iomem *plx;	/* PLX PCI9060 virtual base address */
	struct pci_dev *pdev;	/* for pci_name(pdev) */
	int rx_in;		/* next RX descriptor to service */
	struct sk_buff *rx_skbs[RX_QUEUE_LENGTH];	/* skbs mapped into the RX ring */
	struct card_status *status;	/* shared between host and card */
	dma_addr_t status_address;	/* bus address of *status, passed to firmware */
	struct port ports[];	/* 1 - 4 port structures follow */
};
83
84
85
86static inline struct port *dev_to_port(struct net_device *dev)
87{
88	return (struct port *)dev_to_hdlc(dev)->priv;
89}
90
91
92static inline port_status_t *get_status(struct port *port)
93{
94	return &port->card->status->port_status[port->node];
95}
96
97
#ifdef DEBUG_PCI
/* Debug wrapper for DMA mapping: shouts if a mapping ends above the 4 GB
 * boundary, since such addresses would indicate the DMA mask was ignored. */
static inline dma_addr_t pci_map_single_debug(struct pci_dev *pdev, void *ptr,
					      size_t size, int direction)
{
	dma_addr_t addr = dma_map_single(&pdev->dev, ptr, size, direction);
	if (addr + size > 0x100000000LL)
		pr_crit("%s: pci_map_single() returned memory at 0x%llx!\n",
			pci_name(pdev), (unsigned long long)addr);
	return addr;
}

/* Redirect all pci_map_single() uses in this file through the wrapper. */
#undef pci_map_single
#define pci_map_single pci_map_single_debug
#endif
112
113
114/* Cable and/or personality module change interrupt service */
115static inline void wanxl_cable_intr(struct port *port)
116{
117	u32 value = get_status(port)->cable;
118	int valid = 1;
119	const char *cable, *pm, *dte = "", *dsr = "", *dcd = "";
120
121	switch(value & 0x7) {
122	case STATUS_CABLE_V35: cable = "V.35"; break;
123	case STATUS_CABLE_X21: cable = "X.21"; break;
124	case STATUS_CABLE_V24: cable = "V.24"; break;
125	case STATUS_CABLE_EIA530: cable = "EIA530"; break;
126	case STATUS_CABLE_NONE: cable = "no"; break;
127	default: cable = "invalid";
128	}
129
130	switch((value >> STATUS_CABLE_PM_SHIFT) & 0x7) {
131	case STATUS_CABLE_V35: pm = "V.35"; break;
132	case STATUS_CABLE_X21: pm = "X.21"; break;
133	case STATUS_CABLE_V24: pm = "V.24"; break;
134	case STATUS_CABLE_EIA530: pm = "EIA530"; break;
135	case STATUS_CABLE_NONE: pm = "no personality"; valid = 0; break;
136	default: pm = "invalid personality"; valid = 0;
137	}
138
139	if (valid) {
140		if ((value & 7) == ((value >> STATUS_CABLE_PM_SHIFT) & 7)) {
141			dsr = (value & STATUS_CABLE_DSR) ? ", DSR ON" :
142				", DSR off";
143			dcd = (value & STATUS_CABLE_DCD) ? ", carrier ON" :
144				", carrier off";
145		}
146		dte = (value & STATUS_CABLE_DCE) ? " DCE" : " DTE";
147	}
148	netdev_info(port->dev, "%s%s module, %s cable%s%s\n",
149		    pm, dte, cable, dsr, dcd);
150
151	if (value & STATUS_CABLE_DCD)
152		netif_carrier_on(port->dev);
153	else
154		netif_carrier_off(port->dev);
155}
156
157
158
159/* Transmit complete interrupt service */
160static inline void wanxl_tx_intr(struct port *port)
161{
162	struct net_device *dev = port->dev;
163	while (1) {
164                desc_t *desc = &get_status(port)->tx_descs[port->tx_in];
165		struct sk_buff *skb = port->tx_skbs[port->tx_in];
166
167		switch (desc->stat) {
168		case PACKET_FULL:
169		case PACKET_EMPTY:
170			netif_wake_queue(dev);
171			return;
172
173		case PACKET_UNDERRUN:
174			dev->stats.tx_errors++;
175			dev->stats.tx_fifo_errors++;
176			break;
177
178		default:
179			dev->stats.tx_packets++;
180			dev->stats.tx_bytes += skb->len;
181		}
182                desc->stat = PACKET_EMPTY; /* Free descriptor */
183		dma_unmap_single(&port->card->pdev->dev, desc->address,
184				 skb->len, DMA_TO_DEVICE);
185		dev_consume_skb_irq(skb);
186                port->tx_in = (port->tx_in + 1) % TX_BUFFERS;
187        }
188}
189
190
191
192/* Receive complete interrupt service */
193static inline void wanxl_rx_intr(struct card *card)
194{
195	desc_t *desc;
196	while (desc = &card->status->rx_descs[card->rx_in],
197	       desc->stat != PACKET_EMPTY) {
198		if ((desc->stat & PACKET_PORT_MASK) > card->n_ports)
199			pr_crit("%s: received packet for nonexistent port\n",
200				pci_name(card->pdev));
201		else {
202			struct sk_buff *skb = card->rx_skbs[card->rx_in];
203			struct port *port = &card->ports[desc->stat &
204						    PACKET_PORT_MASK];
205			struct net_device *dev = port->dev;
206
207			if (!skb)
208				dev->stats.rx_dropped++;
209			else {
210				dma_unmap_single(&card->pdev->dev,
211						 desc->address, BUFFER_LENGTH,
212						 DMA_FROM_DEVICE);
213				skb_put(skb, desc->length);
214
215#ifdef DEBUG_PKT
216				printk(KERN_DEBUG "%s RX(%i):", dev->name,
217				       skb->len);
218				debug_frame(skb);
219#endif
220				dev->stats.rx_packets++;
221				dev->stats.rx_bytes += skb->len;
222				skb->protocol = hdlc_type_trans(skb, dev);
223				netif_rx(skb);
224				skb = NULL;
225			}
226
227			if (!skb) {
228				skb = dev_alloc_skb(BUFFER_LENGTH);
229				desc->address = skb ?
230					dma_map_single(&card->pdev->dev,
231						       skb->data,
232						       BUFFER_LENGTH,
233						       DMA_FROM_DEVICE) : 0;
234				card->rx_skbs[card->rx_in] = skb;
235			}
236		}
237		desc->stat = PACKET_EMPTY; /* Free descriptor */
238		card->rx_in = (card->rx_in + 1) % RX_QUEUE_LENGTH;
239	}
240}
241
242
243
244static irqreturn_t wanxl_intr(int irq, void* dev_id)
245{
246	struct card *card = dev_id;
247        int i;
248        u32 stat;
249        int handled = 0;
250
251
252        while((stat = readl(card->plx + PLX_DOORBELL_FROM_CARD)) != 0) {
253                handled = 1;
254		writel(stat, card->plx + PLX_DOORBELL_FROM_CARD);
255
256                for (i = 0; i < card->n_ports; i++) {
257			if (stat & (1 << (DOORBELL_FROM_CARD_TX_0 + i)))
258				wanxl_tx_intr(&card->ports[i]);
259			if (stat & (1 << (DOORBELL_FROM_CARD_CABLE_0 + i)))
260				wanxl_cable_intr(&card->ports[i]);
261		}
262		if (stat & (1 << DOORBELL_FROM_CARD_RX))
263			wanxl_rx_intr(card);
264        }
265
266        return IRQ_RETVAL(handled);
267}
268
269
270
271static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
272{
273	struct port *port = dev_to_port(dev);
274	desc_t *desc;
275
276        spin_lock(&port->lock);
277
278	desc = &get_status(port)->tx_descs[port->tx_out];
279        if (desc->stat != PACKET_EMPTY) {
280                /* should never happen - previous xmit should stop queue */
281#ifdef DEBUG_PKT
282                printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
283#endif
284		netif_stop_queue(dev);
285		spin_unlock(&port->lock);
286		return NETDEV_TX_BUSY;       /* request packet to be queued */
287	}
288
289#ifdef DEBUG_PKT
290	printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
291	debug_frame(skb);
292#endif
293
294	port->tx_skbs[port->tx_out] = skb;
295	desc->address = dma_map_single(&port->card->pdev->dev, skb->data,
296				       skb->len, DMA_TO_DEVICE);
297	desc->length = skb->len;
298	desc->stat = PACKET_FULL;
299	writel(1 << (DOORBELL_TO_CARD_TX_0 + port->node),
300	       port->card->plx + PLX_DOORBELL_TO_CARD);
301
302	port->tx_out = (port->tx_out + 1) % TX_BUFFERS;
303
304	if (get_status(port)->tx_descs[port->tx_out].stat != PACKET_EMPTY) {
305		netif_stop_queue(dev);
306#ifdef DEBUG_PKT
307		printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
308#endif
309	}
310
311	spin_unlock(&port->lock);
312	return NETDEV_TX_OK;
313}
314
315
316
317static int wanxl_attach(struct net_device *dev, unsigned short encoding,
318			unsigned short parity)
319{
320	struct port *port = dev_to_port(dev);
321
322	if (encoding != ENCODING_NRZ &&
323	    encoding != ENCODING_NRZI)
324		return -EINVAL;
325
326	if (parity != PARITY_NONE &&
327	    parity != PARITY_CRC32_PR1_CCITT &&
328	    parity != PARITY_CRC16_PR1_CCITT &&
329	    parity != PARITY_CRC32_PR0_CCITT &&
330	    parity != PARITY_CRC16_PR0_CCITT)
331		return -EINVAL;
332
333	get_status(port)->encoding = encoding;
334	get_status(port)->parity = parity;
335	return 0;
336}
337
338
339
340static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
341{
342	const size_t size = sizeof(sync_serial_settings);
343	sync_serial_settings line;
344	struct port *port = dev_to_port(dev);
345
346	if (cmd != SIOCWANDEV)
347		return hdlc_ioctl(dev, ifr, cmd);
348
349	switch (ifr->ifr_settings.type) {
350	case IF_GET_IFACE:
351		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
352		if (ifr->ifr_settings.size < size) {
353			ifr->ifr_settings.size = size; /* data size wanted */
354			return -ENOBUFS;
355		}
356		memset(&line, 0, sizeof(line));
357		line.clock_type = get_status(port)->clocking;
358		line.clock_rate = 0;
359		line.loopback = 0;
360
361		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
362			return -EFAULT;
363		return 0;
364
365	case IF_IFACE_SYNC_SERIAL:
366		if (!capable(CAP_NET_ADMIN))
367			return -EPERM;
368		if (dev->flags & IFF_UP)
369			return -EBUSY;
370
371		if (copy_from_user(&line, ifr->ifr_settings.ifs_ifsu.sync,
372				   size))
373			return -EFAULT;
374
375		if (line.clock_type != CLOCK_EXT &&
376		    line.clock_type != CLOCK_TXFROMRX)
377			return -EINVAL; /* No such clock setting */
378
379		if (line.loopback != 0)
380			return -EINVAL;
381
382		get_status(port)->clocking = line.clock_type;
383		return 0;
384
385	default:
386		return hdlc_ioctl(dev, ifr, cmd);
387        }
388}
389
390
391
392static int wanxl_open(struct net_device *dev)
393{
394	struct port *port = dev_to_port(dev);
395	u8 __iomem *dbr = port->card->plx + PLX_DOORBELL_TO_CARD;
396	unsigned long timeout;
397	int i;
398
399	if (get_status(port)->open) {
400		netdev_err(dev, "port already open\n");
401		return -EIO;
402	}
403	if ((i = hdlc_open(dev)) != 0)
404		return i;
405
406	port->tx_in = port->tx_out = 0;
407	for (i = 0; i < TX_BUFFERS; i++)
408		get_status(port)->tx_descs[i].stat = PACKET_EMPTY;
409	/* signal the card */
410	writel(1 << (DOORBELL_TO_CARD_OPEN_0 + port->node), dbr);
411
412	timeout = jiffies + HZ;
413	do {
414		if (get_status(port)->open) {
415			netif_start_queue(dev);
416			return 0;
417		}
418	} while (time_after(timeout, jiffies));
419
420	netdev_err(dev, "unable to open port\n");
421	/* ask the card to close the port, should it be still alive */
422	writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node), dbr);
423	return -EFAULT;
424}
425
426
427
428static int wanxl_close(struct net_device *dev)
429{
430	struct port *port = dev_to_port(dev);
431	unsigned long timeout;
432	int i;
433
434	hdlc_close(dev);
435	/* signal the card */
436	writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node),
437	       port->card->plx + PLX_DOORBELL_TO_CARD);
438
439	timeout = jiffies + HZ;
440	do {
441		if (!get_status(port)->open)
442			break;
443	} while (time_after(timeout, jiffies));
444
445	if (get_status(port)->open)
446		netdev_err(dev, "unable to close port\n");
447
448	netif_stop_queue(dev);
449
450	for (i = 0; i < TX_BUFFERS; i++) {
451		desc_t *desc = &get_status(port)->tx_descs[i];
452
453		if (desc->stat != PACKET_EMPTY) {
454			desc->stat = PACKET_EMPTY;
455			dma_unmap_single(&port->card->pdev->dev,
456					 desc->address, port->tx_skbs[i]->len,
457					 DMA_TO_DEVICE);
458			dev_kfree_skb(port->tx_skbs[i]);
459		}
460	}
461	return 0;
462}
463
464
465
466static struct net_device_stats *wanxl_get_stats(struct net_device *dev)
467{
468	struct port *port = dev_to_port(dev);
469
470	dev->stats.rx_over_errors = get_status(port)->rx_overruns;
471	dev->stats.rx_frame_errors = get_status(port)->rx_frame_errors;
472	dev->stats.rx_errors = dev->stats.rx_over_errors +
473		dev->stats.rx_frame_errors;
474	return &dev->stats;
475}
476
477
478
479static int wanxl_puts_command(struct card *card, u32 cmd)
480{
481	unsigned long timeout = jiffies + 5 * HZ;
482
483	writel(cmd, card->plx + PLX_MAILBOX_1);
484	do {
485		if (readl(card->plx + PLX_MAILBOX_1) == 0)
486			return 0;
487
488		schedule();
489	}while (time_after(timeout, jiffies));
490
491	return -1;
492}
493
494
495
/* Hard-reset the adapter by pulsing the PLX reset bit.  Mailbox 0 is set
 * to a non-zero value first so the PUTS-complete poll in probe works.
 * The readl() calls flush posted PCI writes; sequence order matters. */
static void wanxl_reset(struct card *card)
{
	u32 old_value = readl(card->plx + PLX_CONTROL) & ~PLX_CTL_RESET;

	writel(0x80, card->plx + PLX_MAILBOX_0);
	writel(old_value | PLX_CTL_RESET, card->plx + PLX_CONTROL);
	readl(card->plx + PLX_CONTROL); /* wait for posted write */
	udelay(1);
	writel(old_value, card->plx + PLX_CONTROL);
	readl(card->plx + PLX_CONTROL); /* wait for posted write */
}
507
508
509
510static void wanxl_pci_remove_one(struct pci_dev *pdev)
511{
512	struct card *card = pci_get_drvdata(pdev);
513	int i;
514
515	for (i = 0; i < card->n_ports; i++) {
516		unregister_hdlc_device(card->ports[i].dev);
517		free_netdev(card->ports[i].dev);
518	}
519
520	/* unregister and free all host resources */
521	if (card->irq)
522		free_irq(card->irq, card);
523
524	wanxl_reset(card);
525
526	for (i = 0; i < RX_QUEUE_LENGTH; i++)
527		if (card->rx_skbs[i]) {
528			dma_unmap_single(&card->pdev->dev,
529					 card->status->rx_descs[i].address,
530					 BUFFER_LENGTH, DMA_FROM_DEVICE);
531			dev_kfree_skb(card->rx_skbs[i]);
532		}
533
534	if (card->plx)
535		iounmap(card->plx);
536
537	if (card->status)
538		dma_free_coherent(&pdev->dev, sizeof(struct card_status),
539				  card->status, card->status_address);
540
541	pci_release_regions(pdev);
542	pci_disable_device(pdev);
543	kfree(card);
544}
545
546
547#include "wanxlfw.inc"
548
/* Net-device callbacks shared by every wanXL port. */
static const struct net_device_ops wanxl_ops = {
	.ndo_open       = wanxl_open,
	.ndo_stop       = wanxl_close,
	.ndo_start_xmit = hdlc_start_xmit,	/* HDLC layer calls wanxl_xmit via attach */
	.ndo_do_ioctl   = wanxl_ioctl,
	.ndo_get_stats  = wanxl_get_stats,
};
556
/* Probe one wanXL adapter: map the PLX bridge, wait for the on-board
 * power-up self test (PUTS), upload the firmware to card RAM, start it,
 * then allocate and register one HDLC device per physical port.
 * All failure paths unwind through wanxl_pci_remove_one(), which
 * tolerates partially initialized state. */
static int wanxl_pci_init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	struct card *card;
	u32 ramsize, stat;
	unsigned long timeout;
	u32 plx_phy;		/* PLX PCI base address */
	u32 mem_phy;		/* memory PCI base addr */
	u8 __iomem *mem;	/* memory virtual base addr */
	int i, ports;

#ifndef MODULE
	pr_info_once("%s\n", version);
#endif

	i = pci_enable_device(pdev);
	if (i)
		return i;

	/* QUICC can only access first 256 MB of host RAM directly,
	   but PLX9060 DMA does 32-bits for actual packet data transfers */

	/* FIXME when PCI/DMA subsystems are fixed.
	   We set both dma_mask and consistent_dma_mask to 28 bits
	   and pray pci_alloc_consistent() will use this info. It should
	   work on most platforms */
	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(28)) ||
	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(28))) {
		pr_err("No usable DMA configuration\n");
		pci_disable_device(pdev);
		return -EIO;
	}

	i = pci_request_regions(pdev, "wanXL");
	if (i) {
		pci_disable_device(pdev);
		return i;
	}

	/* port count is fixed by the PCI device ID */
	switch (pdev->device) {
	case PCI_DEVICE_ID_SBE_WANXL100: ports = 1; break;
	case PCI_DEVICE_ID_SBE_WANXL200: ports = 2; break;
	default: ports = 4;
	}

	card = kzalloc(struct_size(card, ports, ports), GFP_KERNEL);
	if (card == NULL) {
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		return -ENOBUFS;
	}

	pci_set_drvdata(pdev, card);
	card->pdev = pdev;

	/* status block shared with the firmware, allocated under the
	 * 28-bit mask set above */
	card->status = dma_alloc_coherent(&pdev->dev,
					  sizeof(struct card_status),
					  &card->status_address, GFP_KERNEL);
	if (card->status == NULL) {
		wanxl_pci_remove_one(pdev);
		return -ENOBUFS;
	}

#ifdef DEBUG_PCI
	printk(KERN_DEBUG "wanXL %s: pci_alloc_consistent() returned memory"
	       " at 0x%LX\n", pci_name(pdev),
	       (unsigned long long)card->status_address);
#endif

	/* FIXME when PCI/DMA subsystems are fixed.
	   We set both dma_mask and consistent_dma_mask back to 32 bits
	   to indicate the card can do 32-bit DMA addressing */
	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		pr_err("No usable DMA configuration\n");
		wanxl_pci_remove_one(pdev);
		return -EIO;
	}

	/* set up PLX mapping */
	plx_phy = pci_resource_start(pdev, 0);

	card->plx = ioremap(plx_phy, 0x70);
	if (!card->plx) {
		pr_err("ioremap() failed\n");
		wanxl_pci_remove_one(pdev);
		return -EFAULT;
	}

#if RESET_WHILE_LOADING
	wanxl_reset(card);
#endif

	/* wait (up to 20 s) for the power-up self test to finish;
	 * mailbox 0 reads zero once PUTS has completed successfully */
	timeout = jiffies + 20 * HZ;
	while ((stat = readl(card->plx + PLX_MAILBOX_0)) != 0) {
		if (time_before(timeout, jiffies)) {
			pr_warn("%s: timeout waiting for PUTS to complete\n",
				pci_name(pdev));
			wanxl_pci_remove_one(pdev);
			return -ENODEV;
		}

		switch (stat & 0xC0) {
		case 0x00:	/* hmm - PUTS completed with non-zero code? */
		case 0x80:	/* PUTS still testing the hardware */
			break;

		default:
			/* NOTE(review): message masks with 0x30 while the
			 * switch tests 0xC0 — looks inconsistent; verify */
			pr_warn("%s: PUTS test 0x%X failed\n",
				pci_name(pdev), stat & 0x30);
			wanxl_pci_remove_one(pdev);
			return -ENODEV;
		}

		schedule();
	}

	/* get on-board memory size (PUTS detects no more than 4 MB) */
	ramsize = readl(card->plx + PLX_MAILBOX_2) & MBX2_MEMSZ_MASK;

	/* set up on-board RAM mapping */
	mem_phy = pci_resource_start(pdev, 2);


	/* sanity check the board's reported memory size */
	if (ramsize < BUFFERS_ADDR +
	    (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports) {
		pr_warn("%s: no enough on-board RAM (%u bytes detected, %u bytes required)\n",
			pci_name(pdev), ramsize,
			BUFFERS_ADDR +
			(TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports);
		wanxl_pci_remove_one(pdev);
		return -ENODEV;
	}

	/* make the card's byte order match the host before sharing data */
	if (wanxl_puts_command(card, MBX1_CMD_BSWAP)) {
		pr_warn("%s: unable to Set Byte Swap Mode\n", pci_name(pdev));
		wanxl_pci_remove_one(pdev);
		return -ENODEV;
	}

	/* pre-fill the shared RX ring; a failed allocation leaves a hole
	 * (NULL skb) that wanxl_rx_intr() will try to refill later */
	for (i = 0; i < RX_QUEUE_LENGTH; i++) {
		struct sk_buff *skb = dev_alloc_skb(BUFFER_LENGTH);
		card->rx_skbs[i] = skb;
		if (skb)
			card->status->rx_descs[i].address =
				dma_map_single(&card->pdev->dev, skb->data,
					       BUFFER_LENGTH, DMA_FROM_DEVICE);
	}

	/* copy the firmware image into card RAM, word by word */
	mem = ioremap(mem_phy, PDM_OFFSET + sizeof(firmware));
	if (!mem) {
		pr_err("ioremap() failed\n");
		wanxl_pci_remove_one(pdev);
		return -EFAULT;
	}

	for (i = 0; i < sizeof(firmware); i += 4)
		writel(ntohl(*(__be32*)(firmware + i)), mem + PDM_OFFSET + i);

	/* pass per-port status addresses and the shared block's bus
	 * address to the firmware via its parameter area */
	for (i = 0; i < ports; i++)
		writel(card->status_address +
		       (void *)&card->status->port_status[i] -
		       (void *)card->status, mem + PDM_OFFSET + 4 + i * 4);
	writel(card->status_address, mem + PDM_OFFSET + 20);
	writel(PDM_OFFSET, mem);
	iounmap(mem);

	writel(0, card->plx + PLX_MAILBOX_5);

	/* leave PUTS and jump into the freshly loaded firmware */
	if (wanxl_puts_command(card, MBX1_CMD_ABORTJ)) {
		pr_warn("%s: unable to Abort and Jump\n", pci_name(pdev));
		wanxl_pci_remove_one(pdev);
		return -ENODEV;
	}

	/* firmware signals readiness by writing non-zero to mailbox 5 */
	timeout = jiffies + 5 * HZ;
	do {
		if ((stat = readl(card->plx + PLX_MAILBOX_5)) != 0)
			break;
		schedule();
	} while (time_after(timeout, jiffies));

	if (!stat) {
		pr_warn("%s: timeout while initializing card firmware\n",
			pci_name(pdev));
		wanxl_pci_remove_one(pdev);
		return -ENODEV;
	}

#if DETECT_RAM
	ramsize = stat;	/* firmware reports the true RAM size */
#endif

	pr_info("%s: at 0x%X, %u KB of RAM at 0x%X, irq %u\n",
		pci_name(pdev), plx_phy, ramsize / 1024, mem_phy, pdev->irq);

	/* Allocate IRQ */
	if (request_irq(pdev->irq, wanxl_intr, IRQF_SHARED, "wanXL", card)) {
		pr_warn("%s: could not allocate IRQ%i\n",
			pci_name(pdev), pdev->irq);
		wanxl_pci_remove_one(pdev);
		return -EBUSY;
	}
	card->irq = pdev->irq;

	/* create and register one HDLC device per physical port;
	 * n_ports counts successes so remove_one() can unwind safely */
	for (i = 0; i < ports; i++) {
		hdlc_device *hdlc;
		struct port *port = &card->ports[i];
		struct net_device *dev = alloc_hdlcdev(port);
		if (!dev) {
			pr_err("%s: unable to allocate memory\n",
			       pci_name(pdev));
			wanxl_pci_remove_one(pdev);
			return -ENOMEM;
		}

		port->dev = dev;
		hdlc = dev_to_hdlc(dev);
		spin_lock_init(&port->lock);
		dev->tx_queue_len = 50;
		dev->netdev_ops = &wanxl_ops;
		hdlc->attach = wanxl_attach;
		hdlc->xmit = wanxl_xmit;
		port->card = card;
		port->node = i;
		get_status(port)->clocking = CLOCK_EXT;
		if (register_hdlc_device(dev)) {
			pr_err("%s: unable to register hdlc device\n",
			       pci_name(pdev));
			free_netdev(dev);
			wanxl_pci_remove_one(pdev);
			return -ENOBUFS;
		}
		card->n_ports++;
	}

	pr_info("%s: port", pci_name(pdev));
	for (i = 0; i < ports; i++)
		pr_cont("%s #%i: %s",
			i ? "," : "", i, card->ports[i].dev->name);
	pr_cont("\n");

	for (i = 0; i < ports; i++)
		wanxl_cable_intr(&card->ports[i]); /* get carrier status etc.*/

	return 0;
}
805
/* Supported adapters: 1-, 2- and 4-port wanXL variants. */
static const struct pci_device_id wanxl_pci_tbl[] = {
	{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID,
	  PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID,
	  PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL400, PCI_ANY_ID,
	  PCI_ANY_ID, 0, 0, 0 },
	{ 0, }
};
815
816
/* PCI driver glue. */
static struct pci_driver wanxl_pci_driver = {
	.name		= "wanXL",
	.id_table	= wanxl_pci_tbl,
	.probe		= wanxl_pci_init_one,
	.remove		= wanxl_pci_remove_one,
};
823
824
/* Module entry point: print the banner (modular builds) and register
 * the PCI driver. */
static int __init wanxl_init_module(void)
{
#ifdef MODULE
	pr_info("%s\n", version);
#endif
	return pci_register_driver(&wanxl_pci_driver);
}
832
/* Module exit point: unregister the PCI driver (per-card teardown is
 * done by wanxl_pci_remove_one). */
static void __exit wanxl_cleanup_module(void)
{
	pci_unregister_driver(&wanxl_pci_driver);
}
837
838
/* Module metadata and entry-point registration. */
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("SBE Inc. wanXL serial port driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, wanxl_pci_tbl);

module_init(wanxl_init_module);
module_exit(wanxl_cleanup_module);
846