1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Davicom DM9000 Fast Ethernet driver for Linux.
4 * Copyright (C) 1997 Sten Wang
5 *
6 * (C) Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
7 *
8 * Additional updates, Copyright:
9 * Ben Dooks <ben@simtec.co.uk>
10 * Sascha Hauer <s.hauer@pengutronix.de>
11 */
12
13 #include <linux/module.h>
14 #include <linux/ioport.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/interrupt.h>
18 #include <linux/skbuff.h>
19 #include <linux/spinlock.h>
20 #include <linux/crc32.h>
21 #include <linux/mii.h>
22 #include <linux/of.h>
23 #include <linux/of_net.h>
24 #include <linux/ethtool.h>
25 #include <linux/dm9000.h>
26 #include <linux/delay.h>
27 #include <linux/platform_device.h>
28 #include <linux/irq.h>
29 #include <linux/slab.h>
30 #include <linux/regulator/consumer.h>
31 #include <linux/gpio.h>
32 #include <linux/of_gpio.h>
33
34 #include <asm/delay.h>
35 #include <asm/irq.h>
36 #include <asm/io.h>
37
38 #include "dm9000.h"
39
40 /* Board/System/Debug information/definition ---------------- */
41
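/* Callers below OR a PHY register number into EPAR together with this
 * value, so 0x40 appears to be the internal PHY's address (1) placed in
 * the top bits of the EPAR register (hedged reading of the register use).
 */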
42 #define DM9000_PHY 0x40 /* PHY address 0x01 */
43
44 #define CARDNAME "dm9000"
45
46 /*
47 * Transmit timeout, default 5 seconds.
48 */
49 static int watchdog = 5000;
50 module_param(watchdog, int, 0400);
51 MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
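/* The value is in milliseconds; dm9000_probe() converts it with
 * msecs_to_jiffies() when setting ndev->watchdog_timeo.
 */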
52
53 /*
54 * Debug messages level
55 */
56 static int debug;
57 module_param(debug, int, 0644);
58 MODULE_PARM_DESC(debug, "dm9000 debug level (0-6)");
59
60 /* DM9000 register address locking.
61 *
62 * The DM9000 uses an address register to control where data written
63 * to the data register goes. This means that the address register
64 * must be preserved over interrupts or similar calls.
65 *
66 * During interrupt and other critical calls, a spinlock is used to
67 * protect the system, but the calls themselves save the address
68 * in the address register in case they are interrupting another
69 * access to the device.
70 *
71 * For general accesses a lock is provided so that calls which are
72 * allowed to sleep are serialised so that the address register does
73 * not need to be saved. This lock also serves to serialise access
74 * to the EEPROM and PHY access registers which are shared between
75 * these two devices.
76 */
77
78 /* The driver supports the original DM9000E, and now the two newer
79 * devices, DM9000A and DM9000B.
80 */
81
82 enum dm9000_type {
83 TYPE_DM9000E, /* original DM9000 */
84 TYPE_DM9000A,
85 TYPE_DM9000B
86 };
87
88 /* Structure/enum declaration ------------------------------- */
89 struct board_info {
90
91 void __iomem *io_addr; /* Register I/O base address */
92 void __iomem *io_data; /* Data I/O address */
93 u16 irq; /* IRQ */
94
95 u16 tx_pkt_cnt;
96 u16 queue_pkt_len;
97 u16 queue_start_addr;
98 u16 queue_ip_summed;
99 u16 dbug_cnt;
100 u8 io_mode; /* 0:word, 2:byte */
101 u8 phy_addr;
102 u8 imr_all;
103
104 unsigned int flags;
105 unsigned int in_timeout:1;
106 unsigned int in_suspend:1;
107 unsigned int wake_supported:1;
108
109 enum dm9000_type type;
110
111 void (*inblk)(void __iomem *port, void *data, int length);
112 void (*outblk)(void __iomem *port, void *data, int length);
113 void (*dumpblk)(void __iomem *port, int length);
114
115 struct device *dev; /* parent device */
116
117 struct resource *addr_res; /* resources found */
118 struct resource *data_res;
119 struct resource *addr_req; /* resources requested */
120 struct resource *data_req;
121
122 int irq_wake;
123
124 struct mutex addr_lock; /* phy and eeprom access lock */
125
126 struct delayed_work phy_poll;
127 struct net_device *ndev;
128
129 spinlock_t lock;
130
131 struct mii_if_info mii;
132 u32 msg_enable;
133 u32 wake_state;
134
135 int ip_summed;
136
137 struct regulator *power_supply;
138 };
139
140 /* debug code */
141
142 #define dm9000_dbg(db, lev, msg...) do { \
143 if ((lev) < debug) { \
144 dev_dbg(db->dev, msg); \
145 } \
146 } while (0)
147
148 static inline struct board_info *to_dm9000_board(struct net_device *dev)
149 {
150 return netdev_priv(dev);
151 }
152
153 /* DM9000 network board routine ---------------------------- */
154
155 /*
156 * Read a byte from I/O port
157 */
158 static u8
159 ior(struct board_info *db, int reg)
160 {
161 writeb(reg, db->io_addr);
162 return readb(db->io_data);
163 }
164
165 /*
166 * Write a byte to I/O port
167 */
168
169 static void
170 iow(struct board_info *db, int reg, int value)
171 {
172 writeb(reg, db->io_addr);
173 writeb(value, db->io_data);
174 }
175
176 static void
177 dm9000_reset(struct board_info *db)
178 {
179 dev_dbg(db->dev, "resetting device\n");
180
181 /* Reset DM9000, see DM9000 Application Notes V1.22 Jun 11, 2004 page 29
182 * The essential point is that we have to do a double reset, and the
183 * instruction is to set LBK into MAC internal loopback mode.
184 */
185 iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
186 udelay(100); /* Application note says at least 20 us */
187 if (ior(db, DM9000_NCR) & 1)
188 dev_err(db->dev, "dm9000 did not respond to first reset\n");
189
190 iow(db, DM9000_NCR, 0);
191 iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
192 udelay(100);
193 if (ior(db, DM9000_NCR) & 1)
194 dev_err(db->dev, "dm9000 did not respond to second reset\n");
195 }
196
197 /* routines for sending block to chip */
198
199 static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
200 {
201 iowrite8_rep(reg, data, count);
202 }
203
204 static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
205 {
206 iowrite16_rep(reg, data, (count+1) >> 1);
207 }
208
209 static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
210 {
211 iowrite32_rep(reg, data, (count+3) >> 2);
212 }
213
214 /* input block from chip to memory */
215
216 static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
217 {
218 ioread8_rep(reg, data, count);
219 }
220
221
222 static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
223 {
224 ioread16_rep(reg, data, (count+1) >> 1);
225 }
226
227 static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
228 {
229 ioread32_rep(reg, data, (count+3) >> 2);
230 }
231
232 /* dump block from chip to null */
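/* The dumpblk helpers read and discard 'count' bytes of packet data from
 * the chip's RX SRAM; dm9000_rx() uses them when a frame cannot be
 * delivered (bad status or failed skb allocation) but the chip's read
 * pointer still has to advance past it.
 */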
233
234 static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
235 {
236 int i;
237 int tmp;
238
239 for (i = 0; i < count; i++)
240 tmp = readb(reg);
241 }
242
243 static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
244 {
245 int i;
246 int tmp;
247
248 count = (count + 1) >> 1;
249
250 for (i = 0; i < count; i++)
251 tmp = readw(reg);
252 }
253
254 static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
255 {
256 int i;
257 int tmp;
258
259 count = (count + 3) >> 2;
260
261 for (i = 0; i < count; i++)
262 tmp = readl(reg);
263 }
264
265 /*
266 * Sleep, using msleep() normally, or mdelay() when we are suspending
267 * or handling a tx timeout and therefore must not schedule.
268 */
269 static void dm9000_msleep(struct board_info *db, unsigned int ms)
270 {
271 if (db->in_suspend || db->in_timeout)
272 mdelay(ms);
273 else
274 msleep(ms);
275 }
276
277 /* Read a word from phyxcer */
278 static int
279 dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
280 {
281 struct board_info *db = netdev_priv(dev);
282 unsigned long flags;
283 unsigned int reg_save;
284 int ret;
285
286 mutex_lock(&db->addr_lock);
287
288 spin_lock_irqsave(&db->lock, flags);
289
290 /* Save previous register address */
291 reg_save = readb(db->io_addr);
292
293 /* Fill the phyxcer register into REG_0C */
294 iow(db, DM9000_EPAR, DM9000_PHY | reg);
295
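/* EPCR_EPOS selects the PHY rather than the EEPROM as the target of the
 * EPCR command; the EEPROM helpers below issue the same commands without it.
 */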
296 /* Issue phyxcer read command */
297 iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);
298
299 writeb(reg_save, db->io_addr);
300 spin_unlock_irqrestore(&db->lock, flags);
301
302 dm9000_msleep(db, 1); /* Wait read complete */
303
304 spin_lock_irqsave(&db->lock, flags);
305 reg_save = readb(db->io_addr);
306
307 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
308
309 /* The read data keeps on REG_0D & REG_0E */
310 ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
311
312 /* restore the previous address */
313 writeb(reg_save, db->io_addr);
314 spin_unlock_irqrestore(&db->lock, flags);
315
316 mutex_unlock(&db->addr_lock);
317
318 dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
319 return ret;
320 }
321
322 /* Write a word to phyxcer */
323 static void
324 dm9000_phy_write(struct net_device *dev,
325 int phyaddr_unused, int reg, int value)
326 {
327 struct board_info *db = netdev_priv(dev);
328 unsigned long flags;
329 unsigned long reg_save;
330
331 dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
332 if (!db->in_timeout)
333 mutex_lock(&db->addr_lock);
334
335 spin_lock_irqsave(&db->lock, flags);
336
337 /* Save previous register address */
338 reg_save = readb(db->io_addr);
339
340 /* Fill the phyxcer register into REG_0C */
341 iow(db, DM9000_EPAR, DM9000_PHY | reg);
342
343 /* Fill the written data into REG_0D & REG_0E */
344 iow(db, DM9000_EPDRL, value);
345 iow(db, DM9000_EPDRH, value >> 8);
346
347 /* Issue phyxcer write command */
348 iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);
349
350 writeb(reg_save, db->io_addr);
351 spin_unlock_irqrestore(&db->lock, flags);
352
353 dm9000_msleep(db, 1); /* Wait write complete */
354
355 spin_lock_irqsave(&db->lock, flags);
356 reg_save = readb(db->io_addr);
357
358 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
359
360 /* restore the previous address */
361 writeb(reg_save, db->io_addr);
362
363 spin_unlock_irqrestore(&db->lock, flags);
364 if (!db->in_timeout)
365 mutex_unlock(&db->addr_lock);
366 }
367
368 /* dm9000_set_io
369 *
370 * select the specified set of io routines to use with the
371 * device
372 */
373
374 static void dm9000_set_io(struct board_info *db, int byte_width)
375 {
376 /* use the size of the data resource to work out what IO
377 * routines we want to use
378 */
379
380 switch (byte_width) {
381 case 1:
382 db->dumpblk = dm9000_dumpblk_8bit;
383 db->outblk = dm9000_outblk_8bit;
384 db->inblk = dm9000_inblk_8bit;
385 break;
386
387
388 case 3:
389 dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n");
390 fallthrough;
391 case 2:
392 db->dumpblk = dm9000_dumpblk_16bit;
393 db->outblk = dm9000_outblk_16bit;
394 db->inblk = dm9000_inblk_16bit;
395 break;
396
397 case 4:
398 default:
399 db->dumpblk = dm9000_dumpblk_32bit;
400 db->outblk = dm9000_outblk_32bit;
401 db->inblk = dm9000_inblk_32bit;
402 break;
403 }
404 }
405
406 static void dm9000_schedule_poll(struct board_info *db)
407 {
408 if (db->type == TYPE_DM9000E)
409 schedule_delayed_work(&db->phy_poll, HZ * 2);
410 }
411
412 static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
413 {
414 struct board_info *dm = to_dm9000_board(dev);
415
416 if (!netif_running(dev))
417 return -EINVAL;
418
419 return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
420 }
421
422 static unsigned int
423 dm9000_read_locked(struct board_info *db, int reg)
424 {
425 unsigned long flags;
426 unsigned int ret;
427
428 spin_lock_irqsave(&db->lock, flags);
429 ret = ior(db, reg);
430 spin_unlock_irqrestore(&db->lock, flags);
431
432 return ret;
433 }
434
435 static int dm9000_wait_eeprom(struct board_info *db)
436 {
437 unsigned int status;
438 int timeout = 8; /* wait max 8msec */
439
440 /* The DM9000 data sheets say we should be able to
441 * poll the ERRE bit in EPCR to wait for the EEPROM
442 * operation. From testing several chips, this bit
443 * does not seem to work.
444 *
445 * We attempt to use the bit, but fall back to the
446 * timeout (which is why we do not return an error
447 * on expiry) to say that the EEPROM operation has
448 * completed.
449 */
450
451 while (1) {
452 status = dm9000_read_locked(db, DM9000_EPCR);
453
454 if ((status & EPCR_ERRE) == 0)
455 break;
456
457 msleep(1);
458
459 if (timeout-- < 0) {
460 dev_dbg(db->dev, "timeout waiting EEPROM\n");
461 break;
462 }
463 }
464
465 return 0;
466 }
467
468 /*
469 * Read a word data from EEPROM
470 */
471 static void
472 dm9000_read_eeprom(struct board_info *db, int offset, u8 *to)
473 {
474 unsigned long flags;
475
476 if (db->flags & DM9000_PLATF_NO_EEPROM) {
477 to[0] = 0xff;
478 to[1] = 0xff;
479 return;
480 }
481
482 mutex_lock(&db->addr_lock);
483
484 spin_lock_irqsave(&db->lock, flags);
485
486 iow(db, DM9000_EPAR, offset);
487 iow(db, DM9000_EPCR, EPCR_ERPRR);
488
489 spin_unlock_irqrestore(&db->lock, flags);
490
491 dm9000_wait_eeprom(db);
492
493 /* delay for at-least 150uS */
494 msleep(1);
495
496 spin_lock_irqsave(&db->lock, flags);
497
498 iow(db, DM9000_EPCR, 0x0);
499
500 to[0] = ior(db, DM9000_EPDRL);
501 to[1] = ior(db, DM9000_EPDRH);
502
503 spin_unlock_irqrestore(&db->lock, flags);
504
505 mutex_unlock(&db->addr_lock);
506 }
507
508 /*
509 * Write a word data to SROM
510 */
511 static void
512 dm9000_write_eeprom(struct board_info *db, int offset, u8 *data)
513 {
514 unsigned long flags;
515
516 if (db->flags & DM9000_PLATF_NO_EEPROM)
517 return;
518
519 mutex_lock(&db->addr_lock);
520
521 spin_lock_irqsave(&db->lock, flags);
522 iow(db, DM9000_EPAR, offset);
523 iow(db, DM9000_EPDRH, data[1]);
524 iow(db, DM9000_EPDRL, data[0]);
525 iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
526 spin_unlock_irqrestore(&db->lock, flags);
527
528 dm9000_wait_eeprom(db);
529
530 mdelay(1); /* wait at least 150uS to clear */
531
532 spin_lock_irqsave(&db->lock, flags);
533 iow(db, DM9000_EPCR, 0);
534 spin_unlock_irqrestore(&db->lock, flags);
535
536 mutex_unlock(&db->addr_lock);
537 }
538
539 /* ethtool ops */
540
541 static void dm9000_get_drvinfo(struct net_device *dev,
542 struct ethtool_drvinfo *info)
543 {
544 struct board_info *dm = to_dm9000_board(dev);
545
546 strlcpy(info->driver, CARDNAME, sizeof(info->driver));
547 strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
548 sizeof(info->bus_info));
549 }
550
551 static u32 dm9000_get_msglevel(struct net_device *dev)
552 {
553 struct board_info *dm = to_dm9000_board(dev);
554
555 return dm->msg_enable;
556 }
557
558 static void dm9000_set_msglevel(struct net_device *dev, u32 value)
559 {
560 struct board_info *dm = to_dm9000_board(dev);
561
562 dm->msg_enable = value;
563 }
564
565 static int dm9000_get_link_ksettings(struct net_device *dev,
566 struct ethtool_link_ksettings *cmd)
567 {
568 struct board_info *dm = to_dm9000_board(dev);
569
570 mii_ethtool_get_link_ksettings(&dm->mii, cmd);
571 return 0;
572 }
573
574 static int dm9000_set_link_ksettings(struct net_device *dev,
575 const struct ethtool_link_ksettings *cmd)
576 {
577 struct board_info *dm = to_dm9000_board(dev);
578
579 return mii_ethtool_set_link_ksettings(&dm->mii, cmd);
580 }
581
582 static int dm9000_nway_reset(struct net_device *dev)
583 {
584 struct board_info *dm = to_dm9000_board(dev);
585 return mii_nway_restart(&dm->mii);
586 }
587
588 static int dm9000_set_features(struct net_device *dev,
589 netdev_features_t features)
590 {
591 struct board_info *dm = to_dm9000_board(dev);
592 netdev_features_t changed = dev->features ^ features;
593 unsigned long flags;
594
595 if (!(changed & NETIF_F_RXCSUM))
596 return 0;
597
598 spin_lock_irqsave(&dm->lock, flags);
599 iow(dm, DM9000_RCSR, (features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
600 spin_unlock_irqrestore(&dm->lock, flags);
601
602 return 0;
603 }
604
605 static u32 dm9000_get_link(struct net_device *dev)
606 {
607 struct board_info *dm = to_dm9000_board(dev);
608 u32 ret;
609
610 if (dm->flags & DM9000_PLATF_EXT_PHY)
611 ret = mii_link_ok(&dm->mii);
612 else
613 ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0;
614
615 return ret;
616 }
617
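/* 0x444D394B is ASCII "DM9K"; ethtool EEPROM writes are rejected unless
 * userspace passes this magic back.
 */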
618 #define DM_EEPROM_MAGIC (0x444D394B)
619
620 static int dm9000_get_eeprom_len(struct net_device *dev)
621 {
622 return 128;
623 }
624
625 static int dm9000_get_eeprom(struct net_device *dev,
626 struct ethtool_eeprom *ee, u8 *data)
627 {
628 struct board_info *dm = to_dm9000_board(dev);
629 int offset = ee->offset;
630 int len = ee->len;
631 int i;
632
633 /* EEPROM access is aligned to two bytes */
634
635 if ((len & 1) != 0 || (offset & 1) != 0)
636 return -EINVAL;
637
638 if (dm->flags & DM9000_PLATF_NO_EEPROM)
639 return -ENOENT;
640
641 ee->magic = DM_EEPROM_MAGIC;
642
643 for (i = 0; i < len; i += 2)
644 dm9000_read_eeprom(dm, (offset + i) / 2, data + i);
645
646 return 0;
647 }
648
649 static int dm9000_set_eeprom(struct net_device *dev,
650 struct ethtool_eeprom *ee, u8 *data)
651 {
652 struct board_info *dm = to_dm9000_board(dev);
653 int offset = ee->offset;
654 int len = ee->len;
655 int done;
656
657 /* EEPROM access is aligned to two bytes */
658
659 if (dm->flags & DM9000_PLATF_NO_EEPROM)
660 return -ENOENT;
661
662 if (ee->magic != DM_EEPROM_MAGIC)
663 return -EINVAL;
664
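/* The EEPROM is addressed in 16-bit words; an odd offset or a trailing
 * odd byte is handled with a read-modify-write of the containing word.
 */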
665 while (len > 0) {
666 if (len & 1 || offset & 1) {
667 int which = offset & 1;
668 u8 tmp[2];
669
670 dm9000_read_eeprom(dm, offset / 2, tmp);
671 tmp[which] = *data;
672 dm9000_write_eeprom(dm, offset / 2, tmp);
673
674 done = 1;
675 } else {
676 dm9000_write_eeprom(dm, offset / 2, data);
677 done = 2;
678 }
679
680 data += done;
681 offset += done;
682 len -= done;
683 }
684
685 return 0;
686 }
687
688 static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
689 {
690 struct board_info *dm = to_dm9000_board(dev);
691
692 memset(w, 0, sizeof(struct ethtool_wolinfo));
693
694 /* note, we could probably support wake-phy too */
695 w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
696 w->wolopts = dm->wake_state;
697 }
698
699 static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
700 {
701 struct board_info *dm = to_dm9000_board(dev);
702 unsigned long flags;
703 u32 opts = w->wolopts;
704 u32 wcr = 0;
705
706 if (!dm->wake_supported)
707 return -EOPNOTSUPP;
708
709 if (opts & ~WAKE_MAGIC)
710 return -EINVAL;
711
712 if (opts & WAKE_MAGIC)
713 wcr |= WCR_MAGICEN;
714
715 mutex_lock(&dm->addr_lock);
716
717 spin_lock_irqsave(&dm->lock, flags);
718 iow(dm, DM9000_WCR, wcr);
719 spin_unlock_irqrestore(&dm->lock, flags);
720
721 mutex_unlock(&dm->addr_lock);
722
723 if (dm->wake_state != opts) {
724 /* change in wol state, update IRQ state */
725
726 if (!dm->wake_state)
727 irq_set_irq_wake(dm->irq_wake, 1);
728 else if (dm->wake_state && !opts)
729 irq_set_irq_wake(dm->irq_wake, 0);
730 }
731
732 dm->wake_state = opts;
733 return 0;
734 }
735
736 static const struct ethtool_ops dm9000_ethtool_ops = {
737 .get_drvinfo = dm9000_get_drvinfo,
738 .get_msglevel = dm9000_get_msglevel,
739 .set_msglevel = dm9000_set_msglevel,
740 .nway_reset = dm9000_nway_reset,
741 .get_link = dm9000_get_link,
742 .get_wol = dm9000_get_wol,
743 .set_wol = dm9000_set_wol,
744 .get_eeprom_len = dm9000_get_eeprom_len,
745 .get_eeprom = dm9000_get_eeprom,
746 .set_eeprom = dm9000_set_eeprom,
747 .get_link_ksettings = dm9000_get_link_ksettings,
748 .set_link_ksettings = dm9000_set_link_ksettings,
749 };
750
751 static void dm9000_show_carrier(struct board_info *db,
752 unsigned carrier, unsigned nsr)
753 {
754 int lpa;
755 struct net_device *ndev = db->ndev;
756 struct mii_if_info *mii = &db->mii;
757 unsigned ncr = dm9000_read_locked(db, DM9000_NCR);
758
759 if (carrier) {
760 lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
761 dev_info(db->dev,
762 "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n",
763 ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
764 (ncr & NCR_FDX) ? "full" : "half", lpa);
765 } else {
766 dev_info(db->dev, "%s: link down\n", ndev->name);
767 }
768 }
769
770 static void
771 dm9000_poll_work(struct work_struct *w)
772 {
773 struct delayed_work *dw = to_delayed_work(w);
774 struct board_info *db = container_of(dw, struct board_info, phy_poll);
775 struct net_device *ndev = db->ndev;
776
777 if (db->flags & DM9000_PLATF_SIMPLE_PHY &&
778 !(db->flags & DM9000_PLATF_EXT_PHY)) {
779 unsigned nsr = dm9000_read_locked(db, DM9000_NSR);
780 unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0;
781 unsigned new_carrier;
782
783 new_carrier = (nsr & NSR_LINKST) ? 1 : 0;
784
785 if (old_carrier != new_carrier) {
786 if (netif_msg_link(db))
787 dm9000_show_carrier(db, new_carrier, nsr);
788
789 if (!new_carrier)
790 netif_carrier_off(ndev);
791 else
792 netif_carrier_on(ndev);
793 }
794 } else
795 mii_check_media(&db->mii, netif_msg_link(db), 0);
796
797 if (netif_running(ndev))
798 dm9000_schedule_poll(db);
799 }
800
801 /* dm9000_release_board
802 *
803 * release a board, and any mapped resources
804 */
805
806 static void
807 dm9000_release_board(struct platform_device *pdev, struct board_info *db)
808 {
809 /* unmap our resources */
810
811 iounmap(db->io_addr);
812 iounmap(db->io_data);
813
814 /* release the resources */
815
816 if (db->data_req)
817 release_resource(db->data_req);
818 kfree(db->data_req);
819
820 if (db->addr_req)
821 release_resource(db->addr_req);
822 kfree(db->addr_req);
823 }
824
825 static unsigned char dm9000_type_to_char(enum dm9000_type type)
826 {
827 switch (type) {
828 case TYPE_DM9000E: return 'e';
829 case TYPE_DM9000A: return 'a';
830 case TYPE_DM9000B: return 'b';
831 }
832
833 return '?';
834 }
835
836 /*
837 * Set DM9000 multicast address
838 */
839 static void
840 dm9000_hash_table_unlocked(struct net_device *dev)
841 {
842 struct board_info *db = netdev_priv(dev);
843 struct netdev_hw_addr *ha;
844 int i, oft;
845 u32 hash_val;
846 u16 hash_table[4] = { 0, 0, 0, 0x8000 }; /* broadcast address */
847 u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
848
849 dm9000_dbg(db, 1, "entering %s\n", __func__);
850
851 for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
852 iow(db, oft, dev->dev_addr[i]);
853
854 if (dev->flags & IFF_PROMISC)
855 rcr |= RCR_PRMSC;
856
857 if (dev->flags & IFF_ALLMULTI)
858 rcr |= RCR_ALL;
859
860 /* the multicast address in Hash Table : 64 bits */
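/* Hash each multicast address with CRC-32; the low six bits of the CRC
 * index a bit in the 64-bit multicast filter (bit 63 is kept set above
 * so broadcast frames are always accepted).
 */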
861 netdev_for_each_mc_addr(ha, dev) {
862 hash_val = ether_crc_le(6, ha->addr) & 0x3f;
863 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
864 }
865
866 /* Write the hash table to MAC MD table */
867 for (i = 0, oft = DM9000_MAR; i < 4; i++) {
868 iow(db, oft++, hash_table[i]);
869 iow(db, oft++, hash_table[i] >> 8);
870 }
871
872 iow(db, DM9000_RCR, rcr);
873 }
874
875 static void
876 dm9000_hash_table(struct net_device *dev)
877 {
878 struct board_info *db = netdev_priv(dev);
879 unsigned long flags;
880
881 spin_lock_irqsave(&db->lock, flags);
882 dm9000_hash_table_unlocked(dev);
883 spin_unlock_irqrestore(&db->lock, flags);
884 }
885
886 static void
887 dm9000_mask_interrupts(struct board_info *db)
888 {
889 iow(db, DM9000_IMR, IMR_PAR);
890 }
891
892 static void
893 dm9000_unmask_interrupts(struct board_info *db)
894 {
895 iow(db, DM9000_IMR, db->imr_all);
896 }
897
898 /*
899 * Initialize dm9000 board
900 */
901 static void
902 dm9000_init_dm9000(struct net_device *dev)
903 {
904 struct board_info *db = netdev_priv(dev);
905 unsigned int imr;
906 unsigned int ncr;
907
908 dm9000_dbg(db, 1, "entering %s\n", __func__);
909
910 dm9000_reset(db);
911 dm9000_mask_interrupts(db);
912
913 /* I/O mode */
914 db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
915
916 /* Checksum mode */
917 if (dev->hw_features & NETIF_F_RXCSUM)
918 iow(db, DM9000_RCSR,
919 (dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
920
921 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
922 iow(db, DM9000_GPR, 0);
923
924 /* If we are dealing with DM9000B, some extra steps are required: a
925 * manual phy reset, and setting init params.
926 */
927 if (db->type == TYPE_DM9000B) {
928 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
929 dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM);
930 }
931
932 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
933
934 /* if wol is needed, then always set NCR_WAKEEN otherwise we end
935 * up dumping the wake events if we disable this. There is already
936 * a wake-mask in DM9000_WCR */
937 if (db->wake_supported)
938 ncr |= NCR_WAKEEN;
939
940 iow(db, DM9000_NCR, ncr);
941
942 /* Program operating register */
943 iow(db, DM9000_TCR, 0); /* TX Polling clear */
944 iow(db, DM9000_BPTR, 0x3f); /* Less 3Kb, 200us */
945 iow(db, DM9000_FCR, 0xff); /* Flow Control */
946 iow(db, DM9000_SMCR, 0); /* Special Mode */
947 /* clear TX status */
948 iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END);
949 iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */
950
951 /* Set address filter table */
952 dm9000_hash_table_unlocked(dev);
953
954 imr = IMR_PAR | IMR_PTM | IMR_PRM;
955 if (db->type != TYPE_DM9000E)
956 imr |= IMR_LNKCHNG;
957
958 db->imr_all = imr;
959
960 /* Init Driver variable */
961 db->tx_pkt_cnt = 0;
962 db->queue_pkt_len = 0;
963 netif_trans_update(dev);
964 }
965
966 /* Our watchdog timed out. Called by the networking layer */
967 static void dm9000_timeout(struct net_device *dev, unsigned int txqueue)
968 {
969 struct board_info *db = netdev_priv(dev);
970 u8 reg_save;
971 unsigned long flags;
972
973 /* Save previous register address */
974 spin_lock_irqsave(&db->lock, flags);
975 db->in_timeout = 1;
976 reg_save = readb(db->io_addr);
977
978 netif_stop_queue(dev);
979 dm9000_init_dm9000(dev);
980 dm9000_unmask_interrupts(db);
981 /* We can accept TX packets again */
982 netif_trans_update(dev); /* prevent tx timeout */
983 netif_wake_queue(dev);
984
985 /* Restore previous register address */
986 writeb(reg_save, db->io_addr);
987 db->in_timeout = 0;
988 spin_unlock_irqrestore(&db->lock, flags);
989 }
990
991 static void dm9000_send_packet(struct net_device *dev,
992 int ip_summed,
993 u16 pkt_len)
994 {
995 struct board_info *dm = to_dm9000_board(dev);
996
997 /* The DM9000 is not smart enough to leave fragmented packets alone. */
998 if (dm->ip_summed != ip_summed) {
999 if (ip_summed == CHECKSUM_NONE)
1000 iow(dm, DM9000_TCCR, 0);
1001 else
1002 iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP);
1003 dm->ip_summed = ip_summed;
1004 }
1005
1006 /* Set TX length to DM9000 */
1007 iow(dm, DM9000_TXPLL, pkt_len);
1008 iow(dm, DM9000_TXPLH, pkt_len >> 8);
1009
1010 /* Issue TX polling command */
1011 iow(dm, DM9000_TCR, TCR_TXREQ); /* Cleared after TX complete */
1012 }
1013
1014 /*
1015 * Hardware start transmission.
1016 * Send a packet to media from the upper layer.
1017 */
1018 static int
1019 dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
1020 {
1021 unsigned long flags;
1022 struct board_info *db = netdev_priv(dev);
1023
1024 dm9000_dbg(db, 3, "%s:\n", __func__);
1025
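/* The chip's TX SRAM is only used for two frames at a time here: one
 * being transmitted and one queued (see dm9000_tx_done()), so refuse a
 * third until a TX-done interrupt frees a slot.
 */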
1026 if (db->tx_pkt_cnt > 1)
1027 return NETDEV_TX_BUSY;
1028
1029 spin_lock_irqsave(&db->lock, flags);
1030
1031 /* Move data to DM9000 TX RAM */
1032 writeb(DM9000_MWCMD, db->io_addr);
1033
1034 (db->outblk)(db->io_data, skb->data, skb->len);
1035 dev->stats.tx_bytes += skb->len;
1036
1037 db->tx_pkt_cnt++;
1038 /* TX control: First packet immediately send, second packet queue */
1039 if (db->tx_pkt_cnt == 1) {
1040 dm9000_send_packet(dev, skb->ip_summed, skb->len);
1041 } else {
1042 /* Second packet */
1043 db->queue_pkt_len = skb->len;
1044 db->queue_ip_summed = skb->ip_summed;
1045 netif_stop_queue(dev);
1046 }
1047
1048 spin_unlock_irqrestore(&db->lock, flags);
1049
1050 /* free this SKB */
1051 dev_consume_skb_any(skb);
1052
1053 return NETDEV_TX_OK;
1054 }
1055
1056 /*
1057 * DM9000 interrupt handler
1058 * pass received packets to the upper layer, reap completed transmits
1059 */
1060
1061 static void dm9000_tx_done(struct net_device *dev, struct board_info *db)
1062 {
1063 int tx_status = ior(db, DM9000_NSR); /* Got TX status */
1064
1065 if (tx_status & (NSR_TX2END | NSR_TX1END)) {
1066 /* One packet sent complete */
1067 db->tx_pkt_cnt--;
1068 dev->stats.tx_packets++;
1069
1070 if (netif_msg_tx_done(db))
1071 dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);
1072
1073 /* Queue packet check & send */
1074 if (db->tx_pkt_cnt > 0)
1075 dm9000_send_packet(dev, db->queue_ip_summed,
1076 db->queue_pkt_len);
1077 netif_wake_queue(dev);
1078 }
1079 }
1080
1081 struct dm9000_rxhdr {
1082 u8 RxPktReady;
1083 u8 RxStatus;
1084 __le16 RxLen;
1085 } __packed;
1086
1087 /*
1088 * Received a packet and pass to upper layer
1089 */
1090 static void
1091 dm9000_rx(struct net_device *dev)
1092 {
1093 struct board_info *db = netdev_priv(dev);
1094 struct dm9000_rxhdr rxhdr;
1095 struct sk_buff *skb;
1096 u8 rxbyte, *rdptr;
1097 bool GoodPacket;
1098 int RxLen;
1099
1100 /* Check packet ready or not */
1101 do {
1102 ior(db, DM9000_MRCMDX); /* Dummy read */
1103
1104 /* Get most updated data */
1105 rxbyte = readb(db->io_data);
1106
1107 /* Status check: this byte must be 0 or 1 */
1108 if (rxbyte & DM9000_PKT_ERR) {
1109 dev_warn(db->dev, "status check fail: %d\n", rxbyte);
1110 iow(db, DM9000_RCR, 0x00); /* Stop Device */
1111 return;
1112 }
1113
1114 if (!(rxbyte & DM9000_PKT_RDY))
1115 return;
1116
1117 /* A packet ready now & Get status/length */
1118 GoodPacket = true;
1119 writeb(DM9000_MRCMD, db->io_addr);
1120
1121 (db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));
1122
1123 RxLen = le16_to_cpu(rxhdr.RxLen);
1124
1125 if (netif_msg_rx_status(db))
1126 dev_dbg(db->dev, "RX: status %02x, length %04x\n",
1127 rxhdr.RxStatus, RxLen);
1128
1129 /* Packet Status check */
1130 if (RxLen < 0x40) {
1131 GoodPacket = false;
1132 if (netif_msg_rx_err(db))
1133 dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
1134 }
1135
1136 if (RxLen > DM9000_PKT_MAX) {
1137 dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);
1138 }
1139
1140 /* rxhdr.RxStatus is identical to RSR register. */
1141 if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE |
1142 RSR_PLE | RSR_RWTO |
1143 RSR_LCS | RSR_RF)) {
1144 GoodPacket = false;
1145 if (rxhdr.RxStatus & RSR_FOE) {
1146 if (netif_msg_rx_err(db))
1147 dev_dbg(db->dev, "fifo error\n");
1148 dev->stats.rx_fifo_errors++;
1149 }
1150 if (rxhdr.RxStatus & RSR_CE) {
1151 if (netif_msg_rx_err(db))
1152 dev_dbg(db->dev, "crc error\n");
1153 dev->stats.rx_crc_errors++;
1154 }
1155 if (rxhdr.RxStatus & RSR_RF) {
1156 if (netif_msg_rx_err(db))
1157 dev_dbg(db->dev, "length error\n");
1158 dev->stats.rx_length_errors++;
1159 }
1160 }
1161
1162 /* Move data from DM9000 */
1163 if (GoodPacket &&
1164 ((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) {
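/* Reserve 2 bytes so the IP header is 4-byte aligned. RxLen includes the
 * 4-byte CRC: the full frame is read below, but only RxLen - 4 bytes are
 * accounted in the skb (the CRC ends up in the tailroom).
 */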
1165 skb_reserve(skb, 2);
1166 rdptr = skb_put(skb, RxLen - 4);
1167
1168 /* Read received packet from RX SRAM */
1169
1170 (db->inblk)(db->io_data, rdptr, RxLen);
1171 dev->stats.rx_bytes += RxLen;
1172
1173 /* Pass to upper layer */
1174 skb->protocol = eth_type_trans(skb, dev);
1175 if (dev->features & NETIF_F_RXCSUM) {
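/* Hedged reading of the DM9000A/B status byte: bits 7:5 appear to carry
 * per-protocol checksum-failure flags for the protocols flagged in bits
 * 4:2, so only a fully clean result is marked CHECKSUM_UNNECESSARY.
 */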
1176 if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
1177 skb->ip_summed = CHECKSUM_UNNECESSARY;
1178 else
1179 skb_checksum_none_assert(skb);
1180 }
1181 netif_rx(skb);
1182 dev->stats.rx_packets++;
1183
1184 } else {
1185 /* need to dump the packet's data */
1186
1187 (db->dumpblk)(db->io_data, RxLen);
1188 }
1189 } while (rxbyte & DM9000_PKT_RDY);
1190 }
1191
1192 static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
1193 {
1194 struct net_device *dev = dev_id;
1195 struct board_info *db = netdev_priv(dev);
1196 int int_status;
1197 unsigned long flags;
1198 u8 reg_save;
1199
1200 dm9000_dbg(db, 3, "entering %s\n", __func__);
1201
1202 /* A real interrupt coming */
1203
1204 /* holders of db->lock must always block IRQs */
1205 spin_lock_irqsave(&db->lock, flags);
1206
1207 /* Save previous register address */
1208 reg_save = readb(db->io_addr);
1209
1210 dm9000_mask_interrupts(db);
1211 /* Got DM9000 interrupt status */
1212 int_status = ior(db, DM9000_ISR); /* Got ISR */
1213 iow(db, DM9000_ISR, int_status); /* Clear ISR status */
1214
1215 if (netif_msg_intr(db))
1216 dev_dbg(db->dev, "interrupt status %02x\n", int_status);
1217
1218 /* Received the coming packet */
1219 if (int_status & ISR_PRS)
1220 dm9000_rx(dev);
1221
1222 /* Transmit Interrupt check */
1223 if (int_status & ISR_PTS)
1224 dm9000_tx_done(dev, db);
1225
1226 if (db->type != TYPE_DM9000E) {
1227 if (int_status & ISR_LNKCHNG) {
1228 /* fire a link-change request */
1229 schedule_delayed_work(&db->phy_poll, 1);
1230 }
1231 }
1232
1233 dm9000_unmask_interrupts(db);
1234 /* Restore previous register address */
1235 writeb(reg_save, db->io_addr);
1236
1237 spin_unlock_irqrestore(&db->lock, flags);
1238
1239 return IRQ_HANDLED;
1240 }
1241
1242 static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
1243 {
1244 struct net_device *dev = dev_id;
1245 struct board_info *db = netdev_priv(dev);
1246 unsigned long flags;
1247 unsigned nsr, wcr;
1248
1249 spin_lock_irqsave(&db->lock, flags);
1250
1251 nsr = ior(db, DM9000_NSR);
1252 wcr = ior(db, DM9000_WCR);
1253
1254 dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);
1255
1256 if (nsr & NSR_WAKEST) {
1257 /* clear the wake status so the event is not signalled again */
1258 iow(db, DM9000_NSR, NSR_WAKEST);
1259
1260 if (wcr & WCR_LINKST)
1261 dev_info(db->dev, "wake by link status change\n");
1262 if (wcr & WCR_SAMPLEST)
1263 dev_info(db->dev, "wake by sample packet\n");
1264 if (wcr & WCR_MAGICST)
1265 dev_info(db->dev, "wake by magic packet\n");
1266 if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
1267 dev_err(db->dev, "wake signalled with no reason? "
1268 "NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
1269 }
1270
1271 spin_unlock_irqrestore(&db->lock, flags);
1272
1273 return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
1274 }
1275
1276 #ifdef CONFIG_NET_POLL_CONTROLLER
1277 /*
1278 * Used by netconsole
1279 */
1280 static void dm9000_poll_controller(struct net_device *dev)
1281 {
1282 disable_irq(dev->irq);
1283 dm9000_interrupt(dev->irq, dev);
1284 enable_irq(dev->irq);
1285 }
1286 #endif
1287
1288 /*
1289 * Open the interface.
1290 * The interface is opened whenever "ifconfig" activates it.
1291 */
1292 static int
1293 dm9000_open(struct net_device *dev)
1294 {
1295 struct board_info *db = netdev_priv(dev);
1296 unsigned int irq_flags = irq_get_trigger_type(dev->irq);
1297
1298 if (netif_msg_ifup(db))
1299 dev_dbg(db->dev, "enabling %s\n", dev->name);
1300
1301 /* If there is no IRQ type specified, tell the user that this is a
1302 * problem
1303 */
1304 if (irq_flags == IRQF_TRIGGER_NONE)
1305 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
1306
1307 irq_flags |= IRQF_SHARED;
1308
1309 /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
1310 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
1311 mdelay(1); /* delay needed by DM9000B */
1312
1313 /* Initialize DM9000 board */
1314 dm9000_init_dm9000(dev);
1315
1316 if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev))
1317 return -EAGAIN;
1318 /* Now that we have an interrupt handler hooked up we can unmask
1319 * our interrupts
1320 */
1321 dm9000_unmask_interrupts(db);
1322
1323 /* Init driver variable */
1324 db->dbug_cnt = 0;
1325
1326 mii_check_media(&db->mii, netif_msg_link(db), 1);
1327 netif_start_queue(dev);
1328
1329 /* Poll initial link status */
1330 schedule_delayed_work(&db->phy_poll, 1);
1331
1332 return 0;
1333 }
1334
1335 static void
1336 dm9000_shutdown(struct net_device *dev)
1337 {
1338 struct board_info *db = netdev_priv(dev);
1339
1340 /* RESET device */
1341 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
1342 iow(db, DM9000_GPR, 0x01); /* Power-Down PHY */
1343 dm9000_mask_interrupts(db);
1344 iow(db, DM9000_RCR, 0x00); /* Disable RX */
1345 }
1346
1347 /*
1348 * Stop the interface.
1349 * The interface is stopped when it is brought down.
1350 */
1351 static int
1352 dm9000_stop(struct net_device *ndev)
1353 {
1354 struct board_info *db = netdev_priv(ndev);
1355
1356 if (netif_msg_ifdown(db))
1357 dev_dbg(db->dev, "shutting down %s\n", ndev->name);
1358
1359 cancel_delayed_work_sync(&db->phy_poll);
1360
1361 netif_stop_queue(ndev);
1362 netif_carrier_off(ndev);
1363
1364 /* free interrupt */
1365 free_irq(ndev->irq, ndev);
1366
1367 dm9000_shutdown(ndev);
1368
1369 return 0;
1370 }
1371
1372 static const struct net_device_ops dm9000_netdev_ops = {
1373 .ndo_open = dm9000_open,
1374 .ndo_stop = dm9000_stop,
1375 .ndo_start_xmit = dm9000_start_xmit,
1376 .ndo_tx_timeout = dm9000_timeout,
1377 .ndo_set_rx_mode = dm9000_hash_table,
1378 .ndo_do_ioctl = dm9000_ioctl,
1379 .ndo_set_features = dm9000_set_features,
1380 .ndo_validate_addr = eth_validate_addr,
1381 .ndo_set_mac_address = eth_mac_addr,
1382 #ifdef CONFIG_NET_POLL_CONTROLLER
1383 .ndo_poll_controller = dm9000_poll_controller,
1384 #endif
1385 };
1386
1387 static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
1388 {
1389 struct dm9000_plat_data *pdata;
1390 struct device_node *np = dev->of_node;
1391 const void *mac_addr;
1392
1393 if (!IS_ENABLED(CONFIG_OF) || !np)
1394 return ERR_PTR(-ENXIO);
1395
1396 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1397 if (!pdata)
1398 return ERR_PTR(-ENOMEM);
1399
1400 if (of_find_property(np, "davicom,ext-phy", NULL))
1401 pdata->flags |= DM9000_PLATF_EXT_PHY;
1402 if (of_find_property(np, "davicom,no-eeprom", NULL))
1403 pdata->flags |= DM9000_PLATF_NO_EEPROM;
1404
1405 mac_addr = of_get_mac_address(np);
1406 if (!IS_ERR(mac_addr))
1407 ether_addr_copy(pdata->dev_addr, mac_addr);
1408 else if (PTR_ERR(mac_addr) == -EPROBE_DEFER)
1409 return ERR_CAST(mac_addr);
1410
1411 return pdata;
1412 }
1413
1414 /*
1415 * Search DM9000 board, allocate space and register it
1416 */
1417 static int
1418 dm9000_probe(struct platform_device *pdev)
1419 {
1420 struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev);
1421 struct board_info *db; /* Point a board information structure */
1422 struct net_device *ndev;
1423 struct device *dev = &pdev->dev;
1424 const unsigned char *mac_src;
1425 int ret = 0;
1426 int iosize;
1427 int i;
1428 u32 id_val;
1429 int reset_gpios;
1430 enum of_gpio_flags flags;
1431 struct regulator *power;
1432 bool inv_mac_addr = false;
1433
1434 power = devm_regulator_get(dev, "vcc");
1435 if (IS_ERR(power)) {
1436 if (PTR_ERR(power) == -EPROBE_DEFER)
1437 return -EPROBE_DEFER;
1438 dev_dbg(dev, "no regulator provided\n");
1439 } else {
1440 ret = regulator_enable(power);
1441 if (ret != 0) {
1442 dev_err(dev,
1443 "Failed to enable power regulator: %d\n", ret);
1444 return ret;
1445 }
1446 dev_dbg(dev, "regulator enabled\n");
1447 }
1448
1449 reset_gpios = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0,
1450 &flags);
1451 if (gpio_is_valid(reset_gpios)) {
1452 ret = devm_gpio_request_one(dev, reset_gpios, flags,
1453 "dm9000_reset");
1454 if (ret) {
1455 dev_err(dev, "failed to request reset gpio %d: %d\n",
1456 reset_gpios, ret);
1457 goto out_regulator_disable;
1458 }
1459
1460 /* According to manual PWRST# Low Period Min 1ms */
1461 msleep(2);
1462 gpio_set_value(reset_gpios, 1);
1463 /* Needs 3ms to read eeprom when PWRST is deasserted */
1464 msleep(4);
1465 }
1466
1467 if (!pdata) {
1468 pdata = dm9000_parse_dt(&pdev->dev);
1469 if (IS_ERR(pdata)) {
1470 ret = PTR_ERR(pdata);
1471 goto out_regulator_disable;
1472 }
1473 }
1474
1475 /* Init network device */
1476 ndev = alloc_etherdev(sizeof(struct board_info));
1477 if (!ndev) {
1478 ret = -ENOMEM;
1479 goto out_regulator_disable;
1480 }
1481
1482 SET_NETDEV_DEV(ndev, &pdev->dev);
1483
1484 dev_dbg(&pdev->dev, "dm9000_probe()\n");
1485
1486 /* setup board info structure */
1487 db = netdev_priv(ndev);
1488
1489 db->dev = &pdev->dev;
1490 db->ndev = ndev;
1491 if (!IS_ERR(power))
1492 db->power_supply = power;
1493
1494 spin_lock_init(&db->lock);
1495 mutex_init(&db->addr_lock);
1496
1497 INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);
1498
1499 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1500 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1501
1502 if (!db->addr_res || !db->data_res) {
1503 dev_err(db->dev, "insufficient resources addr=%p data=%p\n",
1504 db->addr_res, db->data_res);
1505 ret = -ENOENT;
1506 goto out;
1507 }
1508
1509 ndev->irq = platform_get_irq(pdev, 0);
1510 if (ndev->irq < 0) {
1511 ret = ndev->irq;
1512 goto out;
1513 }
1514
1515 db->irq_wake = platform_get_irq_optional(pdev, 1);
1516 if (db->irq_wake >= 0) {
1517 dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
1518
1519 ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
1520 IRQF_SHARED, dev_name(db->dev), ndev);
1521 if (ret) {
1522 dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
1523 } else {
1524
1525 /* test to see if irq is really wakeup capable */
1526 ret = irq_set_irq_wake(db->irq_wake, 1);
1527 if (ret) {
1528 dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
1529 db->irq_wake, ret);
1530 ret = 0;
1531 } else {
1532 irq_set_irq_wake(db->irq_wake, 0);
1533 db->wake_supported = 1;
1534 }
1535 }
1536 }
1537
1538 iosize = resource_size(db->addr_res);
1539 db->addr_req = request_mem_region(db->addr_res->start, iosize,
1540 pdev->name);
1541
1542 if (db->addr_req == NULL) {
1543 dev_err(db->dev, "cannot claim address reg area\n");
1544 ret = -EIO;
1545 goto out;
1546 }
1547
1548 db->io_addr = ioremap(db->addr_res->start, iosize);
1549
1550 if (db->io_addr == NULL) {
1551 dev_err(db->dev, "failed to ioremap address reg\n");
1552 ret = -EINVAL;
1553 goto out;
1554 }
1555
1556 iosize = resource_size(db->data_res);
1557 db->data_req = request_mem_region(db->data_res->start, iosize,
1558 pdev->name);
1559
1560 if (db->data_req == NULL) {
1561 dev_err(db->dev, "cannot claim data reg area\n");
1562 ret = -EIO;
1563 goto out;
1564 }
1565
1566 db->io_data = ioremap(db->data_res->start, iosize);
1567
1568 if (db->io_data == NULL) {
1569 dev_err(db->dev, "failed to ioremap data reg\n");
1570 ret = -EINVAL;
1571 goto out;
1572 }
1573
1574 /* fill in parameters for net-dev structure */
1575 ndev->base_addr = (unsigned long)db->io_addr;
1576
1577 /* ensure at least we have a default set of IO routines */
1578 dm9000_set_io(db, iosize);
1579
1580 /* check to see if anything is being over-ridden */
1581 if (pdata != NULL) {
1582 /* check to see if the driver wants to over-ride the
1583 * default IO width */
1584
1585 if (pdata->flags & DM9000_PLATF_8BITONLY)
1586 dm9000_set_io(db, 1);
1587
1588 if (pdata->flags & DM9000_PLATF_16BITONLY)
1589 dm9000_set_io(db, 2);
1590
1591 if (pdata->flags & DM9000_PLATF_32BITONLY)
1592 dm9000_set_io(db, 4);
1593
1594 /* check to see if there are any IO routine
1595 * over-rides */
1596
1597 if (pdata->inblk != NULL)
1598 db->inblk = pdata->inblk;
1599
1600 if (pdata->outblk != NULL)
1601 db->outblk = pdata->outblk;
1602
1603 if (pdata->dumpblk != NULL)
1604 db->dumpblk = pdata->dumpblk;
1605
1606 db->flags = pdata->flags;
1607 }
1608
1609 #ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL
1610 db->flags |= DM9000_PLATF_SIMPLE_PHY;
1611 #endif
1612
1613 dm9000_reset(db);
1614
1615 /* try multiple times, DM9000 sometimes gets the read wrong */
1616 for (i = 0; i < 8; i++) {
1617 id_val = ior(db, DM9000_VIDL);
1618 id_val |= (u32)ior(db, DM9000_VIDH) << 8;
1619 id_val |= (u32)ior(db, DM9000_PIDL) << 16;
1620 id_val |= (u32)ior(db, DM9000_PIDH) << 24;
1621
1622 if (id_val == DM9000_ID)
1623 break;
1624 dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
1625 }
1626
1627 if (id_val != DM9000_ID) {
1628 dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
1629 ret = -ENODEV;
1630 goto out;
1631 }
1632
1633 /* Identify what type of DM9000 we are working on */
1634
1635 id_val = ior(db, DM9000_CHIPR);
1636 dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val);
1637
1638 switch (id_val) {
1639 case CHIPR_DM9000A:
1640 db->type = TYPE_DM9000A;
1641 break;
1642 case CHIPR_DM9000B:
1643 db->type = TYPE_DM9000B;
1644 break;
1645 default:
1646 dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val);
1647 db->type = TYPE_DM9000E;
1648 }
1649
1650 /* dm9000a/b are capable of hardware checksum offload */
1651 if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
1652 ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
1653 ndev->features |= ndev->hw_features;
1654 }
1655
1656 /* from this point we assume that we have found a DM9000 */
1657
1658 ndev->netdev_ops = &dm9000_netdev_ops;
1659 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
1660 ndev->ethtool_ops = &dm9000_ethtool_ops;
1661
1662 db->msg_enable = NETIF_MSG_LINK;
1663 db->mii.phy_id_mask = 0x1f;
1664 db->mii.reg_num_mask = 0x1f;
1665 db->mii.force_media = 0;
1666 db->mii.full_duplex = 0;
1667 db->mii.dev = ndev;
1668 db->mii.mdio_read = dm9000_phy_read;
1669 db->mii.mdio_write = dm9000_phy_write;
1670
1671 mac_src = "eeprom";
1672
1673 /* try reading the node address from the attached EEPROM */
1674 for (i = 0; i < 6; i += 2)
1675 dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
1676
1677 if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
1678 mac_src = "platform data";
1679 memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN);
1680 }
1681
1682 if (!is_valid_ether_addr(ndev->dev_addr)) {
1683 /* try reading from mac */
1684
1685 mac_src = "chip";
1686 for (i = 0; i < 6; i++)
1687 ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
1688 }
1689
1690 if (!is_valid_ether_addr(ndev->dev_addr)) {
1691 inv_mac_addr = true;
1692 eth_hw_addr_random(ndev);
1693 mac_src = "random";
1694 }
1695
1696
1697 platform_set_drvdata(pdev, ndev);
1698 ret = register_netdev(ndev);
1699
1700 if (ret == 0) {
1701 if (inv_mac_addr)
1702 dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please set using ip\n",
1703 ndev->name);
1704 printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
1705 ndev->name, dm9000_type_to_char(db->type),
1706 db->io_addr, db->io_data, ndev->irq,
1707 ndev->dev_addr, mac_src);
1708 }
1709 return 0;
1710
1711 out:
1712 dev_err(db->dev, "not found (%d).\n", ret);
1713
1714 dm9000_release_board(pdev, db);
1715 free_netdev(ndev);
1716
1717 out_regulator_disable:
1718 if (!IS_ERR(power))
1719 regulator_disable(power);
1720
1721 return ret;
1722 }
1723
1724 static int
1725 dm9000_drv_suspend(struct device *dev)
1726 {
1727 struct net_device *ndev = dev_get_drvdata(dev);
1728 struct board_info *db;
1729
1730 if (ndev) {
1731 db = netdev_priv(ndev);
1732 db->in_suspend = 1;
1733
1734 if (!netif_running(ndev))
1735 return 0;
1736
1737 netif_device_detach(ndev);
1738
1739 /* only shutdown if not using WoL */
1740 if (!db->wake_state)
1741 dm9000_shutdown(ndev);
1742 }
1743 return 0;
1744 }
1745
1746 static int
1747 dm9000_drv_resume(struct device *dev)
1748 {
1749 struct net_device *ndev = dev_get_drvdata(dev);
1750 struct board_info *db = netdev_priv(ndev);
1751
1752 if (ndev) {
1753 if (netif_running(ndev)) {
1754 /* reset if we were not in wake mode, to ensure the device is
1755 * in a known state in case it was powered off */
1756 if (!db->wake_state) {
1757 dm9000_init_dm9000(ndev);
1758 dm9000_unmask_interrupts(db);
1759 }
1760
1761 netif_device_attach(ndev);
1762 }
1763
1764 db->in_suspend = 0;
1765 }
1766 return 0;
1767 }
1768
1769 static const struct dev_pm_ops dm9000_drv_pm_ops = {
1770 .suspend = dm9000_drv_suspend,
1771 .resume = dm9000_drv_resume,
1772 };
1773
1774 static int
1775 dm9000_drv_remove(struct platform_device *pdev)
1776 {
1777 struct net_device *ndev = platform_get_drvdata(pdev);
1778 struct board_info *dm = to_dm9000_board(ndev);
1779
1780 unregister_netdev(ndev);
1781 dm9000_release_board(pdev, dm);
1782 free_netdev(ndev); /* free device structure */
1783 if (dm->power_supply)
1784 regulator_disable(dm->power_supply);
1785
1786 dev_dbg(&pdev->dev, "released and freed device\n");
1787 return 0;
1788 }
1789
1790 #ifdef CONFIG_OF
1791 static const struct of_device_id dm9000_of_matches[] = {
1792 { .compatible = "davicom,dm9000", },
1793 { /* sentinel */ }
1794 };
1795 MODULE_DEVICE_TABLE(of, dm9000_of_matches);
1796 #endif
1797
1798 static struct platform_driver dm9000_driver = {
1799 .driver = {
1800 .name = "dm9000",
1801 .pm = &dm9000_drv_pm_ops,
1802 .of_match_table = of_match_ptr(dm9000_of_matches),
1803 },
1804 .probe = dm9000_probe,
1805 .remove = dm9000_drv_remove,
1806 };
1807
1808 module_platform_driver(dm9000_driver);
1809
1810 MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
1811 MODULE_DESCRIPTION("Davicom DM9000 network driver");
1812 MODULE_LICENSE("GPL");
1813 MODULE_ALIAS("platform:dm9000");
1814