// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
 * All rights reserved.  www.lanmedia.com
 * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This code is written by:
 * Andrew Stanley-Jones (asj@cban.com),
 * Rob Braun (bbraun@vix.com),
 * Michael Graff (explorer@vix.com) and
 * Matt Thomas (matt@3am-software.com).
 *
 * With help by:
 * David Boggs
 * Ron Crane
 * Alan Cox
 *
 * Driver for the LanMedia LMC5200, LMC5245, LMC1000 and LMC1200 cards.
 *
 * To control link-specific options, lmcctl is required.
 * It can be obtained from ftp.lanmedia.com.
 *
 * Linux driver notes:
 * Linux uses the device struct lmc_private to pass private information
 * around.
 *
 * The initialization portion of this driver consists of the lmc_reset()
 * and lmc_dec_reset() functions, the LED controls and the lmc_initcsrs()
 * function.
 *
 * The watchdog function runs every second and checks that we still have
 * link and that the timing source is what we expected it to be.  If link
 * is lost, the interface is marked down and we can no longer transmit.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hdlc.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/inet.h>
#include <linux/bitops.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/uaccess.h>
//#include <asm/spinlock.h>

#define DRIVER_MAJOR_VERSION     1
#define DRIVER_MINOR_VERSION    34
#define DRIVER_SUB_VERSION       0

#define DRIVER_VERSION ((DRIVER_MAJOR_VERSION << 8) + DRIVER_MINOR_VERSION)

#include "lmc.h"
#include "lmc_var.h"
#include "lmc_ioctl.h"
#include "lmc_debug.h"
#include "lmc_proto.h"

static int LMC_PKT_BUF_SZ = 1542;

static const struct pci_device_id lmc_pci_tbl[] = {
    { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
      PCI_VENDOR_ID_LMC, PCI_ANY_ID },
    { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
      PCI_ANY_ID, PCI_VENDOR_ID_LMC },
    { 0 }
};

MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
MODULE_LICENSE("GPL v2");


static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
                                  struct net_device *dev);
static int lmc_rx(struct net_device *dev);
static int lmc_open(struct net_device *dev);
static int lmc_close(struct net_device *dev);
static struct net_device_stats *lmc_get_stats(struct net_device *dev);
static irqreturn_t lmc_interrupt(int irq, void *dev_instance);
static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, size_t csr_size);
static void lmc_softreset(lmc_softc_t * const);
static void lmc_running_reset(struct net_device *dev);
static int lmc_ifdown(struct net_device * const);
static void lmc_watchdog(struct timer_list *t);
static void lmc_reset(lmc_softc_t * const sc);
static void lmc_dec_reset(lmc_softc_t * const sc);
static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue);

/*
 * Linux reserves 16 device-specific ioctls.  We call them
 * LMCIOC* to control various bits of our world.
 */
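/*
 * A minimal user-space sketch of how these are reached (they come in
 * through the ndo_do_ioctl hook wired up in lmc_ops below; assumes the
 * LMCIOCGINFO number and lmc_ctl_t layout from lmc_ioctl.h and an open
 * socket fd):
 *
 *	struct ifreq ifr;
 *	lmc_ctl_t ctl;
 *
 *	strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ);
 *	ifr.ifr_data = (char *)&ctl;
 *	ioctl(fd, LMCIOCGINFO, &ifr);
 */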
int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
{
    lmc_softc_t *sc = dev_to_sc(dev);
    lmc_ctl_t ctl;
    int ret = -EOPNOTSUPP;
    u16 regVal;
    unsigned long flags;

    /*
     * Most of these operations touch shared driver state, so
     * interrupts are disabled (spin_lock_irqsave) while we poke
     * the hardware.
     */

    switch (cmd) {
    /*
     * Return current driver state.  Since we keep this up
     * to date internally, just copy this out to the user.
     */
    case LMCIOCGINFO: /*fold01*/
        if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof(lmc_ctl_t)))
            ret = -EFAULT;
        else
            ret = 0;
        break;

    case LMCIOCSINFO: /*fold01*/
        if (!capable(CAP_NET_ADMIN)) {
            ret = -EPERM;
            break;
        }

        if (dev->flags & IFF_UP) {
            ret = -EBUSY;
            break;
        }

        if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
            ret = -EFAULT;
            break;
        }

        spin_lock_irqsave(&sc->lmc_lock, flags);
        sc->lmc_media->set_status(sc, &ctl);

        if (ctl.crc_length != sc->ictl.crc_length) {
            sc->lmc_media->set_crc_length(sc, ctl.crc_length);
            if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16)
                sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
            else
                sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;
        }
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

        ret = 0;
        break;

    case LMCIOCIFTYPE: /*fold01*/
        {
            u16 old_type = sc->if_type;
            u16 new_type;

            if (!capable(CAP_NET_ADMIN)) {
                ret = -EPERM;
                break;
            }

            if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u16))) {
                ret = -EFAULT;
                break;
            }

            if (new_type == old_type) {
                ret = 0;
                break;	/* no change */
            }

            spin_lock_irqsave(&sc->lmc_lock, flags);
            lmc_proto_close(sc);

            sc->if_type = new_type;
            lmc_proto_attach(sc);
            ret = lmc_proto_open(sc);
            spin_unlock_irqrestore(&sc->lmc_lock, flags);
            break;
        }

    case LMCIOCGETXINFO: /*fold01*/
        spin_lock_irqsave(&sc->lmc_lock, flags);
        sc->lmc_xinfo.Magic0 = 0xBEEFCAFE;

        sc->lmc_xinfo.PciCardType = sc->lmc_cardtype;
        sc->lmc_xinfo.PciSlotNumber = 0;
        sc->lmc_xinfo.DriverMajorVersion = DRIVER_MAJOR_VERSION;
        sc->lmc_xinfo.DriverMinorVersion = DRIVER_MINOR_VERSION;
        sc->lmc_xinfo.DriverSubVersion = DRIVER_SUB_VERSION;
        sc->lmc_xinfo.XilinxRevisionNumber =
            lmc_mii_readreg(sc, 0, 3) & 0xf;
        sc->lmc_xinfo.MaxFrameSize = LMC_PKT_BUF_SZ;
        sc->lmc_xinfo.link_status = sc->lmc_media->get_link_status(sc);
        sc->lmc_xinfo.mii_reg16 = lmc_mii_readreg(sc, 0, 16);
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

        sc->lmc_xinfo.Magic1 = 0xDEADBEEF;

        if (copy_to_user(ifr->ifr_data, &sc->lmc_xinfo,
                         sizeof(struct lmc_xinfo)))
            ret = -EFAULT;
        else
            ret = 0;

        break;

    case LMCIOCGETLMCSTATS:
        spin_lock_irqsave(&sc->lmc_lock, flags);
        if (sc->lmc_cardtype == LMC_CARDTYPE_T1) {
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB);
            sc->extra_stats.framingBitErrorCount +=
                lmc_mii_readreg(sc, 0, 18) & 0xff;
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB);
            sc->extra_stats.framingBitErrorCount +=
                (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB);
            sc->extra_stats.lineCodeViolationCount +=
                lmc_mii_readreg(sc, 0, 18) & 0xff;
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB);
            sc->extra_stats.lineCodeViolationCount +=
                (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
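            /*
             * The T1 framer registers are reached indirectly: write
             * the register index to MII register 17, then read the
             * value back through MII register 18.
             */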
            lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR);
            regVal = lmc_mii_readreg(sc, 0, 18) & 0xff;

            sc->extra_stats.lossOfFrameCount +=
                (regVal & T1FRAMER_LOF_MASK) >> 4;
            sc->extra_stats.changeOfFrameAlignmentCount +=
                (regVal & T1FRAMER_COFA_MASK) >> 2;
            sc->extra_stats.severelyErroredFrameCount +=
                regVal & T1FRAMER_SEF_MASK;
        }
        spin_unlock_irqrestore(&sc->lmc_lock, flags);
        if (copy_to_user(ifr->ifr_data, &sc->lmc_device->stats,
                         sizeof(sc->lmc_device->stats)) ||
            copy_to_user(ifr->ifr_data + sizeof(sc->lmc_device->stats),
                         &sc->extra_stats, sizeof(sc->extra_stats)))
            ret = -EFAULT;
        else
            ret = 0;
        break;

    case LMCIOCCLEARLMCSTATS:
        if (!capable(CAP_NET_ADMIN)) {
            ret = -EPERM;
            break;
        }

        spin_lock_irqsave(&sc->lmc_lock, flags);
        memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats));
        memset(&sc->extra_stats, 0, sizeof(sc->extra_stats));
        sc->extra_stats.check = STATCHECK;
        sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
            sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
        sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
        spin_unlock_irqrestore(&sc->lmc_lock, flags);
        ret = 0;
        break;

    case LMCIOCSETCIRCUIT: /*fold01*/
        if (!capable(CAP_NET_ADMIN)) {
            ret = -EPERM;
            break;
        }

        if (dev->flags & IFF_UP) {
            ret = -EBUSY;
            break;
        }

        if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
            ret = -EFAULT;
            break;
        }
        spin_lock_irqsave(&sc->lmc_lock, flags);
        sc->lmc_media->set_circuit_type(sc, ctl.circuit_type);
        sc->ictl.circuit_type = ctl.circuit_type;
        spin_unlock_irqrestore(&sc->lmc_lock, flags);
        ret = 0;

        break;

    case LMCIOCRESET: /*fold01*/
        if (!capable(CAP_NET_ADMIN)) {
            ret = -EPERM;
            break;
        }

        spin_lock_irqsave(&sc->lmc_lock, flags);
        /* Reset driver and bring back to current state */
        printk(" REG16 before reset +%04x\n", lmc_mii_readreg(sc, 0, 16));
        lmc_running_reset(dev);
        printk(" REG16 after reset +%04x\n", lmc_mii_readreg(sc, 0, 16));

        LMC_EVENT_LOG(LMC_EVENT_FORCEDRESET, LMC_CSR_READ(sc, csr_status), lmc_mii_readreg(sc, 0, 16));
        spin_unlock_irqrestore(&sc->lmc_lock, flags);

        ret = 0;
        break;

#ifdef DEBUG
    case LMCIOCDUMPEVENTLOG:
        if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof(u32))) {
            ret = -EFAULT;
            break;
        }
        if (copy_to_user(ifr->ifr_data + sizeof(u32), lmcEventLogBuf,
                         sizeof(lmcEventLogBuf)))
            ret = -EFAULT;
        else
            ret = 0;

        break;
#endif /* DEBUG */
    case LMCIOCT1CONTROL: /*fold01*/
        if (sc->lmc_cardtype != LMC_CARDTYPE_T1) {
            ret = -EOPNOTSUPP;
            break;
        }
        break;
    case LMCIOCXILINX: /*fold01*/
        {
            struct lmc_xilinx_control xc; /*fold02*/

            if (!capable(CAP_NET_ADMIN)) {
                ret = -EPERM;
                break;
            }

            /*
             * Stop the transmitter while we restart the hardware
             */
            netif_stop_queue(dev);

            if (copy_from_user(&xc, ifr->ifr_data, sizeof(struct lmc_xilinx_control))) {
                ret = -EFAULT;
                break;
            }
            switch (xc.command) {
            case lmc_xilinx_reset: /*fold02*/
                {
                    u16 mii;
                    spin_lock_irqsave(&sc->lmc_lock, flags);
                    mii = lmc_mii_readreg(sc, 0, 16);

                    /*
                     * Make all of them 0 and make them inputs
                     */
                    lmc_gpio_mkinput(sc, 0xff);

                    /*
                     * make the reset pin an output
                     */
                    lmc_gpio_mkoutput(sc, LMC_GEP_RESET);
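                    /*
                     * Reset pulse: drive GEP_RESET low for more than
                     * 10 us, release it, then tristate the GPIOs again
                     * so we stop driving the Xilinx.
                     */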
                    /*
                     * RESET low to force configuration.  This also forces
                     * the transmitter clock to be internal, but we expect
                     * to reset that later anyway.
                     */
                    sc->lmc_gpio &= ~LMC_GEP_RESET;
                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                    /*
                     * hold for more than 10 microseconds
                     */
                    udelay(50);

                    sc->lmc_gpio |= LMC_GEP_RESET;
                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                    /*
                     * stop driving Xilinx-related signals
                     */
                    lmc_gpio_mkinput(sc, 0xff);

                    /* Reset the framer hardware */
                    sc->lmc_media->set_link_status(sc, 1);
                    sc->lmc_media->set_status(sc, NULL);
//                  lmc_softreset(sc);

                    {
                        int i;
                        for (i = 0; i < 5; i++) {
                            lmc_led_on(sc, LMC_DS3_LED0);
                            mdelay(100);
                            lmc_led_off(sc, LMC_DS3_LED0);
                            lmc_led_on(sc, LMC_DS3_LED1);
                            mdelay(100);
                            lmc_led_off(sc, LMC_DS3_LED1);
                            lmc_led_on(sc, LMC_DS3_LED3);
                            mdelay(100);
                            lmc_led_off(sc, LMC_DS3_LED3);
                            lmc_led_on(sc, LMC_DS3_LED2);
                            mdelay(100);
                            lmc_led_off(sc, LMC_DS3_LED2);
                        }
                    }
                    spin_unlock_irqrestore(&sc->lmc_lock, flags);

                    ret = 0x0;
                }
                break;
            case lmc_xilinx_load_prom: /*fold02*/
                {
                    u16 mii;
                    int timeout = 500000;
                    spin_lock_irqsave(&sc->lmc_lock, flags);
                    mii = lmc_mii_readreg(sc, 0, 16);

                    /*
                     * Make all of them 0 and make them inputs
                     */
                    lmc_gpio_mkinput(sc, 0xff);

                    /*
                     * make the reset pin an output
                     */
                    lmc_gpio_mkoutput(sc, LMC_GEP_DP | LMC_GEP_RESET);

                    /*
                     * RESET low to force configuration.  This also forces
                     * the transmitter clock to be internal, but we expect
                     * to reset that later anyway.
                     */
                    sc->lmc_gpio &= ~(LMC_GEP_RESET | LMC_GEP_DP);
                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                    /*
                     * hold for more than 10 microseconds
                     */
                    udelay(50);

                    sc->lmc_gpio |= LMC_GEP_DP | LMC_GEP_RESET;
                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                    /*
                     * busy wait for the chip to reset
                     */
                    while ((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
                           (timeout-- > 0))
                        cpu_relax();

                    /*
                     * stop driving Xilinx-related signals
                     */
                    lmc_gpio_mkinput(sc, 0xff);
                    spin_unlock_irqrestore(&sc->lmc_lock, flags);

                    ret = 0x0;

                    break;
                }

            case lmc_xilinx_load: /*fold02*/
                {
                    char *data;
                    int pos;
                    int timeout = 500000;

                    if (!xc.data) {
                        ret = -EINVAL;
                        break;
                    }

                    data = memdup_user(xc.data, xc.len);
                    if (IS_ERR(data)) {
                        ret = PTR_ERR(data);
                        break;
                    }

                    printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);

                    spin_lock_irqsave(&sc->lmc_lock, flags);
                    lmc_gpio_mkinput(sc, 0xff);

                    /*
                     * Clear the Xilinx and start programming from the DEC
                     */

                    /*
                     * Set output as:
                     * Reset: 0 (active)
                     * DP:    0 (active)
                     * Mode:  1
                     */
                    sc->lmc_gpio = 0x00;
                    sc->lmc_gpio &= ~LMC_GEP_DP;
                    sc->lmc_gpio &= ~LMC_GEP_RESET;
                    sc->lmc_gpio |= LMC_GEP_MODE;
                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                    lmc_gpio_mkoutput(sc, LMC_GEP_MODE | LMC_GEP_DP | LMC_GEP_RESET);

                    /*
                     * Wait at least 10 us (20 to be safe)
                     */
                    udelay(50);

                    /*
                     * Clear reset and activate programming lines
                     * Reset: Input
                     * DP:    Input
                     * Clock: Output
                     * Data:  Output
                     * Mode:  Output
                     */
                    lmc_gpio_mkinput(sc, LMC_GEP_DP | LMC_GEP_RESET);

                    /*
                     * Set MODE, DATA and CLK to 1
                     */
                    sc->lmc_gpio = 0x00;
                    sc->lmc_gpio |= LMC_GEP_MODE;
                    sc->lmc_gpio |= LMC_GEP_DATA;
                    sc->lmc_gpio |= LMC_GEP_CLK;
                    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

                    lmc_gpio_mkoutput(sc, LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_MODE);
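                    /*
                     * What follows: wait for INIT to go high, then clock
                     * the bitstream in one bit per iteration -- data is
                     * set up while GEP_CLK is low and latched by the
                     * part on the rising edge.
                     */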
                    /*
                     * busy wait for the chip to reset
                     */
                    while ((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
                           (timeout-- > 0))
                        cpu_relax();

                    printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear its memory\n", dev->name, 500000 - timeout);

                    for (pos = 0; pos < xc.len; pos++) {
                        switch (data[pos]) {
                        case 0:
                            sc->lmc_gpio &= ~LMC_GEP_DATA; /* Data is 0 */
                            break;
                        case 1:
                            sc->lmc_gpio |= LMC_GEP_DATA; /* Data is 1 */
                            break;
                        default:
                            printk(KERN_WARNING "%s: Bad data in Xilinx programming data at %d, got %d wanted 0 or 1\n", dev->name, pos, data[pos]);
                            sc->lmc_gpio |= LMC_GEP_DATA; /* Assume it's 1 */
                        }
                        sc->lmc_gpio &= ~LMC_GEP_CLK; /* Clock to zero */
                        sc->lmc_gpio |= LMC_GEP_MODE;
                        LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
                        udelay(1);

                        sc->lmc_gpio |= LMC_GEP_CLK; /* Put the clock back to one */
                        sc->lmc_gpio |= LMC_GEP_MODE;
                        LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
                        udelay(1);
                    }
                    if ((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0) {
                        printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (corrupted data)\n", dev->name);
                    } else if ((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_DP) == 0) {
                        printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (done)\n", dev->name);
                    } else {
                        printk(KERN_DEBUG "%s: Done reprogramming Xilinx, %d bits, good luck!\n", dev->name, pos);
                    }

                    lmc_gpio_mkinput(sc, 0xff);

                    sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
                    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

                    sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
                    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
                    spin_unlock_irqrestore(&sc->lmc_lock, flags);

                    kfree(data);

                    ret = 0;

                    break;
                }
            default: /*fold02*/
                ret = -EBADE;
                break;
            }

            netif_wake_queue(dev);
            sc->lmc_txfull = 0;
        }
        break;
    default: /*fold01*/
        /* If we don't know what to do, give the protocol a shot. */
        ret = lmc_proto_ioctl(sc, ifr, cmd);
        break;
    }

    return ret;
}


/* the watchdog process that cruises around */
static void lmc_watchdog(struct timer_list *t) /*fold00*/
{
    lmc_softc_t *sc = from_timer(sc, t, timer);
    struct net_device *dev = sc->lmc_device;
    int link_status;
    u32 ticks;
    unsigned long flags;

    spin_lock_irqsave(&sc->lmc_lock, flags);

    if (sc->check != 0xBEAFCAFE) {
        printk("LMC: Corrupt net_device struct, breaking out\n");
        spin_unlock_irqrestore(&sc->lmc_lock, flags);
        return;
    }

    /* Make sure the tx jabber and rx watchdog are off,
     * and the transmit and receive processes are running.
     */
    LMC_CSR_WRITE(sc, csr_15, 0x00000011);
    sc->lmc_cmdmode |= TULIP_CMD_TXRUN | TULIP_CMD_RXRUN;
    LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);

    if (sc->lmc_ok == 0)
        goto kick_timer;

    LMC_EVENT_LOG(LMC_EVENT_WATCHDOG, LMC_CSR_READ(sc, csr_status), lmc_mii_readreg(sc, 0, 16));
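    /*
     * Stall detection takes two consecutive ticks: the first tick that
     * sees tx packets counted but no tx interrupt serviced only arms
     * tx_TimeoutInd; if nothing has moved by the next tick, the DEC
     * chip is declared stuck and given a running reset.
     */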
    /* --- begin time out check -----------------------------------
     * check for a transmit interrupt timeout:
     * has the packets-transmitted vs. transmits-serviced threshold
     * been exceeded?
     */
    if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
        sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
        sc->tx_TimeoutInd == 0)
    {
        /* wait for the watchdog to come around again */
        sc->tx_TimeoutInd = 1;
    }
    else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
             sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
             sc->tx_TimeoutInd)
    {
        LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ(sc, csr_status), 0);

        sc->tx_TimeoutDisplay = 1;
        sc->extra_stats.tx_TimeoutCnt++;

        /* DEC chip is stuck, hit it with a RESET!!!! */
        lmc_running_reset(dev);

        /* look at receive & transmit process state to make sure they are running */
        LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);

        /* look at: DSR - 02 for Reg 16
         *          CTS - 08
         *          DCD - 10
         *          RI  - 20
         * for Reg 17
         */
        LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16), lmc_mii_readreg(sc, 0, 17));

        /* reset the transmit timeout detection flag */
        sc->tx_TimeoutInd = 0;
        sc->lastlmc_taint_tx = sc->lmc_taint_tx;
        sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
    } else {
        sc->tx_TimeoutInd = 0;
        sc->lastlmc_taint_tx = sc->lmc_taint_tx;
        sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
    }

    /* --- end time out check ----------------------------------- */

    link_status = sc->lmc_media->get_link_status(sc);

    /*
     * hardware level link lost, but the interface is marked as up.
     * Mark it as down.
     */
    if ((link_status == 0) && (sc->last_link_status != 0)) {
        printk(KERN_WARNING "%s: hardware/physical link down\n", dev->name);
        sc->last_link_status = 0;
        /* lmc_reset (sc); Why reset??? The link can go down ok */

        /* Inform the world that link has been lost */
        netif_carrier_off(dev);
    }

    /*
     * hardware link is up, but the interface is marked as down.
     * Bring it back up again.
     */
    if (link_status != 0 && sc->last_link_status == 0) {
        printk(KERN_WARNING "%s: hardware/physical link up\n", dev->name);
        sc->last_link_status = 1;
        /* lmc_reset (sc); Again why reset??? */

        netif_carrier_on(dev);
    }
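    /*
     * last_link_status mirrors the carrier state we have already
     * reported, so each up/down transition is logged and signalled
     * exactly once per edge.
     */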
    /* Call media specific watchdog functions */
    sc->lmc_media->watchdog(sc);

    /*
     * Poke the receive poll demand so reception never
     * stops, even if we ran out of memory
     */
    LMC_CSR_WRITE(sc, csr_rxpoll, 0);

    /*
     * Check for things that failed earlier
     * and try to fix them as appropriate
     */
    if (sc->failed_ring == 1) {
        /*
         * Failed to set up the recv/xmit ring
         * Try again
         */
        sc->failed_ring = 0;
        lmc_softreset(sc);
    }
    if (sc->failed_recv_alloc == 1) {
        /*
         * We failed to alloc mem in the
         * interrupt handler, go through the rings
         * and rebuild them
         */
        sc->failed_recv_alloc = 0;
        lmc_softreset(sc);
    }

    /*
     * remember the timer value
     */
kick_timer:

    ticks = LMC_CSR_READ(sc, csr_gp_timer);
    LMC_CSR_WRITE(sc, csr_gp_timer, 0xffffffffUL);
    sc->ictl.ticks = 0x0000ffff - (ticks & 0x0000ffff);

    /*
     * restart this timer.
     */
    sc->timer.expires = jiffies + (HZ);
    add_timer(&sc->timer);

    spin_unlock_irqrestore(&sc->lmc_lock, flags);
}

static int lmc_attach(struct net_device *dev, unsigned short encoding,
                      unsigned short parity)
{
    if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
        return 0;
    return -EINVAL;
}

static const struct net_device_ops lmc_ops = {
    .ndo_open       = lmc_open,
    .ndo_stop       = lmc_close,
    .ndo_start_xmit = hdlc_start_xmit,
    .ndo_do_ioctl   = lmc_ioctl,
    .ndo_tx_timeout = lmc_driver_timeout,
    .ndo_get_stats  = lmc_get_stats,
};

static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    lmc_softc_t *sc;
    struct net_device *dev;
    u16 subdevice;
    u16 AdapModelNum;
    int err;
    static int cards_found;

    err = pcim_enable_device(pdev);
    if (err) {
        printk(KERN_ERR "lmc: pci enable failed: %d\n", err);
        return err;
    }

    err = pci_request_regions(pdev, "lmc");
    if (err) {
        printk(KERN_ERR "lmc: pci_request_regions failed\n");
        return err;
    }

    /*
     * Allocate our own device structure
     */
    sc = devm_kzalloc(&pdev->dev, sizeof(lmc_softc_t), GFP_KERNEL);
    if (!sc)
        return -ENOMEM;

    dev = alloc_hdlcdev(sc);
    if (!dev) {
        printk(KERN_ERR "lmc: alloc_hdlcdev for device failed\n");
        return -ENOMEM;
    }

    dev->type = ARPHRD_HDLC;
    dev_to_hdlc(dev)->xmit = lmc_start_xmit;
    dev_to_hdlc(dev)->attach = lmc_attach;
    dev->netdev_ops = &lmc_ops;
    dev->watchdog_timeo = HZ; /* 1 second */
    dev->tx_queue_len = 100;
    sc->lmc_device = dev;
    sc->name = dev->name;
    sc->if_type = LMC_PPP;
    sc->check = 0xBEAFCAFE;
    dev->base_addr = pci_resource_start(pdev, 0);
    dev->irq = pdev->irq;
    pci_set_drvdata(pdev, dev);
    SET_NETDEV_DEV(dev, &pdev->dev);

    /*
     * This will get the protocol layer ready and do any one-time init's.
     * Must have a valid sc and dev structure.
     */
    lmc_proto_attach(sc);

    /* Init the spin lock so we can take it later */
    spin_lock_init(&sc->lmc_lock);
    pci_set_master(pdev);

    printk(KERN_INFO "%s: detected at %lx, irq %d\n", dev->name,
           dev->base_addr, dev->irq);

    err = register_hdlc_device(dev);
    if (err) {
        printk(KERN_ERR "%s: register_hdlc_device failed.\n", dev->name);
        free_netdev(dev);
        return err;
    }

    sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
    sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;

    /*
     * Check either the subvendor or the subdevice: some systems reverse
     * the setting in the BIOS (seems to be version and arch dependent?).
     * Fix the error by exchanging the two values.
     */
    if ((subdevice = pdev->subsystem_device) == PCI_VENDOR_ID_LMC)
        subdevice = pdev->subsystem_vendor;
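    /*
     * PCI subsystem device ID -> card model (matching the adapter
     * model numbers checked further down):
     *
     *   PCI_DEVICE_ID_LMC_HSSI  ->  LMC5200 (HSSI)
     *   PCI_DEVICE_ID_LMC_DS3   ->  LMC5245 (DS3)
     *   PCI_DEVICE_ID_LMC_SSI   ->  LMC1000 (SSI)
     *   PCI_DEVICE_ID_LMC_T1    ->  LMC1200 (T1)
     */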
    switch (subdevice) {
    case PCI_DEVICE_ID_LMC_HSSI:
        printk(KERN_INFO "%s: LMC HSSI\n", dev->name);
        sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
        sc->lmc_media = &lmc_hssi_media;
        break;
    case PCI_DEVICE_ID_LMC_DS3:
        printk(KERN_INFO "%s: LMC DS3\n", dev->name);
        sc->lmc_cardtype = LMC_CARDTYPE_DS3;
        sc->lmc_media = &lmc_ds3_media;
        break;
    case PCI_DEVICE_ID_LMC_SSI:
        printk(KERN_INFO "%s: LMC SSI\n", dev->name);
        sc->lmc_cardtype = LMC_CARDTYPE_SSI;
        sc->lmc_media = &lmc_ssi_media;
        break;
    case PCI_DEVICE_ID_LMC_T1:
        printk(KERN_INFO "%s: LMC T1\n", dev->name);
        sc->lmc_cardtype = LMC_CARDTYPE_T1;
        sc->lmc_media = &lmc_t1_media;
        break;
    default:
        printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
        unregister_hdlc_device(dev);
        return -EIO;
    }

    lmc_initcsrs(sc, dev->base_addr, 8);

    lmc_gpio_mkinput(sc, 0xff);
    sc->lmc_gpio = 0; /* drive no signals yet */

    sc->lmc_media->defaults(sc);

    sc->lmc_media->set_link_status(sc, LMC_LINK_UP);

    /* verify that the PCI Sub System ID matches the Adapter Model number
     * from the MII register
     */
    AdapModelNum = (lmc_mii_readreg(sc, 0, 3) & 0x3f0) >> 4;

    if ((AdapModelNum != LMC_ADAP_T1 ||		/* detect LMC1200 */
         subdevice != PCI_DEVICE_ID_LMC_T1) &&
        (AdapModelNum != LMC_ADAP_SSI ||	/* detect LMC1000 */
         subdevice != PCI_DEVICE_ID_LMC_SSI) &&
        (AdapModelNum != LMC_ADAP_DS3 ||	/* detect LMC5245 */
         subdevice != PCI_DEVICE_ID_LMC_DS3) &&
        (AdapModelNum != LMC_ADAP_HSSI ||	/* detect LMC5200 */
         subdevice != PCI_DEVICE_ID_LMC_HSSI))
        printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI"
               " Subsystem ID = 0x%04x\n",
               dev->name, AdapModelNum, subdevice);

    /*
     * reset clock
     */
    LMC_CSR_WRITE(sc, csr_gp_timer, 0xFFFFFFFFUL);

    sc->board_idx = cards_found++;
    sc->extra_stats.check = STATCHECK;
    sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
        sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
    sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;

    sc->lmc_ok = 0;
    sc->last_link_status = 0;

    return 0;
}

/*
 * Called from pci when removing module.
 */
static void lmc_remove_one(struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);

    if (dev) {
        printk(KERN_DEBUG "%s: removing...\n", dev->name);
        unregister_hdlc_device(dev);
        free_netdev(dev);
    }
}
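/*
 * Note: the softc is devm-allocated and pcim_enable_device() should make
 * the PCI region request managed as well, so remove only has to
 * unregister and free the netdev; the rest is released by the devres
 * core on unbind.
 */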
/* After this is called, packets can be sent.
 * Does not initialize the addresses
 */
static int lmc_open(struct net_device *dev)
{
    lmc_softc_t *sc = dev_to_sc(dev);
    int err;

    lmc_led_on(sc, LMC_DS3_LED0);

    lmc_dec_reset(sc);
    lmc_reset(sc);

    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
    LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16),
                  lmc_mii_readreg(sc, 0, 17));

    if (sc->lmc_ok)
        return 0;

    lmc_softreset(sc);

    /* Since we have to use the PCI bus, this should work on x86, alpha, ppc */
    if (request_irq(dev->irq, lmc_interrupt, IRQF_SHARED, dev->name, dev)) {
        printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
        return -EAGAIN;
    }
    sc->got_irq = 1;

    /* Assert Terminal Active */
    sc->lmc_miireg16 |= LMC_MII16_LED_ALL;
    sc->lmc_media->set_link_status(sc, LMC_LINK_UP);

    /*
     * reset to last state.
     */
    sc->lmc_media->set_status(sc, NULL);

    /* setup default bits to be used in tulip_desc_t transmit descriptor
     * -baz */
    sc->TxDescriptControlInit = (LMC_TDES_INTERRUPT_ON_COMPLETION
                                 | LMC_TDES_FIRST_SEGMENT
                                 | LMC_TDES_LAST_SEGMENT
                                 | LMC_TDES_SECOND_ADDR_CHAINED
                                 | LMC_TDES_DISABLE_PADDING);

    if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) {
        /* disable 32 bit CRC generated by ASIC */
        sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
    }
    sc->lmc_media->set_crc_length(sc, sc->ictl.crc_length);
    /* Acknowledge the Terminal Active and light LEDs */

    /* dev->flags |= IFF_UP; */

    if ((err = lmc_proto_open(sc)) != 0)
        return err;

    netif_start_queue(dev);
    sc->extra_stats.tx_tbusy0++;

    /*
     * select what interrupts we want to get
     */
    sc->lmc_intrmask = 0;
    /* Should be using the default interrupt mask defined in the .h file. */
    sc->lmc_intrmask |= (TULIP_STS_NORMALINTR
                         | TULIP_STS_RXINTR
                         | TULIP_STS_TXINTR
                         | TULIP_STS_ABNRMLINTR
                         | TULIP_STS_SYSERROR
                         | TULIP_STS_TXSTOPPED
                         | TULIP_STS_TXUNDERFLOW
                         | TULIP_STS_RXSTOPPED
                         | TULIP_STS_RXNOBUF);
    LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);

    sc->lmc_cmdmode |= TULIP_CMD_TXRUN;
    sc->lmc_cmdmode |= TULIP_CMD_RXRUN;
    LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);

    sc->lmc_ok = 1; /* Run watchdog */

    /*
     * Set the if up now - pfb
     */
    sc->last_link_status = 1;

    /*
     * Set up the watchdog timer and start it running;
     * it re-arms itself every second.
     */
    timer_setup(&sc->timer, lmc_watchdog, 0);
    sc->timer.expires = jiffies + HZ;
    add_timer(&sc->timer);

    return 0;
}
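/*
 * Watchdog timer lifecycle: armed in lmc_open() above, re-armed at the
 * end of every lmc_watchdog() pass via add_timer(), and stopped in
 * lmc_close() with del_timer().
 */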
/* Total reset to compensate for the AdTran DSU doing bad things
 * under heavy load
 */
static void lmc_running_reset(struct net_device *dev) /*fold00*/
{
    lmc_softc_t *sc = dev_to_sc(dev);

    /* stop interrupts */
    /* Clear the interrupt mask */
    LMC_CSR_WRITE(sc, csr_intr, 0x00000000);

    lmc_dec_reset(sc);
    lmc_reset(sc);
    lmc_softreset(sc);
    /* sc->lmc_miireg16 |= LMC_MII16_LED_ALL; */
    sc->lmc_media->set_link_status(sc, 1);
    sc->lmc_media->set_status(sc, NULL);

    netif_wake_queue(dev);

    sc->lmc_txfull = 0;
    sc->extra_stats.tx_tbusy0++;

    sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
    LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);

    sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
    LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);
}


/* This is what is called when you ifconfig down a device.
 * This disables the timer for the watchdog and keepalives,
 * and disables the irq for dev.
 */
static int lmc_close(struct net_device *dev)
{
    /* not calling release_region() as we should */
    lmc_softc_t *sc = dev_to_sc(dev);

    sc->lmc_ok = 0;
    sc->lmc_media->set_link_status(sc, 0);
    del_timer(&sc->timer);
    lmc_proto_close(sc);
    lmc_ifdown(dev);

    return 0;
}

/* Ends the transfer of packets */
/* When the interface goes down, this is called */
static int lmc_ifdown(struct net_device *dev) /*fold00*/
{
    lmc_softc_t *sc = dev_to_sc(dev);
    u32 csr6;
    int i;

    /* Don't let anything else go on right now */
    // dev->start = 0;
    netif_stop_queue(dev);
    sc->extra_stats.tx_tbusy1++;

    /* stop interrupts */
    /* Clear the interrupt mask */
    LMC_CSR_WRITE(sc, csr_intr, 0x00000000);

    /* Stop Tx and Rx on the chip */
    csr6 = LMC_CSR_READ(sc, csr_command);
    csr6 &= ~LMC_DEC_ST; /* Turn off the Transmission bit */
    csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */
    LMC_CSR_WRITE(sc, csr_command, csr6);

    sc->lmc_device->stats.rx_missed_errors +=
        LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;

    /* release the interrupt */
    if (sc->got_irq == 1) {
        free_irq(dev->irq, dev);
        sc->got_irq = 0;
    }

    /* free skbuffs in the Rx queue */
    for (i = 0; i < LMC_RXDESCS; i++) {
        struct sk_buff *skb = sc->lmc_rxq[i];
        sc->lmc_rxq[i] = NULL;
        sc->lmc_rxring[i].status = 0;
        sc->lmc_rxring[i].length = 0;
        sc->lmc_rxring[i].buffer1 = 0xDEADBEEF;
        if (skb != NULL)
            dev_kfree_skb(skb);
    }

    for (i = 0; i < LMC_TXDESCS; i++) {
        if (sc->lmc_txq[i] != NULL)
            dev_kfree_skb(sc->lmc_txq[i]);
        sc->lmc_txq[i] = NULL;
    }

    lmc_led_off(sc, LMC_MII16_LED_ALL);

    netif_wake_queue(dev);
    sc->extra_stats.tx_tbusy0++;

    return 0;
}

/* Interrupt handling routine.  This will take an incoming packet, or clean
 * up after a transmit.
 */
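/*
 * Descriptor ownership: bit 31 of the status word.  1 means the 21140
 * owns the descriptor, 0 means the host does.  The tx reap loop below
 * stops at the first descriptor the chip still owns.
 */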
static irqreturn_t lmc_interrupt(int irq, void *dev_instance) /*fold00*/
{
    struct net_device *dev = (struct net_device *) dev_instance;
    lmc_softc_t *sc = dev_to_sc(dev);
    u32 csr;
    int i;
    s32 stat;
    unsigned int badtx;
    u32 firstcsr;
    int max_work = LMC_RXDESCS;
    int handled = 0;

    spin_lock(&sc->lmc_lock);

    /*
     * Read the csr to find what interrupts we have (if any)
     */
    csr = LMC_CSR_READ(sc, csr_status);

    /*
     * Make sure this is our interrupt
     */
    if (!(csr & sc->lmc_intrmask))
        goto lmc_int_fail_out;

    firstcsr = csr;

    /* always go through this loop at least once */
    while (csr & sc->lmc_intrmask) {
        handled = 1;

        /*
         * Clear interrupt bits, we handle all cases below
         */
        LMC_CSR_WRITE(sc, csr_status, csr);

        /*
         * One of
         *  - Transmit process timed out CSR5<1>
         *  - Transmit jabber timeout    CSR5<3>
         *  - Transmit underflow         CSR5<5>
         *  - Receive buffer unavailable CSR5<7>
         *  - Receive process stopped    CSR5<8>
         *  - Receive watchdog timeout   CSR5<9>
         *  - Early transmit interrupt   CSR5<10>
         *
         * Is this really right? Should we do a running reset for jabber?
         * (being a WAN card and all)
         */
        if (csr & TULIP_STS_ABNRMLINTR) {
            lmc_running_reset(dev);
            break;
        }

        if (csr & TULIP_STS_RXINTR)
            lmc_rx(dev);

        if (csr & (TULIP_STS_TXINTR | TULIP_STS_TXNOBUF | TULIP_STS_TXSTOPPED)) {

            int n_compl = 0;
            /* reset the transmit timeout detection flag -baz */
            sc->extra_stats.tx_NoCompleteCnt = 0;

            badtx = sc->lmc_taint_tx;
            i = badtx % LMC_TXDESCS;

            while (badtx < sc->lmc_next_tx) {
                stat = sc->lmc_txring[i].status;

                LMC_EVENT_LOG(LMC_EVENT_XMTINT, stat,
                              sc->lmc_txring[i].length);
                /*
                 * If bit 31 is 1 the Tulip owns it; break out of the loop
                 */
                if (stat & 0x80000000)
                    break;

                n_compl++; /* i.e., have an empty slot in ring */
                /*
                 * If we have no skbuff or have cleared it
                 * already, continue to the next buffer
                 */
                if (sc->lmc_txq[i] == NULL)
                    continue;

                /*
                 * Check the total error summary to look for any errors
                 */
                if (stat & 0x8000) {
                    sc->lmc_device->stats.tx_errors++;
                    if (stat & 0x4104)
                        sc->lmc_device->stats.tx_aborted_errors++;
                    if (stat & 0x0C00)
                        sc->lmc_device->stats.tx_carrier_errors++;
                    if (stat & 0x0200)
                        sc->lmc_device->stats.tx_window_errors++;
                    if (stat & 0x0002)
                        sc->lmc_device->stats.tx_fifo_errors++;
                } else {
                    sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;

                    sc->lmc_device->stats.tx_packets++;
                }

                dev_consume_skb_irq(sc->lmc_txq[i]);
                sc->lmc_txq[i] = NULL;

                badtx++;
                i = badtx % LMC_TXDESCS;
            }

            if (sc->lmc_next_tx - badtx > LMC_TXDESCS) {
                printk("%s: out of sync pointer\n", dev->name);
                badtx += LMC_TXDESCS;
            }
            LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);
            sc->lmc_txfull = 0;
            netif_wake_queue(dev);
            sc->extra_stats.tx_tbusy0++;

#ifdef DEBUG
            sc->extra_stats.dirtyTx = badtx;
            sc->extra_stats.lmc_next_tx = sc->lmc_next_tx;
            sc->extra_stats.lmc_txfull = sc->lmc_txfull;
#endif
            sc->lmc_taint_tx = badtx;

            /*
             * Why was there a break here???
             */
        } /* end handle transmit interrupt */
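        /*
         * CSR5 status bits are write-one-to-clear: the write at the
         * top of the loop acked everything we saw, and the re-read at
         * the bottom picks up anything that arrived meanwhile.
         */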
        if (csr & TULIP_STS_SYSERROR) {
            u32 error;
            printk(KERN_WARNING "%s: system bus error csr: %#8.8x\n", dev->name, csr);
            error = csr >> 23 & 0x7;
            switch (error) {
            case 0x000:
                printk(KERN_WARNING "%s: Parity Fault (bad)\n", dev->name);
                break;
            case 0x001:
                printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
                break;
            case 0x002:
                printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
                break;
            default:
                printk(KERN_WARNING "%s: This bus error code was supposed to be reserved!\n", dev->name);
            }
            lmc_dec_reset(sc);
            lmc_reset(sc);
            LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
            LMC_EVENT_LOG(LMC_EVENT_RESET2,
                          lmc_mii_readreg(sc, 0, 16),
                          lmc_mii_readreg(sc, 0, 17));
        }

        if (max_work-- <= 0)
            break;

        /*
         * Get current csr status to make sure
         * we've cleared all interrupts
         */
        csr = LMC_CSR_READ(sc, csr_status);
    } /* end interrupt loop */
    LMC_EVENT_LOG(LMC_EVENT_INT, firstcsr, csr);

lmc_int_fail_out:

    spin_unlock(&sc->lmc_lock);

    return IRQ_RETVAL(handled);
}

static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
                                  struct net_device *dev)
{
    lmc_softc_t *sc = dev_to_sc(dev);
    u32 flag;
    int entry;
    unsigned long flags;

    spin_lock_irqsave(&sc->lmc_lock, flags);

    /* normal path, tbusy known to be zero */
    entry = sc->lmc_next_tx % LMC_TXDESCS;

    sc->lmc_txq[entry] = skb;
    sc->lmc_txring[entry].buffer1 = virt_to_bus(skb->data);

    LMC_CONSOLE_LOG("xmit", skb->data, skb->len);

#ifndef GCOM
    /* If the queue is less than half full, don't interrupt */
    if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS / 2) {
        /* Do not interrupt on completion of this packet */
        flag = 0x60000000;
        netif_wake_queue(dev);
    } else if (sc->lmc_next_tx - sc->lmc_taint_tx == LMC_TXDESCS / 2) {
        /* This generates an interrupt on completion of this packet */
        flag = 0xe0000000;
        netif_wake_queue(dev);
    } else if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS - 1) {
        /* Do not interrupt on completion of this packet */
        flag = 0x60000000;
        netif_wake_queue(dev);
    } else {
        /* This generates an interrupt on completion of this packet */
        flag = 0xe0000000;
        sc->lmc_txfull = 1;
        netif_stop_queue(dev);
    }
#else
    flag = LMC_TDES_INTERRUPT_ON_COMPLETION;

    if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1) {
        /* ring full, go busy */
        sc->lmc_txfull = 1;
        netif_stop_queue(dev);
        sc->extra_stats.tx_tbusy1++;
        LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
    }
#endif

    if (entry == LMC_TXDESCS - 1)	/* last descriptor in ring */
        flag |= LMC_TDES_END_OF_RING;	/* flag as such for Tulip */

    /* don't pad small packets either */
    flag = sc->lmc_txring[entry].length = (skb->len) | flag |
        sc->TxDescriptControlInit;

    /* set the transmit timeout flag to be checked in
     * the watchdog timer handler. -baz
     */
    sc->extra_stats.tx_NoCompleteCnt++;
    sc->lmc_next_tx++;

    /* give ownership to the chip */
    LMC_EVENT_LOG(LMC_EVENT_XMT, flag, entry);
    sc->lmc_txring[entry].status = 0x80000000;

    /* send now! */
    LMC_CSR_WRITE(sc, csr_txpoll, 0);

    spin_unlock_irqrestore(&sc->lmc_lock, flags);

    return NETDEV_TX_OK;
}
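/*
 * On the flag values above: 0x60000000 sets only the first/last-segment
 * bits of the descriptor, while 0xe0000000 additionally sets
 * interrupt-on-completion, so a tx-done interrupt is requested only
 * when the ring reaches the half-full mark or is about to fill -- a
 * simple form of interrupt mitigation.
 */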
static int lmc_rx(struct net_device *dev)
{
    lmc_softc_t *sc = dev_to_sc(dev);
    int i;
    int rx_work_limit = LMC_RXDESCS;
    int rxIntLoopCnt;		/* debug -baz */
    int localLengthErrCnt = 0;
    long stat;
    struct sk_buff *skb, *nsb;
    u16 len;

    lmc_led_on(sc, LMC_DS3_LED3);

    rxIntLoopCnt = 0;		/* debug -baz */

    i = sc->lmc_next_rx % LMC_RXDESCS;

    while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4) {
        rxIntLoopCnt++;		/* debug -baz */
        len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER);
        if ((stat & 0x0300) != 0x0300) { /* Check first segment and last segment */
            if ((stat & 0x0000ffff) != 0x7fff) {
                /* Oversized frame */
                sc->lmc_device->stats.rx_length_errors++;
                goto skip_packet;
            }
        }

        if (stat & 0x00000008) { /* Catch a dribbling bit error */
            sc->lmc_device->stats.rx_errors++;
            sc->lmc_device->stats.rx_frame_errors++;
            goto skip_packet;
        }

        if (stat & 0x00000004) { /* Catch a CRC error by the Xilinx */
            sc->lmc_device->stats.rx_errors++;
            sc->lmc_device->stats.rx_crc_errors++;
            goto skip_packet;
        }

        if (len > LMC_PKT_BUF_SZ) {
            sc->lmc_device->stats.rx_length_errors++;
            localLengthErrCnt++;
            goto skip_packet;
        }

        if (len < sc->lmc_crcSize + 2) {
            sc->lmc_device->stats.rx_length_errors++;
            sc->extra_stats.rx_SmallPktCnt++;
            localLengthErrCnt++;
            goto skip_packet;
        }

        if (stat & 0x00004000) {
            printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);
        }

        len -= sc->lmc_crcSize;

        skb = sc->lmc_rxq[i];

        /*
         * We ran out of memory at some point;
         * just allocate an skb buff and continue.
         */
        if (!skb) {
            nsb = dev_alloc_skb(LMC_PKT_BUF_SZ + 2);
            if (nsb) {
                sc->lmc_rxq[i] = nsb;
                nsb->dev = dev;
                sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
            }
            sc->failed_recv_alloc = 1;
            goto skip_packet;
        }

        sc->lmc_device->stats.rx_packets++;
        sc->lmc_device->stats.rx_bytes += len;

        LMC_CONSOLE_LOG("recv", skb->data, len);

        /*
         * I'm not sure of the sanity of this.
         * Packets could be arriving at a constant
         * 44.210 mbit/s and we're going to copy
         * them into a new buffer??
         */
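        /*
         * Copybreak: frames larger than ~3/4 of LMC_MTU are handed up
         * in the ring skb itself (and the slot is refilled with a
         * fresh buffer); smaller frames are copied into a right-sized
         * skb so the big ring buffer can be reused at once.
         */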
        if (len > (LMC_MTU - (LMC_MTU >> 2))) { /* len > LMC_MTU * 0.75 */
            /*
             * If it's a large packet don't copy it, just hand it up
             */
give_it_anyways:

            sc->lmc_rxq[i] = NULL;
            sc->lmc_rxring[i].buffer1 = 0x0;

            skb_put(skb, len);
            skb->protocol = lmc_proto_type(sc, skb);
            skb_reset_mac_header(skb);
            /* skb_reset_network_header(skb); */
            skb->dev = dev;
            lmc_proto_netif(sc, skb);

            /*
             * This skb will be destroyed by the upper layers, make a new one
             */
            nsb = dev_alloc_skb(LMC_PKT_BUF_SZ + 2);
            if (nsb) {
                sc->lmc_rxq[i] = nsb;
                nsb->dev = dev;
                sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
                /* Transferred to 21140 below */
            } else {
                /*
                 * We've run out of memory; stop trying to allocate
                 * memory and exit the interrupt handler.
                 *
                 * The chip may run out of receivers and stop,
                 * in which case we'll try to allocate the buffer
                 * again. (once a second)
                 */
                sc->extra_stats.rx_BuffAllocErr++;
                LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
                sc->failed_recv_alloc = 1;
                goto skip_out_of_mem;
            }
        } else {
            nsb = dev_alloc_skb(len);
            if (!nsb)
                goto give_it_anyways;

            skb_copy_from_linear_data(skb, skb_put(nsb, len), len);

            nsb->protocol = lmc_proto_type(sc, nsb);
            skb_reset_mac_header(nsb);
            /* skb_reset_network_header(nsb); */
            nsb->dev = dev;
            lmc_proto_netif(sc, nsb);
        }

skip_packet:
        LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
        sc->lmc_rxring[i].status = DESC_OWNED_BY_DC21X4;

        sc->lmc_next_rx++;
        i = sc->lmc_next_rx % LMC_RXDESCS;
        rx_work_limit--;
        if (rx_work_limit < 0)
            break;
    }

    /* detect condition for LMC1000 where DSU cable attaches and fills
     * descriptors with bogus packets
     *
    if (localLengthErrCnt > LMC_RXDESCS - 3) {
        sc->extra_stats.rx_BadPktSurgeCnt++;
        LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt,
                      sc->extra_stats.rx_BadPktSurgeCnt);
    } */

    /* save max count of receive descriptors serviced */
    if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt)
        sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */

#ifdef DEBUG
    if (rxIntLoopCnt == 0) {
        for (i = 0; i < LMC_RXDESCS; i++) {
            if ((sc->lmc_rxring[i].status & LMC_RDES_OWN_BIT)
                != DESC_OWNED_BY_DC21X4) {
                rxIntLoopCnt++;
            }
        }
        LMC_EVENT_LOG(LMC_EVENT_RCVEND, rxIntLoopCnt, 0);
    }
#endif


    lmc_led_off(sc, LMC_DS3_LED3);

skip_out_of_mem:
    return 0;
}

static struct net_device_stats *lmc_get_stats(struct net_device *dev)
{
    lmc_softc_t *sc = dev_to_sc(dev);
    unsigned long flags;

    spin_lock_irqsave(&sc->lmc_lock, flags);

    sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;

    spin_unlock_irqrestore(&sc->lmc_lock, flags);

    return &sc->lmc_device->stats;
}

static struct pci_driver lmc_driver = {
    .name		= "lmc",
    .id_table	= lmc_pci_tbl,
    .probe		= lmc_init_one,
    .remove		= lmc_remove_one,
};

module_pci_driver(lmc_driver);

unsigned lmc_mii_readreg(lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
{
    int i;
    int command = (0xf6 << 10) | (devaddr << 5) | regno;
    int retval = 0;

    LMC_MII_SYNC(sc);
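    /*
     * Bit-banged MDIO through CSR9: bit 17 (0x20000) drives MDIO out,
     * bit 16 (0x10000) is the MDC clock, and bit 19 (0x80000) samples
     * MDIO in.  Each bit is set up with the clock low and latched on
     * the rising edge.
     */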
    for (i = 15; i >= 0; i--) {
        int dataval = (command & (1 << i)) ? 0x20000 : 0;

        LMC_CSR_WRITE(sc, csr_9, dataval);
        lmc_delay();
        /* __SLOW_DOWN_IO; */
        LMC_CSR_WRITE(sc, csr_9, dataval | 0x10000);
        lmc_delay();
        /* __SLOW_DOWN_IO; */
    }

    for (i = 19; i > 0; i--) {
        LMC_CSR_WRITE(sc, csr_9, 0x40000);
        lmc_delay();
        /* __SLOW_DOWN_IO; */
        retval = (retval << 1) | ((LMC_CSR_READ(sc, csr_9) & 0x80000) ? 1 : 0);
        LMC_CSR_WRITE(sc, csr_9, 0x40000 | 0x10000);
        lmc_delay();
        /* __SLOW_DOWN_IO; */
    }

    return (retval >> 1) & 0xffff;
}

void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data) /*fold00*/
{
    int i = 32;
    int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data;

    LMC_MII_SYNC(sc);

    i = 31;
    while (i >= 0) {
        int datav;

        if (command & (1 << i))
            datav = 0x20000;
        else
            datav = 0x00000;

        LMC_CSR_WRITE(sc, csr_9, datav);
        lmc_delay();
        /* __SLOW_DOWN_IO; */
        LMC_CSR_WRITE(sc, csr_9, (datav | 0x10000));
        lmc_delay();
        /* __SLOW_DOWN_IO; */
        i--;
    }

    i = 2;
    while (i > 0) {
        LMC_CSR_WRITE(sc, csr_9, 0x40000);
        lmc_delay();
        /* __SLOW_DOWN_IO; */
        LMC_CSR_WRITE(sc, csr_9, 0x50000);
        lmc_delay();
        /* __SLOW_DOWN_IO; */
        i--;
    }
}

static void lmc_softreset(lmc_softc_t * const sc) /*fold00*/
{
    int i;

    /* Initialize the receive rings and buffers. */
    sc->lmc_txfull = 0;
    sc->lmc_next_rx = 0;
    sc->lmc_next_tx = 0;
    sc->lmc_taint_rx = 0;
    sc->lmc_taint_tx = 0;

    /*
     * Set up each one of the receive buffers:
     * allocate an skbuff for each one, set up the descriptor table
     * and point each buffer at the next one
     */
    for (i = 0; i < LMC_RXDESCS; i++) {
        struct sk_buff *skb;

        if (sc->lmc_rxq[i] == NULL) {
            skb = dev_alloc_skb(LMC_PKT_BUF_SZ + 2);
            if (skb == NULL) {
                printk(KERN_WARNING "%s: Failed to allocate receiver ring, will try again\n", sc->name);
                sc->failed_ring = 1;
                break;
            }
            sc->lmc_rxq[i] = skb;
        } else {
            skb = sc->lmc_rxq[i];
        }

        skb->dev = sc->lmc_device;

        /* owned by 21140 */
        sc->lmc_rxring[i].status = 0x80000000;

        /* used to be PKT_BUF_SZ; now uses skb since we lose some to head room */
        sc->lmc_rxring[i].length = skb_tailroom(skb);

        /* used to be tail, which looks odd (why write to the end of the
         * packet?), but since nothing has been put in the buffer yet,
         * tail == data
         */
        sc->lmc_rxring[i].buffer1 = virt_to_bus(skb->data);

        /* This is fair since the structure is static and we have the next address */
        sc->lmc_rxring[i].buffer2 = virt_to_bus(&sc->lmc_rxring[i + 1]);
    }

    /*
     * Sets end of ring
     */
    if (i != 0) {
        sc->lmc_rxring[i - 1].length |= 0x02000000; /* Set end of buffers flag */
        sc->lmc_rxring[i - 1].buffer2 = virt_to_bus(&sc->lmc_rxring[0]); /* Point back to the start */
    }
    LMC_CSR_WRITE(sc, csr_rxlist, virt_to_bus(sc->lmc_rxring)); /* write base address */

    /* Initialize the transmit rings and buffers */
    for (i = 0; i < LMC_TXDESCS; i++) {
        if (sc->lmc_txq[i] != NULL) {		/* have buffer */
            dev_kfree_skb(sc->lmc_txq[i]);	/* free it */
            sc->lmc_device->stats.tx_dropped++; /* We just dropped a packet */
        }
        sc->lmc_txq[i] = NULL;
        sc->lmc_txring[i].status = 0x00000000;
        sc->lmc_txring[i].buffer2 = virt_to_bus(&sc->lmc_txring[i + 1]);
    }
    sc->lmc_txring[i - 1].buffer2 = virt_to_bus(&sc->lmc_txring[0]);
    LMC_CSR_WRITE(sc, csr_txlist, virt_to_bus(sc->lmc_txring));
}
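/*
 * Both rings use chained descriptors: buffer2 of each descriptor points
 * at the next one, and the last points back at entry 0.  The rx ring
 * additionally sets the end-of-ring bit in the length word; the tx side
 * gets LMC_TDES_SECOND_ADDR_CHAINED from TxDescriptControlInit.
 */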
void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/
{
    sc->lmc_gpio_io &= ~bits;
    LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
}

void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/
{
    sc->lmc_gpio_io |= bits;
    LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
}

void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/
{
    if ((~sc->lmc_miireg16) & led) /* Already on! */
        return;

    sc->lmc_miireg16 &= ~led;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
}

void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/
{
    if (sc->lmc_miireg16 & led) /* Already set, don't do anything */
        return;

    sc->lmc_miireg16 |= led;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
}

static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
{
    sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

    sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

    /*
     * make some of the GPIO pins be outputs
     */
    lmc_gpio_mkoutput(sc, LMC_GEP_RESET);

    /*
     * RESET low to force state reset.  This also forces
     * the transmitter clock to be internal, but we expect to reset
     * that later anyway.
     */
    sc->lmc_gpio &= ~(LMC_GEP_RESET);
    LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

    /*
     * hold for more than 10 microseconds
     */
    udelay(50);

    /*
     * stop driving Xilinx-related signals
     */
    lmc_gpio_mkinput(sc, LMC_GEP_RESET);

    /*
     * Call media specific init routine
     */
    sc->lmc_media->init(sc);

    sc->extra_stats.resetCount++;
}

static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
{
    u32 val;

    /*
     * disable all interrupts
     */
    sc->lmc_intrmask = 0;
    LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);

    /*
     * Reset the chip with a software reset command.
     * Wait 10 microseconds (actually 50 PCI cycles, which at
     * 33 MHz comes to about two microseconds, but wait a
     * bit longer anyway)
     */
    LMC_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET);
    udelay(25);
#ifdef __sparc__
    sc->lmc_busmode = LMC_CSR_READ(sc, csr_busmode);
    sc->lmc_busmode = 0x00100000;
    sc->lmc_busmode &= ~TULIP_BUSMODE_SWRESET;
    LMC_CSR_WRITE(sc, csr_busmode, sc->lmc_busmode);
#endif
    sc->lmc_cmdmode = LMC_CSR_READ(sc, csr_command);

    /*
     * We want:
     *   no ethernet address in frames we write
     *   disable padding (txdesc, padding disable)
     *   ignore runt frames (rdes0 bit 15)
     *   no receiver watchdog or transmitter jabber timer
     *       (csr15 bit 0,14 == 1)
     *   if using 16-bit CRC, turn off CRC (trans desc, crc disable)
     */
    sc->lmc_cmdmode |= (TULIP_CMD_PROMISCUOUS
                        | TULIP_CMD_FULLDUPLEX
                        | TULIP_CMD_PASSBADPKT
                        | TULIP_CMD_NOHEARTBEAT
                        | TULIP_CMD_PORTSELECT
                        | TULIP_CMD_RECEIVEALL
                        | TULIP_CMD_MUSTBEONE);
    sc->lmc_cmdmode &= ~(TULIP_CMD_OPERMODE
                         | TULIP_CMD_THRESHOLDCTL
                         | TULIP_CMD_STOREFWD
                         | TULIP_CMD_TXTHRSHLDCTL);

    LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);

    /*
     * disable receiver watchdog and transmit jabber
     */
    val = LMC_CSR_READ(sc, csr_sia_general);
    val |= (TULIP_WATCHDOG_TXDISABLE | TULIP_WATCHDOG_RXDISABLE);
    LMC_CSR_WRITE(sc, csr_sia_general, val);
}
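/*
 * Tulip CSRs are spaced a fixed stride apart; lmc_init_one() passes a
 * stride of 8 for this card, so lmc_initcsrs() below just precomputes
 * csr_base + n * csr_size for each register.
 */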
static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00*/
                         size_t csr_size)
{
    sc->lmc_csrs.csr_busmode	   = csr_base + 0 * csr_size;
    sc->lmc_csrs.csr_txpoll	   = csr_base + 1 * csr_size;
    sc->lmc_csrs.csr_rxpoll	   = csr_base + 2 * csr_size;
    sc->lmc_csrs.csr_rxlist	   = csr_base + 3 * csr_size;
    sc->lmc_csrs.csr_txlist	   = csr_base + 4 * csr_size;
    sc->lmc_csrs.csr_status	   = csr_base + 5 * csr_size;
    sc->lmc_csrs.csr_command	   = csr_base + 6 * csr_size;
    sc->lmc_csrs.csr_intr	   = csr_base + 7 * csr_size;
    sc->lmc_csrs.csr_missed_frames = csr_base + 8 * csr_size;
    sc->lmc_csrs.csr_9		   = csr_base + 9 * csr_size;
    sc->lmc_csrs.csr_10		   = csr_base + 10 * csr_size;
    sc->lmc_csrs.csr_11		   = csr_base + 11 * csr_size;
    sc->lmc_csrs.csr_12		   = csr_base + 12 * csr_size;
    sc->lmc_csrs.csr_13		   = csr_base + 13 * csr_size;
    sc->lmc_csrs.csr_14		   = csr_base + 14 * csr_size;
    sc->lmc_csrs.csr_15		   = csr_base + 15 * csr_size;
}

static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue)
{
    lmc_softc_t *sc = dev_to_sc(dev);
    u32 csr6;
    unsigned long flags;

    spin_lock_irqsave(&sc->lmc_lock, flags);

    printk("%s: Xmitter busy\n", dev->name);

    sc->extra_stats.tx_tbusy_calls++;
    if (jiffies - dev_trans_start(dev) < TX_TIMEOUT)
        goto bug_out;

    /*
     * Chip seems to have locked up.
     * Reset it.
     * This wipes out all our descriptor
     * tables and starts from scratch.
     */
    LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO,
                  LMC_CSR_READ(sc, csr_status),
                  sc->extra_stats.tx_ProcTimeout);

    lmc_running_reset(dev);

    LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
    LMC_EVENT_LOG(LMC_EVENT_RESET2,
                  lmc_mii_readreg(sc, 0, 16),
                  lmc_mii_readreg(sc, 0, 17));

    /* restart the tx (and rx) processes */
    csr6 = LMC_CSR_READ(sc, csr_command);
    LMC_CSR_WRITE(sc, csr_command, csr6 | 0x0002);
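    /*
     * Assuming the standard Tulip CSR6 layout: bit 1 (0x0002) started
     * the receive process above; now bit 13 (0x2000) is set as well so
     * the transmit process restarts too.
     */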
    LMC_CSR_WRITE(sc, csr_command, csr6 | 0x2002);

    /* immediate transmit */
    LMC_CSR_WRITE(sc, csr_txpoll, 0);

    sc->lmc_device->stats.tx_errors++;
    sc->extra_stats.tx_ProcTimeout++; /* -baz */

    netif_trans_update(dev); /* prevent tx timeout */

bug_out:

    spin_unlock_irqrestore(&sc->lmc_lock, flags);
}