/*
 * ni6510 (am7990 'lance' chip) driver for Linux-net-3
 * BETAcode v0.71 (96/09/29) for 2.0.0 (or later)
 * copyright (c) 1994,1995,1996 by M.Hipp
 *
 * This driver can handle the old ni6510 board and the newer ni6510
 * EtherBlaster. (It probably also works with any fully NE2100-compatible
 * card.)
 *
 * driver probes: io: 0x360,0x300,0x320,0x340 / dma: 3,5,6,7
 *
 * This is an extension to the Linux operating system, and is covered by the
 * same GNU General Public License that covers the Linux kernel.
 *
 * comments/bugs/suggestions can be sent to:
 *   Michael Hipp
 *   email: hippm@informatik.uni-tuebingen.de
 *
 * sources:
 *   some things are from the 'ni6510-packet-driver for dos by Russ Nelson'
 *   and from the original drivers by D.Becker
 *
 * known problems:
 *   - on some PCI boards (including my own) the card/board/ISA-bridge has
 *     problems with bus master DMA. This results in lots of overruns.
 *     It may help to '#define RCV_PARANOIA_CHECK' or to #undef the
 *     XMT_VIA_SKB and RCV_VIA_SKB options, although this reduces driver
 *     performance. Or just play with your BIOS options to optimize ISA-DMA
 *     access. You may also want to play with the LOW_PERFORMANCE and
 *     MID_PERFORMANCE defines; please report your experience if you do.
 *   - Harald reported that on ASUS SP3G mainboards you should use
 *     the 'optimal settings' from the user's manual on page 3-12!
 *
 * credits:
 *   thanks to Jason Sullivan for sending me a ni6510 card!
 *   lots of debug runs with ASUS SP3G boards (Intel Saturn) by Harald Koenig
 *
 * simple performance test: (486DX-33/Ni6510-EB receives from 486DX4-100/Ni6510-EB)
 *    average: FTP -> 8384421 bytes received in 8.5 seconds
 *       (no RCV_VIA_SKB, no XMT_VIA_SKB, PARANOIA_CHECK, 4 XMIT BUFS, 8 RCV BUFFS)
 *    peak: FTP -> 8384421 bytes received in 7.5 seconds
 *       (RCV_VIA_SKB, XMT_VIA_SKB, no PARANOIA_CHECK, 1(!) XMIT BUF, 16 RCV BUFFS)
 */

/*
 * 99.Jun.8: added support for /proc/net/dev byte count for xosview (HK)
 * 96.Sept.29: virt_to_bus stuff added for new memory model
 * 96.April.29: added Harald Koenig's patches (MH)
 * 96.April.13: enhanced error handling .. more tests (MH)
 * 96.April.5/6: a lot of performance tests. Got it stable now (hopefully) (MH)
 * 96.April.1: (no joke ;) .. added EtherBlaster and module support (MH)
 * 96.Feb.19: fixed a few bugs .. cleanups .. tested for 1.3.66 (MH)
 *            hopefully no more 16MB limit
 *
 * 95.Nov.18: multicast tweaked (AC).
 *
 * 94.Aug.22: changes in xmit_intr (ack more than one xmitted packet), ni65_send_packet (p->lock) (MH)
 *
 * 94.July.16: fixed bugs in recv_skb and skb-alloc stuff (MH)
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/io.h>
#include <asm/dma.h>

#include "ni65.h"

/*
 * The current settings allow acceptable performance.
 * For 'RCV_PARANOIA_CHECK', read the 'known problems' section in
 * the header of this file.
 * 'Invert' the defines below for maximum performance.
 * This may cause DMA problems on some boards (e.g. on my ASUS SP3G).
 */
#undef XMT_VIA_SKB
#undef RCV_VIA_SKB
#define RCV_PARANOIA_CHECK

#define MID_PERFORMANCE

#if defined( LOW_PERFORMANCE )
 static int isa0=7,isa1=7,csr80=0x0c10;
#elif defined( MID_PERFORMANCE )
 static int isa0=5,isa1=5,csr80=0x2810;
#else	/* high performance */
 static int isa0=4,isa1=4,csr80=0x0017;
#endif

/*
 * a few card/vendor specific defines
 */
#define NI65_ID0    0x00
#define NI65_ID1    0x55
#define NI65_EB_ID0 0x52
#define NI65_EB_ID1 0x44
#define NE2100_ID0  0x57
#define NE2100_ID1  0x57

#define PORT p->cmdr_addr

/*
 * buffer configuration
 */
#if 1
#define RMDNUM 16
#define RMDNUMMASK 0x80000000
#else
#define RMDNUM 8
#define RMDNUMMASK 0x60000000 /* log2(RMDNUM)<<29 */
#endif

#if 0
#define TMDNUM 1
#define TMDNUMMASK 0x00000000
#else
#define TMDNUM 4
#define TMDNUMMASK 0x40000000 /* log2(TMDNUM)<<29 */
#endif

/* slightly oversized */
#define R_BUF_SIZE 1544
#define T_BUF_SIZE 1544

/*
 * lance register defines
 */
#define L_DATAREG 0x00
#define L_ADDRREG 0x02
#define L_RESET   0x04
#define L_CONFIG  0x05
#define L_BUSIF   0x06

/*
 * to access the lance/am7990-regs, you have to write
 * reg-number into L_ADDRREG, then you can access it using L_DATAREG
 */
#define CSR0 0x00
#define CSR1 0x01
#define CSR2 0x02
#define CSR3 0x03

#define INIT_RING_BEFORE_START	0x1
#define FULL_RESET_ON_ERROR	0x2

#if 0
#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
                           outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
                       inw(PORT+L_DATAREG))
#if 0
#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
#else
#define writedatareg(val) { writereg(val,CSR0); }
#endif
#else
#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);outw(val,PORT+L_DATAREG);}
#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_DATAREG))
#define writedatareg(val) { writereg(val,CSR0); }
#endif

static unsigned char ni_vendor[] = { 0x02,0x07,0x01 };

static struct card {
	unsigned char id0,id1;
	short id_offset;
	short total_size;
	short cmd_offset;
	short addr_offset;
	unsigned char *vendor_id;
	char *cardname;
	unsigned long config;
} cards[] = {
	{
		.id0 = NI65_ID0,
		.id1 = NI65_ID1,
		.id_offset = 0x0e,
		.total_size = 0x10,
		.cmd_offset = 0x0,
		.addr_offset = 0x8,
		.vendor_id = ni_vendor,
		.cardname = "ni6510",
		.config = 0x1,
	},
	{
		.id0 = NI65_EB_ID0,
		.id1 = NI65_EB_ID1,
		.id_offset = 0x0e,
		.total_size = 0x18,
		.cmd_offset = 0x10,
		.addr_offset = 0x0,
		.vendor_id = ni_vendor,
		.cardname = "ni6510 EtherBlaster",
		.config = 0x2,
	},
	{
		.id0 = NE2100_ID0,
		.id1 = NE2100_ID1,
		.id_offset = 0x0e,
		.total_size = 0x18,
		.cmd_offset = 0x10,
		.addr_offset = 0x0,
		.vendor_id = NULL,
		.cardname = "generic NE2100",
		.config = 0x0,
	},
};
#define NUM_CARDS 3

struct priv
{
	struct rmd rmdhead[RMDNUM];
	struct tmd tmdhead[TMDNUM];
	struct init_block ib;
	int rmdnum;
	int tmdnum,tmdlast;
#ifdef RCV_VIA_SKB
	struct sk_buff *recv_skb[RMDNUM];
#else
	void *recvbounce[RMDNUM];
#endif
#ifdef XMT_VIA_SKB
	struct sk_buff
*tmd_skb[TMDNUM]; 236#endif 237 void *tmdbounce[TMDNUM]; 238 int tmdbouncenum; 239 int lock,xmit_queued; 240 241 void *self; 242 int cmdr_addr; 243 int cardno; 244 int features; 245 spinlock_t ring_lock; 246}; 247 248static int ni65_probe1(struct net_device *dev,int); 249static irqreturn_t ni65_interrupt(int irq, void * dev_id); 250static void ni65_recv_intr(struct net_device *dev,int); 251static void ni65_xmit_intr(struct net_device *dev,int); 252static int ni65_open(struct net_device *dev); 253static int ni65_lance_reinit(struct net_device *dev); 254static void ni65_init_lance(struct priv *p,unsigned char*,int,int); 255static netdev_tx_t ni65_send_packet(struct sk_buff *skb, 256 struct net_device *dev); 257static void ni65_timeout(struct net_device *dev, unsigned int txqueue); 258static int ni65_close(struct net_device *dev); 259static int ni65_alloc_buffer(struct net_device *dev); 260static void ni65_free_buffer(struct priv *p); 261static void set_multicast_list(struct net_device *dev); 262 263static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */ 264static int dmatab[] __initdata = { 0,3,5,6,7 }; /* dma config-translate and autodetect */ 265 266static int debuglevel = 1; 267 268/* 269 * set 'performance' registers .. we must STOP lance for that 270 */ 271static void ni65_set_performance(struct priv *p) 272{ 273 writereg(CSR0_STOP | CSR0_CLRALL,CSR0); /* STOP */ 274 275 if( !(cards[p->cardno].config & 0x02) ) 276 return; 277 278 outw(80,PORT+L_ADDRREG); 279 if(inw(PORT+L_ADDRREG) != 80) 280 return; 281 282 writereg( (csr80 & 0x3fff) ,80); /* FIFO watermarks */ 283 outw(0,PORT+L_ADDRREG); 284 outw((short)isa0,PORT+L_BUSIF); /* write ISA 0: DMA_R : isa0 * 50ns */ 285 outw(1,PORT+L_ADDRREG); 286 outw((short)isa1,PORT+L_BUSIF); /* write ISA 1: DMA_W : isa1 * 50ns */ 287 288 outw(CSR0,PORT+L_ADDRREG); /* switch back to CSR0 */ 289} 290 291/* 292 * open interface (up) 293 */ 294static int ni65_open(struct net_device *dev) 295{ 296 struct priv *p = dev->ml_priv; 297 int irqval = request_irq(dev->irq, ni65_interrupt,0, 298 cards[p->cardno].cardname,dev); 299 if (irqval) { 300 printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n", 301 dev->name,dev->irq, irqval); 302 return -EAGAIN; 303 } 304 305 if(ni65_lance_reinit(dev)) 306 { 307 netif_start_queue(dev); 308 return 0; 309 } 310 else 311 { 312 free_irq(dev->irq,dev); 313 return -EAGAIN; 314 } 315} 316 317/* 318 * close interface (down) 319 */ 320static int ni65_close(struct net_device *dev) 321{ 322 struct priv *p = dev->ml_priv; 323 324 netif_stop_queue(dev); 325 326 outw(inw(PORT+L_RESET),PORT+L_RESET); /* that's the hard way */ 327 328#ifdef XMT_VIA_SKB 329 { 330 int i; 331 for(i=0;i<TMDNUM;i++) 332 { 333 if(p->tmd_skb[i]) { 334 dev_kfree_skb(p->tmd_skb[i]); 335 p->tmd_skb[i] = NULL; 336 } 337 } 338 } 339#endif 340 free_irq(dev->irq,dev); 341 return 0; 342} 343 344static void cleanup_card(struct net_device *dev) 345{ 346 struct priv *p = dev->ml_priv; 347 disable_dma(dev->dma); 348 free_dma(dev->dma); 349 release_region(dev->base_addr, cards[p->cardno].total_size); 350 ni65_free_buffer(p); 351} 352 353/* set: io,irq,dma or set it when calling insmod */ 354static int irq; 355static int io; 356static int dma; 357 358/* 359 * Probe The Card (not the lance-chip) 360 */ 361struct net_device * __init ni65_probe(int unit) 362{ 363 struct net_device *dev = alloc_etherdev(0); 364 static const int ports[] = { 0x360, 0x300, 0x320, 0x340, 0 }; 365 const int *port; 366 int err = 0; 367 368 if (!dev) 369 return ERR_PTR(-ENOMEM); 
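	/*
	 * unit >= 0 means we come from the boot-time probe: take over any
	 * boot-time settings for ethN via netdev_boot_setup_check().
	 * unit < 0 means a module load; the io/irq/dma module parameters
	 * are used instead.
	 */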
370 371 if (unit >= 0) { 372 sprintf(dev->name, "eth%d", unit); 373 netdev_boot_setup_check(dev); 374 irq = dev->irq; 375 dma = dev->dma; 376 } else { 377 dev->base_addr = io; 378 } 379 380 if (dev->base_addr > 0x1ff) { /* Check a single specified location. */ 381 err = ni65_probe1(dev, dev->base_addr); 382 } else if (dev->base_addr > 0) { /* Don't probe at all. */ 383 err = -ENXIO; 384 } else { 385 for (port = ports; *port && ni65_probe1(dev, *port); port++) 386 ; 387 if (!*port) 388 err = -ENODEV; 389 } 390 if (err) 391 goto out; 392 393 err = register_netdev(dev); 394 if (err) 395 goto out1; 396 return dev; 397out1: 398 cleanup_card(dev); 399out: 400 free_netdev(dev); 401 return ERR_PTR(err); 402} 403 404static const struct net_device_ops ni65_netdev_ops = { 405 .ndo_open = ni65_open, 406 .ndo_stop = ni65_close, 407 .ndo_start_xmit = ni65_send_packet, 408 .ndo_tx_timeout = ni65_timeout, 409 .ndo_set_rx_mode = set_multicast_list, 410 .ndo_set_mac_address = eth_mac_addr, 411 .ndo_validate_addr = eth_validate_addr, 412}; 413 414/* 415 * this is the real card probe .. 416 */ 417static int __init ni65_probe1(struct net_device *dev,int ioaddr) 418{ 419 int i,j; 420 struct priv *p; 421 unsigned long flags; 422 423 dev->irq = irq; 424 dev->dma = dma; 425 426 for(i=0;i<NUM_CARDS;i++) { 427 if(!request_region(ioaddr, cards[i].total_size, cards[i].cardname)) 428 continue; 429 if(cards[i].id_offset >= 0) { 430 if(inb(ioaddr+cards[i].id_offset+0) != cards[i].id0 || 431 inb(ioaddr+cards[i].id_offset+1) != cards[i].id1) { 432 release_region(ioaddr, cards[i].total_size); 433 continue; 434 } 435 } 436 if(cards[i].vendor_id) { 437 for(j=0;j<3;j++) 438 if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j]) 439 release_region(ioaddr, cards[i].total_size); 440 } 441 break; 442 } 443 if(i == NUM_CARDS) 444 return -ENODEV; 445 446 for(j=0;j<6;j++) 447 dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j); 448 449 if( (j=ni65_alloc_buffer(dev)) < 0) { 450 release_region(ioaddr, cards[i].total_size); 451 return j; 452 } 453 p = dev->ml_priv; 454 p->cmdr_addr = ioaddr + cards[i].cmd_offset; 455 p->cardno = i; 456 spin_lock_init(&p->ring_lock); 457 458 printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cards[p->cardno].cardname , ioaddr); 459 460 outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */ 461 if( (j=readreg(CSR0)) != 0x4) { 462 printk("failed.\n"); 463 printk(KERN_ERR "%s: Can't RESET card: %04x\n", dev->name, j); 464 ni65_free_buffer(p); 465 release_region(ioaddr, cards[p->cardno].total_size); 466 return -EAGAIN; 467 } 468 469 outw(88,PORT+L_ADDRREG); 470 if(inw(PORT+L_ADDRREG) == 88) { 471 unsigned long v; 472 v = inw(PORT+L_DATAREG); 473 v <<= 16; 474 outw(89,PORT+L_ADDRREG); 475 v |= inw(PORT+L_DATAREG); 476 printk("Version %#08lx, ",v); 477 p->features = INIT_RING_BEFORE_START; 478 } 479 else { 480 printk("ancient LANCE, "); 481 p->features = 0x0; 482 } 483 484 if(test_bit(0,&cards[i].config)) { 485 dev->irq = irqtab[(inw(ioaddr+L_CONFIG)>>2)&3]; 486 dev->dma = dmatab[inw(ioaddr+L_CONFIG)&3]; 487 printk("IRQ %d (from card), DMA %d (from card).\n",dev->irq,dev->dma); 488 } 489 else { 490 if(dev->dma == 0) { 491 /* 'stuck test' from lance.c */ 492 unsigned long dma_channels = 493 ((inb(DMA1_STAT_REG) >> 4) & 0x0f) 494 | (inb(DMA2_STAT_REG) & 0xf0); 495 for(i=1;i<5;i++) { 496 int dma = dmatab[i]; 497 if(test_bit(dma,&dma_channels) || request_dma(dma,"ni6510")) 498 continue; 499 500 flags=claim_dma_lock(); 501 disable_dma(dma); 502 set_dma_mode(dma,DMA_MODE_CASCADE); 503 
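			/*
			 * Cascade mode hands the channel over to the LANCE as
			 * a bus master; the init access triggered below then
			 * tells us (via CSR0_IDON) whether this DMA channel
			 * actually reaches the card.
			 */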
enable_dma(dma); 504 release_dma_lock(flags); 505 506 ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */ 507 508 flags=claim_dma_lock(); 509 disable_dma(dma); 510 free_dma(dma); 511 release_dma_lock(flags); 512 513 if(readreg(CSR0) & CSR0_IDON) 514 break; 515 } 516 if(i == 5) { 517 printk("failed.\n"); 518 printk(KERN_ERR "%s: Can't detect DMA channel!\n", dev->name); 519 ni65_free_buffer(p); 520 release_region(ioaddr, cards[p->cardno].total_size); 521 return -EAGAIN; 522 } 523 dev->dma = dmatab[i]; 524 printk("DMA %d (autodetected), ",dev->dma); 525 } 526 else 527 printk("DMA %d (assigned), ",dev->dma); 528 529 if(dev->irq < 2) 530 { 531 unsigned long irq_mask; 532 533 ni65_init_lance(p,dev->dev_addr,0,0); 534 irq_mask = probe_irq_on(); 535 writereg(CSR0_INIT|CSR0_INEA,CSR0); /* trigger interrupt */ 536 msleep(20); 537 dev->irq = probe_irq_off(irq_mask); 538 if(!dev->irq) 539 { 540 printk("Failed to detect IRQ line!\n"); 541 ni65_free_buffer(p); 542 release_region(ioaddr, cards[p->cardno].total_size); 543 return -EAGAIN; 544 } 545 printk("IRQ %d (autodetected).\n",dev->irq); 546 } 547 else 548 printk("IRQ %d (assigned).\n",dev->irq); 549 } 550 551 if(request_dma(dev->dma, cards[p->cardno].cardname ) != 0) 552 { 553 printk(KERN_ERR "%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma); 554 ni65_free_buffer(p); 555 release_region(ioaddr, cards[p->cardno].total_size); 556 return -EAGAIN; 557 } 558 559 dev->base_addr = ioaddr; 560 dev->netdev_ops = &ni65_netdev_ops; 561 dev->watchdog_timeo = HZ/2; 562 563 return 0; /* everything is OK */ 564} 565 566/* 567 * set lance register and trigger init 568 */ 569static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode) 570{ 571 int i; 572 u32 pib; 573 574 writereg(CSR0_CLRALL|CSR0_STOP,CSR0); 575 576 for(i=0;i<6;i++) 577 p->ib.eaddr[i] = daddr[i]; 578 579 for(i=0;i<8;i++) 580 p->ib.filter[i] = filter; 581 p->ib.mode = mode; 582 583 p->ib.trp = (u32) isa_virt_to_bus(p->tmdhead) | TMDNUMMASK; 584 p->ib.rrp = (u32) isa_virt_to_bus(p->rmdhead) | RMDNUMMASK; 585 writereg(0,CSR3); /* busmaster/no word-swap */ 586 pib = (u32) isa_virt_to_bus(&p->ib); 587 writereg(pib & 0xffff,CSR1); 588 writereg(pib >> 16,CSR2); 589 590 writereg(CSR0_INIT,CSR0); /* this changes L_ADDRREG to CSR0 */ 591 592 for(i=0;i<32;i++) 593 { 594 mdelay(4); 595 if(inw(PORT+L_DATAREG) & (CSR0_IDON | CSR0_MERR) ) 596 break; /* init ok ? */ 597 } 598} 599 600/* 601 * allocate memory area and check the 16MB border 602 */ 603static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type) 604{ 605 struct sk_buff *skb=NULL; 606 unsigned char *ptr; 607 void *ret; 608 609 if(type) { 610 ret = skb = alloc_skb(2+16+size,GFP_KERNEL|GFP_DMA); 611 if(!skb) { 612 printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what); 613 return NULL; 614 } 615 skb_reserve(skb,2+16); 616 skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */ 617 ptr = skb->data; 618 } 619 else { 620 ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA); 621 if(!ret) 622 return NULL; 623 } 624 if( (u32) virt_to_phys(ptr+size) > 0x1000000) { 625 printk(KERN_WARNING "%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what); 626 if(type) 627 kfree_skb(skb); 628 else 629 kfree(ptr); 630 return NULL; 631 } 632 return ret; 633} 634 635/* 636 * allocate all memory structures .. send/recv buffers etc ... 
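 * (every buffer must come from GFP_DMA memory below the 16 MB ISA limit,
 *  see ni65_alloc_mem() above)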
637 */ 638static int ni65_alloc_buffer(struct net_device *dev) 639{ 640 unsigned char *ptr; 641 struct priv *p; 642 int i; 643 644 /* 645 * we need 8-aligned memory .. 646 */ 647 ptr = ni65_alloc_mem(dev,"BUFFER",sizeof(struct priv)+8,0); 648 if(!ptr) 649 return -ENOMEM; 650 651 p = dev->ml_priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7); 652 memset((char *)p, 0, sizeof(struct priv)); 653 p->self = ptr; 654 655 for(i=0;i<TMDNUM;i++) 656 { 657#ifdef XMT_VIA_SKB 658 p->tmd_skb[i] = NULL; 659#endif 660 p->tmdbounce[i] = ni65_alloc_mem(dev,"XMIT",T_BUF_SIZE,0); 661 if(!p->tmdbounce[i]) { 662 ni65_free_buffer(p); 663 return -ENOMEM; 664 } 665 } 666 667 for(i=0;i<RMDNUM;i++) 668 { 669#ifdef RCV_VIA_SKB 670 p->recv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1); 671 if(!p->recv_skb[i]) { 672 ni65_free_buffer(p); 673 return -ENOMEM; 674 } 675#else 676 p->recvbounce[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,0); 677 if(!p->recvbounce[i]) { 678 ni65_free_buffer(p); 679 return -ENOMEM; 680 } 681#endif 682 } 683 684 return 0; /* everything is OK */ 685} 686 687/* 688 * free buffers and private struct 689 */ 690static void ni65_free_buffer(struct priv *p) 691{ 692 int i; 693 694 if(!p) 695 return; 696 697 for(i=0;i<TMDNUM;i++) { 698 kfree(p->tmdbounce[i]); 699#ifdef XMT_VIA_SKB 700 dev_kfree_skb(p->tmd_skb[i]); 701#endif 702 } 703 704 for(i=0;i<RMDNUM;i++) 705 { 706#ifdef RCV_VIA_SKB 707 dev_kfree_skb(p->recv_skb[i]); 708#else 709 kfree(p->recvbounce[i]); 710#endif 711 } 712 kfree(p->self); 713} 714 715 716/* 717 * stop and (re)start lance .. e.g after an error 718 */ 719static void ni65_stop_start(struct net_device *dev,struct priv *p) 720{ 721 int csr0 = CSR0_INEA; 722 723 writedatareg(CSR0_STOP); 724 725 if(debuglevel > 1) 726 printk(KERN_DEBUG "ni65_stop_start\n"); 727 728 if(p->features & INIT_RING_BEFORE_START) { 729 int i; 730#ifdef XMT_VIA_SKB 731 struct sk_buff *skb_save[TMDNUM]; 732#endif 733 unsigned long buffer[TMDNUM]; 734 short blen[TMDNUM]; 735 736 if(p->xmit_queued) { 737 while(1) { 738 if((p->tmdhead[p->tmdlast].u.s.status & XMIT_OWN)) 739 break; 740 p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1); 741 if(p->tmdlast == p->tmdnum) 742 break; 743 } 744 } 745 746 for(i=0;i<TMDNUM;i++) { 747 struct tmd *tmdp = p->tmdhead + i; 748#ifdef XMT_VIA_SKB 749 skb_save[i] = p->tmd_skb[i]; 750#endif 751 buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer); 752 blen[i] = tmdp->blen; 753 tmdp->u.s.status = 0x0; 754 } 755 756 for(i=0;i<RMDNUM;i++) { 757 struct rmd *rmdp = p->rmdhead + i; 758 rmdp->u.s.status = RCV_OWN; 759 } 760 p->tmdnum = p->xmit_queued = 0; 761 writedatareg(CSR0_STRT | csr0); 762 763 for(i=0;i<TMDNUM;i++) { 764 int num = (i + p->tmdlast) & (TMDNUM-1); 765 p->tmdhead[i].u.buffer = (u32) isa_virt_to_bus((char *)buffer[num]); /* status is part of buffer field */ 766 p->tmdhead[i].blen = blen[num]; 767 if(p->tmdhead[i].u.s.status & XMIT_OWN) { 768 p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1); 769 p->xmit_queued = 1; 770 writedatareg(CSR0_TDMD | CSR0_INEA | csr0); 771 } 772#ifdef XMT_VIA_SKB 773 p->tmd_skb[i] = skb_save[num]; 774#endif 775 } 776 p->rmdnum = p->tmdlast = 0; 777 if(!p->lock) 778 if (p->tmdnum || !p->xmit_queued) 779 netif_wake_queue(dev); 780 netif_trans_update(dev); /* prevent tx timeout */ 781 } 782 else 783 writedatareg(CSR0_STRT | csr0); 784} 785 786/* 787 * init lance (write init-values .. 
init-buffers) (open-helper) 788 */ 789static int ni65_lance_reinit(struct net_device *dev) 790{ 791 int i; 792 struct priv *p = dev->ml_priv; 793 unsigned long flags; 794 795 p->lock = 0; 796 p->xmit_queued = 0; 797 798 flags=claim_dma_lock(); 799 disable_dma(dev->dma); /* I've never worked with dma, but we do it like the packetdriver */ 800 set_dma_mode(dev->dma,DMA_MODE_CASCADE); 801 enable_dma(dev->dma); 802 release_dma_lock(flags); 803 804 outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */ 805 if( (i=readreg(CSR0) ) != 0x4) 806 { 807 printk(KERN_ERR "%s: can't RESET %s card: %04x\n",dev->name, 808 cards[p->cardno].cardname,(int) i); 809 flags=claim_dma_lock(); 810 disable_dma(dev->dma); 811 release_dma_lock(flags); 812 return 0; 813 } 814 815 p->rmdnum = p->tmdnum = p->tmdlast = p->tmdbouncenum = 0; 816 for(i=0;i<TMDNUM;i++) 817 { 818 struct tmd *tmdp = p->tmdhead + i; 819#ifdef XMT_VIA_SKB 820 if(p->tmd_skb[i]) { 821 dev_kfree_skb(p->tmd_skb[i]); 822 p->tmd_skb[i] = NULL; 823 } 824#endif 825 tmdp->u.buffer = 0x0; 826 tmdp->u.s.status = XMIT_START | XMIT_END; 827 tmdp->blen = tmdp->status2 = 0; 828 } 829 830 for(i=0;i<RMDNUM;i++) 831 { 832 struct rmd *rmdp = p->rmdhead + i; 833#ifdef RCV_VIA_SKB 834 rmdp->u.buffer = (u32) isa_virt_to_bus(p->recv_skb[i]->data); 835#else 836 rmdp->u.buffer = (u32) isa_virt_to_bus(p->recvbounce[i]); 837#endif 838 rmdp->blen = -(R_BUF_SIZE-8); 839 rmdp->mlen = 0; 840 rmdp->u.s.status = RCV_OWN; 841 } 842 843 if(dev->flags & IFF_PROMISC) 844 ni65_init_lance(p,dev->dev_addr,0x00,M_PROM); 845 else if (netdev_mc_count(dev) || dev->flags & IFF_ALLMULTI) 846 ni65_init_lance(p,dev->dev_addr,0xff,0x0); 847 else 848 ni65_init_lance(p,dev->dev_addr,0x00,0x00); 849 850 /* 851 * ni65_set_lance_mem() sets L_ADDRREG to CSR0 852 * NOW, WE WILL NEVER CHANGE THE L_ADDRREG, CSR0 IS ALWAYS SELECTED 853 */ 854 855 if(inw(PORT+L_DATAREG) & CSR0_IDON) { 856 ni65_set_performance(p); 857 /* init OK: start lance , enable interrupts */ 858 writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT); 859 return 1; /* ->OK */ 860 } 861 printk(KERN_ERR "%s: can't init lance, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG)); 862 flags=claim_dma_lock(); 863 disable_dma(dev->dma); 864 release_dma_lock(flags); 865 return 0; /* ->Error */ 866} 867 868/* 869 * interrupt handler 870 */ 871static irqreturn_t ni65_interrupt(int irq, void * dev_id) 872{ 873 int csr0 = 0; 874 struct net_device *dev = dev_id; 875 struct priv *p; 876 int bcnt = 32; 877 878 p = dev->ml_priv; 879 880 spin_lock(&p->ring_lock); 881 882 while(--bcnt) { 883 csr0 = inw(PORT+L_DATAREG); 884 885#if 0 886 writedatareg( (csr0 & CSR0_CLRALL) ); /* ack interrupts, disable int. */ 887#else 888 writedatareg( (csr0 & CSR0_CLRALL) | CSR0_INEA ); /* ack interrupts, interrupts enabled */ 889#endif 890 891 if(!(csr0 & (CSR0_ERR | CSR0_RINT | CSR0_TINT))) 892 break; 893 894 if(csr0 & CSR0_RINT) /* RECV-int? */ 895 ni65_recv_intr(dev,csr0); 896 if(csr0 & CSR0_TINT) /* XMIT-int? */ 897 ni65_xmit_intr(dev,csr0); 898 899 if(csr0 & CSR0_ERR) 900 { 901 if(debuglevel > 1) 902 printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0); 903 if(csr0 & CSR0_BABL) 904 dev->stats.tx_errors++; 905 if(csr0 & CSR0_MISS) { 906 int i; 907 for(i=0;i<RMDNUM;i++) 908 printk("%02x ",p->rmdhead[i].u.s.status); 909 printk("\n"); 910 dev->stats.rx_errors++; 911 } 912 if(csr0 & CSR0_MERR) { 913 if(debuglevel > 1) 914 printk(KERN_ERR "%s: Ooops .. 
memory error: %04x.\n",dev->name,csr0); 915 ni65_stop_start(dev,p); 916 } 917 } 918 } 919 920#ifdef RCV_PARANOIA_CHECK 921{ 922 int j; 923 for(j=0;j<RMDNUM;j++) 924 { 925 int i, num2; 926 for(i=RMDNUM-1;i>0;i--) { 927 num2 = (p->rmdnum + i) & (RMDNUM-1); 928 if(!(p->rmdhead[num2].u.s.status & RCV_OWN)) 929 break; 930 } 931 932 if(i) { 933 int k, num1; 934 for(k=0;k<RMDNUM;k++) { 935 num1 = (p->rmdnum + k) & (RMDNUM-1); 936 if(!(p->rmdhead[num1].u.s.status & RCV_OWN)) 937 break; 938 } 939 if(!k) 940 break; 941 942 if(debuglevel > 0) 943 { 944 char buf[256],*buf1; 945 buf1 = buf; 946 for(k=0;k<RMDNUM;k++) { 947 sprintf(buf1,"%02x ",(p->rmdhead[k].u.s.status)); /* & RCV_OWN) ); */ 948 buf1 += 3; 949 } 950 *buf1 = 0; 951 printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf); 952 } 953 954 p->rmdnum = num1; 955 ni65_recv_intr(dev,csr0); 956 if((p->rmdhead[num2].u.s.status & RCV_OWN)) 957 break; /* ok, we are 'in sync' again */ 958 } 959 else 960 break; 961 } 962} 963#endif 964 965 if( (csr0 & (CSR0_RXON | CSR0_TXON)) != (CSR0_RXON | CSR0_TXON) ) { 966 printk(KERN_DEBUG "%s: RX or TX was offline -> restart\n",dev->name); 967 ni65_stop_start(dev,p); 968 } 969 else 970 writedatareg(CSR0_INEA); 971 972 spin_unlock(&p->ring_lock); 973 return IRQ_HANDLED; 974} 975 976/* 977 * We have received an Xmit-Interrupt .. 978 * send a new packet if necessary 979 */ 980static void ni65_xmit_intr(struct net_device *dev,int csr0) 981{ 982 struct priv *p = dev->ml_priv; 983 984 while(p->xmit_queued) 985 { 986 struct tmd *tmdp = p->tmdhead + p->tmdlast; 987 int tmdstat = tmdp->u.s.status; 988 989 if(tmdstat & XMIT_OWN) 990 break; 991 992 if(tmdstat & XMIT_ERR) 993 { 994#if 0 995 if(tmdp->status2 & XMIT_TDRMASK && debuglevel > 3) 996 printk(KERN_ERR "%s: tdr-problems (e.g. no resistor)\n",dev->name); 997#endif 998 /* checking some errors */ 999 if(tmdp->status2 & XMIT_RTRY) 1000 dev->stats.tx_aborted_errors++; 1001 if(tmdp->status2 & XMIT_LCAR) 1002 dev->stats.tx_carrier_errors++; 1003 if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) { 1004 /* this stops the xmitter */ 1005 dev->stats.tx_fifo_errors++; 1006 if(debuglevel > 0) 1007 printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name); 1008 if(p->features & INIT_RING_BEFORE_START) { 1009 tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; /* test: resend this frame */ 1010 ni65_stop_start(dev,p); 1011 break; /* no more Xmit processing .. 
*/ 1012 } 1013 else 1014 ni65_stop_start(dev,p); 1015 } 1016 if(debuglevel > 2) 1017 printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2); 1018 if(!(csr0 & CSR0_BABL)) /* don't count errors twice */ 1019 dev->stats.tx_errors++; 1020 tmdp->status2 = 0; 1021 } 1022 else { 1023 dev->stats.tx_bytes -= (short)(tmdp->blen); 1024 dev->stats.tx_packets++; 1025 } 1026 1027#ifdef XMT_VIA_SKB 1028 if(p->tmd_skb[p->tmdlast]) { 1029 dev_consume_skb_irq(p->tmd_skb[p->tmdlast]); 1030 p->tmd_skb[p->tmdlast] = NULL; 1031 } 1032#endif 1033 1034 p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1); 1035 if(p->tmdlast == p->tmdnum) 1036 p->xmit_queued = 0; 1037 } 1038 netif_wake_queue(dev); 1039} 1040 1041/* 1042 * We have received a packet 1043 */ 1044static void ni65_recv_intr(struct net_device *dev,int csr0) 1045{ 1046 struct rmd *rmdp; 1047 int rmdstat,len; 1048 int cnt=0; 1049 struct priv *p = dev->ml_priv; 1050 1051 rmdp = p->rmdhead + p->rmdnum; 1052 while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN)) 1053 { 1054 cnt++; 1055 if( (rmdstat & (RCV_START | RCV_END | RCV_ERR)) != (RCV_START | RCV_END) ) /* error or oversized? */ 1056 { 1057 if(!(rmdstat & RCV_ERR)) { 1058 if(rmdstat & RCV_START) 1059 { 1060 dev->stats.rx_length_errors++; 1061 printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff); 1062 } 1063 } 1064 else { 1065 if(debuglevel > 2) 1066 printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n", 1067 dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) ); 1068 if(rmdstat & RCV_FRAM) 1069 dev->stats.rx_frame_errors++; 1070 if(rmdstat & RCV_OFLO) 1071 dev->stats.rx_over_errors++; 1072 if(rmdstat & RCV_CRC) 1073 dev->stats.rx_crc_errors++; 1074 if(rmdstat & RCV_BUF_ERR) 1075 dev->stats.rx_fifo_errors++; 1076 } 1077 if(!(csr0 & CSR0_MISS)) /* don't count errors twice */ 1078 dev->stats.rx_errors++; 1079 } 1080 else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60) 1081 { 1082#ifdef RCV_VIA_SKB 1083 struct sk_buff *skb = alloc_skb(R_BUF_SIZE+2+16,GFP_ATOMIC); 1084 if (skb) 1085 skb_reserve(skb,16); 1086#else 1087 struct sk_buff *skb = netdev_alloc_skb(dev, len + 2); 1088#endif 1089 if(skb) 1090 { 1091 skb_reserve(skb,2); 1092#ifdef RCV_VIA_SKB 1093 if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) { 1094 skb_put(skb,len); 1095 skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len); 1096 } 1097 else { 1098 struct sk_buff *skb1 = p->recv_skb[p->rmdnum]; 1099 skb_put(skb,R_BUF_SIZE); 1100 p->recv_skb[p->rmdnum] = skb; 1101 rmdp->u.buffer = (u32) isa_virt_to_bus(skb->data); 1102 skb = skb1; 1103 skb_trim(skb,len); 1104 } 1105#else 1106 skb_put(skb,len); 1107 skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len); 1108#endif 1109 dev->stats.rx_packets++; 1110 dev->stats.rx_bytes += len; 1111 skb->protocol=eth_type_trans(skb,dev); 1112 netif_rx(skb); 1113 } 1114 else 1115 { 1116 printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name); 1117 dev->stats.rx_dropped++; 1118 } 1119 } 1120 else { 1121 printk(KERN_INFO "%s: received runt packet\n",dev->name); 1122 dev->stats.rx_errors++; 1123 } 1124 rmdp->blen = -(R_BUF_SIZE-8); 1125 rmdp->mlen = 0; 1126 rmdp->u.s.status = RCV_OWN; /* change owner */ 1127 p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1); 1128 rmdp = p->rmdhead + p->rmdnum; 1129 } 1130} 1131 1132/* 1133 * kick xmitter .. 
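 * (tx watchdog: the networking core calls ni65_timeout() when a transmit
 *  has been pending for longer than dev->watchdog_timeo)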
1134 */ 1135 1136static void ni65_timeout(struct net_device *dev, unsigned int txqueue) 1137{ 1138 int i; 1139 struct priv *p = dev->ml_priv; 1140 1141 printk(KERN_ERR "%s: xmitter timed out, try to restart!\n",dev->name); 1142 for(i=0;i<TMDNUM;i++) 1143 printk("%02x ",p->tmdhead[i].u.s.status); 1144 printk("\n"); 1145 ni65_lance_reinit(dev); 1146 netif_trans_update(dev); /* prevent tx timeout */ 1147 netif_wake_queue(dev); 1148} 1149 1150/* 1151 * Send a packet 1152 */ 1153 1154static netdev_tx_t ni65_send_packet(struct sk_buff *skb, 1155 struct net_device *dev) 1156{ 1157 struct priv *p = dev->ml_priv; 1158 1159 netif_stop_queue(dev); 1160 1161 if (test_and_set_bit(0, (void*)&p->lock)) { 1162 printk(KERN_ERR "%s: Queue was locked.\n", dev->name); 1163 return NETDEV_TX_BUSY; 1164 } 1165 1166 { 1167 short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; 1168 struct tmd *tmdp; 1169 unsigned long flags; 1170 1171#ifdef XMT_VIA_SKB 1172 if( (unsigned long) (skb->data + skb->len) > 0x1000000) { 1173#endif 1174 1175 skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum], 1176 skb->len > T_BUF_SIZE ? T_BUF_SIZE : 1177 skb->len); 1178 if (len > skb->len) 1179 memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len); 1180 dev_kfree_skb (skb); 1181 1182 spin_lock_irqsave(&p->ring_lock, flags); 1183 tmdp = p->tmdhead + p->tmdnum; 1184 tmdp->u.buffer = (u32) isa_virt_to_bus(p->tmdbounce[p->tmdbouncenum]); 1185 p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1); 1186 1187#ifdef XMT_VIA_SKB 1188 } 1189 else { 1190 spin_lock_irqsave(&p->ring_lock, flags); 1191 1192 tmdp = p->tmdhead + p->tmdnum; 1193 tmdp->u.buffer = (u32) isa_virt_to_bus(skb->data); 1194 p->tmd_skb[p->tmdnum] = skb; 1195 } 1196#endif 1197 tmdp->blen = -len; 1198 1199 tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; 1200 writedatareg(CSR0_TDMD | CSR0_INEA); /* enable xmit & interrupt */ 1201 1202 p->xmit_queued = 1; 1203 p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1); 1204 1205 if(p->tmdnum != p->tmdlast) 1206 netif_wake_queue(dev); 1207 1208 p->lock = 0; 1209 1210 spin_unlock_irqrestore(&p->ring_lock, flags); 1211 } 1212 1213 return NETDEV_TX_OK; 1214} 1215 1216static void set_multicast_list(struct net_device *dev) 1217{ 1218 if(!ni65_lance_reinit(dev)) 1219 printk(KERN_ERR "%s: Can't switch card into MC mode!\n",dev->name); 1220 netif_wake_queue(dev); 1221} 1222 1223#ifdef MODULE 1224static struct net_device *dev_ni65; 1225 1226module_param_hw(irq, int, irq, 0); 1227module_param_hw(io, int, ioport, 0); 1228module_param_hw(dma, int, dma, 0); 1229MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)"); 1230MODULE_PARM_DESC(io, "ni6510 I/O base address"); 1231MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)"); 1232 1233int __init init_module(void) 1234{ 1235 dev_ni65 = ni65_probe(-1); 1236 return PTR_ERR_OR_ZERO(dev_ni65); 1237} 1238 1239void __exit cleanup_module(void) 1240{ 1241 unregister_netdev(dev_ni65); 1242 cleanup_card(dev_ni65); 1243 free_netdev(dev_ni65); 1244} 1245#endif /* MODULE */ 1246 1247MODULE_LICENSE("GPL"); 1248
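
/*
 * Usage sketch (illustrative, not part of the original driver): when built
 * as a module (assumed to be called "ni65", after this source file), the
 * resources can be given explicitly on load, e.g.
 *
 *	modprobe ni65 io=0x360 irq=9 dma=5
 *
 * With io left at 0 the driver probes 0x360/0x300/0x320/0x340 by itself;
 * on the plain ni6510 the IRQ/DMA settings are read from the card, and on
 * other boards they can be autodetected as done in ni65_probe1() above.
 */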