Lines matching refs:dev (each entry is prefixed with its line number in the driver source)
361 static int i596_open(struct net_device *dev);
362 static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
364 static int i596_close(struct net_device *dev);
365 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
366 static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue);
368 static void set_multicast_list(struct net_device *dev);
375 static inline void CA(struct net_device *dev)
379 ((struct i596_reg *) dev->base_addr)->ca = 1;
386 i = *(volatile u32 *) (dev->base_addr);
392 static inline void MPU_PORT(struct net_device *dev, int c, volatile void *x)
396 struct i596_reg *p = (struct i596_reg *) (dev->base_addr);
405 *(volatile u32 *) dev->base_addr = v;
407 *(volatile u32 *) dev->base_addr = v;
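The CA() and MPU_PORT() fragments above (source lines 375-407) drive the 82596 through a memory-mapped window at dev->base_addr: MPU_PORT() writes a PORT command word and CA() strobes channel attention. A minimal sketch of the same access pattern follows; the register layout and the demo_* names are illustrative stand-ins, not the driver's actual struct i596_reg, which differs between the MVME and BVME boards.

#include <linux/netdevice.h>

/* Hypothetical register window at dev->base_addr (layout for illustration only). */
struct demo_i596_reg {
    volatile unsigned short porthi;   /* PORT command, upper half */
    volatile unsigned short portlo;   /* PORT command, lower half */
    volatile unsigned int   ca;       /* channel attention strobe */
};

/* Assert channel attention: any write to the CA register kicks the 82596. */
static inline void demo_ca(struct net_device *dev)
{
    struct demo_i596_reg *p = (struct demo_i596_reg *) dev->base_addr;

    p->ca = 1;
}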
413 static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
419 dev->name, str, lp->scb.status, lp->scb.command);
427 static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
433 dev->name, str, lp->scb.status, lp->scb.command);
441 static inline int wait_cfg(struct net_device *dev, struct i596_cmd *cmd, int delcnt, char *str)
448 printk(KERN_ERR "%s: %s.\n", dev->name, str);
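wait_istat(), wait_cmd() and wait_cfg() (source lines 413-448) share one busy-wait idiom: poll a status word with udelay() until it clears or a delay count expires, then report the failure with printk(). A generic sketch of that idiom, with a plain flag word standing in for the SCB status/command fields and demo_wait() as a hypothetical name:

#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/printk.h>

/*
 * Busy-wait until *flag drops to zero or delcnt 10-microsecond polls elapse.
 * Returns -1 and logs str on timeout, mirroring the wait_*() helpers above.
 */
static inline int demo_wait(struct net_device *dev, volatile int *flag,
                            int delcnt, const char *str)
{
    while (--delcnt && *flag)
        udelay(10);
    if (!delcnt) {
        printk(KERN_ERR "%s: %s timed out.\n", dev->name, str);
        return -1;
    }
    return 0;
}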
456 static void i596_display_data(struct net_device *dev)
458 struct i596_private *lp = dev->ml_priv;
503 struct net_device *dev = dev_id;
520 printk(KERN_ERR "%s: Error interrupt\n", dev->name);
521 i596_display_data(dev);
526 static inline void remove_rx_bufs(struct net_device *dev)
528 struct i596_private *lp = dev->ml_priv;
540 static inline int init_rx_bufs(struct net_device *dev)
542 struct i596_private *lp = dev->ml_priv;
550 struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
553 remove_rx_bufs(dev);
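init_rx_bufs() (source lines 540-553) fills the receive ring with PKT_BUF_SZ skbs from netdev_alloc_skb() and, if any allocation fails, unwinds the partial setup through remove_rx_bufs(). A reduced sketch of that allocate-or-unwind pattern, using a flat array of buffer pointers instead of the driver's RFD/RBD chain; all demo_* names and sizes are illustrative:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define DEMO_RX_RING    16      /* illustrative ring size */
#define DEMO_PKT_BUF_SZ 1536    /* illustrative buffer size */

static struct sk_buff *demo_rx_skbs[DEMO_RX_RING];

void demo_remove_rx_bufs(struct net_device *dev)
{
    int i;

    for (i = 0; i < DEMO_RX_RING; i++) {
        if (demo_rx_skbs[i]) {
            dev_kfree_skb(demo_rx_skbs[i]);
            demo_rx_skbs[i] = NULL;
        }
    }
}

int demo_init_rx_bufs(struct net_device *dev)
{
    int i;

    for (i = 0; i < DEMO_RX_RING; i++) {
        struct sk_buff *skb = netdev_alloc_skb(dev, DEMO_PKT_BUF_SZ);

        if (!skb) {
            demo_remove_rx_bufs(dev);   /* undo the partial setup */
            return -ENOMEM;
        }
        demo_rx_skbs[i] = skb;
    }
    return 0;
}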
596 static void rebuild_rx_bufs(struct net_device *dev)
598 struct i596_private *lp = dev->ml_priv;
615 static int init_i596_mem(struct net_device *dev)
617 struct i596_private *lp = dev->ml_priv;
620 MPU_PORT(dev, PORT_RESET, NULL);
649 MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp));
678 DEB(DEB_INIT,printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
680 CA(dev);
682 if (wait_istat(dev,lp,1000,"initialization timed out"))
684 DEB(DEB_INIT,printk(KERN_DEBUG "%s: i82596 initialization successful\n", dev->name));
687 rebuild_rx_bufs(dev);
708 DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdConfigure\n", dev->name));
711 i596_add_cmd(dev, &lp->cf_cmd.cmd);
713 DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
714 memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
716 i596_add_cmd(dev, &lp->sa_cmd.cmd);
718 DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
720 i596_add_cmd(dev, &lp->tdr_cmd.cmd);
724 if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) {
728 DEB(DEB_INIT,printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
730 CA(dev);
734 if (wait_cmd(dev,lp,1000,"RX_START not processed"))
736 DEB(DEB_INIT,printk(KERN_DEBUG "%s: Receive unit started OK\n", dev->name));
740 printk(KERN_CRIT "%s: Failed to initialise 82596\n", dev->name);
741 MPU_PORT(dev, PORT_RESET, NULL);
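init_i596_mem() (source lines 615-741) resets the chip through the PORT interface, points it at the SCP, waits for the initialisation status, queues CmdConfigure, CmdSASetup and CmdTDR, and finally issues RX_START, with each SCB command bracketed by wait_cmd() and CA(). A sketch of that issue-one-SCB-command step in isolation; the demo_* helpers hide the SCB access and are assumptions, not the driver's API:

#include <linux/netdevice.h>

int demo_wait_scb_idle(struct net_device *dev, int delcnt, const char *what); /* assumed, see the wait sketch */
void demo_write_scb_command(struct net_device *dev, unsigned short cmd);      /* assumed */
void demo_ca(struct net_device *dev);                    /* channel attention, as sketched earlier */

/*
 * Issue one command word to the SCB: wait for the previous command to drain,
 * write the new one, strobe channel attention, then wait for acceptance.
 */
int demo_issue_scb_cmd(struct net_device *dev, unsigned short cmd)
{
    if (demo_wait_scb_idle(dev, 1000, "SCB busy before command"))
        return -ETIMEDOUT;

    demo_write_scb_command(dev, cmd);
    demo_ca(dev);

    return demo_wait_scb_idle(dev, 1000, "SCB command not accepted") ?
           -ETIMEDOUT : 0;
}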
745 static inline int i596_rx(struct net_device *dev)
747 struct i596_private *lp = dev->ml_priv;
763 printk(KERN_CRIT "%s: rbd chain broken!\n", dev->name);
787 newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
802 skb = netdev_alloc_skb(dev, pkt_len + 2);
807 dev->stats.rx_dropped++;
815 skb->protocol=eth_type_trans(skb,dev);
822 dev->stats.rx_packets++;
823 dev->stats.rx_bytes+=pkt_len;
828 dev->name, rfd->stat));
829 dev->stats.rx_errors++;
831 dev->stats.collisions++;
833 dev->stats.rx_length_errors++;
835 dev->stats.rx_over_errors++;
837 dev->stats.rx_fifo_errors++;
839 dev->stats.rx_frame_errors++;
841 dev->stats.rx_crc_errors++;
843 dev->stats.rx_length_errors++;
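The receive path in i596_rx() (source lines 745-843) either hands the full ring buffer to the stack and replaces it with a fresh netdev_alloc_skb(), or, for short frames, copies the data into a newly allocated skb; good frames go up through eth_type_trans() and netif_rx(), and bad ones are broken out into the individual rx_* error counters. A condensed sketch of the copy-and-pass-up leg, assuming pkt_len and a data pointer have already been pulled out of the RFD/RBD; demo_rx_one() is an illustrative name:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hand one received frame to the stack; data/pkt_len come from the RBD. */
void demo_rx_one(struct net_device *dev, const void *data, int pkt_len)
{
    struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len + 2);

    if (!skb) {
        dev->stats.rx_dropped++;        /* no memory: count it and move on */
        return;
    }
    skb_reserve(skb, 2);                /* 16-byte align the IP header */
    skb_put_data(skb, data, pkt_len);
    skb->protocol = eth_type_trans(skb, dev);
    netif_rx(skb);

    dev->stats.rx_packets++;
    dev->stats.rx_bytes += pkt_len;
}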
877 static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
894 dev->stats.tx_errors++;
895 dev->stats.tx_aborted_errors++;
906 wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out");
910 static void i596_reset(struct net_device *dev, struct i596_private *lp,
919 wait_cmd(dev,lp,100,"i596_reset timed out");
921 netif_stop_queue(dev);
924 CA(dev);
927 wait_cmd(dev,lp,1000,"i596_reset 2 timed out");
930 i596_cleanup_cmd(dev,lp);
931 i596_rx(dev);
933 netif_start_queue(dev);
934 init_i596_mem(dev);
937 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
939 struct i596_private *lp = dev->ml_priv;
940 int ioaddr = dev->base_addr;
956 wait_cmd(dev,lp,100,"i596_add_cmd timed out");
959 CA(dev);
972 printk(KERN_NOTICE "%s: command unit timed out, status resetting.\n", dev->name);
974 i596_reset(dev, lp, ioaddr);
978 static int i596_open(struct net_device *dev)
982 DEB(DEB_OPEN,printk(KERN_DEBUG "%s: i596_open() irq %d.\n", dev->name, dev->irq));
984 if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
985 printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
990 if (request_irq(0x56, i596_error, 0, "i82596_error", dev)) {
996 res = init_rx_bufs(dev);
1000 netif_start_queue(dev);
1002 if (init_i596_mem(dev)) {
1010 netif_stop_queue(dev);
1011 remove_rx_bufs(dev);
1014 free_irq(0x56, dev);
1017 free_irq(dev->irq, dev);
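i596_open() (source lines 978-1017) requests the interrupt(s), fills the receive ring, starts the transmit queue and only then initialises the chip, unwinding in reverse order when a later step fails. A sketch of the same acquire/unwind ordering; demo_init_rx_bufs(), demo_remove_rx_bufs() and demo_init_hw() are hypothetical stand-ins for the driver's init_rx_bufs()/remove_rx_bufs()/init_i596_mem():

#include <linux/interrupt.h>
#include <linux/netdevice.h>

irqreturn_t demo_interrupt(int irq, void *dev_id);      /* assumed elsewhere */
int demo_init_rx_bufs(struct net_device *dev);          /* assumed elsewhere */
void demo_remove_rx_bufs(struct net_device *dev);       /* assumed elsewhere */
int demo_init_hw(struct net_device *dev);               /* assumed elsewhere */

int demo_open(struct net_device *dev)
{
    int err;

    err = request_irq(dev->irq, demo_interrupt, 0, "i82596", dev);
    if (err)
        return err;

    err = demo_init_rx_bufs(dev);
    if (err)
        goto out_irq;

    netif_start_queue(dev);

    err = demo_init_hw(dev);
    if (err)
        goto out_bufs;
    return 0;

out_bufs:
    netif_stop_queue(dev);
    demo_remove_rx_bufs(dev);
out_irq:
    free_irq(dev->irq, dev);
    return err;
}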
1022 static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue)
1024 struct i596_private *lp = dev->ml_priv;
1025 int ioaddr = dev->base_addr;
1029 dev->name));
1031 dev->stats.tx_errors++;
1034 if (lp->last_restart == dev->stats.tx_packets) {
1037 i596_reset (dev, lp, ioaddr);
1042 CA (dev);
1043 lp->last_restart = dev->stats.tx_packets;
1046 netif_trans_update(dev); /* prevent tx timeout */
1047 netif_wake_queue (dev);
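The timeout handler i596_tx_timeout() (source lines 1022-1047) counts a tx_error, restarts the command unit, or fully resets the chip if nothing has been transmitted since the previous restart, then refreshes the trans timestamp and wakes the queue. A stripped-down sketch of that flow; struct demo_priv, demo_kick_cu() and demo_full_reset() are hypothetical stand-ins for the driver's private data and its SCB/CA handling, and note the driver itself keeps its private area in dev->ml_priv rather than netdev_priv():

#include <linux/netdevice.h>

struct demo_priv {
    unsigned long last_restart;     /* tx_packets count at the last restart */
};

static void demo_kick_cu(struct net_device *dev)    { /* strobe CA: sketch */ }
static void demo_full_reset(struct net_device *dev) { /* re-init chip: sketch */ }

void demo_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
    struct demo_priv *lp = netdev_priv(dev);

    dev->stats.tx_errors++;

    if (lp->last_restart == dev->stats.tx_packets)
        demo_full_reset(dev);       /* nothing moved since the last kick */
    else
        demo_kick_cu(dev);          /* just restart the command unit */

    lp->last_restart = dev->stats.tx_packets;
    netif_trans_update(dev);        /* prevent an immediate re-timeout */
    netif_wake_queue(dev);
}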
1050 static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
1052 struct i596_private *lp = dev->ml_priv;
1058 dev->name, skb->len, skb->data));
1065 netif_stop_queue(dev);
1072 dev->name);
1073 dev->stats.tx_dropped++;
1096 i596_add_cmd(dev, &tx_cmd->cmd);
1098 dev->stats.tx_packets++;
1099 dev->stats.tx_bytes += length;
1102 netif_start_queue(dev);
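i596_start_xmit() (source lines 1050-1102) stops the queue while the single outstanding transmit command is in flight, either drops the skb (tx_dropped) when no command slot is free or queues a CmdTx through i596_add_cmd(), updates tx_packets/tx_bytes, and restarts the queue. A minimal .ndo_start_xmit sketch of that shape; demo_queue_tx() is a hypothetical stand-in for building and queueing the transmit command:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

int demo_queue_tx(struct net_device *dev, struct sk_buff *skb); /* assumed; takes ownership of skb on success */

netdev_tx_t demo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    unsigned int len = skb->len;

    netif_stop_queue(dev);          /* one command in flight at a time */

    if (demo_queue_tx(dev, skb) < 0) {
        dev->stats.tx_dropped++;
        dev_kfree_skb_any(skb);     /* dropped: free it ourselves */
    } else {
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += len;
    }

    netif_start_queue(dev);
    return NETDEV_TX_OK;
}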
1128 struct net_device *dev;
1139 dev = alloc_etherdev(0);
1140 if (!dev)
1144 sprintf(dev->name, "eth%d", unit);
1145 netdev_boot_setup_check(dev);
1147 dev->base_addr = io;
1148 dev->irq = irq;
1159 dev->base_addr = MVME_I596_BASE;
1160 dev->irq = (unsigned) MVME16x_IRQ_I596;
1174 dev->base_addr = BVME_I596_BASE;
1175 dev->irq = (unsigned) BVME_IRQ_I596;
1183 dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0);
1184 if (!dev->mem_start) {
1189 DEB(DEB_PROBE,printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));
1192 DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));
1194 DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));
1199 dev->netdev_ops = &i596_netdev_ops;
1200 dev->watchdog_timeo = TX_TIMEOUT;
1202 dev->ml_priv = (void *)(dev->mem_start);
1204 lp = dev->ml_priv;
1207 dev->name, (unsigned long)lp,
1212 cache_push(virt_to_phys((void *)(dev->mem_start)), 4096);
1213 cache_clear(virt_to_phys((void *)(dev->mem_start)), 4096);
1214 kernel_set_cachemode((void *)(dev->mem_start), 4096, IOMAP_NOCACHE_SER);
1221 err = register_netdev(dev);
1224 return dev;
1230 kernel_set_cachemode((void *)(dev->mem_start), 4096,
1233 free_page ((u32)(dev->mem_start));
1236 free_netdev(dev);
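The probe path (source lines 1128-1236) allocates the net_device with alloc_etherdev(), fills in base_addr, irq, the MAC address and netdev_ops, registers it, and frees everything again if registration fails. A skeleton of that sequence, assuming the resources and MAC address are already known; demo_probe and demo_netdev_ops are illustrative names, and the MAC is set with the modern eth_hw_addr_set() helper rather than the direct dev_addr writes the old driver uses:

#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

extern const struct net_device_ops demo_netdev_ops;    /* assumed filled in elsewhere */

struct net_device *demo_probe(unsigned long base, int irq, const u8 *mac)
{
    struct net_device *dev;
    int err;

    dev = alloc_etherdev(0);        /* private area is set up separately */
    if (!dev)
        return ERR_PTR(-ENOMEM);

    dev->base_addr = base;
    dev->irq = irq;
    dev->netdev_ops = &demo_netdev_ops;
    dev->watchdog_timeo = 5 * HZ;   /* illustrative timeout */
    eth_hw_addr_set(dev, mac);

    err = register_netdev(dev);
    if (err) {
        free_netdev(dev);
        return ERR_PTR(err);
    }
    return dev;
}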
1242 struct net_device *dev = dev_id;
1256 if (dev == NULL) {
1261 ioaddr = dev->base_addr;
1262 lp = dev->ml_priv;
1266 wait_cmd(dev,lp,100,"i596 interrupt, timeout");
1270 dev->name, irq, status));
1279 DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt completed command.\n", dev->name));
1281 DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));
1300 dev->stats.tx_errors++;
1302 dev->stats.collisions++;
1304 dev->stats.tx_heartbeat_errors++;
1306 dev->stats.tx_carrier_errors++;
1308 dev->stats.collisions++;
1310 dev->stats.tx_aborted_errors++;
1323 DEB(DEB_TDR,printk(KERN_INFO "%s: link ok.\n", dev->name));
1326 printk(KERN_ERR "%s: Transceiver problem.\n", dev->name);
1328 printk(KERN_ERR "%s: Termination problem.\n", dev->name);
1330 printk(KERN_ERR "%s: Short circuit.\n", dev->name);
1332 DEB(DEB_TDR,printk(KERN_INFO "%s: Time %d.\n", dev->name, status & 0x07ff));
1358 DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt received a frame.\n", dev->name));
1359 i596_rx(dev);
1362 if (netif_running(dev)) {
1363 DEB(DEB_ERRORS,printk(KERN_ERR "%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
1365 dev->stats.rx_errors++;
1366 dev->stats.rx_fifo_errors++;
1367 rebuild_rx_bufs(dev);
1371 wait_cmd(dev,lp,100,"i596 interrupt, timeout");
1391 CA(dev);
1393 DEB(DEB_INTS,printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));
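The interrupt handler (source lines 1242-1393) reads the SCB status, reaps completed commands (counting transmit and TDR results), calls i596_rx() when frames have arrived, rebuilds the receive buffers and counts rx_fifo_errors when the receive unit stalls, then acknowledges the status bits and strobes CA. A bare-bones skeleton of that dispatch; the demo_* helpers and the DEMO_STAT_* masks are illustrative assumptions standing in for the driver's SCB access and its own STAT_* definitions:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

#define DEMO_STAT_CX    0x8000  /* command(s) completed            */
#define DEMO_STAT_FR    0x4000  /* frame(s) received               */
#define DEMO_STAT_CNA   0x2000  /* command unit became not active  */
#define DEMO_STAT_RNR   0x1000  /* receive unit not ready          */

unsigned short demo_read_status(struct net_device *dev);       /* assumed */
void demo_ack(struct net_device *dev, unsigned short bits);    /* assumed: ack + CA strobe */
void demo_rx(struct net_device *dev);                          /* assumed */
void demo_handle_cmds(struct net_device *dev);                 /* assumed */
void demo_restart_ru(struct net_device *dev);                  /* assumed */

irqreturn_t demo_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    unsigned short status = demo_read_status(dev);

    if (!(status & (DEMO_STAT_CX | DEMO_STAT_FR |
                    DEMO_STAT_CNA | DEMO_STAT_RNR)))
        return IRQ_NONE;            /* not our interrupt */

    if (status & (DEMO_STAT_CX | DEMO_STAT_CNA))
        demo_handle_cmds(dev);      /* reap finished commands */

    if (status & DEMO_STAT_FR)
        demo_rx(dev);               /* pass received frames to the stack */

    if (status & DEMO_STAT_RNR) {
        dev->stats.rx_errors++;
        dev->stats.rx_fifo_errors++;
        demo_restart_ru(dev);       /* receiver stalled: rebuild and restart */
    }

    demo_ack(dev, status);          /* acknowledge the status bits, strobe CA */
    return IRQ_HANDLED;
}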
1399 static int i596_close(struct net_device *dev)
1401 struct i596_private *lp = dev->ml_priv;
1404 netif_stop_queue(dev);
1407 dev->name, lp->scb.status));
1411 wait_cmd(dev,lp,100,"close1 timed out");
1413 CA(dev);
1415 wait_cmd(dev,lp,100,"close2 timed out");
1418 DEB(DEB_STRUCT,i596_display_data(dev));
1419 i596_cleanup_cmd(dev,lp);
1440 free_irq(0x56, dev);
1442 free_irq(dev->irq, dev);
1443 remove_rx_bufs(dev);
1452 static void set_multicast_list(struct net_device *dev)
1454 struct i596_private *lp = dev->ml_priv;
1458 dev->name, netdev_mc_count(dev),
1459 dev->flags & IFF_PROMISC ? "ON" : "OFF",
1460 dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
1462 if (wait_cfg(dev, &lp->cf_cmd.cmd, 1000, "config change request timed out"))
1465 if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
1469 if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
1473 if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
1477 if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
1483 i596_add_cmd(dev, &lp->cf_cmd.cmd);
1486 cnt = netdev_mc_count(dev);
1491 dev->name, cnt);
1494 if (!netdev_mc_empty(dev)) {
1499 if (wait_cfg(dev, &lp->mc_cmd.cmd, 1000, "multicast list change request timed out"))
1505 netdev_for_each_mc_addr(ha, dev) {
1511 dev->name, cp));
1514 i596_add_cmd(dev, &cmd->cmd);
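set_multicast_list() (source lines 1452-1514) first patches the saved CmdConfigure bytes when the promiscuous or all-multicast flags change, then, if the list is short enough, copies every address into a CmdMulticastList block with netdev_for_each_mc_addr() before queueing it via i596_add_cmd(). A sketch of just the address-copy loop, assuming a flat destination buffer has already been reserved in the command block; demo_copy_mc_list() is an illustrative name:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/*
 * Copy the device's current multicast list into cp (room for max entries).
 * Returns the number of addresses copied.
 */
int demo_copy_mc_list(struct net_device *dev, u8 *cp, int max)
{
    struct netdev_hw_addr *ha;
    int cnt = 0;

    netdev_for_each_mc_addr(ha, dev) {
        if (cnt >= max)
            break;                  /* list too long: caller falls back to all-multicast */
        memcpy(cp, ha->addr, ETH_ALEN);
        cp += ETH_ALEN;
        cnt++;
    }
    return cnt;
}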