Lines Matching defs:dev
549 struct net_device *dev;
569 /* external phy that is used: only valid if dev->if_port != PORT_TP */
598 static void move_int_phy(struct net_device *dev, int addr);
600 static int mdio_read(struct net_device *dev, int reg);
601 static void mdio_write(struct net_device *dev, int reg, u16 data);
602 static void init_phy_fixup(struct net_device *dev);
603 static int miiport_read(struct net_device *dev, int phy_id, int reg);
604 static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data);
605 static int find_mii(struct net_device *dev);
606 static void natsemi_reset(struct net_device *dev);
607 static void natsemi_reload_eeprom(struct net_device *dev);
608 static void natsemi_stop_rxtx(struct net_device *dev);
609 static int netdev_open(struct net_device *dev);
610 static void do_cable_magic(struct net_device *dev);
611 static void undo_cable_magic(struct net_device *dev);
612 static void check_link(struct net_device *dev);
614 static void dump_ring(struct net_device *dev);
615 static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue);
616 static int alloc_ring(struct net_device *dev);
617 static void refill_rx(struct net_device *dev);
618 static void init_ring(struct net_device *dev);
619 static void drain_tx(struct net_device *dev);
620 static void drain_ring(struct net_device *dev);
621 static void free_ring(struct net_device *dev);
622 static void reinit_ring(struct net_device *dev);
623 static void init_registers(struct net_device *dev);
624 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
626 static void netdev_error(struct net_device *dev, int intr_status);
628 static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do);
629 static void netdev_tx_done(struct net_device *dev);
630 static int natsemi_change_mtu(struct net_device *dev, int new_mtu);
632 static void natsemi_poll_controller(struct net_device *dev);
634 static void __set_rx_mode(struct net_device *dev);
635 static void set_rx_mode(struct net_device *dev);
636 static void __get_stats(struct net_device *dev);
637 static struct net_device_stats *get_stats(struct net_device *dev);
638 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
639 static int netdev_set_wol(struct net_device *dev, u32 newval);
640 static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur);
641 static int netdev_set_sopass(struct net_device *dev, u8 *newval);
642 static int netdev_get_sopass(struct net_device *dev, u8 *data);
643 static int netdev_get_ecmd(struct net_device *dev,
645 static int netdev_set_ecmd(struct net_device *dev,
647 static void enable_wol_mode(struct net_device *dev, int enable_intr);
648 static int netdev_close(struct net_device *dev);
649 static int netdev_get_regs(struct net_device *dev, u8 *buf);
650 static int netdev_get_eeprom(struct net_device *dev, u8 *buf);
654 static ssize_t natsemi_show_##_name(struct device *dev, \
656 static ssize_t natsemi_set_##_name(struct device *dev, \
662 device_create_file(&_dev->dev, &dev_attr_##_name)
664 device_remove_file(&_dev->dev, &dev_attr_##_name)
668 static ssize_t natsemi_show_dspcfg_workaround(struct device *dev,
672 struct netdev_private *np = netdev_priv(to_net_dev(dev));
677 static ssize_t natsemi_set_dspcfg_workaround(struct device *dev,
681 struct netdev_private *np = netdev_priv(to_net_dev(dev));
703 static inline void __iomem *ns_ioaddr(struct net_device *dev)
705 struct netdev_private *np = netdev_priv(dev);
710 static inline void natsemi_irq_enable(struct net_device *dev)
712 writel(1, ns_ioaddr(dev) + IntrEnable);
713 readl(ns_ioaddr(dev) + IntrEnable);
716 static inline void natsemi_irq_disable(struct net_device *dev)
718 writel(0, ns_ioaddr(dev) + IntrEnable);
719 readl(ns_ioaddr(dev) + IntrEnable);
722 static void move_int_phy(struct net_device *dev, int addr)
724 struct netdev_private *np = netdev_priv(dev);
725 void __iomem *ioaddr = ns_ioaddr(dev);
747 static void natsemi_init_media(struct net_device *dev)
749 struct netdev_private *np = netdev_priv(dev);
753 netif_carrier_on(dev);
755 netif_carrier_off(dev);
758 tmp = mdio_read(dev, MII_BMCR);
762 np->advertising= mdio_read(dev, MII_ADVERTISE);
769 (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE)?
781 pci_name(np->pci_dev), mdio_read(dev, MII_BMSR),
804 struct net_device *dev;
843 dev = alloc_etherdev(sizeof (struct netdev_private));
844 if (!dev)
846 SET_NETDEV_DEV(dev, &pdev->dev);
862 dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
863 dev->dev_addr[i*2+1] = eedata >> 7;
867 np = netdev_priv(dev);
870 netif_napi_add(dev, &np->napi, natsemi_poll, 64);
871 np->dev = dev;
874 pci_set_drvdata(pdev, dev);
897 dev->if_port = PORT_MII;
899 dev->if_port = PORT_TP;
901 natsemi_reload_eeprom(dev);
902 natsemi_reset(dev);
904 if (dev->if_port != PORT_TP) {
905 np->phy_addr_external = find_mii(dev);
909 dev->if_port = PORT_TP;
929 dev->netdev_ops = &natsemi_netdev_ops;
930 dev->watchdog_timeo = TX_TIMEOUT;
932 dev->ethtool_ops = &ethtool_ops;
935 dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
936 dev->max_mtu = NATSEMI_RX_LIMIT - NATSEMI_HEADERS;
939 dev->mtu = mtu;
941 natsemi_init_media(dev);
949 i = register_netdev(dev);
959 dev->name, natsemi_pci_info[chip_idx].name,
961 dev->dev_addr, irq);
962 if (dev->if_port == PORT_TP)
972 unregister_netdev(dev);
978 free_netdev(dev);
1048 static int mii_getbit (struct net_device *dev)
1051 void __iomem *ioaddr = ns_ioaddr(dev);
1060 static void mii_send_bits (struct net_device *dev, u32 data, int len)
1063 void __iomem *ioaddr = ns_ioaddr(dev);
1077 static int miiport_read(struct net_device *dev, int phy_id, int reg)
1084 mii_send_bits (dev, 0xffffffff, 32);
1088 mii_send_bits (dev, cmd, 14);
1090 if (mii_getbit (dev))
1095 retval |= mii_getbit (dev);
1098 mii_getbit (dev);
1102 static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data)
1107 mii_send_bits (dev, 0xffffffff, 32);
1111 mii_send_bits (dev, cmd, 32);
1113 mii_getbit (dev);
1116 static int mdio_read(struct net_device *dev, int reg)
1118 struct netdev_private *np = netdev_priv(dev);
1119 void __iomem *ioaddr = ns_ioaddr(dev);
1125 if (dev->if_port == PORT_TP)
1128 return miiport_read(dev, np->phy_addr_external, reg);
1131 static void mdio_write(struct net_device *dev, int reg, u16 data)
1133 struct netdev_private *np = netdev_priv(dev);
1134 void __iomem *ioaddr = ns_ioaddr(dev);
1137 if (dev->if_port == PORT_TP)
1140 miiport_write(dev, np->phy_addr_external, reg, data);
1143 static void init_phy_fixup(struct net_device *dev)
1145 struct netdev_private *np = netdev_priv(dev);
1146 void __iomem *ioaddr = ns_ioaddr(dev);
1152 tmp = mdio_read(dev, MII_BMCR);
1156 np->advertising != mdio_read(dev, MII_ADVERTISE))
1160 mdio_write(dev, MII_ADVERTISE, np->advertising);
1177 mdio_write(dev, MII_BMCR, tmp);
1182 np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
1183 + mdio_read(dev, MII_PHYSID2);
1189 tmp = mdio_read(dev, MII_MCTRL);
1191 if (dev->if_port == PORT_FIBRE)
1195 mdio_write(dev, MII_MCTRL, tmp);
1240 dev->name, i*10);
1244 dev->name, i*10);
1256 static int switch_port_external(struct net_device *dev)
1258 struct netdev_private *np = netdev_priv(dev);
1259 void __iomem *ioaddr = ns_ioaddr(dev);
1268 dev->name);
1283 move_int_phy(dev, np->phy_addr_external);
1284 init_phy_fixup(dev);
1289 static int switch_port_internal(struct net_device *dev)
1291 struct netdev_private *np = netdev_priv(dev);
1292 void __iomem *ioaddr = ns_ioaddr(dev);
1303 dev->name);
1325 dev->name, i*10);
1328 init_phy_fixup(dev);
1341 static int find_mii(struct net_device *dev)
1343 struct netdev_private *np = netdev_priv(dev);
1349 did_switch = switch_port_external(dev);
1358 move_int_phy(dev, i);
1359 tmp = miiport_read(dev, i, MII_BMSR);
1362 np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
1363 + mdio_read(dev, MII_PHYSID2);
1373 switch_port_internal(dev);
1384 static void natsemi_reset(struct net_device *dev)
1392 struct netdev_private *np = netdev_priv(dev);
1393 void __iomem *ioaddr = ns_ioaddr(dev);
1429 dev->name, i*5);
1432 dev->name, i*5);
1438 if (dev->if_port == PORT_TP)
1461 static void reset_rx(struct net_device *dev)
1464 struct netdev_private *np = netdev_priv(dev);
1465 void __iomem *ioaddr = ns_ioaddr(dev);
1479 dev->name, i*15);
1482 dev->name, i*15);
1486 static void natsemi_reload_eeprom(struct net_device *dev)
1488 struct netdev_private *np = netdev_priv(dev);
1489 void __iomem *ioaddr = ns_ioaddr(dev);
1507 static void natsemi_stop_rxtx(struct net_device *dev)
1509 void __iomem * ioaddr = ns_ioaddr(dev);
1510 struct netdev_private *np = netdev_priv(dev);
1521 dev->name, i*5);
1524 dev->name, i*5);
1528 static int netdev_open(struct net_device *dev)
1530 struct netdev_private *np = netdev_priv(dev);
1531 void __iomem * ioaddr = ns_ioaddr(dev);
1536 natsemi_reset(dev);
1538 i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
1543 dev->name, irq);
1544 i = alloc_ring(dev);
1546 free_irq(irq, dev);
1551 init_ring(dev);
1553 init_registers(dev);
1554 /* now set the MAC address according to dev->dev_addr */
1556 u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i];
1564 netif_start_queue(dev);
1568 dev->name, (int)readl(ioaddr + ChipCmd));
1578 static void do_cable_magic(struct net_device *dev)
1580 struct netdev_private *np = netdev_priv(dev);
1581 void __iomem *ioaddr = ns_ioaddr(dev);
1583 if (dev->if_port != PORT_TP)
1609 np = netdev_priv(dev);
1622 static void undo_cable_magic(struct net_device *dev)
1625 struct netdev_private *np = netdev_priv(dev);
1626 void __iomem * ioaddr = ns_ioaddr(dev);
1628 if (dev->if_port != PORT_TP)
1642 static void check_link(struct net_device *dev)
1644 struct netdev_private *np = netdev_priv(dev);
1645 void __iomem * ioaddr = ns_ioaddr(dev);
1657 mdio_read(dev, MII_BMSR);
1658 bmsr = mdio_read(dev, MII_BMSR);
1661 if (netif_carrier_ok(dev)) {
1664 dev->name);
1665 netif_carrier_off(dev);
1666 undo_cable_magic(dev);
1670 if (!netif_carrier_ok(dev)) {
1672 printk(KERN_NOTICE "%s: link up.\n", dev->name);
1673 netif_carrier_on(dev);
1674 do_cable_magic(dev);
1681 np->advertising & mdio_read(dev, MII_LPA));
1684 } else if (mdio_read(dev, MII_BMCR) & BMCR_FULLDPLX)
1694 "link capability.\n", dev->name,
1708 static void init_registers(struct net_device *dev)
1710 struct netdev_private *np = netdev_priv(dev);
1711 void __iomem * ioaddr = ns_ioaddr(dev);
1713 init_phy_fixup(dev);
1760 dev->name, readl(ioaddr + WOLCmd));
1763 check_link(dev);
1764 __set_rx_mode(dev);
1768 natsemi_irq_enable(dev);
1790 struct net_device *dev = np->dev;
1791 void __iomem * ioaddr = ns_ioaddr(dev);
1800 dev->name);
1803 if (dev->if_port == PORT_TP) {
1812 if (!netif_queue_stopped(dev)) {
1816 "re-initializing\n", dev->name);
1819 natsemi_stop_rxtx(dev);
1820 dump_ring(dev);
1821 reinit_ring(dev);
1822 init_registers(dev);
1832 check_link(dev);
1837 check_link(dev);
1843 refill_rx(dev);
1858 static void dump_ring(struct net_device *dev)
1860 struct netdev_private *np = netdev_priv(dev);
1881 static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue)
1883 struct netdev_private *np = netdev_priv(dev);
1884 void __iomem * ioaddr = ns_ioaddr(dev);
1894 dev->name, readl(ioaddr + IntrStatus));
1895 dump_ring(dev);
1897 natsemi_reset(dev);
1898 reinit_ring(dev);
1899 init_registers(dev);
1903 dev->name);
1908 netif_trans_update(dev); /* prevent tx timeout */
1909 dev->stats.tx_errors++;
1910 netif_wake_queue(dev);
1913 static int alloc_ring(struct net_device *dev)
1915 struct netdev_private *np = netdev_priv(dev);
1916 np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev,
1925 static void refill_rx(struct net_device *dev)
1927 struct netdev_private *np = netdev_priv(dev);
1935 skb = netdev_alloc_skb(dev, buflen);
1939 np->rx_dma[entry] = dma_map_single(&np->pci_dev->dev,
1942 if (dma_mapping_error(&np->pci_dev->dev, np->rx_dma[entry])) {
1953 printk(KERN_WARNING "%s: going OOM.\n", dev->name);
1958 static void set_bufsize(struct net_device *dev)
1960 struct netdev_private *np = netdev_priv(dev);
1961 if (dev->mtu <= ETH_DATA_LEN)
1964 np->rx_buf_sz = dev->mtu + NATSEMI_HEADERS;
1967 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1968 static void init_ring(struct net_device *dev)
1970 struct netdev_private *np = netdev_priv(dev);
1987 set_bufsize(dev);
2002 refill_rx(dev);
2003 dump_ring(dev);
2006 static void drain_tx(struct net_device *dev)
2008 struct netdev_private *np = netdev_priv(dev);
2013 dma_unmap_single(&np->pci_dev->dev, np->tx_dma[i],
2016 dev->stats.tx_dropped++;
2022 static void drain_rx(struct net_device *dev)
2024 struct netdev_private *np = netdev_priv(dev);
2033 dma_unmap_single(&np->pci_dev->dev, np->rx_dma[i],
2042 static void drain_ring(struct net_device *dev)
2044 drain_rx(dev);
2045 drain_tx(dev);
2048 static void free_ring(struct net_device *dev)
2050 struct netdev_private *np = netdev_priv(dev);
2051 dma_free_coherent(&np->pci_dev->dev,
2056 static void reinit_rx(struct net_device *dev)
2058 struct netdev_private *np = netdev_priv(dev);
2069 refill_rx(dev);
2072 static void reinit_ring(struct net_device *dev)
2074 struct netdev_private *np = netdev_priv(dev);
2078 drain_tx(dev);
2083 reinit_rx(dev);
2086 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
2088 struct netdev_private *np = netdev_priv(dev);
2089 void __iomem * ioaddr = ns_ioaddr(dev);
2100 np->tx_dma[entry] = dma_map_single(&np->pci_dev->dev, skb->data,
2102 if (dma_mapping_error(&np->pci_dev->dev, np->tx_dma[entry])) {
2105 dev->stats.tx_dropped++;
2120 netdev_tx_done(dev);
2122 netif_stop_queue(dev);
2128 dev->stats.tx_dropped++;
2134 dev->name, np->cur_tx, entry);
2139 static void netdev_tx_done(struct net_device *dev)
2141 struct netdev_private *np = netdev_priv(dev);
2150 dev->name, np->dirty_tx,
2153 dev->stats.tx_packets++;
2154 dev->stats.tx_bytes += np->tx_skbuff[entry]->len;
2159 dev->stats.tx_aborted_errors++;
2161 dev->stats.tx_fifo_errors++;
2163 dev->stats.tx_carrier_errors++;
2165 dev->stats.tx_window_errors++;
2166 dev->stats.tx_errors++;
2168 dma_unmap_single(&np->pci_dev->dev, np->tx_dma[entry],
2174 if (netif_queue_stopped(dev) &&
2177 netif_wake_queue(dev);
2185 struct net_device *dev = dev_instance;
2186 struct netdev_private *np = netdev_priv(dev);
2187 void __iomem * ioaddr = ns_ioaddr(dev);
2203 dev->name, np->intr_status,
2210 natsemi_irq_disable(dev);
2215 dev->name, np->intr_status,
2227 struct net_device *dev = np->dev;
2228 void __iomem * ioaddr = ns_ioaddr(dev);
2235 dev->name, np->intr_status,
2243 netdev_rx(dev, &work_done, budget);
2249 netdev_tx_done(dev);
2255 netdev_error(dev, np->intr_status);
2269 natsemi_irq_enable(dev);
2277 static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
2279 struct netdev_private *np = netdev_priv(dev);
2284 void __iomem * ioaddr = ns_ioaddr(dev);
2311 "status %#08x.\n", dev->name,
2313 dev->stats.rx_length_errors++;
2321 reset_rx(dev);
2322 reinit_rx(dev);
2324 check_link(dev);
2333 dev->stats.rx_errors++;
2335 dev->stats.rx_over_errors++;
2337 dev->stats.rx_length_errors++;
2339 dev->stats.rx_frame_errors++;
2341 dev->stats.rx_crc_errors++;
2354 (skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET)) != NULL) {
2357 dma_sync_single_for_cpu(&np->pci_dev->dev,
2364 dma_sync_single_for_device(&np->pci_dev->dev,
2369 dma_unmap_single(&np->pci_dev->dev,
2376 skb->protocol = eth_type_trans(skb, dev);
2378 dev->stats.rx_packets++;
2379 dev->stats.rx_bytes += pkt_len;
2385 refill_rx(dev);
2394 static void netdev_error(struct net_device *dev, int intr_status)
2396 struct netdev_private *np = netdev_priv(dev);
2397 void __iomem * ioaddr = ns_ioaddr(dev);
2401 u16 lpa = mdio_read(dev, MII_LPA);
2402 if (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE &&
2406 " %#04x partner %#04x.\n", dev->name,
2412 check_link(dev);
2415 __get_stats(dev);
2423 dev->name, np->tx_config);
2428 dev->name, np->tx_config);
2435 dev->name, wol_status);
2440 dev->name);
2442 dev->stats.rx_fifo_errors++;
2443 dev->stats.rx_errors++;
2447 printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
2449 dev->stats.tx_fifo_errors++;
2450 dev->stats.tx_errors++;
2451 dev->stats.rx_fifo_errors++;
2452 dev->stats.rx_errors++;
2457 static void __get_stats(struct net_device *dev)
2459 void __iomem * ioaddr = ns_ioaddr(dev);
2462 dev->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
2463 dev->stats.rx_missed_errors += readl(ioaddr + RxMissed);
2466 static struct net_device_stats *get_stats(struct net_device *dev)
2468 struct netdev_private *np = netdev_priv(dev);
2472 if (netif_running(dev) && !np->hands_off)
2473 __get_stats(dev);
2476 return &dev->stats;
2480 static void natsemi_poll_controller(struct net_device *dev)
2482 struct netdev_private *np = netdev_priv(dev);
2486 intr_handler(irq, dev);
2492 static void __set_rx_mode(struct net_device *dev)
2494 void __iomem * ioaddr = ns_ioaddr(dev);
2495 struct netdev_private *np = netdev_priv(dev);
2499 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2502 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2503 (dev->flags & IFF_ALLMULTI)) {
2511 netdev_for_each_mc_addr(ha, dev) {
2527 static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
2529 dev->mtu = new_mtu;
2532 if (netif_running(dev)) {
2533 struct netdev_private *np = netdev_priv(dev);
2534 void __iomem * ioaddr = ns_ioaddr(dev);
2540 natsemi_stop_rxtx(dev);
2542 drain_rx(dev);
2544 set_bufsize(dev);
2545 reinit_rx(dev);
2555 static void set_rx_mode(struct net_device *dev)
2557 struct netdev_private *np = netdev_priv(dev);
2560 __set_rx_mode(dev);
2564 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2566 struct netdev_private *np = netdev_priv(dev);
2572 static int get_regs_len(struct net_device *dev)
2577 static int get_eeprom_len(struct net_device *dev)
2579 struct netdev_private *np = netdev_priv(dev);
2583 static int get_link_ksettings(struct net_device *dev,
2586 struct netdev_private *np = netdev_priv(dev);
2588 netdev_get_ecmd(dev, ecmd);
2593 static int set_link_ksettings(struct net_device *dev,
2596 struct netdev_private *np = netdev_priv(dev);
2599 res = netdev_set_ecmd(dev, ecmd);
2604 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2606 struct netdev_private *np = netdev_priv(dev);
2608 netdev_get_wol(dev, &wol->supported, &wol->wolopts);
2609 netdev_get_sopass(dev, wol->sopass);
2613 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2615 struct netdev_private *np = netdev_priv(dev);
2618 netdev_set_wol(dev, wol->wolopts);
2619 res = netdev_set_sopass(dev, wol->sopass);
2624 static void get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
2626 struct netdev_private *np = netdev_priv(dev);
2629 netdev_get_regs(dev, buf);
2633 static u32 get_msglevel(struct net_device *dev)
2635 struct netdev_private *np = netdev_priv(dev);
2639 static void set_msglevel(struct net_device *dev, u32 val)
2641 struct netdev_private *np = netdev_priv(dev);
2645 static int nway_reset(struct net_device *dev)
2650 tmp = mdio_read(dev, MII_BMCR);
2653 mdio_write(dev, MII_BMCR, tmp);
2659 static u32 get_link(struct net_device *dev)
2662 mdio_read(dev, MII_BMSR);
2663 return (mdio_read(dev, MII_BMSR)&BMSR_LSTATUS) ? 1:0;
2666 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
2668 struct netdev_private *np = netdev_priv(dev);
2678 res = netdev_get_eeprom(dev, eebuf);
2702 static int netdev_set_wol(struct net_device *dev, u32 newval)
2704 struct netdev_private *np = netdev_priv(dev);
2705 void __iomem * ioaddr = ns_ioaddr(dev);
2732 static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur)
2734 struct netdev_private *np = netdev_priv(dev);
2735 void __iomem * ioaddr = ns_ioaddr(dev);
2768 static int netdev_set_sopass(struct net_device *dev, u8 *newval)
2770 struct netdev_private *np = netdev_priv(dev);
2771 void __iomem * ioaddr = ns_ioaddr(dev);
2800 static int netdev_get_sopass(struct net_device *dev, u8 *data)
2802 struct netdev_private *np = netdev_priv(dev);
2803 void __iomem * ioaddr = ns_ioaddr(dev);
2829 static int netdev_get_ecmd(struct net_device *dev,
2832 struct netdev_private *np = netdev_priv(dev);
2836 ecmd->base.port = dev->if_port;
2891 np->advertising & mdio_read(dev, MII_LPA));
2912 static int netdev_set_ecmd(struct net_device *dev,
2915 struct netdev_private *np = netdev_priv(dev);
2968 dev->if_port = ecmd->base.port;
2992 switch_port_internal(dev);
2994 switch_port_external(dev);
2997 init_phy_fixup(dev);
2998 check_link(dev);
3002 static int netdev_get_regs(struct net_device *dev, u8 *buf)
3008 void __iomem * ioaddr = ns_ioaddr(dev);
3017 rbuf[i] = mdio_read(dev, i & 0x1f);
3039 dev->name, rbuf[4] & rbuf[5]);
3054 static int netdev_get_eeprom(struct net_device *dev, u8 *buf)
3058 void __iomem * ioaddr = ns_ioaddr(dev);
3059 struct netdev_private *np = netdev_priv(dev);
3072 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3075 struct netdev_private *np = netdev_priv(dev);
3087 if (dev->if_port == PORT_TP) {
3089 data->val_out = mdio_read(dev,
3094 move_int_phy(dev, data->phy_id & 0x1f);
3095 data->val_out = miiport_read(dev, data->phy_id & 0x1f,
3101 if (dev->if_port == PORT_TP) {
3105 mdio_write(dev, data->reg_num & 0x1f,
3113 move_int_phy(dev, data->phy_id & 0x1f);
3114 miiport_write(dev, data->phy_id & 0x1f,
3124 static void enable_wol_mode(struct net_device *dev, int enable_intr)
3126 void __iomem * ioaddr = ns_ioaddr(dev);
3127 struct netdev_private *np = netdev_priv(dev);
3131 dev->name);
3153 natsemi_irq_enable(dev);
3157 static int netdev_close(struct net_device *dev)
3159 void __iomem * ioaddr = ns_ioaddr(dev);
3160 struct netdev_private *np = netdev_priv(dev);
3166 dev->name, (int)readl(ioaddr + ChipCmd));
3170 dev->name, np->cur_tx, np->dirty_tx,
3185 natsemi_irq_disable(dev);
3190 free_irq(irq, dev);
3205 natsemi_stop_rxtx(dev);
3207 __get_stats(dev);
3211 netif_carrier_off(dev);
3212 netif_stop_queue(dev);
3214 dump_ring(dev);
3215 drain_ring(dev);
3216 free_ring(dev);
3224 enable_wol_mode(dev, 0);
3236 struct net_device *dev = pci_get_drvdata(pdev);
3237 void __iomem * ioaddr = ns_ioaddr(dev);
3240 unregister_netdev (dev);
3242 free_netdev (dev);
3273 struct net_device *dev = dev_get_drvdata(dev_d);
3274 struct netdev_private *np = netdev_priv(dev);
3275 void __iomem * ioaddr = ns_ioaddr(dev);
3278 if (netif_running (dev)) {
3286 natsemi_irq_disable(dev);
3288 natsemi_stop_rxtx(dev);
3289 netif_stop_queue(dev);
3297 __get_stats(dev);
3300 drain_ring(dev);
3309 enable_wol_mode(dev, 0);
3316 netif_device_detach(dev);
3324 struct net_device *dev = dev_get_drvdata(dev_d);
3325 struct netdev_private *np = netdev_priv(dev);
3328 if (netif_device_present(dev))
3330 if (netif_running(dev)) {
3338 natsemi_reset(dev);
3339 init_ring(dev);
3343 init_registers(dev);
3344 netif_device_attach(dev);
3350 netif_device_attach(dev);
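
Nearly every hit above funnels through the same two helpers: netdev_priv(dev) to reach the driver-private state and ns_ioaddr(dev) to reach the memory-mapped registers, with a readl() after each writel() to flush the posted PCI write. A minimal sketch of that pattern, reconstructed from the matched lines (only lines containing "dev" appear above, so the np->ioaddr field name and the return statement are assumptions):

static inline void __iomem *ns_ioaddr(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	return np->ioaddr;	/* assumed field holding the mapped register base */
}

static inline void natsemi_irq_enable(struct net_device *dev)
{
	writel(1, ns_ioaddr(dev) + IntrEnable);
	readl(ns_ioaddr(dev) + IntrEnable);	/* read back to flush the posted write */
}

static inline void natsemi_irq_disable(struct net_device *dev)
{
	writel(0, ns_ioaddr(dev) + IntrEnable);
	readl(ns_ioaddr(dev) + IntrEnable);	/* read back to flush the posted write */
}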