Lines matching refs: dev
324 struct net_device *dev;
375 static void __cp_set_rx_mode (struct net_device *dev);
379 static void cp_poll_controller(struct net_device *dev);
381 static int cp_get_eeprom_len(struct net_device *dev);
382 static int cp_get_eeprom(struct net_device *dev,
384 static int cp_set_eeprom(struct net_device *dev,
409 unsigned int mtu = cp->dev->mtu;
423 skb->protocol = eth_type_trans (skb, cp->dev);
425 cp->dev->stats.rx_packets++;
426 cp->dev->stats.rx_bytes += skb->len;
437 netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
439 cp->dev->stats.rx_errors++;
441 cp->dev->stats.rx_frame_errors++;
443 cp->dev->stats.rx_crc_errors++;
445 cp->dev->stats.rx_length_errors++;
447 cp->dev->stats.rx_length_errors++;
449 cp->dev->stats.rx_fifo_errors++;
466 struct net_device *dev = cp->dev;
497 dev->stats.rx_dropped++;
507 netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
512 dev->stats.rx_dropped++;
516 new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
518 if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
519 dev->stats.rx_dropped++;
524 dma_unmap_single(&cp->pdev->dev, mapping,
570 struct net_device *dev = dev_instance;
576 if (unlikely(dev == NULL))
578 cp = netdev_priv(dev);
592 netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
598 if (unlikely(!netif_running(dev))) {
620 netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
637 static void cp_poll_controller(struct net_device *dev)
639 struct cp_private *cp = netdev_priv(dev);
643 cp_interrupt(irq, dev);
667 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
673 netif_dbg(cp, tx_err, cp->dev,
675 cp->dev->stats.tx_errors++;
677 cp->dev->stats.tx_window_errors++;
679 cp->dev->stats.tx_aborted_errors++;
681 cp->dev->stats.tx_carrier_errors++;
683 cp->dev->stats.tx_fifo_errors++;
685 cp->dev->stats.collisions +=
687 cp->dev->stats.tx_packets++;
688 cp->dev->stats.tx_bytes += skb->len;
689 netif_dbg(cp, tx_done, cp->dev,
704 netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
706 netif_wake_queue(cp->dev);
726 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
732 struct net_device *dev)
734 struct cp_private *cp = netdev_priv(dev);
745 netif_stop_queue(dev);
747 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
756 netdev_WARN_ONCE(dev, "Net bug: GSO size %d too large for 8139CP\n",
784 mapping = dma_map_single(&cp->pdev->dev, skb->data, len, DMA_TO_DEVICE);
785 if (dma_mapping_error(&cp->pdev->dev, mapping))
799 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
812 first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
814 if (dma_mapping_error(&cp->pdev->dev, first_mapping))
827 mapping = dma_map_single(&cp->pdev->dev,
830 if (dma_mapping_error(&cp->pdev->dev, mapping)) {
864 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n",
869 netdev_sent_queue(dev, skb->len);
871 netif_stop_queue(dev);
881 cp->dev->stats.tx_dropped++;
888 static void __cp_set_rx_mode (struct net_device *dev)
890 struct cp_private *cp = netdev_priv(dev);
895 if (dev->flags & IFF_PROMISC) {
901 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
902 (dev->flags & IFF_ALLMULTI)) {
910 netdev_for_each_mc_addr(ha, dev) {
926 static void cp_set_rx_mode (struct net_device *dev)
929 struct cp_private *cp = netdev_priv(dev);
932 __cp_set_rx_mode(dev);
939 cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
943 static struct net_device_stats *cp_get_stats(struct net_device *dev)
945 struct cp_private *cp = netdev_priv(dev);
950 if (netif_running(dev) && netif_device_present(dev))
954 return &dev->stats;
968 netdev_reset_queue(cp->dev);
984 netdev_err(cp->dev, "hardware reset timeout\n");
1020 netdev_reset_queue(cp->dev);
1030 struct net_device *dev = cp->dev;
1037 cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1038 cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1043 __cp_set_rx_mode(dev);
1060 struct net_device *dev = cp->dev;
1067 skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
1071 mapping = dma_map_single(&cp->pdev->dev, skb->data,
1073 if (dma_mapping_error(&cp->pdev->dev, mapping)) {
1115 struct device *d = &cp->pdev->dev;
1141 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(desc->addr),
1152 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(desc->addr),
1157 cp->dev->stats.tx_dropped++;
1160 netdev_reset_queue(cp->dev);
1173 dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
1179 static int cp_open (struct net_device *dev)
1181 struct cp_private *cp = netdev_priv(dev);
1185 netif_dbg(cp, ifup, dev, "enabling interface\n");
1195 rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
1201 netif_carrier_off(dev);
1203 netif_start_queue(dev);
1214 static int cp_close (struct net_device *dev)
1216 struct cp_private *cp = netdev_priv(dev);
1221 netif_dbg(cp, ifdown, dev, "disabling interface\n");
1225 netif_stop_queue(dev);
1226 netif_carrier_off(dev);
1232 free_irq(cp->pdev->irq, dev);
1238 static void cp_tx_timeout(struct net_device *dev, unsigned int txqueue)
1240 struct cp_private *cp = netdev_priv(dev);
1244 netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
1250 netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n",
1253 netif_dbg(cp, tx_err, cp->dev,
1265 __cp_set_rx_mode(dev);
1268 netif_wake_queue(dev);
1274 static int cp_change_mtu(struct net_device *dev, int new_mtu)
1276 struct cp_private *cp = netdev_priv(dev);
1279 if (!netif_running(dev)) {
1280 dev->mtu = new_mtu;
1286 cp_close(dev);
1287 dev->mtu = new_mtu;
1289 return cp_open(dev);
1303 static int mdio_read(struct net_device *dev, int phy_id, int location)
1305 struct cp_private *cp = netdev_priv(dev);
1312 static void mdio_write(struct net_device *dev, int phy_id, int location,
1315 struct cp_private *cp = netdev_priv(dev);
1381 static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1383 struct cp_private *cp = netdev_priv(dev);
1390 static void cp_get_ringparam(struct net_device *dev,
1399 static int cp_get_regs_len(struct net_device *dev)
1404 static int cp_get_sset_count (struct net_device *dev, int sset)
1414 static int cp_get_link_ksettings(struct net_device *dev,
1417 struct cp_private *cp = netdev_priv(dev);
1427 static int cp_set_link_ksettings(struct net_device *dev,
1430 struct cp_private *cp = netdev_priv(dev);
1441 static int cp_nway_reset(struct net_device *dev)
1443 struct cp_private *cp = netdev_priv(dev);
1447 static u32 cp_get_msglevel(struct net_device *dev)
1449 struct cp_private *cp = netdev_priv(dev);
1453 static void cp_set_msglevel(struct net_device *dev, u32 value)
1455 struct cp_private *cp = netdev_priv(dev);
1459 static int cp_set_features(struct net_device *dev, netdev_features_t features)
1461 struct cp_private *cp = netdev_priv(dev);
1464 if (!((dev->features ^ features) & NETIF_F_RXCSUM))
1485 static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1488 struct cp_private *cp = netdev_priv(dev);
1501 static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1503 struct cp_private *cp = netdev_priv(dev);
1511 static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1513 struct cp_private *cp = netdev_priv(dev);
1524 static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
1536 static void cp_get_ethtool_stats (struct net_device *dev,
1539 struct cp_private *cp = netdev_priv(dev);
1544 nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
1580 dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
1604 static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1606 struct cp_private *cp = netdev_priv(dev);
1610 if (!netif_running(dev))
1619 static int cp_set_mac_address(struct net_device *dev, void *p)
1621 struct cp_private *cp = netdev_priv(dev);
1627 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1632 cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1633 cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1758 static int cp_get_eeprom_len(struct net_device *dev)
1760 struct cp_private *cp = netdev_priv(dev);
1770 static int cp_get_eeprom(struct net_device *dev,
1773 struct cp_private *cp = netdev_priv(dev);
1808 static int cp_set_eeprom(struct net_device *dev,
1811 struct cp_private *cp = netdev_priv(dev);
1857 struct net_device *dev,
1886 struct net_device *dev;
1897 dev_info(&pdev->dev,
1903 dev = alloc_etherdev(sizeof(struct cp_private));
1904 if (!dev)
1906 SET_NETDEV_DEV(dev, &pdev->dev);
1908 cp = netdev_priv(dev);
1910 cp->dev = dev;
1913 cp->mii_if.dev = dev;
1936 dev_err(&pdev->dev, "no MMIO resource\n");
1941 dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
1956 dev_err(&pdev->dev,
1962 dev_err(&pdev->dev,
1971 dev->features |= NETIF_F_RXCSUM;
1972 dev->hw_features |= NETIF_F_RXCSUM;
1977 dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
1989 ((__le16 *) (dev->dev_addr))[i] =
1992 dev->netdev_ops = &cp_netdev_ops;
1993 netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
1994 dev->ethtool_ops = &cp_ethtool_ops;
1995 dev->watchdog_timeo = TX_TIMEOUT;
1997 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
2001 dev->features |= NETIF_F_HIGHDMA;
2003 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
2005 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
2009 dev->min_mtu = CP_MIN_MTU;
2010 dev->max_mtu = CP_MAX_MTU;
2012 rc = register_netdev(dev);
2016 netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n",
2017 regs, dev->dev_addr, pdev->irq);
2019 pci_set_drvdata(pdev, dev);
2038 free_netdev(dev);
2044 struct net_device *dev = pci_get_drvdata(pdev);
2045 struct cp_private *cp = netdev_priv(dev);
2047 unregister_netdev(dev);
2054 free_netdev(dev);
2059 struct net_device *dev = dev_get_drvdata(device);
2060 struct cp_private *cp = netdev_priv(dev);
2063 if (!netif_running(dev))
2066 netif_device_detach (dev);
2067 netif_stop_queue (dev);
2084 struct net_device *dev = dev_get_drvdata(device);
2085 struct cp_private *cp = netdev_priv(dev);
2088 if (!netif_running(dev))
2091 netif_device_attach (dev);
2097 netif_start_queue (dev);
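
The listing is dominated by the netdev_priv(dev) idiom: the driver's private struct cp_private lives in the memory allocated together with the net_device (see the alloc_etherdev() call around source line 1903), so every handler recovers it from the dev pointer it is handed. Below is a minimal sketch of that wiring, using a made-up my_priv/my_probe pair for illustration; the kernel APIs themselves (alloc_etherdev, SET_NETDEV_DEV, netdev_priv, register_netdev, pci_set_drvdata, free_netdev) are real.

        #include <linux/etherdevice.h>
        #include <linux/netdevice.h>
        #include <linux/pci.h>

        struct my_priv {                        /* stand-in for struct cp_private */
                struct net_device *dev;         /* back-pointer, like cp->dev */
                struct pci_dev *pdev;
        };

        static int my_probe(struct pci_dev *pdev)
        {
                struct net_device *dev;
                struct my_priv *priv;
                int rc;

                /* private area is allocated in one block with the net_device */
                dev = alloc_etherdev(sizeof(struct my_priv));
                if (!dev)
                        return -ENOMEM;
                SET_NETDEV_DEV(dev, &pdev->dev);

                priv = netdev_priv(dev);        /* what every handler calls */
                priv->dev = dev;
                priv->pdev = pdev;

                rc = register_netdev(dev);
                if (rc) {
                        free_netdev(dev);
                        return rc;
                }
                pci_set_drvdata(pdev, dev);     /* paired with pci_get_drvdata() in remove */
                return 0;
        }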
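
The RX refill and TX paths in the listing all follow the same DMA pattern: dma_map_single() against &cp->pdev->dev, an immediate dma_mapping_error() check, and a matching dma_unmap_single() once the hardware is done with the buffer. Here is a sketch of the RX-refill half, assuming the my_priv struct from the previous sketch and a hypothetical fill_descriptor() ring helper; the DMA-API calls are real.

        #include <linux/dma-mapping.h>
        #include <linux/skbuff.h>

        static int my_refill_rx(struct my_priv *priv, struct sk_buff *skb,
                                unsigned int buflen)
        {
                struct device *d = &priv->pdev->dev;
                dma_addr_t mapping;

                mapping = dma_map_single(d, skb->data, buflen, DMA_FROM_DEVICE);
                if (dma_mapping_error(d, mapping)) {
                        /* never hand an unchecked bus address to the NIC */
                        dev_kfree_skb_any(skb);
                        priv->dev->stats.rx_dropped++;
                        return -ENOMEM;
                }

                fill_descriptor(priv, skb, mapping);    /* hypothetical ring helper */
                return 0;
        }

On the completion side the driver undoes the mapping with dma_unmap_single(d, mapping, buflen, DMA_FROM_DEVICE) before passing the skb up the stack, which is the pattern visible around source lines 524, 667 and 1141 in the listing.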
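
netdev_sent_queue(), netdev_completed_queue() and netdev_reset_queue() in the listing are the byte-queue-limits bookkeeping: bytes are charged when a frame is queued in start_xmit and credited back in the TX-completion handler. A sketch of that split follows, assuming hypothetical ring_is_full()/ring_has_room() helpers; the queue-management calls are real.

        static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
        {
                /* ... fill a TX descriptor and kick the hardware ... */
                netdev_sent_queue(dev, skb->len);       /* charge the bytes to BQL */
                if (ring_is_full(dev))                  /* hypothetical helper */
                        netif_stop_queue(dev);
                return NETDEV_TX_OK;
        }

        static void my_tx_complete(struct net_device *dev,
                                   unsigned int pkts_compl, unsigned int bytes_compl)
        {
                netdev_completed_queue(dev, pkts_compl, bytes_compl);
                if (netif_queue_stopped(dev) && ring_has_room(dev))     /* hypothetical helper */
                        netif_wake_queue(dev);
        }

netdev_reset_queue() (source lines 968, 1020 and 1160 in the listing) zeroes the same counters whenever the ring is reinitialised, so the accounting cannot drift across a reset.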
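
Finally, the cp_change_mtu() lines show the simple close-and-reopen MTU policy: if the interface is down the new MTU is just recorded, otherwise the device is brought down, the MTU updated, and the device reopened so the RX buffers are sized for the new MTU. A condensed sketch, with my_open()/my_close() standing in for cp_open()/cp_close():

        static int my_change_mtu(struct net_device *dev, int new_mtu)
        {
                if (!netif_running(dev)) {
                        dev->mtu = new_mtu;     /* nothing allocated yet */
                        return 0;
                }
                my_close(dev);                  /* mirrors cp_close() */
                dev->mtu = new_mtu;
                return my_open(dev);            /* mirrors cp_open() */
        }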