Lines matching defs:dev in the mv643xx_eth driver (drivers/net/ethernet/marvell/mv643xx_eth.c); each entry gives the file line number followed by the matching source line.

284 static int mv643xx_eth_open(struct net_device *dev);
285 static int mv643xx_eth_stop(struct net_device *dev);
372 struct net_device *dev;
496 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
509 struct net_device_stats *stats = &mp->dev->stats;
533 dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
570 skb->protocol = eth_type_trans(skb, mp->dev);
582 netdev_err(mp->dev,
610 skb = netdev_alloc_skb(mp->dev, mp->skb_size);
630 rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
727 txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
753 desc->buf_ptr = dma_map_single(dev->dev.parent, data,
755 if (unlikely(dma_mapping_error(dev->dev.parent,
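The txq_put_data_tso() matches above show the driver's streaming-DMA convention: mappings are made against mp->dev->dev.parent (the underlying platform device, not the net_device itself), and every dma_map_single() is checked with dma_mapping_error() before the address reaches a descriptor. A minimal sketch of that pattern, with a hypothetical example_map_buffer() helper:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical helper: map "data" for device reads, verify the
 * mapping, then unmap it again. "parent" stands in for the driver's
 * mp->dev->dev.parent. */
static int example_map_buffer(struct device *parent, void *data, size_t len)
{
    dma_addr_t busaddr;

    busaddr = dma_map_single(parent, data, len, DMA_TO_DEVICE);
    if (unlikely(dma_mapping_error(parent, busaddr)))
        return -ENOMEM;    /* never hand a failed mapping to hardware */

    /* ... store busaddr in a TX descriptor and start the DMA ... */

    dma_unmap_single(parent, busaddr, len, DMA_TO_DEVICE);
    return 0;
}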
819 struct net_device *dev)
830 netdev_dbg(dev, "not enough descriptors for TSO!\n");
859 ret = txq_put_data_tso(dev, txq, skb, tso.data, size,
923 desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
930 struct net_device *dev)
945 netdev_err(dev, "tx queue full?!\n");
970 desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
993 static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
995 struct mv643xx_eth_private *mp = netdev_priv(dev);
1002 nq = netdev_get_tx_queue(dev, queue);
1005 netdev_printk(KERN_DEBUG, dev,
1013 ret = txq_submit_tso(txq, skb, dev);
1015 ret = txq_submit_skb(txq, skb, dev);
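The mv643xx_eth_xmit() matches show the usual ndo_start_xmit shape: netdev_priv() recovers the private area that alloc_etherdev_mq() placed behind the net_device, and the skb's queue mapping selects both the driver's TX ring and the matching core netdev_queue. A sketch under hypothetical names (example_priv, example_txq):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical per-port state, standing in for mv643xx_eth_private. */
struct example_priv {
    struct example_txq {
        int index;
    } txq[8];
};

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct example_priv *mp = netdev_priv(dev);
    int queue = skb_get_queue_mapping(skb);
    struct example_txq *txq = &mp->txq[queue];
    struct netdev_queue *nq = netdev_get_tx_queue(dev, queue);

    /* ... fill descriptors on txq, account bytes on nq, kick DMA ... */
    (void)txq;
    (void)nq;
    return NETDEV_TX_OK;
}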
1035 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
1060 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
1094 dma_unmap_page(mp->dev->dev.parent,
1099 dma_unmap_single(mp->dev->dev.parent,
1113 netdev_info(mp->dev, "tx error\n");
1114 mp->dev->stats.tx_errors++;
1143 mtu = (mp->dev->mtu + 255) >> 8;
1211 static void mv643xx_eth_adjust_link(struct net_device *dev)
1213 struct mv643xx_eth_private *mp = netdev_priv(dev);
1220 if (dev->phydev->autoneg == AUTONEG_ENABLE) {
1228 if (dev->phydev->speed == SPEED_1000) {
1237 if (dev->phydev->speed == SPEED_100)
1242 if (dev->phydev->duplex == DUPLEX_FULL)
1252 static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
1254 struct mv643xx_eth_private *mp = netdev_priv(dev);
1255 struct net_device_stats *stats = &dev->stats;
1486 struct net_device *dev = mp->dev;
1488 phy_ethtool_ksettings_get(dev->phydev, cmd);
1541 mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1545 if (dev->phydev)
1546 phy_ethtool_get_wol(dev->phydev, wol);
1550 mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1554 if (!dev->phydev)
1557 err = phy_ethtool_set_wol(dev->phydev, wol);
1562 netdev_info(dev, "The PHY does not support set_wol, was CONFIG_MARVELL_PHY enabled?\n");
1567 mv643xx_eth_get_link_ksettings(struct net_device *dev,
1570 struct mv643xx_eth_private *mp = netdev_priv(dev);
1572 if (dev->phydev)
1579 mv643xx_eth_set_link_ksettings(struct net_device *dev,
1586 if (!dev->phydev)
1598 ret = phy_ethtool_ksettings_set(dev->phydev, &c);
1600 mv643xx_eth_adjust_link(dev);
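The link-settings and WoL matches share one theme: whenever dev->phydev is attached, the ethtool operation is delegated to phylib rather than implemented in the MAC driver, and an error is returned when no PHY exists. A minimal sketch of that delegation (the error codes here are illustrative, not the driver's):

#include <linux/phy.h>
#include <linux/netdevice.h>

static int example_get_link_ksettings(struct net_device *dev,
                                      struct ethtool_link_ksettings *cmd)
{
    if (!dev->phydev)
        return -ENODEV;

    phy_ethtool_ksettings_get(dev->phydev, cmd);
    return 0;
}

static int example_set_link_ksettings(struct net_device *dev,
                                      const struct ethtool_link_ksettings *cmd)
{
    if (!dev->phydev)
        return -ENODEV;

    return phy_ethtool_ksettings_set(dev->phydev, cmd);
}

Note how the driver additionally re-runs mv643xx_eth_adjust_link() after a successful set (line 1600), so the MAC-side speed/duplex/flow-control registers track the new PHY state.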
1604 static void mv643xx_eth_get_drvinfo(struct net_device *dev,
1615 static int mv643xx_eth_get_coalesce(struct net_device *dev,
1620 struct mv643xx_eth_private *mp = netdev_priv(dev);
1628 static int mv643xx_eth_set_coalesce(struct net_device *dev,
1633 struct mv643xx_eth_private *mp = netdev_priv(dev);
1642 mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er,
1646 struct mv643xx_eth_private *mp = netdev_priv(dev);
1656 mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er,
1660 struct mv643xx_eth_private *mp = netdev_priv(dev);
1669 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
1672 if (netif_running(dev)) {
1673 mv643xx_eth_stop(dev);
1674 if (mv643xx_eth_open(dev)) {
1675 netdev_err(dev,
1686 mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features)
1688 struct mv643xx_eth_private *mp = netdev_priv(dev);
1696 static void mv643xx_eth_get_strings(struct net_device *dev,
1710 static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
1714 struct mv643xx_eth_private *mp = netdev_priv(dev);
1717 mv643xx_eth_get_stats(dev);
1727 p = ((void *)mp->dev) + stat->netdev_off;
1736 static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
1785 static u32 uc_addr_filter_mask(struct net_device *dev)
1790 if (dev->flags & IFF_PROMISC)
1793 nibbles = 1 << (dev->dev_addr[5] & 0x0f);
1794 netdev_for_each_uc_addr(ha, dev) {
1795 if (memcmp(dev->dev_addr, ha->addr, 5))
1797 if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
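The uc_addr_filter_mask() fragments describe the controller's unicast filter model: a secondary unicast address is only filterable if it matches dev_addr in its first five bytes and in the high nibble of the sixth, so the whole filter collapses into a 16-bit mask with one bit per accepted low nibble (a return of 0 signalling "cannot filter, accept all unicast"). Reconstructed from the lines above:

#include <linux/netdevice.h>
#include <linux/string.h>

static u32 example_uc_filter_mask(struct net_device *dev)
{
    struct netdev_hw_addr *ha;
    u32 nibbles;

    if (dev->flags & IFF_PROMISC)
        return 0;    /* promiscuous: no point filtering */

    nibbles = 1 << (dev->dev_addr[5] & 0x0f);    /* our own address */
    netdev_for_each_uc_addr(ha, dev) {
        if (memcmp(dev->dev_addr, ha->addr, 5))
            return 0;    /* first five bytes must match */
        if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
            return 0;    /* high nibble of byte 5 must match */
        nibbles |= 1 << (ha->addr[5] & 0x0f);
    }

    return nibbles;
}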
1806 static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
1808 struct mv643xx_eth_private *mp = netdev_priv(dev);
1813 uc_addr_set(mp, dev->dev_addr);
1817 nibbles = uc_addr_filter_mask(dev);
1862 static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
1864 struct mv643xx_eth_private *mp = netdev_priv(dev);
1870 if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
1879 netdev_for_each_mc_addr(ha, dev) {
1914 static void mv643xx_eth_set_rx_mode(struct net_device *dev)
1916 mv643xx_eth_program_unicast_filter(dev);
1917 mv643xx_eth_program_multicast_filter(dev);
1920 static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
1927 eth_hw_addr_set(dev, sa->sa_data);
1929 netif_addr_lock_bh(dev);
1930 mv643xx_eth_program_unicast_filter(dev);
1931 netif_addr_unlock_bh(dev);
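The set_mac_address matches show the locking discipline around filter reprogramming: the new address is stored with eth_hw_addr_set(), then the hardware unicast filter is rewritten under netif_addr_lock_bh() so it cannot race with concurrent rx-mode updates. A sketch (the validity check is a common addition, not visible in the matches above):

#include <linux/etherdevice.h>

static int example_set_mac_address(struct net_device *dev, void *addr)
{
    struct sockaddr *sa = addr;

    if (!is_valid_ether_addr(sa->sa_data))
        return -EADDRNOTAVAIL;

    eth_hw_addr_set(dev, sa->sa_data);    /* dev->dev_addr is const; use the setter */

    netif_addr_lock_bh(dev);
    /* ... reprogram the hardware unicast filter from dev->dev_addr ... */
    netif_addr_unlock_bh(dev);

    return 0;
}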
1960 rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
1966 netdev_err(mp->dev,
1997 dma_free_coherent(mp->dev->dev.parent, size,
2020 netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
2028 dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
2064 txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
2070 netdev_err(mp->dev,
2100 txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
2117 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
2135 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
2140 dma_free_coherent(mp->dev->dev.parent,
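Across the ring setup and teardown matches, the same struct device (mp->dev->dev.parent), size, CPU pointer and DMA handle appear in both dma_alloc_coherent() and dma_free_coherent(); mismatching any of them at free time is a classic source of DMA-API debug splats. A small sketch of the pairing, under hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

struct example_ring {
    void *descs;
    dma_addr_t dma;
    size_t size;
};

static int example_ring_alloc(struct device *parent, struct example_ring *r,
                              size_t size)
{
    r->descs = dma_alloc_coherent(parent, size, &r->dma, GFP_KERNEL);
    if (!r->descs)
        return -ENOMEM;
    r->size = size;
    return 0;
}

static void example_ring_free(struct device *parent, struct example_ring *r)
{
    /* must mirror the allocation exactly: same device, same size */
    dma_free_coherent(parent, r->size, r->descs, r->dma);
}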
2182 struct net_device *dev = (struct net_device *)dev_id;
2183 struct mv643xx_eth_private *mp = netdev_priv(dev);
2196 struct net_device *dev = mp->dev;
2204 if (netif_carrier_ok(dev)) {
2207 netdev_info(dev, "link down\n");
2209 netif_carrier_off(dev);
2238 netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
2241 if (!netif_carrier_ok(dev))
2242 netif_carrier_on(dev);
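The link-interrupt matches show carrier handling done edge-triggered rather than level-triggered: netif_carrier_off()/netif_carrier_on() are only called when netif_carrier_ok() reports an actual state change, which keeps the log and the queue state machine quiet across repeated link events. A condensed sketch:

#include <linux/netdevice.h>

static void example_handle_link(struct net_device *dev, bool link_up)
{
    if (!link_up) {
        if (netif_carrier_ok(dev)) {
            netdev_info(dev, "link down\n");
            netif_carrier_off(dev);    /* also quiesces the TX watchdog */
        }
        return;
    }

    if (!netif_carrier_ok(dev))
        netif_carrier_on(dev);
}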
2320 struct net_device *dev = mp->dev;
2327 if (dev->phydev) {
2330 mv643xx_eth_get_link_ksettings(dev, &cmd);
2331 phy_init_hw(dev->phydev);
2333 dev, (const struct ethtool_link_ksettings *)&cmd);
2334 phy_start(dev->phydev);
2346 if (!dev->phydev)
2367 mv643xx_eth_set_features(mp->dev, mp->dev->features);
2377 mv643xx_eth_program_unicast_filter(mp->dev);
2404 skb_size = mp->dev->mtu + 36;
2422 static int mv643xx_eth_open(struct net_device *dev)
2424 struct mv643xx_eth_private *mp = netdev_priv(dev);
2432 err = request_irq(dev->irq, mv643xx_eth_irq,
2433 IRQF_SHARED, dev->name, dev);
2435 netdev_err(dev, "can't assign irq\n");
2486 free_irq(dev->irq, dev);
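The mv643xx_eth_open() matches show the IRQ half of ndo_open: a shared interrupt line is requested with the net_device itself as the cookie (so the handler can recover it from dev_id), and the error path hands the line back with free_irq() using the same cookie. A sketch:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

static irqreturn_t example_irq(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;

    /* ... read/ack the cause registers, schedule NAPI ... */
    (void)dev;
    return IRQ_HANDLED;
}

static int example_open(struct net_device *dev)
{
    int err;

    err = request_irq(dev->irq, example_irq, IRQF_SHARED,
                      dev->name, dev);
    if (err) {
        netdev_err(dev, "can't assign irq\n");
        return -EAGAIN;
    }

    /* ... ring setup; any later failure must free_irq(dev->irq, dev) ... */
    return 0;
}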
2517 static int mv643xx_eth_stop(struct net_device *dev)
2519 struct mv643xx_eth_private *mp = netdev_priv(dev);
2530 netif_carrier_off(dev);
2531 if (dev->phydev)
2532 phy_stop(dev->phydev);
2533 free_irq(dev->irq, dev);
2536 mv643xx_eth_get_stats(dev);
2548 static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2552 if (!dev->phydev)
2555 ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
2557 mv643xx_eth_adjust_link(dev);
2561 static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
2563 struct mv643xx_eth_private *mp = netdev_priv(dev);
2565 dev->mtu = new_mtu;
2569 if (!netif_running(dev))
2578 mv643xx_eth_stop(dev);
2579 if (mv643xx_eth_open(dev)) {
2580 netdev_err(dev,
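Together with the dev->min_mtu/dev->max_mtu assignments at probe time (lines 3249-3250), the change_mtu matches show the modern split of responsibility: the networking core validates the requested range, so the driver only records the value and, if the interface is running, restarts the port so receive buffers are resized (line 2404 derives skb_size from the MTU). A sketch:

#include <linux/netdevice.h>

static int example_change_mtu(struct net_device *dev, int new_mtu)
{
    /* range was already checked by the core against min_mtu/max_mtu */
    dev->mtu = new_mtu;

    if (!netif_running(dev))
        return 0;

    /* interface is up: stop and reopen it so RX buffers are
     * reallocated for the new frame size */
    return 0;
}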
2592 if (netif_running(mp->dev)) {
2593 netif_tx_stop_all_queues(mp->dev);
2596 netif_tx_wake_all_queues(mp->dev);
2600 static void mv643xx_eth_tx_timeout(struct net_device *dev, unsigned int txqueue)
2602 struct mv643xx_eth_private *mp = netdev_priv(dev);
2604 netdev_info(dev, "tx timeout\n");
2610 static void mv643xx_eth_netpoll(struct net_device *dev)
2612 struct mv643xx_eth_private *mp = netdev_priv(dev);
2617 mv643xx_eth_irq(dev->irq, dev);
2733 dev_err(&pdev->dev, "missing interrupt on %pOFn\n", pnp);
2738 dev_err(&pdev->dev, "missing reg property on %pOFn\n", pnp);
2743 dev_err(&pdev->dev, "invalid reg property on %pOFn\n", pnp);
2751 dev_err(&pdev->dev, "too many ports registered\n");
2778 ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
2779 ppdev->dev.of_node = pnp;
2805 struct device_node *pnp, *np = pdev->dev.of_node;
2812 pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL);
2815 pdev->dev.platform_data = pd;
2858 msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
2863 msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
2867 msp->clk = devm_clk_get(&pdev->dev, NULL);
2881 pd = dev_get_platdata(&pdev->dev);
2937 struct net_device *dev = mp->dev;
2941 eth_hw_addr_set(dev, pd->mac_addr);
2946 eth_hw_addr_set(dev, addr);
2964 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
2975 struct device *dev = mp->dev->dev.parent;
2979 if (dev->of_node)
2980 err = of_get_phy_mode(dev->of_node, &iface);
2985 if (!dev->of_node || err)
3015 phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
3028 struct net_device *dev = mp->dev;
3029 struct phy_device *phy = dev->phydev;
3049 struct net_device *dev = mp->dev;
3059 if (!dev->phydev) {
3097 struct net_device *dev;
3102 pd = dev_get_platdata(&pdev->dev);
3104 dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
3109 dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
3113 dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
3114 if (!dev)
3117 SET_NETDEV_DEV(dev, &pdev->dev);
3118 mp = netdev_priv(dev);
3125 mp->dev = dev;
3127 if (of_device_is_compatible(pdev->dev.of_node,
3172 mp->clk = devm_clk_get(&pdev->dev, NULL);
3181 netif_set_real_num_tx_queues(dev, mp->txq_count);
3182 netif_set_real_num_rx_queues(dev, mp->rxq_count);
3186 phydev = of_phy_connect(mp->dev, pd->phy_node,
3208 dev->ethtool_ops = &mv643xx_eth_ethtool_ops;
3222 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll);
3232 dev->irq = irq;
3234 dev->netdev_ops = &mv643xx_eth_netdev_ops;
3236 dev->watchdog_timeo = 2 * HZ;
3237 dev->base_addr = 0;
3239 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3240 dev->vlan_features = dev->features;
3242 dev->features |= NETIF_F_RXCSUM;
3243 dev->hw_features = dev->features;
3245 dev->priv_flags |= IFF_UNICAST_FLT;
3246 netif_set_tso_max_segs(dev, MV643XX_MAX_TSO_SEGS);
3249 dev->min_mtu = 64;
3250 dev->max_mtu = 9500;
3255 netif_carrier_off(dev);
3262 err = register_netdev(dev);
3266 netdev_notice(dev, "port %d with MAC address %pM\n",
3267 mp->port_num, dev->dev_addr);
3270 netdev_notice(dev, "configured with sram\n");
3277 free_netdev(dev);
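The probe matches condense to the canonical platform-Ethernet skeleton: alloc_etherdev_mq() reserves the private area and TX queues, SET_NETDEV_DEV() ties the netdev to the platform device (which is exactly what makes dev->dev.parent usable for the DMA calls earlier in this listing), the carrier starts off, and free_netdev() unwinds any failure after allocation. A condensed sketch with a hypothetical private struct:

#include <linux/etherdevice.h>
#include <linux/platform_device.h>

struct example_priv {
    int port_num;    /* hypothetical stand-in for mv643xx_eth_private */
};

static int example_probe(struct platform_device *pdev)
{
    struct net_device *dev;
    int err;

    dev = alloc_etherdev_mq(sizeof(struct example_priv), 8);
    if (!dev)
        return -ENOMEM;

    SET_NETDEV_DEV(dev, &pdev->dev);    /* parent for sysfs and DMA */

    /* ... netdev_ops, features, MTU limits, MAC address ... */

    netif_carrier_off(dev);    /* no false "link up" before the PHY reports */

    err = register_netdev(dev);
    if (err)
        goto out_free;

    return 0;

out_free:
    free_netdev(dev);
    return err;
}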
3285 struct net_device *dev = mp->dev;
3287 unregister_netdev(mp->dev);
3288 if (dev->phydev)
3289 phy_disconnect(dev->phydev);
3295 free_netdev(mp->dev);
3308 if (netif_running(mp->dev))