Lines matching defs:dev (drivers/net/ethernet/marvell/mvneta.c)
508 struct net_device *dev;
804 mvneta_get_stats64(struct net_device *dev,
807 struct mvneta_port *pp = netdev_priv(dev);
839 stats->tx_dropped = dev->stats.tx_dropped;
1072 dev_warn(pp->dev->dev.parent,
1145 netdev_info(pp->dev, "fail to configure mbus window to BM\n");
1157 struct device_node *dn = pdev->dev.of_node;
1169 netdev_info(pp->dev, "missing long pool id\n");
1176 MVNETA_RX_PKT_SIZE(pp->dev->mtu));
1178 netdev_info(pp->dev, "fail to obtain long pool for port\n");
1196 netdev_info(pp->dev, "fail to obtain short pool for port\n");
1248 netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
1295 netdev_warn(pp->dev,
1318 netdev_warn(pp->dev,
1334 netdev_warn(pp->dev,
1826 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1830 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1834 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1838 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1847 if ((pp->dev->features & NETIF_F_RXCSUM) &&
1889 dma_unmap_single(pp->dev->dev.parent,
1916 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
2095 struct device *dev = pp->dev->dev.parent;
2124 dma_addr = dma_map_single(dev, data, len,
2126 if (dma_mapping_error(dev, dma_addr)) {
2140 dma_sync_single_for_device(dev, dma_addr, len,
2165 dma_unmap_single(dev, tx_desc->buf_phys_addr,
2189 nq = netdev_get_tx_queue(pp->dev, txq->id);
2212 mvneta_xdp_xmit(struct net_device *dev, int num_frame,
2215 struct mvneta_port *pp = netdev_priv(dev);
2230 nq = netdev_get_tx_queue(pp->dev, txq->id);
2279 err = xdp_do_redirect(pp->dev, xdp, prog);
2295 bpf_warn_invalid_xdp_action(pp->dev, prog, act);
2298 trace_xdp_exception(pp->dev, prog, act);
2322 struct net_device *dev = pp->dev;
2335 dma_sync_single_for_cpu(dev->dev.parent,
2356 struct net_device *dev = pp->dev;
2368 dma_sync_single_for_cpu(dev->dev.parent,
2430 struct net_device *dev = pp->dev;
2513 skb->protocol = eth_type_trans(skb, dev);
2542 struct net_device *dev = pp->dev;
2588 skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
2592 dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
2600 skb->protocol = eth_type_trans(skb, dev);
2620 netdev_err(dev, "Linux processing - Can't refill\n");
2637 dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
2649 skb->protocol = eth_type_trans(skb, dev);
2673 struct device *dev = pp->dev->dev.parent;
2678 dma_free_coherent(dev, MVNETA_TSO_PAGE_SIZE,
2689 struct device *dev = pp->dev->dev.parent;
2694 txq->tso_hdrs[i] = dma_alloc_coherent(dev, MVNETA_TSO_PAGE_SIZE,
2742 mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
2751 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
2753 if (unlikely(dma_mapping_error(dev->dev.parent,
2790 dma_unmap_single(pp->dev->dev.parent,
2803 static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
2808 struct mvneta_port *pp = netdev_priv(dev);
2840 if (mvneta_tso_put_data(dev, txq, skb,
2878 dma_map_single(pp->dev->dev.parent, addr,
2881 if (dma_mapping_error(pp->dev->dev.parent,
2911 static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
2913 struct mvneta_port *pp = netdev_priv(dev);
2922 if (!netif_running(dev))
2926 frags = mvneta_tx_tso(skb, dev, txq);
2939 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
2942 if (unlikely(dma_mapping_error(dev->dev.parent,
2964 dma_unmap_single(dev->dev.parent,
2976 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
2996 dev->stats.tx_dropped++;
3009 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
3032 nq = netdev_get_tx_queue(pp->dev, txq->id);
3152 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
3159 netdev_info(pp->dev,
3200 static void mvneta_set_rx_mode(struct net_device *dev)
3202 struct mvneta_port *pp = netdev_priv(dev);
3205 if (dev->flags & IFF_PROMISC) {
3215 mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
3217 if (dev->flags & IFF_ALLMULTI) {
3226 if (!netdev_mc_empty(dev)) {
3227 netdev_for_each_mc_addr(ha, dev) {
3252 disable_percpu_irq(port->pp->dev->irq);
3277 struct mvneta_port *pp = netdev_priv(napi->dev);
3280 if (!netif_running(pp->dev)) {
3334 enable_percpu_irq(pp->dev->irq, 0);
3355 .dev = pp->dev->dev.parent,
3369 err = __xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0,
3403 netdev_err(pp->dev,
3445 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3511 dma_free_coherent(pp->dev->dev.parent,
3539 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3564 netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
3602 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
3608 dma_free_coherent(pp->dev->dev.parent,
3668 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
3686 netdev_err(pp->dev, "%s: can't create txq=%d\n",
3781 netif_tx_start_all_queues(pp->dev);
3792 if (device_may_wakeup(&pp->dev->dev))
3808 netif_carrier_off(pp->dev);
3811 netif_tx_stop_all_queues(pp->dev);
3832 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3839 disable_percpu_irq(pp->dev->irq);
3843 static int mvneta_change_mtu(struct net_device *dev, int mtu)
3845 struct mvneta_port *pp = netdev_priv(dev);
3850 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
3857 netdev_info(dev, "Illegal MTU %d for XDP prog without frags\n",
3863 dev->mtu = mtu;
3865 if (!netif_running(dev)) {
3869 netdev_update_features(dev);
3885 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
3889 netdev_err(dev, "unable to setup rxqs after MTU change\n");
3895 netdev_err(dev, "unable to setup txqs after MTU change\n");
3902 netdev_update_features(dev);
3907 static netdev_features_t mvneta_fix_features(struct net_device *dev,
3910 struct mvneta_port *pp = netdev_priv(dev);
3912 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
3914 netdev_info(dev,
3938 static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
3940 struct mvneta_port *pp = netdev_priv(dev);
3944 ret = eth_prepare_mac_addr_change(dev, addr);
3948 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
3953 eth_commit_mac_addr_change(dev, addr);
4082 struct net_device *ndev = to_net_dev(config->dev);
4091 struct net_device *ndev = to_net_dev(config->dev);
4125 struct net_device *ndev = to_net_dev(config->dev);
4181 struct net_device *ndev = to_net_dev(config->dev);
4223 struct net_device *ndev = to_net_dev(config->dev);
4246 struct net_device *ndev = to_net_dev(config->dev);
4308 netdev_err(pp->dev, "could not attach PHY: %d\n", err);
4311 device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
4315 device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts);
4396 netif_tx_stop_all_queues(pp->dev);
4432 netif_tx_start_all_queues(pp->dev);
4473 netif_tx_start_all_queues(pp->dev);
4477 static int mvneta_open(struct net_device *dev)
4479 struct mvneta_port *pp = netdev_priv(dev);
4482 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
4494 ret = request_irq(pp->dev->irq, mvneta_isr, 0,
4495 dev->name, pp);
4497 ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
4498 dev->name, pp->ports);
4500 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
4527 netdev_err(dev, "cannot probe MDIO bus\n");
4545 free_irq(pp->dev->irq, pp);
4548 free_percpu_irq(pp->dev->irq, pp->ports);
4558 static int mvneta_stop(struct net_device *dev)
4560 struct mvneta_port *pp = netdev_priv(dev);
4580 free_percpu_irq(dev->irq, pp->ports);
4584 free_irq(dev->irq, pp);
4593 static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4595 struct mvneta_port *pp = netdev_priv(dev);
4600 static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
4603 bool need_update, running = netif_running(dev);
4604 struct mvneta_port *pp = netdev_priv(dev);
4608 dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
4621 mvneta_stop(dev);
4628 return mvneta_open(dev);
4633 static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4637 return mvneta_xdp_setup(dev, xdp->prog, xdp->extack);
4665 static int mvneta_ethtool_nway_reset(struct net_device *dev)
4667 struct mvneta_port *pp = netdev_priv(dev);
4674 mvneta_ethtool_set_coalesce(struct net_device *dev,
4679 struct mvneta_port *pp = netdev_priv(dev);
4701 mvneta_ethtool_get_coalesce(struct net_device *dev,
4706 struct mvneta_port *pp = netdev_priv(dev);
4716 static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
4723 strscpy(drvinfo->bus_info, dev_name(&dev->dev),
4743 mvneta_ethtool_set_ringparam(struct net_device *dev,
4748 struct mvneta_port *pp = netdev_priv(dev);
4758 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
4761 if (netif_running(dev)) {
4762 mvneta_stop(dev);
4763 if (mvneta_open(dev)) {
4764 netdev_err(dev,
4773 static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
4776 struct mvneta_port *pp = netdev_priv(dev);
4781 static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
4784 struct mvneta_port *pp = netdev_priv(dev);
4929 static void mvneta_ethtool_get_stats(struct net_device *dev,
4932 struct mvneta_port *pp = netdev_priv(dev);
4944 static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
4948 struct mvneta_port *pp = netdev_priv(dev);
4959 static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
4964 static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
4984 netif_tx_stop_all_queues(pp->dev);
5005 mvneta_set_rx_mode(pp->dev);
5028 netif_tx_start_all_queues(pp->dev);
5033 static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
5036 struct mvneta_port *pp = netdev_priv(dev);
5057 static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
5060 struct mvneta_port *pp = netdev_priv(dev);
5077 static void mvneta_ethtool_get_wol(struct net_device *dev,
5080 struct mvneta_port *pp = netdev_priv(dev);
5085 static int mvneta_ethtool_set_wol(struct net_device *dev,
5088 struct mvneta_port *pp = netdev_priv(dev);
5093 device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
5098 static int mvneta_ethtool_get_eee(struct net_device *dev,
5101 struct mvneta_port *pp = netdev_priv(dev);
5114 static int mvneta_ethtool_set_eee(struct net_device *dev,
5117 struct mvneta_port *pp = netdev_priv(dev);
5217 static int mvneta_setup_mqprio(struct net_device *dev,
5220 struct mvneta_port *pp = netdev_priv(dev);
5236 netdev_reset_tc(dev);
5240 netdev_set_num_tc(dev, mqprio->qopt.num_tc);
5243 netdev_set_tc_queue(dev, tc, mqprio->qopt.count[tc],
5286 static int mvneta_setup_tc(struct net_device *dev, enum tc_setup_type type,
5291 return mvneta_setup_mqprio(dev, type_data);
5340 static int mvneta_init(struct device *dev, struct mvneta_port *pp)
5350 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
5362 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
5374 = devm_kmalloc_array(pp->dev->dev.parent,
5455 struct device_node *dn = pdev->dev.of_node;
5458 struct net_device *dev;
5468 dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port),
5470 if (!dev)
5473 dev->tx_queue_len = MVNETA_MAX_TXD;
5474 dev->watchdog_timeo = 5 * HZ;
5475 dev->netdev_ops = &mvneta_netdev_ops;
5476 dev->ethtool_ops = &mvneta_eth_tool_ops;
5478 pp = netdev_priv(dev);
5487 dev_err(&pdev->dev, "incorrect phy-mode\n");
5493 comphy = devm_of_phy_get(&pdev->dev, dn, NULL);
5514 dev->irq = irq_of_parse_and_map(dn, 0);
5515 if (dev->irq == 0)
5518 pp->clk = devm_clk_get(&pdev->dev, "core");
5520 pp->clk = devm_clk_get(&pdev->dev, NULL);
5528 pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
5535 pp->phylink_config.dev = &dev->dev;
5566 phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
5589 err = of_get_ethdev_address(dn, dev);
5596 eth_hw_addr_set(dev, hw_mac_addr);
5599 eth_hw_addr_random(dev);
5607 dev_info(&pdev->dev,
5630 pp->dev = dev;
5631 SET_NETDEV_DEV(dev, &pdev->dev);
5642 dev_info(&pdev->dev,
5662 err = mvneta_init(&pdev->dev, pp);
5668 dev_err(&pdev->dev, "can't power up port\n");
5676 netif_napi_add(dev, &pp->napi, mvneta_poll);
5682 netif_napi_add(dev, &port->napi, mvneta_poll);
5687 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5689 dev->hw_features |= dev->features;
5690 dev->vlan_features |= dev->features;
5692 dev->xdp_features = NETDEV_XDP_ACT_BASIC |
5697 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
5698 netif_set_tso_max_segs(dev, MVNETA_MAX_TSO_SEGS);
5701 dev->min_mtu = ETH_MIN_MTU;
5703 dev->max_mtu = 9676;
5705 err = register_netdev(dev);
5707 dev_err(&pdev->dev, "failed to register\n");
5711 netdev_info(dev, "Using %s mac address %pM\n", mac_from,
5712 dev->dev_addr);
5714 platform_set_drvdata(pdev, pp->dev);
5735 irq_dispose_mapping(dev->irq);
5742 struct net_device *dev = platform_get_drvdata(pdev);
5743 struct mvneta_port *pp = netdev_priv(dev);
5745 unregister_netdev(dev);
5750 irq_dispose_mapping(dev->irq);
5767 struct net_device *dev = dev_get_drvdata(device);
5768 struct mvneta_port *pp = netdev_priv(dev);
5770 if (!netif_running(dev))
5801 netif_device_detach(dev);
5811 struct net_device *dev = dev_get_drvdata(device);
5812 struct mvneta_port *pp = netdev_priv(dev);
5823 dev_info(&pdev->dev, "use SW buffer management\n");
5835 netif_device_attach(dev);
5837 if (!netif_running(dev))
5867 mvneta_set_rx_mode(dev);
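
Taken together, the hits above show `dev` in three distinct roles: the `struct net_device *` handle passed into ndo/ethtool callbacks, the `pp->dev` back-pointer stored in `struct mvneta_port`, and the parent `struct device` reached via `pp->dev->dev.parent` (the platform device), which is what all the DMA and devm calls use. A minimal sketch of that recurring pattern is below; `my_port` and `my_map_buffer` are hypothetical names for illustration, not symbols from the driver:

	#include <linux/netdevice.h>
	#include <linux/dma-mapping.h>

	struct my_port {
		struct net_device *dev;	/* back-pointer, as in mvneta_port */
	};

	/* Map a TX buffer the way the dma_map_single() hits above do:
	 * DMA always goes through the parent struct device (the platform
	 * device), never through the net_device itself.
	 */
	static int my_map_buffer(struct net_device *ndev, void *data,
				 size_t len, dma_addr_t *dma)
	{
		struct my_port *pp = netdev_priv(ndev);	/* driver-private area */
		struct device *dma_dev = pp->dev->dev.parent;

		*dma = dma_map_single(dma_dev, data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, *dma)) {
			netdev_err(pp->dev, "DMA mapping failed\n");
			return -ENOMEM;
		}
		return 0;
	}

The same split explains the logging calls in the listing: `netdev_err()`/`netdev_info()` take the `net_device` (so messages carry the interface name), while `dev_warn()`/`dev_err()` take the parent `struct device` (so messages carry the platform-device name).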