Lines Matching refs: mhi_netdev
49 struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
52 schedule_delayed_work(&mhi_netdev->rx_refill, 0);
64 struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
68 cancel_delayed_work_sync(&mhi_netdev->rx_refill);
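
The two groups of lines above come from the netdev open and stop callbacks: open kicks the RX-refill delayed work immediately, and stop cancels it synchronously so no refill can run once the interface is down. A minimal sketch of that pattern follows; struct example_mhi_net and its field set are hypothetical stand-ins mirroring the fields referenced throughout this listing, and the netif_start_queue()/netif_stop_queue() calls are assumptions, not taken from the matches above. The later sketches in this listing reuse these example_ declarations and includes.

#include <linux/device.h>
#include <linux/mhi.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>

/* Illustrative per-direction counters, one syncp per direction. */
struct example_stats {
	struct u64_stats_sync rx_syncp;
	u64_stats_t rx_packets;
	u64_stats_t rx_bytes;
	u64_stats_t rx_errors;
	struct u64_stats_sync tx_syncp;
	u64_stats_t tx_packets;
	u64_stats_t tx_bytes;
	u64_stats_t tx_errors;
	u64_stats_t tx_dropped;
};

/* Hypothetical private state, living in netdev_priv(ndev). */
struct example_mhi_net {
	struct mhi_device *mdev;
	struct net_device *ndev;
	struct sk_buff *skbagg_head;
	struct sk_buff *skbagg_tail;
	struct delayed_work rx_refill;
	struct example_stats stats;
	u32 rx_queue_sz;
	u32 mru;
};

static int example_ndo_open(struct net_device *ndev)
{
	struct example_mhi_net *priv = netdev_priv(ndev);

	/* Start filling the RX ring right away; the work re-arms itself. */
	schedule_delayed_work(&priv->rx_refill, 0);
	netif_start_queue(ndev);
	return 0;
}

static int example_ndo_stop(struct net_device *ndev)
{
	struct example_mhi_net *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	/* Wait for any in-flight refill before the interface goes down. */
	cancel_delayed_work_sync(&priv->rx_refill);
	return 0;
}
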
75 struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
76 struct mhi_device *mdev = mhi_netdev->mdev;
93 u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
94 u64_stats_inc(&mhi_netdev->stats.tx_dropped);
95 u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
103 struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
107 start = u64_stats_fetch_begin(&mhi_netdev->stats.rx_syncp);
108 stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
109 stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
110 stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
111 } while (u64_stats_fetch_retry(&mhi_netdev->stats.rx_syncp, start));
114 start = u64_stats_fetch_begin(&mhi_netdev->stats.tx_syncp);
115 stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets);
116 stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes);
117 stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors);
118 stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped);
119 } while (u64_stats_fetch_retry(&mhi_netdev->stats.tx_syncp, start));
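
The get_stats64 lines show the standard u64_stats pattern: writers bracket counter updates with u64_stats_update_begin()/u64_stats_update_end() on the per-direction syncp, and the reader retries its snapshot whenever u64_stats_fetch_retry() reports a concurrent update. A sketch of both sides, reusing the example_ declarations above; the example_count_rx() helper is illustrative.

/* Writer side, e.g. called from the RX completion path. */
static void example_count_rx(struct example_mhi_net *priv, unsigned int len)
{
	u64_stats_update_begin(&priv->stats.rx_syncp);
	u64_stats_inc(&priv->stats.rx_packets);
	u64_stats_add(&priv->stats.rx_bytes, len);
	u64_stats_update_end(&priv->stats.rx_syncp);
}

/* Reader side: .ndo_get_stats64, retrying each snapshot on a raced update. */
static void example_ndo_get_stats64(struct net_device *ndev,
				    struct rtnl_link_stats64 *stats)
{
	struct example_mhi_net *priv = netdev_priv(ndev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&priv->stats.rx_syncp);
		stats->rx_packets = u64_stats_read(&priv->stats.rx_packets);
		stats->rx_bytes = u64_stats_read(&priv->stats.rx_bytes);
		stats->rx_errors = u64_stats_read(&priv->stats.rx_errors);
	} while (u64_stats_fetch_retry(&priv->stats.rx_syncp, start));

	do {
		start = u64_stats_fetch_begin(&priv->stats.tx_syncp);
		stats->tx_packets = u64_stats_read(&priv->stats.tx_packets);
		stats->tx_bytes = u64_stats_read(&priv->stats.tx_bytes);
		stats->tx_errors = u64_stats_read(&priv->stats.tx_errors);
		stats->tx_dropped = u64_stats_read(&priv->stats.tx_dropped);
	} while (u64_stats_fetch_retry(&priv->stats.tx_syncp, start));
}
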
143 static struct sk_buff *mhi_net_skb_agg(struct mhi_net_dev *mhi_netdev,
146 struct sk_buff *head = mhi_netdev->skbagg_head;
147 struct sk_buff *tail = mhi_netdev->skbagg_tail;
151 mhi_netdev->skbagg_head = skb;
164 mhi_netdev->skbagg_tail = skb;
166 return mhi_netdev->skbagg_head;
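
The skb_agg lines handle transfers that arrive split across several RX buffers: the first buffer becomes the aggregation head, follow-up buffers are chained onto its frag_list, and the head's length accounting is kept in step. One plausible reconstruction of that helper, written as a sketch against the example_ struct above rather than as the driver's exact code:

static struct sk_buff *example_skb_agg(struct example_mhi_net *priv,
				       struct sk_buff *skb)
{
	struct sk_buff *head = priv->skbagg_head;
	struct sk_buff *tail = priv->skbagg_tail;

	if (!head) {
		/* First fragment of a split transfer becomes the head. */
		priv->skbagg_head = skb;
		return skb;
	}

	/* The first follow-up buffer starts the frag_list, later ones are
	 * linked off the previous tail. */
	if (!skb_shinfo(head)->frag_list)
		skb_shinfo(head)->frag_list = skb;
	else
		tail->next = skb;

	/* Keep the head's len/data_len/truesize accounting consistent. */
	head->len += skb->len;
	head->data_len += skb->len;
	head->truesize += skb->truesize;

	priv->skbagg_tail = skb;

	return priv->skbagg_head;
}
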
172 struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
187 netdev_warn_once(mhi_netdev->ndev,
190 mhi_net_skb_agg(mhi_netdev, skb);
199 u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
200 u64_stats_inc(&mhi_netdev->stats.rx_errors);
201 u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
206 if (mhi_netdev->skbagg_head) {
208 skb = mhi_net_skb_agg(mhi_netdev, skb);
209 mhi_netdev->skbagg_head = NULL;
224 u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
225 u64_stats_inc(&mhi_netdev->stats.rx_packets);
226 u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
227 u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
232 if (free_desc_count >= mhi_netdev->rx_queue_sz / 2)
233 schedule_delayed_work(&mhi_netdev->rx_refill, 0);
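
The download (RX) callback lines cover one completed transfer: a failed transaction bumps rx_errors and frees the buffer, a successful one closes any pending aggregation, updates the RX counters and hands the packet to the stack, and the refill work is rescheduled once at least half of the RX descriptors are free again. A simplified sketch under those assumptions; the real callback also classifies the L3 protocol and treats overflow statuses specially (the netdev_warn_once() and aggregation-on-error lines above), which is omitted here.

static void example_dl_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct example_mhi_net *priv = dev_get_drvdata(&mhi_dev->dev);
	struct sk_buff *skb = mhi_res->buf_addr;
	int free_desc_count;

	free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	if (unlikely(mhi_res->transaction_status)) {
		/* Failed transfer: account the error and drop the buffer. */
		u64_stats_update_begin(&priv->stats.rx_syncp);
		u64_stats_inc(&priv->stats.rx_errors);
		u64_stats_update_end(&priv->stats.rx_syncp);
		kfree_skb(skb);
	} else {
		skb_put(skb, mhi_res->bytes_xferd);

		/* Close a pending aggregation before delivering upstream. */
		if (priv->skbagg_head) {
			skb = example_skb_agg(priv, skb);
			priv->skbagg_head = NULL;
		}

		example_count_rx(priv, skb->len);
		netif_rx(skb);
	}

	/* Top the ring back up once half of the RX descriptors have drained. */
	if (free_desc_count >= priv->rx_queue_sz / 2)
		schedule_delayed_work(&priv->rx_refill, 0);
}
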
239 struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
240 struct net_device *ndev = mhi_netdev->ndev;
241 struct mhi_device *mdev = mhi_netdev->mdev;
249 u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
253 u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
257 u64_stats_inc(&mhi_netdev->stats.tx_errors);
259 u64_stats_inc(&mhi_netdev->stats.tx_packets);
260 u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd);
262 u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
270 struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
272 struct net_device *ndev = mhi_netdev->ndev;
273 struct mhi_device *mdev = mhi_netdev->mdev;
278 size = mhi_netdev->mru ? mhi_netdev->mru : READ_ONCE(ndev->mtu);
300 if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mhi_netdev->rx_queue_sz)
301 schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
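
The refill-work lines size each RX buffer to the MRU, falling back to the current MTU, queue buffers to the DL channel, and re-arm themselves after half a second if the ring is still completely empty. A sketch of that loop; mhi_queue_skb(), MHI_EOT and mhi_get_free_desc_count() are the <linux/mhi.h> API, while the loop structure and allocation details are illustrative assumptions.

static void example_rx_refill_work(struct work_struct *work)
{
	struct example_mhi_net *priv = container_of(work, struct example_mhi_net,
						    rx_refill.work);
	struct net_device *ndev = priv->ndev;
	struct mhi_device *mdev = priv->mdev;
	struct sk_buff *skb;
	unsigned int size;
	int err;

	/* Each buffer must hold a full MRU-sized transfer; without a
	 * negotiated MRU, fall back to the interface MTU. */
	size = priv->mru ? priv->mru : READ_ONCE(ndev->mtu);

	while (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE)) {
		skb = netdev_alloc_skb(ndev, size);
		if (unlikely(!skb))
			break;

		err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
		if (unlikely(err)) {
			kfree_skb(skb);
			break;
		}

		/* Don't hog the CPU if buffers are consumed as fast as queued. */
		cond_resched();
	}

	/* Still fully starved of RX buffers? Retry in half a second. */
	if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == priv->rx_queue_sz)
		schedule_delayed_work(&priv->rx_refill, HZ / 2);
}
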
306 struct mhi_net_dev *mhi_netdev;
309 mhi_netdev = netdev_priv(ndev);
311 dev_set_drvdata(&mhi_dev->dev, mhi_netdev);
312 mhi_netdev->ndev = ndev;
313 mhi_netdev->mdev = mhi_dev;
314 mhi_netdev->skbagg_head = NULL;
315 mhi_netdev->mru = mhi_dev->mhi_cntrl->mru;
317 INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
318 u64_stats_init(&mhi_netdev->stats.rx_syncp);
319 u64_stats_init(&mhi_netdev->stats.tx_syncp);
327 mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
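
The probe/newlink lines wire the private struct up in a fixed order: the netdev's private area doubles as driver data on the MHI device, the refill work and both stats syncps are initialised before any traffic can flow, and the RX queue depth is read back from the number of free DL descriptors once the channels are running. A sketch of that ordering; mhi_prepare_for_transfer() and the surrounding error handling are assumptions not shown in the matches above, and netdev registration is trimmed.

static int example_newlink(struct mhi_device *mhi_dev, struct net_device *ndev)
{
	struct example_mhi_net *priv = netdev_priv(ndev);
	int err;

	dev_set_drvdata(&mhi_dev->dev, priv);
	priv->ndev = ndev;
	priv->mdev = mhi_dev;
	priv->skbagg_head = NULL;
	priv->mru = mhi_dev->mhi_cntrl->mru;

	INIT_DELAYED_WORK(&priv->rx_refill, example_rx_refill_work);
	u64_stats_init(&priv->stats.rx_syncp);
	u64_stats_init(&priv->stats.tx_syncp);

	/* Start the MHI channels so descriptor counts are meaningful
	 * (assumed step, not part of the matched lines). */
	err = mhi_prepare_for_transfer(mhi_dev);
	if (err)
		return err;

	/* The DL transfer ring depth bounds how many RX buffers stay queued. */
	priv->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	return 0;
}
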
338 struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
344 kfree_skb(mhi_netdev->skbagg_head);
376 struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
378 mhi_net_dellink(mhi_dev, mhi_netdev->ndev);