// SPDX-License-Identifier: GPL-2.0-or-later
/* MHI Network driver - Network over MHI bus
 *
 * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
 */

#include <linux/if_arp.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>

#define MHI_NET_MIN_MTU		ETH_MIN_MTU
#define MHI_NET_MAX_MTU		0xffff
#define MHI_NET_DEFAULT_MTU	0x4000

struct mhi_net_stats {
	u64_stats_t rx_packets;
	u64_stats_t rx_bytes;
	u64_stats_t rx_errors;
	u64_stats_t tx_packets;
	u64_stats_t tx_bytes;
	u64_stats_t tx_errors;
	u64_stats_t tx_dropped;
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
};

struct mhi_net_dev {
	struct mhi_device *mdev;
	struct net_device *ndev;
	struct sk_buff *skbagg_head;
	struct sk_buff *skbagg_tail;
	struct delayed_work rx_refill;
	struct mhi_net_stats stats;
	u32 rx_queue_sz;
	int msg_enable;
	unsigned int mru;
};

struct mhi_device_info {
	const char *netname;
};

static int mhi_ndo_open(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	/* Feed the rx buffer pool */
	schedule_delayed_work(&mhi_netdev->rx_refill, 0);

	/* Carrier is established via out-of-band channel (e.g. qmi) */
	netif_carrier_on(ndev);

	netif_start_queue(ndev);

	return 0;
}

static int mhi_ndo_stop(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);
	cancel_delayed_work_sync(&mhi_netdev->rx_refill);

	return 0;
}

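/* ndo_start_xmit handler: queue the skb on the MHI UL (TX) channel. On
 * success the skb is released in the UL completion callback; on queueing
 * failure it is dropped here and accounted as tx_dropped.
 */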
static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
	struct mhi_device *mdev = mhi_netdev->mdev;
	int err;

	err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
	if (unlikely(err)) {
		net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
				    ndev->name, err);
		dev_kfree_skb_any(skb);
		goto exit_drop;
	}

	if (mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;

exit_drop:
	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
	u64_stats_inc(&mhi_netdev->stats.tx_dropped);
	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

	return NETDEV_TX_OK;
}

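/* Snapshot the per-device counters under the u64_stats seqcount so that
 * readers get consistent 64-bit values even on 32-bit systems.
 */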
static void mhi_ndo_get_stats64(struct net_device *ndev,
				struct rtnl_link_stats64 *stats)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&mhi_netdev->stats.rx_syncp);
		stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
		stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
		stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
	} while (u64_stats_fetch_retry(&mhi_netdev->stats.rx_syncp, start));

	do {
		start = u64_stats_fetch_begin(&mhi_netdev->stats.tx_syncp);
		stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets);
		stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes);
		stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors);
		stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped);
	} while (u64_stats_fetch_retry(&mhi_netdev->stats.tx_syncp, start));
}

static const struct net_device_ops mhi_netdev_ops = {
	.ndo_open               = mhi_ndo_open,
	.ndo_stop               = mhi_ndo_stop,
	.ndo_start_xmit         = mhi_ndo_xmit,
	.ndo_get_stats64	= mhi_ndo_get_stats64,
};

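/* Set up the netdev as a raw IP point-to-point interface: no L2 header,
 * no hardware address, ARP disabled.
 */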
static void mhi_net_setup(struct net_device *ndev)
{
	ndev->header_ops = NULL;  /* No header */
	ndev->type = ARPHRD_RAWIP;
	ndev->hard_header_len = 0;
	ndev->addr_len = 0;
	ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
	ndev->netdev_ops = &mhi_netdev_ops;
	ndev->mtu = MHI_NET_DEFAULT_MTU;
	ndev->min_mtu = MHI_NET_MIN_MTU;
	ndev->max_mtu = MHI_NET_MAX_MTU;
	ndev->tx_queue_len = 1000;
}

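/* Append an skb to the current aggregation chain, used to reassemble a
 * packet that was split across several MHI transfers (-EOVERFLOW case).
 */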
static struct sk_buff *mhi_net_skb_agg(struct mhi_net_dev *mhi_netdev,
				       struct sk_buff *skb)
{
	struct sk_buff *head = mhi_netdev->skbagg_head;
	struct sk_buff *tail = mhi_netdev->skbagg_tail;

	/* This is non-paged skb chaining using frag_list */
	if (!head) {
		mhi_netdev->skbagg_head = skb;
		return skb;
	}

	if (!skb_shinfo(head)->frag_list)
		skb_shinfo(head)->frag_list = skb;
	else
		tail->next = skb;

	head->len += skb->len;
	head->data_len += skb->len;
	head->truesize += skb->truesize;

	mhi_netdev->skbagg_tail = skb;

	return mhi_netdev->skbagg_head;
}

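/* DL (device to host) transfer completion callback: push the received
 * buffer up the network stack and kick the RX refill work when at least
 * half of the transfer ring is free.
 */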
static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct sk_buff *skb = mhi_res->buf_addr;
	int free_desc_count;

	free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	if (unlikely(mhi_res->transaction_status)) {
		switch (mhi_res->transaction_status) {
		case -EOVERFLOW:
			/* Packet can not fit in one MHI buffer and has been
			 * split over multiple MHI transfers, do re-aggregation.
			 * That usually means the device side MTU is larger than
			 * the host side MTU/MRU. Since this is not optimal,
			 * print a warning (once).
			 */
			netdev_warn_once(mhi_netdev->ndev,
					 "Fragmented packets received, fix MTU?\n");
			skb_put(skb, mhi_res->bytes_xferd);
			mhi_net_skb_agg(mhi_netdev, skb);
			break;
		case -ENOTCONN:
			/* MHI layer stopping/resetting the DL channel */
			dev_kfree_skb_any(skb);
			return;
		default:
			/* Unknown error, simply drop */
			dev_kfree_skb_any(skb);
			u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
			u64_stats_inc(&mhi_netdev->stats.rx_errors);
			u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
		}
	} else {
		skb_put(skb, mhi_res->bytes_xferd);

		if (mhi_netdev->skbagg_head) {
			/* Aggregate the final fragment */
			skb = mhi_net_skb_agg(mhi_netdev, skb);
			mhi_netdev->skbagg_head = NULL;
		}

		switch (skb->data[0] & 0xf0) {
		case 0x40:
			skb->protocol = htons(ETH_P_IP);
			break;
		case 0x60:
			skb->protocol = htons(ETH_P_IPV6);
			break;
		default:
			skb->protocol = htons(ETH_P_MAP);
			break;
		}

		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
		u64_stats_inc(&mhi_netdev->stats.rx_packets);
		u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
		__netif_rx(skb);
	}

	/* Refill if RX buffers queue becomes low */
	if (free_desc_count >= mhi_netdev->rx_queue_sz / 2)
		schedule_delayed_work(&mhi_netdev->rx_refill, 0);
}

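/* UL (host to device) transfer completion callback: release the skb
 * queued by mhi_ndo_xmit, update TX stats and wake the TX queue if it
 * was stopped on a full transfer ring.
 */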
static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	struct sk_buff *skb = mhi_res->buf_addr;

	/* Hardware has consumed the buffer, so free the skb (which is not
	 * freed by the MHI stack) and perform accounting.
	 */
	dev_consume_skb_any(skb);

	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
	if (unlikely(mhi_res->transaction_status)) {
		/* MHI layer stopping/resetting the UL channel */
		if (mhi_res->transaction_status == -ENOTCONN) {
			u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
			return;
		}

		u64_stats_inc(&mhi_netdev->stats.tx_errors);
	} else {
		u64_stats_inc(&mhi_netdev->stats.tx_packets);
		u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd);
	}
	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

	if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_wake_queue(ndev);
}

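/* Allocate and queue RX buffers until the DL transfer ring is full. If
 * allocation fails while the ring is still completely empty, retry later.
 */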
static void mhi_net_rx_refill_work(struct work_struct *work)
{
	struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
						      rx_refill.work);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	struct sk_buff *skb;
	unsigned int size;
	int err;

	size = mhi_netdev->mru ? mhi_netdev->mru : READ_ONCE(ndev->mtu);

	while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
		skb = netdev_alloc_skb(ndev, size);
		if (unlikely(!skb))
			break;

		err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
		if (unlikely(err)) {
			net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
					    ndev->name, err);
			kfree_skb(skb);
			break;
		}

		/* Do not hog the CPU if rx buffers are consumed faster than
		 * queued (unlikely).
		 */
		cond_resched();
	}

	/* If we're still starved of rx buffers, reschedule later */
	if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mhi_netdev->rx_queue_sz)
		schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
}

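/* Bind the net_device to the MHI device, start the MHI channels, size the
 * RX queue from the available transfer descriptors and register the
 * interface.
 */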
static int mhi_net_newlink(struct mhi_device *mhi_dev, struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev;
	int err;

	mhi_netdev = netdev_priv(ndev);

	dev_set_drvdata(&mhi_dev->dev, mhi_netdev);
	mhi_netdev->ndev = ndev;
	mhi_netdev->mdev = mhi_dev;
	mhi_netdev->skbagg_head = NULL;
	mhi_netdev->mru = mhi_dev->mhi_cntrl->mru;

	INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
	u64_stats_init(&mhi_netdev->stats.rx_syncp);
	u64_stats_init(&mhi_netdev->stats.tx_syncp);

	/* Start MHI channels */
	err = mhi_prepare_for_transfer(mhi_dev);
	if (err)
		return err;

	/* Number of transfer descriptors determines size of the queue */
	mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	err = register_netdev(ndev);
	if (err)
		return err;

	return 0;
}

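/* Unregister the interface, stop the MHI channels and release any pending
 * aggregation skb.
 */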
static void mhi_net_dellink(struct mhi_device *mhi_dev, struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	unregister_netdev(ndev);

	mhi_unprepare_from_transfer(mhi_dev);

	kfree_skb(mhi_netdev->skbagg_head);

	free_netdev(ndev);

	dev_set_drvdata(&mhi_dev->dev, NULL);
}

static int mhi_net_probe(struct mhi_device *mhi_dev,
			 const struct mhi_device_id *id)
{
	const struct mhi_device_info *info = (struct mhi_device_info *)id->driver_data;
	struct net_device *ndev;
	int err;

	ndev = alloc_netdev(sizeof(struct mhi_net_dev), info->netname,
			    NET_NAME_PREDICTABLE, mhi_net_setup);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &mhi_dev->dev);

	err = mhi_net_newlink(mhi_dev, ndev);
	if (err) {
		free_netdev(ndev);
		return err;
	}

	return 0;
}

static void mhi_net_remove(struct mhi_device *mhi_dev)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);

	mhi_net_dellink(mhi_dev, mhi_netdev->ndev);
}

static const struct mhi_device_info mhi_hwip0 = {
	.netname = "mhi_hwip%d",
};

static const struct mhi_device_info mhi_swip0 = {
	.netname = "mhi_swip%d",
};

static const struct mhi_device_id mhi_net_id_table[] = {
	/* Hardware accelerated data PATH (to modem IPA), protocol agnostic */
	{ .chan = "IP_HW0", .driver_data = (kernel_ulong_t)&mhi_hwip0 },
	/* Software data PATH (to modem CPU) */
	{ .chan = "IP_SW0", .driver_data = (kernel_ulong_t)&mhi_swip0 },
	{}
};
MODULE_DEVICE_TABLE(mhi, mhi_net_id_table);

static struct mhi_driver mhi_net_driver = {
	.probe = mhi_net_probe,
	.remove = mhi_net_remove,
	.dl_xfer_cb = mhi_net_dl_callback,
	.ul_xfer_cb = mhi_net_ul_callback,
	.id_table = mhi_net_id_table,
	.driver = {
		.name = "mhi_net",
	},
};

module_mhi_driver(mhi_net_driver);

MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_DESCRIPTION("Network over MHI");
MODULE_LICENSE("GPL v2");