// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * RMNET Data ingress/egress handler
 */

#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/if_arp.h>
#include <net/sock.h>
#include "rmnet_private.h"
#include "rmnet_config.h"
#include "rmnet_vnd.h"
#include "rmnet_map.h"
#include "rmnet_handlers.h"

#define RMNET_IP_VERSION_4 0x40
#define RMNET_IP_VERSION_6 0x60

/* Helper Functions */

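/* Set skb->protocol from the IP version nibble of the first payload byte.
 * Anything that is neither IPv4 nor IPv6 remains a raw MAP frame.
 */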
static void rmnet_set_skb_proto(struct sk_buff *skb)
{
	switch (skb->data[0] & 0xF0) {
	case RMNET_IP_VERSION_4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case RMNET_IP_VERSION_6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	default:
		skb->protocol = htons(ETH_P_MAP);
		break;
	}
}

/* Generic handler */

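/* Hand a demuxed packet to the stack through the virtual device's GRO
 * cell. Header offsets are reset first: the MAP header has already been
 * stripped, so skb->data now points at the network header.
 */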
static void
rmnet_deliver_skb(struct sk_buff *skb)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);

	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	rmnet_vnd_rx_fixup(skb, skb->dev);

	skb->pkt_type = PACKET_HOST;
	skb_set_mac_header(skb, 0);
	gro_cells_receive(&priv->gro_cells, skb);
}

/* MAP handler */

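/* Demultiplex one MAP frame. The 4-byte MAP header (command/data bit, pad
 * length, mux ID and a 16-bit packet length; see rmnet_map.h) selects the
 * logical endpoint; the header and trailing padding are then stripped and
 * the inner packet is delivered on that endpoint's virtual device. Command
 * frames are diverted to rmnet_map_command() when command handling is
 * enabled, and dropped otherwise.
 */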
static void
__rmnet_map_ingress_handler(struct sk_buff *skb,
			    struct rmnet_port *port)
{
	struct rmnet_endpoint *ep;
	u16 len, pad;
	u8 mux_id;

	if (RMNET_MAP_GET_CD_BIT(skb)) {
		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
			return rmnet_map_command(skb, port);

		goto free_skb;
	}

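	/* The MAP header's length field covers the payload plus trailing pad
	 * bytes, so the pad length is subtracted to get the true payload
	 * length.
	 */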
	mux_id = RMNET_MAP_GET_MUX_ID(skb);
	pad = RMNET_MAP_GET_PAD(skb);
	len = RMNET_MAP_GET_LENGTH(skb) - pad;

	if (mux_id >= RMNET_MAX_LOGICAL_EP)
		goto free_skb;

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep)
		goto free_skb;

	skb->dev = ep->egress_dev;

	/* Subtract MAP header */
	skb_pull(skb, sizeof(struct rmnet_map_header));
	rmnet_set_skb_proto(skb);

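	/* With MAPv4 checksum offload, a checksum trailer follows the padded
	 * payload; if the hardware verdict is good, mark the skb so the
	 * stack skips its own checksum validation.
	 */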
	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
		if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_trim(skb, len);
	rmnet_deliver_skb(skb);
	return;

free_skb:
	kfree_skb(skb);
}

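/* Ingress entry for MAP data. With deaggregation enabled, one skb from the
 * real device may carry several MAP frames; each frame is split out and
 * handled individually before the aggregate skb is consumed.
 */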
static void
rmnet_map_ingress_handler(struct sk_buff *skb,
			  struct rmnet_port *port)
{
	struct sk_buff *skbn;

	if (skb->dev->type == ARPHRD_ETHER) {
		if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
			kfree_skb(skb);
			return;
		}

		skb_push(skb, ETH_HLEN);
	}

	if (port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION) {
		while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
			__rmnet_map_ingress_handler(skbn, port);

		consume_skb(skb);
	} else {
		__rmnet_map_ingress_handler(skb, port);
	}
}

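/* Prepare an outgoing packet for the real device: optionally add the
 * uplink checksum offload header, then prepend the MAP header carrying the
 * endpoint's mux ID. Returns 0 on success or -ENOMEM when headroom cannot
 * be grown or the MAP header cannot be added.
 */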
static int rmnet_map_egress_handler(struct sk_buff *skb,
				    struct rmnet_port *port, u8 mux_id,
				    struct net_device *orig_dev)
{
	int required_headroom, additional_header_len;
	struct rmnet_map_header *map_header;

	additional_header_len = 0;
	required_headroom = sizeof(struct rmnet_map_header);

	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
		additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
		required_headroom += additional_header_len;
	}

	if (skb_headroom(skb) < required_headroom) {
		if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
			return -ENOMEM;
	}

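	/* rmnet_map_checksum_uplink_packet() pushes the checksum header
	 * itself, so it must run before the MAP header is prepended;
	 * additional_header_len tells rmnet_map_add_map_header() how many
	 * bytes of extra headers sit in front of the payload.
	 */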
	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
		rmnet_map_checksum_uplink_packet(skb, orig_dev);

	map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
	if (!map_header)
		return -ENOMEM;

	map_header->mux_id = mux_id;

	skb->protocol = htons(ETH_P_MAP);

	return 0;
}

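/* Bridge mode: restore the link-layer header if one was present and
 * forward the frame unmodified to the device bridged with this one.
 */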
static void
rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
{
	if (skb_mac_header_was_set(skb))
		skb_push(skb, skb->mac_len);

	if (bridge_dev) {
		skb->dev = bridge_dev;
		dev_queue_xmit(skb);
	}
}

/* Ingress / Egress Entry Points */

/* Process the packet according to the ingress data format configured for
 * the receiving device. The logical endpoint is determined by inspecting
 * the packet, which is then delivered to the egress device listed in that
 * endpoint's configuration.
 */
rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct rmnet_port *port;
	struct net_device *dev;

	if (!skb)
		goto done;

	if (skb->pkt_type == PACKET_LOOPBACK)
		return RX_HANDLER_PASS;

	dev = skb->dev;
	port = rmnet_get_port_rcu(dev);
	if (unlikely(!port)) {
		atomic_long_inc(&skb->dev->rx_nohandler);
		kfree_skb(skb);
		goto done;
	}

	switch (port->rmnet_mode) {
	case RMNET_EPMODE_VND:
		rmnet_map_ingress_handler(skb, port);
		break;
	case RMNET_EPMODE_BRIDGE:
		rmnet_bridge_handler(skb, port->bridge_ep);
		break;
	}

done:
	return RX_HANDLER_CONSUMED;
}

/* Modify the packet according to the logical endpoint configuration and
 * the egress data format of the device configured in the logical endpoint,
 * then transmit it on that device.
 */
void rmnet_egress_handler(struct sk_buff *skb)
{
	struct net_device *orig_dev;
	struct rmnet_port *port;
	struct rmnet_priv *priv;
	u8 mux_id;

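	/* Use a pacing shift of 8 rather than the default of 10 so TCP may
	 * queue roughly four times as much unsent data per socket, which
	 * helps keep aggregating modem hardware busy.
	 */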
	sk_pacing_shift_update(skb->sk, 8);

	orig_dev = skb->dev;
	priv = netdev_priv(orig_dev);
	skb->dev = priv->real_dev;
	mux_id = priv->mux_id;

	port = rmnet_get_port_rcu(skb->dev);
	if (!port)
		goto drop;

	if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev))
		goto drop;

	rmnet_vnd_tx_fixup(skb, orig_dev);

	dev_queue_xmit(skb);
	return;

drop:
	this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
	kfree_skb(skb);
}