xref: /kernel/linux/linux-5.10/net/ipv4/gre_offload.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	GRE GSO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <net/gre.h>

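/* gre_gso_segment - software GSO for GRE-encapsulated packets.
 *
 * The inner packet is segmented with the usual MAC-layer GSO path, then the
 * outer MAC/IP/GRE headers are rebuilt in front of every resulting segment.
 * When the GRE header carries a checksum, it is filled in (or set up for
 * hardware completion) per segment.
 */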
static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	bool need_csum, need_recompute_csum, gso_partial;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int gre_offset, outer_hlen;

	if (!skb->encapsulation)
		goto out;

	if (unlikely(tnl_hlen < sizeof(struct gre_base_hdr)))
		goto out;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* setup inner skb. */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = skb->inner_protocol;

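	/* GRE_CSUM in gso_type means every segment needs the GRE checksum
	 * filled in.  csum_not_inet marks an inner checksum that is not the
	 * Internet checksum (e.g. SCTP's CRC32c), in which case the GRE
	 * checksum cannot be derived from the offloaded inner checksum and
	 * has to be recomputed over the segment payload.
	 */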
	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM);
	need_recompute_csum = skb->csum_not_inet;
	skb->encap_hdr_csum = need_csum;

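	/* Only offer features the device can also apply to encapsulated
	 * traffic when segmenting the inner packet.
	 */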
	features &= skb->dev->hw_enc_features;

	/* segment inner packet. */
	segs = skb_mac_gso_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

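	/* outer_hlen spans the whole outer header (outer MAC + IP + GRE);
	 * gre_offset is where the GRE header starts within that block.
	 */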
	outer_hlen = skb_tnl_header_len(skb);
	gre_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		struct gre_base_hdr *greh;
		__sum16 *pcsum;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

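		/* Restore the outer protocol and rebuild the outer MAC, IP
		 * and GRE header offsets on this segment.
		 */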
		skb->mac_len = mac_len;
		skb->protocol = protocol;

		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, gre_offset);

		if (!need_csum)
			continue;

		greh = (struct gre_base_hdr *)skb_transport_header(skb);
		pcsum = (__sum16 *)(greh + 1);

		if (gso_partial && skb_is_gso(skb)) {
			unsigned int partial_adj;

			/* Adjust checksum to account for the fact that
			 * the partial checksum is based on actual size
			 * whereas headers should be based on MSS size.
			 */
			partial_adj = skb->len + skb_headroom(skb) -
				      SKB_GSO_CB(skb)->data_offset -
				      skb_shinfo(skb)->gso_size;
			*pcsum = ~csum_fold((__force __wsum)htonl(partial_adj));
		} else {
			*pcsum = 0;
		}

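		/* Zero the 16-bit reserved field that follows the checksum;
		 * it is covered by the GRE checksum computation.
		 */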
		*(pcsum + 1) = 0;
		if (need_recompute_csum && !skb_is_gso(skb)) {
			__wsum csum;

			csum = skb_checksum(skb, gre_offset,
					    skb->len - gre_offset, 0);
			*pcsum = csum_fold(csum);
		} else {
			*pcsum = gso_make_checksum(skb, 0);
		}
	} while ((skb = skb->next));
out:
	return segs;
}

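/* gre_gro_receive - GRO receive handler for GRE.
 *
 * Accepts version 0 GRE headers with at most the key and checksum options,
 * verifies the GRE checksum when present, makes sure candidate packets on
 * the GRO list belong to the same tunnel (same flags, protocol and key),
 * and then hands the inner packet to the offload handler for the
 * encapsulated protocol.
 */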
static struct sk_buff *gre_gro_receive(struct list_head *head,
				       struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	const struct gre_base_hdr *greh;
	unsigned int hlen, grehlen;
	unsigned int off;
	int flush = 1;
	struct packet_offload *ptype;
	__be16 type;

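	/* Handle only one level of encapsulation per GRO pass; bail out if
	 * an encapsulating protocol has already claimed this packet.
	 */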
	if (NAPI_GRO_CB(skb)->encap_mark)
		goto out;

	NAPI_GRO_CB(skb)->encap_mark = 1;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*greh);
	greh = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out;
	}

	/* Only support version 0 and the K (key) and C (csum) flags.  Note
	 * that although support for the S (seq#) flag could easily be added
	 * for GRO, it cannot be enabled here because it is problematic for
	 * GSO: a GRO'd packet may end up in the forwarding path and would
	 * then need GSO support to be broken up correctly.
	 */
	if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
		goto out;

	/* We can only support GRE_CSUM if we can track the location of
	 * the GRE header.  In the case of FOU/GUE we cannot because the
	 * outer UDP header displaces the GRE header, leaving us in a state
	 * of limbo.
	 */
	if ((greh->flags & GRE_CSUM) && NAPI_GRO_CB(skb)->is_fou)
		goto out;

	type = greh->protocol;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out_unlock;

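	/* Optional checksum and key fields each add one 32-bit
	 * GRE_HEADER_SECTION to the base header length.
	 */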
	grehlen = GRE_HEADER_SECTION;

	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	hlen = off + grehlen;
	if (skb_gro_header_hard(skb, hlen)) {
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out_unlock;
	}

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) {
		if (skb_gro_checksum_simple_validate(skb))
			goto out_unlock;

		skb_gro_checksum_try_convert(skb, IPPROTO_GRE,
					     null_compute_pseudo);
	}

	list_for_each_entry(p, head, list) {
		const struct gre_base_hdr *greh2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		/* The following checks are needed to ensure only pkts
		 * from the same tunnel are considered for aggregation.
		 * The criteria for "the same tunnel" include:
		 * 1) same version (we only support version 0 here)
		 * 2) same protocol (we only support ETH_P_IP for now)
		 * 3) same set of flags
		 * 4) same key if the key field is present.
		 */
		greh2 = (struct gre_base_hdr *)(p->data + off);

		if (greh2->flags != greh->flags ||
		    greh2->protocol != greh->protocol) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		if (greh->flags & GRE_KEY) {
			/* compare keys */
			if (*(__be32 *)(greh2 + 1) != *(__be32 *)(greh + 1)) {
				NAPI_GRO_CB(p)->same_flow = 0;
				continue;
			}
		}
	}

	skb_gro_pull(skb, grehlen);

	/* Adjust NAPI_GRO_CB(skb)->csum after skb_gro_pull() */
	skb_gro_postpull_rcsum(skb, greh, grehlen);

	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
	flush = 0;

out_unlock:
	rcu_read_unlock();
out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}

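/* gre_gro_complete - finish GRO for a merged GRE packet.
 *
 * Called once aggregation is complete: marks the packet as GRE encapsulated
 * and invokes the gro_complete callback of the encapsulated protocol with
 * the offset of the inner headers.
 */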
static int gre_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct gre_base_hdr *greh = (struct gre_base_hdr *)(skb->data + nhoff);
	struct packet_offload *ptype;
	unsigned int grehlen = sizeof(*greh);
	int err = -ENOENT;
	__be16 type;

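	/* The merged packet carries a GRE tunnel header; mark it as such so
	 * it can be resegmented with GSO if it is forwarded later.
	 */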
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type = SKB_GSO_GRE;

	type = greh->protocol;
	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);

	rcu_read_unlock();

	skb_set_inner_mac_header(skb, nhoff + grehlen);

	return err;
}

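/* Offload callbacks registered for IPPROTO_GRE. */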
static const struct net_offload gre_offload = {
	.callbacks = {
		.gso_segment = gre_gso_segment,
		.gro_receive = gre_gro_receive,
		.gro_complete = gre_gro_complete,
	},
};

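/* Register the GRE offload handlers for IPv4 and, when IPv6 is enabled,
 * for IPv6 as well; an IPv6 failure rolls back the IPv4 registration.
 */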
static int __init gre_offload_init(void)
{
	int err;

	err = inet_add_offload(&gre_offload, IPPROTO_GRE);
#if IS_ENABLED(CONFIG_IPV6)
	if (err)
		return err;

	err = inet6_add_offload(&gre_offload, IPPROTO_GRE);
	if (err)
		inet_del_offload(&gre_offload, IPPROTO_GRE);
#endif

	return err;
}
device_initcall(gre_offload_init);