// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	GRE GSO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <net/gre.h>
#include <net/gro.h>
#include <net/gso.h>

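/* GSO: segment a GRE-encapsulated super-packet.  The inner packet is
 * segmented with the regular GSO handlers and the saved outer MAC/IP/GRE
 * headers are then pushed back onto every resulting segment, recomputing
 * the GRE checksum per segment when SKB_GSO_GRE_CSUM was requested.
 */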
static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	bool need_csum, offload_csum, gso_partial, need_ipsec;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int gre_offset, outer_hlen;

	if (!skb->encapsulation)
		goto out;

	if (unlikely(tnl_hlen < sizeof(struct gre_base_hdr)))
		goto out;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* Set up the inner skb. */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = skb->inner_protocol;

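	/*
	 * The tunnel header has now been pulled and the header pointers
	 * rewritten, so from here on the inner frame is presented as an
	 * ordinary, non-encapsulated packet:
	 *
	 *   [outer MAC][outer IP][GRE] | [inner MAC, if any][inner IP]...
	 *                              ^ skb->data after __skb_pull()
	 *
	 * skb_mac_gso_segment() below therefore runs the regular protocol
	 * GSO handlers on the inner headers only; the saved outer headers
	 * (outer_hlen bytes, mac_len, protocol) are pushed back onto each
	 * resulting segment in the loop further down.
	 */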
	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM);
	skb->encap_hdr_csum = need_csum;

	features &= skb->dev->hw_enc_features;
	if (need_csum)
		features &= ~NETIF_F_SCTP_CRC;

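	/*
	 * need_ipsec: if an IPsec transform is attached to the dst, the
	 * packet will still be transformed after this point, so the GRE
	 * checksum is completed in software below rather than offloaded
	 * to the device.
	 */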
	need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
	/* Try to offload checksum if possible */
	offload_csum = !!(need_csum && !need_ipsec &&
			  (skb->dev->features & NETIF_F_HW_CSUM));

	/* segment inner packet. */
	segs = skb_mac_gso_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	outer_hlen = skb_tnl_header_len(skb);
	gre_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		struct gre_base_hdr *greh;
		__sum16 *pcsum;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, gre_offset);

		if (!need_csum)
			continue;

		greh = (struct gre_base_hdr *)skb_transport_header(skb);
		pcsum = (__sum16 *)(greh + 1);

		if (gso_partial && skb_is_gso(skb)) {
			unsigned int partial_adj;

			/* Adjust checksum to account for the fact that
			 * the partial checksum is based on actual size
			 * whereas headers should be based on MSS size.
			 */
			partial_adj = skb->len + skb_headroom(skb) -
				      SKB_GSO_CB(skb)->data_offset -
				      skb_shinfo(skb)->gso_size;
			*pcsum = ~csum_fold((__force __wsum)htonl(partial_adj));
		} else {
			*pcsum = 0;
		}

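		/*
		 * pcsum points at the GRE checksum field (the 16 bits right
		 * after the base header); pcsum + 1 is the reserved field,
		 * which is cleared below.  The value seeded into *pcsum
		 * above lies inside the region that the final checksum is
		 * computed over, so it is folded into the result whether
		 * that checksum is finished by gso_make_checksum() or by
		 * the device via csum_start/csum_offset.
		 */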
		*(pcsum + 1) = 0;
		if (skb->encapsulation || !offload_csum) {
			*pcsum = gso_make_checksum(skb, 0);
		} else {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_transport_header(skb) - skb->head;
			skb->csum_offset = sizeof(*greh);
		}
	} while ((skb = skb->next));
out:
	return segs;
}

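/* GRO: aggregate GRE-encapsulated packets.  Parse and validate the GRE
 * header, make sure every packet considered for merging comes from the
 * same tunnel (same flags, protocol and key), then hand the inner packet
 * to the gro_receive handler registered for the encapsulated protocol.
 */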
static struct sk_buff *gre_gro_receive(struct list_head *head,
				       struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	const struct gre_base_hdr *greh;
	unsigned int hlen, grehlen;
	unsigned int off;
	int flush = 1;
	struct packet_offload *ptype;
	__be16 type;

	if (NAPI_GRO_CB(skb)->encap_mark)
		goto out;

	NAPI_GRO_CB(skb)->encap_mark = 1;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*greh);
	greh = skb_gro_header(skb, hlen, off);
	if (unlikely(!greh))
		goto out;

	/* Only support version 0 and the K (key) and C (csum) flags.  Note
	 * that although support for the S (seq#) flag could easily be added
	 * for GRO, it is problematic for GSO and hence cannot be enabled
	 * here: a GRO'd packet may end up on the forwarding path and would
	 * then need GSO support to break it up correctly.
	 */
	if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
		goto out;

	/* We can only support GRE_CSUM if we can track the location of
	 * the GRE header.  In the case of FOU/GUE we cannot because the
	 * outer UDP header displaces the GRE header leaving us in a state
	 * of limbo.
	 */
	if ((greh->flags & GRE_CSUM) && NAPI_GRO_CB(skb)->is_fou)
		goto out;

	type = greh->protocol;

	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out;

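	/*
	 * Compute the full GRE header length for this packet: a 4-byte
	 * base header (flags + protocol), plus one GRE_HEADER_SECTION
	 * (4 bytes) for the checksum/reserved word and one for the key
	 * when the C and K flags are set (RFC 2784/2890).
	 */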
	grehlen = GRE_HEADER_SECTION;

	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	hlen = off + grehlen;
	if (skb_gro_header_hard(skb, hlen)) {
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out;
	}

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) {
		if (skb_gro_checksum_simple_validate(skb))
			goto out;

		skb_gro_checksum_try_convert(skb, IPPROTO_GRE,
					     null_compute_pseudo);
	}

	list_for_each_entry(p, head, list) {
		const struct gre_base_hdr *greh2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		/* The following checks are needed to ensure only pkts
		 * from the same tunnel are considered for aggregation.
		 * The criteria for "the same tunnel" includes:
		 * 1) same version (we only support version 0 here)
		 * 2) same protocol (we only support ETH_P_IP for now)
		 * 3) same set of flags
		 * 4) same key if the key field is present.
		 */
		greh2 = (struct gre_base_hdr *)(p->data + off);

		if (greh2->flags != greh->flags ||
		    greh2->protocol != greh->protocol) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		if (greh->flags & GRE_KEY) {
			/* compare keys */
			if (*(__be32 *)(greh2+1) != *(__be32 *)(greh+1)) {
				NAPI_GRO_CB(p)->same_flow = 0;
				continue;
			}
		}
	}

	skb_gro_pull(skb, grehlen);

	/* Adjust NAPI_GRO_CB(skb)->csum after skb_gro_pull(). */
	skb_gro_postpull_rcsum(skb, greh, grehlen);

	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
	flush = 0;

out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}

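/* GRO complete: called when an aggregated packet is handed up the stack.
 * Mark it as GRE-encapsulated GSO, let the gro_complete handler of the
 * inner protocol fix up the inner headers at nhoff + grehlen, and record
 * where the inner frame starts.
 */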
static int gre_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct gre_base_hdr *greh = (struct gre_base_hdr *)(skb->data + nhoff);
	struct packet_offload *ptype;
	unsigned int grehlen = sizeof(*greh);
	int err = -ENOENT;
	__be16 type;

	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type = SKB_GSO_GRE;

	type = greh->protocol;
	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);

	skb_set_inner_mac_header(skb, nhoff + grehlen);

	return err;
}

static const struct net_offload gre_offload = {
	.callbacks = {
		.gso_segment = gre_gso_segment,
		.gro_receive = gre_gro_receive,
		.gro_complete = gre_gro_complete,
	},
};

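/* Register the GRE offload handlers for IPPROTO_GRE in the IPv4 offload
 * table and, when IPv6 is enabled, in the IPv6 table as well.  If the
 * IPv6 registration fails, the IPv4 registration is rolled back.
 */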
static int __init gre_offload_init(void)
{
	int err;

	err = inet_add_offload(&gre_offload, IPPROTO_GRE);
#if IS_ENABLED(CONFIG_IPV6)
	if (err)
		return err;

	err = inet6_add_offload(&gre_offload, IPPROTO_GRE);
	if (err)
		inet_del_offload(&gre_offload, IPPROTO_GRE);
#endif

	return err;
}
device_initcall(gre_offload_init);