xref: /kernel/linux/linux-6.6/net/core/gso.c (revision 62306a36)
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <net/gso.h>
#include <net/gro.h>

/**
 *	skb_eth_gso_segment - segmentation handler for ethernet protocols.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *	@type: Ethernet Protocol ID
 */
struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
				    netdev_features_t features, __be16 type)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	return segs;
}
EXPORT_SYMBOL(skb_eth_gso_segment);
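/* Illustrative sketch (not from this file): a tunnel/L2.5 protocol's own
 * gso_segment callback can parse its header and hand the inner frame to
 * skb_eth_gso_segment(). foo_gso_segment() and the inner protocol value
 * are hypothetical.
 *
 *	static struct sk_buff *foo_gso_segment(struct sk_buff *skb,
 *					       netdev_features_t features)
 *	{
 *		__be16 inner_proto = htons(ETH_P_IP);	// assumed inner proto
 *
 *		return skb_eth_gso_segment(skb, features, inner_proto);
 *	}
 */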

/**
 *	skb_mac_gso_segment - mac layer segmentation handler.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);
/* openvswitch calls this on rx path, so we need a different check.
 */
static bool skb_needs_check(const struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL &&
		       skb->ip_summed != CHECKSUM_UNNECESSARY;

	return skb->ip_summed == CHECKSUM_NONE;
}

/**
 *	__skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *	@tx_path: whether it is called in TX path
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 *
 *	Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb.
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path)
{
	struct sk_buff *segs;

	if (unlikely(skb_needs_check(skb, tx_path))) {
		int err;

		/* We're going to init ->check field in TCP or UDP header */
		err = skb_cow_head(skb, 0);
		if (err < 0)
			return ERR_PTR(err);
	}

	/* Only report GSO partial support if it will enable us to
	 * support segmentation on this frame without needing additional
	 * work.
	 */
	if (features & NETIF_F_GSO_PARTIAL) {
		netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
		struct net_device *dev = skb->dev;

		partial_features |= dev->features & dev->gso_partial_features;
		if (!skb_gso_ok(skb, features | partial_features))
			features &= ~NETIF_F_GSO_PARTIAL;
	}

	BUILD_BUG_ON(SKB_GSO_CB_OFFSET +
		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));

	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
	SKB_GSO_CB(skb)->encap_level = 0;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	segs = skb_mac_gso_segment(skb, features);

	if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
		skb_warn_bad_offload(skb);

	return segs;
}
EXPORT_SYMBOL(__skb_gso_segment);
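/* Typical TX-path usage (sketch of the pattern used by the core xmit path,
 * simplified): callers normally go through the skb_gso_segment() wrapper
 * from <net/gso.h>, which passes tx_path == true, and must handle all three
 * outcomes: an error pointer, NULL (no segmentation needed), or a segment
 * list that replaces the original skb.
 *
 *	segs = skb_gso_segment(skb, features);
 *	if (IS_ERR(segs)) {
 *		goto drop;
 *	} else if (segs) {
 *		consume_skb(skb);
 *		skb = segs;
 *	}
 */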

/**
 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_transport_seglen is used to determine the real size of the
 * individual segments, including Layer4 headers (TCP/UDP).
 *
 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
 */
static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	unsigned int thlen = 0;

	if (skb->encapsulation) {
		thlen = skb_inner_transport_header(skb) -
			skb_transport_header(skb);

		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			thlen += inner_tcp_hdrlen(skb);
	} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
		thlen = tcp_hdrlen(skb);
	} else if (unlikely(skb_is_gso_sctp(skb))) {
		thlen = sizeof(struct sctphdr);
	} else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
		thlen = sizeof(struct udphdr);
	}
	/* UFO sets gso_size to the size of the fragmentation
	 * payload, i.e. the size of the L4 (UDP) header is already
	 * accounted for.
	 */
	return thlen + shinfo->gso_size;
}
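/* Worked example (illustrative numbers): for a non-encapsulated TCPv4 GSO
 * skb with a 32-byte TCP header (20 bytes plus timestamps) and
 * gso_size = 1448, this returns 32 + 1448 = 1480.
 */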

/**
 * skb_gso_network_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_network_seglen is used to determine the real size of the
 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
 *
 * The MAC/L2 header is not accounted for.
 */
static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) -
			       skb_network_header(skb);

	return hdr_len + skb_gso_transport_seglen(skb);
}

/**
 * skb_gso_mac_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_mac_seglen is used to determine the real size of the
 * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
 * headers (TCP/UDP).
 */
static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

	return hdr_len + skb_gso_transport_seglen(skb);
}
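/* Continuing the example above (illustrative numbers): with a 20-byte IPv4
 * header and a 14-byte Ethernet header, skb_gso_network_seglen() returns
 * 20 + 1480 = 1500 and skb_gso_mac_seglen() returns 14 + 20 + 1480 = 1514,
 * i.e. the on-wire size of each resulting frame.
 */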

/**
 * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
 *
 * There are a couple of instances where we have a GSO skb, and we
 * want to determine what size it would be after it is segmented.
 *
 * We might want to check:
 * -    L3+L4+payload size (e.g. IP forwarding)
 * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
 *
 * This is a helper to do that correctly considering GSO_BY_FRAGS.
 *
 * @skb: GSO skb
 *
 * @seg_len: The segmented length (from skb_gso_*_seglen). In the
 *           GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
 *
 * @max_len: The maximum permissible length.
 *
 * Returns true if the segmented length <= max length.
 */
static inline bool skb_gso_size_check(const struct sk_buff *skb,
				      unsigned int seg_len,
				      unsigned int max_len) {
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	const struct sk_buff *iter;

	if (shinfo->gso_size != GSO_BY_FRAGS)
		return seg_len <= max_len;

	/* Undo this so we can re-use header sizes */
	seg_len -= GSO_BY_FRAGS;

	skb_walk_frags(skb, iter) {
		if (seg_len + skb_headlen(iter) > max_len)
			return false;
	}

	return true;
}
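/* Note (descriptive, e.g. SCTP): a GSO_BY_FRAGS skb carries one ready-made
 * segment per frag_list entry, so instead of one "headers + gso_size" sum
 * the helper checks header sizes plus skb_headlen() of every fragment
 * against max_len.
 */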

/**
 * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
 *
 * @skb: GSO skb
 * @mtu: MTU to validate against
 *
 * skb_gso_validate_network_len validates if a given skb will fit a
 * wanted MTU once split. It considers L3 headers, L4 headers, and the
 * payload.
 */
bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
{
	return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
}
EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
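/* Illustrative sketch (modeled on the IP forwarding MTU check, simplified):
 * a GSO skb larger than the egress MTU can still be forwarded if every
 * resulting segment fits; send_frag_needed is a hypothetical label.
 *
 *	if (skb->len > mtu &&
 *	    !(skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)))
 *		goto send_frag_needed;
 */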

/**
 * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
 *
 * @skb: GSO skb
 * @len: length to validate against
 *
 * skb_gso_validate_mac_len validates if a given skb will fit a wanted
 * length once split, including L2, L3 and L4 headers and the payload.
 */
bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
{
	return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
}
EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
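/* Illustrative sketch (modeled on an oversize check in a shaping qdisc,
 * simplified): an over-limit packet may still be accepted if every segment's
 * full L2 frame fits once split. handle_oversize_gso() is hypothetical.
 *
 *	if (qdisc_pkt_len(skb) > max_size) {
 *		if (skb_is_gso(skb) &&
 *		    skb_gso_validate_mac_len(skb, max_size))
 *			return handle_oversize_gso(skb, sch, to_free);
 *		return qdisc_drop(skb, sch, to_free);
 *	}
 */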