/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the UDP module.
 *
 * Version:	@(#)udp.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 * Fixes:
 *		Alan Cox	: Turned on udp checksums. I don't want to
 *				  chase 'memory corruption' bugs that aren't!
 */
#ifndef _UDP_H
#define _UDP_H

#include <linux/list.h>
#include <linux/bug.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/indirect_call_wrapper.h>

/**
 *	struct udp_skb_cb  -  UDP(-Lite) private variables
 *
 *	@header:      private variables used by IPv4/IPv6
 *	@cscov:       checksum coverage length (UDP-Lite only)
 *	@partial_cov: if set indicates partial csum coverage
 */
struct udp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;
	__u16		cscov;
	__u8		partial_cov;
};
#define UDP_SKB_CB(__skb)	((struct udp_skb_cb *)((__skb)->cb))
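
/* Illustrative sketch (not part of this header): UDP-Lite's receive path
 * stashes its checksum-coverage state in the skb control block through
 * UDP_SKB_CB(), roughly as follows.
 *
 *	UDP_SKB_CB(skb)->partial_cov = 0;
 *	UDP_SKB_CB(skb)->cscov = skb->len;
 */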

/**
 *	struct udp_hslot - UDP hash slot
 *
 *	@head:	head of list of sockets
 *	@count:	number of sockets in 'head' list
 *	@lock:	spinlock protecting changes to head/count
 */
struct udp_hslot {
	struct hlist_head	head;
	int			count;
	spinlock_t		lock;
} __attribute__((aligned(2 * sizeof(long))));

/**
 *	struct udp_table - UDP table
 *
 *	@hash:	hash table, sockets are hashed on (local port)
 *	@hash2:	hash table, sockets are hashed on (local port, local address)
 *	@mask:	number of slots in hash tables, minus 1
 *	@log:	log2(number of slots in hash table)
 */
struct udp_table {
	struct udp_hslot	*hash;
	struct udp_hslot	*hash2;
	unsigned int		mask;
	unsigned int		log;
};
extern struct udp_table udp_table;
void udp_table_init(struct udp_table *, const char *);
static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
					     struct net *net, unsigned int num)
{
	return &table->hash[udp_hashfn(net, num, table->mask)];
}
/*
 * For the secondary hash, net_hash_mix() has already been folded into the
 * hash value before udp_hashslot2() is called; this explains the difference
 * from udp_hashslot(), which takes a net argument and mixes it in itself.
 */
static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
					      unsigned int hash)
{
	return &table->hash2[hash & table->mask];
}
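
/* Illustrative sketch (assumes the ipv4_portaddr_hash() helper from
 * <net/ip.h>, which already mixes in net_hash_mix()): looking up the
 * (local port, local address) slot for an IPv4 socket.
 *
 *	unsigned int hash2;
 *	struct udp_hslot *hslot2;
 *
 *	hash2 = ipv4_portaddr_hash(net, inet_sk(sk)->inet_rcv_saddr,
 *				   inet_sk(sk)->inet_num);
 *	hslot2 = udp_hashslot2(&udp_table, hash2);
 */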

extern struct proto udp_prot;

extern atomic_long_t udp_memory_allocated;

/* sysctl variables for udp */
extern long sysctl_udp_mem[3];
extern int sysctl_udp_rmem_min;
extern int sysctl_udp_wmem_min;

struct sk_buff;

/*
 *	Generic checksumming routines for UDP(-Lite) v4 and v6
 */
static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
{
	return (UDP_SKB_CB(skb)->cscov == skb->len ?
		__skb_checksum_complete(skb) :
		__skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov));
}

static inline int udp_lib_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__udp_lib_checksum_complete(skb);
}
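
/* Illustrative sketch (not part of this header): receive paths typically
 * gate delivery on this check, e.g.
 *
 *	if (udp_lib_checksum_complete(skb))
 *		goto csum_error;
 */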

/**
 * 	udp_csum_outgoing  -  compute UDPv4/v6 checksum over fragments
 * 	@sk: 	socket we are writing to
 * 	@skb: 	sk_buff containing the filled-in UDP header
 * 	        (checksum field must be zeroed out)
 */
static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), 0);
	skb_queue_walk(&sk->sk_write_queue, skb) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}

static inline __wsum udp_csum(struct sk_buff *skb)
{
	__wsum csum = csum_partial(skb_transport_header(skb),
				   sizeof(struct udphdr), skb->csum);

	for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
		csum = csum_add(csum, skb->csum);
	}
	return csum;
}

static inline __sum16 udp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base);
}
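
/* Illustrative sketch (mirrors the pattern in udp_set_csum()): fold the
 * pseudo-header into the final checksum, remembering that an all-zero
 * result must go on the wire as CSUM_MANGLED_0, since 0 means
 * "no checksum" for UDP over IPv4.
 *
 *	uh->check = udp_v4_check(len, saddr, daddr, csum);
 *	if (uh->check == 0)
 *		uh->check = CSUM_MANGLED_0;
 */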

void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len);

static inline void udp_csum_pull_header(struct sk_buff *skb)
{
	if (!skb->csum_valid && skb->ip_summed == CHECKSUM_NONE)
		skb->csum = csum_partial(skb->data, sizeof(struct udphdr),
					 skb->csum);
	skb_pull_rcsum(skb, sizeof(struct udphdr));
	UDP_SKB_CB(skb)->cscov -= sizeof(struct udphdr);
}

typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
				     __be16 dport);

INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));
struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct udphdr *uh, struct sock *sk);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
void udp_v6_early_demux(struct sk_buff *skb);

struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
				  netdev_features_t features, bool is_ipv6);

static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{
	struct udphdr *uh;
	unsigned int hlen, off;

	off  = skb_gro_offset(skb);
	hlen = off + sizeof(*uh);
	uh   = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen))
		uh = skb_gro_header_slow(skb, hlen, off);

	return uh;
}
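
/* Illustrative sketch (not part of this header): GRO receive handlers
 * start by mapping the UDP header and flush when it is unavailable, e.g.
 *
 *	struct udphdr *uh = udp_gro_udphdr(skb);
 *
 *	if (unlikely(!uh))
 *		goto flush;
 */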

/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
static inline int udp_lib_hash(struct sock *sk)
{
	BUG();
	return 0;
}

void udp_lib_unhash(struct sock *sk);
void udp_lib_rehash(struct sock *sk, u16 new_hash);

static inline void udp_lib_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}

int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr);

u32 udp_flow_hashrnd(void);

static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
				       int min, int max, bool use_eth)
{
	u32 hash;

	if (min >= max) {
		/* Use default range */
		inet_get_local_port_range(net, &min, &max);
	}

	hash = skb_get_hash(skb);
	if (unlikely(!hash)) {
		if (use_eth) {
			/* Can't find a normal hash; the caller has indicated
			 * an Ethernet packet, so use that to compute a hash.
			 */
			hash = jhash(skb->data, 2 * ETH_ALEN,
				     (__force u32) skb->protocol);
		} else {
			/* Can't derive any sort of hash for the packet; set
			 * it to some consistent random value.
			 */
			hash = udp_flow_hashrnd();
		}
	}

	/* Since this is being sent on the wire, obfuscate the hash a bit
	 * to minimize the possibility of leaking any useful information
	 * to an attacker. Only the upper 16 bits are relevant in the
	 * computation of the 16-bit port value.
	 */
	hash ^= hash << 16;

	return htons((((u64) hash * (max - min)) >> 32) + min);
}
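
/* Illustrative sketch (not part of this header): tunnel drivers such as
 * vxlan use this helper to pick an entropy-bearing source port for the
 * outer UDP header, e.g.
 *
 *	__be16 src_port = udp_flow_src_port(net, skb, 0, 0, true);
 *
 * Passing min == max == 0 selects the netns default local port range.
 */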

static inline int udp_rqueue_get(struct sock *sk)
{
	return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
}

static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
				       int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_udp_l3mdev_accept),
				 bound_dev_if, dif, sdif);
#else
	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
#endif
}
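
/* Illustrative sketch (not part of this header): socket lookup scoring
 * uses this to honour device bindings, along the lines of
 *
 *	if (!udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
 *		return -1;
 */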

/* net/ipv4/udp.c */
void udp_destruct_common(struct sock *sk);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
			       int noblock, int *off, int *err);
static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
					   int noblock, int *err)
{
	int off = 0;

	return __skb_recv_udp(sk, flags, noblock, &off, err);
}

int udp_v4_early_demux(struct sk_buff *skb);
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_get_port(struct sock *sk, unsigned short snum,
		 int (*saddr_cmp)(const struct sock *,
				  const struct sock *));
int udp_err(struct sk_buff *, u32);
int udp_abort(struct sock *sk, int err);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int udp_push_pending_frames(struct sock *sk);
void udp_flush_pending_frames(struct sock *sk);
int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size);
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int udp_init_sock(struct sock *sk);
int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6);
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen);
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       sockptr_t optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *));
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif);
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			       __be32 daddr, __be16 dport, int dif, int sdif,
			       struct udp_table *tbl, struct sk_buff *skb);
struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport);
struct sock *udp6_lib_lookup(struct net *net,
			     const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport,
			     int dif);
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *tbl,
			       struct sk_buff *skb);
struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport);

/* UDP uses skb->dev_scratch to cache as much information as possible and
 * avoid multiple cache misses on dequeue()
 */
struct udp_dev_scratch {
	/* skb->truesize and the stateless bit are embedded in a single field;
	 * do not use a bitfield since the compiler emits better/smaller code
	 * this way
	 */
	u32 _tsize_state;

#if BITS_PER_LONG == 64
	/* len and the bit needed to compute skb_csum_unnecessary
	 * will be on cold cache lines at recvmsg time.
	 * skb->len can be stored in 16 bits since the UDP header has
	 * already been validated and pulled.
	 */
	u16 len;
	bool is_linear;
	bool csum_unnecessary;
#endif
};

static inline struct udp_dev_scratch *udp_skb_scratch(struct sk_buff *skb)
{
	return (struct udp_dev_scratch *)&skb->dev_scratch;
}
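
/* Illustrative sketch (loosely based on the static udp_set_dev_scratch()
 * in net/ipv4/udp.c): the scratch area is populated once at enqueue time,
 * roughly as
 *
 *	struct udp_dev_scratch *scratch = udp_skb_scratch(skb);
 *
 *	scratch->_tsize_state = skb->truesize;
 *	#if BITS_PER_LONG == 64
 *	scratch->len = skb->len;
 *	scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
 *	scratch->is_linear = !skb_is_nonlinear(skb);
 *	#endif
 */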

#if BITS_PER_LONG == 64
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->csum_unnecessary;
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->is_linear;
}

#else
static inline unsigned int udp_skb_len(struct sk_buff *skb)
{
	return skb->len;
}

static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb);
}

static inline bool udp_skb_is_linear(struct sk_buff *skb)
{
	return !skb_is_nonlinear(skb);
}
#endif

static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
				  struct iov_iter *to)
{
	int n;

	n = copy_to_iter(skb->data + off, len, to);
	if (n == len)
		return 0;

	iov_iter_revert(to, n);
	return -EFAULT;
}
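
/* Illustrative sketch (mirrors the copy step in udp_recvmsg()): linear
 * skbs can take the fast copy path, others fall back to the generic
 * datagram copy, e.g.
 *
 *	if (udp_skb_is_linear(skb))
 *		err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
 *	else
 *		err = skb_copy_datagram_msg(skb, off, msg, copied);
 */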

/*
 * 	SNMP statistics for UDP and UDP-Lite
 */
#define UDP_INC_STATS(net, field, is_udplite)		      do { \
	if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field);       \
	else		SNMP_INC_STATS((net)->mib.udp_statistics, field);  }  while (0)
#define __UDP_INC_STATS(net, field, is_udplite)		      do { \
	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field);         \
	else		__SNMP_INC_STATS((net)->mib.udp_statistics, field);    }  while (0)

#define __UDP6_INC_STATS(net, field, is_udplite)	    do { \
	if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);\
	else		__SNMP_INC_STATS((net)->mib.udp_stats_in6, field);  \
} while (0)
#define UDP6_INC_STATS(net, field, is_udplite)		    do { \
	if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);  \
	else	    SNMP_INC_STATS((net)->mib.udp_stats_in6, field);      \
} while (0)

#if IS_ENABLED(CONFIG_IPV6)
#define __UDPX_MIB(sk, ipv4)						\
({									\
	ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :	\
				 sock_net(sk)->mib.udp_statistics) :	\
		(IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 :	\
				 sock_net(sk)->mib.udp_stats_in6);	\
})
#else
#define __UDPX_MIB(sk, ipv4)						\
({									\
	IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :		\
			 sock_net(sk)->mib.udp_statistics;		\
})
#endif

#define __UDPX_INC_STATS(sk, field) \
	__SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)
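
/* Illustrative sketch (not part of this header): callers bump the right
 * per-family, per-protocol MIB counter without open-coding the choice
 * between IPv4/IPv6 and UDP/UDP-Lite, e.g.
 *
 *	__UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
 */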

#ifdef CONFIG_PROC_FS
struct udp_seq_afinfo {
	sa_family_t			family;
	struct udp_table		*udp_table;
};

struct udp_iter_state {
	struct seq_net_private  p;
	int			bucket;
	struct udp_seq_afinfo	*bpf_seq_afinfo;
};

void *udp_seq_start(struct seq_file *seq, loff_t *pos);
void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
void udp_seq_stop(struct seq_file *seq, void *v);

extern const struct seq_operations udp_seq_ops;
extern const struct seq_operations udp6_seq_ops;

int udp4_proc_init(void);
void udp4_proc_exit(void);
#endif /* CONFIG_PROC_FS */

int udpv4_offload_init(void);

void udp_init(void);

DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void);
void udp_encap_disable(void);
#if IS_ENABLED(CONFIG_IPV6)
DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void);
#endif

static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
					      struct sk_buff *skb, bool ipv4)
{
	netdev_features_t features = NETIF_F_SG;
	struct sk_buff *segs;

	/* Avoid csum recalculation by skb_segment unless userspace explicitly
	 * asks for the final checksum values
	 */
	if (!inet_get_convert_csum(sk))
		features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* UDP segmentation expects packets of type CHECKSUM_PARTIAL or
	 * CHECKSUM_NONE in __udp_gso_segment. UDP GRO indeed builds partial
	 * packets in udp_gro_complete_segment, and so does UDP GSO, as
	 * verified by udp_send_skb. But when those packets are looped in
	 * dev_loopback_xmit, their ip_summed CHECKSUM_NONE is changed to
	 * CHECKSUM_UNNECESSARY. Reset it in this specific case, where
	 * PARTIAL is both correct and required.
	 */
	if (skb->pkt_type == PACKET_LOOPBACK)
		skb->ip_summed = CHECKSUM_PARTIAL;

	/* the GSO CB lies after the UDP one, so there is no need to save and
	 * restore any CB fragment
	 */
	segs = __skb_gso_segment(skb, features, false);
	if (IS_ERR_OR_NULL(segs)) {
		int segs_nr = skb_shinfo(skb)->gso_segs;

		atomic_add(segs_nr, &sk->sk_drops);
		SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
		kfree_skb(skb);
		return NULL;
	}

	consume_skb(skb);
	return segs;
}
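
/* Illustrative sketch (loosely based on udp_queue_rcv_skb() in
 * net/ipv4/udp.c): a receive path segments an unexpected GSO packet and
 * then processes each resulting skb individually.
 *
 *	struct sk_buff *next, *segs;
 *
 *	segs = udp_rcv_segment(sk, skb, true);
 *	skb_list_walk_safe(segs, skb, next) {
 *		__skb_pull(skb, skb_transport_offset(skb));
 *		// hand each segment to the per-skb receive routine
 *	}
 */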

#ifdef CONFIG_BPF_STREAM_PARSER
struct sk_psock;
struct proto *udp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
#endif /* CONFIG_BPF_STREAM_PARSER */

#endif	/* _UDP_H */