xref: /kernel/linux/linux-6.6/include/net/ipv6_frag.h (revision 62306a36)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _IPV6_FRAG_H
#define _IPV6_FRAG_H
#include <linux/icmpv6.h>
#include <linux/kernel.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <net/inet_frag.h>

enum ip6_defrag_users {
	IP6_DEFRAG_LOCAL_DELIVER,
	IP6_DEFRAG_CONNTRACK_IN,
	__IP6_DEFRAG_CONNTRACK_IN	= IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
	IP6_DEFRAG_CONNTRACK_OUT,
	__IP6_DEFRAG_CONNTRACK_OUT	= IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
	IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
	__IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
};
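/*
 * The __IP6_DEFRAG_CONNTRACK_* entries reserve a USHRT_MAX-wide range after
 * each conntrack user so that a 16-bit conntrack zone id can be folded into
 * the defrag user value; the netfilter defrag hooks compute something like
 * (illustrative sketch, zone_id being the conntrack zone of the packet):
 *
 *	enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone_id;
 *
 * Because the user value is part of struct frag_v6_compare_key, fragments
 * belonging to different zones never end up in the same queue.
 */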

/*
 *	Equivalent of the IPv4 reassembly queue, struct ipq
 */
struct frag_queue {
	struct inet_frag_queue	q;

	int			iif;		/* ifindex of the device the fragments arrived on */
	__u16			nhoffset;	/* offset of the next-header value to restore after reassembly */
	u8			ecn;		/* RFC 3168 ECN bits accumulated over the fragments */
};

#if IS_ENABLED(CONFIG_IPV6)
static inline void ip6frag_init(struct inet_frag_queue *q, const void *a)
{
	struct frag_queue *fq = container_of(q, struct frag_queue, q);
	const struct frag_v6_compare_key *key = a;

	q->key.v6 = *key;
	fq->ecn = 0;
}

static inline u32 ip6frag_key_hashfn(const void *data, u32 len, u32 seed)
{
	return jhash2(data,
		      sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
}

static inline u32 ip6frag_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct inet_frag_queue *fq = data;

	return jhash2((const u32 *)&fq->key.v6,
		      sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
}

static inline int
ip6frag_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
{
	const struct frag_v6_compare_key *key = arg->key;
	const struct inet_frag_queue *fq = ptr;

	return !!memcmp(&fq->key, key, sizeof(*key));
}
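/*
 * Illustrative only: the three helpers above are meant to serve as the
 * rhashtable callbacks of a fragment-queue hash table, roughly the way
 * net/ipv6/reassembly.c wires them up (sketch, not a verbatim copy):
 *
 *	static const struct rhashtable_params ip6_rhash_params = {
 *		.head_offset		= offsetof(struct inet_frag_queue, node),
 *		.hashfn			= ip6frag_key_hashfn,
 *		.obj_hashfn		= ip6frag_obj_hashfn,
 *		.obj_cmpfn		= ip6frag_obj_cmpfn,
 *		.automatic_shrinking	= true,
 *	};
 *
 * ip6frag_init() is the matching inet_frags ->constructor callback, run
 * when a queue is allocated for a key that is not yet in the table.
 */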

static inline void
ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
{
	struct net_device *dev = NULL;
	struct sk_buff *head;

	rcu_read_lock();
	/* Paired with the WRITE_ONCE() in fqdir_pre_exit(). */
	if (READ_ONCE(fq->q.fqdir->dead))
		goto out_rcu_unlock;
	spin_lock(&fq->q.lock);

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto out;

	fq->q.flags |= INET_FRAG_DROP;
	inet_frag_kill(&fq->q);

	dev = dev_get_by_index_rcu(net, fq->iif);
	if (!dev)
		goto out;

	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);

	/* Don't send error if the first segment did not arrive. */
	if (!(fq->q.flags & INET_FRAG_FIRST_IN))
		goto out;

	/* sk_buff::dev and sk_buff::rbnode are unionized. So we
	 * pull the head out of the tree in order to be able to
	 * deal with head->dev.
	 */
	head = inet_frag_pull_head(&fq->q);
	if (!head)
		goto out;

	head->dev = dev;
	spin_unlock(&fq->q.lock);

	icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
	kfree_skb_reason(head, SKB_DROP_REASON_FRAG_REASM_TIMEOUT);
	goto out_rcu_unlock;

out:
	spin_unlock(&fq->q.lock);
out_rcu_unlock:
	rcu_read_unlock();
	inet_frag_put(&fq->q);
}
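/*
 * Illustrative only: ip6frag_expire_frag_queue() is written to run from the
 * inet_frags ->frag_expire timer callback, along these lines (sketch based
 * on net/ipv6/reassembly.c, not a verbatim copy):
 *
 *	static void ip6_frag_expire(struct timer_list *t)
 *	{
 *		struct inet_frag_queue *frag = from_timer(frag, t, timer);
 *		struct frag_queue *fq = container_of(frag, struct frag_queue, q);
 *
 *		ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);
 *	}
 *
 * Note that the helper itself calls inet_frag_put() on the queue before
 * returning, as seen above.
 */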

/* Check if the upper layer header is truncated in the first fragment. */
static inline bool
ipv6frag_thdr_truncated(struct sk_buff *skb, int start, u8 *nexthdrp)
{
	u8 nexthdr = *nexthdrp;
	__be16 frag_off;
	int offset;

	offset = ipv6_skip_exthdr(skb, start, &nexthdr, &frag_off);
	if (offset < 0 || (frag_off & htons(IP6_OFFSET)))
		return false;
	switch (nexthdr) {
	case NEXTHDR_TCP:
		offset += sizeof(struct tcphdr);
		break;
	case NEXTHDR_UDP:
		offset += sizeof(struct udphdr);
		break;
	case NEXTHDR_ICMP:
		offset += sizeof(struct icmp6hdr);
		break;
	default:
		offset += 1;
	}
	if (offset > skb->len)
		return true;
	return false;
}
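/*
 * Per RFC 8200, Section 4.5, a first fragment that does not include all
 * headers through the upper-layer header should be discarded (and an ICMPv6
 * Parameter Problem sent).  A caller typically performs the check roughly
 * the way the netfilter reassembly path does (illustrative sketch; fhoff is
 * assumed to be the offset of the fragment header located earlier by the
 * caller):
 *
 *	u8 nexthdr = NEXTHDR_FRAGMENT;
 *
 *	if (ipv6frag_thdr_truncated(skb, fhoff, &nexthdr))
 *		return 0;
 */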

#endif
#endif