xref: /kernel/linux/linux-5.10/drivers/net/gtp.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0-or-later
/* GTP according to GSM TS 09.60 / 3GPP TS 29.060
 *
 * (C) 2012-2014 by sysmocom - s.f.m.c. GmbH
 * (C) 2016 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Author: Harald Welte <hwelte@sysmocom.de>
 *	   Pablo Neira Ayuso <pablo@netfilter.org>
 *	   Andreas Schultz <aschultz@travelping.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/rculist.h>
#include <linux/jhash.h>
#include <linux/if_tunnel.h>
#include <linux/net.h>
#include <linux/file.h>
#include <linux/gtp.h>

#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/genetlink.h>
#include <net/netns/generic.h>
#include <net/gtp.h>

/* An active session for the subscriber. */
struct pdp_ctx {
	struct hlist_node	hlist_tid;
	struct hlist_node	hlist_addr;

	union {
		struct {
			u64	tid;
			u16	flow;
		} v0;
		struct {
			u32	i_tei;
			u32	o_tei;
		} v1;
	} u;
	u8			gtp_version;
	u16			af;

	struct in_addr		ms_addr_ip4;
	struct in_addr		peer_addr_ip4;

	struct sock		*sk;
	struct net_device       *dev;

	atomic_t		tx_seq;
	struct rcu_head		rcu_head;
};
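
/* Each context is reachable via two hash tables at once: tid_hash is
 * keyed by the tunnel identifier (GTPv0 TID or GTPv1 incoming TEI) for
 * the receive path, while addr_hash is keyed by the mobile subscriber's
 * IPv4 address for the transmit path.
 */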

/* One instance of the GTP device. */
struct gtp_dev {
	struct list_head	list;

	struct sock		*sk0;
	struct sock		*sk1u;

	struct net_device	*dev;

	unsigned int		role;
	unsigned int		hash_size;
	struct hlist_head	*tid_hash;
	struct hlist_head	*addr_hash;
};

static unsigned int gtp_net_id __read_mostly;

struct gtp_net {
	struct list_head gtp_dev_list;
};

static u32 gtp_h_initval;

static void pdp_context_delete(struct pdp_ctx *pctx);

static inline u32 gtp0_hashfn(u64 tid)
{
	u32 *tid32 = (u32 *) &tid;
	return jhash_2words(tid32[0], tid32[1], gtp_h_initval);
}

static inline u32 gtp1u_hashfn(u32 tid)
{
	return jhash_1word(tid, gtp_h_initval);
}

static inline u32 ipv4_hashfn(__be32 ip)
{
	return jhash_1word((__force u32)ip, gtp_h_initval);
}

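/* All three helpers hash through jhash with gtp_h_initval, a random
 * seed drawn at module init, so bucket placement cannot be predicted
 * from the TID/TEI or MS address alone.
 */
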
/* Resolve a PDP context structure based on the 64bit TID. */
static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
		if (pdp->gtp_version == GTP_V0 &&
		    pdp->u.v0.tid == tid)
			return pdp;
	}
	return NULL;
}

/* Resolve a PDP context structure based on the 32bit TEI. */
static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
		if (pdp->gtp_version == GTP_V1 &&
		    pdp->u.v1.i_tei == tid)
			return pdp;
	}
	return NULL;
}

/* Resolve a PDP context based on IPv4 address of MS. */
static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
{
	struct hlist_head *head;
	struct pdp_ctx *pdp;

	head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size];

	hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
		if (pdp->af == AF_INET &&
		    pdp->ms_addr_ip4.s_addr == ms_addr)
			return pdp;
	}

	return NULL;
}

static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
				  unsigned int hdrlen, unsigned int role)
{
	struct iphdr *iph;

	if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
		return false;

	iph = (struct iphdr *)(skb->data + hdrlen);

	if (role == GTP_ROLE_SGSN)
		return iph->daddr == pctx->ms_addr_ip4.s_addr;
	else
		return iph->saddr == pctx->ms_addr_ip4.s_addr;
}

/* Check if the inner IP address in this packet is assigned to any
 * existing mobile subscriber.
 */
static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
			     unsigned int hdrlen, unsigned int role)
{
	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		return gtp_check_ms_ipv4(skb, pctx, hdrlen, role);
	}
	return false;
}

static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
			unsigned int hdrlen, unsigned int role)
{
	if (!gtp_check_ms(skb, pctx, hdrlen, role)) {
		netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
		return 1;
	}

	/* Get rid of the GTP + UDP headers. */
	if (iptunnel_pull_header(skb, hdrlen, skb->protocol,
				 !net_eq(sock_net(pctx->sk), dev_net(pctx->dev))))
		return -1;

	netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n");

	/* Now that the UDP and the GTP header have been removed, set up the
	 * new network header. This is required by the upper layer to
	 * calculate the transport header.
	 */
	skb_reset_network_header(skb);

	skb->dev = pctx->dev;

	dev_sw_netstats_rx_add(pctx->dev, skb->len);

	netif_rx(skb);
	return 0;
}

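/* Per GSM TS 09.60, the fixed GTPv0 header parsed below is 20 bytes:
 * flags, message type, 16-bit length, 16-bit sequence number, 16-bit
 * flow label, N-PDU number, three spare octets and the 64-bit TID.
 */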
/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
	unsigned int hdrlen = sizeof(struct udphdr) +
			      sizeof(struct gtp0_header);
	struct gtp0_header *gtp0;
	struct pdp_ctx *pctx;

	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));

	if ((gtp0->flags >> 5) != GTP_V0)
		return 1;

	if (gtp0->type != GTP_TPDU)
		return 1;

	pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
	if (!pctx) {
		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
		return 1;
	}

	return gtp_rx(pctx, skb, hdrlen, gtp->role);
}

static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
	unsigned int hdrlen = sizeof(struct udphdr) +
			      sizeof(struct gtp1_header);
	struct gtp1_header *gtp1;
	struct pdp_ctx *pctx;

	if (!pskb_may_pull(skb, hdrlen))
		return -1;

	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));

	if ((gtp1->flags >> 5) != GTP_V1)
		return 1;

	if (gtp1->type != GTP_TPDU)
		return 1;

	/* From 29.060: "This field shall be present if and only if any one or
	 * more of the S, PN and E flags are set.".
	 *
	 * If any of these flags is set, the optional 4-byte field (sequence
	 * number, N-PDU number and next extension header type) is present in
	 * the header, so account for it in hdrlen.
	 */
	if (gtp1->flags & GTP1_F_MASK)
		hdrlen += 4;

	/* Make sure the header is large enough, including extensions. */
	if (!pskb_may_pull(skb, hdrlen))
		return -1;

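	/* pskb_may_pull() above may have reallocated skb->data, so re-derive
	 * the header pointer before reading the TEI.
	 */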
	gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));

	pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
	if (!pctx) {
		netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
		return 1;
	}

	return gtp_rx(pctx, skb, hdrlen, gtp->role);
}

static void __gtp_encap_destroy(struct sock *sk)
{
	struct gtp_dev *gtp;

	lock_sock(sk);
	gtp = sk->sk_user_data;
	if (gtp) {
		if (gtp->sk0 == sk)
			gtp->sk0 = NULL;
		else
			gtp->sk1u = NULL;
		udp_sk(sk)->encap_type = 0;
		rcu_assign_sk_user_data(sk, NULL);
		release_sock(sk);
		sock_put(sk);
		return;
	}
	release_sock(sk);
}

static void gtp_encap_destroy(struct sock *sk)
{
	rtnl_lock();
	__gtp_encap_destroy(sk);
	rtnl_unlock();
}

static void gtp_encap_disable_sock(struct sock *sk)
{
	if (!sk)
		return;

	__gtp_encap_destroy(sk);
}

static void gtp_encap_disable(struct gtp_dev *gtp)
{
	gtp_encap_disable_sock(gtp->sk0);
	gtp_encap_disable_sock(gtp->sk1u);
}

/* UDP encapsulation receive handler. See net/ipv4/udp.c.
 * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket.
 */
static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct gtp_dev *gtp;
	int ret = 0;

	gtp = rcu_dereference_sk_user_data(sk);
	if (!gtp)
		return 1;

	netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);

	switch (udp_sk(sk)->encap_type) {
	case UDP_ENCAP_GTP0:
		netdev_dbg(gtp->dev, "received GTP0 packet\n");
		ret = gtp0_udp_encap_recv(gtp, skb);
		break;
	case UDP_ENCAP_GTP1U:
		netdev_dbg(gtp->dev, "received GTP1U packet\n");
		ret = gtp1u_udp_encap_recv(gtp, skb);
		break;
	default:
		ret = -1; /* Shouldn't happen. */
	}

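	/* Map the helper's verdict onto the encap_rcv contract: 1 lets the
	 * packet continue through the normal UDP receive path, 0 means it
	 * was consumed, and -1 becomes "consumed" after freeing the skb so
	 * that the UDP core does not touch it again.
	 */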
	switch (ret) {
	case 1:
		netdev_dbg(gtp->dev, "pass up to the process\n");
		break;
	case 0:
		break;
	case -1:
		netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
		kfree_skb(skb);
		ret = 0;
		break;
	}

	return ret;
}

static int gtp_dev_init(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	gtp->dev = dev;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}

static void gtp_dev_uninit(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	gtp_encap_disable(gtp);
	free_percpu(dev->tstats);
}

static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
					   const struct sock *sk,
					   __be32 daddr)
{
	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_oif		= sk->sk_bound_dev_if;
	fl4->daddr		= daddr;
	fl4->saddr		= inet_sk(sk)->inet_saddr;
	fl4->flowi4_tos		= RT_CONN_FLAGS(sk);
	fl4->flowi4_proto	= sk->sk_protocol;

	return ip_route_output_key(sock_net(sk), fl4);
}

static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
	int payload_len = skb->len;
	struct gtp0_header *gtp0;

	gtp0 = skb_push(skb, sizeof(*gtp0));

	gtp0->flags	= 0x1e; /* v0, GTP-non-prime. */
	gtp0->type	= GTP_TPDU;
	gtp0->length	= htons(payload_len);
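	/* 16-bit T-PDU sequence number, drawn from the per-context counter. */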
	gtp0->seq	= htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff);
	gtp0->flow	= htons(pctx->u.v0.flow);
	gtp0->number	= 0xff;
	gtp0->spare[0]	= gtp0->spare[1] = gtp0->spare[2] = 0xff;
	gtp0->tid	= cpu_to_be64(pctx->u.v0.tid);
}

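/* Per 3GPP TS 29.060, the mandatory GTPv1-U header is 8 bytes: flags,
 * message type, 16-bit payload length and the 32-bit tunnel endpoint
 * identifier.
 */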
static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
{
	int payload_len = skb->len;
	struct gtp1_header *gtp1;

	gtp1 = skb_push(skb, sizeof(*gtp1));

	/* Bits    8  7  6  5  4  3  2  1
	 *	  +--+--+--+--+--+--+--+--+
	 *	  |version |PT| 0| E| S|PN|
	 *	  +--+--+--+--+--+--+--+--+
	 *	    0  0  1  1  0  0  0  0
	 */
	gtp1->flags	= 0x30; /* v1, GTP-non-prime. */
	gtp1->type	= GTP_TPDU;
	gtp1->length	= htons(payload_len);
	gtp1->tid	= htonl(pctx->u.v1.o_tei);

	/* TODO: Support for extension header, sequence number and N-PDU.
	 *	 Update the length field if any of them is available.
	 */
}

struct gtp_pktinfo {
	struct sock		*sk;
	struct iphdr		*iph;
	struct flowi4		fl4;
	struct rtable		*rt;
	struct pdp_ctx		*pctx;
	struct net_device	*dev;
	__be16			gtph_port;
};

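/* Stamp the version-specific GTP header and record which well-known UDP
 * port the tunnel must use: 3386 (GTP0_PORT) for GTPv0 or 2152
 * (GTP1U_PORT) for GTPv1-U.
 */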
static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
{
	switch (pktinfo->pctx->gtp_version) {
	case GTP_V0:
		pktinfo->gtph_port = htons(GTP0_PORT);
		gtp0_push_header(skb, pktinfo->pctx);
		break;
	case GTP_V1:
		pktinfo->gtph_port = htons(GTP1U_PORT);
		gtp1_push_header(skb, pktinfo->pctx);
		break;
	}
}

static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
					struct sock *sk, struct iphdr *iph,
					struct pdp_ctx *pctx, struct rtable *rt,
					struct flowi4 *fl4,
					struct net_device *dev)
{
	pktinfo->sk	= sk;
	pktinfo->iph	= iph;
	pktinfo->pctx	= pctx;
	pktinfo->rt	= rt;
	pktinfo->fl4	= *fl4;
	pktinfo->dev	= dev;
}

static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
			     struct gtp_pktinfo *pktinfo)
{
	struct gtp_dev *gtp = netdev_priv(dev);
	struct pdp_ctx *pctx;
	struct rtable *rt;
	struct flowi4 fl4;
	struct iphdr *iph;
	__be16 df;
	int mtu;

	/* Read the IP destination address and resolve the PDP context.
	 * Prepend PDP header with TEI/TID from PDP ctx.
	 */
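	/* In SGSN role the mobile is the sender of the inner packet, so key
	 * the lookup on the inner source address; in the default GGSN role
	 * the mobile is the receiver.
	 */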
	iph = ip_hdr(skb);
	if (gtp->role == GTP_ROLE_SGSN)
		pctx = ipv4_pdp_find(gtp, iph->saddr);
	else
		pctx = ipv4_pdp_find(gtp, iph->daddr);

	if (!pctx) {
		netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
			   &iph->daddr);
		return -ENOENT;
	}
	netdev_dbg(dev, "found PDP context %p\n", pctx);

	rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to SGSN %pI4\n",
			   &pctx->peer_addr_ip4.s_addr);
		dev->stats.tx_carrier_errors++;
		goto err;
	}

	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to SGSN %pI4\n",
			   &pctx->peer_addr_ip4.s_addr);
		dev->stats.collisions++;
		goto err_rt;
	}

	skb_dst_drop(skb);

	/* This is similar to tnl_update_pmtu(). */
	df = iph->frag_off;
	if (df) {
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
			sizeof(struct iphdr) - sizeof(struct udphdr);
		switch (pctx->gtp_version) {
		case GTP_V0:
			mtu -= sizeof(struct gtp0_header);
			break;
		case GTP_V1:
			mtu -= sizeof(struct gtp1_header);
			break;
		}
	} else {
		mtu = dst_mtu(&rt->dst);
	}

	rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);

	if (iph->frag_off & htons(IP_DF) &&
	    ((!skb_is_gso(skb) && skb->len > mtu) ||
	     (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)))) {
		netdev_dbg(dev, "packet too big, fragmentation needed\n");
		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			      htonl(mtu));
		goto err_rt;
	}

	gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev);
	gtp_push_header(skb, pktinfo);

	return 0;
err_rt:
	ip_rt_put(rt);
err:
	return -EBADMSG;
}

static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int proto = ntohs(skb->protocol);
	struct gtp_pktinfo pktinfo;
	int err;

	/* Ensure there is sufficient headroom. */
	if (skb_cow_head(skb, dev->needed_headroom))
		goto tx_err;

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	skb_reset_inner_headers(skb);

	/* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
	rcu_read_lock();
	switch (proto) {
	case ETH_P_IP:
		err = gtp_build_skb_ip4(skb, dev, &pktinfo);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	rcu_read_unlock();

	if (err < 0)
		goto tx_err;

	switch (proto) {
	case ETH_P_IP:
		netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n",
			   &pktinfo.iph->saddr, &pktinfo.iph->daddr);
		udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
				    pktinfo.fl4.saddr, pktinfo.fl4.daddr,
				    pktinfo.iph->tos,
				    ip4_dst_hoplimit(&pktinfo.rt->dst),
				    0,
				    pktinfo.gtph_port, pktinfo.gtph_port,
				    true, false);
		break;
	}

	return NETDEV_TX_OK;
tx_err:
	dev->stats.tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops gtp_netdev_ops = {
	.ndo_init		= gtp_dev_init,
	.ndo_uninit		= gtp_dev_uninit,
	.ndo_start_xmit		= gtp_dev_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
};

static void gtp_link_setup(struct net_device *dev)
{
	dev->netdev_ops		= &gtp_netdev_ops;
	dev->needs_free_netdev	= true;

	dev->hard_header_len = 0;
	dev->addr_len = 0;

	/* Zero header length. */
	dev->type = ARPHRD_NONE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;

	dev->priv_flags	|= IFF_NO_QUEUE;
	dev->features	|= NETIF_F_LLTX;
	netif_keep_dst(dev);

	/* Assume the largest header, i.e. GTPv0. */
	dev->needed_headroom	= LL_MAX_HEADER +
				  sizeof(struct iphdr) +
				  sizeof(struct udphdr) +
				  sizeof(struct gtp0_header);
}

static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);

static void gtp_destructor(struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	kfree(gtp->addr_hash);
	kfree(gtp->tid_hash);
}

static int gtp_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	struct gtp_dev *gtp;
	struct gtp_net *gn;
	int hashsize, err;

	if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
		return -EINVAL;

	gtp = netdev_priv(dev);

	if (!data[IFLA_GTP_PDP_HASHSIZE]) {
		hashsize = 1024;
	} else {
		hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
		if (!hashsize)
			hashsize = 1024;
	}

	err = gtp_hashtable_new(gtp, hashsize);
	if (err < 0)
		return err;

	err = gtp_encap_enable(gtp, data);
	if (err < 0)
		goto out_hashtable;

	err = register_netdevice(dev);
	if (err < 0) {
		netdev_dbg(dev, "failed to register new netdev %d\n", err);
		goto out_encap;
	}

	gn = net_generic(dev_net(dev), gtp_net_id);
	list_add_rcu(&gtp->list, &gn->gtp_dev_list);
	dev->priv_destructor = gtp_destructor;

	netdev_dbg(dev, "registered new GTP interface\n");

	return 0;

out_encap:
	gtp_encap_disable(gtp);
out_hashtable:
	kfree(gtp->addr_hash);
	kfree(gtp->tid_hash);
	return err;
}
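
/* A gtp device is created over rtnetlink with at least one of
 * IFLA_GTP_FD0 (a userspace UDP socket bound to port 3386) and
 * IFLA_GTP_FD1 (one bound to port 2152); userspace typically drives
 * this through libgtpnl. IFLA_GTP_PDP_HASHSIZE is optional and falls
 * back to 1024 buckets when absent or zero.
 */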

static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
	struct gtp_dev *gtp = netdev_priv(dev);
	struct pdp_ctx *pctx;
	int i;

	for (i = 0; i < gtp->hash_size; i++)
		hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
			pdp_context_delete(pctx);

	list_del_rcu(&gtp->list);
	unregister_netdevice_queue(dev, head);
}

static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
	[IFLA_GTP_FD0]			= { .type = NLA_U32 },
	[IFLA_GTP_FD1]			= { .type = NLA_U32 },
	[IFLA_GTP_PDP_HASHSIZE]		= { .type = NLA_U32 },
	[IFLA_GTP_ROLE]			= { .type = NLA_U32 },
};

static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (!data)
		return -EINVAL;

	return 0;
}

static size_t gtp_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32));	/* IFLA_GTP_PDP_HASHSIZE */
}

static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct gtp_dev *gtp = netdev_priv(dev);

	if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops gtp_link_ops __read_mostly = {
	.kind		= "gtp",
	.maxtype	= IFLA_GTP_MAX,
	.policy		= gtp_policy,
	.priv_size	= sizeof(struct gtp_dev),
	.setup		= gtp_link_setup,
	.validate	= gtp_validate,
	.newlink	= gtp_newlink,
	.dellink	= gtp_dellink,
	.get_size	= gtp_get_size,
	.fill_info	= gtp_fill_info,
};

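/* Both tables are sized from the user-supplied hashsize, hence the
 * __GFP_NOWARN on the allocations below: an oversized request should
 * fail quietly with -ENOMEM instead of triggering an allocation splat.
 */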
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
{
	int i;

	gtp->addr_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
				       GFP_KERNEL | __GFP_NOWARN);
	if (gtp->addr_hash == NULL)
		return -ENOMEM;

	gtp->tid_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
				      GFP_KERNEL | __GFP_NOWARN);
	if (gtp->tid_hash == NULL)
		goto err1;

	gtp->hash_size = hsize;

	for (i = 0; i < hsize; i++) {
		INIT_HLIST_HEAD(&gtp->addr_hash[i]);
		INIT_HLIST_HEAD(&gtp->tid_hash[i]);
	}
	return 0;
err1:
	kfree(gtp->addr_hash);
	return -ENOMEM;
}

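/* Take over a userspace UDP socket as a GTP encapsulation socket. The
 * reference taken by sock_hold() here is dropped again in
 * __gtp_encap_destroy().
 */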
static struct sock *gtp_encap_enable_socket(int fd, int type,
					    struct gtp_dev *gtp)
{
	struct udp_tunnel_sock_cfg tuncfg = {NULL};
	struct socket *sock;
	struct sock *sk;
	int err;

	pr_debug("enable gtp on %d, %d\n", fd, type);

	sock = sockfd_lookup(fd, &err);
	if (!sock) {
		pr_debug("gtp socket fd=%d not found\n", fd);
		return ERR_PTR(err);
	}

	sk = sock->sk;
	if (sk->sk_protocol != IPPROTO_UDP ||
	    sk->sk_type != SOCK_DGRAM ||
	    (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
		pr_debug("socket fd=%d not UDP\n", fd);
		sk = ERR_PTR(-EINVAL);
		goto out_sock;
	}

	lock_sock(sk);
	if (sk->sk_user_data) {
		sk = ERR_PTR(-EBUSY);
		goto out_rel_sock;
	}

	sock_hold(sk);

	tuncfg.sk_user_data = gtp;
	tuncfg.encap_type = type;
	tuncfg.encap_rcv = gtp_encap_recv;
	tuncfg.encap_destroy = gtp_encap_destroy;

	setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);

out_rel_sock:
	release_sock(sock->sk);
out_sock:
	sockfd_put(sock);
	return sk;
}

static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
{
	struct sock *sk1u = NULL;
	struct sock *sk0 = NULL;
	unsigned int role = GTP_ROLE_GGSN;

	if (data[IFLA_GTP_FD0]) {
		u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]);

		sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp);
		if (IS_ERR(sk0))
			return PTR_ERR(sk0);
	}

	if (data[IFLA_GTP_FD1]) {
		u32 fd1 = nla_get_u32(data[IFLA_GTP_FD1]);

		sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp);
		if (IS_ERR(sk1u)) {
			gtp_encap_disable_sock(sk0);
			return PTR_ERR(sk1u);
		}
	}

	if (data[IFLA_GTP_ROLE]) {
		role = nla_get_u32(data[IFLA_GTP_ROLE]);
		if (role > GTP_ROLE_SGSN) {
			gtp_encap_disable_sock(sk0);
			gtp_encap_disable_sock(sk1u);
			return -EINVAL;
		}
	}

	gtp->sk0 = sk0;
	gtp->sk1u = sk1u;
	gtp->role = role;

	return 0;
}

static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])
{
	struct gtp_dev *gtp = NULL;
	struct net_device *dev;
	struct net *net;

	/* Examine the link attributes and figure out which network namespace
	 * we are talking about.
	 */
	if (nla[GTPA_NET_NS_FD])
		net = get_net_ns_by_fd(nla_get_u32(nla[GTPA_NET_NS_FD]));
	else
		net = get_net(src_net);

	if (IS_ERR(net))
		return NULL;

	/* Check if there's an existing gtpX device to configure */
	dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK]));
	if (dev && dev->netdev_ops == &gtp_netdev_ops)
		gtp = netdev_priv(dev);

	put_net(net);
	return gtp;
}

static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
{
	pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
	pctx->af = AF_INET;
	pctx->peer_addr_ip4.s_addr =
		nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
	pctx->ms_addr_ip4.s_addr =
		nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);

	switch (pctx->gtp_version) {
	case GTP_V0:
		/* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow
		 * label needs to be the same for uplink and downlink packets,
		 * so let's annotate this.
		 */
		pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]);
		pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]);
		break;
	case GTP_V1:
		pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]);
		pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]);
		break;
	default:
		break;
	}
}

static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
				   struct genl_info *info)
{
	struct pdp_ctx *pctx, *pctx_tid = NULL;
	struct net_device *dev = gtp->dev;
	u32 hash_ms, hash_tid = 0;
	unsigned int version;
	bool found = false;
	__be32 ms_addr;

	ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
	hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
	version = nla_get_u32(info->attrs[GTPA_VERSION]);

	pctx = ipv4_pdp_find(gtp, ms_addr);
	if (pctx)
		found = true;
	if (version == GTP_V0)
		pctx_tid = gtp0_pdp_find(gtp,
					 nla_get_u64(info->attrs[GTPA_TID]));
	else if (version == GTP_V1)
		pctx_tid = gtp1_pdp_find(gtp,
					 nla_get_u32(info->attrs[GTPA_I_TEI]));
	if (pctx_tid)
		found = true;

	if (found) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			return ERR_PTR(-EEXIST);
		if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
			return ERR_PTR(-EOPNOTSUPP);

		if (pctx && pctx_tid)
			return ERR_PTR(-EEXIST);
		if (!pctx)
			pctx = pctx_tid;

		ipv4_pdp_fill(pctx, info);

		if (pctx->gtp_version == GTP_V0)
			netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n",
				   pctx->u.v0.tid, pctx);
		else if (pctx->gtp_version == GTP_V1)
			netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n",
				   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);

		return pctx;
	}

	pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
	if (pctx == NULL)
		return ERR_PTR(-ENOMEM);

	sock_hold(sk);
	pctx->sk = sk;
	pctx->dev = gtp->dev;
	ipv4_pdp_fill(pctx, info);
	atomic_set(&pctx->tx_seq, 0);

	switch (pctx->gtp_version) {
	case GTP_V0:
		/* TS 09.60: "The flow label identifies unambiguously a GTP
		 * flow.". We use the tid for this instead; I cannot find a
		 * situation in which this doesn't unambiguously identify the
		 * PDP context.
		 */
		hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size;
		break;
	case GTP_V1:
		hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size;
		break;
	}

	hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]);
	hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]);

	switch (pctx->gtp_version) {
	case GTP_V0:
		netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
			   pctx->u.v0.tid, &pctx->peer_addr_ip4,
			   &pctx->ms_addr_ip4, pctx);
		break;
	case GTP_V1:
		netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
			   pctx->u.v1.i_tei, pctx->u.v1.o_tei,
			   &pctx->peer_addr_ip4, &pctx->ms_addr_ip4, pctx);
		break;
	}

	return pctx;
}

static void pdp_context_free(struct rcu_head *head)
{
	struct pdp_ctx *pctx = container_of(head, struct pdp_ctx, rcu_head);

	sock_put(pctx->sk);
	kfree(pctx);
}

static void pdp_context_delete(struct pdp_ctx *pctx)
{
	hlist_del_rcu(&pctx->hlist_tid);
	hlist_del_rcu(&pctx->hlist_addr);
	call_rcu(&pctx->rcu_head, pdp_context_free);
}

static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation);

static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
{
	unsigned int version;
	struct pdp_ctx *pctx;
	struct gtp_dev *gtp;
	struct sock *sk;
	int err;

	if (!info->attrs[GTPA_VERSION] ||
	    !info->attrs[GTPA_LINK] ||
	    !info->attrs[GTPA_PEER_ADDRESS] ||
	    !info->attrs[GTPA_MS_ADDRESS])
		return -EINVAL;

	version = nla_get_u32(info->attrs[GTPA_VERSION]);

	switch (version) {
	case GTP_V0:
		if (!info->attrs[GTPA_TID] ||
		    !info->attrs[GTPA_FLOW])
			return -EINVAL;
		break;
	case GTP_V1:
		if (!info->attrs[GTPA_I_TEI] ||
		    !info->attrs[GTPA_O_TEI])
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	rtnl_lock();

	gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
	if (!gtp) {
		err = -ENODEV;
		goto out_unlock;
	}

	if (version == GTP_V0)
		sk = gtp->sk0;
	else if (version == GTP_V1)
		sk = gtp->sk1u;
	else
		sk = NULL;

	if (!sk) {
		err = -ENODEV;
		goto out_unlock;
	}

	pctx = gtp_pdp_add(gtp, sk, info);
	if (IS_ERR(pctx)) {
		err = PTR_ERR(pctx);
	} else {
		gtp_tunnel_notify(pctx, GTP_CMD_NEWPDP, GFP_KERNEL);
		err = 0;
	}

out_unlock:
	rtnl_unlock();
	return err;
}

static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net,
					    struct nlattr *nla[])
{
	struct gtp_dev *gtp;

	gtp = gtp_find_dev(net, nla);
	if (!gtp)
		return ERR_PTR(-ENODEV);

	if (nla[GTPA_MS_ADDRESS]) {
		__be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]);

		return ipv4_pdp_find(gtp, ip);
	} else if (nla[GTPA_VERSION]) {
		u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]);

		if (gtp_version == GTP_V0 && nla[GTPA_TID])
			return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID]));
		else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI])
			return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI]));
	}

	return ERR_PTR(-EINVAL);
}

static struct pdp_ctx *gtp_find_pdp(struct net *net, struct nlattr *nla[])
{
	struct pdp_ctx *pctx;

	if (nla[GTPA_LINK])
		pctx = gtp_find_pdp_by_link(net, nla);
	else
		pctx = ERR_PTR(-EINVAL);

	if (!pctx)
		pctx = ERR_PTR(-ENOENT);

	return pctx;
}

static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
{
	struct pdp_ctx *pctx;
	int err = 0;

	if (!info->attrs[GTPA_VERSION])
		return -EINVAL;

	rcu_read_lock();

	pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
	if (IS_ERR(pctx)) {
		err = PTR_ERR(pctx);
		goto out_unlock;
	}

	if (pctx->gtp_version == GTP_V0)
		netdev_dbg(pctx->dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
			   pctx->u.v0.tid, pctx);
	else if (pctx->gtp_version == GTP_V1)
		netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
			   pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);

	gtp_tunnel_notify(pctx, GTP_CMD_DELPDP, GFP_ATOMIC);
	pdp_context_delete(pctx);

out_unlock:
	rcu_read_unlock();
	return err;
}

static struct genl_family gtp_genl_family;

enum gtp_multicast_groups {
	GTP_GENL_MCGRP,
};

static const struct genl_multicast_group gtp_genl_mcgrps[] = {
	[GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME },
};

static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
			      int flags, u32 type, struct pdp_ctx *pctx)
{
	void *genlh;

	genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
			    type);
	if (genlh == NULL)
		goto nlmsg_failure;

	if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
	    nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) ||
	    nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) ||
	    nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
		goto nla_put_failure;

	switch (pctx->gtp_version) {
	case GTP_V0:
		if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
		    nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow))
			goto nla_put_failure;
		break;
	case GTP_V1:
		if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) ||
		    nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei))
			goto nla_put_failure;
		break;
	}
	genlmsg_end(skb, genlh);
	return 0;

nlmsg_failure:
nla_put_failure:
	genlmsg_cancel(skb, genlh);
	return -EMSGSIZE;
}

static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation)
{
	struct sk_buff *msg;
	int ret;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, allocation);
	if (!msg)
		return -ENOMEM;

	ret = gtp_genl_fill_info(msg, 0, 0, 0, cmd, pctx);
	if (ret < 0) {
		nlmsg_free(msg);
		return ret;
	}

	ret = genlmsg_multicast_netns(&gtp_genl_family, dev_net(pctx->dev), msg,
				      0, GTP_GENL_MCGRP, GFP_ATOMIC);
	return ret;
}

static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
{
	struct pdp_ctx *pctx = NULL;
	struct sk_buff *skb2;
	int err;

	if (!info->attrs[GTPA_VERSION])
		return -EINVAL;

	rcu_read_lock();

	pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
	if (IS_ERR(pctx)) {
		err = PTR_ERR(pctx);
		goto err_unlock;
	}

	skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (skb2 == NULL) {
		err = -ENOMEM;
		goto err_unlock;
	}

	err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq,
				 0, info->nlhdr->nlmsg_type, pctx);
	if (err < 0)
		goto err_unlock_free;

	rcu_read_unlock();
	return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid);

err_unlock_free:
	kfree_skb(skb2);
err_unlock:
	rcu_read_unlock();
	return err;
}

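/* The dump walks every gtp device and every tid_hash bucket, parking
 * its iteration state in cb->args: [0] is the bucket, [1] the number of
 * entries to skip within it, [2] the device to resume at and [4] is set
 * once the walk has completed.
 */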
static int gtp_genl_dump_pdp(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
	int i, j, bucket = cb->args[0], skip = cb->args[1];
	struct net *net = sock_net(skb->sk);
	struct pdp_ctx *pctx;
	struct gtp_net *gn;

	gn = net_generic(net, gtp_net_id);

	if (cb->args[4])
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
		if (last_gtp && last_gtp != gtp)
			continue;
		else
			last_gtp = NULL;

		for (i = bucket; i < gtp->hash_size; i++) {
			j = 0;
			hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i],
						 hlist_tid) {
				if (j >= skip &&
				    gtp_genl_fill_info(skb,
					    NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    NLM_F_MULTI,
					    cb->nlh->nlmsg_type, pctx)) {
					cb->args[0] = i;
					cb->args[1] = j;
					cb->args[2] = (unsigned long)gtp;
					goto out;
				}
				j++;
			}
			skip = 0;
		}
		bucket = 0;
	}
	cb->args[4] = 1;
out:
	rcu_read_unlock();
	return skb->len;
}

static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
	[GTPA_LINK]		= { .type = NLA_U32, },
	[GTPA_VERSION]		= { .type = NLA_U32, },
	[GTPA_TID]		= { .type = NLA_U64, },
	[GTPA_PEER_ADDRESS]	= { .type = NLA_U32, },
	[GTPA_MS_ADDRESS]	= { .type = NLA_U32, },
	[GTPA_FLOW]		= { .type = NLA_U16, },
	[GTPA_NET_NS_FD]	= { .type = NLA_U32, },
	[GTPA_I_TEI]		= { .type = NLA_U32, },
	[GTPA_O_TEI]		= { .type = NLA_U32, },
};

static const struct genl_small_ops gtp_genl_ops[] = {
	{
		.cmd = GTP_CMD_NEWPDP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = gtp_genl_new_pdp,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = GTP_CMD_DELPDP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = gtp_genl_del_pdp,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = GTP_CMD_GETPDP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = gtp_genl_get_pdp,
		.dumpit = gtp_genl_dump_pdp,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family gtp_genl_family __ro_after_init = {
	.name		= "gtp",
	.version	= 0,
	.hdrsize	= 0,
	.maxattr	= GTPA_MAX,
	.policy		= gtp_genl_policy,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.small_ops	= gtp_genl_ops,
	.n_small_ops	= ARRAY_SIZE(gtp_genl_ops),
	.mcgrps		= gtp_genl_mcgrps,
	.n_mcgrps	= ARRAY_SIZE(gtp_genl_mcgrps),
};
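
/* PDP contexts are managed over this generic netlink family ("gtp"):
 * GTP_CMD_NEWPDP, GTP_CMD_DELPDP and GTP_CMD_GETPDP map onto the
 * doit/dumpit handlers above, and add/delete notifications are sent to
 * the multicast group. Userspace typically uses libgtpnl for this.
 */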

static int __net_init gtp_net_init(struct net *net)
{
	struct gtp_net *gn = net_generic(net, gtp_net_id);

	INIT_LIST_HEAD(&gn->gtp_dev_list);
	return 0;
}

static void __net_exit gtp_net_exit(struct net *net)
{
	struct gtp_net *gn = net_generic(net, gtp_net_id);
	struct gtp_dev *gtp;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(gtp, &gn->gtp_dev_list, list)
		gtp_dellink(gtp->dev, &list);

	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations gtp_net_ops = {
	.init	= gtp_net_init,
	.exit	= gtp_net_exit,
	.id	= &gtp_net_id,
	.size	= sizeof(struct gtp_net),
};

static int __init gtp_init(void)
{
	int err;

	get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));

	err = rtnl_link_register(&gtp_link_ops);
	if (err < 0)
		goto error_out;

	err = genl_register_family(&gtp_genl_family);
	if (err < 0)
		goto unreg_rtnl_link;

	err = register_pernet_subsys(&gtp_net_ops);
	if (err < 0)
		goto unreg_genl_family;

	pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
		sizeof(struct pdp_ctx));
	return 0;

unreg_genl_family:
	genl_unregister_family(&gtp_genl_family);
unreg_rtnl_link:
	rtnl_link_unregister(&gtp_link_ops);
error_out:
	pr_err("error loading GTP module\n");
	return err;
}
late_initcall(gtp_init);

static void __exit gtp_fini(void)
{
	genl_unregister_family(&gtp_genl_family);
	rtnl_link_unregister(&gtp_link_ops);
	unregister_pernet_subsys(&gtp_net_ops);

	pr_info("GTP module unloaded\n");
}
module_exit(gtp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("gtp");
MODULE_ALIAS_GENL_FAMILY("gtp");