1// SPDX-License-Identifier: GPL-2.0-or-later
2/* L2TPv3 IP encapsulation support
3 *
4 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
5 */
6
7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
9#include <asm/ioctls.h>
10#include <linux/icmp.h>
11#include <linux/module.h>
12#include <linux/skbuff.h>
13#include <linux/random.h>
14#include <linux/socket.h>
15#include <linux/l2tp.h>
16#include <linux/in.h>
17#include <net/sock.h>
18#include <net/ip.h>
19#include <net/icmp.h>
20#include <net/udp.h>
21#include <net/inet_common.h>
22#include <net/tcp_states.h>
23#include <net/protocol.h>
24#include <net/xfrm.h>
25
26#include "l2tp_core.h"
27
28struct l2tp_ip_sock {
29	/* inet_sock has to be the first member of l2tp_ip_sock */
30	struct inet_sock	inet;
31
32	u32			conn_id;	/* connection ID of the local tunnel (set by bind) */
33	u32			peer_conn_id;	/* connection ID of the peer's tunnel (set by connect) */
34};
35
36static DEFINE_RWLOCK(l2tp_ip_lock);
37static struct hlist_head l2tp_ip_table;
38static struct hlist_head l2tp_ip_bind_table;
39
40static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
41{
42	return (struct l2tp_ip_sock *)sk;
43}
44
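/* Find a socket in l2tp_ip_bind_table that matches the given namespace,
 * addresses, ingress interface and tunnel ID. The address and interface
 * checks are skipped when either side is zero (wildcard); the connection
 * ID must always match. Caller must hold l2tp_ip_lock.
 */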
45static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
46					  __be32 raddr, int dif, u32 tunnel_id)
47{
48	struct sock *sk;
49
50	sk_for_each_bound(sk, &l2tp_ip_bind_table) {
51		const struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
52		const struct inet_sock *inet = inet_sk(sk);
53
54		if (!net_eq(sock_net(sk), net))
55			continue;
56
57		if (sk->sk_bound_dev_if && dif && sk->sk_bound_dev_if != dif)
58			continue;
59
60		if (inet->inet_rcv_saddr && laddr &&
61		    inet->inet_rcv_saddr != laddr)
62			continue;
63
64		if (inet->inet_daddr && raddr && inet->inet_daddr != raddr)
65			continue;
66
67		if (l2tp->conn_id != tunnel_id)
68			continue;
69
70		goto found;
71	}
72
73	sk = NULL;
74found:
75	return sk;
76}
77
78/* When processing receive frames, there are two cases to
79 * consider. Data frames consist of a non-zero session-id and an
80 * optional cookie. Control frames consist of a regular L2TP header
81 * preceded by 32 bits of zeros.
82 *
83 * L2TPv3 Session Header Over IP
84 *
85 *  0                   1                   2                   3
86 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
87 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
88 * |                           Session ID                          |
89 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
90 * |               Cookie (optional, maximum 64 bits)...
91 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
92 *                                                                 |
93 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
94 *
95 * L2TPv3 Control Message Header Over IP
96 *
97 *  0                   1                   2                   3
98 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
99 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
100 * |                      (32 bits of zeros)                       |
101 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
102 * |T|L|x|x|S|x|x|x|x|x|x|x|  Ver  |             Length            |
103 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
104 * |                     Control Connection ID                     |
105 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
106 * |               Ns              |               Nr              |
107 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
108 *
109 * All control frames are passed to userspace.
110 */
111static int l2tp_ip_recv(struct sk_buff *skb)
112{
113	struct net *net = dev_net(skb->dev);
114	struct sock *sk;
115	u32 session_id;
116	u32 tunnel_id;
117	unsigned char *ptr, *optr;
118	struct l2tp_session *session;
119	struct l2tp_tunnel *tunnel = NULL;
120	struct iphdr *iph;
121
122	if (!pskb_may_pull(skb, 4))
123		goto discard;
124
125	/* Point to L2TP header */
126	optr = skb->data;
127	ptr = skb->data;
128	session_id = ntohl(*((__be32 *)ptr));
129	ptr += 4;
130
131	/* RFC3931: L2TP/IP packets have the first 4 bytes containing
132	 * the session_id. If it is 0, the packet is an L2TP control
133	 * frame and the session_id value can be discarded.
134	 */
135	if (session_id == 0) {
136		__skb_pull(skb, 4);
137		goto pass_up;
138	}
139
140	/* OK, this is a data packet. Look up the session. */
141	session = l2tp_session_get(net, session_id);
142	if (!session)
143		goto discard;
144
145	tunnel = session->tunnel;
146	if (!tunnel)
147		goto discard_sess;
148
149	if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
150		goto discard_sess;
151
152	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
153	l2tp_session_dec_refcount(session);
154
155	return 0;
156
157pass_up:
158	/* Get the tunnel_id from the L2TP header */
159	if (!pskb_may_pull(skb, 12))
160		goto discard;
161
162	if ((skb->data[0] & 0xc0) != 0xc0)
163		goto discard;
164
165	tunnel_id = ntohl(*(__be32 *)&skb->data[4]);
166	iph = (struct iphdr *)skb_network_header(skb);
167
168	read_lock_bh(&l2tp_ip_lock);
169	sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, inet_iif(skb),
170				   tunnel_id);
171	if (!sk) {
172		read_unlock_bh(&l2tp_ip_lock);
173		goto discard;
174	}
175	sock_hold(sk);
176	read_unlock_bh(&l2tp_ip_lock);
177
178	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
179		goto discard_put;
180
181	nf_reset_ct(skb);
182
183	return sk_receive_skb(sk, skb, 1);
184
185discard_sess:
186	l2tp_session_dec_refcount(session);
187	goto discard;
188
189discard_put:
190	sock_put(sk);
191
192discard:
193	kfree_skb(skb);
194	return 0;
195}
196
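/* Unbound sockets live in l2tp_ip_table; l2tp_ip_bind() moves them to
 * l2tp_ip_bind_table. Both lists are protected by l2tp_ip_lock.
 */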
197static int l2tp_ip_hash(struct sock *sk)
198{
199	if (sk_unhashed(sk)) {
200		write_lock_bh(&l2tp_ip_lock);
201		sk_add_node(sk, &l2tp_ip_table);
202		write_unlock_bh(&l2tp_ip_lock);
203	}
204	return 0;
205}
206
207static void l2tp_ip_unhash(struct sock *sk)
208{
209	if (sk_unhashed(sk))
210		return;
211	write_lock_bh(&l2tp_ip_lock);
212	sk_del_node_init(sk);
213	write_unlock_bh(&l2tp_ip_lock);
214}
215
216static int l2tp_ip_open(struct sock *sk)
217{
218	/* Prevent autobind. We don't have ports. */
219	inet_sk(sk)->inet_num = IPPROTO_L2TP;
220
221	l2tp_ip_hash(sk);
222	return 0;
223}
224
225static void l2tp_ip_close(struct sock *sk, long timeout)
226{
227	write_lock_bh(&l2tp_ip_lock);
228	hlist_del_init(&sk->sk_bind_node);
229	sk_del_node_init(sk);
230	write_unlock_bh(&l2tp_ip_lock);
231	sk_common_release(sk);
232}
233
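/* Free any frames still queued for transmit and delete the tunnel
 * context attached to this socket, if one exists.
 */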
234static void l2tp_ip_destroy_sock(struct sock *sk)
235{
236	struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
237	struct sk_buff *skb;
238
239	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
240		kfree_skb(skb);
241
242	if (tunnel)
243		l2tp_tunnel_delete(tunnel);
244}
245
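/* Bind the socket to a local address and L2TP connection ID. A socket may
 * be bound only once (SOCK_ZAPPED is cleared on success); the address, if
 * given, must be local, multicast or broadcast; and the bind fails with
 * -EADDRINUSE if another socket already covers the same address, device
 * and connection ID.
 */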
246static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
247{
248	struct inet_sock *inet = inet_sk(sk);
249	struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *)uaddr;
250	struct net *net = sock_net(sk);
251	int ret;
252	int chk_addr_ret;
253
254	if (addr_len < sizeof(struct sockaddr_l2tpip))
255		return -EINVAL;
256	if (addr->l2tp_family != AF_INET)
257		return -EINVAL;
258
259	lock_sock(sk);
260
261	ret = -EINVAL;
262	if (!sock_flag(sk, SOCK_ZAPPED))
263		goto out;
264
265	if (sk->sk_state != TCP_CLOSE)
266		goto out;
267
268	chk_addr_ret = inet_addr_type(net, addr->l2tp_addr.s_addr);
269	ret = -EADDRNOTAVAIL;
270	if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
271	    chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
272		goto out;
273
274	if (addr->l2tp_addr.s_addr) {
275		inet->inet_rcv_saddr = addr->l2tp_addr.s_addr;
276		inet->inet_saddr = addr->l2tp_addr.s_addr;
277	}
278	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
279		inet->inet_saddr = 0;  /* Use device */
280
281	write_lock_bh(&l2tp_ip_lock);
282	if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0,
283				  sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
284		write_unlock_bh(&l2tp_ip_lock);
285		ret = -EADDRINUSE;
286		goto out;
287	}
288
289	sk_dst_reset(sk);
290	l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;
291
292	sk_add_bind_node(sk, &l2tp_ip_bind_table);
293	sk_del_node_init(sk);
294	write_unlock_bh(&l2tp_ip_lock);
295
296	ret = 0;
297	sock_reset_flag(sk, SOCK_ZAPPED);
298
299out:
300	release_sock(sk);
301
302	return ret;
303}
304
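/* Connect the socket to a peer address and connection ID. The socket must
 * already be bound (there is no autobind); on success it is rehashed in
 * l2tp_ip_bind_table so that lookups also take the peer address into
 * account.
 */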
305static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
306{
307	struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
308	int rc;
309
310	if (addr_len < sizeof(*lsa))
311		return -EINVAL;
312
313	if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
314		return -EINVAL;
315
316	lock_sock(sk);
317
318	/* Must bind first - autobinding does not work */
319	if (sock_flag(sk, SOCK_ZAPPED)) {
320		rc = -EINVAL;
321		goto out_sk;
322	}
323
324	rc = __ip4_datagram_connect(sk, uaddr, addr_len);
325	if (rc < 0)
326		goto out_sk;
327
328	l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
329
330	write_lock_bh(&l2tp_ip_lock);
331	hlist_del_init(&sk->sk_bind_node);
332	sk_add_bind_node(sk, &l2tp_ip_bind_table);
333	write_unlock_bh(&l2tp_ip_lock);
334
335out_sk:
336	release_sock(sk);
337
338	return rc;
339}
340
341static int l2tp_ip_disconnect(struct sock *sk, int flags)
342{
343	if (sock_flag(sk, SOCK_ZAPPED))
344		return 0;
345
346	return __udp_disconnect(sk, flags);
347}
348
349static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
350			   int peer)
351{
352	struct sock *sk		= sock->sk;
353	struct inet_sock *inet	= inet_sk(sk);
354	struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
355	struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
356
357	memset(lsa, 0, sizeof(*lsa));
358	lsa->l2tp_family = AF_INET;
359	if (peer) {
360		if (!inet->inet_dport)
361			return -ENOTCONN;
362		lsa->l2tp_conn_id = lsk->peer_conn_id;
363		lsa->l2tp_addr.s_addr = inet->inet_daddr;
364	} else {
365		__be32 addr = inet->inet_rcv_saddr;
366
367		if (!addr)
368			addr = inet->inet_saddr;
369		lsa->l2tp_conn_id = lsk->conn_id;
370		lsa->l2tp_addr.s_addr = addr;
371	}
372	return sizeof(*lsa);
373}
374
375static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
376{
377	int rc;
378
379	/* Charge it to the socket, dropping if the queue is full. */
380	rc = sock_queue_rcv_skb(sk, skb);
381	if (rc < 0)
382		goto drop;
383
384	return 0;
385
386drop:
387	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
388	kfree_skb(skb);
389	return 0;
390}
391
392/* Userspace will call sendmsg() on the tunnel socket to send L2TP
393 * control frames.
394 */
395static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
396{
397	struct sk_buff *skb;
398	int rc;
399	struct inet_sock *inet = inet_sk(sk);
400	struct rtable *rt = NULL;
401	struct flowi4 *fl4;
402	int connected = 0;
403	__be32 daddr;
404
405	lock_sock(sk);
406
407	rc = -ENOTCONN;
408	if (sock_flag(sk, SOCK_DEAD))
409		goto out;
410
411	/* Get and verify the address. */
412	if (msg->msg_name) {
413		DECLARE_SOCKADDR(struct sockaddr_l2tpip *, lip, msg->msg_name);
414
415		rc = -EINVAL;
416		if (msg->msg_namelen < sizeof(*lip))
417			goto out;
418
419		if (lip->l2tp_family != AF_INET) {
420			rc = -EAFNOSUPPORT;
421			if (lip->l2tp_family != AF_UNSPEC)
422				goto out;
423		}
424
425		daddr = lip->l2tp_addr.s_addr;
426	} else {
427		rc = -EDESTADDRREQ;
428		if (sk->sk_state != TCP_ESTABLISHED)
429			goto out;
430
431		daddr = inet->inet_daddr;
432		connected = 1;
433	}
434
435	/* Allocate a socket buffer */
436	rc = -ENOMEM;
437	skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
438			   4 + len, 0, GFP_KERNEL);
439	if (!skb)
440		goto error;
441
442	/* Reserve space for headers, putting IP header on 4-byte boundary. */
443	skb_reserve(skb, 2 + NET_SKB_PAD);
444	skb_reset_network_header(skb);
445	skb_reserve(skb, sizeof(struct iphdr));
446	skb_reset_transport_header(skb);
447
448	/* Insert 0 session_id */
449	*((__be32 *)skb_put(skb, 4)) = 0;
450
451	/* Copy user data into skb */
452	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
453	if (rc < 0) {
454		kfree_skb(skb);
455		goto error;
456	}
457
458	fl4 = &inet->cork.fl.u.ip4;
459	if (connected)
460		rt = (struct rtable *)__sk_dst_check(sk, 0);
461
462	rcu_read_lock();
463	if (!rt) {
464		const struct ip_options_rcu *inet_opt;
465
466		inet_opt = rcu_dereference(inet->inet_opt);
467
468		/* Use correct destination address if we have options. */
469		if (inet_opt && inet_opt->opt.srr)
470			daddr = inet_opt->opt.faddr;
471
472	/* If this fails, the transport layer's retransmit mechanism
473	 * will keep trying until a route appears or the connection
474	 * times out.
475		 */
476		rt = ip_route_output_ports(sock_net(sk), fl4, sk,
477					   daddr, inet->inet_saddr,
478					   inet->inet_dport, inet->inet_sport,
479					   sk->sk_protocol, RT_CONN_FLAGS(sk),
480					   sk->sk_bound_dev_if);
481		if (IS_ERR(rt))
482			goto no_route;
483		if (connected) {
484			sk_setup_caps(sk, &rt->dst);
485		} else {
486			skb_dst_set(skb, &rt->dst);
487			goto xmit;
488		}
489	}
490
491	/* We don't need to clone the dst here; it is guaranteed not to disappear.
492	 * __dev_xmit_skb() might force a refcount if needed.
493	 */
494	skb_dst_set_noref(skb, &rt->dst);
495
496xmit:
497	/* Queue the packet to IP for output */
498	rc = ip_queue_xmit(sk, skb, &inet->cork.fl);
499	rcu_read_unlock();
500
501error:
502	if (rc >= 0)
503		rc = len;
504
505out:
506	release_sock(sk);
507	return rc;
508
509no_route:
510	rcu_read_unlock();
511	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
512	kfree_skb(skb);
513	rc = -EHOSTUNREACH;
514	goto out;
515}
516
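/* Dequeue one received frame and copy it to userspace, filling in the
 * sender's IP address through msg_name when one is supplied.
 */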
517static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
518			   size_t len, int noblock, int flags, int *addr_len)
519{
520	struct inet_sock *inet = inet_sk(sk);
521	size_t copied = 0;
522	int err = -EOPNOTSUPP;
523	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
524	struct sk_buff *skb;
525
526	if (flags & MSG_OOB)
527		goto out;
528
529	skb = skb_recv_datagram(sk, flags, noblock, &err);
530	if (!skb)
531		goto out;
532
533	copied = skb->len;
534	if (len < copied) {
535		msg->msg_flags |= MSG_TRUNC;
536		copied = len;
537	}
538
539	err = skb_copy_datagram_msg(skb, 0, msg, copied);
540	if (err)
541		goto done;
542
543	sock_recv_timestamp(msg, sk, skb);
544
545	/* Copy the address. */
546	if (sin) {
547		sin->sin_family = AF_INET;
548		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
549		sin->sin_port = 0;
550		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
551		*addr_len = sizeof(*sin);
552	}
553	if (inet->cmsg_flags)
554		ip_cmsg_recv(msg, skb);
555	if (flags & MSG_TRUNC)
556		copied = skb->len;
557done:
558	skb_free_datagram(sk, skb);
559out:
560	return err ? err : copied;
561}
562
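/* SIOCOUTQ reports the number of bytes queued for transmit, SIOCINQ the
 * length of the next pending receive frame. Exported so that other L2TP
 * socket families (l2tp_ip6) can reuse the same implementation.
 */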
563int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg)
564{
565	struct sk_buff *skb;
566	int amount;
567
568	switch (cmd) {
569	case SIOCOUTQ:
570		amount = sk_wmem_alloc_get(sk);
571		break;
572	case SIOCINQ:
573		spin_lock_bh(&sk->sk_receive_queue.lock);
574		skb = skb_peek(&sk->sk_receive_queue);
575		amount = skb ? skb->len : 0;
576		spin_unlock_bh(&sk->sk_receive_queue.lock);
577		break;
578
579	default:
580		return -ENOIOCTLCMD;
581	}
582
583	return put_user(amount, (int __user *)arg);
584}
585EXPORT_SYMBOL_GPL(l2tp_ioctl);
586
587static struct proto l2tp_ip_prot = {
588	.name		   = "L2TP/IP",
589	.owner		   = THIS_MODULE,
590	.init		   = l2tp_ip_open,
591	.close		   = l2tp_ip_close,
592	.bind		   = l2tp_ip_bind,
593	.connect	   = l2tp_ip_connect,
594	.disconnect	   = l2tp_ip_disconnect,
595	.ioctl		   = l2tp_ioctl,
596	.destroy	   = l2tp_ip_destroy_sock,
597	.setsockopt	   = ip_setsockopt,
598	.getsockopt	   = ip_getsockopt,
599	.sendmsg	   = l2tp_ip_sendmsg,
600	.recvmsg	   = l2tp_ip_recvmsg,
601	.backlog_rcv	   = l2tp_ip_backlog_recv,
602	.hash		   = l2tp_ip_hash,
603	.unhash		   = l2tp_ip_unhash,
604	.obj_size	   = sizeof(struct l2tp_ip_sock),
605};
606
607static const struct proto_ops l2tp_ip_ops = {
608	.family		   = PF_INET,
609	.owner		   = THIS_MODULE,
610	.release	   = inet_release,
611	.bind		   = inet_bind,
612	.connect	   = inet_dgram_connect,
613	.socketpair	   = sock_no_socketpair,
614	.accept		   = sock_no_accept,
615	.getname	   = l2tp_ip_getname,
616	.poll		   = datagram_poll,
617	.ioctl		   = inet_ioctl,
618	.gettstamp	   = sock_gettstamp,
619	.listen		   = sock_no_listen,
620	.shutdown	   = inet_shutdown,
621	.setsockopt	   = sock_common_setsockopt,
622	.getsockopt	   = sock_common_getsockopt,
623	.sendmsg	   = inet_sendmsg,
624	.recvmsg	   = sock_common_recvmsg,
625	.mmap		   = sock_no_mmap,
626	.sendpage	   = sock_no_sendpage,
627};
628
629static struct inet_protosw l2tp_ip_protosw = {
630	.type		= SOCK_DGRAM,
631	.protocol	= IPPROTO_L2TP,
632	.prot		= &l2tp_ip_prot,
633	.ops		= &l2tp_ip_ops,
634};
635
636static struct net_protocol l2tp_ip_protocol __read_mostly = {
637	.handler	= l2tp_ip_recv,
638	.netns_ok	= 1,
639};
640
641static int __init l2tp_ip_init(void)
642{
643	int err;
644
645	pr_info("L2TP IP encapsulation support (L2TPv3)\n");
646
647	err = proto_register(&l2tp_ip_prot, 1);
648	if (err != 0)
649		goto out;
650
651	err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
652	if (err)
653		goto out1;
654
655	inet_register_protosw(&l2tp_ip_protosw);
656	return 0;
657
658out1:
659	proto_unregister(&l2tp_ip_prot);
660out:
661	return err;
662}
663
664static void __exit l2tp_ip_exit(void)
665{
666	inet_unregister_protosw(&l2tp_ip_protosw);
667	inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
668	proto_unregister(&l2tp_ip_prot);
669}
670
671module_init(l2tp_ip_init);
672module_exit(l2tp_ip_exit);
673
674MODULE_LICENSE("GPL");
675MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
676MODULE_DESCRIPTION("L2TP over IP");
677MODULE_VERSION("1.0");
678
679/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
680 * enums
681 */
682MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
683MODULE_ALIAS_NET_PF_PROTO(PF_INET, IPPROTO_L2TP);
684
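/* Usage sketch (userspace, illustrative only; not part of this file): an
 * unmanaged L2TPv3/IP tunnel socket is created with
 * socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP) and bound to a local address
 * and connection ID through a struct sockaddr_l2tpip. The connection ID
 * below is an arbitrary example value.
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);
 *	struct sockaddr_l2tpip local = { 0 };
 *
 *	local.l2tp_family      = AF_INET;
 *	local.l2tp_addr.s_addr = htonl(INADDR_ANY);
 *	local.l2tp_conn_id     = 42;
 *	bind(fd, (struct sockaddr *)&local, sizeof(local));
 *
 * Control messages are then exchanged on fd with sendmsg()/recvmsg(), and
 * tunnel/session contexts are typically created over this socket via the
 * L2TP netlink API.
 */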