xref: /kernel/linux/linux-5.10/net/ipv6/exthdrs.c (revision 8c2ecf20)
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 *	Extension Header handling for IPv6
4 *	Linux INET6 implementation
5 *
6 *	Authors:
7 *	Pedro Roque		<roque@di.fc.ul.pt>
8 *	Andi Kleen		<ak@muc.de>
9 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
10 */
11
12/* Changes:
13 *	yoshfuji		: ensure not to overrun while parsing
14 *				  tlv options.
15 *	Mitsuru KANDA @USAGI and: Remove ipv6_parse_exthdrs().
16 *	YOSHIFUJI Hideaki @USAGI  Register inbound extension header
17 *				  handlers as inet6_protocol{}.
18 */
19
20#include <linux/errno.h>
21#include <linux/types.h>
22#include <linux/socket.h>
23#include <linux/sockios.h>
24#include <linux/net.h>
25#include <linux/netdevice.h>
26#include <linux/in6.h>
27#include <linux/icmpv6.h>
28#include <linux/slab.h>
29#include <linux/export.h>
30
31#include <net/dst.h>
32#include <net/sock.h>
33#include <net/snmp.h>
34
35#include <net/ipv6.h>
36#include <net/protocol.h>
37#include <net/transp_v6.h>
38#include <net/rawv6.h>
39#include <net/ndisc.h>
40#include <net/ip6_route.h>
41#include <net/addrconf.h>
42#include <net/calipso.h>
43#if IS_ENABLED(CONFIG_IPV6_MIP6)
44#include <net/xfrm.h>
45#endif
46#include <linux/seg6.h>
47#include <net/seg6.h>
48#ifdef CONFIG_IPV6_SEG6_HMAC
49#include <net/seg6_hmac.h>
50#endif
51#include <net/rpl.h>
52
53#include <linux/uaccess.h>
54
55/*
56 *	Parsing tlv encoded headers.
57 *
58 *	The parsing function "func" returns true if parsing succeeded
59 *	and false if it failed.
60 *	It MUST NOT touch skb->h.
61 */
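
/*
 *	For reference, each option inside a Hop-by-Hop or Destination
 *	Options header is encoded as a TLV (RFC 8200, section 4.2):
 *
 *	  +--------+--------+- - - - - - - - -
 *	  |  Type  | Length |  Option Data (Length octets)
 *	  +--------+--------+- - - - - - - - -
 *
 *	Handlers below receive the offset of the Type octet, counted from
 *	the start of the network header.
 */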
62
63struct tlvtype_proc {
64	int	type;
65	bool	(*func)(struct sk_buff *skb, int offset);
66};
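
/*
 * A minimal, hypothetical handler table, sketched after the real tables
 * further down (tlvprocdestopt_lst, tlvprochopopt_lst); ip6_parse_tlv()
 * walks the options and dispatches on .type, and the array is terminated
 * by a negative type:
 *
 *	static const struct tlvtype_proc example_lst[] = {
 *		{ .type = IPV6_TLV_ROUTERALERT, .func = ipv6_hop_ra },
 *		{ -1, }
 *	};
 */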
67
68/*********************
69  Generic functions
70 *********************/
71
72/* An unknown option is detected, decide what to do */
73
74static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff,
75			       bool disallow_unknowns)
76{
77	if (disallow_unknowns) {
78		/* If unknown TLVs are disallowed by configuration
79		 * then always silently drop packet. Note this also
80		 * means no ICMP parameter problem is sent which
81		 * could be a good property to mitigate a reflection DOS
82		 * attack.
83		 */
84
85		goto drop;
86	}
87
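	/* The two highest-order bits of the option type tell a receiver
	 * what to do with an unrecognised option (RFC 8200, section 4.2):
	 * 00 skip, 01 discard, 10 discard + ICMP Parameter Problem,
	 * 11 discard + ICMP only if the destination was not multicast.
	 */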
88	switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) {
89	case 0: /* ignore */
90		return true;
91
92	case 1: /* drop packet */
93		break;
94
95	case 3: /* Send ICMP if not a multicast address and drop packet */
96		/* Actually, this check is redundant; icmpv6_send()
97		   will recheck in any case.
98		 */
99		if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
100			break;
101		fallthrough;
102	case 2: /* send ICMP PARM PROB regardless and drop packet */
103		icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
104		return false;
105	}
106
107drop:
108	kfree_skb(skb);
109	return false;
110}
111
112/* Parse tlv encoded option header (hop-by-hop or destination) */
113
114static bool ip6_parse_tlv(const struct tlvtype_proc *procs,
115			  struct sk_buff *skb,
116			  int max_count)
117{
118	int len = (skb_transport_header(skb)[1] + 1) << 3;
119	const unsigned char *nh = skb_network_header(skb);
120	int off = skb_network_header_len(skb);
121	const struct tlvtype_proc *curr;
122	bool disallow_unknowns = false;
123	int tlv_count = 0;
124	int padlen = 0;
125
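	/* The callers encode "disallow unknown TLVs" as a negative limit:
	 * take the absolute value as the per-header TLV count limit and
	 * drop packets carrying options we have no handler for.
	 */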
126	if (unlikely(max_count < 0)) {
127		disallow_unknowns = true;
128		max_count = -max_count;
129	}
130
131	if (skb_transport_offset(skb) + len > skb_headlen(skb))
132		goto bad;
133
134	off += 2;
135	len -= 2;
136
137	while (len > 0) {
138		int optlen, i;
139
140		if (nh[off] == IPV6_TLV_PAD1) {
141			padlen++;
142			if (padlen > 7)
143				goto bad;
144			off++;
145			len--;
146			continue;
147		}
148		if (len < 2)
149			goto bad;
150		optlen = nh[off + 1] + 2;
151		if (optlen > len)
152			goto bad;
153
154		if (nh[off] == IPV6_TLV_PADN) {
155			/* RFC 2460 states that the purpose of PadN is
156			 * to align the containing header to multiples
157			 * of 8. 7 is therefore the highest valid value.
158			 * See also RFC 4942, Section 2.1.9.5.
159			 */
160			padlen += optlen;
161			if (padlen > 7)
162				goto bad;
163			/* RFC 4942 recommends that receiving hosts
164			 * actively check that the PadN payload contains
165			 * only zeroes.
166			 */
167			for (i = 2; i < optlen; i++) {
168				if (nh[off + i] != 0)
169					goto bad;
170			}
171		} else {
172			tlv_count++;
173			if (tlv_count > max_count)
174				goto bad;
175
176			for (curr = procs; curr->type >= 0; curr++) {
177				if (curr->type == nh[off]) {
178					/* type specific length/alignment
179					   checks will be performed in the
180					   func(). */
181					if (curr->func(skb, off) == false)
182						return false;
183					break;
184				}
185			}
186			if (curr->type < 0 &&
187			    !ip6_tlvopt_unknown(skb, off, disallow_unknowns))
188				return false;
189
190			padlen = 0;
191		}
192		off += optlen;
193		len -= optlen;
194	}
195
196	if (len == 0)
197		return true;
198bad:
199	kfree_skb(skb);
200	return false;
201}
202
203/*****************************
204  Destination options header.
205 *****************************/
206
207#if IS_ENABLED(CONFIG_IPV6_MIP6)
208static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
209{
210	struct ipv6_destopt_hao *hao;
211	struct inet6_skb_parm *opt = IP6CB(skb);
212	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
213	int ret;
214
215	if (opt->dsthao) {
216		net_dbg_ratelimited("hao duplicated\n");
217		goto discard;
218	}
219	opt->dsthao = opt->dst1;
220	opt->dst1 = 0;
221
222	hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff);
223
224	if (hao->length != 16) {
225		net_dbg_ratelimited("hao invalid option length = %d\n",
226				    hao->length);
227		goto discard;
228	}
229
230	if (!(ipv6_addr_type(&hao->addr) & IPV6_ADDR_UNICAST)) {
231		net_dbg_ratelimited("hao is not a unicast addr: %pI6\n",
232				    &hao->addr);
233		goto discard;
234	}
235
236	ret = xfrm6_input_addr(skb, (xfrm_address_t *)&ipv6h->daddr,
237			       (xfrm_address_t *)&hao->addr, IPPROTO_DSTOPTS);
238	if (unlikely(ret < 0))
239		goto discard;
240
241	if (skb_cloned(skb)) {
242		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
243			goto discard;
244
245		/* update all variables used below to point into the copied skbuff */
246		hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) +
247						  optoff);
248		ipv6h = ipv6_hdr(skb);
249	}
250
251	if (skb->ip_summed == CHECKSUM_COMPLETE)
252		skb->ip_summed = CHECKSUM_NONE;
253
254	swap(ipv6h->saddr, hao->addr);
255
256	if (skb->tstamp == 0)
257		__net_timestamp(skb);
258
259	return true;
260
261 discard:
262	kfree_skb(skb);
263	return false;
264}
265#endif
266
267static const struct tlvtype_proc tlvprocdestopt_lst[] = {
268#if IS_ENABLED(CONFIG_IPV6_MIP6)
269	{
270		.type	= IPV6_TLV_HAO,
271		.func	= ipv6_dest_hao,
272	},
273#endif
274	{-1,			NULL}
275};
276
277static int ipv6_destopt_rcv(struct sk_buff *skb)
278{
279	struct inet6_dev *idev = __in6_dev_get(skb->dev);
280	struct inet6_skb_parm *opt = IP6CB(skb);
281#if IS_ENABLED(CONFIG_IPV6_MIP6)
282	__u16 dstbuf;
283#endif
284	struct dst_entry *dst = skb_dst(skb);
285	struct net *net = dev_net(skb->dev);
286	int extlen;
287
288	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
289	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
290				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
291		__IP6_INC_STATS(dev_net(dst->dev), idev,
292				IPSTATS_MIB_INHDRERRORS);
293fail_and_free:
294		kfree_skb(skb);
295		return -1;
296	}
297
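	/* Hdr Ext Len is expressed in 8-octet units, not including the
	 * first 8 octets, hence the "+ 1" and the shift.
	 */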
298	extlen = (skb_transport_header(skb)[1] + 1) << 3;
299	if (extlen > net->ipv6.sysctl.max_dst_opts_len)
300		goto fail_and_free;
301
302	opt->lastopt = opt->dst1 = skb_network_header_len(skb);
303#if IS_ENABLED(CONFIG_IPV6_MIP6)
304	dstbuf = opt->dst1;
305#endif
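	/* With MIP6 the HAO handler moves opt->dst1 into opt->dsthao and
	 * clears it, so the offset is saved above for use as nhoff below.
	 */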
306
307	if (ip6_parse_tlv(tlvprocdestopt_lst, skb,
308			  net->ipv6.sysctl.max_dst_opts_cnt)) {
309		skb->transport_header += extlen;
310		opt = IP6CB(skb);
311#if IS_ENABLED(CONFIG_IPV6_MIP6)
312		opt->nhoff = dstbuf;
313#else
314		opt->nhoff = opt->dst1;
315#endif
316		return 1;
317	}
318
319	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
320	return -1;
321}
322
323static void seg6_update_csum(struct sk_buff *skb)
324{
325	struct ipv6_sr_hdr *hdr;
326	struct in6_addr *addr;
327	__be32 from, to;
328
329	/* srh is at transport offset and seg_left is already decremented
330	 * but daddr is not yet updated with next segment
331	 */
332
333	hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);
334	addr = hdr->segments + hdr->segments_left;
335
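	/* segments_left shares the first 32-bit word of the SRH with
	 * nexthdr, hdrlen and type: briefly undo the decrement so "from"
	 * captures the word as it was on the wire, then redo it for "to".
	 */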
336	hdr->segments_left++;
337	from = *(__be32 *)hdr;
338
339	hdr->segments_left--;
340	to = *(__be32 *)hdr;
341
342	/* update skb csum with diff resulting from seg_left decrement */
343
344	update_csum_diff4(skb, from, to);
345
346	/* compute csum diff between current and next segment and update */
347
348	update_csum_diff16(skb, (__be32 *)(&ipv6_hdr(skb)->daddr),
349			   (__be32 *)addr);
350}
351
352static int ipv6_srh_rcv(struct sk_buff *skb)
353{
354	struct inet6_skb_parm *opt = IP6CB(skb);
355	struct net *net = dev_net(skb->dev);
356	struct ipv6_sr_hdr *hdr;
357	struct inet6_dev *idev;
358	struct in6_addr *addr;
359	int accept_seg6;
360
361	hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);
362
363	idev = __in6_dev_get(skb->dev);
364
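	/* SRH processing must be permitted both globally and on the
	 * ingress device; use the more restrictive of the two settings.
	 */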
365	accept_seg6 = net->ipv6.devconf_all->seg6_enabled;
366	if (accept_seg6 > idev->cnf.seg6_enabled)
367		accept_seg6 = idev->cnf.seg6_enabled;
368
369	if (!accept_seg6) {
370		kfree_skb(skb);
371		return -1;
372	}
373
374#ifdef CONFIG_IPV6_SEG6_HMAC
375	if (!seg6_hmac_validate_skb(skb)) {
376		kfree_skb(skb);
377		return -1;
378	}
379#endif
380
381looped_back:
382	if (hdr->segments_left == 0) {
383		if (hdr->nexthdr == NEXTHDR_IPV6) {
384			int offset = (hdr->hdrlen + 1) << 3;
385
386			skb_postpull_rcsum(skb, skb_network_header(skb),
387					   skb_network_header_len(skb));
388
389			if (!pskb_pull(skb, offset)) {
390				kfree_skb(skb);
391				return -1;
392			}
393			skb_postpull_rcsum(skb, skb_transport_header(skb),
394					   offset);
395
396			skb_reset_network_header(skb);
397			skb_reset_transport_header(skb);
398			skb->encapsulation = 0;
399
400			__skb_tunnel_rx(skb, skb->dev, net);
401
402			netif_rx(skb);
403			return -1;
404		}
405
406		opt->srcrt = skb_network_header_len(skb);
407		opt->lastopt = opt->srcrt;
408		skb->transport_header += (hdr->hdrlen + 1) << 3;
409		opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);
410
411		return 1;
412	}
413
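	/* hdrlen counts 8-octet units and each segment is 16 octets, so
	 * hdrlen >> 1 is the number of listed segments (assuming no
	 * trailing TLVs); segments_left must index one of them.
	 */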
414	if (hdr->segments_left >= (hdr->hdrlen >> 1)) {
415		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
416		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
417				  ((&hdr->segments_left) -
418				   skb_network_header(skb)));
419		return -1;
420	}
421
422	if (skb_cloned(skb)) {
423		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
424			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
425					IPSTATS_MIB_OUTDISCARDS);
426			kfree_skb(skb);
427			return -1;
428		}
429	}
430
431	hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);
432
433	hdr->segments_left--;
434	addr = hdr->segments + hdr->segments_left;
435
436	skb_push(skb, sizeof(struct ipv6hdr));
437
438	if (skb->ip_summed == CHECKSUM_COMPLETE)
439		seg6_update_csum(skb);
440
441	ipv6_hdr(skb)->daddr = *addr;
442
443	skb_dst_drop(skb);
444
445	ip6_route_input(skb);
446
447	if (skb_dst(skb)->error) {
448		dst_input(skb);
449		return -1;
450	}
451
452	if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
453		if (ipv6_hdr(skb)->hop_limit <= 1) {
454			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
455			icmpv6_send(skb, ICMPV6_TIME_EXCEED,
456				    ICMPV6_EXC_HOPLIMIT, 0);
457			kfree_skb(skb);
458			return -1;
459		}
460		ipv6_hdr(skb)->hop_limit--;
461
462		skb_pull(skb, sizeof(struct ipv6hdr));
463		goto looped_back;
464	}
465
466	dst_input(skb);
467
468	return -1;
469}
470
471static int ipv6_rpl_srh_rcv(struct sk_buff *skb)
472{
473	struct ipv6_rpl_sr_hdr *hdr, *ohdr, *chdr;
474	struct inet6_skb_parm *opt = IP6CB(skb);
475	struct net *net = dev_net(skb->dev);
476	struct inet6_dev *idev;
477	struct ipv6hdr *oldhdr;
478	struct in6_addr addr;
479	unsigned char *buf;
480	int accept_rpl_seg;
481	int i, err;
482	u64 n = 0;
483	u32 r;
484
485	idev = __in6_dev_get(skb->dev);
486
487	accept_rpl_seg = net->ipv6.devconf_all->rpl_seg_enabled;
488	if (accept_rpl_seg > idev->cnf.rpl_seg_enabled)
489		accept_rpl_seg = idev->cnf.rpl_seg_enabled;
490
491	if (!accept_rpl_seg) {
492		kfree_skb(skb);
493		return -1;
494	}
495
496looped_back:
497	hdr = (struct ipv6_rpl_sr_hdr *)skb_transport_header(skb);
498
499	if (hdr->segments_left == 0) {
500		if (hdr->nexthdr == NEXTHDR_IPV6) {
501			int offset = (hdr->hdrlen + 1) << 3;
502
503			skb_postpull_rcsum(skb, skb_network_header(skb),
504					   skb_network_header_len(skb));
505
506			if (!pskb_pull(skb, offset)) {
507				kfree_skb(skb);
508				return -1;
509			}
510			skb_postpull_rcsum(skb, skb_transport_header(skb),
511					   offset);
512
513			skb_reset_network_header(skb);
514			skb_reset_transport_header(skb);
515			skb->encapsulation = 0;
516
517			__skb_tunnel_rx(skb, skb->dev, net);
518
519			netif_rx(skb);
520			return -1;
521		}
522
523		opt->srcrt = skb_network_header_len(skb);
524		opt->lastopt = opt->srcrt;
525		skb->transport_header += (hdr->hdrlen + 1) << 3;
526		opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);
527
528		return 1;
529	}
530
531	if (!pskb_may_pull(skb, sizeof(*hdr))) {
532		kfree_skb(skb);
533		return -1;
534	}
535
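	/* Per RFC 6554, every segment except the last is compressed to
	 * (16 - cmpri) octets and the last one to (16 - cmpre) octets,
	 * followed by "pad" octets of padding; n is therefore the number
	 * of non-final segments carried by the header.
	 */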
536	n = (hdr->hdrlen << 3) - hdr->pad - (16 - hdr->cmpre);
537	r = do_div(n, (16 - hdr->cmpri));
538	/* Check that the division left no remainder and that n fits into
539	 * segments_left, which is an unsigned char and so cannot
540	 * represent anything larger.
541	 */
542	if (r || (n + 1) > 255) {
543		kfree_skb(skb);
544		return -1;
545	}
546
547	if (hdr->segments_left > n + 1) {
548		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
549		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
550				  ((&hdr->segments_left) -
551				   skb_network_header(skb)));
552		return -1;
553	}
554
555	if (!pskb_may_pull(skb, ipv6_rpl_srh_size(n, hdr->cmpri,
556						  hdr->cmpre))) {
557		kfree_skb(skb);
558		return -1;
559	}
560
561	hdr->segments_left--;
562	i = n - hdr->segments_left;
563
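	/* Scratch space for two worst-case SRHs: the fully decompressed
	 * copy (ohdr) followed by the recompressed copy (chdr) that is
	 * written back into the packet below.
	 */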
564	buf = kcalloc(struct_size(hdr, segments.addr, n + 2), 2, GFP_ATOMIC);
565	if (unlikely(!buf)) {
566		kfree_skb(skb);
567		return -1;
568	}
569
570	ohdr = (struct ipv6_rpl_sr_hdr *)buf;
571	ipv6_rpl_srh_decompress(ohdr, hdr, &ipv6_hdr(skb)->daddr, n);
572	chdr = (struct ipv6_rpl_sr_hdr *)(buf + ((ohdr->hdrlen + 1) << 3));
573
574	if ((ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST) ||
575	    (ipv6_addr_type(&ohdr->rpl_segaddr[i]) & IPV6_ADDR_MULTICAST)) {
576		kfree_skb(skb);
577		kfree(buf);
578		return -1;
579	}
580
581	err = ipv6_chk_rpl_srh_loop(net, ohdr->rpl_segaddr, n + 1);
582	if (err) {
583		icmpv6_send(skb, ICMPV6_PARAMPROB, 0, 0);
584		kfree_skb(skb);
585		kfree(buf);
586		return -1;
587	}
588
589	addr = ipv6_hdr(skb)->daddr;
590	ipv6_hdr(skb)->daddr = ohdr->rpl_segaddr[i];
591	ohdr->rpl_segaddr[i] = addr;
592
593	ipv6_rpl_srh_compress(chdr, ohdr, &ipv6_hdr(skb)->daddr, n);
594
595	oldhdr = ipv6_hdr(skb);
596
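	/* Strip the received SRH; the packet front is rebuilt below from
	 * a copy of the IPv6 header plus the recompressed SRH (chdr),
	 * whose size may differ from the original.
	 */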
597	skb_pull(skb, ((hdr->hdrlen + 1) << 3));
598	skb_postpull_rcsum(skb, oldhdr,
599			   sizeof(struct ipv6hdr) + ((hdr->hdrlen + 1) << 3));
600	if (unlikely(!hdr->segments_left)) {
601		if (pskb_expand_head(skb, sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3), 0,
602				     GFP_ATOMIC)) {
603			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS);
604			kfree_skb(skb);
605			kfree(buf);
606			return -1;
607		}
608
609		oldhdr = ipv6_hdr(skb);
610	}
611	skb_push(skb, ((chdr->hdrlen + 1) << 3) + sizeof(struct ipv6hdr));
612	skb_reset_network_header(skb);
613	skb_mac_header_rebuild(skb);
614	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
615
616	memmove(ipv6_hdr(skb), oldhdr, sizeof(struct ipv6hdr));
617	memcpy(skb_transport_header(skb), chdr, (chdr->hdrlen + 1) << 3);
618
619	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
620	skb_postpush_rcsum(skb, ipv6_hdr(skb),
621			   sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3));
622
623	kfree(buf);
624
625	skb_dst_drop(skb);
626
627	ip6_route_input(skb);
628
629	if (skb_dst(skb)->error) {
630		dst_input(skb);
631		return -1;
632	}
633
634	if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
635		if (ipv6_hdr(skb)->hop_limit <= 1) {
636			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
637			icmpv6_send(skb, ICMPV6_TIME_EXCEED,
638				    ICMPV6_EXC_HOPLIMIT, 0);
639			kfree_skb(skb);
640			return -1;
641		}
642		ipv6_hdr(skb)->hop_limit--;
643
644		skb_pull(skb, sizeof(struct ipv6hdr));
645		goto looped_back;
646	}
647
648	dst_input(skb);
649
650	return -1;
651}
652
653/********************************
654  Routing header.
655 ********************************/
656
657/* called with rcu_read_lock() */
658static int ipv6_rthdr_rcv(struct sk_buff *skb)
659{
660	struct inet6_dev *idev = __in6_dev_get(skb->dev);
661	struct inet6_skb_parm *opt = IP6CB(skb);
662	struct in6_addr *addr = NULL;
663	struct in6_addr daddr;
664	int n, i;
665	struct ipv6_rt_hdr *hdr;
666	struct rt0_hdr *rthdr;
667	struct net *net = dev_net(skb->dev);
668	int accept_source_route = net->ipv6.devconf_all->accept_source_route;
669
670	idev = __in6_dev_get(skb->dev);
671	if (idev && accept_source_route > idev->cnf.accept_source_route)
672		accept_source_route = idev->cnf.accept_source_route;
673
674	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
675	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
676				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
677		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
678		kfree_skb(skb);
679		return -1;
680	}
681
682	hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);
683
684	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ||
685	    skb->pkt_type != PACKET_HOST) {
686		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
687		kfree_skb(skb);
688		return -1;
689	}
690
691	switch (hdr->type) {
692	case IPV6_SRCRT_TYPE_4:
693		/* segment routing */
694		return ipv6_srh_rcv(skb);
695	case IPV6_SRCRT_TYPE_3:
696		/* rpl segment routing */
697		return ipv6_rpl_srh_rcv(skb);
698	default:
699		break;
700	}
701
702looped_back:
703	if (hdr->segments_left == 0) {
704		switch (hdr->type) {
705#if IS_ENABLED(CONFIG_IPV6_MIP6)
706		case IPV6_SRCRT_TYPE_2:
707			/* Silently discard a type 2 header unless this
708			 * node has processed it itself
709			 */
710			if (!addr) {
711				__IP6_INC_STATS(net, idev,
712						IPSTATS_MIB_INADDRERRORS);
713				kfree_skb(skb);
714				return -1;
715			}
716			break;
717#endif
718		default:
719			break;
720		}
721
722		opt->lastopt = opt->srcrt = skb_network_header_len(skb);
723		skb->transport_header += (hdr->hdrlen + 1) << 3;
724		opt->dst0 = opt->dst1;
725		opt->dst1 = 0;
726		opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);
727		return 1;
728	}
729
730	switch (hdr->type) {
731#if IS_ENABLED(CONFIG_IPV6_MIP6)
732	case IPV6_SRCRT_TYPE_2:
733		if (accept_source_route < 0)
734			goto unknown_rh;
735		/* Silently discard invalid RTH type 2 */
736		if (hdr->hdrlen != 2 || hdr->segments_left != 1) {
737			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
738			kfree_skb(skb);
739			return -1;
740		}
741		break;
742#endif
743	default:
744		goto unknown_rh;
745	}
746
747	/*
748	 *	This is the routing header forwarding algorithm from
749	 *	RFC 2460, page 16.
750	 */
751
752	n = hdr->hdrlen >> 1;
753
754	if (hdr->segments_left > n) {
755		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
756		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
757				  ((&hdr->segments_left) -
758				   skb_network_header(skb)));
759		return -1;
760	}
761
762	/* We are about to mangle the packet header. Be careful!
763	   Do not damage packets queued somewhere.
764	 */
765	if (skb_cloned(skb)) {
766		/* the copy is a forwarded packet */
767		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
768			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
769					IPSTATS_MIB_OUTDISCARDS);
770			kfree_skb(skb);
771			return -1;
772		}
773		hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);
774	}
775
776	if (skb->ip_summed == CHECKSUM_COMPLETE)
777		skb->ip_summed = CHECKSUM_NONE;
778
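	/* Consume one segment: i is the 1-based index (from the start of
	 * the address list) of the next address to visit.
	 */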
779	i = n - --hdr->segments_left;
780
781	rthdr = (struct rt0_hdr *) hdr;
782	addr = rthdr->addr;
783	addr += i - 1;
784
785	switch (hdr->type) {
786#if IS_ENABLED(CONFIG_IPV6_MIP6)
787	case IPV6_SRCRT_TYPE_2:
788		if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
789				     (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
790				     IPPROTO_ROUTING) < 0) {
791			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
792			kfree_skb(skb);
793			return -1;
794		}
795		if (!ipv6_chk_home_addr(dev_net(skb_dst(skb)->dev), addr)) {
796			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
797			kfree_skb(skb);
798			return -1;
799		}
800		break;
801#endif
802	default:
803		break;
804	}
805
806	if (ipv6_addr_is_multicast(addr)) {
807		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
808		kfree_skb(skb);
809		return -1;
810	}
811
812	daddr = *addr;
813	*addr = ipv6_hdr(skb)->daddr;
814	ipv6_hdr(skb)->daddr = daddr;
815
816	skb_dst_drop(skb);
817	ip6_route_input(skb);
818	if (skb_dst(skb)->error) {
819		skb_push(skb, skb->data - skb_network_header(skb));
820		dst_input(skb);
821		return -1;
822	}
823
824	if (skb_dst(skb)->dev->flags&IFF_LOOPBACK) {
825		if (ipv6_hdr(skb)->hop_limit <= 1) {
826			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
827			icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
828				    0);
829			kfree_skb(skb);
830			return -1;
831		}
832		ipv6_hdr(skb)->hop_limit--;
833		goto looped_back;
834	}
835
836	skb_push(skb, skb->data - skb_network_header(skb));
837	dst_input(skb);
838	return -1;
839
840unknown_rh:
841	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
842	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
843			  (&hdr->type) - skb_network_header(skb));
844	return -1;
845}
846
847static const struct inet6_protocol rthdr_protocol = {
848	.handler	=	ipv6_rthdr_rcv,
849	.flags		=	INET6_PROTO_NOPOLICY,
850};
851
852static const struct inet6_protocol destopt_protocol = {
853	.handler	=	ipv6_destopt_rcv,
854	.flags		=	INET6_PROTO_NOPOLICY,
855};
856
857static const struct inet6_protocol nodata_protocol = {
858	.handler	=	dst_discard,
859	.flags		=	INET6_PROTO_NOPOLICY,
860};
861
862int __init ipv6_exthdrs_init(void)
863{
864	int ret;
865
866	ret = inet6_add_protocol(&rthdr_protocol, IPPROTO_ROUTING);
867	if (ret)
868		goto out;
869
870	ret = inet6_add_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
871	if (ret)
872		goto out_rthdr;
873
874	ret = inet6_add_protocol(&nodata_protocol, IPPROTO_NONE);
875	if (ret)
876		goto out_destopt;
877
878out:
879	return ret;
880out_destopt:
881	inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
882out_rthdr:
883	inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
884	goto out;
885}
886
887void ipv6_exthdrs_exit(void)
888{
889	inet6_del_protocol(&nodata_protocol, IPPROTO_NONE);
890	inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
891	inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
892}
893
894/**********************************
895  Hop-by-hop options.
896 **********************************/
897
898/*
899 * Note: we cannot rely on skb_dst(skb) before we assign it in ip6_route_input().
900 */
901static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb)
902{
903	return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev);
904}
905
906static inline struct net *ipv6_skb_net(struct sk_buff *skb)
907{
908	return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev);
909}
910
911/* Router Alert as of RFC 2711 */
912
913static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
914{
915	const unsigned char *nh = skb_network_header(skb);
916
917	if (nh[optoff + 1] == 2) {
918		IP6CB(skb)->flags |= IP6SKB_ROUTERALERT;
919		memcpy(&IP6CB(skb)->ra, nh + optoff + 2, sizeof(IP6CB(skb)->ra));
920		return true;
921	}
922	net_dbg_ratelimited("ipv6_hop_ra: wrong RA length %d\n",
923			    nh[optoff + 1]);
924	kfree_skb(skb);
925	return false;
926}
927
928/* Jumbo payload */
929
930static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
931{
932	const unsigned char *nh = skb_network_header(skb);
933	struct inet6_dev *idev = __in6_dev_get_safely(skb->dev);
934	struct net *net = ipv6_skb_net(skb);
935	u32 pkt_len;
936
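	/* RFC 2675: the Jumbo Payload option carries a 4-octet length and
	 * must sit on a 4n + 2 alignment within the header.
	 */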
937	if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
938		net_dbg_ratelimited("ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
939				    nh[optoff+1]);
940		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
941		goto drop;
942	}
943
944	pkt_len = ntohl(*(__be32 *)(nh + optoff + 2));
945	if (pkt_len <= IPV6_MAXPLEN) {
946		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
947		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
948		return false;
949	}
950	if (ipv6_hdr(skb)->payload_len) {
951		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
952		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
953		return false;
954	}
955
956	if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
957		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INTRUNCATEDPKTS);
958		goto drop;
959	}
960
961	if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
962		goto drop;
963
964	IP6CB(skb)->flags |= IP6SKB_JUMBOGRAM;
965	return true;
966
967drop:
968	kfree_skb(skb);
969	return false;
970}
971
972/* CALIPSO RFC 5570 */
973
974static bool ipv6_hop_calipso(struct sk_buff *skb, int optoff)
975{
976	const unsigned char *nh = skb_network_header(skb);
977
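	/* Per RFC 5570 the option data is at least 8 octets (DOI, Cmpt
	 * Length, Sens Level, Checksum), and the compartment bitmap of
	 * Cmpt Length 32-bit words must fit within the option length.
	 */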
978	if (nh[optoff + 1] < 8)
979		goto drop;
980
981	if (nh[optoff + 6] * 4 + 8 > nh[optoff + 1])
982		goto drop;
983
984	if (!calipso_validate(skb, nh + optoff))
985		goto drop;
986
987	return true;
988
989drop:
990	kfree_skb(skb);
991	return false;
992}
993
994static const struct tlvtype_proc tlvprochopopt_lst[] = {
995	{
996		.type	= IPV6_TLV_ROUTERALERT,
997		.func	= ipv6_hop_ra,
998	},
999	{
1000		.type	= IPV6_TLV_JUMBO,
1001		.func	= ipv6_hop_jumbo,
1002	},
1003	{
1004		.type	= IPV6_TLV_CALIPSO,
1005		.func	= ipv6_hop_calipso,
1006	},
1007	{ -1, }
1008};
1009
1010int ipv6_parse_hopopts(struct sk_buff *skb)
1011{
1012	struct inet6_skb_parm *opt = IP6CB(skb);
1013	struct net *net = dev_net(skb->dev);
1014	int extlen;
1015
1016	/*
1017	 * skb_network_header(skb) is equal to skb->data, and
1018	 * skb_network_header_len(skb) is always equal to
1019	 * sizeof(struct ipv6hdr) by definition of
1020	 * hop-by-hop options.
1021	 */
1022	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) ||
1023	    !pskb_may_pull(skb, (sizeof(struct ipv6hdr) +
1024				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
1025fail_and_free:
1026		kfree_skb(skb);
1027		return -1;
1028	}
1029
1030	extlen = (skb_transport_header(skb)[1] + 1) << 3;
1031	if (extlen > net->ipv6.sysctl.max_hbh_opts_len)
1032		goto fail_and_free;
1033
1034	opt->flags |= IP6SKB_HOPBYHOP;
1035	if (ip6_parse_tlv(tlvprochopopt_lst, skb,
1036			  net->ipv6.sysctl.max_hbh_opts_cnt)) {
1037		skb->transport_header += extlen;
1038		opt = IP6CB(skb);
1039		opt->nhoff = sizeof(struct ipv6hdr);
1040		return 1;
1041	}
1042	return -1;
1043}
1044
1045/*
1046 *	Creating outbound headers.
1047 *
1048 *	"build" functions work when skb is filled from head to tail (datagram)
1049 *	"push"	functions work when headers are added from tail to head (tcp)
1050 *
1051 *	In both cases we assume that the caller reserved enough room
1052 *	for the headers.
1053 */
1054
1055static void ipv6_push_rthdr0(struct sk_buff *skb, u8 *proto,
1056			     struct ipv6_rt_hdr *opt,
1057			     struct in6_addr **addr_p, struct in6_addr *saddr)
1058{
1059	struct rt0_hdr *phdr, *ihdr;
1060	int hops;
1061
1062	ihdr = (struct rt0_hdr *) opt;
1063
1064	phdr = skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3);
1065	memcpy(phdr, ihdr, sizeof(struct rt0_hdr));
1066
1067	hops = ihdr->rt_hdr.hdrlen >> 1;
1068
1069	if (hops > 1)
1070		memcpy(phdr->addr, ihdr->addr + 1,
1071		       (hops - 1) * sizeof(struct in6_addr));
1072
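	/* The packet's final destination takes the last routing-header
	 * slot, and the first configured hop is handed back through
	 * *addr_p to become the IPv6 destination address.
	 */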
1073	phdr->addr[hops - 1] = **addr_p;
1074	*addr_p = ihdr->addr;
1075
1076	phdr->rt_hdr.nexthdr = *proto;
1077	*proto = NEXTHDR_ROUTING;
1078}
1079
1080static void ipv6_push_rthdr4(struct sk_buff *skb, u8 *proto,
1081			     struct ipv6_rt_hdr *opt,
1082			     struct in6_addr **addr_p, struct in6_addr *saddr)
1083{
1084	struct ipv6_sr_hdr *sr_phdr, *sr_ihdr;
1085	int plen, hops;
1086
1087	sr_ihdr = (struct ipv6_sr_hdr *)opt;
1088	plen = (sr_ihdr->hdrlen + 1) << 3;
1089
1090	sr_phdr = skb_push(skb, plen);
1091	memcpy(sr_phdr, sr_ihdr, sizeof(struct ipv6_sr_hdr));
1092
1093	hops = sr_ihdr->first_segment + 1;
1094	memcpy(sr_phdr->segments + 1, sr_ihdr->segments + 1,
1095	       (hops - 1) * sizeof(struct in6_addr));
1096
1097	sr_phdr->segments[0] = **addr_p;
1098	*addr_p = &sr_ihdr->segments[sr_ihdr->segments_left];
1099
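	/* Anything beyond the segment list (hops entries of 16 octets,
	 * i.e. hops * 2 hdrlen units) is optional TLV data: copy it over
	 * verbatim.
	 */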
1100	if (sr_ihdr->hdrlen > hops * 2) {
1101		int tlvs_offset, tlvs_length;
1102
1103		tlvs_offset = (1 + hops * 2) << 3;
1104		tlvs_length = (sr_ihdr->hdrlen - hops * 2) << 3;
1105		memcpy((char *)sr_phdr + tlvs_offset,
1106		       (char *)sr_ihdr + tlvs_offset, tlvs_length);
1107	}
1108
1109#ifdef CONFIG_IPV6_SEG6_HMAC
1110	if (sr_has_hmac(sr_phdr)) {
1111		struct net *net = NULL;
1112
1113		if (skb->dev)
1114			net = dev_net(skb->dev);
1115		else if (skb->sk)
1116			net = sock_net(skb->sk);
1117
1118		WARN_ON(!net);
1119
1120		if (net)
1121			seg6_push_hmac(net, saddr, sr_phdr);
1122	}
1123#endif
1124
1125	sr_phdr->nexthdr = *proto;
1126	*proto = NEXTHDR_ROUTING;
1127}
1128
1129static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
1130			    struct ipv6_rt_hdr *opt,
1131			    struct in6_addr **addr_p, struct in6_addr *saddr)
1132{
1133	switch (opt->type) {
1134	case IPV6_SRCRT_TYPE_0:
1135	case IPV6_SRCRT_STRICT:
1136	case IPV6_SRCRT_TYPE_2:
1137		ipv6_push_rthdr0(skb, proto, opt, addr_p, saddr);
1138		break;
1139	case IPV6_SRCRT_TYPE_4:
1140		ipv6_push_rthdr4(skb, proto, opt, addr_p, saddr);
1141		break;
1142	default:
1143		break;
1144	}
1145}
1146
1147static void ipv6_push_exthdr(struct sk_buff *skb, u8 *proto, u8 type, struct ipv6_opt_hdr *opt)
1148{
1149	struct ipv6_opt_hdr *h = skb_push(skb, ipv6_optlen(opt));
1150
1151	memcpy(h, opt, ipv6_optlen(opt));
1152	h->nexthdr = *proto;
1153	*proto = type;
1154}
1155
1156void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
1157			  u8 *proto,
1158			  struct in6_addr **daddr, struct in6_addr *saddr)
1159{
1160	if (opt->srcrt) {
1161		ipv6_push_rthdr(skb, proto, opt->srcrt, daddr, saddr);
1162		/*
1163		 * IPV6_RTHDRDSTOPTS is ignored
1164		 * unless IPV6_RTHDR is set (RFC3542).
1165		 */
1166		if (opt->dst0opt)
1167			ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst0opt);
1168	}
1169	if (opt->hopopt)
1170		ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
1171}
1172
1173void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
1174{
1175	if (opt->dst1opt)
1176		ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst1opt);
1177}
1178EXPORT_SYMBOL(ipv6_push_frag_opts);
1179
1180struct ipv6_txoptions *
1181ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
1182{
1183	struct ipv6_txoptions *opt2;
1184
1185	opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC);
1186	if (opt2) {
1187		long dif = (char *)opt2 - (char *)opt;
1188		memcpy(opt2, opt, opt->tot_len);
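		/* The copied ext-header pointers still reference the old
		 * buffer; shift each by the distance between the two
		 * allocations so they point into opt2.
		 */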
1189		if (opt2->hopopt)
1190			*((char **)&opt2->hopopt) += dif;
1191		if (opt2->dst0opt)
1192			*((char **)&opt2->dst0opt) += dif;
1193		if (opt2->dst1opt)
1194			*((char **)&opt2->dst1opt) += dif;
1195		if (opt2->srcrt)
1196			*((char **)&opt2->srcrt) += dif;
1197		refcount_set(&opt2->refcnt, 1);
1198	}
1199	return opt2;
1200}
1201EXPORT_SYMBOL_GPL(ipv6_dup_options);
1202
1203static void ipv6_renew_option(int renewtype,
1204			      struct ipv6_opt_hdr **dest,
1205			      struct ipv6_opt_hdr *old,
1206			      struct ipv6_opt_hdr *new,
1207			      int newtype, char **p)
1208{
1209	struct ipv6_opt_hdr *src;
1210
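	/* Use the replacement header when this slot is the type being
	 * renewed, otherwise carry over the existing header (if any).
	 */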
1211	src = (renewtype == newtype ? new : old);
1212	if (!src)
1213		return;
1214
1215	memcpy(*p, src, ipv6_optlen(src));
1216	*dest = (struct ipv6_opt_hdr *)*p;
1217	*p += CMSG_ALIGN(ipv6_optlen(*dest));
1218}
1219
1220/**
1221 * ipv6_renew_options - replace a specific ext hdr with a new one.
1222 *
1223 * @sk: sock from which to allocate memory
1224 * @opt: original options
1225 * @newtype: option type to replace in @opt
1226 * @newopt: new option of type @newtype to replace (user-mem)
1227 *
1228 * Returns a new set of options which is a copy of @opt with the
1229 * option type @newtype replaced with @newopt.
1230 *
1231 * @opt may be NULL, in which case a new set of options is returned
1232 * containing just @newopt.
1233 *
1234 * @newopt may be NULL, in which case the specified option type is
1235 * not copied into the new set of options.
1236 *
1237 * The new set of options is allocated from the socket option memory
1238 * buffer of @sk.
1239 */
1240struct ipv6_txoptions *
1241ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
1242		   int newtype, struct ipv6_opt_hdr *newopt)
1243{
1244	int tot_len = 0;
1245	char *p;
1246	struct ipv6_txoptions *opt2;
1247
1248	if (opt) {
1249		if (newtype != IPV6_HOPOPTS && opt->hopopt)
1250			tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt));
1251		if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt)
1252			tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt));
1253		if (newtype != IPV6_RTHDR && opt->srcrt)
1254			tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt));
1255		if (newtype != IPV6_DSTOPTS && opt->dst1opt)
1256			tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
1257	}
1258
1259	if (newopt)
1260		tot_len += CMSG_ALIGN(ipv6_optlen(newopt));
1261
1262	if (!tot_len)
1263		return NULL;
1264
1265	tot_len += sizeof(*opt2);
1266	opt2 = sock_kmalloc(sk, tot_len, GFP_ATOMIC);
1267	if (!opt2)
1268		return ERR_PTR(-ENOBUFS);
1269
1270	memset(opt2, 0, tot_len);
1271	refcount_set(&opt2->refcnt, 1);
1272	opt2->tot_len = tot_len;
1273	p = (char *)(opt2 + 1);
1274
1275	ipv6_renew_option(IPV6_HOPOPTS, &opt2->hopopt,
1276			  (opt ? opt->hopopt : NULL),
1277			  newopt, newtype, &p);
1278	ipv6_renew_option(IPV6_RTHDRDSTOPTS, &opt2->dst0opt,
1279			  (opt ? opt->dst0opt : NULL),
1280			  newopt, newtype, &p);
1281	ipv6_renew_option(IPV6_RTHDR,
1282			  (struct ipv6_opt_hdr **)&opt2->srcrt,
1283			  (opt ? (struct ipv6_opt_hdr *)opt->srcrt : NULL),
1284			  newopt, newtype, &p);
1285	ipv6_renew_option(IPV6_DSTOPTS, &opt2->dst1opt,
1286			  (opt ? opt->dst1opt : NULL),
1287			  newopt, newtype, &p);
1288
1289	opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) +
1290			  (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) +
1291			  (opt2->srcrt ? ipv6_optlen(opt2->srcrt) : 0);
1292	opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0);
1293
1294	return opt2;
1295}
1296
1297struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
1298					  struct ipv6_txoptions *opt)
1299{
1300	/*
1301	 * ignore the dest before srcrt unless srcrt is being included.
1302	 * --yoshfuji
1303	 */
1304	if (opt && opt->dst0opt && !opt->srcrt) {
1305		if (opt_space != opt) {
1306			memcpy(opt_space, opt, sizeof(*opt_space));
1307			opt = opt_space;
1308		}
1309		opt->opt_nflen -= ipv6_optlen(opt->dst0opt);
1310		opt->dst0opt = NULL;
1311	}
1312
1313	return opt;
1314}
1315EXPORT_SYMBOL_GPL(ipv6_fixup_options);
1316
1317/**
1318 * fl6_update_dst - update flowi destination address with info given
1319 *                  by srcrt option, if any.
1320 *
1321 * @fl6: flowi6 for which daddr is to be updated
1322 * @opt: struct ipv6_txoptions in which to look for srcrt opt
1323 * @orig: copy of original daddr address if modified
1324 *
1325 * Returns NULL if there are no txoptions or no srcrt; otherwise returns
1326 * orig, with the initial value of fl6->daddr saved in it.
1327 */
1328struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
1329				const struct ipv6_txoptions *opt,
1330				struct in6_addr *orig)
1331{
1332	if (!opt || !opt->srcrt)
1333		return NULL;
1334
1335	*orig = fl6->daddr;
1336
1337	switch (opt->srcrt->type) {
1338	case IPV6_SRCRT_TYPE_0:
1339	case IPV6_SRCRT_STRICT:
1340	case IPV6_SRCRT_TYPE_2:
1341		fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
1342		break;
1343	case IPV6_SRCRT_TYPE_4:
1344	{
1345		struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)opt->srcrt;
1346
1347		fl6->daddr = srh->segments[srh->segments_left];
1348		break;
1349	}
1350	default:
1351		return NULL;
1352	}
1353
1354	return orig;
1355}
1356EXPORT_SYMBOL_GPL(fl6_update_dst);
1357