// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/indirect_call_wrapper.h>

#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <net/sock_reuseport.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"

static void udpv6_destruct_sock(struct sock *sk)
{
	udp_destruct_common(sk);
	inet6_sock_destruct(sk);
}

int udpv6_init_sock(struct sock *sk)
{
	skb_queue_head_init(&udp_sk(sk)->reader_queue);
	sk->sk_destruct = udpv6_destruct_sock;
	return 0;
}

static u32 udp6_ehashfn(const struct net *net,
			const struct in6_addr *laddr,
			const u16 lport,
			const struct in6_addr *faddr,
			const __be16 fport)
{
	static u32 udp6_ehash_secret __read_mostly;
	static u32 udp_ipv6_hash_secret __read_mostly;

	u32 lhash, fhash;

	net_get_random_once(&udp6_ehash_secret,
			    sizeof(udp6_ehash_secret));
	net_get_random_once(&udp_ipv6_hash_secret,
			    sizeof(udp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       udp6_ehash_secret + net_hash_mix(net));
}

int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);

	udp_lib_rehash(sk, new_hash);
}

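/* Score @sk as a candidate receiver for a packet with the given 4-tuple.
 * Returns -1 if the socket cannot match at all; otherwise a higher score
 * means a more specific match (connected remote port/address, bound device
 * and matching incoming CPU each add one point).
 */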
static int compute_score(struct sock *sk, struct net *net,
			 const struct in6_addr *saddr, __be16 sport,
			 const struct in6_addr *daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
	if (!dev_match)
		return -1;
	if (sk->sk_bound_dev_if)
		score++;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;

	return score;
}

static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
				     struct sk_buff *skb,
				     const struct in6_addr *saddr,
				     __be16 sport,
				     const struct in6_addr *daddr,
				     unsigned int hnum)
{
	struct sock *reuse_sk = NULL;
	u32 hash;

	if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
		hash = udp6_ehashfn(net, daddr, hnum, saddr, sport);
		reuse_sk = reuseport_select_sock(sk, hash, skb,
						 sizeof(struct udphdr));
	}
	return reuse_sk;
}

/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum,
		int dif, int sdif, struct udp_hslot *hslot2,
		struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;

	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif);
		if (score > badness) {
			badness = score;
			result = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
			if (!result) {
				result = sk;
				continue;
			}

			/* Fall back to scoring if group has connections */
			if (!reuseport_has_conns(sk))
				return result;

			/* Reuseport logic returned an error, keep original score. */
			if (IS_ERR(result))
				continue;

			badness = compute_score(sk, net, saddr, sport,
						daddr, hnum, dif, sdif);
		}
	}
	return result;
}

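/* Let a BPF sk_lookup program override the socket selection; only the global
 * udp_table is supported, and reuseport selection is still applied to the
 * socket the program returns.
 */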
static inline struct sock *udp6_lookup_run_bpf(struct net *net,
					       struct udp_table *udptable,
					       struct sk_buff *skb,
					       const struct in6_addr *saddr,
					       __be16 sport,
					       const struct in6_addr *daddr,
					       u16 hnum)
{
	struct sock *sk, *reuse_sk;
	bool no_reuseport;

	if (udptable != &udp_table)
		return NULL; /* only UDP is supported */

	no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP,
					    saddr, sport, daddr, hnum, &sk);
	if (no_reuseport || IS_ERR_OR_NULL(sk))
		return sk;

	reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
	if (reuse_sk)
		sk = reuse_sk;
	return sk;
}

/* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *udptable,
			       struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	struct sock *result, *sk;

	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	/* Lookup connected or non-wildcard sockets */
	result = udp6_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
		goto done;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
		sk = udp6_lookup_run_bpf(net, udptable, skb,
					 saddr, sport, daddr, hnum);
		if (sk) {
			result = sk;
			goto done;
		}
	}

	/* Got non-wildcard socket or error on first lookup */
	if (result)
		goto done;

	/* Lookup wildcard sockets */
	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp6_lib_lookup2(net, saddr, sport,
				  &in6addr_any, hnum, dif, sdif,
				  hslot2, skb);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);

static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), udptable, skb);
}

struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), &udp_table, NULL);
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, &udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif

/* do not use the scratch area len for jumbograms: their length exceeds the
 * scratch area space; note that the IP6CB flags are still in the first
 * cacheline, so checking for jumbograms is cheap
 */
static int udp6_skb_len(struct sk_buff *skb)
{
	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
}

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

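/* Receive one datagram: validate the checksum while (or before) copying when
 * needed, report the sender as an IPv6 or IPv4-mapped address, and emit the
 * ancillary data the socket asked for.
 */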
int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int noblock, int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	struct udp_mib __percpu *mib;
	bool checksum_valid = false;
	int is_udp4;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
	if (!skb)
		return err;

	ulen = udp6_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	is_udp4 = (skb->protocol == htons(ETH_P_IP));
	mib = __UDPX_MIB(sk, is_udp4);

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (unlikely(err)) {
		if (!peeking) {
			atomic_inc(&sk->sk_drops);
			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
		}
		kfree_skb(skb);
		return err;
	}
	if (!peeking)
		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = udp_hdr(skb)->source;
		sin6->sin6_flowinfo = 0;

		if (is_udp4) {
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin6->sin6_addr);
			sin6->sin6_scope_id = 0;
		} else {
			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
						    inet6_iif(skb));
		}
		*addr_len = sizeof(*sin6);

		if (cgroup_bpf_enabled)
			BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
						(struct sockaddr *)sin6);
	}

	if (udp_sk(sk)->gro_enabled)
		udp_cmsg_recv(msg, sk, skb);

	if (np->rxopt.all)
		ip6_datagram_recv_common_ctl(sk, msg, skb);

	if (is_udp4) {
		if (inet->cmsg_flags)
			ip_cmsg_recv_offset(msg, sk, skb,
					    sizeof(struct udphdr), off);
	} else {
		if (np->rxopt.all)
			ip6_datagram_recv_specific_ctl(sk, msg, skb);
	}

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}

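/* udpv6_encap_needed_key is a static branch: it stays off until a tunnel
 * registers an encapsulation via udpv6_encap_enable(), so the common receive
 * and error paths can skip encap handling cheaply.
 */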
DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
	static_branch_inc(&udpv6_encap_needed_key);
}
EXPORT_SYMBOL(udpv6_encap_enable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
				      struct inet6_skb_parm *opt,
				      u8 type, u8 code, int offset, __be32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			       u8 type, u8 code, int offset, __be32 info);
		const struct ip6_tnl_encap_ops *encap;

		encap = rcu_dereference(ip6tun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, opt, type, code, offset, info))
			return 0;
	}

	return -ENOENT;
}

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints; in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp6_lib_err_encap(struct net *net,
					 const struct ipv6hdr *hdr, int offset,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sk_buff *skb,
					 struct inet6_skb_parm *opt,
					 u8 type, u8 code, __be32 info)
{
	int network_offset, transport_offset;
	struct sock *sk;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv6 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, offset);

	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
			       &hdr->saddr, uh->dest,
			       inet6_iif(skb), 0, udptable, skb);
	if (sk) {
		int (*lookup)(struct sock *sk, struct sk_buff *skb);
		struct udp_sock *up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

	if (!sk) {
		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
							offset, info));
	}

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}

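/* Handle an ICMPv6 error for a UDP packet: find the originating socket
 * (falling back to tunnel encapsulations when no socket matches), update
 * PMTU or redirect state as needed, and report the error to the application
 * when it asked for it via IPV6_RECVERR.
 */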
int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		   u8 type, u8 code, int offset, __be32 info,
		   struct udp_table *udptable)
{
	struct ipv6_pinfo *np;
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct in6_addr *saddr = &hdr->saddr;
	const struct in6_addr *daddr = &hdr->daddr;
	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
	if (!sk) {
		/* No socket for error: try tunnels before discarding */
		sk = ERR_PTR(-ENOENT);
		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
						  udptable, skb,
						  opt, type, code, info);
			if (!sk)
				return 0;
		}

		if (IS_ERR(sk)) {
			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
					  ICMP6_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	harderr = icmpv6_err_convert(type, code, &err);
	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		if (!ip6_sk_accept_pmtu(sk))
			goto out;
		ip6_sk_update_pmtu(skb, sk, info);
		if (np->pmtudisc != IPV6_PMTUDISC_DONT)
			harderr = 1;
	}
	if (type == NDISC_REDIRECT) {
		if (tunnel) {
			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
				     sk->sk_mark, sk->sk_uid);
		} else {
			ip6_sk_redirect(skb, sk);
		}
		goto out;
	}

	/* Tunnels don't have an application socket: don't pass errors back */
	if (tunnel)
		goto out;

	if (!np->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
	}

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	return 0;
}

static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_RCVBUFERRORS, is_udplite);
		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		return -1;
	}

	return 0;
}

static __inline__ int udpv6_err(struct sk_buff *skb,
				struct inet6_skb_parm *opt, u8 type,
				u8 code, int offset, __be32 info)
{
	return __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}

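/* Validate one skb and queue it on @sk: hand encapsulated packets to the
 * socket's encap_rcv hook, enforce UDP-Lite partial coverage constraints and
 * verify the checksum before enqueueing.
 */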
static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;

	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	 */
	if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {

		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	skb_dst_drop(skb);

	return __udpv6_queue_rcv_skb(sk, skb);

csum_error:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}

static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udpv6_queue_rcv_one_skb(sk, skb);

	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, false);
	skb_list_walk_safe(segs, skb, next) {
		__skb_pull(skb, skb_transport_offset(skb));

		ret = udpv6_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
						 true);
	}
	return 0;
}

static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
				   __be16 loc_port, const struct in6_addr *loc_addr,
				   __be16 rmt_port, const struct in6_addr *rmt_addr,
				   int dif, int sdif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net))
		return false;

	if (udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6 ||
	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif) ||
	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
		return false;
	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
		return false;
	return true;
}

static void udp6_csum_zero_error(struct sk_buff *skb)
{
	/* RFC 2460 section 8.1 says that we SHOULD log
	 * this error. Well, it is reasonable.
	 */
	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}

/*
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
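/* Deliver a multicast datagram to every matching socket in the slot: the
 * first match receives the original skb once the walk is done, later matches
 * get clones. When the slot is crowded (more than ten sockets), switch to
 * the addr+port hash instead.
 */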
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
		const struct in6_addr *saddr, const struct in6_addr *daddr,
		struct udp_table *udptable, int proto)
{
	struct sock *sk, *first = NULL;
	const struct udphdr *uh = udp_hdr(skb);
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	int dif = inet6_iif(skb);
	int sdif = inet6_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
			    udptable->mask;
		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
					    uh->source, saddr, dif, sdif,
					    hnum))
			continue;
		/* If zero checksum and no_check is not on for
		 * the socket then skip it.
		 */
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			continue;
		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
			continue;
		}

		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udpv6_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				 proto == IPPROTO_UDPLITE);
	}
	return 0;
}

static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	if (udp_sk_rx_dst_set(sk, dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
				struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);

	ret = udpv6_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input */
	if (ret > 0)
		return ret;
	return 0;
}

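/* Common IPv6 UDP/UDP-Lite receive path: validate length and checksum, try
 * the early-demux socket first, then multicast delivery or a unicast lookup;
 * when no socket wants the packet, answer with ICMPv6 port unreachable.
 */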
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	const struct in6_addr *saddr, *daddr;
	struct net *net = dev_net(skb->dev);
	struct udphdr *uh;
	struct sock *sk;
	bool refcounted;
	u32 ulen = 0;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto discard;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	uh = udp_hdr(skb);

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
			uh = udp_hdr(skb);
		}
	}

	if (udp6_csum_init(skb, uh, proto))
		goto csum_error;

	/* Check if the socket is already available, e.g. due to early demux */
	sk = skb_steal_sock(skb, &refcounted);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
			udp6_sk_rx_dst_set(sk, dst);

		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
			if (refcounted)
				sock_put(sk);
			goto report_csum_error;
		}

		ret = udp6_unicast_rcv_skb(sk, skb, uh);
		if (refcounted)
			sock_put(sk);
		return ret;
	}

	/*
	 *	Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable, proto);

	/* Unicast */
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			goto report_csum_error;
		return udp6_unicast_rcv_skb(sk, skb, uh);
	}

	if (!uh->check)
		goto report_csum_error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard;

	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

	kfree_skb(skb);
	return 0;

short_packet:
	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
			    saddr, ntohs(uh->source),
			    ulen, skb->len,
			    daddr, ntohs(uh->dest));
	goto discard;

report_csum_error:
	udp6_csum_zero_error(skb);
csum_error:
	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}


static struct sock *__udp6_lib_demux_lookup(struct net *net,
			__be16 loc_port, const struct in6_addr *loc_addr,
			__be16 rmt_port, const struct in6_addr *rmt_addr,
			int dif, int sdif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
	struct sock *sk;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (sk->sk_state == TCP_ESTABLISHED &&
		    inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}

void udp_v6_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct udphdr *uh;
	struct sock *sk;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet6_sdif(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
			   sizeof(struct udphdr)))
		return;

	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_HOST)
		sk = __udp6_lib_demux_lookup(net, uh->dest,
					     &ipv6_hdr(skb)->daddr,
					     uh->source, &ipv6_hdr(skb)->saddr,
					     dif, sdif);
	else
		return;

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = rcu_dereference(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
	if (dst) {
		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);
	}
}

INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
{
	return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

/*
 *	Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending == AF_INET)
		udp_flush_pending_frames(sk);
	else if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip6_flush_pending_frames(sk);
	}
}

static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			     int addr_len)
{
	if (addr_len < offsetofend(struct sockaddr, sa_family))
		return -EINVAL;
	/* The following checks are replicated from __ip6_datagram_connect()
	 * and intended to prevent the BPF program called below from accessing
	 * bytes that are out of the bounds specified by the user in addr_len.
	 */
	if (uaddr->sa_family == AF_INET) {
		if (__ipv6_only_sock(sk))
			return -EAFNOSUPPORT;
		return udp_pre_connect(sk, uaddr, addr_len);
	}

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
}

/**
 *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
 *	@sk:	socket we are sending on
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 *	@saddr: source address
 *	@daddr: destination address
 *	@len:	length of packet
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	__wsum csum = 0;

	if (!frags) {
		/* Only one fragment on the socket. */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		csum = skb->csum;

		skb->ip_summed = CHECKSUM_NONE;

		do {
			csum = csum_add(csum, frags->csum);
		} while ((frags = frags->next));

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

/*
 *	Sending
 */

static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
			   struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int datalen = len - sizeof(*uh);

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (udp_sk(sk)->no_check6_tx) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		if (datalen > cork->gso_size) {
			skb_shinfo(skb)->gso_size = cork->gso_size;
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
								 cork->gso_size);
		}
		goto csum_partial;
	}

	if (is_udplite)
		csum = udplite_csum(skb);
	else if (udp_sk(sk)->no_check6_tx) {   /* UDP csum disabled */
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
		goto send;
	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip6_send_skb(skb);
	if (err) {
		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_OUTDATAGRAMS, is_udplite);
	}
	return err;
}

static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udp_sock *up = udp_sk(sk);
	struct flowi6 fl6;
	int err = 0;

	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);

	/* ip6_finish_skb will release the cork, so make a copy of
	 * fl6 here.
	 */
	fl6 = inet_sk(sk)->cork.fl.u.ip6;

	skb = ip6_finish_skb(sk);
	if (!skb)
		goto out;

	err = udp_v6_send_skb(skb, &fl6, &inet_sk(sk)->cork.base);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}

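/* Send path: resolve the destination (handling IPv4-mapped addresses by
 * falling back to udp_sendmsg), build the flow and routing information, then
 * either transmit a single skb on the lockless fast path or append data to a
 * corked socket.
 */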
int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_txoptions *opt = NULL;
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct flowi6 fl6;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
	bool connected = false;
	int ulen = len;
	int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	ipcm6_init(&ipc6);
	ipc6.gso_size = READ_ONCE(up->gso_size);
	ipc6.sockc.tsflags = sk->sk_tsflags;
	ipc6.sockc.mark = sk->sk_mark;

	/* destination address check */
	if (sin6) {
		if (addr_len < offsetof(struct sockaddr, sa_data))
			return -EINVAL;

		switch (sin6->sin6_family) {
		case AF_INET6:
			if (addr_len < SIN6_LEN_RFC2133)
				return -EINVAL;
			daddr = &sin6->sin6_addr;
			if (ipv6_addr_any(daddr) &&
			    ipv6_addr_v4mapped(&np->saddr))
				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
						       daddr);
			break;
		case AF_INET:
			goto do_udp_sendmsg;
		case AF_UNSPEC:
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
			daddr = NULL;
			break;
		default:
			return -EINVAL;
		}
	} else if (!up->pending) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &sk->sk_v6_daddr;
	} else
		daddr = NULL;

	if (daddr) {
		if (ipv6_addr_v4mapped(daddr)) {
			struct sockaddr_in sin;
			sin.sin_family = AF_INET;
			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
			sin.sin_addr.s_addr = daddr->s6_addr32[3];
			msg->msg_name = &sin;
			msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
			err = __ipv6_only_sock(sk) ?
				-ENETUNREACH : udp_sendmsg(sk, msg, len);
			msg->msg_name = sin6;
			msg->msg_namelen = addr_len;
			return err;
		}
	}

	if (up->pending == AF_INET)
		return udp_sendmsg(sk, msg, len);

	/* Rough check on arithmetic overflow,
	   better check is made in ip6_append_data().
	   */
	if (len > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET6)) {
				release_sock(sk);
				return -EAFNOSUPPORT;
			}
			dst = NULL;
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	memset(&fl6, 0, sizeof(fl6));

	if (sin6) {
		if (sin6->sin6_port == 0)
			return -EINVAL;

		fl6.fl6_dport = sin6->sin6_port;
		daddr = &sin6->sin6_addr;

		if (np->sndflow) {
			fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
			if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
				if (IS_ERR(flowlabel))
					return -EINVAL;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6.flowi6_oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		fl6.fl6_dport = inet->inet_dport;
		daddr = &sk->sk_v6_daddr;
		fl6.flowlabel = np->flow_label;
		connected = true;
	}

	if (!fl6.flowi6_oif)
		fl6.flowi6_oif = sk->sk_bound_dev_if;

	if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;

	fl6.flowi6_uid = sk->sk_uid;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(*opt);
		ipc6.opt = opt;

		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
		if (err > 0)
			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6,
						    &ipc6);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
		}
		if (!(opt->opt_nflen|opt->opt_flen))
			opt = NULL;
		connected = false;
	}
	if (!opt) {
		opt = txopt_get(np);
		opt_to_free = opt;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);
	ipc6.opt = opt;

	fl6.flowi6_proto = sk->sk_protocol;
	fl6.flowi6_mark = ipc6.sockc.mark;
	fl6.daddr = *daddr;
	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
		fl6.saddr = np->saddr;
	fl6.fl6_sport = inet->inet_sport;

	if (cgroup_bpf_enabled && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
					   (struct sockaddr *)sin6, &fl6.saddr);
		if (err)
			goto out_no_dst;
		if (sin6) {
			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
				/* BPF program rewrote IPv6-only by IPv4-mapped
				 * IPv6. It's currently unsupported.
				 */
				err = -ENOTSUPP;
				goto out_no_dst;
			}
			if (sin6->sin6_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_no_dst;
			}
			fl6.fl6_dport = sin6->sin6_port;
			fl6.daddr = sin6->sin6_addr;
		}
	}

	if (ipv6_addr_any(&fl6.daddr))
		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */

	final_p = fl6_update_dst(&fl6, opt, &final);
	if (final_p)
		connected = false;

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
		fl6.flowi6_oif = np->mcast_oif;
		connected = false;
	} else if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->ucast_oif;

	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));

	if (ipc6.tclass < 0)
		ipc6.tclass = np->tclass;

	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);

	dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, connected);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto out;
	}

	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	/* Lockless fast path for the non-corking case */
	if (!corkreq) {
		struct inet_cork_full cork;
		struct sk_buff *skb;

		skb = ip6_make_skb(sk, getfrag, msg, ulen,
				   sizeof(struct udphdr), &ipc6,
				   &fl6, (struct rt6_info *)dst,
				   msg->msg_flags, &cork);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_v6_send_skb(skb, &fl6, &cork.base);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}

	up->pending = AF_INET6;

do_append_data:
	if (ipc6.dontfrag < 0)
		ipc6.dontfrag = np->dontfrag;
	up->len += ulen;
	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
			      &ipc6, &fl6, (struct rt6_info *)dst,
			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_v6_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_v6_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;

	if (err > 0)
		err = np->recverr ? net_xmit_errno(err) : 0;
	release_sock(sk);

out:
	dst_release(dst);
out_no_dst:
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(dst, &fl6.daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}

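/* Final teardown of a UDP socket: flush any corked data under the socket
 * lock and run the tunnel encap_destroy hook if one was installed.
 */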
void udpv6_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	lock_sock(sk);

	/* protects from races with udp_abort() */
	sock_set_flag(sk, SOCK_DEAD);
	udp_v6_flush_pending_frames(sk);
	release_sock(sk);

	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);
			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (up->encap_enabled) {
			static_branch_dec(&udpv6_encap_needed_key);
			udp_encap_disable();
		}
	}
}

/*
 *	Socket option code for UDP
 */
int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		     unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname,
					  optval, optlen,
					  udp_v6_push_pending_frames);
	return ipv6_setsockopt(sk, level, optname, optval, optlen);
}

int udpv6_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ipv6_getsockopt(sk, level, optname, optval, optlen);
}

static const struct inet6_protocol udpv6_protocol = {
	.handler	=	udpv6_rcv,
	.err_handler	=	udpv6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
	} else {
		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
		struct inet_sock *inet = inet_sk(v);
		__u16 srcp = ntohs(inet->inet_sport);
		__u16 destp = ntohs(inet->inet_dport);
		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
					  udp_rqueue_get(v), bucket);
	}
	return 0;
}

const struct seq_operations udp6_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp6_seq_show,
};
EXPORT_SYMBOL(udp6_seq_ops);

static struct udp_seq_afinfo udp6_seq_afinfo = {
	.family		= AF_INET6,
	.udp_table	= &udp_table,
};

int __net_init udp6_proc_init(struct net *net)
{
	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
		return -ENOMEM;
	return 0;
}

void udp6_proc_exit(struct net *net)
{
	remove_proc_entry("udp6", net->proc_net);
}
#endif /* CONFIG_PROC_FS */

/* ------------------------------------------------------------------------ */

struct proto udpv6_prot = {
	.name			= "UDPv6",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udpv6_pre_connect,
	.connect		= ip6_datagram_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udpv6_init_sock,
	.destroy		= udpv6_destroy_sock,
	.setsockopt		= udpv6_setsockopt,
	.getsockopt		= udpv6_getsockopt,
	.sendmsg		= udpv6_sendmsg,
	.recvmsg		= udpv6_recvmsg,
	.release_cb		= ip6_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v6_rehash,
	.get_port		= udp_v6_get_port,
	.memory_allocated	= &udp_memory_allocated,
	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp6_sock),
	.h.udp_table		= &udp_table,
	.diag_destroy		= udp_abort,
};

static struct inet_protosw udpv6_protosw = {
	.type =      SOCK_DGRAM,
	.protocol =  IPPROTO_UDP,
	.prot =      &udpv6_prot,
	.ops =       &inet6_dgram_ops,
	.flags =     INET_PROTOSW_PERMANENT,
};

int __init udpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
	if (ret)
		goto out;

	ret = inet6_register_protosw(&udpv6_protosw);
	if (ret)
		goto out_udpv6_protocol;
out:
	return ret;

out_udpv6_protocol:
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
	goto out;
}

void udpv6_exit(void)
{
	inet6_unregister_protosw(&udpv6_protosw);
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}