// SPDX-License-Identifier: GPL-2.0
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/inetpeer.h>
#include <net/tcp.h>

void tcp_fastopen_init_key_once(struct net *net)
{
	u8 key[TCP_FASTOPEN_KEY_LENGTH];
	struct tcp_fastopen_context *ctxt;

	rcu_read_lock();
	ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctxt) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* tcp_fastopen_reset_cipher publishes the new context
	 * atomically, so we allow this race to happen here.
	 *
	 * All call sites of tcp_fastopen_cookie_gen also check
	 * for a valid cookie, so this is an acceptable risk.
	 */
	get_random_bytes(key, sizeof(key));
	tcp_fastopen_reset_cipher(net, NULL, key, NULL);
}

static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
	struct tcp_fastopen_context *ctx =
		container_of(head, struct tcp_fastopen_context, rcu);

	kfree_sensitive(ctx);
}

void tcp_fastopen_destroy_cipher(struct sock *sk)
{
	struct tcp_fastopen_context *ctx;

	ctx = rcu_dereference_protected(
		inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);
	if (ctx)
		call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);
}

void tcp_fastopen_ctx_destroy(struct net *net)
{
	struct tcp_fastopen_context *ctxt;

	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);

	ctxt = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
		lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
	rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, NULL);
	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

	if (ctxt)
		call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
}

int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
			      void *primary_key, void *backup_key)
{
	struct tcp_fastopen_context *ctx, *octx;
	struct fastopen_queue *q;
	int err = 0;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	ctx->key[0].key[0] = get_unaligned_le64(primary_key);
	ctx->key[0].key[1] = get_unaligned_le64(primary_key + 8);
	if (backup_key) {
		ctx->key[1].key[0] = get_unaligned_le64(backup_key);
		ctx->key[1].key[1] = get_unaligned_le64(backup_key + 8);
		ctx->num = 2;
	} else {
		ctx->num = 1;
	}

	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);
	if (sk) {
		q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
		octx = rcu_dereference_protected(q->ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
		rcu_assign_pointer(q->ctx, ctx);
	} else {
		octx = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
		rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, ctx);
	}
	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

	if (octx)
		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
out:
	return err;
}
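
/* Copy the currently installed TFO key(s) into @key as little-endian u64
 * words (two u64s per key) and return how many keys are installed. With a
 * non-NULL @icsk the listener's own context is reported, otherwise the
 * net-wide one; the caller's buffer must have room for up to two keys.
 * Used, for instance, when reporting keys back to user space.
 */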
int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
			    u64 *key)
{
	struct tcp_fastopen_context *ctx;
	int n_keys = 0, i;

	rcu_read_lock();
	if (icsk)
		ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
	else
		ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctx) {
		n_keys = tcp_fastopen_context_len(ctx);
		for (i = 0; i < n_keys; i++) {
			put_unaligned_le64(ctx->key[i].key[0], key + (i * 2));
			put_unaligned_le64(ctx->key[i].key[1], key + (i * 2) + 1);
		}
	}
	rcu_read_unlock();

	return n_keys;
}
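
/* Compute the 8-byte cookie for one key: SipHash over the source and
 * destination addresses taken straight from the SYN's IP header (daddr
 * immediately follows saddr in both struct iphdr and struct ipv6hdr, so a
 * single hash over the combined length covers both fields). Returns false
 * for an unhandled address family.
 */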
static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
					     struct sk_buff *syn,
					     const siphash_key_t *key,
					     struct tcp_fastopen_cookie *foc)
{
	BUILD_BUG_ON(TCP_FASTOPEN_COOKIE_SIZE != sizeof(u64));

	if (req->rsk_ops->family == AF_INET) {
		const struct iphdr *iph = ip_hdr(syn);

		foc->val[0] = cpu_to_le64(siphash(&iph->saddr,
					  sizeof(iph->saddr) +
					  sizeof(iph->daddr),
					  key));
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		return true;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (req->rsk_ops->family == AF_INET6) {
		const struct ipv6hdr *ip6h = ipv6_hdr(syn);

		foc->val[0] = cpu_to_le64(siphash(&ip6h->saddr,
					  sizeof(ip6h->saddr) +
					  sizeof(ip6h->daddr),
					  key));
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		return true;
	}
#endif
	return false;
}

/* Generate the fastopen cookie by applying SipHash to both the source and
 * destination addresses.
 */
static void tcp_fastopen_cookie_gen(struct sock *sk,
				    struct request_sock *req,
				    struct sk_buff *syn,
				    struct tcp_fastopen_cookie *foc)
{
	struct tcp_fastopen_context *ctx;

	rcu_read_lock();
	ctx = tcp_fastopen_get_ctx(sk);
	if (ctx)
		__tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[0], foc);
	rcu_read_unlock();
}

/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
		return;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	skb_dst_drop(skb);
	/* segs_in has been initialized to 1 in tcp_create_openreq_child().
	 * Hence, reset segs_in to 0 before calling tcp_segs_in()
	 * to avoid double counting. Also, tcp_segs_in() expects
	 * skb->len to include the tcp_hdrlen. Hence, it should
	 * be called before __skb_pull().
	 */
	tp->segs_in = 0;
	tcp_segs_in(tp, skb);
	__skb_pull(skb, tcp_hdrlen(skb));
	sk_forced_mem_schedule(sk, skb->truesize);
	skb_set_owner_r(skb, sk);

	TCP_SKB_CB(skb)->seq++;
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	tp->syn_data_acked = 1;

	/* u64_stats_update_begin(&tp->syncp) not needed here,
	 * as we certainly are not changing upper 32bit value (0)
	 */
	tp->bytes_received = skb->len;

	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		tcp_fin(sk);
}

/* returns 0 - no key match, 1 for primary, 2 for backup */
static int tcp_fastopen_cookie_gen_check(struct sock *sk,
					 struct request_sock *req,
					 struct sk_buff *syn,
					 struct tcp_fastopen_cookie *orig,
					 struct tcp_fastopen_cookie *valid_foc)
{
	struct tcp_fastopen_cookie search_foc = { .len = -1 };
	struct tcp_fastopen_cookie *foc = valid_foc;
	struct tcp_fastopen_context *ctx;
	int i, ret = 0;

	rcu_read_lock();
	ctx = tcp_fastopen_get_ctx(sk);
	if (!ctx)
		goto out;
	for (i = 0; i < tcp_fastopen_context_len(ctx); i++) {
		__tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[i], foc);
		if (tcp_fastopen_cookie_match(foc, orig)) {
			ret = i + 1;
			goto out;
		}
		foc = &search_foc;
	}
out:
	rcu_read_unlock();
	return ret;
}
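
/* Create the full child socket for an accepted Fast Open request directly
 * from the SYN, charge it against the listener's fastopen queue, arm the
 * SYNACK retransmit timer, and hand any payload carried in the SYN to the
 * child before the SYN-ACK has even been sent.
 */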
static struct sock *tcp_fastopen_create_child(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req)
{
	struct tcp_sock *tp;
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	struct sock *child;
	bool own_req;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 NULL, &own_req);
	if (!child)
		return NULL;

	spin_lock(&queue->fastopenq.lock);
	queue->fastopenq.qlen++;
	spin_unlock(&queue->fastopenq.lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	rcu_assign_pointer(tp->fastopen_rsk, req);
	tcp_rsk(req)->tfo_listener = true;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
	tp->max_window = tp->snd_wnd;

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the ehash
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	refcount_set(&req->rsk_refcnt, 2);

	/* Now finish processing the fastopen child socket. */
	tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, skb);

	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

	tcp_fastopen_add_skb(child, skb);

	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
	tp->rcv_wup = tp->rcv_nxt;
	/* tcp_conn_request() is sending the SYNACK,
	 * and queues the child into listener accept queue.
	 */
	return child;
}

static bool tcp_fastopen_queue_check(struct sock *sk)
{
	struct fastopen_queue *fastopenq;
	int max_qlen;

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
	max_qlen = READ_ONCE(fastopenq->max_qlen);
	if (max_qlen == 0)
		return false;

	if (fastopenq->qlen >= max_qlen) {
		struct request_sock *req1;
		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			spin_unlock(&fastopenq->lock);
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_put(req1);
	}
	return true;
}

static bool tcp_fastopen_no_cookie(const struct sock *sk,
				   const struct dst_entry *dst,
				   int flag)
{
	return (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & flag) ||
	       tcp_sk(sk)->fastopen_no_cookie ||
	       (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
}
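
/* Note: the TFO_* flags tested by tcp_fastopen_no_cookie() above and by
 * tcp_try_fastopen() below come from the net.ipv4.tcp_fastopen sysctl
 * bitmap (client/server enable bits plus the "no cookie required" modes).
 * For example, enabling both client (0x1) and server (0x2) support:
 *
 *	sysctl -w net.ipv4.tcp_fastopen=3
 *
 * See Documentation/networking/ip-sysctl.rst for the full list of bits.
 */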
/* Returns the child socket if we should perform Fast Open on the SYN,
 * otherwise NULL. The cookie (foc) may be updated and returned to the client
 * in the SYN-ACK later, e.g. for a Fast Open cookie request (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      const struct dst_entry *dst)
{
	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
	int tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	struct sock *child;
	int ret = 0;

	if (foc->len == 0) /* Client requests a cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

	if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
	      (syn_data || foc->len >= 0) &&
	      tcp_fastopen_queue_check(sk))) {
		foc->len = -1;
		return NULL;
	}

	if (tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
		goto fastopen;

	if (foc->len == 0) {
		/* Client requests a cookie. */
		tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc);
	} else if (foc->len > 0) {
		ret = tcp_fastopen_cookie_gen_check(sk, req, skb, foc,
						    &valid_foc);
		if (!ret) {
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		} else {
			/* Cookie is valid. Create a (full) child socket to
			 * accept the data in SYN before returning a SYN-ACK to
			 * ack the data. If we fail to create the socket, fall
			 * back and ack the ISN only but include the same
			 * cookie.
			 *
			 * Note: Data-less SYN with valid cookie is allowed to
			 * send data in SYN_RECV state.
			 */
fastopen:
			child = tcp_fastopen_create_child(sk, skb, req);
			if (child) {
				if (ret == 2) {
					valid_foc.exp = foc->exp;
					*foc = valid_foc;
					NET_INC_STATS(sock_net(sk),
						      LINUX_MIB_TCPFASTOPENPASSIVEALTKEY);
				} else {
					foc->len = -1;
				}
				NET_INC_STATS(sock_net(sk),
					      LINUX_MIB_TCPFASTOPENPASSIVE);
				return child;
			}
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		}
	}
	valid_foc.exp = foc->exp;
	*foc = valid_foc;
	return NULL;
}

bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie)
{
	const struct dst_entry *dst;

	tcp_fastopen_cache_get(sk, mss, cookie);

	/* Firewall blackhole issue check */
	if (tcp_fastopen_active_should_disable(sk)) {
		cookie->len = -1;
		return false;
	}

	dst = __sk_dst_get(sk);

	if (tcp_fastopen_no_cookie(sk, dst, TFO_CLIENT_NO_COOKIE)) {
		cookie->len = -1;
		return true;
	}
	if (cookie->len > 0)
		return true;
	tcp_sk(sk)->fastopen_client_fail = TFO_COOKIE_UNAVAILABLE;
	return false;
}

/* This function checks if we want to defer sending SYN until the first
 * write(). We defer under the following conditions:
 * 1. fastopen_connect sockopt is set
 * 2. we have a valid cookie
 * Return value: return true if we want to defer until application writes data
 *               return false if we want to send out SYN immediately
 */
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
	struct tcp_fastopen_cookie cookie = { .len = 0 };
	struct tcp_sock *tp = tcp_sk(sk);
	u16 mss;

	if (tp->fastopen_connect && !tp->fastopen_req) {
		if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
			inet_sk(sk)->defer_connect = 1;
			return true;
		}

		/* Alloc fastopen_req in order for FO option to be included
		 * in SYN
		 */
		tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
					   sk->sk_allocation);
		if (tp->fastopen_req)
			tp->fastopen_req->cookie = cookie;
		else
			*err = -ENOBUFS;
	}
	return false;
}
EXPORT_SYMBOL(tcp_fastopen_defer_connect);

/*
 * The following code block is to deal with middle box issues with TFO:
 * Middlebox firewall issues can potentially cause the server's data to be
 * blackholed after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 *   1. client side TFO socket receives out of order FIN
 *   2. client side TFO socket receives out of order RST
 *   3. client side TFO socket has timed out three times consecutively during
 *      or after handshake
 * We disable active side TFO globally for 1hr at first. Then if it
 * happens again, we disable it for 2h, then 4h, 8h, ...
 * And we reset the timeout back to 1hr when we see a successful active
 * TFO connection with data exchanges.
 */

/* Disable active TFO and record current jiffies and
 * tfo_active_disable_times
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
	struct net *net = sock_net(sk);

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout))
		return;

	/* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */
	WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies);

	/* Paired with smp_rmb() in tcp_fastopen_active_should_disable().
	 * We want net->ipv4.tfo_active_disable_stamp to be updated first.
	 */
	smp_mb__before_atomic();
	atomic_inc(&net->ipv4.tfo_active_disable_times);

	NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
}
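
/* Worked example, assuming the blackhole timeout sysctl is set to 3600s:
 * after the 1st disable event active TFO is paused for 1h, after the 2nd
 * for 2h, after the 3rd for 4h, and so on, capped at 2^6 * 1h = 64h by the
 * computation in tcp_fastopen_active_should_disable() below.
 */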
/* Calculate timeout for tfo active disable
 * Return true if we are still in the active TFO disable period
 * Return false if timeout already expired and we should use active TFO
 */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
	unsigned int tfo_bh_timeout =
		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout);
	unsigned long timeout;
	int tfo_da_times;
	int multiplier;

	if (!tfo_bh_timeout)
		return false;

	tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
	if (!tfo_da_times)
		return false;

	/* Paired with smp_mb__before_atomic() in tcp_fastopen_active_disable() */
	smp_rmb();

	/* Limit timeout to max: 2^6 * initial timeout */
	multiplier = 1 << min(tfo_da_times - 1, 6);

	/* Paired with the WRITE_ONCE() in tcp_fastopen_active_disable(). */
	timeout = READ_ONCE(sock_net(sk)->ipv4.tfo_active_disable_stamp) +
		  multiplier * tfo_bh_timeout * HZ;
	if (time_before(jiffies, timeout))
		return true;

	/* Mark check bit so we can check for successful active TFO
	 * condition and reset tfo_active_disable_times
	 */
	tcp_sk(sk)->syn_fastopen_ch = 1;
	return false;
}

/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data is received.
 * Also check if we can reset tfo_active_disable_times if data is
 * received successfully on a marked active TFO socket opened on
 * a non-loopback interface
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst;
	struct sk_buff *skb;

	if (!tp->syn_fastopen)
		return;

	if (!tp->data_segs_in) {
		skb = skb_rb_first(&tp->out_of_order_queue);
		if (skb && !skb_rb_next(skb)) {
			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
				tcp_fastopen_active_disable(sk);
				return;
			}
		}
	} else if (tp->syn_fastopen_ch &&
		   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
		dst = sk_dst_get(sk);
		if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
			atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
		dst_release(dst);
	}
}

void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
{
	u32 timeouts = inet_csk(sk)->icsk_retransmits;
	struct tcp_sock *tp = tcp_sk(sk);

	/* Broken middle-boxes may black-hole Fast Open connections during or
	 * even after the handshake. Be extremely conservative and pause
	 * Fast Open globally after hitting the third consecutive timeout or
	 * exceeding the configured timeout limit.
	 */
	if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
	    (timeouts == 2 || (timeouts < 2 && expired))) {
		tcp_fastopen_active_disable(sk);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
	}
}