Lines matching references to sk (net/ipv4/tcp_timer.c)

26 static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
28 struct inet_connection_sock *icsk = inet_csk(sk);
32 start_ts = tcp_sk(sk)->retrans_stamp;
36 elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
44 u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
46 struct inet_connection_sock *icsk = inet_csk(sk);
65 * @sk: The socket the error has appeared on.
70 static void tcp_write_err(struct sock *sk)
72 WRITE_ONCE(sk->sk_err, READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT);
73 sk_error_report(sk);
75 tcp_write_queue_purge(sk);
76 tcp_done(sk);
77 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
82 * @sk: pointer to current socket
104 static int tcp_out_of_resources(struct sock *sk, bool do_reset)
106 struct tcp_sock *tp = tcp_sk(sk);
115 if (READ_ONCE(sk->sk_err_soft))
118 if (tcp_check_oom(sk, shift)) {
126 tcp_send_active_reset(sk, GFP_ATOMIC);
127 tcp_done(sk);
128 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
132 if (!check_net(sock_net(sk))) {
134 tcp_done(sk);
143 * @sk: Pointer to the current socket.
146 static int tcp_orphan_retries(struct sock *sk, bool alive)
148 int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */
151 if (READ_ONCE(sk->sk_err_soft) && !alive)
162 static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
164 const struct net *net = sock_net(sk);
175 mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
179 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
181 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
184 static unsigned int tcp_model_timeout(struct sock *sk,
200 * @sk: The current socket
211 static bool retransmits_timed_out(struct sock *sk,
217 if (!inet_csk(sk)->icsk_retransmits)
220 start_ts = tcp_sk(sk)->retrans_stamp;
224 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
225 rto_base = tcp_timeout_init(sk);
226 timeout = tcp_model_timeout(sk, boundary, rto_base);
229 return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
233 static int tcp_write_timeout(struct sock *sk)
235 struct inet_connection_sock *icsk = inet_csk(sk);
236 struct tcp_sock *tp = tcp_sk(sk);
237 struct net *net = sock_net(sk);
241 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
243 __dst_negative_advice(sk);
249 if (sk->sk_state == TCP_SYN_SENT)
254 if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0)) {
256 tcp_mtu_probing(icsk, sk);
258 __dst_negative_advice(sk);
262 if (sock_flag(sk, SOCK_DEAD)) {
265 retry_until = tcp_orphan_retries(sk, alive);
267 !retransmits_timed_out(sk, retry_until, 0);
269 if (tcp_out_of_resources(sk, do_reset))
274 expired = retransmits_timed_out(sk, retry_until,
276 tcp_fastopen_active_detect_blackhole(sk, expired);
279 tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
285 tcp_write_err(sk);
289 if (sk_rethink_txhash(sk)) {
291 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH);
298 void tcp_delack_timer_handler(struct sock *sk)
300 struct inet_connection_sock *icsk = inet_csk(sk);
301 struct tcp_sock *tp = tcp_sk(sk);
303 if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
309 tcp_sack_compress_send_ack(sk);
317 sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
322 if (inet_csk_ack_scheduled(sk)) {
323 if (!inet_csk_in_pingpong_mode(sk)) {
330 inet_csk_exit_pingpong_mode(sk);
334 tcp_send_ack(sk);
335 __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
353 struct sock *sk = &icsk->icsk_inet.sk;
355 bh_lock_sock(sk);
356 if (!sock_owned_by_user(sk)) {
357 tcp_delack_timer_handler(sk);
359 __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
361 if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
362 sock_hold(sk);
364 bh_unlock_sock(sk);
365 sock_put(sk);
368 static void tcp_probe_timer(struct sock *sk)
370 struct inet_connection_sock *icsk = inet_csk(sk);
371 struct sk_buff *skb = tcp_send_head(sk);
372 struct tcp_sock *tp = tcp_sk(sk);
399 max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2);
400 if (sock_flag(sk, SOCK_DEAD)) {
403 max_probes = tcp_orphan_retries(sk, alive);
406 if (tcp_out_of_resources(sk, true))
411 abort: tcp_write_err(sk);
414 tcp_send_probe0(sk);
420 * sk here is the child socket, not the parent (listener) socket.
422 static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
424 struct inet_connection_sock *icsk = inet_csk(sk);
425 struct tcp_sock *tp = tcp_sk(sk);
434 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_synack_retries) + 1;
437 tcp_write_err(sk);
442 tcp_enter_loss(sk);
448 inet_rtx_syn_ack(sk, req);
453 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
457 static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
460 const struct tcp_sock *tp = tcp_sk(sk);
464 rcv_delta = inet_csk(sk)->icsk_timeout - tp->rcv_tstamp;
476 * @sk: Pointer to the current socket.
485 void tcp_retransmit_timer(struct sock *sk)
487 struct tcp_sock *tp = tcp_sk(sk);
488 struct net *net = sock_net(sk);
489 struct inet_connection_sock *icsk = inet_csk(sk);
494 lockdep_sock_is_held(sk));
496 WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
497 sk->sk_state != TCP_FIN_WAIT1);
498 tcp_fastopen_synack_timer(sk, req);
508 skb = tcp_rtx_queue_head(sk);
514 if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
515 !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
521 struct inet_sock *inet = inet_sk(sk);
525 if (sk->sk_family == AF_INET) {
533 else if (sk->sk_family == AF_INET6) {
535 &sk->sk_v6_daddr, ntohs(inet->inet_dport),
541 if (tcp_rtx_probe0_timed_out(sk, skb)) {
542 tcp_write_err(sk);
545 tcp_enter_loss(sk);
546 tcp_retransmit_skb(sk, skb, 1);
547 __sk_dst_reset(sk);
551 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
552 if (tcp_write_timeout(sk))
573 __NET_INC_STATS(sock_net(sk), mib_idx);
576 tcp_enter_loss(sk);
579 if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
583 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
616 if (sk->sk_state == TCP_ESTABLISHED &&
622 tcp_rto_min(sk),
624 } else if (sk->sk_state != TCP_SYN_SENT ||
632 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
633 tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
634 if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1) + 1, 0))
635 __sk_dst_reset(sk);
642 void tcp_write_timer_handler(struct sock *sk)
644 struct inet_connection_sock *icsk = inet_csk(sk);
647 if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
652 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
656 tcp_mstamp_refresh(tcp_sk(sk));
661 tcp_rack_reo_timeout(sk);
664 tcp_send_loss_probe(sk);
668 tcp_retransmit_timer(sk);
672 tcp_probe_timer(sk);
681 struct sock *sk = &icsk->icsk_inet.sk;
683 bh_lock_sock(sk);
684 if (!sock_owned_by_user(sk)) {
685 tcp_write_timer_handler(sk);
688 if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
689 sock_hold(sk);
691 bh_unlock_sock(sk);
692 sock_put(sk);
703 void tcp_set_keepalive(struct sock *sk, int val)
705 if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
708 if (val && !sock_flag(sk, SOCK_KEEPOPEN))
709 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
711 inet_csk_delete_keepalive_timer(sk);
718 struct sock *sk = from_timer(sk, t, sk_timer);
719 struct inet_connection_sock *icsk = inet_csk(sk);
720 struct tcp_sock *tp = tcp_sk(sk);
724 bh_lock_sock(sk);
725 if (sock_owned_by_user(sk)) {
727 inet_csk_reset_keepalive_timer (sk, HZ/20);
731 if (sk->sk_state == TCP_LISTEN) {
737 if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
739 const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;
742 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
746 tcp_send_active_reset(sk, GFP_ATOMIC);
750 if (!sock_flag(sk, SOCK_KEEPOPEN) ||
751 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
757 if (tp->packets_out || !tcp_write_queue_empty(sk))
773 tcp_send_active_reset(sk, GFP_ATOMIC);
774 tcp_write_err(sk);
777 if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
792 inet_csk_reset_keepalive_timer (sk, elapsed);
796 tcp_done(sk);
799 bh_unlock_sock(sk);
800 sock_put(sk);
806 struct sock *sk = (struct sock *)tp;
808 bh_lock_sock(sk);
809 if (!sock_owned_by_user(sk)) {
816 tcp_send_ack(sk);
820 &sk->sk_tsq_flags))
821 sock_hold(sk);
823 bh_unlock_sock(sk);
825 sock_put(sk);
830 void tcp_init_xmit_timers(struct sock *sk)
832 inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
834 hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
836 tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;
838 hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC,
840 tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick;
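
The timer paths listed above are driven by per-socket settings that userspace controls through setsockopt(). As a point of reference only (this sketch is not part of the listing, the helper name and values are purely illustrative), the example below sets the two families of knobs most relevant here: TCP_USER_TIMEOUT, which tcp_clamp_rto_to_user_timeout() and tcp_clamp_probe0_to_user_timeout() use to bound the retransmit and zero-window-probe timers, and SO_KEEPALIVE together with TCP_KEEPIDLE/TCP_KEEPINTVL/TCP_KEEPCNT, which feed tcp_set_keepalive() and tcp_keepalive_timer().

/*
 * Illustrative userspace sketch: configure the per-socket timeout knobs
 * that the kernel timer code above consults.  Option names are the real
 * Linux socket options; the helper name and numeric values are examples.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int tune_tcp_timeouts(int fd)
{
	unsigned int user_timeout_ms = 30000;	/* abort after 30 s of unacked data */
	int keepalive = 1;			/* enable keepalive probes */
	int keepidle = 60;			/* idle seconds before the first probe */
	int keepintvl = 10;			/* seconds between probes */
	int keepcnt = 5;			/* unanswered probes before the connection is dropped */

	if (setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
		       &user_timeout_ms, sizeof(user_timeout_ms)) < 0)
		return -1;
	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE,
		       &keepalive, sizeof(keepalive)) < 0)
		return -1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE,
		       &keepidle, sizeof(keepidle)) < 0)
		return -1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL,
		       &keepintvl, sizeof(keepintvl)) < 0)
		return -1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT,
		       &keepcnt, sizeof(keepcnt)) < 0)
		return -1;
	return 0;
}

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0 || tune_tcp_timeouts(fd) < 0) {
		perror("tcp timeout setup");
		return 1;
	}
	printf("TCP_USER_TIMEOUT and keepalive options set on fd %d\n", fd);
	close(fd);
	return 0;
}

Note that TCP_USER_TIMEOUT is specified in milliseconds, while TCP_KEEPIDLE and TCP_KEEPINTVL take seconds and TCP_KEEPCNT is a probe count.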