Lines matching defs:icsk in net/ipv4/tcp_timer.c (Linux kernel TCP timers). Only the lines containing a match are shown, so some statements appear truncated mid-expression; the number before each match is its line in the kernel source.
28 struct inet_connection_sock *icsk = inet_csk(sk);
33 user_timeout = READ_ONCE(icsk->icsk_user_timeout);
35 return icsk->icsk_rto;
41 return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
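Lines 28-41 fall in tcp_clamp_rto_to_user_timeout(): when TCP_USER_TIMEOUT is set on the socket, the next retransmission timeout is capped so the timer never fires past the user's deadline. Below is a minimal userspace sketch of that clamping, assuming HZ=1000 so milliseconds and jiffies coincide; clamp_rto_to_user_timeout(), user_timeout_ms and elapsed_ms are illustrative names, not the kernel's.

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch: cap the retransmission timeout (in jiffies) so it does not
     * overshoot what remains of a user-supplied timeout (in milliseconds).
     * Assumes HZ = 1000; the kernel converts with msecs_to_jiffies().
     */
    static uint32_t clamp_rto_to_user_timeout(uint32_t rto_jiffies,
                                              uint32_t user_timeout_ms,
                                              uint32_t elapsed_ms)
    {
        if (!user_timeout_ms)              /* no TCP_USER_TIMEOUT set */
            return rto_jiffies;

        int32_t remaining = (int32_t)(user_timeout_ms - elapsed_ms);
        if (remaining <= 0)                /* deadline passed: fire at once */
            return 1;

        uint32_t remaining_jiffies = (uint32_t)remaining; /* HZ == 1000 */
        return rto_jiffies < remaining_jiffies ? rto_jiffies : remaining_jiffies;
    }

    int main(void)
    {
        /* 200-tick RTO, 5s user timeout, 4.9s already elapsed: capped to 100. */
        printf("%u\n", clamp_rto_to_user_timeout(200, 5000, 4900));
        return 0;
    }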
46 struct inet_connection_sock *icsk = inet_csk(sk);
50 user_timeout = READ_ONCE(icsk->icsk_user_timeout);
51 if (!user_timeout || !icsk->icsk_probes_tstamp)
54 elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;
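Lines 46-54 are the zero-window-probe counterpart, tcp_clamp_probe0_to_user_timeout(). The elapsed computation on line 54 leans on unsigned 32-bit arithmetic: subtracting two jiffies samples stays correct even across counter wraparound. A small sketch of that property (jiffies32_elapsed() is an illustrative name):

    #include <stdint.h>
    #include <assert.h>

    /* Sketch: elapsed ticks between two 32-bit jiffies samples. Unsigned
     * subtraction gives the right answer even if the counter wrapped
     * between the samples, as long as the real gap is below 2^31 ticks.
     */
    static uint32_t jiffies32_elapsed(uint32_t now, uint32_t then)
    {
        return now - then; /* well-defined modulo 2^32 */
    }

    int main(void)
    {
        /* 'then' just before wraparound, 'now' just after: still 10 ticks. */
        assert(jiffies32_elapsed(4u, UINT32_MAX - 5u) == 10u);
        return 0;
    }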
162 static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
171 if (!icsk->icsk_mtup.enabled) {
172 icsk->icsk_mtup.enabled = 1;
173 icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
175 mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
179 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
181 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
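Lines 162-181 are tcp_mtu_probing(), run from the retransmit path when repeated timeouts suggest an ICMP black hole: MTU probing is switched on, the MSS derived from the current search_low is halved, clamped between the configured probe floor and net.ipv4.tcp_base_mss, and converted back to an MTU before tcp_sync_mss() is re-run. A sketch of that arithmetic, with the MTU/MSS conversion reduced to a fixed 40-byte header overhead (the kernel's tcp_mtu_to_mss()/tcp_mss_to_mtu() also account for TCP options and L3 specifics):

    #include <stdio.h>

    #define HDR_OVERHEAD 40   /* illustrative IPv4 + TCP header bytes */
    #define BASE_MSS     1024 /* default of net.ipv4.tcp_base_mss */
    #define PROBE_FLOOR  48   /* default of net.ipv4.tcp_mtu_probe_floor */

    static int mtu_to_mss(int mtu) { return mtu - HDR_OVERHEAD; }
    static int mss_to_mtu(int mss) { return mss + HDR_OVERHEAD; }

    int main(void)
    {
        int search_low = 1500;                 /* current probe floor as an MTU */
        int mss = mtu_to_mss(search_low) >> 1; /* halve the derived MSS */

        if (mss > BASE_MSS)                    /* cap at tcp_base_mss... */
            mss = BASE_MSS;
        if (mss < PROBE_FLOOR)                 /* ...but never below the floor */
            mss = PROBE_FLOOR;

        search_low = mss_to_mtu(mss);
        printf("new search_low = %d\n", search_low); /* 770 */
        return 0;
    }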
235 struct inet_connection_sock *icsk = inet_csk(sk);
242 if (icsk->icsk_retransmits)
245 retry_until = READ_ONCE(icsk->icsk_syn_retries) ? :
252 expired = icsk->icsk_retransmits >= max_retransmits;
256 tcp_mtu_probing(icsk, sk);
263 const bool alive = icsk->icsk_rto < TCP_RTO_MAX;
275 READ_ONCE(icsk->icsk_user_timeout));
280 icsk->icsk_retransmits,
281 icsk->icsk_rto, (int)expired);
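The matches at 235-281 sit in tcp_write_timeout(), the give-up decision for a retransmitting socket: SYNs use the icsk_syn_retries budget (line 245 is truncated mid-statement; the fallback is a sysctl), established flows compare icsk_retransmits against the retries limits (252), tcp_mtu_probing() is kicked off along the way (256), and alive (263) records whether exponential backoff has already saturated at TCP_RTO_MAX. A flattened sketch of the expiry test, with hypothetical plain parameters standing in for the sock/sysctl plumbing:

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch: the connection expires once the number of retransmissions
     * reaches the applicable retry budget. retries2 stands in for
     * net.ipv4.tcp_retries2 (default 15).
     */
    static bool write_timeout_expired(unsigned int retransmits,
                                      bool is_syn,
                                      unsigned int syn_retries,
                                      unsigned int retries2)
    {
        unsigned int retry_until = is_syn ? syn_retries : retries2;
        return retransmits >= retry_until;
    }

    int main(void)
    {
        printf("%d\n", write_timeout_expired(6, true, 6, 15));  /* 1: give up */
        printf("%d\n", write_timeout_expired(6, false, 6, 15)); /* 0: keep trying */
        return 0;
    }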
300 struct inet_connection_sock *icsk = inet_csk(sk);
313 if (!(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
316 if (time_after(icsk->icsk_ack.timeout, jiffies)) {
317 sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
320 icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
325 icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
331 icsk->icsk_ack.ato = TCP_ATO_MIN;
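300-331 are tcp_delack_timer_handler(): if the ICSK_ACK_TIMER flag is clear there is nothing to do (313); if the deadline is still ahead the timer is simply re-armed (316-317); otherwise the flag is cleared (320) and, when a delayed ACK was missed outside pingpong (interactive) mode, the ACK-timeout estimate inflates exponentially, bounded by the RTO (325), while pingpong mode drops it back to TCP_ATO_MIN (331). A sketch of the inflation, assuming HZ=1000 so TCP_ATO_MIN comes out to 40 ticks:

    #include <stdio.h>

    #define TCP_ATO_MIN 40 /* illustrative: HZ/25 at HZ=1000 */

    int main(void)
    {
        unsigned int ato = TCP_ATO_MIN, rto = 200;

        /* Each missed delayed ACK doubles ato, capped by the RTO. */
        for (int i = 0; i < 5; i++) {
            ato = ato << 1;
            if (ato > rto)
                ato = rto;
            printf("ato = %u\n", ato); /* 80, 160, 200, 200, 200 */
        }
        return 0;
    }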
351 struct inet_connection_sock *icsk =
352 from_timer(icsk, t, icsk_delack_timer);
353 struct sock *sk = &icsk->icsk_inet.sk;
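351-353 show the from_timer() idiom used by every one of these timers: the callback receives only the struct timer_list pointer and recovers the enclosing inet_connection_sock by container_of() arithmetic, then takes the embedded struct sock. The same three lines recur for icsk_retransmit_timer at 679-681 below. A self-contained sketch of the mechanism (struct conn and timer_fired() are invented stand-ins):

    #include <stddef.h>
    #include <stdio.h>

    /* Minimal container_of: recover the enclosing struct from a pointer
     * to one of its members -- what from_timer() expands to.
     */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct timer_list { int dummy; };

    struct conn {
        int id;
        struct timer_list delack_timer;
    };

    static void timer_fired(struct timer_list *t)
    {
        struct conn *c = container_of(t, struct conn, delack_timer);
        printf("timer for conn %d\n", c->id);
    }

    int main(void)
    {
        struct conn c = { .id = 42 };
        timer_fired(&c.delack_timer); /* prints "timer for conn 42" */
        return 0;
    }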
370 struct inet_connection_sock *icsk = inet_csk(sk);
376 icsk->icsk_probes_out = 0;
377 icsk->icsk_probes_tstamp = 0;
389 if (!icsk->icsk_probes_tstamp) {
390 icsk->icsk_probes_tstamp = tcp_jiffies32;
392 u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);
395 (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
401 const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
404 if (!alive && icsk->icsk_backoff >= max_probes)
410 if (icsk->icsk_probes_out >= max_probes) {
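370-410 are tcp_probe_timer(), which drives zero-window probes: the counters reset once progress is made (376-377), the first probe stamps icsk_probes_tstamp (389-390), a user timeout is checked with the same wraparound-safe s32 cast (392-395), and liveness is judged by whether inet_csk_rto_backoff() has saturated at TCP_RTO_MAX (401); a dead, fully backed-off socket is killed early (404), otherwise the budget is icsk_probes_out against max_probes (410). A sketch of the backoff helper, simplified from the kernel's 64-bit version:

    #include <stdio.h>

    #define TCP_RTO_MAX (120u * 1000u) /* illustrative: 120s in ms at HZ=1000 */

    /* Sketch of inet_csk_rto_backoff(): left-shift the base RTO by the
     * backoff count, saturating at the ceiling (shift overflow guarded).
     */
    static unsigned int rto_backoff(unsigned int rto, unsigned int backoff,
                                    unsigned int max)
    {
        if (backoff >= 32 || rto > (max >> backoff))
            return max;
        return rto << backoff;
    }

    int main(void)
    {
        /* 200ms base RTO: still below the cap after 5 backoffs (6.4s)... */
        printf("%u\n", rto_backoff(200, 5, TCP_RTO_MAX));
        /* ...saturated ("not alive") after 10 (204.8s > 120s). */
        printf("%u\n", rto_backoff(200, 10, TCP_RTO_MAX));
        return 0;
    }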
424 struct inet_connection_sock *icsk = inet_csk(sk);
433 max_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
441 if (icsk->icsk_retransmits == 1)
450 icsk->icsk_retransmits++;
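424-450 are in the Fast Open SYN-ACK retransmit path (tcp_fastopen_synack_timer() in this vintage of the file): line 433 is truncated at the GNU C "a ?: b" operator, which yields the per-socket icsk_syn_retries when nonzero and otherwise falls through to the tcp_synack_retries sysctl; the first retransmit also enters loss state (441) before the counter is bumped (450). A sketch of the ?: fallback idiom (a GNU extension, so compile with gcc or clang):

    #include <stdio.h>

    #define SYSCTL_TCP_SYNACK_RETRIES 5 /* stand-in for the net.ipv4 sysctl */

    int main(void)
    {
        unsigned int per_socket = 0; /* 0 = "no per-socket override set" */

        /* GNU C "a ?: b" evaluates a once and yields it if nonzero,
         * otherwise b -- the fallback idiom on line 433.
         */
        unsigned int max_retries = per_socket ?: SYSCTL_TCP_SYNACK_RETRIES + 1;
        printf("max_retries = %u\n", max_retries); /* 6 */
        return 0;
    }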
489 struct inet_connection_sock *icsk = inet_csk(sk);
555 if (icsk->icsk_retransmits == 0) {
558 if (icsk->icsk_ca_state == TCP_CA_Recovery) {
563 } else if (icsk->icsk_ca_state == TCP_CA_Loss) {
565 } else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
578 icsk->icsk_retransmits++;
604 icsk->icsk_backoff++;
619 icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
620 icsk->icsk_backoff = 0;
621 icsk->icsk_rto = clamp(__tcp_set_rto(tp),
625 icsk->icsk_backoff >
630 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
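The largest cluster, 489-630, is tcp_retransmit_timer(): the first firing classifies the failure by congestion state for MIB accounting (555-565), each run bumps icsk_retransmits (578) and icsk_backoff (604), and the RTO doubles toward TCP_RTO_MAX (630) unless the thin-stream exception holds: within TCP_THIN_LINEAR_RETRIES the backoff resets and the RTO is recomputed from the measured RTT (619-621), giving linear rather than exponential retries. A runnable sketch contrasting the two regimes (srtt_rto stands in for the __tcp_set_rto() result):

    #include <stdio.h>

    #define TCP_RTO_MAX             (120u * 1000u) /* illustrative ms, HZ=1000 */
    #define TCP_THIN_LINEAR_RETRIES 6

    int main(void)
    {
        unsigned int srtt_rto = 300; /* RTO recomputed from the smoothed RTT */
        unsigned int rto = srtt_rto;
        int thin_stream = 1;         /* flip to 0 for pure exponential backoff */

        for (unsigned int retransmits = 1; retransmits <= 9; retransmits++) {
            if (thin_stream && retransmits <= TCP_THIN_LINEAR_RETRIES) {
                rto = srtt_rto;        /* linear: keep re-probing at ~RTT */
            } else {
                rto = rto << 1;        /* exponential doubling... */
                if (rto > TCP_RTO_MAX)
                    rto = TCP_RTO_MAX; /* ...capped at TCP_RTO_MAX */
            }
            printf("retransmit %u: rto = %u ms\n", retransmits, rto);
        }
        return 0;
    }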
644 struct inet_connection_sock *icsk = inet_csk(sk);
648 !icsk->icsk_pending)
651 if (time_after(icsk->icsk_timeout, jiffies)) {
652 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
657 event = icsk->icsk_pending;
667 icsk->icsk_pending = 0;
671 icsk->icsk_pending = 0;
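644-671 are tcp_write_timer_handler(), which demultiplexes the single retransmit timer: bail out if nothing is pending (648), re-arm if the deadline is still in the future (651-652), then dispatch on the pending event and clear icsk_pending (657-671). The deadline check uses the kernel's time_after(), whose signed-subtraction trick stays correct across jiffies wraparound; a sketch:

    #include <stdint.h>
    #include <assert.h>

    /* Sketch of time_after(a, b): true if a is later than b, correct
     * across 32-bit wraparound thanks to the signed subtraction.
     */
    static int time_after32(uint32_t a, uint32_t b)
    {
        return (int32_t)(b - a) < 0;
    }

    int main(void)
    {
        assert(time_after32(100, 50));            /* plainly later */
        assert(time_after32(5, UINT32_MAX - 5));  /* later across the wrap */
        assert(!time_after32(UINT32_MAX - 5, 5)); /* and the converse */
        return 0;
    }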
679 struct inet_connection_sock *icsk =
680 from_timer(icsk, t, icsk_retransmit_timer);
681 struct sock *sk = &icsk->icsk_inet.sk;
719 struct inet_connection_sock *icsk = inet_csk(sk);
763 u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);
770 icsk->icsk_probes_out > 0) ||
772 icsk->icsk_probes_out >= keepalive_probes(tp))) {
778 icsk->icsk_probes_out++;
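The tail matches, 719-778, are tcp_keepalive_timer(): a nonzero user timeout overrides the keepalive policy once at least one probe is outstanding (763-770), otherwise the socket is killed only after icsk_probes_out reaches keepalive_probes past the idle time (772); each transmitted probe increments icsk_probes_out (778). A deliberately flattened sketch of the kill decision, with all times in milliseconds (the kernel mixes jiffies and msecs and nests the idle-time check around this test):

    #include <stdbool.h>
    #include <stdio.h>

    static bool keepalive_expired(unsigned int user_timeout_ms,
                                  unsigned int elapsed_ms,
                                  unsigned int probes_out,
                                  unsigned int keepalive_time_ms,
                                  unsigned int keepalive_probes)
    {
        if (user_timeout_ms) /* TCP_USER_TIMEOUT takes precedence */
            return elapsed_ms >= user_timeout_ms && probes_out > 0;
        return elapsed_ms >= keepalive_time_ms &&
               probes_out >= keepalive_probes;
    }

    int main(void)
    {
        /* TCP_USER_TIMEOUT=10s, 11s idle, one unanswered probe: kill. */
        printf("%d\n", keepalive_expired(10000, 11000, 1, 7200000, 9));
        /* Default policy: 9 unanswered probes past the idle time: kill. */
        printf("%d\n", keepalive_expired(0, 7200001, 9, 7200000, 9));
        return 0;
    }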