// SPDX-License-Identifier: GPL-2.0
#include <linux/tcp.h>
#include <net/tcp.h>

static u32 tcp_rack_reo_wnd(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->reord_seen) {
		/* If reordering has not been observed, be aggressive
		 * during recovery or when starting recovery via the
		 * DUPACK threshold.
		 */
		if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery)
			return 0;

		if (tp->sacked_out >= tp->reordering &&
		    !(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
		      TCP_RACK_NO_DUPTHRESH))
			return 0;
	}

	/* To be more reordering resilient, allow min_rtt/4 settling delay.
	 * Use min_rtt instead of the smoothed RTT because reordering is
	 * often a path property and less related to queuing or delayed ACKs.
	 * Upon receiving DSACKs, linearly increase the window up to the
	 * smoothed RTT.
	 */
	return min((tcp_min_rtt(tp) >> 2) * tp->rack.reo_wnd_steps,
		   tp->srtt_us >> 3);
}

s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
{
	return tp->rack.rtt_us + reo_wnd -
	       tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
}
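/* Worked example for the two helpers above (illustrative numbers only):
 * with tcp_min_rtt(tp) == 40000 us, rack.reo_wnd_steps == 1 and
 * srtt_us == 400000 (a 50 ms smoothed RTT, stored left-shifted by 3),
 * tcp_rack_reo_wnd() returns min(40000 >> 2, 50000) = 10000 us. If the
 * latest RACK RTT sample rack.rtt_us is 42000 us and an skb was (re)sent
 * 60000 us ago, tcp_rack_skb_timeout() returns
 * 42000 + 10000 - 60000 = -8000: the skb has overstayed the reordering
 * window by 8 ms and will be marked lost.
 */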
/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK,
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and is therefore robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb, *n;
	u32 reo_wnd;

	*reo_timeout = 0;
	reo_wnd = tcp_rack_reo_wnd(sk);
	list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
				 tcp_tsorted_anchor) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		s32 remaining;

		/* Skip ones marked lost but not yet retransmitted */
		if ((scb->sacked & TCPCB_LOST) &&
		    !(scb->sacked & TCPCB_SACKED_RETRANS))
			continue;

		if (!tcp_skb_sent_after(tp->rack.mstamp,
					tcp_skb_timestamp_us(skb),
					tp->rack.end_seq, scb->end_seq))
			break;

		/* A packet is lost if it has not been s/acked beyond
		 * the recent RTT plus the reordering window.
		 */
		remaining = tcp_rack_skb_timeout(tp, skb, reo_wnd);
		if (remaining <= 0) {
			tcp_mark_skb_lost(sk, skb);
			list_del_init(&skb->tcp_tsorted_anchor);
		} else {
			/* Record maximum wait time */
			*reo_timeout = max_t(u32, *reo_timeout, remaining);
		}
	}
}

bool tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (!tp->rack.advanced)
		return false;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, &timeout);
	if (timeout) {
		timeout = usecs_to_jiffies(timeout + TCP_TIMEOUT_MIN_US);
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
	return !!timeout;
}
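/* Sketch of the resulting control flow (hypothetical values, for
 * illustration): if tcp_rack_detect_loss() leaves, say, timeout == 5000 us
 * because some skbs are still inside the reordering window,
 * tcp_rack_mark_lost() returns true and arms the REO timer (padded by
 * TCP_TIMEOUT_MIN_US before the usecs-to-jiffies conversion); when it
 * fires, tcp_rack_reo_timeout() below re-runs the detection. A false
 * return means either nothing newly (s)acked advanced RACK's state, or
 * every expired skb was already marked lost synchronously.
 */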
/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      u64 xmit_time)
{
	u32 rtt_us;

	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
	if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or the prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * The aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., the retransmission is at
		 * least an RTT later).
		 */
		return;
	}
	tp->rack.advanced = 1;
	tp->rack.rtt_us = rtt_us;
	if (tcp_skb_sent_after(xmit_time, tp->rack.mstamp,
			       end_seq, tp->rack.end_seq)) {
		tp->rack.mstamp = xmit_time;
		tp->rack.end_seq = end_seq;
	}
}

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, prior_inflight;
	u32 lost = tp->lost;

	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &timeout);
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, tp->lost - lost, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}
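/* Illustrative trace of the reo_wnd adaptation implemented below
 * (hypothetical values): reo_wnd_steps starts at 1 (one min_rtt/4 step).
 * Each DSACK arriving at least one RTT after the previous adjustment
 * bumps the steps to 2, 3, ... (capped at 0xFF, with the resulting
 * window still clamped to srtt in tcp_rack_reo_wnd()) and re-arms the
 * 16-recovery persistence counter; once that counter is drained (it is
 * decremented outside this file, per successful recovery), the steps
 * fall back to 1.
 */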
/* Update RACK's reo_wnd based on DSACKs and the number of recoveries.
 *
 * If a DSACK is received that seems like it may have been due to reordering
 * triggering fast recovery, increment reo_wnd by min_rtt/4 (upper bounded
 * by srtt), since there is a possibility that the spurious retransmission
 * was due to a reordering delay longer than reo_wnd.
 *
 * Persist the current reo_wnd value for TCP_RACK_RECOVERY_THRESH (16)
 * successful recoveries (accounts for full DSACK-based loss recovery
 * undo). After that, reset it to the default (min_rtt/4).
 *
 * reo_wnd is incremented at most once per RTT, so that the DSACK we are
 * reacting to was (approximately) caused by a spurious retransmission
 * sent after reo_wnd was last updated.
 *
 * reo_wnd is tracked in terms of steps (of min_rtt/4), rather than an
 * absolute value, to account for changes in the RTT.
 */
void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if ((READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
	     TCP_RACK_STATIC_REO_WND) ||
	    !rs->prior_delivered)
		return;

	/* Disregard DSACK if an RTT has not passed since we adjusted reo_wnd */
	if (before(rs->prior_delivered, tp->rack.last_delivered))
		tp->rack.dsack_seen = 0;

	/* Adjust the reo_wnd if an update is pending */
	if (tp->rack.dsack_seen) {
		tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
					       tp->rack.reo_wnd_steps + 1);
		tp->rack.dsack_seen = 0;
		tp->rack.last_delivered = tp->delivered;
		tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
	} else if (!tp->rack.reo_wnd_persist) {
		tp->rack.reo_wnd_steps = 1;
	}
}

/* RFC6582 NewReno recovery for non-SACK connections. It simply retransmits
 * the next unacked packet upon receiving
 * a) three or more DUPACKs to start fast recovery
 * b) an ACK acknowledging new data during fast recovery.
 */
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced)
{
	const u8 state = inet_csk(sk)->icsk_ca_state;
	struct tcp_sock *tp = tcp_sk(sk);

	if ((state < TCP_CA_Recovery && tp->sacked_out >= tp->reordering) ||
	    (state == TCP_CA_Recovery && snd_una_advanced)) {
		struct sk_buff *skb = tcp_rtx_queue_head(sk);
		u32 mss;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
			return;

		mss = tcp_skb_mss(skb);
		if (tcp_skb_pcount(skb) > 1 && skb->len > mss)
			tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
				     mss, mss, GFP_ATOMIC);

		tcp_mark_skb_lost(sk, skb);
	}
}
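/* Example of the NewReno path above (hypothetical scenario): on a
 * non-SACK flow with tp->reordering == 3, the third DUPACK pushes
 * tp->sacked_out to the threshold before CA_Recovery is entered, so the
 * head of the retransmit queue is marked lost (after being split down
 * to a single MSS if it covers several segments) and fast recovery can
 * retransmit it. While in CA_Recovery, each ACK that advances snd_una
 * marks the new queue head lost the same way, one packet per ACK.
 */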