Searched refs:reordering (Results 1 - 13 of 13) sorted by relevance
/kernel/linux/linux-5.10/net/ipv4/

tcp_recovery.c
     15  /* If reordering has not been observed, be aggressive during          in tcp_rack_reo_wnd()
     21  if (tp->sacked_out >= tp->reordering &&                               in tcp_rack_reo_wnd()
     27  /* To be more reordering resilient, allow min_rtt/4 settling delay.   in tcp_rack_reo_wnd()
     28   * Use min_rtt instead of the smoothed RTT because reordering is      in tcp_rack_reo_wnd()
     55   * is being more resilient to reordering by simply allowing some
     87   * the recent RTT plus the reordering window.                         in tcp_rack_detect_loss()
    151  /* We have waited long enough to accommodate reordering. Mark the expired
    177   * due to reordering delay longer than reo_wnd.
    225  if ((state < TCP_CA_Recovery && tp->sacked_out >= tp->reordering) ||  in tcp_newreno_mark_lost()
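The tcp_rack_reo_wnd() hits above outline RACK's loss-detection wait: with no reordering observed yet, fall back to the classic DUPACK-threshold behaviour; otherwise allow roughly min_rtt/4 of settling delay, scaled by reo_wnd_steps. A minimal standalone sketch of that computation, assuming the field meanings shown in the snippets (plain C, not the kernel's actual code):

    #include <stdint.h>

    /* Simplified model of RACK's reordering window, in microseconds.
     * Inputs mirror the tcp_sock fields visible in the hits above.
     */
    uint32_t rack_reo_wnd(uint32_t min_rtt_us,     /* path minimum RTT */
                          uint32_t srtt_us,        /* smoothed RTT */
                          uint32_t reo_wnd_steps,  /* grown on DSACKs */
                          int reord_seen,          /* reordering observed? */
                          uint32_t sacked_out,
                          uint32_t reordering)     /* dupthresh metric */
    {
        /* No reordering seen: be aggressive once the classic DUPACK
         * threshold (sacked_out >= reordering) is met.
         */
        if (!reord_seen && sacked_out >= reordering)
            return 0;

        /* Allow min_rtt/4 of settling delay per step, but never wait
         * longer than the smoothed RTT.
         */
        uint32_t wnd = (min_rtt_us >> 2) * reo_wnd_steps;
        return wnd < srtt_us ? wnd : srtt_us;
    }

Using min_rtt rather than srtt matters because reordering is typically a path property, while srtt also absorbs queuing delay; the comment at lines 27-28 makes exactly this point.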
tcp_metrics.c
    450  if (val < tp->reordering &&          in tcp_update_metrics()
    451      tp->reordering !=                in tcp_update_metrics()
    454      tp->reordering);                 in tcp_update_metrics()
    498  if (val && tp->reordering != val)    in tcp_init_metrics()
    499      tp->reordering = val;            in tcp_init_metrics()
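These hits show the learned reordering degree being cached per destination when a connection closes (tcp_update_metrics) and restored when a new one starts (tcp_init_metrics), so a fresh connection need not rediscover a reordering-prone path. A rough standalone model of that round-trip; the cache structure and naming here are illustrative assumptions, not the kernel's tcp_metrics implementation:

    #include <stdint.h>

    #define TCP_REORDERING_DEFAULT 3  /* net.ipv4.tcp_reordering default */

    /* Hypothetical per-destination slot standing in for tcp_metrics. */
    struct dst_metrics {
        uint32_t reordering;  /* 0 = nothing cached */
    };

    /* On close: remember a non-default reordering degree for the path. */
    void metrics_update(struct dst_metrics *m, uint32_t conn_reordering)
    {
        if (conn_reordering != TCP_REORDERING_DEFAULT)
            m->reordering = conn_reordering;
    }

    /* On connect: start from the cached value when one exists. */
    uint32_t metrics_init(const struct dst_metrics *m)
    {
        return m->reordering ? m->reordering : TCP_REORDERING_DEFAULT;
    }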
tcp_input.c
    432  nr_segs = max_t(u32, nr_segs, tp->reordering + 1);      in tcp_sndbuf_expand()
   1026  /* It's reordering when higher sequence was delivered (i.e. sacked) before
   1027   * some lower never-retransmitted sequence ("low_seq"). The maximum reordering
   1028   * distance is approximated in full-mss packet distance ("reordering").
   1042  if ((metric > tp->reordering * mss) && mss) {            in tcp_check_sack_reordering()
   1046      tp->reordering,                                      in tcp_check_sack_reordering()
   1051  tp->reordering = min_t(u32, (metric + mss - 1) / mss,    in tcp_check_sack_reordering()
   1155   * ever retransmitted -> reordering. Alas, we cannot use it
   1158   * for retransmitted and already SACKed segment -> reordering..
   1205   * fragmentation and packet reordering pas
   2141  u8 reordering;                                           tcp_enter_loss() local
[all...]
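tcp_check_sack_reordering() turns the byte distance between a SACKed high sequence and a never-retransmitted lower sequence into a packet count, rounding up per MSS and clamping to a maximum (mirroring net.ipv4.tcp_max_reordering, default 300). A sketch of just that arithmetic, with names of my own choosing:

    #include <stdint.h>

    /* Grow the reordering metric from an observed reordering distance
     * of `metric` bytes, following the update visible above.
     */
    uint32_t update_reordering(uint32_t cur_reordering, uint32_t metric,
                               uint32_t mss, uint32_t max_reordering)
    {
        /* Only grow, and only when the distance exceeds the current
         * estimate; guard against mss == 0.
         */
        if (!mss || metric <= cur_reordering * mss)
            return cur_reordering;

        uint32_t pkts = (metric + mss - 1) / mss;  /* round up to packets */
        return pkts < max_reordering ? pkts : max_reordering;
    }

With mss = 1448 and a 10000-byte displacement this yields ceil(10000 / 1448) = 7, so the sender would subsequently tolerate seven duplicate ACKs before fast-retransmitting. Note the hit at line 432: the same metric also feeds send-buffer sizing via tp->reordering + 1.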
tcp.c
    450  tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering);  in tcp_init_sock()
   3585  info->tcpi_reordering = tp->reordering;                                 in tcp_get_info()
   3743  nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering);                 in tcp_get_timestamping_opt_stats()
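tcp_get_info() exports the live metric as tcpi_reordering, so userspace can observe it on any connected socket through the standard (Linux-specific) TCP_INFO socket option, e.g.:

    #include <stdio.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>   /* struct tcp_info, TCP_INFO */
    #include <sys/socket.h>

    /* Print the kernel's current reordering estimate for a connected fd. */
    void print_reordering(int fd)
    {
        struct tcp_info info;
        socklen_t len = sizeof(info);

        if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
            printf("tcpi_reordering: %u packets\n", info.tcpi_reordering);
    }

This is the same counter that `ss -ti` reports as reordering:N.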
tcp_output.c
   2359  size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;  in tcp_mtu_probe()
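tcp_mtu_probe() only sends a path-MTU probe when the probe can be followed by reordering + 1 further full-sized segments, enough for the probe's loss to be detected even if the trailing segments arrive out of order. A toy version of the sizing, with assumed example values:

    #include <stdint.h>

    /* Bytes that must be queued before an MTU probe may be sent,
     * per the tcp_mtu_probe() line above.
     */
    uint32_t mtu_probe_size_needed(uint32_t probe_size, uint32_t reordering,
                                   uint32_t mss_cache)
    {
        return probe_size + (reordering + 1) * mss_cache;
    }

    /* e.g. probe_size = 2896, reordering = 3, mss_cache = 1448:
     *   2896 + 4 * 1448 = 8688 bytes of queued data required.
     */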
/kernel/linux/linux-6.6/net/ipv4/
tcp_recovery.c
     10  /* If reordering has not been observed, be aggressive during          in tcp_rack_reo_wnd()
     16  if (tp->sacked_out >= tp->reordering &&                               in tcp_rack_reo_wnd()
     22  /* To be more reordering resilient, allow min_rtt/4 settling delay.   in tcp_rack_reo_wnd()
     23   * Use min_rtt instead of the smoothed RTT because reordering is      in tcp_rack_reo_wnd()
     50   * is being more resilient to reordering by simply allowing some
     82   * the recent RTT plus the reordering window.                         in tcp_rack_detect_loss()
    146  /* We have waited long enough to accommodate reordering. Mark the expired
    171   * If a DSACK is received that seems like it may have been due to reordering
    174   * due to reordering delay longer than reo_wnd.
    222  if ((state < TCP_CA_Recovery && tp->sacked_out >= tp->reordering) ||  in tcp_newreno_mark_lost()
[all...]
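The linux-6.6 copy of this logic additionally notes (line 171) that a DSACK which looks reordering-induced should widen the RACK reordering window. A standalone sketch of that adaptive idea; the step cap and decay policy below are assumptions, and the kernel's actual bookkeeping lives in tcp_rack_update_reo_wnd():

    #include <stdint.h>

    struct rack_state {
        uint8_t reo_wnd_steps;    /* multiplier applied to min_rtt/4 */
        uint8_t reo_wnd_persist;  /* recoveries left before decaying */
    };

    /* A DSACK implies we retransmitted data the receiver already had,
     * i.e. reo_wnd was too small: widen it by one step.
     */
    void rack_on_dsack(struct rack_state *r)
    {
        if (r->reo_wnd_steps < 0xFF)
            r->reo_wnd_steps++;
        r->reo_wnd_persist = 16;  /* assumed persistence, in recoveries */
    }

    /* After DSACK-free recoveries, let the window decay back to one step. */
    void rack_on_recovery_end(struct rack_state *r)
    {
        if (r->reo_wnd_persist)
            r->reo_wnd_persist--;
        else
            r->reo_wnd_steps = 1;
    }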
tcp_metrics.c
    450  if (val < tp->reordering &&          in tcp_update_metrics()
    451      tp->reordering !=                in tcp_update_metrics()
    454      tp->reordering);                 in tcp_update_metrics()
    498  if (val && tp->reordering != val)    in tcp_init_metrics()
    499      tp->reordering = val;            in tcp_init_metrics()
tcp_input.c
    440  nr_segs = max_t(u32, nr_segs, tp->reordering + 1);       in tcp_sndbuf_expand()
   1030   * DSACKs that may have been due to reordering causing RACK to trigger   in tcp_dsack_seen()
   1032   * without having seen reordering, or that match TLP probes (TLP         in tcp_dsack_seen()
   1045  /* It's reordering when higher sequence was delivered (i.e. sacked) before
   1046   * some lower never-retransmitted sequence ("low_seq"). The maximum reordering
   1047   * distance is approximated in full-mss packet distance ("reordering").
   1061  if ((metric > tp->reordering * mss) && mss) {             in tcp_check_sack_reordering()
   1065      tp->reordering,                                       in tcp_check_sack_reordering()
   1070  tp->reordering = min_t(u32, (metric + mss - 1) / mss,     in tcp_check_sack_reordering()
   1174   * ever retransmitted -> reordering
   2160  u8 reordering;                                            tcp_enter_loss() local
[all...]
tcp.c
    447  tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering);  in tcp_init_sock()
   3750  info->tcpi_reordering = tp->reordering;                                 in tcp_get_info()
   3927  nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering);                 in tcp_get_timestamping_opt_stats()
tcp_output.c
   2417  size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;  in tcp_mtu_probe()
/kernel/linux/linux-5.10/include/linux/
tcp.h
    218  u8 reo_wnd_steps;   /* Allowed reordering window */
    273  u32 reordering;     /* Packet reordering metric. */   member
    274  u32 reord_seen;     /* number of data packet reordering events */
/kernel/linux/linux-6.6/include/linux/
tcp.h
    250  u8 reo_wnd_steps;   /* Allowed reordering window */
    305  u32 reordering;     /* Packet reordering metric. */   member
    306  u32 reord_seen;     /* number of data packet reordering events */
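Both trees declare the same three tcp_sock fields: reordering is in effect an adaptive DUPACK threshold, reord_seen counts observed reordering events, and reo_wnd_steps scales the RACK window. To ground what the threshold buys, a toy model of the classic dupthresh decision (illustrative only, not kernel code):

    #include <stdbool.h>
    #include <stdint.h>

    /* A segment displaced by `reorder_depth` positions generates that
     * many duplicate ACKs before it arrives. If the threshold
     * (`reordering`, historically 3) is not above the path's typical
     * displacement, the sender fast-retransmits data that was never
     * lost; growing the metric restores tolerance.
     */
    bool spurious_fast_retransmit(uint32_t reorder_depth, uint32_t reordering)
    {
        return reorder_depth >= reordering;
    }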
/kernel/linux/common_modules/newip/third_party/linux-5.10/net/newip/
tcp_nip.c
   1392  tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering);  in tcp_nip_init_sock()
Completed in 39 milliseconds