/kernel/linux/linux-5.10/net/ipv4/

tcp_bic.c
    51:  u32 cnt;   /* increase cwnd by 1 after ACKs */
  in bictcp_update():
    83:  static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
    85:  if (ca->last_cwnd == cwnd &&
    89:  ca->last_cwnd = cwnd;
    96:  if (cwnd <= low_window) {
    97:  ca->cnt = cwnd;
   102:  if (cwnd < ca->last_max_cwnd) {
   103:  __u32 dist = (ca->last_max_cwnd - cwnd)
   108:  ca->cnt = cwnd / max_increment;
   111:  ca->cnt = (cwnd * smooth_part) …
  [all...]

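The hits above are the heart of BIC's increase policy: ca->cnt is the number of ACKs that must arrive before cwnd grows by one segment, chosen from how far cwnd sits below the last maximum window. Below is a minimal user-space sketch of that binary-increase choice; the constants stand in for the kernel's low_window, max_increment and smooth_part module parameters and its BICTCP_B divisor, and the slow-start region above the old maximum is collapsed into a single linear branch, so this is illustrative rather than the kernel function itself.

#include <stdio.h>

/* Illustrative stand-ins for the tcp_bic.c tunables. */
#define LOW_WINDOW     14   /* below this, behave like plain Reno      */
#define MAX_INCREMENT  16   /* cap on the additive increase per RTT    */
#define SMOOTH_PART    20   /* smoothing factor near the old maximum   */
#define BICTCP_B        4   /* binary-search step divisor              */

/* Return "cnt": how many ACKs are needed before cwnd += 1. */
static unsigned int bic_cnt(unsigned int cwnd, unsigned int last_max_cwnd)
{
	if (cwnd <= LOW_WINDOW)                 /* small windows: Reno-like */
		return cwnd;

	if (cwnd < last_max_cwnd) {             /* binary search towards last max */
		unsigned int dist = (last_max_cwnd - cwnd) / BICTCP_B;

		if (dist > MAX_INCREMENT)       /* far away: linear increase */
			return cwnd / MAX_INCREMENT;
		else if (dist <= 1)             /* very close: grow gently */
			return (cwnd * SMOOTH_PART) / BICTCP_B;
		else                            /* binary search increase */
			return cwnd / dist;
	}

	/* Above the old maximum: simplified linear probing (the kernel has
	 * an extra slow-start region here that this sketch omits). */
	return cwnd / MAX_INCREMENT;
}

int main(void)
{
	unsigned int cwnd;

	for (cwnd = 10; cwnd <= 100; cwnd += 10)
		printf("cwnd=%3u last_max=80 -> +1 segment every %u ACKs\n",
		       cwnd, bic_cnt(cwnd, 80));
	return 0;
}
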
tcp_cubic.c
    79:  MODULE_PARM_DESC(hystart_low_window, "lower bound cwnd for hybrid slow start");
    85:  u32 cnt;        /* increase cwnd by 1 after ACKs */
    95:  u32 tcp_cwnd;   /* estimated tcp cwnd */
  in bictcp_cwnd_event():
   159:  * Shift epoch_start to keep cwnd growth to cubic curve.
  in bictcp_update():
   221:  static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)
   228:  if (ca->last_cwnd == cwnd &&
   233:  * On all cwnd reduction events, ca->epoch_start is set to 0,
   239:  ca->last_cwnd = cwnd;
   245:  ca->tcp_cwnd = cwnd;   /* syn with cubic */
   247:  if (ca->last_max_cwnd <= cwnd) {
  [all...]

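tcp_cubic.c grows the window along the cubic curve W(t) = C*(t - K)^3 + W_max, where K is the time at which the curve climbs back to the pre-loss maximum; ca->cnt again translates the per-RTT target into "ACKs per +1 segment". The sketch below plots that curve in floating point with assumed constants C = 0.4 and beta = 0.7; the kernel computes the same thing in scaled fixed-point units, so treat this only as a picture of the shape.

#include <math.h>
#include <stdio.h>

/* Assumed CUBIC constants (the kernel encodes these as scaled integers). */
#define CUBIC_C    0.4
#define CUBIC_BETA 0.7

/* Target congestion window t seconds after the last loss, given the
 * window w_max that was in use when the loss happened. */
static double cubic_target(double w_max, double t)
{
	/* K: time for the curve to return to w_max after the
	 * multiplicative decrease to beta * w_max. */
	double k = cbrt(w_max * (1.0 - CUBIC_BETA) / CUBIC_C);

	return CUBIC_C * pow(t - k, 3.0) + w_max;
}

int main(void)
{
	double t;

	/* Concave growth back towards W_max = 100 segments, then convex
	 * probing beyond it. At t = 0 the value is beta * W_max = 70. */
	for (t = 0.0; t <= 10.0; t += 1.0)
		printf("t=%4.1fs  target cwnd ~ %6.1f segments\n",
		       t, cubic_target(100.0, t));
	return 0;
}

(Build with -lm; the output shows the concave-then-convex window growth CUBIC is named for.)
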
tcp_bbr.c
    10:  * cwnd = max(cwnd_gain * bottleneck_bandwidth * min_rtt, 4)
   110:  cwnd_gain:10,     /* current gain for setting cwnd */
   116:  u32 prior_cwnd;   /* prior cwnd upon entering loss recovery */
   157:  /* The gain for deriving steady-state cwnd tolerates delayed/stretched ACKs: */
   193:  /* Gain factor for adding extra_acked to target cwnd: */
   199:  /* Time period for clamping cwnd increment due to ack aggregation */
   317:  /* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
  in bbr_save_cwnd():
   324:  bbr->prior_cwnd = tp->snd_cwnd;   /* this cwnd is good enough */
   325:  else /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
  in bbr_bdp():
   363:  /* If we've never had a valid RTT sample, cap cwnd at the initial …
  in bbr_quantization_budget() (cwnd argument):
   392:  bbr_quantization_budget(struct sock *sk, u32 cwnd)
  in bbr_set_cwnd_to_recover_or_restore() (local):
   483:  u32 cwnd = tp->snd_cwnd;
  in bbr_set_cwnd() (local):
   521:  u32 cwnd = tp->snd_cwnd, target_cwnd = 0;
  [all...]

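The header comment in the first hit is the core of BBR's sizing rule: cwnd is not loss-driven but derived from the estimated bandwidth-delay product, cwnd = max(cwnd_gain * bottleneck_bw * min_rtt, 4 packets), with an extra quantization budget for delayed and stretched ACKs. A back-of-the-envelope sketch of that computation follows; the function name, units and GAIN_UNIT scale are assumptions for illustration, not the kernel's bbr_bdp() or bbr_quantization_budget().

#include <stdio.h>

/* Assumed fixed-point scale for gains (BBR uses a similar scheme). */
#define GAIN_UNIT 256u

/* Estimate a cwnd (in packets) from bandwidth, min RTT and a gain.
 *   bw_pps     : estimated bottleneck bandwidth, packets per second
 *   min_rtt_us : minimum observed RTT, microseconds
 *   gain       : cwnd gain, scaled by GAIN_UNIT (2*GAIN_UNIT = 2.0x)
 */
static unsigned int bdp_cwnd(unsigned long long bw_pps,
			     unsigned int min_rtt_us,
			     unsigned int gain)
{
	unsigned long long bdp, cwnd;

	/* BDP in packets = bandwidth * min_rtt. */
	bdp = bw_pps * min_rtt_us / 1000000ull;

	/* Apply the gain, then enforce the 4-packet floor. */
	cwnd = bdp * gain / GAIN_UNIT;
	if (cwnd < 4)
		cwnd = 4;

	return (unsigned int)cwnd;
}

int main(void)
{
	/* Example: ~83,333 packets/s (about 1 Gbit/s of 1500-byte packets)
	 * over a 20 ms path, with a 2x cwnd gain -> roughly 3333 packets. */
	printf("cwnd ~ %u packets\n",
	       bdp_cwnd(83333ull, 20000u, 2 * GAIN_UNIT));
	return 0;
}
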
tcp_highspeed.c
    17:  unsigned int cwnd;   (struct member)
  in hstcp_init():
   108:  * since I don't think we will see a cwnd this large. :) */
  in hstcp_cong_avoid():
   126:  * hstcp_aimd_vals[ca->ai-1].cwnd <
   128:  * hstcp_aimd_vals[ca->ai].cwnd
   130:  if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) {
   131:  while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
   134:  } else if (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd) {
   135:  while (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd)
   141:  /* cwnd = cwnd + a(w) / cwnd */
  [all...]

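HighSpeed TCP swaps Reno's fixed additive increase for a table indexed by the current window: ca->ai names the active row, and the hits show the index walking up while snd_cwnd exceeds the row's bound and back down once it falls to or below the previous row's. Here is a sketch of that table walk with a made-up three-row table; the real hstcp_aimd_vals[] carries the full RFC 3649 set of rows, each with a matching multiplicative-decrease factor as well.

#include <stdio.h>

/* Hypothetical miniature AIMD table: each row gives the additive
 * increase a(w) that applies while cwnd lies above the previous row's
 * bound and at or below this row's bound. */
struct hstcp_row {
	unsigned int cwnd;	/* upper cwnd bound for this row */
	unsigned int ai;	/* additive increase, in segments */
};

static const struct hstcp_row table[] = {
	{  38, 1 },
	{ 118, 2 },
	{ 221, 3 },
};
#define TABLE_ROWS (sizeof(table) / sizeof(table[0]))

/* Move the row index the way hstcp_cong_avoid() does: up while cwnd
 * exceeds the current row's bound, down while it is at or below the
 * previous row's bound. */
static unsigned int update_index(unsigned int idx, unsigned int cwnd)
{
	while (idx < TABLE_ROWS - 1 && cwnd > table[idx].cwnd)
		idx++;
	while (idx > 0 && cwnd <= table[idx - 1].cwnd)
		idx--;
	return idx;
}

int main(void)
{
	unsigned int idx = 0, cwnd;

	for (cwnd = 20; cwnd <= 240; cwnd += 60) {
		idx = update_index(idx, cwnd);
		printf("cwnd=%3u -> row %u, a(w)=%u\n", cwnd, idx, table[idx].ai);
	}
	return 0;
}
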
/kernel/linux/linux-6.6/net/ipv4/

tcp_bic.c
    51:  u32 cnt;   /* increase cwnd by 1 after ACKs */
  in bictcp_update():
    83:  static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
    85:  if (ca->last_cwnd == cwnd &&
    89:  ca->last_cwnd = cwnd;
    96:  if (cwnd <= low_window) {
    97:  ca->cnt = cwnd;
   102:  if (cwnd < ca->last_max_cwnd) {
   103:  __u32 dist = (ca->last_max_cwnd - cwnd)
   108:  ca->cnt = cwnd / max_increment;
   111:  ca->cnt = (cwnd * smooth_part) …
  [all...]

tcp_cubic.c
    81:  MODULE_PARM_DESC(hystart_low_window, "lower bound cwnd for hybrid slow start");
    87:  u32 cnt;        /* increase cwnd by 1 after ACKs */
    97:  u32 tcp_cwnd;   /* estimated tcp cwnd */
  in cubictcp_cwnd_event():
   152:  * Shift epoch_start to keep cwnd growth to cubic curve.
  in bictcp_update():
   214:  static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)
   221:  if (ca->last_cwnd == cwnd &&
   226:  * On all cwnd reduction events, ca->epoch_start is set to 0,
   232:  ca->last_cwnd = cwnd;
   238:  ca->tcp_cwnd = cwnd;   /* syn with cubic */
   240:  if (ca->last_max_cwnd <= cwnd) {
  [all...]

tcp_bbr.c
    10:  * cwnd = max(cwnd_gain * bottleneck_bandwidth * min_rtt, 4)
   112:  cwnd_gain:10,     /* current gain for setting cwnd */
   118:  u32 prior_cwnd;   /* prior cwnd upon entering loss recovery */
   159:  /* The gain for deriving steady-state cwnd tolerates delayed/stretched ACKs: */
   195:  /* Gain factor for adding extra_acked to target cwnd: */
   201:  /* Time period for clamping cwnd increment due to ack aggregation */
   319:  /* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
  in bbr_save_cwnd():
   326:  bbr->prior_cwnd = tcp_snd_cwnd(tp);   /* this cwnd is good enough */
   327:  else /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
  in bbr_bdp():
   365:  /* If we've never had a valid RTT sample, cap cwnd at the initial …
  in bbr_quantization_budget() (cwnd argument):
   394:  bbr_quantization_budget(struct sock *sk, u32 cwnd)
  in bbr_set_cwnd_to_recover_or_restore() (local):
   485:  u32 cwnd = tcp_snd_cwnd(tp);
  in bbr_set_cwnd() (local):
   523:  u32 cwnd = tcp_snd_cwnd(tp), target_cwnd = 0;
  [all...]

tcp_highspeed.c
    17:  unsigned int cwnd;   (struct member)
  in hstcp_init():
   108:  * since I don't think we will see a cwnd this large. :) */
  in hstcp_cong_avoid():
   126:  * hstcp_aimd_vals[ca->ai-1].cwnd <
   128:  * hstcp_aimd_vals[ca->ai].cwnd
   130:  if (tcp_snd_cwnd(tp) > hstcp_aimd_vals[ca->ai].cwnd) {
   131:  while (tcp_snd_cwnd(tp) > hstcp_aimd_vals[ca->ai].cwnd &&
   134:  } else if (ca->ai && tcp_snd_cwnd(tp) <= hstcp_aimd_vals[ca->ai-1].cwnd) {
   135:  while (ca->ai && tcp_snd_cwnd(tp) <= hstcp_aimd_vals[ca->ai-1].cwnd)
   141:  /* cwnd = cwnd + a(w) / cwnd */
  [all...]

/kernel/linux/linux-5.10/net/sctp/

transport.c
   394:  /* This routine updates the transport's cwnd and partial_bytes_acked
  in sctp_transport_raise_cwnd():
   401:  __u32 cwnd, ssthresh, flight_size, pba, pmtu;   (locals)
   403:  cwnd = transport->cwnd;
   415:  if (cwnd <= ssthresh) {
   417:  * o When cwnd is less than or equal to ssthresh, an SCTP
   419:  * cwnd only if the current congestion window is being fully
   422:  * Only when these three conditions are met can the cwnd be
   423:  * increased; otherwise, the cwnd MUST not be increased.
   424:  * If these conditions are met, then cwnd MUST …
  [all...]

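The quoted comments restate the RFC 4960 rule that sctp_transport_raise_cwnd() implements: at or below ssthresh the path is in slow start and cwnd may grow, by at most min(bytes acked, path MTU), only while the window is being fully utilized; above ssthresh, partial_bytes_acked accumulates until a whole cwnd's worth has been acknowledged and cwnd then grows by one MTU. A condensed sketch of that decision, with invented structure and function names rather than the kernel's:

#include <stdio.h>

struct path_state {
	unsigned int cwnd;		/* congestion window, bytes    */
	unsigned int ssthresh;		/* slow start threshold, bytes */
	unsigned int pba;		/* partial_bytes_acked         */
	unsigned int pmtu;		/* path MTU, bytes             */
};

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Raise cwnd on a SACK: slow start at or below ssthresh, congestion
 * avoidance above it (same shape as sctp_transport_raise_cwnd()). */
static void raise_cwnd(struct path_state *p, unsigned int bytes_acked,
		       unsigned int flight_size)
{
	if (p->cwnd <= p->ssthresh) {
		/* Slow start: only grow while the window is fully used. */
		if (flight_size >= p->cwnd)
			p->cwnd += min_u(bytes_acked, p->pmtu);
	} else {
		/* Congestion avoidance: accumulate acked bytes and add one
		 * MTU once a whole cwnd's worth has been acknowledged. */
		p->pba += bytes_acked;
		if (p->pba >= p->cwnd && flight_size >= p->cwnd) {
			p->cwnd += p->pmtu;
			p->pba = 0;
		}
	}
}

int main(void)
{
	struct path_state p = { .cwnd = 4380, .ssthresh = 65535, .pmtu = 1460 };

	raise_cwnd(&p, 2920, 4380);	/* one slow-start step */
	printf("cwnd after slow-start step: %u bytes\n", p.cwnd);
	return 0;
}
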
/kernel/linux/linux-6.6/net/sctp/

transport.c
   552:  /* This routine updates the transport's cwnd and partial_bytes_acked
  in sctp_transport_raise_cwnd():
   559:  __u32 cwnd, ssthresh, flight_size, pba, pmtu;   (locals)
   561:  cwnd = transport->cwnd;
   573:  if (cwnd <= ssthresh) {
   575:  * o When cwnd is less than or equal to ssthresh, an SCTP
   577:  * cwnd only if the current congestion window is being fully
   580:  * Only when these three conditions are met can the cwnd be
   581:  * increased; otherwise, the cwnd MUST not be increased.
   582:  * If these conditions are met, then cwnd MUST …
  [all...]

/kernel/linux/linux-5.10/tools/testing/selftests/bpf/progs/

bpf_cubic.c
    55:  /* calculate the "K" for (wmax-cwnd) = c/rtt * K^3
    56:  * so K = cubic_root( (wmax-cwnd)*rtt/c )
    63:  * cwnd < 1 million packets
    74:  __u32 cnt;        /* increase cwnd by 1 after ACKs */
    84:  __u32 tcp_cwnd;   /* estimated tcp cwnd */
  in BPF_PROG():
   205:  * Shift epoch_start to keep cwnd growth to cubic curve.
  in bictcp_update():
   272:  static __always_inline void bictcp_update(struct bictcp *ca, __u32 cwnd,
   280:  if (ca->last_cwnd == cwnd &&
   285:  * On all cwnd reduction events, ca->epoch_start is set to 0,
   291:  ca->last_cwnd = cwnd;
  [all...]

/kernel/linux/linux-6.6/tools/testing/selftests/bpf/progs/

bpf_cubic.c
    55:  /* calculate the "K" for (wmax-cwnd) = c/rtt * K^3
    56:  * so K = cubic_root( (wmax-cwnd)*rtt/c )
    63:  * cwnd < 1 million packets
    74:  __u32 cnt;        /* increase cwnd by 1 after ACKs */
    84:  __u32 tcp_cwnd;   /* estimated tcp cwnd */
  in BPF_PROG():
   199:  * Shift epoch_start to keep cwnd growth to cubic curve.
  in bictcp_update():
   266:  static __always_inline void bictcp_update(struct bictcp *ca, __u32 cwnd,
   274:  if (ca->last_cwnd == cwnd &&
   279:  * On all cwnd reduction events, ca->epoch_start is set to 0,
   285:  ca->last_cwnd = cwnd;
  [all...]

/kernel/linux/linux-5.10/samples/bpf/

hbm_kern.h
    76:  int cwnd;   (struct member)
  in get_tcp_info():
    96:  pkti->cwnd = tp->snd_cwnd;
   104:  pkti->cwnd = 0;
  in hbm_get_pkt_info():
   116:  pkti->cwnd = 0;
  in hbm_update_stats():
   189:  if (pkti->cwnd) {
   191:  pkti->cwnd);

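In this sample header, get_tcp_info() copies tp->snd_cwnd into the per-packet info only when the socket really is a full TCP socket and writes 0 otherwise, and hbm_update_stats() folds the value into its statistics only when it is nonzero. A tiny sketch of that zero-means-unknown accounting pattern, using hypothetical struct and function names:

#include <stdio.h>

/* Per-packet info, mirroring the cwnd field the sample records. */
struct pkt_info {
	int cwnd;	/* 0 means "not a full TCP socket / unknown" */
};

/* Running statistics: only packets with a known cwnd contribute. */
struct cwnd_stats {
	unsigned long long sum;
	unsigned long long cnt;
};

static void record(struct cwnd_stats *st, const struct pkt_info *pkti)
{
	if (pkti->cwnd) {		/* skip packets with an unknown cwnd */
		st->sum += pkti->cwnd;
		st->cnt++;
	}
}

int main(void)
{
	struct cwnd_stats st = { 0, 0 };
	struct pkt_info samples[] = { { 10 }, { 0 }, { 14 }, { 0 }, { 12 } };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		record(&st, &samples[i]);

	if (st.cnt)
		printf("average cwnd over TCP packets: %llu\n", st.sum / st.cnt);
	return 0;
}
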
/kernel/linux/linux-6.6/samples/bpf/

hbm_kern.h
    74:  int cwnd;   (struct member)
  in get_tcp_info():
    94:  pkti->cwnd = tp->snd_cwnd;
   102:  pkti->cwnd = 0;
  in hbm_get_pkt_info():
   114:  pkti->cwnd = 0;
  in hbm_update_stats():
   187:  if (pkti->cwnd) {
   189:  pkti->cwnd);

/kernel/linux/linux-5.10/include/trace/events/

sctp.h
    23:  __field(__u32, cwnd)
    35:  __entry->cwnd = sp->cwnd;
    42:  TP_printk("asoc=%#llx%s ipaddr=%pISpc state=%u cwnd=%u ssthresh=%u "
    45:  __entry->ipaddr, __entry->state, __entry->cwnd,

/kernel/linux/linux-6.6/include/trace/events/

sctp.h
    23:  __field(__u32, cwnd)
    35:  __entry->cwnd = sp->cwnd;
    42:  TP_printk("asoc=%#llx%s ipaddr=%pISpc state=%u cwnd=%u ssthresh=%u "
    45:  __entry->ipaddr, __entry->state, __entry->cwnd,

/kernel/linux/linux-5.10/net/batman-adv/

tp_meter.c
   108:  * batadv_tp_cwnd() - compute the new cwnd size
   109:  * @base: base cwnd size value
   111:  * @min: minimum cwnd value (usually MSS)
   113:  * Return the new cwnd size and ensure it does not exceed the Advertised
  in batadv_tp_update_cwnd():
   147:  if (tp_vars->cwnd <= tp_vars->ss_threshold) {
   149:  tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss, mss);
   156:  ((mss * mss) << 6) / (tp_vars->cwnd << 3));
   162:  tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss …
  in batadv_tp_recv_ack():
   634:  u32 rtt, recv_ack, cwnd;   (locals)
  [all...]

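batadv_tp_update_cwnd() follows the familiar split the hits show: at or below ss_threshold the window grows by one MSS per ACK (slow start), above it by a fraction of an MSS per ACK, and batadv_tp_cwnd() clamps the result between a minimum (usually the MSS) and the advertised window. A rough sketch of that growth rule follows; the clamp helper, the window sizes, and the simplified mss*mss/cwnd step are assumptions (the kernel uses the scaled ((mss * mss) << 6) / (cwnd << 3) form visible at line 156).

#include <stdio.h>

#define ADV_WINDOW (256u * 1024u)	/* assumed advertised window, bytes */

/* Grow cwnd by "increment" but never below "min_win" nor above the
 * advertised window (the role batadv_tp_cwnd() plays). */
static unsigned int tp_cwnd(unsigned int base, unsigned int increment,
			    unsigned int min_win)
{
	unsigned int new_win = base + increment;

	if (new_win < min_win)
		new_win = min_win;
	if (new_win > ADV_WINDOW)
		new_win = ADV_WINDOW;
	return new_win;
}

/* One ACK's worth of window growth: slow start vs congestion avoidance. */
static unsigned int update_cwnd(unsigned int cwnd, unsigned int ss_threshold,
				unsigned int mss)
{
	if (cwnd <= ss_threshold)
		return tp_cwnd(cwnd, mss, mss);		/* slow start */

	return tp_cwnd(cwnd, (mss * mss) / cwnd, mss);	/* avoidance */
}

int main(void)
{
	unsigned int cwnd = 1460, i;

	for (i = 0; i < 8; i++) {
		cwnd = update_cwnd(cwnd, 8 * 1460, 1460);
		printf("after ACK %u: cwnd=%u bytes\n", i + 1, cwnd);
	}
	return 0;
}
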
/kernel/linux/linux-6.6/net/batman-adv/

tp_meter.c
   109:  * batadv_tp_cwnd() - compute the new cwnd size
   110:  * @base: base cwnd size value
   112:  * @min: minimum cwnd value (usually MSS)
   114:  * Return the new cwnd size and ensure it does not exceed the Advertised
  in batadv_tp_update_cwnd():
   148:  if (tp_vars->cwnd <= tp_vars->ss_threshold) {
   150:  tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss, mss);
   157:  ((mss * mss) << 6) / (tp_vars->cwnd << 3));
   163:  tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss …
  in batadv_tp_recv_ack():
   636:  u32 rtt, recv_ack, cwnd;   (locals)
  [all...]

/kernel/linux/linux-5.10/net/rxrpc/

input.c
  in rxrpc_congestion_management():
    45:  unsigned int cwnd = call->cong_cwnd;   (local)
    55:  cwnd = 1;
    56:  if (cwnd >= call->cong_ssthresh &&
    70:  summary->cwnd = call->cong_cwnd;
    80:  cwnd += 1;
    81:  if (cwnd >= call->cong_ssthresh) {
   102:  if (cumulative_acks >= cwnd)
   103:  cwnd++;
   126:  cwnd = call->cong_ssthresh + 3;
   135:  cwnd …
  [all...]

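rxrpc keeps a per-call window in call->cong_cwnd and drives it through the transitions visible here: +1 per ACK event while below cong_ssthresh, +1 per window's worth of cumulative ACKs once at or above it, and a jump to ssthresh + 3 when entering fast retransmission. A much-simplified standalone sketch of those transitions; the real rxrpc_congestion_management() is a fuller state machine with explicit slow-start, avoidance, packet-loss and retransmission modes, and the names below are invented.

#include <stdio.h>

/* Minimal per-call congestion state, loosely modelled on the fields the
 * hits touch (cong_cwnd, cong_ssthresh, a cumulative ACK counter). */
struct call_state {
	unsigned int cwnd;
	unsigned int ssthresh;
	unsigned int cumulative_acks;
};

/* Handle one batch of newly acked packets. */
static void on_ack(struct call_state *c, unsigned int newly_acked)
{
	c->cumulative_acks += newly_acked;

	if (c->cwnd < c->ssthresh) {
		c->cwnd += 1;		/* slow start: grow every ACK batch */
		return;
	}

	/* congestion avoidance: +1 once a full window has been acked */
	if (c->cumulative_acks >= c->cwnd) {
		c->cwnd += 1;
		c->cumulative_acks = 0;
	}
}

/* Fast-retransmit style reduction: halve to ssthresh, plus the classic
 * three-segment inflation while recovering. */
static void on_loss(struct call_state *c)
{
	c->ssthresh = c->cwnd / 2 > 1 ? c->cwnd / 2 : 1;
	c->cwnd = c->ssthresh + 3;
}

int main(void)
{
	struct call_state c = { .cwnd = 4, .ssthresh = 8 };
	unsigned int i;

	for (i = 0; i < 10; i++)
		on_ack(&c, 1);
	printf("after 10 ACKs: cwnd=%u\n", c.cwnd);

	on_loss(&c);
	printf("after loss: cwnd=%u ssthresh=%u\n", c.cwnd, c.ssthresh);
	return 0;
}
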
/kernel/linux/linux-6.6/net/rxrpc/

input.c
  in rxrpc_congestion_management():
    28:  unsigned int cwnd = call->cong_cwnd;   (local)
    38:  cwnd = 1;
    39:  if (cwnd >= call->cong_ssthresh &&
    51:  summary->cwnd = call->cong_cwnd;
    61:  cwnd += 1;
    62:  if (cwnd >= call->cong_ssthresh) {
    83:  if (cumulative_acks >= cwnd)
    84:  cwnd++;
   107:  cwnd = call->cong_ssthresh + 3;
   116:  cwnd …
  [all...]

/kernel/linux/linux-5.10/net/dccp/ccids/

ccid2.c
  in ccid2_change_l_ack_ratio():
    79:  * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from
    81:  * acceptable since this causes starvation/deadlock whenever cwnd < 2.
  in ccid2_hc_tx_rto_expire():
   149:  /* adjust pipe, cwnd etc */
   165:  /* if we were blocked before, we may now send cwnd=1 packet */
   183:  * ccid2_update_used_window - Track how much of cwnd is actually used   (kernel-doc)
  in ccid2_cwnd_application_limited():
   198:  /* don't reduce cwnd below the initial window (IW) */
  in ccid2_cwnd_restart():
   217:  u32 cwnd = hc->tx_cwnd, restart_cwnd,   (locals)
   221:  hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2));
   223:  /* don't reduce cwnd below the initial window (IW) */ …
  [all...]

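ccid2_cwnd_restart() handles a sender that has gone idle: ssthresh is first raised to at least 3/4 of the current window ((cwnd >> 1) + (cwnd >> 2) in the hit at line 221), then cwnd is halved once per RTO of idleness but never pushed below the initial window. A standalone sketch of that shape, with assumed parameters; the kernel derives the floor from the RFC 3390 initial window and the idle time from the timestamp of the last send.

#include <stdio.h>

/* Restart the congestion window after an idle period, in the spirit of
 * ccid2_cwnd_restart(): remember 3/4 of the old window in ssthresh,
 * then halve cwnd once per RTO of idle time, never going below the
 * initial window. */
static void cwnd_restart(unsigned int *cwnd, unsigned int *ssthresh,
			 unsigned int idle_rtos, unsigned int initial_window)
{
	unsigned int w = *cwnd;

	/* keep ssthresh at no less than 3/4 of the window we had */
	if (*ssthresh < (w >> 1) + (w >> 2))
		*ssthresh = (w >> 1) + (w >> 2);

	/* decay: halve per RTO of idleness, floor at the initial window */
	while (idle_rtos-- && w > initial_window)
		w >>= 1;
	if (w < initial_window)
		w = initial_window;

	*cwnd = w;
}

int main(void)
{
	unsigned int cwnd = 40, ssthresh = 20;

	cwnd_restart(&cwnd, &ssthresh, 3, 4);
	printf("after 3 idle RTOs: cwnd=%u ssthresh=%u\n", cwnd, ssthresh);
	return 0;
}
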
/kernel/linux/linux-6.6/net/dccp/ccids/

ccid2.c
  in ccid2_change_l_ack_ratio():
    79:  * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from
    81:  * acceptable since this causes starvation/deadlock whenever cwnd < 2.
  in ccid2_hc_tx_rto_expire():
   149:  /* adjust pipe, cwnd etc */
   165:  /* if we were blocked before, we may now send cwnd=1 packet */
   183:  * ccid2_update_used_window - Track how much of cwnd is actually used   (kernel-doc)
  in ccid2_cwnd_application_limited():
   201:  /* don't reduce cwnd below the initial window (IW) */
  in ccid2_cwnd_restart():
   220:  u32 cwnd = hc->tx_cwnd, restart_cwnd,   (locals)
   224:  hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2));
   226:  /* don't reduce cwnd below the initial window (IW) */ …
  [all...]

/kernel/linux/linux-5.10/tools/testing/selftests/bpf/

bpf_tcp_helpers.h
   159:  /* do new cwnd calculation (required) */
   163:  /* call when cwnd event occurs (optional) */
   167:  /* new value of cwnd after loss (required) */
   175:  /* call when packets are delivered to update cwnd and pacing rate,
  in tcp_slow_start():
   190:  __u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);   (local)
   192:  acked -= cwnd - tp->snd_cwnd;
   193:  tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
  in tcp_is_cwnd_limited():
   207:  /* If in slow start, ensure cwnd grows to twice what was ACKed. */

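The tcp_slow_start() copy in this helper header is almost fully visible in the hits: cwnd advances by the newly acked segments but is capped at snd_ssthresh, the ACKs consumed by that step are subtracted out, and the remainder is handed back so the caller can spend it in congestion avoidance; snd_cwnd_clamp bounds the final value. The same arithmetic as a self-contained sketch with placeholder names:

#include <stdio.h>

/* Standalone version of the slow-start helper shown above: grow cwnd
 * by the number of newly acked segments, stop at ssthresh, and return
 * the ACKs left over for congestion avoidance to consume. */
static unsigned int slow_start(unsigned int *snd_cwnd, unsigned int ssthresh,
			       unsigned int cwnd_clamp, unsigned int acked)
{
	unsigned int cwnd = *snd_cwnd + acked;

	if (cwnd > ssthresh)
		cwnd = ssthresh;		/* don't overshoot ssthresh */

	acked -= cwnd - *snd_cwnd;		/* ACKs consumed by slow start */
	*snd_cwnd = cwnd < cwnd_clamp ? cwnd : cwnd_clamp;

	return acked;				/* leftover ACKs, if any */
}

int main(void)
{
	unsigned int snd_cwnd = 10, leftover;

	/* 8 segments acked, but ssthresh is only 4 above cwnd: 4 ACKs are
	 * used here and 4 are left for the congestion-avoidance phase. */
	leftover = slow_start(&snd_cwnd, 14, 1000, 8);
	printf("snd_cwnd=%u, leftover acked=%u\n", snd_cwnd, leftover);
	return 0;
}
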
/kernel/linux/linux-5.10/include/linux/qed/

tcp_common.h
    92:  __le32 cwnd;   (struct member)
   158:  __le32 cwnd;   (struct member)
   238:  __le32 cwnd;   (struct member)

/kernel/linux/linux-6.6/include/linux/qed/

tcp_common.h
    92:  __le32 cwnd;   (struct member)
   158:  __le32 cwnd;   (struct member)
   238:  __le32 cwnd;   (struct member)
