Lines matching refs:cwnd
10 * cwnd = max(cwnd_gain * bottleneck_bandwidth * min_rtt, 4)
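These matches appear to come from the kernel's BBR congestion control (net/ipv4/tcp_bbr.c). The formula above sets cwnd to a gain times the estimated bandwidth-delay product (BDP), floored at 4 packets. A minimal userspace sketch of that arithmetic, assuming an illustrative 10 Mbit/s bottleneck, a 40 ms min_rtt, 1448-byte segments, and the usual steady-state cwnd_gain of 2 (see the gain comment at 157):

	#include <stdio.h>

	/* Editor-added sketch, not kernel code: a worked instance of
	 *   cwnd = max(cwnd_gain * bottleneck_bandwidth * min_rtt, 4)
	 * All input values below are illustrative assumptions.
	 */
	int main(void)
	{
		double bw_bytes_per_sec = 10e6 / 8;	/* 10 Mbit/s bottleneck */
		double min_rtt_sec = 0.040;		/* 40 ms round-trip propagation */
		double mss_bytes = 1448.0;		/* typical Ethernet MSS */
		double cwnd_gain = 2.0;			/* usual steady-state gain */

		double bdp_pkts = bw_bytes_per_sec * min_rtt_sec / mss_bytes;
		double cwnd_pkts = cwnd_gain * bdp_pkts;

		if (cwnd_pkts < 4.0)			/* floor of 4 packets */
			cwnd_pkts = 4.0;
		printf("BDP ~= %.1f pkts, cwnd ~= %.1f pkts\n", bdp_pkts, cwnd_pkts);
		return 0;
	}

With these numbers the BDP is about 34.5 packets, so the steady-state cwnd lands near 69 packets.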
110 cwnd_gain:10, /* current gain for setting cwnd */
116 u32 prior_cwnd; /* prior cwnd upon entering loss recovery */
157 /* The gain for deriving steady-state cwnd tolerates delayed/stretched ACKs: */
193 /* Gain factor for adding extra_acked to target cwnd: */
199 /* Time period for clamping cwnd increment due to ack aggregation */
317 /* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
324 bbr->prior_cwnd = tp->snd_cwnd; /* this cwnd is good enough */
325 else /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
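The matches at 317-325 cover bbr_save_cwnd() except for the else-path assignment. A hedged standalone sketch of the decision (the max() on the else path is reconstructed from context, and the helper name and flag are hypothetical stand-ins for the kernel's prev_ca_state/mode checks):

	/* Editor-added sketch; in_recovery_or_probe_rtt condenses the kernel's
	 * CA-state and mode tests, and the else-path max() is reconstructed.
	 */
	static unsigned int save_cwnd_sketch(unsigned int prior_cwnd,
					     unsigned int snd_cwnd,
					     int in_recovery_or_probe_rtt)
	{
		if (!in_recovery_or_probe_rtt)
			return snd_cwnd;	/* this cwnd is good enough */
		/* cwnd was temporarily cut; keep the best value seen so far */
		return prior_cwnd > snd_cwnd ? prior_cwnd : snd_cwnd;
	}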
363 /* If we've never had a valid RTT sample, cap cwnd at the initial
366 * ACKed so far. In this case, an RTO can cut cwnd to 1, in which
370 return TCP_INIT_CWND; /* be safe: cap at default initial cwnd */
382 /* To achieve full performance in high-speed paths, we budget enough cwnd to
387 * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
388 * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
392 static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd)
397 cwnd += 3 * bbr_tso_segs_goal(sk);
399 /* Reduce delayed ACKs by rounding up cwnd to the next even number. */
400 cwnd = (cwnd + 1) & ~1U;
404 cwnd += 2;
406 return cwnd;
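The matches at 392-406 cover nearly all of bbr_quantization_budget(). Note the rounding trick: (cwnd + 1) & ~1U adds one and clears bit 0, so odd values round up (11 -> 12) while even values pass through unchanged. A standalone sketch, under the assumption that the elided guard around the `cwnd += 2` step fires only in PROBE_BW's high-gain cycle phase:

	/* Editor-added sketch; tso_segs_goal and the high-gain-phase flag are
	 * plain parameters here, where the kernel derives them from socket state.
	 */
	static unsigned int quantization_budget_sketch(unsigned int cwnd,
						       unsigned int tso_segs_goal,
						       int in_probe_bw_high_gain)
	{
		cwnd += 3 * tso_segs_goal;	/* budget for 3 full-sized skbs */
		cwnd = (cwnd + 1) & ~1U;	/* round up to even: fewer delayed ACKs */
		if (in_probe_bw_high_gain)	/* keep inflight above BDP for tiny BDPs */
			cwnd += 2;
		return cwnd;
	}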
453 /* Find the cwnd increment based on estimate of ack aggregation */
472 * After recovery finishes, or upon undo, we restore the cwnd we had when
473 * recovery started (capped by the target cwnd based on estimated BDP).
483 u32 cwnd = tp->snd_cwnd;
487 * Then, in bbr_set_cwnd() we slow start up toward the target cwnd.
490 cwnd = max_t(s32, cwnd - rs->losses, 1);
496 /* Cut unused cwnd from app behavior, TSQ, or TSO deferral: */
497 cwnd = tcp_packets_in_flight(tp) + acked;
499 /* Exiting loss recovery; restore cwnd saved before recovery. */
500 cwnd = max(cwnd, bbr->prior_cwnd);
506 *new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);
509 *new_cwnd = cwnd;
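Pieced together, the matches at 483-509 show the three cases in bbr_set_cwnd_to_recover_or_restore(): shrink by freshly lost packets, cut to packets-in-flight on entering recovery, and restore the saved cwnd on exit. A simplified standalone sketch (the three flags are assumptions that condense the kernel's CA-state comparisons):

	/* Editor-added sketch; the state flags stand in for the kernel's
	 * prev_ca_state vs. current-state comparisons.
	 */
	static unsigned int recover_or_restore_sketch(unsigned int cwnd,
						      unsigned int inflight,
						      unsigned int acked,
						      unsigned int losses,
						      unsigned int prior_cwnd,
						      int entering_recovery,
						      int exiting_recovery,
						      int packet_conservation)
	{
		if (losses)				/* subtract fresh losses at once */
			cwnd = cwnd > losses ? cwnd - losses : 1;
		if (entering_recovery)			/* cut unused cwnd */
			cwnd = inflight + acked;
		else if (exiting_recovery && cwnd < prior_cwnd)
			cwnd = prior_cwnd;		/* restore saved cwnd */
		if (packet_conservation && cwnd < inflight + acked)
			cwnd = inflight + acked;	/* release what was ACKed */
		return cwnd;
	}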
513 /* Slow-start up toward target cwnd (if bw estimate is growing, or packet loss
521 u32 cwnd = tp->snd_cwnd, target_cwnd = 0;
526 if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
531 /* Increment the cwnd to account for excess ACKed data that seems
537 /* If we're below target cwnd, slow start cwnd toward target cwnd. */
538 if (bbr_full_bw_reached(sk)) /* only cut cwnd if we filled the pipe */
539 cwnd = min(cwnd + acked, target_cwnd);
540 else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND)
541 cwnd = cwnd + acked;
542 cwnd = max(cwnd, bbr_cwnd_min_target);
545 tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp); /* apply global cap */
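Matches 521-545 show the core of bbr_set_cwnd(): once the pipe is known to be full, cwnd may be cut down to the target; before that it only grows, both when below target and unconditionally for the first TCP_INIT_CWND delivered packets. The result is floored at bbr_cwnd_min_target and capped by snd_cwnd_clamp. A standalone sketch of that arithmetic (TCP_INIT_CWND is 10 in current kernels):

	/* Editor-added sketch of the update at 538-545; constants mirror the
	 * matched lines (floor 4 = bbr_cwnd_min_target, 10 = TCP_INIT_CWND).
	 */
	static unsigned int set_cwnd_sketch(unsigned int cwnd, unsigned int acked,
					    unsigned int target_cwnd,
					    unsigned int delivered,
					    int full_bw_reached,
					    unsigned int snd_cwnd_clamp)
	{
		if (full_bw_reached) {		/* only cut cwnd once the pipe is full */
			cwnd += acked;
			if (cwnd > target_cwnd)
				cwnd = target_cwnd;
		} else if (cwnd < target_cwnd || delivered < 10) {
			cwnd += acked;		/* slow-start toward the target */
		}
		if (cwnd < 4)			/* never below the minimum target */
			cwnd = 4;
		return cwnd < snd_cwnd_clamp ? cwnd : snd_cwnd_clamp;	/* global cap */
	}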
808 * cwnd += max_extra_acked
810 * Max extra_acked is clamped by cwnd and bw * bbr_extra_acked_max_us (100 ms).
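The comment at 808-810 describes the ACK-aggregation allowance: the target cwnd is widened by the largest recently observed "extra" ACKed volume, itself clamped by cwnd and by bandwidth times 100 ms. A standalone sketch of the clamp (the helper name and packet units are assumptions for readability):

	/* Editor-added sketch; all names are hypothetical, and both clamp terms
	 * come from the comment above.
	 */
	static unsigned int aggregation_allowance_sketch(unsigned int target_cwnd,
							 unsigned int max_extra_acked,
							 unsigned int cwnd,
							 double bw_pkts_per_sec)
	{
		unsigned int cap = (unsigned int)(bw_pkts_per_sec * 0.100); /* 100 ms */
		unsigned int extra = max_extra_acked;

		if (extra > cwnd)	/* clamped by cwnd */
			extra = cwnd;
		if (extra > cap)	/* clamped by bw * bbr_extra_acked_max_us */
			extra = cap;
		return target_cwnd + extra;	/* cwnd += max_extra_acked */
	}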
926 * we enter PROBE_RTT mode and cap the cwnd at bbr_cwnd_min_target=4 packets.
930 * performance penalty of PROBE_RTT's cwnd capping to roughly 2% (200ms/10s).
957 bbr_save_cwnd(sk); /* note cwnd so we can restore it */
995 bbr->cwnd_gain = bbr_high_gain; /* keep cwnd */
1080 /* Provision 3 * cwnd since BBR may slow-start even during recovery. */
1084 /* In theory BBR does not need to undo the cwnd since it does not
1085 * always reduce cwnd on losses (see bbr_main()). Keep it for now.
1097 /* Entering loss recovery, so save cwnd for when we exit or undo recovery. */