Lines Matching refs:cwnd

10  *   cwnd = max(cwnd_gain * bottleneck_bandwidth * min_rtt, 4)
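
That header-comment formula is the core of BBR's cwnd sizing: a gain-scaled bandwidth-delay product with a 4-packet floor. A minimal userspace sketch of the same arithmetic follows; the names and units (bw_pkts_per_sec, min_rtt_sec) are illustrative assumptions, not the kernel's variables.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: a BBR-style target cwnd in packets, computed from an
     * estimated bottleneck bandwidth, a min RTT sample, and a cwnd gain. */
    static uint32_t target_cwnd_pkts(double bw_pkts_per_sec, double min_rtt_sec,
                                     double cwnd_gain)
    {
        double bdp = bw_pkts_per_sec * min_rtt_sec;  /* bandwidth-delay product */
        double cwnd = cwnd_gain * bdp;               /* scale by the gain */
        return cwnd > 4.0 ? (uint32_t)cwnd : 4;      /* floor of 4 packets */
    }

    int main(void)
    {
        /* e.g. 10,000 pkts/s at 20 ms min RTT with gain 2 -> cwnd = 400 */
        printf("%u\n", target_cwnd_pkts(10000.0, 0.020, 2.0));
        return 0;
    }
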
112 cwnd_gain:10, /* current gain for setting cwnd */
118 u32 prior_cwnd; /* prior cwnd upon entering loss recovery */
159 /* The gain for deriving steady-state cwnd tolerates delayed/stretched ACKs: */
195 /* Gain factor for adding extra_acked to target cwnd: */
201 /* Time period for clamping cwnd increment due to ack aggregation */
319 /* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
326 bbr->prior_cwnd = tcp_snd_cwnd(tp); /* this cwnd is good enough */
327 else /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
365 /* If we've never had a valid RTT sample, cap cwnd at the initial
368 * ACKed so far. In this case, an RTO can cut cwnd to 1, in which
372 return TCP_INIT_CWND; /* be safe: cap at default initial cwnd */
384 /* To achieve full performance in high-speed paths, we budget enough cwnd to
389 * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
390 * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
394 static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd)
399 cwnd += 3 * bbr_tso_segs_goal(sk);
401 /* Reduce delayed ACKs by rounding up cwnd to the next even number. */
402 cwnd = (cwnd + 1) & ~1U;
406 cwnd += 2;
408 return cwnd;
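
Read together, the fragments from lines 384-408 describe bbr_quantization_budget(): pad the BDP-derived cwnd by three TSO-sized bursts (sender qdisc, TSO/GSO engine, receiver LRO/GRO/delayed-ACK engine), round up to an even packet count to reduce delayed-ACK interaction, and add two packets during one PROBE_BW phase so small BDPs can still be probed above BDP. A simplified standalone sketch of that arithmetic; the tso_segs_goal parameter and probe_bw_up_phase flag are stand-ins for the kernel's per-connection state.

    #include <stdbool.h>
    #include <stdint.h>

    /* Simplified sketch of the quantization-budget idea: pad cwnd so full-sized
     * offload bursts fit in flight, then round up to an even packet count. */
    uint32_t quantization_budget(uint32_t cwnd, uint32_t tso_segs_goal,
                                 bool probe_bw_up_phase)
    {
        cwnd += 3 * tso_segs_goal;   /* skbs in qdisc, TSO engine, GRO engine */
        cwnd = (cwnd + 1) & ~1U;     /* round up to the next even number */
        if (probe_bw_up_phase)       /* help small BDPs get inflight above BDP */
            cwnd += 2;
        return cwnd;
    }
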
455 /* Find the cwnd increment based on estimate of ack aggregation */
474 * After recovery finishes, or upon undo, we restore the cwnd we had when
475 * recovery started (capped by the target cwnd based on estimated BDP).
485 u32 cwnd = tcp_snd_cwnd(tp);
489 * Then, in bbr_set_cwnd() we slow start up toward the target cwnd.
492 cwnd = max_t(s32, cwnd - rs->losses, 1);
498 /* Cut unused cwnd from app behavior, TSQ, or TSO deferral: */
499 cwnd = tcp_packets_in_flight(tp) + acked;
501 /* Exiting loss recovery; restore cwnd saved before recovery. */
502 cwnd = max(cwnd, bbr->prior_cwnd);
508 *new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);
511 *new_cwnd = cwnd;
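
The fragments from lines 474-511 sketch bbr_set_cwnd_to_recover_or_restore(): on fresh losses the cwnd is cut by the number of packets just marked lost, during recovery it is held to roughly packets-in-flight plus newly ACKed data, and on exiting (or undoing) recovery it is restored to at least the value saved when recovery began. A hedged, simplified rendering of that shape; the struct and parameter names are illustrative, not the kernel's types.

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical, simplified connection state for illustration. */
    struct conn_state {
        uint32_t cwnd;        /* current congestion window, packets */
        uint32_t prior_cwnd;  /* cwnd saved when loss recovery began */
        uint32_t inflight;    /* packets currently in flight */
    };

    /* Sketch: cut cwnd by fresh losses; conserve packets while recovering;
     * restore the pre-recovery cwnd when leaving recovery. */
    uint32_t recover_or_restore(const struct conn_state *c, uint32_t newly_lost,
                                uint32_t newly_acked, bool in_recovery,
                                bool exiting_recovery)
    {
        uint32_t cwnd = c->cwnd;
        uint32_t floor_cwnd = c->inflight + newly_acked;

        if (newly_lost > 0)            /* cut by packets just marked lost */
            cwnd = cwnd > newly_lost ? cwnd - newly_lost : 1;

        if (in_recovery)               /* packet conservation while recovering */
            return cwnd > floor_cwnd ? cwnd : floor_cwnd;

        if (exiting_recovery)          /* restore the cwnd saved before recovery */
            cwnd = cwnd > c->prior_cwnd ? cwnd : c->prior_cwnd;

        return cwnd;
    }
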
515 /* Slow-start up toward target cwnd (if bw estimate is growing, or packet loss
523 u32 cwnd = tcp_snd_cwnd(tp), target_cwnd = 0;
528 if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
533 /* Increment the cwnd to account for excess ACKed data that seems
539 /* If we're below target cwnd, slow start cwnd toward target cwnd. */
540 if (bbr_full_bw_reached(sk)) /* only cut cwnd if we filled the pipe */
541 cwnd = min(cwnd + acked, target_cwnd);
542 else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND)
543 cwnd = cwnd + acked;
544 cwnd = max(cwnd, bbr_cwnd_min_target);
547 tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp)); /* apply global cap */
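
The fragments from lines 515-547 outline bbr_set_cwnd(): grow cwnd by the newly ACKed packets, but cap it at the BDP-derived target only once the bandwidth estimate has stopped growing and the pipe is judged full; keep a floor of bbr_cwnd_min_target packets and apply the global snd_cwnd_clamp at the end. A compact illustration of that control flow; the parameter names are assumptions, and the recovery early-exit and the extra growth while delivered data is still below the initial cwnd are omitted.

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch of the steady-state cwnd update: slow-start toward the target,
     * and only cap at the target once the pipe is judged full. */
    uint32_t set_cwnd(uint32_t cwnd, uint32_t acked, uint32_t target_cwnd,
                      bool full_bw_reached, uint32_t min_cwnd, uint32_t clamp)
    {
        if (full_bw_reached) {          /* only cut cwnd if we filled the pipe */
            cwnd += acked;
            if (cwnd > target_cwnd)
                cwnd = target_cwnd;
        } else if (cwnd < target_cwnd) {
            cwnd += acked;              /* keep slow-starting toward the target */
        }
        if (cwnd < min_cwnd)            /* floor, e.g. 4 packets */
            cwnd = min_cwnd;
        if (cwnd > clamp)               /* global cap, e.g. tp->snd_cwnd_clamp */
            cwnd = clamp;
        return cwnd;
    }
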
810 * cwnd += max_extra_acked
812 * Max extra_acked is clamped by cwnd and bw * bbr_extra_acked_max_us (100 ms).
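
Lines 810-812 describe the ACK-aggregation allowance: the recent maximum of "excess" data ACKed beyond what the bandwidth estimate predicts is added on top of the target cwnd, clamped both by the current cwnd and by roughly 100 ms worth of estimated bandwidth. A small sketch of that clamp; the names and the pkts/s unit are illustrative assumptions.

    #include <stdint.h>

    /* Sketch: add the recent ACK-aggregation excess to the target cwnd,
     * clamped by the current cwnd and by ~100 ms of estimated bandwidth. */
    uint32_t add_ack_aggregation(uint32_t cwnd, uint32_t max_extra_acked,
                                 double bw_pkts_per_sec)
    {
        uint32_t bw_clamp = (uint32_t)(bw_pkts_per_sec * 0.100); /* 100 ms of bw */
        uint32_t extra = max_extra_acked;

        if (extra > cwnd)
            extra = cwnd;
        if (extra > bw_clamp)
            extra = bw_clamp;
        return cwnd + extra;
    }
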
928 * we enter PROBE_RTT mode and cap the cwnd at bbr_cwnd_min_target=4 packets.
932 * performance penalty of PROBE_RTT's cwnd capping to roughly 2% (200ms/10s).
959 bbr_save_cwnd(sk); /* note cwnd so we can restore it */
997 bbr->cwnd_gain = bbr_high_gain; /* keep cwnd */
1082 /* Provision 3 * cwnd since BBR may slow-start even during recovery. */
1086 /* In theory BBR does not need to undo the cwnd since it does not
1087 * always reduce cwnd on losses (see bbr_main()). Keep it for now.
1099 /* Entering loss recovery, so save cwnd for when we exit or undo recovery. */