Lines matching refs: ca (from net/ipv4/tcp_cubic.c, the Linux CUBIC congestion-control module)

105 static inline void bictcp_reset(struct bictcp *ca)
107 ca->cnt = 0;
108 ca->last_max_cwnd = 0;
109 ca->last_cwnd = 0;
110 ca->last_time = 0;
111 ca->bic_origin_point = 0;
112 ca->bic_K = 0;
113 ca->delay_min = 0;
114 ca->epoch_start = 0;
115 ca->ack_cnt = 0;
116 ca->tcp_cwnd = 0;
117 ca->found = 0;
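
Lines 105-117 zero the per-connection CUBIC state. A minimal user-space sketch of the structure they imply; the field names come from this listing, while the fixed-width types and comments are assumptions modeled on mainline tcp_cubic.c:

    #include <stdint.h>

    /* Sketch of the per-connection state being reset above; types and
     * comments are assumptions, not taken from this listing itself. */
    struct bictcp {
        uint32_t cnt;              /* increase cwnd by 1 after cnt ACKs */
        uint32_t last_max_cwnd;    /* last maximum snd_cwnd (Wmax) */
        uint32_t last_cwnd;        /* the last snd_cwnd */
        uint32_t last_time;        /* time when last_cwnd was updated */
        uint32_t bic_origin_point; /* origin point of the cubic curve */
        uint32_t bic_K;            /* time to origin from epoch start */
        uint32_t delay_min;        /* min delay (usec) */
        uint32_t epoch_start;      /* beginning of an epoch (jiffies) */
        uint32_t ack_cnt;          /* number of ACKed packets */
        uint32_t tcp_cwnd;         /* estimated Reno cwnd */
        uint8_t  sample_cnt;       /* RTT samples taken for curr_rtt */
        uint8_t  found;            /* slow-start exit point found? */
        uint32_t round_start;      /* beginning of each round (usec) */
        uint32_t end_seq;          /* end_seq of the round */
        uint32_t last_ack;         /* last closely-spaced ACK (usec) */
        uint32_t curr_rtt;         /* minimum RTT of current round */
    };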
128 struct bictcp *ca = inet_csk_ca(sk);
130 ca->round_start = ca->last_ack = bictcp_clock_us(sk);
131 ca->end_seq = tp->snd_nxt;
132 ca->curr_rtt = ~0U;
133 ca->sample_cnt = 0;
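
Lines 128-133 re-arm the per-round HyStart state; they come from the bictcp_hystart_reset() helper in mainline, where a round of slow start is delimited by the snd_nxt value captured in end_seq. A sketch under those assumptions, with a stand-in struct for the relevant fields:

    #include <stdint.h>

    /* Stand-in for the HyStart-related bictcp fields above. */
    struct hystart_round {
        uint32_t round_start;   /* beginning of each round (usec) */
        uint32_t last_ack;      /* last closely-spaced ACK (usec) */
        uint32_t end_seq;       /* snd_nxt captured at round start */
        uint32_t curr_rtt;      /* minimum RTT of the current round */
        uint8_t  sample_cnt;    /* RTT samples taken this round */
    };

    /* Sketch: start a new round. "now_us" stands in for
     * bictcp_clock_us(sk) and "snd_nxt" for tp->snd_nxt. */
    static void hystart_round_reset(struct hystart_round *r,
                                    uint32_t now_us, uint32_t snd_nxt)
    {
        r->round_start = r->last_ack = now_us;
        r->end_seq = snd_nxt;      /* round ends when snd_una passes this */
        r->curr_rtt = UINT32_MAX;  /* ~0U: no RTT sample seen yet */
        r->sample_cnt = 0;         /* restart the minimum filter */
    }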
138 struct bictcp *ca = inet_csk_ca(sk);
140 bictcp_reset(ca);
152 struct bictcp *ca = inet_csk_ca(sk);
161 if (ca->epoch_start && delta > 0) {
162 ca->epoch_start += delta;
163 if (after(ca->epoch_start, now))
164 ca->epoch_start = now;
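
Lines 161-164 compensate for application-limited idle time: epoch_start is shifted forward by the idle interval so cwnd resumes from the same point on the cubic curve rather than jumping ahead. A user-space sketch, where "now" and "lsndtime" are jiffies values mirroring tcp_jiffies32 and tp->lsndtime:

    #include <stdint.h>

    /* Sketch of the idle-time compensation above. */
    static void shift_epoch_after_idle(uint32_t *epoch_start,
                                       uint32_t lsndtime, uint32_t now)
    {
        int32_t delta = (int32_t)(now - lsndtime);  /* idle duration */

        if (*epoch_start && delta > 0) {
            *epoch_start += delta;  /* pretend the epoch began later */
            /* same check as after(epoch_start, now): clamp to now */
            if ((int32_t)(*epoch_start - now) > 0)
                *epoch_start = now;
        }
    }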
221 static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)
226 ca->ack_cnt += acked; /* count the number of ACKed packets */
228 if (ca->last_cwnd == cwnd &&
229 (s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
232 /* The CUBIC function can update ca->cnt at most once per jiffy.
233 * On all cwnd reduction events, ca->epoch_start is set to 0,
234 * which will force a recalculation of ca->cnt.
236 if (ca->epoch_start && tcp_jiffies32 == ca->last_time)
239 ca->last_cwnd = cwnd;
240 ca->last_time = tcp_jiffies32;
242 if (ca->epoch_start == 0) {
243 ca->epoch_start = tcp_jiffies32; /* record beginning */
244 ca->ack_cnt = acked; /* start counting */
245 ca->tcp_cwnd = cwnd; /* sync with cubic */
247 if (ca->last_max_cwnd <= cwnd) {
248 ca->bic_K = 0;
249 ca->bic_origin_point = cwnd;
254 ca->bic_K = cubic_root(cube_factor
255 * (ca->last_max_cwnd - cwnd));
256 ca->bic_origin_point = ca->last_max_cwnd;
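
Lines 242-256 start a new epoch (epoch_start is zeroed on every cwnd reduction, which forces this block to run). If cwnd is already at or above the last maximum, the curve's origin is the current cwnd and K = 0; otherwise K is the time the cubic curve needs to climb back to Wmax, K = cbrt((Wmax - cwnd)/C). A worked example assuming the mainline defaults bic_scale = 41 (so C = 410/1024, about 0.4) and cube_factor = 2^40/410, with math.h cbrt standing in for the kernel's integer cubic_root:

    #include <stdio.h>
    #include <stdint.h>
    #include <math.h>

    /* Worked example of the bic_K computation; build with -lm.
     * K comes out in units of 2^10 per second (BICTCP_HZ = 10). */
    int main(void)
    {
        uint64_t cube_factor = (1ULL << 40) / (41 * 10);
        uint32_t last_max_cwnd = 100, cwnd = 70;  /* example values */

        double K = cbrt((double)(cube_factor * (last_max_cwnd - cwnd)));

        /* equals cbrt((Wmax - cwnd) / C) = cbrt(30 / 0.4) ~= 4.2 s */
        printf("bic_K = %.0f (~%.1f s to reach Wmax)\n", K, K / 1024.0);
        return 0;
    }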
274 t = (s32)(tcp_jiffies32 - ca->epoch_start);
275 t += usecs_to_jiffies(ca->delay_min);
280 if (t < ca->bic_K) /* t - K */
281 offs = ca->bic_K - t;
283 offs = t - ca->bic_K;
287 if (t < ca->bic_K) /* below origin */
288 bic_target = ca->bic_origin_point - delta;
290 bic_target = ca->bic_origin_point + delta;
294 ca->cnt = cwnd / (bic_target - cwnd);
296 ca->cnt = 100 * cwnd; /* very small increment */
303 if (ca->last_max_cwnd == 0 && ca->cnt > 20)
304 ca->cnt = 20; /* increase cwnd 5% per RTT */
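
Lines 274-296 evaluate the curve W(t) = origin +/- C*|t - K|^3 and convert the gap into ca->cnt, the "ACKs per one-segment increase" consumed by the additive-increase helper; t is the time since epoch start plus delay_min, so the target is where cwnd should be one RTT from now (the shift to 1/1024-second units happens on lines not matched here). Lines 303-304 then cap the very first epoch, when no Wmax is known yet, at 5% growth per RTT. A standalone sketch of the two main steps, constants from the mainline defaults, function names illustrative:

    #include <stdint.h>

    /* Curve evaluation: "t" is time since epoch_start in units of
     * 2^10 per second, matching the kernel's BICTCP_HZ fixed point. */
    static uint32_t cubic_target(uint32_t bic_K, uint32_t origin, uint32_t t)
    {
        const uint64_t cube_rtt_scale = 41 * 10;  /* C = 410/1024 ~ 0.4 */
        uint64_t offs = (t < bic_K) ? bic_K - t : t - bic_K;  /* |t - K| */

        /* C * |t - K|^3, rescaled back from the shifted fixed point */
        uint32_t delta = (uint32_t)((cube_rtt_scale * offs * offs * offs)
                                    >> (10 + 3 * 10));

        return (t < bic_K) ? origin - delta    /* below origin */
                           : origin + delta;   /* above origin */
    }

    /* Gap-to-cnt conversion: cwnd gains 1/cnt segment per ACK, so
     * cnt = cwnd / (target - cwnd) reaches the target in ~1 RTT. */
    static uint32_t cubic_cnt(uint32_t cwnd, uint32_t bic_target)
    {
        if (bic_target > cwnd)
            return cwnd / (bic_target - cwnd);
        return 100 * cwnd;  /* at/above target: ~0.01 segment per RTT */
    }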
312 while (ca->ack_cnt > delta) { /* update tcp cwnd */
313 ca->ack_cnt -= delta;
314 ca->tcp_cwnd++;
317 if (ca->tcp_cwnd > cwnd) { /* if bic is slower than tcp */
318 delta = ca->tcp_cwnd - cwnd;
320 if (ca->cnt > max_cnt)
321 ca->cnt = max_cnt;
328 ca->cnt = max(ca->cnt, 2U);
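
Lines 312-328 are the TCP-friendliness clamp: tcp_cwnd estimates what a Reno-style flow with the same beta would have reached, gaining one segment per (beta_scale/8)*cwnd ACKs, about 0.53 segments per RTT with beta = 717; if that estimate runs ahead, cnt is capped so CUBIC grows at least as fast. The final max(cnt, 2) limits growth to one segment per two ACKs, i.e. at most 1.5x per RTT. A sketch with beta_scale precomputed from the mainline defaults (8*(1024+717)/3/(1024-717) = 15):

    #include <stdint.h>

    /* Sketch of the TCP-friendly region check; returns the clamped cnt. */
    static uint32_t tcp_friendly_cnt(uint32_t cwnd, uint32_t cnt,
                                     uint32_t *tcp_cwnd, uint32_t *ack_cnt)
    {
        const uint32_t beta_scale = 15;  /* precomputed from beta = 717 */
        uint32_t delta = (cwnd * beta_scale) >> 3;

        while (*ack_cnt > delta) {   /* advance the Reno estimate */
            *ack_cnt -= delta;
            (*tcp_cwnd)++;
        }

        if (*tcp_cwnd > cwnd) {      /* Reno would be ahead of us */
            uint32_t max_cnt = cwnd / (*tcp_cwnd - cwnd);

            if (cnt > max_cnt)       /* grow at least as fast as Reno */
                cnt = max_cnt;
        }

        return cnt > 2 ? cnt : 2;    /* never faster than 1.5x per RTT */
    }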
334 struct bictcp *ca = inet_csk_ca(sk);
344 bictcp_update(ca, tp->snd_cwnd, acked);
345 tcp_cong_avoid_ai(tp, ca->cnt, acked);
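
Line 345 hands ca->cnt to tcp_cong_avoid_ai(), which adds one segment to snd_cwnd for every cnt ACKed segments, so a larger cnt means slower growth. A simplified sketch of that accumulator (the real kernel helper additionally applies banked credits from a previous, smaller w and enforces snd_cwnd_clamp):

    #include <stdint.h>

    /* Sketch of the additive-increase step driven by ca->cnt. */
    static void cong_avoid_ai(uint32_t *snd_cwnd, uint32_t *snd_cwnd_cnt,
                              uint32_t w, uint32_t acked)
    {
        *snd_cwnd_cnt += acked;      /* bank the newly ACKed segments */
        if (*snd_cwnd_cnt >= w) {
            *snd_cwnd_cnt -= w;      /* spend w of them... */
            (*snd_cwnd)++;           /* ...for one segment of cwnd */
        }
    }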
351 struct bictcp *ca = inet_csk_ca(sk);
353 ca->epoch_start = 0; /* end of epoch */
356 if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
357 ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
360 ca->last_max_cwnd = tp->snd_cwnd;
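
Lines 353-360 run on loss. With the mainline defaults beta = 717 and BICTCP_BETA_SCALE = 1024, ssthresh drops to roughly 0.7*cwnd; fast convergence additionally remembers only (1024+717)/2048, about 0.85, of cwnd as the new Wmax whenever the loss happened below the previous maximum, a sign that another flow is competing, so bandwidth is released faster. A sketch:

    #include <stdint.h>

    #define BICTCP_BETA_SCALE 1024

    /* Sketch of the loss-time Wmax bookkeeping and ssthresh. */
    static uint32_t recalc_ssthresh(uint32_t cwnd, uint32_t *last_max_cwnd,
                                    int fast_convergence)
    {
        const uint32_t beta = 717;   /* ~0.7 scaled by 1024 */

        if (cwnd < *last_max_cwnd && fast_convergence)
            *last_max_cwnd = (cwnd * (BICTCP_BETA_SCALE + beta))
                / (2 * BICTCP_BETA_SCALE);          /* ~0.85 * cwnd */
        else
            *last_max_cwnd = cwnd;

        uint32_t ssthresh = (cwnd * beta) / BICTCP_BETA_SCALE; /* ~0.7 */
        return ssthresh > 2 ? ssthresh : 2;
    }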
375 * slow start we begin with small TSO packets and ca->delay_min would
396 struct bictcp *ca = inet_csk_ca(sk);
399 if (after(tp->snd_una, ca->end_seq))
406 if ((s32)(now - ca->last_ack) <= hystart_ack_delta_us) {
407 ca->last_ack = now;
409 threshold = ca->delay_min + hystart_ack_delay(sk);
412 * ca->delay_min/2.
419 if ((s32)(now - ca->round_start) > threshold) {
420 ca->found = 1;
422 now - ca->round_start, threshold,
423 ca->delay_min, hystart_ack_delay(sk), tp->snd_cwnd);
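
Lines 399-423 are HyStart's first exit heuristic, ACK-train detection: ACKs spaced at most hystart_ack_delta_us apart (2000 us by default in mainline) extend the current train, and once the train has spanned more than about delay_min plus an ACK-aggregation allowance, the pipe is considered full and slow start should end. Mainline also halves the threshold when pacing is off, which is the delay_min/2 referred to at line 412; that branch is omitted from this sketch, and "ack_delay_us" stands in for hystart_ack_delay(sk):

    #include <stdint.h>

    /* Sketch of the ACK-train check; returns 1 to exit slow start. */
    static int ack_train_detect(uint32_t now_us, uint32_t *last_ack_us,
                                uint32_t round_start_us,
                                uint32_t delay_min_us, uint32_t ack_delay_us)
    {
        /* spaced <= 2 ms apart: still the same train of ACKs */
        if ((int32_t)(now_us - *last_ack_us) <= 2000) {
            *last_ack_us = now_us;

            /* train longer than ~delay_min => the pipe looks full */
            uint32_t threshold = delay_min_us + ack_delay_us;

            if ((int32_t)(now_us - round_start_us) > (int32_t)threshold)
                return 1;
        }
        return 0;
    }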
436 if (ca->curr_rtt > delay)
437 ca->curr_rtt = delay;
438 if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
439 ca->sample_cnt++;
441 if (ca->curr_rtt > ca->delay_min +
442 HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
443 ca->found = 1;
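
Lines 436-443 are the second heuristic, delay-increase detection: the minimum RTT over the first HYSTART_MIN_SAMPLES (8) ACKs of a round is compared against delay_min plus delay_min/8, with the margin clamped by HYSTART_DELAY_THRESH to the 4-16 ms range in mainline. A sketch:

    #include <stdint.h>

    /* Sketch of the delay-increase check; returns 1 to exit slow start. */
    static int delay_increase_detect(uint32_t *curr_rtt, uint8_t *sample_cnt,
                                     uint32_t delay_us, uint32_t delay_min_us)
    {
        if (*curr_rtt > delay_us)    /* min-filter this round's RTTs */
            *curr_rtt = delay_us;

        if (*sample_cnt < 8) {       /* HYSTART_MIN_SAMPLES */
            (*sample_cnt)++;
            return 0;
        }

        uint32_t thresh = delay_min_us >> 3;    /* delay_min / 8 */
        if (thresh < 4000)  thresh = 4000;      /* HYSTART_DELAY_MIN */
        if (thresh > 16000) thresh = 16000;     /* HYSTART_DELAY_MAX */

        return *curr_rtt > delay_min_us + thresh;
    }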
458 struct bictcp *ca = inet_csk_ca(sk);
466 if (ca->epoch_start && (s32)(tcp_jiffies32 - ca->epoch_start) < HZ)
474 if (ca->delay_min == 0 || ca->delay_min > delay)
475 ca->delay_min = delay;
478 if (!ca->found && tcp_in_slow_start(tp) && hystart &&
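
Lines 458-478 run per RTT sample: samples within one second (HZ jiffies) of a fast-recovery epoch are discarded, delay_min tracks the smallest RTT ever seen, and HyStart is gated so it only runs before an exit point is found, while in slow start, and (on the continuation of line 478, not matched here, which in mainline checks tp->snd_cwnd against hystart_low_window) once cwnd has reached 16 segments. A sketch of the last two steps:

    #include <stdint.h>

    /* Sketch of delay_min tracking and the HyStart gate; returns
     * nonzero when hystart_update() should run for this sample. */
    static int track_delay_min(uint32_t *delay_min_us, uint32_t rtt_us,
                               int found, int in_slow_start, uint32_t cwnd)
    {
        uint32_t delay = rtt_us ? rtt_us : 1;   /* never store zero */

        /* first sample, or the path delay decreased */
        if (*delay_min_us == 0 || *delay_min_us > delay)
            *delay_min_us = delay;

        /* only before an exit point is found, in slow start, and
         * once cwnd >= hystart_low_window (16 by default) */
        return !found && in_slow_start && cwnd >= 16;
    }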