Lines Matching defs:hc

53 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
54 enum ccid3_hc_tx_states oldstate = hc->tx_state;
60 hc->tx_state = state;
74 const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
75 const __u32 w_init = clamp_t(__u32, 4380U, 2 * hc->tx_s, 4 * hc->tx_s);
77 return scaled_div(w_init << 6, hc->tx_rtt);
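
Lines 74-77 compute the RFC 3390 initial sending rate: an initial window w_init of min(4*s, max(2*s, 4380 bytes)) divided by the round-trip time, with the result carrying the 6 fractional bits this file uses for rates (the << 6) and the RTT expressed in microseconds. A minimal stand-alone sketch in plain units (the helper name and the absence of fixed-point scaling are mine, not the kernel's):

    #include <stdint.h>

    /* RFC 3390 initial rate: w_init = min(4*s, max(2*s, 4380)) bytes,
     * X_init = w_init / RTT.  Plain bytes/sec, no <<6 scaling. */
    static uint32_t initial_rate_rfc3390(uint32_t s, uint32_t rtt_us)
    {
            uint32_t w_init = 4380;

            if (w_init < 2 * s)
                    w_init = 2 * s;
            if (w_init > 4 * s)
                    w_init = 4 * s;

            /* bytes per RTT -> bytes per second; guard rtt_us == 0 */
            return rtt_us ? (uint32_t)(((uint64_t)w_init * 1000000) / rtt_us) : 0;
    }
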
84 static void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hc)
86 hc->tx_t_ipi = scaled_div32(((u64)hc->tx_s) << 6, hc->tx_x);
88 DCCP_BUG_ON(hc->tx_t_ipi == 0);
89 ccid3_pr_debug("t_ipi=%u, s=%u, X=%u\n", hc->tx_t_ipi,
90 hc->tx_s, (unsigned int)(hc->tx_x >> 6));
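
Lines 84-90 derive the inter-packet interval from the TFRC relation t_ipi = s / X; the << 6 on s cancels against the scaling already present in tx_x, so tx_t_ipi comes out in plain microseconds. A rough stand-alone equivalent (illustrative name, plain units):

    #include <stdint.h>

    /* t_ipi in microseconds from segment size s (bytes) and rate x (bytes/sec). */
    static uint32_t send_interval_us(uint32_t s, uint64_t x_bps)
    {
            return x_bps ? (uint32_t)(((uint64_t)s * 1000000) / x_bps) : 0;
    }
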
93 static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now)
95 u32 delta = ktime_us_delta(now, hc->tx_t_last_win_count);
97 return delta / hc->tx_rtt;
113 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
114 __u64 min_rate = 2 * hc->tx_x_recv;
115 const __u64 old_x = hc->tx_x;
124 if (ccid3_hc_tx_idle_rtt(hc, now) >= 2) {
126 min_rate = max(min_rate, 2 * hc->tx_x_recv);
129 if (hc->tx_p > 0) {
131 hc->tx_x = min(((__u64)hc->tx_x_calc) << 6, min_rate);
132 hc->tx_x = max(hc->tx_x, (((__u64)hc->tx_s) << 6) / TFRC_T_MBI);
134 } else if (ktime_us_delta(now, hc->tx_t_ld) - (s64)hc->tx_rtt >= 0) {
136 hc->tx_x = min(2 * hc->tx_x, min_rate);
137 hc->tx_x = max(hc->tx_x,
138 scaled_div(((__u64)hc->tx_s) << 6, hc->tx_rtt));
139 hc->tx_t_ld = now;
142 if (hc->tx_x != old_x) {
145 (unsigned int)(hc->tx_x >> 6), hc->tx_x_calc,
146 (unsigned int)(hc->tx_x_recv >> 6));
148 ccid3_update_send_interval(hc);
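
Lines 113-148 are the sender's rate update along the lines of RFC 3448, 4.3: once a loss event rate p is known, X is capped by the equation-based rate X_calc and by twice the reported receive rate, with a floor of one segment per 64 seconds (TFRC_T_MBI); before the first loss, X may double at most once per RTT, again capped by 2*X_recv and floored at one segment per RTT. The idle check at line 124 relaxes the receive-rate cap after the sender has been idle for two or more RTTs. A plain-units sketch of the two branches (helper names are mine; the kernel additionally keeps X and X_recv in <<6 fixed point and handles the idle case):

    #include <stdint.h>

    #define TFRC_T_MBI 64   /* maximum back-off interval, seconds */

    static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }
    static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

    /* One rate-update step, bytes/sec; p_nonzero selects the post-loss branch.
     * Assumes rtt_us > 0. */
    static uint64_t update_rate(uint64_t x, uint64_t x_calc, uint64_t x_recv,
                                uint32_t s, uint32_t rtt_us, int p_nonzero)
    {
            if (p_nonzero) {
                    x = min_u64(x_calc, 2 * x_recv);
                    x = max_u64(x, (uint64_t)s / TFRC_T_MBI);
            } else {
                    /* initial slow-start-like doubling, at most once per RTT */
                    x = min_u64(2 * x, 2 * x_recv);
                    x = max_u64(x, ((uint64_t)s * 1000000) / rtt_us);
            }
            return x;
    }
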
158 static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hc, int len)
160 const u16 old_s = hc->tx_s;
162 hc->tx_s = tfrc_ewma(hc->tx_s, len, 9);
164 if (hc->tx_s != old_s)
165 ccid3_update_send_interval(hc);
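
Lines 158-165 keep s as a moving average of the payload sizes actually sent, recomputing the send interval whenever the average changes (t_ipi depends on s). With a weight of 9, tfrc_ewma() appears to retain nine tenths of the old average and mix in one tenth of the new sample; a stand-alone version of that kind of EWMA (my naming, not the kernel helper):

    #include <stdint.h>

    /* EWMA keeping 'weight' tenths of the old average.
     * ewma_tenths(s, len, 9): s = 0.9*s + 0.1*len, seeded by the first sample. */
    static uint32_t ewma_tenths(uint32_t avg, uint32_t newval, uint8_t weight)
    {
            return avg ? (weight * avg + (10 - weight) * newval) / 10 : newval;
    }
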
172 static inline void ccid3_hc_tx_update_win_count(struct ccid3_hc_tx_sock *hc,
175 u32 delta = ktime_us_delta(now, hc->tx_t_last_win_count),
176 quarter_rtts = (4 * delta) / hc->tx_rtt;
179 hc->tx_t_last_win_count = now;
180 hc->tx_last_win_count += min(quarter_rtts, 5U);
181 hc->tx_last_win_count &= 0xF; /* mod 16 */
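
Lines 172-181 maintain the 4-bit window counter that is stamped into CCVal (RFC 4342): it advances by one per quarter RTT elapsed since the last update, by at most 5 per packet, modulo 16. A self-contained sketch with illustrative names (assumes rtt_us > 0):

    #include <stdint.h>

    /* Advance a 4-bit window counter by the elapsed quarter-RTTs, capped at 5. */
    static void update_win_count(uint8_t *win_count, uint64_t *t_last_us,
                                 uint64_t now_us, uint32_t rtt_us)
    {
            uint32_t quarter_rtts = (uint32_t)((4 * (now_us - *t_last_us)) / rtt_us);

            if (quarter_rtts > 0) {
                    *t_last_us = now_us;
                    *win_count = (*win_count +
                                  (quarter_rtts < 5 ? quarter_rtts : 5)) & 0xF;
            }
    }
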
187 struct ccid3_hc_tx_sock *hc = from_timer(hc, t, tx_no_feedback_timer);
188 struct sock *sk = hc->sk;
199 ccid3_tx_state_name(hc->tx_state));
206 if (hc->tx_state == TFRC_SSTATE_FBACK)
213 if (hc->tx_t_rto == 0 || hc->tx_p == 0) {
216 hc->tx_x = max(hc->tx_x / 2,
217 (((__u64)hc->tx_s) << 6) / TFRC_T_MBI);
218 ccid3_update_send_interval(hc);
230 if (hc->tx_x_calc > (hc->tx_x_recv >> 5))
231 hc->tx_x_recv =
232 max(hc->tx_x_recv / 2,
233 (((__u64)hc->tx_s) << 6) / (2*TFRC_T_MBI));
235 hc->tx_x_recv = hc->tx_x_calc;
236 hc->tx_x_recv <<= 4;
241 (unsigned long long)hc->tx_x);
247 if (unlikely(hc->tx_t_rto == 0)) /* no feedback received yet */
250 t_nfb = max(hc->tx_t_rto, 2 * hc->tx_t_ipi);
253 sk_reset_timer(sk, &hc->tx_no_feedback_timer,
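
Lines 187-253 are the no-feedback timer (RFC 3448, 4.4): when no feedback arrives within the RTO, the sender halves its allowed rate (before any feedback, or while p is still zero, by halving X directly; otherwise by halving the stored X_recv so that the next rate update halves X), never dropping below one segment per 64 seconds, and then re-arms the timer for max(t_RTO, 2*t_ipi). A sketch of that back-off arithmetic in plain units (names are mine; the two cases are collapsed into one):

    #include <stdint.h>

    /* Halve the rate on a no-feedback timeout, floor of one segment per
     * 64 s (TFRC_T_MBI), and return the next timeout in microseconds. */
    static uint64_t no_feedback_backoff(uint64_t *x_bps, uint32_t s,
                                        uint32_t t_rto_us, uint32_t t_ipi_us)
    {
            uint64_t floor_bps = (uint64_t)s / 64;

            *x_bps = (*x_bps / 2 > floor_bps) ? *x_bps / 2 : floor_bps;

            /* t_nfb = max(t_RTO, 2 * t_ipi), as at line 250 */
            return (t_rto_us > 2u * t_ipi_us) ? t_rto_us : 2u * t_ipi_us;
    }
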
270 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
282 if (hc->tx_state == TFRC_SSTATE_NO_SENT) {
283 sk_reset_timer(sk, &hc->tx_no_feedback_timer, (jiffies +
285 hc->tx_last_win_count = 0;
286 hc->tx_t_last_win_count = now;
289 hc->tx_t_nom = now;
291 hc->tx_s = skb->len;
300 hc->tx_rtt = dp->dccps_syn_rtt;
301 hc->tx_x = rfc3390_initial_rate(sk);
302 hc->tx_t_ld = now;
310 hc->tx_rtt = DCCP_FALLBACK_RTT;
311 hc->tx_x = hc->tx_s;
312 hc->tx_x <<= 6;
314 ccid3_update_send_interval(hc);
319 delay = ktime_us_delta(hc->tx_t_nom, now);
332 ccid3_hc_tx_update_win_count(hc, now);
337 DCCP_SKB_CB(skb)->dccpd_ccval = hc->tx_last_win_count;
340 hc->tx_t_nom = ktime_add_us(hc->tx_t_nom, hc->tx_t_ipi);
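
Lines 270-340 schedule transmissions against the nominal send time t_nom: the first packet arms the no-feedback timer, seeds s from the packet length, takes the RTT from the SYN exchange if one was measured (seeding X via the RFC 3390 initial rate) or falls back to DCCP_FALLBACK_RTT with X set to one segment per second, and computes the first send interval; every later packet is delayed by t_nom - now unless that is within the scheduling granularity, has the current window counter stamped into its CCVal, and pushes t_nom forward by one t_ipi. A minimal sketch of the pacing decision (names and the 'granularity' parameter are illustrative):

    #include <stdint.h>

    /* How long to wait before the packet may leave; 0 means send now. */
    static int64_t pacing_delay_us(uint64_t t_nom_us, uint64_t now_us,
                                   uint32_t granularity_us)
    {
            int64_t delay = (int64_t)(t_nom_us - now_us);

            return delay > (int64_t)granularity_us ? delay : 0;
    }

    /* After a packet is sent, advance the nominal time (cf. line 340). */
    static void advance_t_nom(uint64_t *t_nom_us, uint32_t t_ipi_us)
    {
            *t_nom_us += t_ipi_us;
    }
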
346 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
348 ccid3_hc_tx_update_s(hc, len);
350 if (tfrc_tx_hist_add(&hc->tx_hist, dccp_sk(sk)->dccps_gss))
356 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
374 acked = tfrc_tx_hist_find_entry(hc->tx_hist, dccp_hdr_ack_seq(skb));
383 hc->tx_rtt = tfrc_ewma(hc->tx_rtt, r_sample, 9);
388 if (hc->tx_state == TFRC_SSTATE_NO_FBACK) {
391 if (hc->tx_t_rto == 0) {
395 hc->tx_x = rfc3390_initial_rate(sk);
396 hc->tx_t_ld = now;
398 ccid3_update_send_interval(hc);
401 } else if (hc->tx_p == 0) {
410 if (hc->tx_p > 0)
411 hc->tx_x_calc = tfrc_calc_x(hc->tx_s, hc->tx_rtt, hc->tx_p);
417 dccp_role(sk), sk, hc->tx_rtt, r_sample,
418 hc->tx_s, hc->tx_p, hc->tx_x_calc,
419 (unsigned int)(hc->tx_x_recv >> 6),
420 (unsigned int)(hc->tx_x >> 6));
423 sk_stop_timer(sk, &hc->tx_no_feedback_timer);
436 hc->tx_t_rto = max_t(u32, 4 * hc->tx_rtt,
442 t_nfb = max(hc->tx_t_rto, 2 * hc->tx_t_ipi);
448 sk_reset_timer(sk, &hc->tx_no_feedback_timer,
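
Lines 356-448 process incoming feedback: the RTT sample taken from the acknowledged history entry is folded into the smoothed RTT with the same 9:1 EWMA weighting (line 383), the first feedback re-seeds X from the RFC 3390 initial rate now that a measured RTT exists (lines 391-398), X_calc is refreshed from the TFRC throughput equation whenever p > 0 (line 411), the RTO becomes 4*RTT with a lower bound (line 436), and the no-feedback timer is re-armed for max(t_RTO, 2*t_ipi). The equation behind tfrc_calc_x() is the TCP throughput equation of RFC 3448, 3.1; a floating-point rendering for clarity (the kernel uses fixed point and lookup tables instead):

    #include <math.h>

    /* X = s / (R*sqrt(2p/3) + t_RTO * 3*sqrt(3p/8) * p * (1 + 32*p^2))
     * s in bytes, rtt in seconds, p in (0, 1]; returns bytes/sec. */
    static double tfrc_calc_x_fp(double s, double rtt, double p)
    {
            double t_rto = 4 * rtt;   /* cf. t_RTO = 4*R at line 436 */
            double f = rtt * sqrt(2 * p / 3) +
                       t_rto * 3 * sqrt(3 * p / 8) * p * (1 + 32 * p * p);

            return s / f;
    }
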
455 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
473 hc->tx_x_recv = opt_val;
474 hc->tx_x_recv <<= 6;
480 hc->tx_p = tfrc_invert_loss_event_rate(opt_val);
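
Lines 455-480 parse the feedback options: the Receive Rate option arrives in network byte order as bytes/second and is shifted into the sender's <<6 fixed-point form (lines 473-474), and the Loss Event Rate option carries the inverse of p (pinv, RFC 4342), which tfrc_invert_loss_event_rate() maps back onto the scaled p fed to the throughput equation. A sketch of that inversion, assuming p is kept as a fraction scaled by 10^6 as the surrounding TFRC code appears to do (my helper, not the kernel's):

    #include <stdint.h>
    #include <limits.h>

    /* pinv == UINT_MAX means "no loss observed yet" (RFC 4342, 8.5). */
    static uint32_t invert_loss_event_rate(uint32_t pinv)
    {
            if (pinv == UINT_MAX)
                    return 0;          /* no loss -> p = 0 */
            if (pinv == 0)
                    return 1000000;    /* degenerate 1/0 -> p = 100% */
            return 1000000 / pinv;     /* p = 1/pinv, scaled by 1E6 */
    }
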
491 struct ccid3_hc_tx_sock *hc = ccid_priv(ccid);
493 hc->tx_state = TFRC_SSTATE_NO_SENT;
494 hc->tx_hist = NULL;
495 hc->sk = sk;
496 timer_setup(&hc->tx_no_feedback_timer,
503 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
505 sk_stop_timer(sk, &hc->tx_no_feedback_timer);
506 tfrc_tx_hist_purge(&hc->tx_hist);
518 const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
527 tfrc.tfrctx_x = hc->tx_x;
528 tfrc.tfrctx_x_recv = hc->tx_x_recv;
529 tfrc.tfrctx_x_calc = hc->tx_x_calc;
530 tfrc.tfrctx_rtt = hc->tx_rtt;
531 tfrc.tfrctx_p = hc->tx_p;
532 tfrc.tfrctx_rto = hc->tx_t_rto;
533 tfrc.tfrctx_ipi = hc->tx_t_ipi;
574 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
575 enum ccid3_hc_rx_states oldstate = hc->rx_state;
581 hc->rx_state = state;
588 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
595 hc->rx_x_recv = 0;
596 hc->rx_pinv = ~0U; /* see RFC 4342, 8.5 */
609 if (hc->rx_x_recv > 0)
613 delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback);
616 hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
623 hc->rx_x_recv, hc->rx_pinv);
625 hc->rx_tstamp_last_feedback = now;
626 hc->rx_last_counter = dccp_hdr(skb)->dccph_ccval;
627 hc->rx_bytes_recv = 0;
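
Lines 588-627 build the receiver's feedback: the initial feedback (sent on the first data packet) reports x_recv = 0 and pinv = ~0U per RFC 4342, 8.5, while later feedback measures the receive rate as the bytes received since the previous feedback divided by the elapsed time, then resets the feedback timestamp, last CCVal, and byte counter. A stand-alone version of the rate sample (illustrative name):

    #include <stdint.h>

    /* Receive-rate sample in bytes/sec from a byte count and a microsecond delta. */
    static uint32_t rx_rate_sample(uint32_t bytes_recv, int64_t delta_us)
    {
            if (delta_us <= 0)
                    return 0;       /* no time elapsed, or clock stepped back */
            return (uint32_t)(((uint64_t)bytes_recv * 1000000) / (uint64_t)delta_us);
    }
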
635 const struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
644 x_recv = htonl(hc->rx_x_recv);
645 pinv = htonl(hc->rx_pinv);
668 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
673 if (hc->rx_rtt == 0) {
675 hc->rx_rtt = DCCP_FALLBACK_RTT;
678 delta = ktime_us_delta(ktime_get(), hc->rx_tstamp_last_feedback);
681 x_recv = scaled_div32(hc->rx_bytes_recv, delta);
684 if (hc->rx_x_recv == 0) {
688 x_recv = hc->rx_x_recv;
691 fval = scaled_div(hc->rx_s, hc->rx_rtt);
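
Lines 668-691 compute the first loss interval once the initial loss event is detected (RFC 3448, 6.3.1): with a fallback RTT if none has been sampled yet, the receiver takes the measured receive rate (or the previously stored x_recv if the measurement is zero), solves the throughput equation for the loss rate p that would produce that rate at the current s and RTT, and uses 1/p as the length of the first interval. The kernel does the inversion with tfrc_calc_x_reverse_lookup(); a floating-point illustration by bisection, reusing the tfrc_calc_x_fp() sketch above:

    /* Length of the first loss interval: solve X(s, R, p) = x_recv for p,
     * return 1/p.  Bisection on the monotonically decreasing X(p). */
    static double first_loss_interval(double s, double rtt, double x_recv)
    {
            double lo = 1e-8, hi = 1.0, p = 0.5;

            for (int i = 0; i < 60; i++) {
                    p = (lo + hi) / 2;
                    if (tfrc_calc_x_fp(s, rtt, p) > x_recv)
                            lo = p;         /* rate too high -> more loss */
                    else
                            hi = p;
            }
            return 1.0 / p;
    }
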
703 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
708 if (unlikely(hc->rx_state == TFRC_RSTATE_NO_DATA)) {
713 hc->rx_s = payload;
723 if (tfrc_rx_hist_duplicate(&hc->rx_hist, skb))
731 hc->rx_s = tfrc_ewma(hc->rx_s, payload, 9);
732 hc->rx_bytes_recv += payload;
738 if (tfrc_rx_handle_loss(&hc->rx_hist, &hc->rx_li_hist,
744 if (tfrc_rx_hist_loss_pending(&hc->rx_hist))
753 if (!tfrc_lh_is_initialised(&hc->rx_li_hist)) {
754 const u32 sample = tfrc_rx_hist_sample_rtt(&hc->rx_hist, skb);
761 hc->rx_rtt = tfrc_ewma(hc->rx_rtt, sample, 9);
763 } else if (tfrc_lh_update_i_mean(&hc->rx_li_hist, skb)) {
774 if (SUB16(dccp_hdr(skb)->dccph_ccval, hc->rx_last_counter) > 3)
778 tfrc_rx_hist_add_packet(&hc->rx_hist, skb, ndp);
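
Lines 703-778 are the receiver's per-packet path: the first data packet seeds s and triggers the initial feedback, duplicates are discarded, the byte counter and the EWMA of the payload size are updated, loss detection runs over the receive history, the receiver-side RTT estimate is refined from the window counter while no loss has yet been seen, and new feedback is scheduled either when a loss event changes the loss-interval mean or when the sender's CCVal has advanced by more than 3, i.e. roughly once per RTT (line 774). The CCVal comparison is circular over 4 bits; a one-line version of that arithmetic:

    #include <stdint.h>

    /* Circular distance between two 4-bit window counter (CCVal) values. */
    static uint8_t ccval_delta(uint8_t newer, uint8_t older)
    {
            return (uint8_t)((newer + 16 - older) & 0xF);
    }
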
787 struct ccid3_hc_rx_sock *hc = ccid_priv(ccid);
789 hc->rx_state = TFRC_RSTATE_NO_DATA;
790 tfrc_lh_init(&hc->rx_li_hist);
791 return tfrc_rx_hist_alloc(&hc->rx_hist);
796 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
798 tfrc_rx_hist_purge(&hc->rx_hist);
799 tfrc_lh_cleanup(&hc->rx_li_hist);
812 const struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
820 rx_info.tfrcrx_x_recv = hc->rx_x_recv;
821 rx_info.tfrcrx_rtt = hc->rx_rtt;
822 rx_info.tfrcrx_p = tfrc_invert_loss_event_rate(hc->rx_pinv);