Lines matching defs:hc (definitions and uses of the per-socket hc pointer in net/dccp/ccids/ccid3.c, the DCCP CCID-3/TFRC congestion-control module)
53 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
54 enum ccid3_hc_tx_states oldstate = hc->tx_state;
60 hc->tx_state = state;
74 const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
75 const __u32 w_init = clamp_t(__u32, 4380U, 2 * hc->tx_s, 4 * hc->tx_s);
77 return scaled_div(w_init << 6, hc->tx_rtt);
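The matches at lines 74-77 implement RFC 3390's initial rate: w_init = min(4*s, max(2*s, 4380 bytes)), spread over one measured RTT. Two conventions from this file show up here and recur below: transmit rates carry six fixed-point fraction bits (the << 6), and scaled_div() multiplies by 10^6, so dividing by a microsecond RTT yields bytes per second. A minimal userspace sketch, with scaled_div() re-implemented as a local stand-in:

    #include <stdint.h>
    #include <stdio.h>

    /* local stand-in for the kernel's scaled_div(): (a * 10^6) / b */
    static uint64_t scaled_div(uint64_t a, uint64_t b)
    {
            return a * 1000000 / b;
    }

    /* RFC 3390: w_init = min(4*s, max(2*s, 4380 bytes)) over one RTT */
    static uint64_t rfc3390_initial_rate(uint32_t s, uint32_t rtt_us)
    {
            uint32_t lo = 2 * s, hi = 4 * s, w_init = 4380;

            if (w_init < lo)
                    w_init = lo;
            if (w_init > hi)
                    w_init = hi;
            /* bytes/second, carrying 6 fixed-point fraction bits */
            return scaled_div((uint64_t)w_init << 6, rtt_us);
    }

    int main(void)
    {
            /* s = 1460, RTT = 100 ms: w_init = 4380, X ~= 43800 bytes/s */
            uint64_t x = rfc3390_initial_rate(1460, 100000);

            printf("X = %llu bytes/s\n", (unsigned long long)(x >> 6));
            return 0;
    }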
82 * @hc: socket to have the send interval updated
86 static void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hc)
88 hc->tx_t_ipi = scaled_div32(((u64)hc->tx_s) << 6, hc->tx_x);
90 DCCP_BUG_ON(hc->tx_t_ipi == 0);
91 ccid3_pr_debug("t_ipi=%u, s=%u, X=%u\n", hc->tx_t_ipi,
92 hc->tx_s, (unsigned int)(hc->tx_x >> 6));
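ccid3_update_send_interval() (lines 86-92) is RFC 3448's t_ipi = s/X in the same fixed-point scheme: scaled_div32(s << 6, X) works out to s * 10^6 / (X in bytes/s), i.e. the inter-packet interval in microseconds. With the numbers from the sketch above, s = 1460 and X = 43800 bytes/s give t_ipi = 1460 * 10^6 / 43800, roughly 33333 us, about 30 packets per second.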
95 static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now)
97 u32 delta = ktime_us_delta(now, hc->tx_t_last_win_count);
99 return delta / hc->tx_rtt;
116 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
117 __u64 min_rate = 2 * hc->tx_x_recv;
118 const __u64 old_x = hc->tx_x;
127 if (ccid3_hc_tx_idle_rtt(hc, now) >= 2) {
129 min_rate = max(min_rate, 2 * hc->tx_x_recv);
132 if (hc->tx_p > 0) {
134 hc->tx_x = min(((__u64)hc->tx_x_calc) << 6, min_rate);
135 hc->tx_x = max(hc->tx_x, (((__u64)hc->tx_s) << 6) / TFRC_T_MBI);
137 } else if (ktime_us_delta(now, hc->tx_t_ld) - (s64)hc->tx_rtt >= 0) {
139 hc->tx_x = min(2 * hc->tx_x, min_rate);
140 hc->tx_x = max(hc->tx_x,
141 scaled_div(((__u64)hc->tx_s) << 6, hc->tx_rtt));
142 hc->tx_t_ld = now;
145 if (hc->tx_x != old_x) {
148 (unsigned int)(hc->tx_x >> 6), hc->tx_x_calc,
149 (unsigned int)(hc->tx_x_recv >> 6));
151 ccid3_update_send_interval(hc);
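Lines 116-151 are the rate-update step of RFC 3448, 4.3: once loss has been seen (p > 0), the rate is the equation-based X_calc capped at twice the reported receive rate and floored at one packet per t_mbi = 64 seconds; with no loss yet, X doubles at most once per RTT under the same 2*X_recv cap. The idle check at lines 127-129 raises min_rate to at least the RFC 3390 initial rate after two or more idle RTTs (via ccid3_hc_tx_idle_rtt() above), so a quiescent sender is not throttled by a stale, tiny X_recv. An unscaled sketch of the same decision, assuming plain bytes-per-second values:

    #include <stdint.h>

    #define TFRC_T_MBI 64   /* maximum backoff interval: 64 s (RFC 3448, 4.3) */

    /*
     * RFC 3448, 4.3 rate update, unscaled sketch: rates in plain bytes/s
     * (no <<6), rtt in microseconds. Mirrors lines 132-142 above.
     */
    static uint64_t tfrc_update_x(uint64_t x, uint64_t x_calc,
                                  uint64_t x_recv, uint32_t p,
                                  uint32_t s, uint32_t rtt_us,
                                  int rtt_since_last_doubling)
    {
            uint64_t min_rate = 2 * x_recv;

            if (p > 0) {                    /* loss seen: equation-based */
                    x = x_calc < min_rate ? x_calc : min_rate;
                    if (x < s / TFRC_T_MBI)
                            x = s / TFRC_T_MBI;     /* >= 1 pkt / 64 s */
            } else if (rtt_since_last_doubling) {
                    uint64_t dbl = 2 * x;   /* no loss: double per RTT */
                    uint64_t flr = (uint64_t)s * 1000000 / rtt_us;

                    x = dbl < min_rate ? dbl : min_rate;
                    if (x < flr)
                            x = flr;        /* >= 1 pkt per RTT */
            }
            return x;
    }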
157 * @hc: socket to be updated
162 static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hc, int len)
164 const u16 old_s = hc->tx_s;
166 hc->tx_s = tfrc_ewma(hc->tx_s, len, 9);
168 if (hc->tx_s != old_s)
169 ccid3_update_send_interval(hc);
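tfrc_ewma(avg, val, 9) at line 166 is a weight-in-tenths moving average: 90% old value, 10% new sample, with the first sample adopted verbatim. The same weight-9 filter smooths tx_rtt (line 388) and the receiver's rx_s and rx_rtt (lines 737, 767). Modeled on the helper in tfrc.h:

    #include <stdint.h>

    /* weight is tenths kept from the old average; an average of zero
     * means "no sample yet", so the first sample is taken verbatim */
    static uint32_t tfrc_ewma(uint32_t avg, uint32_t newval, uint8_t weight)
    {
            return avg ? (weight * avg + (10 - weight) * newval) / 10
                       : newval;
    }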
176 static inline void ccid3_hc_tx_update_win_count(struct ccid3_hc_tx_sock *hc,
179 u32 delta = ktime_us_delta(now, hc->tx_t_last_win_count),
180 quarter_rtts = (4 * delta) / hc->tx_rtt;
183 hc->tx_t_last_win_count = now;
184 hc->tx_last_win_count += min(quarter_rtts, 5U);
185 hc->tx_last_win_count &= 0xF; /* mod 16 */
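The window counter (lines 176-185) implements RFC 4342, 8.1: CCVal ticks once per quarter RTT, modulo 16, and any single jump is capped at 5 so the receiver can still order packets across counter wraps. For example, with tx_rtt = 40 ms and 30 ms since the last update, quarter_rtts = 4 * 30 / 40 = 3, so the counter advances by 3.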
191 struct ccid3_hc_tx_sock *hc = from_timer(hc, t, tx_no_feedback_timer);
192 struct sock *sk = hc->sk;
203 ccid3_tx_state_name(hc->tx_state));
210 if (hc->tx_state == TFRC_SSTATE_FBACK)
217 if (hc->tx_t_rto == 0 || hc->tx_p == 0) {
220 hc->tx_x = max(hc->tx_x / 2,
221 (((__u64)hc->tx_s) << 6) / TFRC_T_MBI);
222 ccid3_update_send_interval(hc);
234 if (hc->tx_x_calc > (hc->tx_x_recv >> 5))
235 hc->tx_x_recv =
236 max(hc->tx_x_recv / 2,
237 (((__u64)hc->tx_s) << 6) / (2*TFRC_T_MBI));
239 hc->tx_x_recv = hc->tx_x_calc;
240 hc->tx_x_recv <<= 4;
245 (unsigned long long)hc->tx_x);
251 if (unlikely(hc->tx_t_rto == 0)) /* no feedback received yet */
254 t_nfb = max(hc->tx_t_rto, 2 * hc->tx_t_ipi);
257 sk_reset_timer(sk, &hc->tx_no_feedback_timer,
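The no-feedback expiry (lines 191-257) is RFC 3448, 4.4. Before any feedback (t_RTO or p still zero, lines 217-222) X itself is halved, floored at one packet per t_mbi = 64 seconds. Afterwards the cached X_recv is manipulated so that the next ccid3_hc_tx_update_x() comes out at half the previous rate: line 234 compares the unscaled X_calc against 2*X_recv (the >> 5 folds tx_x_recv's six fraction bits together with the factor of two), and the <<= 4 at line 240 stores X_calc/4 in X_recv's scaled units. A plain-integer sketch of that branch:

    #include <stdint.h>

    #define TFRC_T_MBI 64   /* maximum backoff interval: 64 seconds */

    /*
     * Post-feedback branch of the no-feedback expiry, unscaled sketch
     * (plain bytes/s): returns the new cached x_recv so that the next
     * ccid3_hc_tx_update_x(), which takes min(x_calc, 2 * x_recv),
     * comes out at half the previous allowed rate.
     */
    static uint64_t tfrc_no_feedback_backoff(uint64_t x_recv,
                                             uint64_t x_calc, uint32_t s)
    {
            if (x_calc > 2 * x_recv) {      /* x_recv was the limit */
                    uint64_t floor = (uint64_t)s / (2 * TFRC_T_MBI);

                    x_recv /= 2;
                    return x_recv > floor ? x_recv : floor;
            }
            return x_calc / 4;              /* x_calc was the limit */
    }

The timer is then re-armed for t_nfb = max(t_RTO, 2 * t_ipi) (line 254), so back-to-back expiries keep halving the rate down to the one-packet-per-64-seconds floor.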
275 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
287 if (hc->tx_state == TFRC_SSTATE_NO_SENT) {
288 sk_reset_timer(sk, &hc->tx_no_feedback_timer, (jiffies +
290 hc->tx_last_win_count = 0;
291 hc->tx_t_last_win_count = now;
294 hc->tx_t_nom = now;
296 hc->tx_s = skb->len;
305 hc->tx_rtt = dp->dccps_syn_rtt;
306 hc->tx_x = rfc3390_initial_rate(sk);
307 hc->tx_t_ld = now;
315 hc->tx_rtt = DCCP_FALLBACK_RTT;
316 hc->tx_x = hc->tx_s;
317 hc->tx_x <<= 6;
319 ccid3_update_send_interval(hc);
324 delay = ktime_us_delta(hc->tx_t_nom, now);
337 ccid3_hc_tx_update_win_count(hc, now);
342 DCCP_SKB_CB(skb)->dccpd_ccval = hc->tx_last_win_count;
345 hc->tx_t_nom = ktime_add_us(hc->tx_t_nom, hc->tx_t_ipi);
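ccid3_hc_tx_send_packet() (lines 275-345) does two jobs. On the very first packet it arms the no-feedback timer, seeds the window counter and t_nom, and initializes the rate: from the RTT sampled during the handshake (dccps_syn_rtt, line 305) via RFC 3390 when available, otherwise from DCCP_FALLBACK_RTT with X = s, one packet per second (lines 315-317). From then on it paces: delay = t_nom - now (line 324); if the nominal send time is still ahead by more than the scheduling slack, the caller is told to wait, otherwise the packet goes out stamped with the current window counter (line 342) and t_nom advances by one t_ipi (line 345). A sketch of that pacing core, assuming microsecond timestamps:

    #include <stdint.h>

    /*
     * Pacing-core sketch: t_nom is the nominal send time of the next
     * packet, delta the scheduling slack the sender tolerates. Returns
     * the remaining delay (caller should wait), or 0 after "sending"
     * and advancing t_nom by one inter-packet interval.
     */
    static int64_t ccid3_pace(int64_t *t_nom_us, int64_t now_us,
                              uint32_t t_ipi_us, uint32_t delta_us)
    {
            int64_t delay = *t_nom_us - now_us;

            if (delay >= (int64_t)delta_us)
                    return delay;           /* too early: keep waiting */

            *t_nom_us += t_ipi_us;          /* send now, cf. line 345 */
            return 0;
    }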
351 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
353 ccid3_hc_tx_update_s(hc, len);
355 if (tfrc_tx_hist_add(&hc->tx_hist, dccp_sk(sk)->dccps_gss))
361 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
379 acked = tfrc_tx_hist_find_entry(hc->tx_hist, dccp_hdr_ack_seq(skb));
388 hc->tx_rtt = tfrc_ewma(hc->tx_rtt, r_sample, 9);
393 if (hc->tx_state == TFRC_SSTATE_NO_FBACK) {
396 if (hc->tx_t_rto == 0) {
400 hc->tx_x = rfc3390_initial_rate(sk);
401 hc->tx_t_ld = now;
403 ccid3_update_send_interval(hc);
406 } else if (hc->tx_p == 0) {
415 if (hc->tx_p > 0)
416 hc->tx_x_calc = tfrc_calc_x(hc->tx_s, hc->tx_rtt, hc->tx_p);
422 dccp_role(sk), sk, hc->tx_rtt, r_sample,
423 hc->tx_s, hc->tx_p, hc->tx_x_calc,
424 (unsigned int)(hc->tx_x_recv >> 6),
425 (unsigned int)(hc->tx_x >> 6));
428 sk_stop_timer(sk, &hc->tx_no_feedback_timer);
441 hc->tx_t_rto = max_t(u32, 4 * hc->tx_rtt,
447 t_nfb = max(hc->tx_t_rto, 2 * hc->tx_t_ipi);
453 sk_reset_timer(sk, &hc->tx_no_feedback_timer,
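On each acknowledged feedback (lines 361-453) the sender folds the new RTT sample into tx_rtt with the same weight-9 EWMA (line 388), then, once p > 0 (line 415), refreshes X_calc through tfrc_calc_x(), the kernel's table-driven evaluation of the TCP throughput equation of RFC 3448, 3.1; t_RTO tracks 4 * RTT (line 441) and the no-feedback timer is re-armed. A floating-point sketch of what the lookup tables approximate:

    #include <math.h>
    #include <stdio.h>

    /*
     * TCP throughput equation (RFC 3448, 3.1) that tfrc_calc_x()
     * approximates with lookup tables; t_RTO = 4*RTT is folded into
     * f(p), hence the constant 12:
     *
     *     X_calc = s / (RTT * (sqrt(2p/3) + 12*sqrt(3p/8)*p*(1+32p^2)))
     */
    static double tfrc_calc_x_sketch(double s, double rtt_sec, double p)
    {
            double f = sqrt(2.0 * p / 3.0) +
                       12.0 * sqrt(3.0 * p / 8.0) * p * (1.0 + 32.0 * p * p);

            return s / (rtt_sec * f);
    }

    int main(void)      /* link with -lm */
    {
            /* s = 1460 bytes, RTT = 100 ms, p = 1%: ~164 kbytes/s */
            printf("%.0f bytes/s\n", tfrc_calc_x_sketch(1460, 0.1, 0.01));
            return 0;
    }

The p = 0.01 here is on the natural scale; in the kernel p is carried scaled so that 10^6 means 100%, as set up by the option parsing below.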
460 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
478 hc->tx_x_recv = opt_val;
479 hc->tx_x_recv <<= 6;
485 hc->tx_p = tfrc_invert_loss_event_rate(opt_val);
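The two receiver options parsed here close the loop: TFRC_OPT_RECEIVE_RATE arrives in bytes/s and is shifted into the file's <<6 fixed-point domain (lines 478-479), while TFRC_OPT_LOSS_EVENT_RATE carries the inverse rate pinv = 1/p. tfrc_invert_loss_event_rate() maps that onto the 10^6-is-100% scale for p: pinv = ~0, the receiver's no-loss initializer (cf. line 601), gives p = 0, and pinv = 0 saturates to p = 10^6.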
496 struct ccid3_hc_tx_sock *hc = ccid_priv(ccid);
498 hc->tx_state = TFRC_SSTATE_NO_SENT;
499 hc->tx_hist = NULL;
500 hc->sk = sk;
501 timer_setup(&hc->tx_no_feedback_timer,
508 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
510 sk_stop_timer(sk, &hc->tx_no_feedback_timer);
511 tfrc_tx_hist_purge(&hc->tx_hist);
523 const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
532 tfrc.tfrctx_x = hc->tx_x;
533 tfrc.tfrctx_x_recv = hc->tx_x_recv;
534 tfrc.tfrctx_x_calc = hc->tx_x_calc;
535 tfrc.tfrctx_rtt = hc->tx_rtt;
536 tfrc.tfrctx_p = hc->tx_p;
537 tfrc.tfrctx_rto = hc->tx_t_rto;
538 tfrc.tfrctx_ipi = hc->tx_t_ipi;
579 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
580 enum ccid3_hc_rx_states oldstate = hc->rx_state;
586 hc->rx_state = state;
593 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
600 hc->rx_x_recv = 0;
601 hc->rx_pinv = ~0U; /* see RFC 4342, 8.5 */
614 if (hc->rx_x_recv > 0)
618 delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback);
621 hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
628 hc->rx_x_recv, hc->rx_pinv);
630 hc->rx_tstamp_last_feedback = now;
631 hc->rx_last_counter = dccp_hdr(skb)->dccph_ccval;
632 hc->rx_bytes_recv = 0;
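Periodic feedback (lines 618-621) measures X_recv directly: bytes received since the last feedback divided by the elapsed time, with scaled_div32()'s built-in 10^6 factor turning a microsecond delta into bytes/s. For example, 10000 bytes over a 250000 us window reports X_recv = 40000 bytes/s. Each feedback then restamps the bookkeeping (lines 630-632): timestamp, last seen CCVal, and a fresh byte counter; the initial feedback (lines 600-601) instead reports zero rate and the no-loss pinv.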
640 const struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
649 x_recv = htonl(hc->rx_x_recv);
650 pinv = htonl(hc->rx_pinv);
674 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
679 if (hc->rx_rtt == 0) {
681 hc->rx_rtt = DCCP_FALLBACK_RTT;
684 delta = ktime_us_delta(ktime_get(), hc->rx_tstamp_last_feedback);
687 x_recv = scaled_div32(hc->rx_bytes_recv, delta);
690 if (hc->rx_x_recv == 0) {
694 x_recv = hc->rx_x_recv;
697 fval = scaled_div(hc->rx_s, hc->rx_rtt);
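ccid3_first_li() (lines 674-697) sizes the very first loss interval per RFC 3448, 6.3.1: rather than starting from a measured p, it solves the throughput equation backwards so the implied loss rate matches the rate the receiver was actually getting. Since X = s / (R * f(p)), it computes fval = s / (R * X_recv) (line 697 yields s/R; the subsequent division by x_recv does not reference hc and so is not matched here) and feeds that to tfrc_calc_x_reverse_lookup() to recover p; the interval length reported upstream is then 1/p.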
709 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
714 if (unlikely(hc->rx_state == TFRC_RSTATE_NO_DATA)) {
719 hc->rx_s = payload;
729 if (tfrc_rx_hist_duplicate(&hc->rx_hist, skb))
737 hc->rx_s = tfrc_ewma(hc->rx_s, payload, 9);
738 hc->rx_bytes_recv += payload;
744 if (tfrc_rx_handle_loss(&hc->rx_hist, &hc->rx_li_hist,
750 if (tfrc_rx_hist_loss_pending(&hc->rx_hist))
759 if (!tfrc_lh_is_initialised(&hc->rx_li_hist)) {
760 const u32 sample = tfrc_rx_hist_sample_rtt(&hc->rx_hist, skb);
767 hc->rx_rtt = tfrc_ewma(hc->rx_rtt, sample, 9);
769 } else if (tfrc_lh_update_i_mean(&hc->rx_li_hist, skb)) {
780 if (SUB16(dccp_hdr(skb)->dccph_ccval, hc->rx_last_counter) > 3)
784 tfrc_rx_hist_add_packet(&hc->rx_hist, skb, ndp);
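On the receive path (lines 709-784) the RTT keeps being sampled from the sender's CCVal window counter until a first loss initializes the loss-interval history (lines 759-767), smoothed by the same weight-9 EWMA. The feedback trigger at line 780 fires once the counter has advanced by more than 3, i.e. at least one full RTT in quarter-RTT ticks; SUB16() is the mod-16 distance matching the 4-bit counter, assumed to behave along these lines:

    /* mod-16 distance between two 4-bit window-counter values */
    static unsigned int sub16(unsigned int a, unsigned int b)
    {
            return (a + 16 - b) & 0xF;
    }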
793 struct ccid3_hc_rx_sock *hc = ccid_priv(ccid);
795 hc->rx_state = TFRC_RSTATE_NO_DATA;
796 tfrc_lh_init(&hc->rx_li_hist);
797 return tfrc_rx_hist_alloc(&hc->rx_hist);
802 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
804 tfrc_rx_hist_purge(&hc->rx_hist);
805 tfrc_lh_cleanup(&hc->rx_li_hist);
818 const struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
826 rx_info.tfrcrx_x_recv = hc->rx_x_recv;
827 rx_info.tfrcrx_rtt = hc->rx_rtt;
828 rx_info.tfrcrx_p = tfrc_invert_loss_event_rate(hc->rx_pinv);