/kernel/linux/linux-6.6/net/ipv4/

  bpf_tcp_ca.c
      85   case offsetof(struct sock, sk_pacing_rate):        in bpf_tcp_ca_btf_struct_access()
      86   end = offsetofend(struct sock, sk_pacing_rate);     in bpf_tcp_ca_btf_struct_access()
  tcp_bbr.c
      281  sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);   in bbr_init_pacing_rate_from_rtt()
      293  if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)            in bbr_set_pacing_rate()
      294  sk->sk_pacing_rate = rate;                                           in bbr_set_pacing_rate()
      300  return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;         in bbr_min_tso_segs()
      312  sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),                in bbr_tso_segs_goal()
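The tcp_bbr.c hits above show both sides of BBR's pacing-rate handling: bbr_bw_to_pacing_rate() scales a bandwidth estimate by a gain before it is written to sk_pacing_rate, and bbr_set_pacing_rate() only allows the rate to drop once full bandwidth has been reached. A minimal sketch of that logic, using plain arithmetic and illustrative names rather than the kernel's fixed-point BW_UNIT/BBR_UNIT scaling, might look like:

    /* Simplified sketch (not the kernel's fixed-point code): derive a pacing
     * rate from a bandwidth estimate and a gain, and apply BBR's rule that the
     * rate may only be lowered once full bandwidth has been reached.
     * bw_bytes_per_sec, gain_percent and full_bw_reached are illustrative
     * stand-ins for the kernel's internal representation.
     */
    #include <stdbool.h>

    struct pacing_state {
        unsigned long sk_pacing_rate;   /* bytes per second, mirrors sk->sk_pacing_rate */
    };

    static unsigned long bw_to_pacing_rate(unsigned long bw_bytes_per_sec,
                                           unsigned int gain_percent)
    {
        /* Scale the bandwidth estimate by the gain, then shave a small
         * margin so pacing stays just below the estimate. */
        unsigned long rate = bw_bytes_per_sec / 100 * gain_percent;

        return rate - rate / 100;   /* ~1% headroom */
    }

    static void set_pacing_rate(struct pacing_state *st,
                                unsigned long bw_bytes_per_sec,
                                unsigned int gain_percent,
                                bool full_bw_reached)
    {
        unsigned long rate = bw_to_pacing_rate(bw_bytes_per_sec, gain_percent);

        /* Mirror bbr_set_pacing_rate(): never reduce the pacing rate while
         * the bandwidth estimate may still be growing. */
        if (full_bw_reached || rate > st->sk_pacing_rate)
            st->sk_pacing_rate = rate;
    }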
  tcp_cubic.c
      379  rate = READ_ONCE(sk->sk_pacing_rate);   in hystart_ack_delay()
  tcp_output.c
      1210  unsigned long rate = sk->sk_pacing_rate;                                in tcp_update_skb_after_send()
      1982  bytes = sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift);           in tcp_tso_autosize()
      2575  sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));                  in tcp_small_queue_check()
      2583  u64 extra_bytes = (u64)sk->sk_pacing_rate * tcp_sk(sk)->tcp_tx_delay;   in tcp_small_queue_check()
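The tcp_output.c hits use the same shift in two places: with the default sk_pacing_shift of 10, sk_pacing_rate >> sk_pacing_shift is roughly the number of bytes sent in ~1 ms (1/1024 s) at the current pacing rate, and both TSO autosizing and the small-queue check budget around that amount. A hedged sketch of the autosizing idea, with illustrative helper and parameter names rather than the kernel's exact ones:

    /* Sketch of the idea behind tcp_tso_autosize(): cap a TSO burst at about
     * one pacing-shift worth of bytes (~1 ms of traffic at the pacing rate
     * when the shift is 10), but never below a minimum segment count. */
    static unsigned int tso_autosize_segs(unsigned long pacing_rate_bytes_per_sec,
                                          unsigned int pacing_shift,
                                          unsigned int mss_now,
                                          unsigned int min_tso_segs)
    {
        unsigned long bytes = pacing_rate_bytes_per_sec >> pacing_shift;
        unsigned int segs = (unsigned int)(bytes / mss_now);

        return segs < min_tso_segs ? min_tso_segs : segs;
    }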
  tcp.c
      3742  rate = READ_ONCE(sk->sk_pacing_rate);   in tcp_get_info()
      3919  rate = READ_ONCE(sk->sk_pacing_rate);   in tcp_get_timestamping_opt_stats()
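tcp_get_info() is where the pacing rate becomes visible to userspace: it is exported in struct tcp_info as tcpi_pacing_rate (bytes per second), alongside tcpi_max_pacing_rate. A small userspace sketch reading it through the TCP_INFO socket option; error handling is trimmed and fd is assumed to be a connected TCP socket:

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/tcp.h>          /* struct tcp_info, TCP_INFO */

    static void print_pacing_rate(int fd)
    {
        struct tcp_info info;
        socklen_t len = sizeof(info);

        memset(&info, 0, sizeof(info));
        if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
            printf("pacing rate: %llu bytes/sec (max %llu)\n",
                   (unsigned long long)info.tcpi_pacing_rate,
                   (unsigned long long)info.tcpi_max_pacing_rate);
    }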
/kernel/linux/linux-5.10/net/ipv4/

  tcp_bbr.c
      279  sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);   in bbr_init_pacing_rate_from_rtt()
      291  if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)            in bbr_set_pacing_rate()
      292  sk->sk_pacing_rate = rate;                                           in bbr_set_pacing_rate()
      298  return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;         in bbr_min_tso_segs()
      310  sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),                in bbr_tso_segs_goal()
  tcp_cubic.c
      386  rate = READ_ONCE(sk->sk_pacing_rate);   in hystart_ack_delay()
  tcp_output.c
      1205  unsigned long rate = sk->sk_pacing_rate;                                in tcp_update_skb_after_send()
      1970  sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),                   in tcp_tso_autosize()
      2517  sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));                  in tcp_small_queue_check()
      2525  u64 extra_bytes = (u64)sk->sk_pacing_rate * tcp_sk(sk)->tcp_tx_delay;   in tcp_small_queue_check()
  tcp.c
      3577  rate = READ_ONCE(sk->sk_pacing_rate);   in tcp_get_info()
      3735  rate = READ_ONCE(sk->sk_pacing_rate);   in tcp_get_timestamping_opt_stats()
  tcp_input.c
      908  /* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */   in tcp_update_pacing_rate()
      912  * In Slow Start [1], set sk_pacing_rate to 200 % the current rate.      in tcp_update_pacing_rate()
      929  /* WRITE_ONCE() is needed because sch_fq fetches sk_pacing_rate         in tcp_update_pacing_rate()
      933  WRITE_ONCE(sk->sk_pacing_rate, min_t(u64, rate,                         in tcp_update_pacing_rate()
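The tcp_input.c hits are the generic (non-BBR) pacing-rate update: take the current delivery rate, cwnd * mss / srtt, scale it by roughly 200% in slow start or 120% in congestion avoidance (the defaults of net.ipv4.tcp_pacing_ss_ratio and tcp_pacing_ca_ratio), and clamp the result to sk_max_pacing_rate before publishing it with WRITE_ONCE(). A simplified sketch in plain C, without the kernel's fixed-point arithmetic and with illustrative parameter names:

    #include <stdint.h>

    static uint64_t update_pacing_rate(uint32_t mss, uint32_t cwnd, uint32_t ssthresh,
                                       uint32_t srtt_us, uint64_t max_pacing_rate,
                                       unsigned int ss_ratio,   /* default 200 */
                                       unsigned int ca_ratio)   /* default 120 */
    {
        /* current rate in bytes per second: cwnd * mss / srtt */
        uint64_t rate = (uint64_t)cwnd * mss * 1000000ULL;
        unsigned int ratio = (cwnd < ssthresh / 2) ? ss_ratio : ca_ratio;

        rate = rate * ratio / 100;
        if (srtt_us)
            rate /= srtt_us;

        return rate < max_pacing_rate ? rate : max_pacing_rate;
    }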
/kernel/linux/linux-6.6/tools/testing/selftests/bpf/progs/

  tcp_ca_write_sk_pacing.c
      48  sk->sk_pacing_rate = min(rate, sk->sk_max_pacing_rate);   in BPF_PROG()
  bpf_cubic.c
      440  rate = sk->sk_pacing_rate;   in hystart_ack_delay()
/kernel/linux/linux-5.10/tools/testing/selftests/bpf/

  bpf_tcp_helpers.h
      30  unsigned long sk_pacing_rate;   member
/kernel/linux/linux-6.6/include/trace/events/

  mptcp.h
      47  __entry->pace = ssk->sk_pacing_rate;
/kernel/linux/linux-6.6/tools/testing/selftests/bpf/

  bpf_tcp_helpers.h
      39  unsigned long sk_pacing_rate;   member
/kernel/linux/linux-5.10/tools/testing/selftests/bpf/progs/

  bpf_cubic.c
      446  rate = sk->sk_pacing_rate;   in hystart_ack_delay()
/kernel/linux/linux-5.10/net/core/

  sock.c
      1189  sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval);   in sock_setsockopt()
      3053  sk->sk_pacing_rate = ~0UL;                              in sock_init_data_uid()
  filter.c
      4832  sk->sk_pacing_rate = min(sk->sk_pacing_rate,   in _bpf_setsockopt()
/kernel/linux/linux-6.6/net/core/

  sock.c
      1449  sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval);   in sk_setsockopt()
      3486  sk->sk_pacing_rate = ~0UL;                              in sock_init_data_uid()
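The net/core/sock.c hits are the SO_MAX_PACING_RATE handler (when the ceiling is lowered, the current sk_pacing_rate is clamped to it) and the socket-init default of ~0UL, i.e. unlimited; filter.c applies the same clamp when the option is set from BPF. A userspace sketch that caps a socket to roughly 10 MB/s, with the rate given in bytes per second; the SO_MAX_PACING_RATE constant comes from the Linux socket headers:

    #include <stdio.h>
    #include <sys/socket.h>

    static int cap_pacing_rate(int fd)
    {
        unsigned int max_rate = 10 * 1000 * 1000;   /* ~10 MB/s, bytes per second */

        if (setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
                       &max_rate, sizeof(max_rate)) < 0) {
            perror("setsockopt(SO_MAX_PACING_RATE)");
            return -1;
        }
        return 0;
    }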
/kernel/linux/linux-5.10/net/sched/

  sch_fq.c
      17   * Transport (eg TCP) can set in sk->sk_pacing_rate a rate, enqueue a
      275  * they do not contain sk_pacing_rate          in fq_classify()
      610  rate = min(skb->sk->sk_pacing_rate, rate);    in fq_dequeue()
/kernel/linux/linux-6.6/net/sched/

  sch_fq.c
      17   * Transport (eg TCP) can set in sk->sk_pacing_rate a rate, enqueue a
      275  * they do not contain sk_pacing_rate          in fq_classify()
      610  rate = min(skb->sk->sk_pacing_rate, rate);    in fq_dequeue()
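sch_fq is the consumer side: in fq_dequeue() the effective rate is the smaller of the socket's sk_pacing_rate and the qdisc-wide flow_max_rate, and the flow's next transmit time is pushed back by packet_len / rate. A simplified sketch of that computation; the real code also credits an initial quantum and caps very long delays:

    #include <stdint.h>

    #define NSEC_PER_SEC   1000000000ULL
    #define RATE_UNLIMITED (~0UL)

    static uint64_t next_transmit_time(uint64_t now_ns, unsigned int pkt_len,
                                       unsigned long sk_pacing_rate,
                                       unsigned long flow_max_rate)
    {
        unsigned long rate = flow_max_rate;

        if (sk_pacing_rate < rate)
            rate = sk_pacing_rate;

        if (rate == RATE_UNLIMITED || rate == 0)
            return now_ns;                      /* no pacing for this flow */

        /* delay the flow until pkt_len bytes at "rate" bytes/sec have drained */
        return now_ns + (uint64_t)pkt_len * NSEC_PER_SEC / rate;
    }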
/kernel/linux/linux-6.6/net/mptcp/

  protocol.c
      1445  subflow->avg_pacing_rate = READ_ONCE(ssk->sk_pacing_rate);   in mptcp_subflow_get_send()
      1485  READ_ONCE(ssk->sk_pacing_rate) * burst,                      in mptcp_subflow_get_send()
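In mptcp_subflow_get_send() the per-subflow pacing rate feeds the packet scheduler: each subflow's queued data is divided by its (smoothed) pacing rate to estimate how long it would linger, the subflow with the smallest estimate is chosen, and after a burst the average rate is re-blended with the current sk_pacing_rate. A rough sketch of that idea, with illustrative field names and units rather than the kernel's exact fixed-point form:

    #include <stdint.h>

    struct subflow_est {
        uint64_t avg_pacing_rate;   /* bytes per second, smoothed */
        uint64_t wmem_queued;       /* bytes already queued on this subflow */
    };

    /* smaller is better: estimated time for queued data to drain */
    static uint64_t linger_time(const struct subflow_est *sf)
    {
        return sf->avg_pacing_rate
               ? sf->wmem_queued * 1000000ULL / sf->avg_pacing_rate
               : UINT64_MAX;
    }

    /* blend the instantaneous sk_pacing_rate into the average, weighted by
     * how much new data (burst) was pushed versus what was already queued */
    static void update_avg_rate(struct subflow_est *sf, uint64_t sk_pacing_rate,
                                uint64_t burst)
    {
        uint64_t wmem = sf->wmem_queued;

        if (burst + wmem)
            sf->avg_pacing_rate = (sf->avg_pacing_rate * wmem +
                                   sk_pacing_rate * burst) / (burst + wmem);
    }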
/kernel/linux/linux-5.10/include/net/

  sock.h
      280  * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
      457  unsigned long sk_pacing_rate; /* bytes per second */   member
/kernel/linux/linux-5.10/net/mptcp/

  protocol.c
      1136  pace = READ_ONCE(ssk->sk_pacing_rate);   in mptcp_subflow_get_send()
/kernel/linux/linux-6.6/include/net/

  sock.h
      276  * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
      466  unsigned long sk_pacing_rate; /* bytes per second */   member