/kernel/linux/linux-5.10/net/ipv4/
tcp_timer.c
  553  int mib_idx = 0;  in tcp_retransmit_timer() [local]
  557  mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;  in tcp_retransmit_timer()
  559  mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;  in tcp_retransmit_timer()
  561  mib_idx = LINUX_MIB_TCPLOSSFAILURES;  in tcp_retransmit_timer()
  565  mib_idx = LINUX_MIB_TCPSACKFAILURES;  in tcp_retransmit_timer()
  567  mib_idx = LINUX_MIB_TCPRENOFAILURES;  in tcp_retransmit_timer()
  569  if (mib_idx)  in tcp_retransmit_timer()
  570  __NET_INC_STATS(sock_net(sk), mib_idx);  in tcp_retransmit_timer()
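Taken together, these hits show tcp_retransmit_timer() picking a failure counter once the retransmit timer fires, then bumping it a single time. A minimal sketch of that selection, assuming the ca_state/sacked_out conditions from mainline context (only the assignments and the __NET_INC_STATS() call appear in the hits; the helper name is hypothetical):

    /*
     * Hedged sketch of the failure accounting in tcp_retransmit_timer().
     * The branch conditions and the helper name are assumptions; only the
     * mib_idx assignments and __NET_INC_STATS() are in the hits above.
     */
    static void sketch_tcp_retransmit_count_failure(struct sock *sk)
    {
            const struct inet_connection_sock *icsk = inet_csk(sk);
            const struct tcp_sock *tp = tcp_sk(sk);
            int mib_idx = 0;

            if (icsk->icsk_ca_state == TCP_CA_Recovery)
                    mib_idx = tcp_is_sack(tp) ? LINUX_MIB_TCPSACKRECOVERYFAIL :
                                                LINUX_MIB_TCPRENORECOVERYFAIL;
            else if (icsk->icsk_ca_state == TCP_CA_Loss)
                    mib_idx = LINUX_MIB_TCPLOSSFAILURES;
            else if (icsk->icsk_ca_state == TCP_CA_Disorder || tp->sacked_out)
                    mib_idx = tcp_is_sack(tp) ? LINUX_MIB_TCPSACKFAILURES :
                                                LINUX_MIB_TCPRENOFAILURES;

            if (mib_idx)
                    __NET_INC_STATS(sock_net(sk), mib_idx);
    }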
tcp_input.c
  1853  int mib_idx;  in tcp_sacktag_write_queue() [local]
  1857  mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO;  in tcp_sacktag_write_queue()
  1859  mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD;  in tcp_sacktag_write_queue()
  1865  mib_idx = LINUX_MIB_TCPSACKDISCARD;  in tcp_sacktag_write_queue()
  1868  NET_INC_STATS(sock_net(sk), mib_idx);  in tcp_sacktag_write_queue()
  2537  int mib_idx;  in tcp_try_undo_recovery() [local]
  2545  mib_idx = LINUX_MIB_TCPLOSSUNDO;  in tcp_try_undo_recovery()
  2547  mib_idx = LINUX_MIB_TCPFULLUNDO;  in tcp_try_undo_recovery()
  2549  NET_INC_STATS(sock_net(sk), mib_idx);  in tcp_try_undo_recovery()
  2789  int mib_idx;  in tcp_enter_recovery() [local]
  2796  NET_INC_STATS(sock_net(sk), mib_idx);  in tcp_enter_recovery()
  3576  __tcp_oow_rate_limited(struct net *net, int mib_idx, u32 *last_oow_ack_time)  in __tcp_oow_rate_limited() [argument]
  3607  tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb, int mib_idx, u32 *last_oow_ack_time)  in tcp_oow_rate_limited() [argument]
  4433  int mib_idx;  in tcp_dsack_set() [local]
  4440  NET_INC_STATS(sock_net(sk), mib_idx);  in tcp_dsack_set()
  [all ...]
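The first group of tcp_input.c hits is the accounting for SACK blocks that fail validation at the top of tcp_sacktag_write_queue(): a D-SACK that cannot help undo (no undo_marker) or that reports old data is counted as ignored, while a plain out-of-range SACK block is counted as discarded. A hedged sketch of that branch, assuming the enclosing validity check and the dup_sack flag from mainline context:

    /*
     * Hedged sketch of the invalid-SACK-block accounting in
     * tcp_sacktag_write_queue().  The enclosing condition is assumed;
     * only the mib_idx assignments and NET_INC_STATS() are in the hits.
     */
    if (!tcp_is_sackblock_valid(tp, dup_sack, start_seq, end_seq)) {
            int mib_idx;

            if (dup_sack) {
                    if (!tp->undo_marker)
                            mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO;
                    else
                            mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD;
            } else {
                    /* A plain SACK block below snd_una carries no new state. */
                    mib_idx = LINUX_MIB_TCPSACKDISCARD;
            }

            NET_INC_STATS(sock_net(sk), mib_idx);
    }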
tcp_minisocks.c
  43  tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw, const struct sk_buff *skb, int mib_idx)  in tcp_timewait_check_oow_rate_limit() [argument]
  44  const struct sk_buff *skb, int mib_idx)  in tcp_timewait_check_oow_rate_limit()
  48  if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,  in tcp_timewait_check_oow_rate_limit()
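The tcp_minisocks.c hits are the TIME-WAIT wrapper that passes the caller's mib_idx straight through to tcp_oow_rate_limited(). A hedged reconstruction of the helper; only lines 43-48 appear in the hits, the rest of the body is assumed from mainline context:

    /* Hedged sketch of tcp_timewait_check_oow_rate_limit(). */
    static enum tcp_tw_status
    tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
                                      const struct sk_buff *skb, int mib_idx)
    {
            struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

            if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
                                      &tcptw->tw_last_oow_ack_time))
                    return TCP_TW_ACK;      /* not rate-limited: answer with an ACK */

            /* Rate-limited: drop the segment and release the tw reference. */
            inet_twsk_put(tw);
            return TCP_TW_SUCCESS;
    }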
tcp_output.c
  3324  int mib_idx;  in tcp_xmit_retransmit_queue() [local]
  3361  mib_idx = LINUX_MIB_TCPFASTRETRANS;  in tcp_xmit_retransmit_queue()
  3363  mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;  in tcp_xmit_retransmit_queue()
  3375  NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));  in tcp_xmit_retransmit_queue()
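These hits show the retransmit queue walk crediting each retransmitted skb to either the fast-retransmit or the slow-start (RTO recovery) counter, weighted by the segment count. A hedged sketch of that accounting, assuming the icsk_ca_state test and the intervening retransmit call from mainline context:

    /*
     * Hedged sketch of the per-skb accounting in tcp_xmit_retransmit_queue().
     * Only the assignments and NET_ADD_STATS() appear in the hits.
     */
    int mib_idx;

    if (icsk->icsk_ca_state != TCP_CA_Loss)
            mib_idx = LINUX_MIB_TCPFASTRETRANS;      /* fast/SACK recovery */
    else
            mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS; /* RTO-driven recovery */

    /* ... the skb is actually retransmitted here ... */

    NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));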
/kernel/linux/linux-6.6/net/ipv4/
tcp_timer.c
  556  int mib_idx = 0;  in tcp_retransmit_timer() [local]
  560  mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;  in tcp_retransmit_timer()
  562  mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;  in tcp_retransmit_timer()
  564  mib_idx = LINUX_MIB_TCPLOSSFAILURES;  in tcp_retransmit_timer()
  568  mib_idx = LINUX_MIB_TCPSACKFAILURES;  in tcp_retransmit_timer()
  570  mib_idx = LINUX_MIB_TCPRENOFAILURES;  in tcp_retransmit_timer()
  572  if (mib_idx)  in tcp_retransmit_timer()
  573  __NET_INC_STATS(sock_net(sk), mib_idx);  in tcp_retransmit_timer()
tcp_input.c
  1872  int mib_idx;  in tcp_sacktag_write_queue() [local]
  1876  mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO;  in tcp_sacktag_write_queue()
  1878  mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD;  in tcp_sacktag_write_queue()
  1884  mib_idx = LINUX_MIB_TCPSACKDISCARD;  in tcp_sacktag_write_queue()
  1887  NET_INC_STATS(sock_net(sk), mib_idx);  in tcp_sacktag_write_queue()
  2556  int mib_idx;  in tcp_try_undo_recovery() [local]
  2564  mib_idx = LINUX_MIB_TCPLOSSUNDO;  in tcp_try_undo_recovery()
  2566  mib_idx = LINUX_MIB_TCPFULLUNDO;  in tcp_try_undo_recovery()
  2568  NET_INC_STATS(sock_net(sk), mib_idx);  in tcp_try_undo_recovery()
  2822  int mib_idx;  in tcp_enter_recovery() [local]
  2829  NET_INC_STATS(sock_net(sk), mib_idx);  in tcp_enter_recovery()
  3608  __tcp_oow_rate_limited(struct net *net, int mib_idx, u32 *last_oow_ack_time)  in __tcp_oow_rate_limited() [argument]
  3639  tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb, int mib_idx, u32 *last_oow_ack_time)  in tcp_oow_rate_limited() [argument]
  4478  int mib_idx;  in tcp_dsack_set() [local]
  4485  NET_INC_STATS(sock_net(sk), mib_idx);  in tcp_dsack_set()
  [all ...]
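Lines 3608 and 3639 are the out-of-window rate limiter that most of the other call sites feed into: if less than tcp_invalid_ratelimit has elapsed since the last out-of-window ACK, the caller's mib_idx counter is bumped and the duplicate ACK is suppressed. A hedged sketch of the inner helper; only the signature and the NET_INC_STATS() call appear in the hits, the timing logic is assumed from mainline context:

    /* Hedged sketch of __tcp_oow_rate_limited(). */
    static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
                                       u32 *last_oow_ack_time)
    {
            if (*last_oow_ack_time) {
                    s32 elapsed = (s32)(tcp_jiffies32 - *last_oow_ack_time);

                    if (0 <= elapsed &&
                        elapsed < READ_ONCE(net->ipv4.sysctl_tcp_invalid_ratelimit)) {
                            NET_INC_STATS(net, mib_idx);
                            return true;    /* rate-limited: do not ACK yet */
                    }
            }

            *last_oow_ack_time = tcp_jiffies32;
            return false;                   /* not rate-limited: send the dupack */
    }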
tcp_minisocks.c
  36  tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw, const struct sk_buff *skb, int mib_idx)  in tcp_timewait_check_oow_rate_limit() [argument]
  37  const struct sk_buff *skb, int mib_idx)  in tcp_timewait_check_oow_rate_limit()
  41  if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,  in tcp_timewait_check_oow_rate_limit()
tcp_output.c
  3414  int mib_idx;  in tcp_xmit_retransmit_queue() [local]
  3451  mib_idx = LINUX_MIB_TCPFASTRETRANS;  in tcp_xmit_retransmit_queue()
  3453  mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;  in tcp_xmit_retransmit_queue()
  3465  NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));  in tcp_xmit_retransmit_queue()
/kernel/linux/common_modules/newip/third_party/linux-5.10/net/newip/
tcp_nip_input.c
  1572  static bool __tcp_nip_oow_rate_limited(struct net *net, int mib_idx, u32 *last_oow_ack_time)  in __tcp_nip_oow_rate_limited() [argument]
  1579  NET_INC_STATS(net, mib_idx);  in __tcp_nip_oow_rate_limited()
/kernel/linux/linux-5.10/include/net/
tcp.h
  1560  int mib_idx, u32 *last_oow_ack_time);
/kernel/linux/linux-6.6/include/net/
tcp.h
  1642  int mib_idx, u32 *last_oow_ack_time);
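Both trees declare the same prototype in include/net/tcp.h; callers pass the specific LINUX_MIB_* counter they want credited when a segment is rate-limited. A hedged usage sketch follows; the caller, its counter choice and the reliance on tcp_sock's last_oow_ack_time field are illustrative assumptions, not taken from the hits:

    /* Prototype as declared in include/net/tcp.h (5.10 and 6.6). */
    bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
                              int mib_idx, u32 *last_oow_ack_time);

    /* Hypothetical caller, for illustration only: suppress the duplicate ACK
     * for an out-of-window segment unless enough time has passed, crediting
     * the skip to a caller-chosen MIB counter. */
    static void example_handle_oow_segment(struct sock *sk, const struct sk_buff *skb)
    {
            struct tcp_sock *tp = tcp_sk(sk);

            if (tcp_oow_rate_limited(sock_net(sk), skb,
                                     LINUX_MIB_TCPACKSKIPPEDPAWS,
                                     &tp->last_oow_ack_time))
                    return;         /* rate-limited: silently drop */

            tcp_send_ack(sk);       /* otherwise answer with a (duplicate) ACK */
    }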