/kernel/linux/common_modules/newip/third_party/linux-5.10/net/newip/
tcp_nip_input.c
    207   u32 diff = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(cur)->seq;  in tcp_nip_left_overlap()
    217   u32 diff = TCP_SKB_CB(cur)->end_seq - TCP_SKB_CB(skb)->seq;  in tcp_nip_right_overlap()
    226   tcb->end_seq -= diff;  in tcp_nip_right_overlap()
    261   if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)  in tcp_nip_try_coalesce()
    272   TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;  in tcp_nip_try_coalesce()
    330   if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) {  in tcp_nip_ofo_queue()
    338   tcp_nip_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);  in tcp_nip_ofo_queue()
    369   u32 end_seq;  in tcp_nip_data_queue_ofo()  (local)
    388   end_seq  in tcp_nip_data_queue_ofo()
    1680  tcp_nip_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)  tcp_nip_sequence()  (argument)
    [all...]
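The two overlap helpers (lines 207/217) and the coalesce check (line 261) all reduce to unsigned 32-bit arithmetic on [seq, end_seq) ranges, which stays correct across sequence-number wraparound. A minimal userspace sketch of the pattern, with before()/after() written out the way include/net/tcp.h defines them; the seg struct and helper names are illustrative, not the NewIP API:

    #include <stdint.h>

    /* Wraparound-safe sequence compares, same shape as include/net/tcp.h. */
    #define before(a, b)  ((int32_t)((uint32_t)(a) - (uint32_t)(b)) < 0)
    #define after(a, b)   before((b), (a))

    struct seg { uint32_t seq, end_seq; };        /* covers [seq, end_seq) */

    /* Bytes of skb that reach past the start of cur (cf. line 207). */
    static uint32_t left_overlap(const struct seg *skb, const struct seg *cur)
    {
            return skb->end_seq - cur->seq;
    }

    /* Bytes of cur that reach past the start of skb (cf. line 217). */
    static uint32_t right_overlap(const struct seg *skb, const struct seg *cur)
    {
            return cur->end_seq - skb->seq;
    }

Trimming end_seq by the overlap (line 226) drops exactly the duplicate bytes, and two segments are candidates for coalescing only when they are back-to-back, i.e. from->seq == to->end_seq (line 261).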
tcp_nip_output.c
    136   WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);  in tcp_nip_event_new_data_sent()
    358   TCP_SKB_CB(skb)->end_seq = seq;  in tcp_nip_init_nondata_skb()
    369   tcb->end_seq += skb->len;  in tcp_nip_connect_queue_skb()
    374   WRITE_ONCE(tp->write_seq, tcb->end_seq);  in tcp_nip_connect_queue_skb()
    505   TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq  in __tcp_nip_transmit_skb()
    614   tp->write_seq = TCP_SKB_CB(skb)->end_seq;  in tcp_nip_queue_skb()
    1000  TCP_SKB_CB(tskb)->end_seq++;  in tcp_nip_send_fin()
    1045  u32 end_seq = TCP_SKB_CB(skb)->end_seq;  in tcp_nip_snd_wnd_test()  (local)
    1048  end_seq  in tcp_nip_snd_wnd_test()
    [all...]
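The window test is truncated at line 1048, but the visible part matches the mainline tcp_snd_wnd_test() pattern: a segment may be sent only if its last byte fits inside the window advertised by the peer. A sketch under that assumption, approximating the kernel's tcp_wnd_end(tp) by snd_una + snd_wnd:

    #include <stdint.h>
    #include <stdbool.h>

    #define before(a, b)  ((int32_t)((uint32_t)(a) - (uint32_t)(b)) < 0)
    #define after(a, b)   before((b), (a))

    /* May a segment ending at end_seq be transmitted now? Only if it
     * does not pass the right window edge (all arithmetic mod 2^32). */
    static bool snd_wnd_test(uint32_t snd_una, uint32_t snd_wnd,
                             uint32_t end_seq)
    {
            return !after(end_seq, snd_una + snd_wnd);
    }

The end_seq++ in tcp_nip_send_fin() (line 1000) is the flip side of the same bookkeeping: a FIN occupies one sequence number even though it carries no payload.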
/kernel/linux/linux-6.6/net/mptcp/
fastopen.c
    47    MPTCP_SKB_CB(skb)->end_seq = 0;  in mptcp_fastopen_subflow_synack_set_params()
    70    WARN_ON_ONCE(MPTCP_SKB_CB(skb)->end_seq);  in __mptcp_fastopen_gen_msk_ackseq()
    71    pr_debug("msk %p moving seq %llx -> %llx end_seq %llx -> %llx", sk,  in __mptcp_fastopen_gen_msk_ackseq()
    73    MPTCP_SKB_CB(skb)->end_seq, MPTCP_SKB_CB(skb)->end_seq + msk->ack_seq);  in __mptcp_fastopen_gen_msk_ackseq()
    75    MPTCP_SKB_CB(skb)->end_seq += msk->ack_seq;  in __mptcp_fastopen_gen_msk_ackseq()
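Data received while the MPTCP fast-open handshake is still in flight is queued with a zero placeholder mapping (line 47); once the peer's initial data-sequence number is known, every queued skb is shifted by msk->ack_seq (line 75). A sketch of that rebase over an illustrative control block (not the real MPTCP_SKB_CB layout):

    #include <stdint.h>

    struct mcb { uint64_t map_seq, end_seq; };    /* illustrative */

    /* Move a zero-based placeholder mapping onto the real 64-bit
     * data-sequence space once ack_seq is learned from the handshake. */
    static void rebase_mapping(struct mcb *cb, uint64_t ack_seq)
    {
            cb->map_seq += ack_seq;
            cb->end_seq += ack_seq;
    }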
protocol.c
    160   to->len, MPTCP_SKB_CB(from)->end_seq);  in mptcp_try_coalesce()
    161   MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;  in mptcp_try_coalesce()
    177   if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq)  in mptcp_ooo_try_coalesce()
    229   u64 seq, end_seq, max_seq;  in mptcp_data_queue_ofo()  (local)
    233   end_seq = MPTCP_SKB_CB(skb)->end_seq;  in mptcp_data_queue_ofo()
    238   if (after64(end_seq, max_seq)) {  in mptcp_data_queue_ofo()
    242   (unsigned long long)end_seq - (unsigned long)max_seq,  in mptcp_data_queue_ofo()
    267   if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) {  in mptcp_data_queue_ofo()
    734   u64 end_seq;  __mptcp_ofo_queue()  (local)
    [all...]
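MPTCP's data-sequence space is 64-bit, so the out-of-order queue compares with after64()/before64() instead of the 32-bit helpers. A sketch of the comparison plus the out-of-window drop at line 238; the function name is illustrative:

    #include <stdint.h>
    #include <stdbool.h>

    /* 64-bit wraparound-safe compares, same shape as the kernel's. */
    #define before64(a, b)  ((int64_t)((uint64_t)(a) - (uint64_t)(b)) < 0)
    #define after64(a, b)   before64((b), (a))

    /* Data wholly beyond the announced window edge is dropped. */
    static bool beyond_window(uint64_t end_seq, uint64_t max_seq)
    {
            return after64(end_seq, max_seq);
    }

Line 267 is the matching fast path: when the new mapping starts at or after ooo_last_skb's end_seq it can be appended at the tail without walking the whole rb-tree.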
/kernel/linux/linux-5.10/net/ipv4/
tcp_recovery.c
    83    tp->rack.end_seq, scb->end_seq))  in tcp_rack_detect_loss()
    123   void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,  in tcp_rack_advance()  (argument)
    145   end_seq, tp->rack.end_seq)) {  in tcp_rack_advance()
    147   tp->rack.end_seq = end_seq;  in tcp_rack_advance()
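RACK remembers the most recently transmitted segment known to be delivered; lines 145-147 advance that state. "Most recent" breaks transmit-timestamp ties by the higher end_seq, mirroring the kernel's tcp_rack_sent_after(). A standalone sketch:

    #include <stdint.h>
    #include <stdbool.h>

    #define before(a, b)  ((int32_t)((uint32_t)(a) - (uint32_t)(b)) < 0)
    #define after(a, b)   before((b), (a))

    /* Was (t1, seq1) sent after (t2, seq2)? Later transmit time wins;
     * on a tie the higher ending sequence does. */
    static bool sent_after(uint64_t t1, uint64_t t2,
                           uint32_t seq1, uint32_t seq2)
    {
            return t1 > t2 || (t1 == t2 && after(seq1, seq2));
    }

    struct rack_state { uint64_t mstamp; uint32_t end_seq; };

    static void rack_advance(struct rack_state *r,
                             uint64_t xmit_time, uint32_t end_seq)
    {
            if (sent_after(xmit_time, r->mstamp, end_seq, r->end_seq)) {
                    r->mstamp  = xmit_time;
                    r->end_seq = end_seq;
            }
    }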
tcp_input.c
    342   if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq)  in tcp_ecn_accept_cwr()
    689   if (TCP_SKB_CB(skb)->end_seq -  in tcp_rcv_rtt_measure_ts()
    997   u32 end_seq, struct tcp_sacktag_state *state)  in tcp_dsack_seen()
    1001  if (!before(start_seq, end_seq))  in tcp_dsack_seen()
    1004  seq_len = end_seq - start_seq;  in tcp_dsack_seen()
    1179  * fact that though start_seq (s) is before end_seq (i.e., not reversed),
    1181  * happens when start_seq resides between end_seq wrap (e_w) and snd_nxt
    1193  * similar case (end_seq after snd_nxt wrap) as earlier reversed check in
    1211  u32 start_seq, u32 end_seq)  in tcp_is_sackblock_valid()
    1214  if (after(end_seq, t  in tcp_is_sackblock_valid()
    996   tcp_dsack_seen(struct tcp_sock *tp, u32 start_seq, u32 end_seq, struct tcp_sacktag_state *state)  tcp_dsack_seen()  (argument)
    1210  tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack, u32 start_seq, u32 end_seq)  tcp_is_sackblock_valid()  (argument)
    1294  tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, u32 start_seq, u32 end_seq)  tcp_match_skb_to_sack()  (argument)
    1343  tcp_sacktag_one(struct sock *sk, struct tcp_sacktag_state *state, u8 sacked, u32 start_seq, u32 end_seq, int dup_sack, int pcount, u64 xmit_time)  tcp_sacktag_one()  (argument)
    1436  u32 end_seq = start_seq + shifted; /* end of newly-SACKed */  tcp_shifted_skb()  (local)
    1542  tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, struct tcp_sacktag_state *state, u32 start_seq, u32 end_seq, bool dup_sack)  tcp_shift_skb_data()  (argument)
    1680  tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, struct tcp_sack_block *next_dup, struct tcp_sacktag_state *state, u32 start_seq, u32 end_seq, bool dup_sack_in)  tcp_sacktag_walk()  (argument)
    1914  u32 end_seq = sp[i].end_seq;  tcp_sacktag_write_queue()  (local)
    4298  tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)  tcp_sequence()  (argument)
    4415  tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq)  tcp_sack_extend()  (argument)
    4428  tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)  tcp_dsack_set()  (argument)
    4448  tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)  tcp_dsack_extend()  (argument)
    4480  u32 end_seq = TCP_SKB_CB(skb)->end_seq;  tcp_send_dupack()  (local)
    4548  tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)  tcp_sack_new_ofo_skb()  (argument)
    4780  u32 seq, end_seq;  tcp_data_queue_ofo()  (local)
    [all...]
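Among these hits, the tcp_sequence() definition (line 4298) is the receiver's basic acceptability test: a segment is in-window if it ends at or after rcv_wup and starts no later than the right window edge. A sketch with the edge passed in explicitly (the kernel derives it as rcv_nxt + tcp_receive_window(tp)):

    #include <stdint.h>
    #include <stdbool.h>

    #define before(a, b)  ((int32_t)((uint32_t)(a) - (uint32_t)(b)) < 0)
    #define after(a, b)   before((b), (a))

    /* Does [seq, end_seq) overlap the receive window [rcv_wup, wnd_end]? */
    static bool seq_acceptable(uint32_t seq, uint32_t end_seq,
                               uint32_t rcv_wup, uint32_t wnd_end)
    {
            return !before(end_seq, rcv_wup) && !after(seq, wnd_end);
    }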
tcp_minisocks.c
    33    static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)  in tcp_in_window()  (argument)
    37    if (after(end_seq, s_win) && before(seq, e_win))  in tcp_in_window()
    39    return seq == e_win && seq == end_seq;  in tcp_in_window()
    117   !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,  in tcp_timewait_state_process()
    131   !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||  in tcp_timewait_state_process()
    132   TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {  in tcp_timewait_state_process()
    141   TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)  in tcp_timewait_state_process()
    146   tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;  in tcp_timewait_state_process()
    175   (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {  in tcp_timewait_state_process()
    714   if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,  in tcp_check_req()
    [all...]
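tcp_in_window() is visible almost whole at lines 33-39. The second return is the subtle part: it admits a zero-length segment sitting exactly at the right edge, which is what a window probe or bare ACK against a closed window looks like. A compilable copy with two worked cases:

    #include <assert.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define before(a, b)  ((int32_t)((uint32_t)(a) - (uint32_t)(b)) < 0)
    #define after(a, b)   before((b), (a))

    static bool tcp_in_window(uint32_t seq, uint32_t end_seq,
                              uint32_t s_win, uint32_t e_win)
    {
            if (after(end_seq, s_win) && before(seq, e_win))
                    return true;
            return seq == e_win && seq == end_seq;
    }

    int main(void)
    {
            assert(tcp_in_window(100, 200, 150, 400)); /* overlaps window   */
            assert(tcp_in_window(400, 400, 150, 400)); /* empty seg at edge */
            return 0;
    }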
tcp_output.c
    73    WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);  in tcp_event_new_data_sent()
    403   TCP_SKB_CB(skb)->end_seq = seq;  in tcp_init_nondata_skb()
    671   *ptr++ = htonl(sp[this_sack].end_seq);  in tcp_options_write()
    1259  TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq  in __tcp_transmit_skb()
    1386  if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)  in __tcp_transmit_skb()
    1435  WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);  in tcp_queue_skb()
    1580  TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;  in tcp_fragment()
    1581  TCP_SKB_CB(skb)->end_seq  in tcp_fragment()
    2102  u32 end_seq = TCP_SKB_CB(skb)->end_seq;  tcp_snd_wnd_test()  (local)
    [all...]
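Lines 1580-1581 are the core of tcp_fragment(): the new buffer takes the tail of the sequence range and the original skb is truncated to the head, so the two halves still cover exactly the original [seq, end_seq). A sketch over an illustrative seg struct:

    #include <stdint.h>

    struct seg { uint32_t seq, end_seq; };        /* covers [seq, end_seq) */

    /* Split at skb->seq + len: buff takes the tail, skb keeps the head. */
    static void seg_split(struct seg *skb, struct seg *buff, uint32_t len)
    {
            buff->seq     = skb->seq + len;
            buff->end_seq = skb->end_seq;
            skb->end_seq  = buff->seq;
    }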
tcp_rate.c
    92    scb->end_seq, rs->last_end_seq)) {  in tcp_rate_skb_delivered()
    97    rs->last_end_seq = scb->end_seq;  in tcp_rate_skb_delivered()
tcp_illinois.c
    49    u32 end_seq; /* right edge of current RTT */  (member)
    62    ca->end_seq = tp->snd_nxt;  in rtt_reset()
    265   if (after(ack, ca->end_seq))  in tcp_illinois_cong_avoid()
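Illinois keeps per-RTT statistics, and its end_seq member marks the right edge of the current round (line 62); an ack passing that edge (line 265) ends the round. CUBIC's HyStart below and the bpf_cubic selftest at the end of this listing use the same idiom. A sketch:

    #include <stdint.h>
    #include <stdbool.h>

    #define before(a, b)  ((int32_t)((uint32_t)(a) - (uint32_t)(b)) < 0)
    #define after(a, b)   before((b), (a))

    struct rtt_round { uint32_t end_seq; };

    /* Start a round: it spans everything sent up to the current snd_nxt. */
    static void round_reset(struct rtt_round *r, uint32_t snd_nxt)
    {
            r->end_seq = snd_nxt;
    }

    /* The round is over once cumulative acks pass its right edge. */
    static bool round_done(const struct rtt_round *r, uint32_t ack)
    {
            return after(ack, r->end_seq);
    }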
tcp_cubic.c
    100   u32 end_seq; /* end_seq of the round */  (member)
    131   ca->end_seq = tp->snd_nxt;  in bictcp_hystart_reset()
    399   if (after(tp->snd_una, ca->end_seq))  in hystart_update()
tcp_fastopen.c
    190   if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)  in tcp_fastopen_add_skb()
    213   tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;  in tcp_fastopen_add_skb()
    368   bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;  in tcp_try_fastopen()
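Line 368 encodes a detail worth spelling out: a SYN consumes exactly one sequence number, so end_seq == seq + 1 for a bare SYN, and anything larger means the SYN carried fast-open payload. As a self-contained one-liner:

    #include <stdint.h>
    #include <stdbool.h>

    /* True when a SYN segment carried data (TCP Fast Open): a bare SYN
     * advances the sequence space by exactly one. */
    static bool syn_has_data(uint32_t seq, uint32_t end_seq)
    {
            return end_seq != seq + 1;
    }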
/kernel/linux/linux-6.6/net/ipv4/
tcp_recovery.c
    78    tp->rack.end_seq, scb->end_seq))  in tcp_rack_detect_loss()
    118   void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,  in tcp_rack_advance()  (argument)
    140   end_seq, tp->rack.end_seq)) {  in tcp_rack_advance()
    142   tp->rack.end_seq = end_seq;  in tcp_rack_advance()
tcp_input.c
    350   if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq)  in tcp_ecn_accept_cwr()
    705   if (TCP_SKB_CB(skb)->end_seq -  in tcp_rcv_rtt_measure_ts()
    1007  u32 end_seq, struct tcp_sacktag_state *state)  in tcp_dsack_seen()
    1011  if (!before(start_seq, end_seq))  in tcp_dsack_seen()
    1014  seq_len = end_seq - start_seq;  in tcp_dsack_seen()
    1020  else if (tp->tlp_high_seq && tp->tlp_high_seq == end_seq)  in tcp_dsack_seen()
    1198  * fact that though start_seq (s) is before end_seq (i.e., not reversed),
    1200  * happens when start_seq resides between end_seq wrap (e_w) and snd_nxt
    1212  * similar case (end_seq after snd_nxt wrap) as earlier reversed check in
    1230  u32 start_seq, u32 end_seq)  in tcp_is_sackblock_valid()
    1006  tcp_dsack_seen(struct tcp_sock *tp, u32 start_seq, u32 end_seq, struct tcp_sacktag_state *state)  tcp_dsack_seen()  (argument)
    1229  tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack, u32 start_seq, u32 end_seq)  tcp_is_sackblock_valid()  (argument)
    1313  tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, u32 start_seq, u32 end_seq)  tcp_match_skb_to_sack()  (argument)
    1362  tcp_sacktag_one(struct sock *sk, struct tcp_sacktag_state *state, u8 sacked, u32 start_seq, u32 end_seq, int dup_sack, int pcount, u64 xmit_time)  tcp_sacktag_one()  (argument)
    1455  u32 end_seq = start_seq + shifted; /* end of newly-SACKed */  tcp_shifted_skb()  (local)
    1561  tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, struct tcp_sacktag_state *state, u32 start_seq, u32 end_seq, bool dup_sack)  tcp_shift_skb_data()  (argument)
    1699  tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, struct tcp_sack_block *next_dup, struct tcp_sacktag_state *state, u32 start_seq, u32 end_seq, bool dup_sack_in)  tcp_sacktag_walk()  (argument)
    1933  u32 end_seq = sp[i].end_seq;  tcp_sacktag_write_queue()  (local)
    4332  tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)  tcp_sequence()  (argument)
    4460  tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq)  tcp_sack_extend()  (argument)
    4473  tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)  tcp_dsack_set()  (argument)
    4493  tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)  tcp_dsack_extend()  (argument)
    4525  u32 end_seq = TCP_SKB_CB(skb)->end_seq;  tcp_send_dupack()  (local)
    4593  tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)  tcp_sack_new_ofo_skb()  (argument)
    4826  u32 seq, end_seq;  tcp_data_queue_ofo()  (local)
    [all...]
tcp_minisocks.c
    26    static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)  in tcp_in_window()  (argument)
    30    if (after(end_seq, s_win) && before(seq, e_win))  in tcp_in_window()
    32    return seq == e_win && seq == end_seq;  in tcp_in_window()
    110   !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,  in tcp_timewait_state_process()
    124   !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||  in tcp_timewait_state_process()
    125   TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {  in tcp_timewait_state_process()
    134   TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)  in tcp_timewait_state_process()
    139   tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;  in tcp_timewait_state_process()
    168   (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {  in tcp_timewait_state_process()
    739   if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,  in tcp_check_req()
    [all...]
tcp_output.c
    71    WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);  in tcp_event_new_data_sent()
    410   TCP_SKB_CB(skb)->end_seq = seq;  in tcp_init_nondata_skb()
    681   *ptr++ = htonl(sp[this_sack].end_seq);  in tcp_options_write()
    1397  if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)  in __tcp_transmit_skb()
    1447  WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);  in tcp_queue_skb()
    1591  TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;  in tcp_fragment()
    1592  TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;  in tcp_fragment()
    1618  if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {  in tcp_fragment()
    2112  u32 end_seq = TCP_SKB_CB(skb)->end_seq;  tcp_snd_wnd_test()  (local)
    [all...]
tcp_rate.c
    93    scb->end_seq, rs->last_end_seq)) {  in tcp_rate_skb_delivered()
    99    rs->last_end_seq = scb->end_seq;  in tcp_rate_skb_delivered()
tcp_illinois.c
    49    u32 end_seq; /* right edge of current RTT */  (member)
    62    ca->end_seq = tp->snd_nxt;  in rtt_reset()
    265   if (after(ack, ca->end_seq))  in tcp_illinois_cong_avoid()
/kernel/linux/linux-5.10/net/netfilter/
nf_conntrack_seqadj.c
    94    if (after(ntohl(sack->end_seq) - seq->offset_before,  in nf_ct_sack_block_adjust()
    96    new_end_seq = htonl(ntohl(sack->end_seq) -  in nf_ct_sack_block_adjust()
    99    new_end_seq = htonl(ntohl(sack->end_seq) -  in nf_ct_sack_block_adjust()
    102   pr_debug("sack_adjust: start_seq: %u->%u, end_seq: %u->%u\n",  in nf_ct_sack_block_adjust()
    104   ntohl(sack->end_seq), ntohl(new_end_seq));  in nf_ct_sack_block_adjust()
    109   sack->end_seq, new_end_seq, false);  in nf_ct_sack_block_adjust()
    111   sack->end_seq = new_end_seq;  in nf_ct_sack_block_adjust()
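When conntrack's sequence adjustment has grown or shrunk the payload stream (FTP-style helpers under NAT), SACK edges echoed by the peer must be translated back by the matching offset, and which offset applies depends on whether the edge lies past the point where the size changed. A host-order sketch of that shape, omitting the htonl/ntohl conversions and the incremental checksum fixup done by the real code:

    #include <stdint.h>

    #define before(a, b)  ((int32_t)((uint32_t)(a) - (uint32_t)(b)) < 0)
    #define after(a, b)   before((b), (a))

    /* Translate one SACK edge back into the original sequence space.
     * correction_pos: where the payload size changed; offset_before/
     * offset_after: cumulative shifts on either side of that point. */
    static uint32_t sack_edge_adjust(uint32_t edge, uint32_t correction_pos,
                                     int32_t offset_before, int32_t offset_after)
    {
            if (after(edge - offset_before, correction_pos))
                    return edge - offset_after;
            return edge - offset_before;
    }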
/kernel/linux/linux-6.6/net/netfilter/
nf_conntrack_seqadj.c
    94    if (after(ntohl(sack->end_seq) - seq->offset_before,  in nf_ct_sack_block_adjust()
    96    new_end_seq = htonl(ntohl(sack->end_seq) -  in nf_ct_sack_block_adjust()
    99    new_end_seq = htonl(ntohl(sack->end_seq) -  in nf_ct_sack_block_adjust()
    102   pr_debug("sack_adjust: start_seq: %u->%u, end_seq: %u->%u\n",  in nf_ct_sack_block_adjust()
    104   ntohl(sack->end_seq), ntohl(new_end_seq));  in nf_ct_sack_block_adjust()
    109   sack->end_seq, new_end_seq, false);  in nf_ct_sack_block_adjust()
    111   sack->end_seq = new_end_seq;  in nf_ct_sack_block_adjust()
/kernel/linux/linux-5.10/net/mptcp/
protocol.c
    36    u64 end_seq;  (member)
    133   to->len, MPTCP_SKB_CB(from)->end_seq);  in mptcp_try_coalesce()
    134   MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;  in mptcp_try_coalesce()
    144   if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq)  in mptcp_ooo_try_coalesce()
    158   u64 seq, end_seq, max_seq;  in mptcp_data_queue_ofo()  (local)
    163   end_seq = MPTCP_SKB_CB(skb)->end_seq;  in mptcp_data_queue_ofo()
    195   if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) {  in mptcp_data_queue_ofo()
    211   if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) {  in mptcp_data_queue_ofo()
    552   u64 end_seq;  mptcp_ofo_queue()  (local)
    [all...]
/kernel/linux/linux-5.10/include/linux/
tcp.h
    69    __be32 end_seq;  (member)
    74    u32 end_seq;  (member)
    216   u32 end_seq;  /* Ending TCP sequence of the skb */  (member)
/kernel/linux/linux-5.10/net/tls/
tls_device.c
    165   if (info && !before(acked_seq, info->end_seq))  in tls_icsk_clean_acked()
    169   if (before(acked_seq, info->end_seq))  in tls_icsk_clean_acked()
    282   record->end_seq = tp->write_seq + record->len;  in tls_push_record()
    607   before(seq, info->end_seq - info->len)) {  in tls_get_record()
    630   last->end_seq))  in tls_get_record()
    639   if (before(seq, info->end_seq)) {  in tls_get_record()
    641   after(info->end_seq,  in tls_get_record()
    642   context->retransmit_hint->end_seq)) {  in tls_get_record()
    717   * req_seq <= seq <= end_seq, and wait for real resync request  in tls_device_rx_resync_async()
    1102  start_marker_record->end_seq  in tls_set_device_offload()
    [all...]
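tls_device keeps one record per TLS record handed to the NIC, each covering [end_seq - len, end_seq) of the TCP stream (line 282). On retransmission the stack must map a TCP sequence number back to its record; tls_get_record() does this with a retransmit hint plus a list walk. A linear-scan sketch of the lookup idea, with illustrative struct and field names:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define before(a, b)  ((int32_t)((uint32_t)(a) - (uint32_t)(b)) < 0)

    struct tls_rec { uint32_t end_seq, len; struct tls_rec *next; };

    /* Find the record whose [end_seq - len, end_seq) range contains seq. */
    static struct tls_rec *find_record(struct tls_rec *head, uint32_t seq)
    {
            struct tls_rec *r;

            for (r = head; r != NULL; r = r->next) {
                    if (!before(seq, r->end_seq - r->len) &&
                        before(seq, r->end_seq))
                            return r;
            }
            return NULL;
    }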
/kernel/linux/linux-6.6/net/tls/
tls_device.c
    177   if (info && !before(acked_seq, info->end_seq))  in tls_icsk_clean_acked()
    181   if (before(acked_seq, info->end_seq))  in tls_icsk_clean_acked()
    294   record->end_seq = tp->write_seq + record->len;  in tls_push_record()
    624   before(seq, info->end_seq - info->len)) {  in tls_get_record()
    647   last->end_seq))  in tls_get_record()
    656   if (before(seq, info->end_seq)) {  in tls_get_record()
    658   after(info->end_seq,  in tls_get_record()
    659   context->retransmit_hint->end_seq)) {  in tls_get_record()
    734   * req_seq <= seq <= end_seq, and wait for real resync request  in tls_device_rx_resync_async()
    1133  start_marker_record->end_seq  in tls_set_device_offload()
    [all...]
/kernel/linux/linux-5.10/tools/testing/selftests/bpf/progs/
bpf_cubic.c
    89    __u32 end_seq; /* end_seq of the round */  (member)
    167   ca->end_seq = tp->snd_nxt;  in bictcp_hystart_reset()
    396   if (hystart && after(ack, ca->end_seq))  in BPF_STRUCT_OPS()