Lines Matching defs:flag
995 int flag;
1021 state->flag |= FLAG_DSACK_TLP;
1035 if (tp->reord_seen && !(state->flag & FLAG_DSACK_TLP))
1038 state->flag |= FLAG_DSACKING_ACK;
1408 state->flag |= FLAG_ORIG_SACK_ACKED;
1421 state->flag |= FLAG_DATA_SACKED;
1842 state->flag = 0;
2018 return state->flag;
2339 static bool tcp_time_to_recover(struct sock *sk, int flag)
2643 void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag)
2660 if (flag & FLAG_SND_UNA_ADVANCED && !newly_lost)
2713 static void tcp_try_to_open(struct sock *sk, int flag)
2722 if (flag & FLAG_ECE)
2845 static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
2851 if ((flag & FLAG_SND_UNA_ADVANCED || rcu_access_pointer(tp->fastopen_rsk)) &&
2859 if ((flag & FLAG_ORIG_SACK_ACKED) &&
2864 if (flag & FLAG_DATA_SACKED || num_dupack)
2866 } else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {
2891 tcp_add_reno_sack(sk, num_dupack, flag & FLAG_ECE);
2892 else if (flag & FLAG_SND_UNA_ADVANCED)
2976 int fast_rexmit = 0, flag = *ack_flag;
2977 bool ece_ack = flag & FLAG_ECE;
2978 bool do_lost = num_dupack || ((flag & FLAG_DATA_SACKED) &&
3025 if (!(flag & FLAG_SND_UNA_ADVANCED)) {
3036 if (!tcp_time_to_recover(sk, flag))
3045 tcp_process_loss(sk, flag, num_dupack, rexmit);
3054 if (flag & FLAG_SND_UNA_ADVANCED)
3063 if (!tcp_time_to_recover(sk, flag)) {
3064 tcp_try_to_open(sk, flag);
3089 static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us, const int flag)
3094 if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) {
3105 static bool tcp_ack_update_rtt(struct sock *sk, const int flag,
3126 flag & FLAG_ACKED) {
3144 tcp_update_rtt_min(sk, ca_rtt_us, flag);
3272 int flag = 0;
3299 flag |= FLAG_RETRANS_DATA_ACKED;
3309 flag |= FLAG_ORIG_SACK_ACKED;
3335 flag |= FLAG_DATA_ACKED;
3337 flag |= FLAG_SYN_ACKED;
3364 flag |= FLAG_SACK_RENEGING;
3367 if (likely(first_ackt) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
3374 !(flag & (FLAG_CA_ALERT | FLAG_SYN_ACKED))) {
3379 flag |= FLAG_ACK_MAYBE_DELAYED;
3386 rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us,
3389 if (flag & FLAG_ACKED) {
3390 flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */
3405 if (flag & FLAG_RETRANS_DATA_ACKED)
3406 flag &= ~FLAG_ORIG_SACK_ACKED;
3424 flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */
3459 return flag;
3486 static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag)
3488 return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
3493 static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
3503 return flag & FLAG_FORWARD_PROGRESS;
3505 return flag & FLAG_DATA_ACKED;
3514 int flag, const struct rate_sample *rs)
3525 tcp_cwnd_reduction(sk, acked_sacked, rs->losses, flag);
3526 } else if (tcp_may_raise_cwnd(sk, flag)) {
3574 int flag = 0;
3581 flag |= FLAG_WIN_UPDATE;
3605 return flag;
3709 static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
3719 } else if (flag & FLAG_DSACK_TLP) {
3732 } else if (!(flag & (FLAG_SND_UNA_ADVANCED |
3769 static u32 tcp_newly_delivered(struct sock *sk, u32 prior_delivered, int flag)
3777 if (flag & FLAG_ECE)
3784 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3818 if (!(flag & FLAG_NO_CHALLENGE_ACK))
3832 flag |= FLAG_SND_UNA_ADVANCED;
3848 if (flag & FLAG_UPDATE_TS_RECENT)
3851 if ((flag & (FLAG_SLOWPATH | FLAG_SND_UNA_ADVANCED)) ==
3859 flag |= FLAG_WIN_UPDATE;
3868 flag |= FLAG_DATA;
3872 flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
3875 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
3879 flag |= FLAG_ECE;
3885 flag & FLAG_ECE);
3887 if (flag & FLAG_WIN_UPDATE)
3912 flag |= tcp_clean_rtx_queue(sk, skb, prior_fack, prior_snd_una,
3913 &sack_state, flag & FLAG_ECE);
3918 tcp_process_tlp_ack(sk, ack, flag);
3920 if (tcp_ack_is_dubious(sk, flag)) {
3921 if (!(flag & (FLAG_SND_UNA_ADVANCED |
3925 if (!(flag & FLAG_DATA))
3928 tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
3933 if (flag & FLAG_SET_XMIT_TIMER)
3936 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
3939 delivered = tcp_newly_delivered(sk, delivered, flag);
3941 rs.is_ack_delayed = !!(flag & FLAG_ACK_MAYBE_DELAYED);
3943 tcp_cong_control(sk, ack, delivered, flag, sack_state.rate);
3949 if (flag & FLAG_DSACKING_ACK) {
3950 tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
3952 tcp_newly_delivered(sk, delivered, flag);
3961 tcp_process_tlp_ack(sk, ack, flag);
3969 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
3971 tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
3973 tcp_newly_delivered(sk, delivered, flag);
5922 * PSH flag is ignored.
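
The matches above all track a single int bitmask named flag through the ACK-processing path; the function and macro names are consistent with the Linux TCP input code in net/ipv4/tcp_input.c. Below is a minimal, self-contained user-space sketch of how such a FLAG_* bitmask is typically defined and combined, including a simplified analogue of the tcp_ack_is_dubious() test that appears in the listing. The concrete bit values, the composite masks, and the ack_is_dubious() helper here are illustrative assumptions modelled on that file, not the kernel's definitive definitions; the exact values and the full set of bits differ between kernel versions.

/*
 * Illustrative sketch only: mirrors the FLAG_* bitmask style used for the
 * "flag" word in the listing above.  Bit values and composite masks are
 * assumptions modelled on net/ipv4/tcp_input.c and may differ between
 * kernel versions; this is a standalone user-space demo, not kernel code.
 */
#include <stdio.h>

#define FLAG_DATA		0x01	/* ACK carried new payload data        */
#define FLAG_WIN_UPDATE		0x02	/* ACK updated the send window         */
#define FLAG_DATA_ACKED		0x04	/* ACK acknowledged new data           */
#define FLAG_SYN_ACKED		0x10	/* ACK acknowledged our SYN            */
#define FLAG_DATA_SACKED	0x20	/* ACK carried new SACK information    */
#define FLAG_ECE		0x40	/* ACK had the ECE bit set             */
#define FLAG_SND_UNA_ADVANCED	0x400	/* snd_una moved forward               */
#define FLAG_DSACKING_ACK	0x800	/* SACK blocks contained D-SACK info   */

/* Composite masks, combined the same way the checks in the listing use them. */
#define FLAG_ACKED		(FLAG_DATA_ACKED | FLAG_SYN_ACKED)
#define FLAG_NOT_DUP		(FLAG_DATA | FLAG_WIN_UPDATE | FLAG_ACKED)
#define FLAG_CA_ALERT		(FLAG_DATA_SACKED | FLAG_ECE | FLAG_DSACKING_ACK)
#define FLAG_FORWARD_PROGRESS	(FLAG_DATA_ACKED | FLAG_DATA_SACKED)

/*
 * Rough analogue of the tcp_ack_is_dubious() check in the listing: an ACK
 * is "dubious" if it looks like a duplicate or raises a congestion alert.
 * (The kernel additionally checks the socket's congestion-avoidance state,
 * which this sketch omits.)
 */
static int ack_is_dubious(int flag)
{
	return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT);
}

int main(void)
{
	/* Build up a flag word the way the ACK path ORs bits together. */
	int flag = 0;

	flag |= FLAG_SND_UNA_ADVANCED;	/* cumulative ACK moved snd_una   */
	flag |= FLAG_DATA_ACKED;	/* ...so new data was acked       */
	flag |= FLAG_WIN_UPDATE;	/* and the send window was updated */

	printf("flag=0x%x dubious=%d forward_progress=%d\n",
	       flag, ack_is_dubious(flag),
	       !!(flag & FLAG_FORWARD_PROGRESS));

	/* An ACK that only carries new SACK info is treated as dubious. */
	flag = FLAG_DATA_SACKED;
	printf("flag=0x%x dubious=%d\n", flag, ack_is_dubious(flag));

	return 0;
}

Because every condition is a simple bit test on one int, the ACK path can accumulate everything it learns about an incoming ACK in a single pass and hand the same word to RTT sampling, loss recovery, and congestion control, which is the pattern the matches above trace from tcp_ack() down into tcp_fastretrans_alert() and tcp_cong_control().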