Lines matching defs:buff. All hits appear to be in net/ipv4/tcp_output.c; the leading numbers below are that file's line numbers (recent kernels, roughly v6.x) and drift between versions.
1526 /* Insert buff after skb on the write or rtx queue of sk. */
1528 struct sk_buff *buff,
1533 __skb_queue_after(&sk->sk_write_queue, skb, buff);
1535 tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
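The hits at 1526-1535 fall inside tcp_insert_write_queue_after(). Reconstructed around the matched lines, the whole helper is a two-way dispatch; a sketch, with exact lines drifting between kernel versions:

static void tcp_insert_write_queue_after(struct sk_buff *skb,
                                         struct sk_buff *buff,
                                         struct sock *sk,
                                         enum tcp_queue tcp_queue)
{
        if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE)
                __skb_queue_after(&sk->sk_write_queue, skb, buff);
        else
                tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
}

Unsent data sits on the linked-list write queue, while transmitted-but-unacked skbs live on the rb-tree retransmit queue, which is why every caller has to say which queue the skb/buff pair is on.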
1548 struct sk_buff *buff;
1577 buff = tcp_stream_alloc_skb(sk, gfp, true);
1578 if (!buff)
1580 skb_copy_decrypted(buff, skb);
1581 mptcp_skb_ext_copy(buff, skb);
1583 sk_wmem_queued_add(sk, buff->truesize);
1584 sk_mem_charge(sk, buff->truesize);
1586 buff->truesize += nlen;
1590 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1591 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1592 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1597 TCP_SKB_CB(buff)->tcp_flags = flags;
1598 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1599 tcp_skb_fragment_eor(skb, buff);
1601 skb_split(skb, buff, len);
1603 skb_set_delivery_time(buff, skb->tstamp, true);
1604 tcp_fragment_tstamp(skb, buff);
1610 tcp_set_skb_tso_segs(buff, mss_now);
1613 TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx;
1618 if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
1620 tcp_skb_pcount(buff);
1627 __skb_header_release(buff);
1628 tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
1630 list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor);
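The block at 1548-1630 is the body of tcp_fragment(), which splits skb in two at byte offset len, with the newly allocated buff taking over the tail. A condensed sketch reconstructed around the matched lines; error paths and some bookkeeping are elided, and details vary by kernel version:

int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
                 struct sk_buff *skb, u32 len,
                 unsigned int mss_now, gfp_t gfp)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *buff;
        int old_factor, nlen = skb->len - len;  /* bytes moving to buff */
        u8 flags;

        buff = tcp_stream_alloc_skb(sk, gfp, true);
        if (!buff)
                return -ENOMEM;
        skb_copy_decrypted(buff, skb);
        mptcp_skb_ext_copy(buff, skb);

        /* Charge the new skb to the socket, then shift the moved
         * payload's share of truesize from skb to buff.
         */
        sk_wmem_queued_add(sk, buff->truesize);
        sk_mem_charge(sk, buff->truesize);
        buff->truesize += nlen;
        skb->truesize -= nlen;

        /* Carve up the sequence space: [seq, end_seq) becomes
         * skb [seq, seq + len) followed by buff [seq + len, end_seq).
         */
        TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
        TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
        TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

        /* PSH and FIN belong on the tail half only. */
        flags = TCP_SKB_CB(skb)->tcp_flags;
        TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
        TCP_SKB_CB(buff)->tcp_flags = flags;
        TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
        tcp_skb_fragment_eor(skb, buff);

        skb_split(skb, buff, len);              /* move the payload */
        skb_set_delivery_time(buff, skb->tstamp, true);
        tcp_fragment_tstamp(skb, buff);

        /* Recompute the GSO segment count of both halves. */
        old_factor = tcp_skb_pcount(skb);
        tcp_set_skb_tso_segs(skb, mss_now);
        tcp_set_skb_tso_segs(buff, mss_now);

        TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx;

        /* If the original was already sent, fix in-flight counters. */
        if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
                int diff = old_factor - tcp_skb_pcount(skb) -
                           tcp_skb_pcount(buff);

                if (diff)
                        tcp_adjust_pcount(sk, skb, diff);
        }

        __skb_header_release(buff);
        tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
        if (tcp_queue == TCP_FRAG_IN_RTX_QUEUE)
                list_add(&buff->tcp_tsorted_anchor,
                         &skb->tcp_tsorted_anchor);
        return 0;
}

Worked example: if skb covered sequence space [1000, 3000) and len is 1200, the split leaves skb as [1000, 2200) and buff as [2200, 3000); a pending FIN moves with the tail to buff.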
2131 struct sk_buff *buff;
2137 buff = tcp_stream_alloc_skb(sk, gfp, true);
2138 if (unlikely(!buff))
2140 skb_copy_decrypted(buff, skb);
2141 mptcp_skb_ext_copy(buff, skb);
2143 sk_wmem_queued_add(sk, buff->truesize);
2144 sk_mem_charge(sk, buff->truesize);
2145 buff->truesize += nlen;
2149 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
2150 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
2151 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
2156 TCP_SKB_CB(buff)->tcp_flags = flags;
2158 tcp_skb_fragment_eor(skb, buff);
2160 skb_split(skb, buff, len);
2161 tcp_fragment_tstamp(skb, buff);
2165 tcp_set_skb_tso_segs(buff, mss_now);
2168 __skb_header_release(buff);
2169 tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE);
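The hits at 2131-2169 appear to be from tso_fragment(), the send-path twin of tcp_fragment(). It only ever splits an skb that has never been transmitted, so it can skip the sacked-state copy, delivery-time inheritance, and in-flight pcount fixups, and it always targets the write queue. A sketch under the same caveats:

static int tso_fragment(struct sock *sk, struct sk_buff *skb,
                        unsigned int len, unsigned int mss_now, gfp_t gfp)
{
        int nlen = skb->len - len;
        struct sk_buff *buff;
        u8 flags;

        buff = tcp_stream_alloc_skb(sk, gfp, true);
        if (unlikely(!buff))
                return -ENOMEM;
        skb_copy_decrypted(buff, skb);
        mptcp_skb_ext_copy(buff, skb);

        sk_wmem_queued_add(sk, buff->truesize);
        sk_mem_charge(sk, buff->truesize);
        buff->truesize += nlen;
        skb->truesize -= nlen;

        /* Same sequence-space carve-up as tcp_fragment(). */
        TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
        TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
        TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

        /* PSH and FIN go on the second half only. */
        flags = TCP_SKB_CB(skb)->tcp_flags;
        TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
        TCP_SKB_CB(buff)->tcp_flags = flags;
        tcp_skb_fragment_eor(skb, buff);

        skb_split(skb, buff, len);
        tcp_fragment_tstamp(skb, buff);

        tcp_set_skb_tso_segs(skb, mss_now);
        tcp_set_skb_tso_segs(buff, mss_now);

        /* Link buff into the send queue right after skb. */
        __skb_header_release(buff);
        tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE);
        return 0;
}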
3955 struct sk_buff *buff;
3970 buff = tcp_stream_alloc_skb(sk, sk->sk_allocation, true);
3971 if (unlikely(!buff))
3974 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
3977 tcp_connect_queue_skb(sk, buff);
3978 tcp_ecn_send_syn(sk, buff);
3979 tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
3982 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
3983 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
3992 buff = tcp_send_head(sk);
3993 if (unlikely(buff)) {
3994 WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq);
3995 tp->pushed_seq = TCP_SKB_CB(buff)->seq;
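Lines 3955-3995 are from tcp_connect(), where buff is the SYN itself. A sketch of the send path around the matched lines; connection init, repair mode, stats, and retransmit-timer arming are elided:

int tcp_connect(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *buff;
        int err;

        /* ... tcp_connect_init(sk) and repair-mode handling ... */

        buff = tcp_stream_alloc_skb(sk, sk->sk_allocation, true);
        if (unlikely(!buff))
                return -ENOBUFS;

        /* Build a data-less segment carrying SYN; it consumes one
         * sequence number.
         */
        tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
        tcp_connect_queue_skb(sk, buff);
        tcp_ecn_send_syn(sk, buff);
        tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);

        /* Send off SYN; include data in Fast Open. */
        err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
              tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
        if (err == -ECONNREFUSED)
                return err;

        WRITE_ONCE(tp->snd_nxt, tp->write_seq);
        tp->pushed_seq = tp->write_seq;
        /* Fast Open may leave unsent data queued behind the SYN;
         * snd_nxt must then stop at the head of that data.
         */
        buff = tcp_send_head(sk);
        if (unlikely(buff)) {
                WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq);
                tp->pushed_seq = TCP_SKB_CB(buff)->seq;
        }

        /* ... arm the retransmit timer to repeat the SYN ... */
        return 0;
}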
4079 struct sk_buff *buff;
4089 buff = alloc_skb(MAX_TCP_HEADER,
4091 if (unlikely(!buff)) {
4105 skb_reserve(buff, MAX_TCP_HEADER);
4106 tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
4112 skb_set_tcp_pure_ack(buff);
4115 __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
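Finally, 4079-4115 come from __tcp_send_ack(), where buff is a pure ACK that never touches the write queue. A sketch around the matched lines; the allocation-failure branch, which falls back to rearming the delayed-ACK timer, is elided:

void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
{
        struct sk_buff *buff;

        /* If we have been reset, we may not send again. */
        if (sk->sk_state == TCP_CLOSE)
                return;

        /* Not queued on the socket, so tcp_transmit_skb() will set
         * ownership to this sock itself.
         */
        buff = alloc_skb(MAX_TCP_HEADER,
                         sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
        if (unlikely(!buff)) {
                /* ... schedule a delayed-ACK retry instead ... */
                return;
        }

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(buff, MAX_TCP_HEADER);
        tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);

        /* Keep pure ACKs out of TCP Small Queues and fq pacing
         * accounting.
         */
        skb_set_tcp_pure_ack(buff);

        /* Send it off; this clears delayed ACKs for us. */
        __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
}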