Lines matching refs:buff (each entry: source line number, then the matching line):
1514 /* Insert buff after skb on the write or rtx queue of sk. */
1516 struct sk_buff *buff,
1521 __skb_queue_after(&sk->sk_write_queue, skb, buff);
1523 tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
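The matches at 1514-1523 cover a small helper whose comment is visible above: it places the new buff either on the linear write queue or in the retransmit rb-tree, depending on which queue the caller is fragmenting. Assembling the visible fragments, and inferring the pieces the listing skips (the static qualifier, the exact shape of the condition), the helper looks roughly like this:

        /* Insert buff after skb on the write or rtx queue of sk. */
        static void tcp_insert_write_queue_after(struct sk_buff *skb,
                                                 struct sk_buff *buff,
                                                 struct sock *sk,
                                                 enum tcp_queue tcp_queue)
        {
                if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE)
                        __skb_queue_after(&sk->sk_write_queue, skb, buff);
                else
                        tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
        }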
1536 struct sk_buff *buff;
1567 buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
1568 if (!buff)
1570 skb_copy_decrypted(buff, skb);
1572 sk_wmem_queued_add(sk, buff->truesize);
1573 sk_mem_charge(sk, buff->truesize);
1575 buff->truesize += nlen;
1579 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1580 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1581 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1586 TCP_SKB_CB(buff)->tcp_flags = flags;
1587 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1588 tcp_skb_fragment_eor(skb, buff);
1590 skb_split(skb, buff, len);
1592 buff->ip_summed = CHECKSUM_PARTIAL;
1594 buff->tstamp = skb->tstamp;
1595 tcp_fragment_tstamp(skb, buff);
1601 tcp_set_skb_tso_segs(buff, mss_now);
1604 TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx;
1609 if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
1611 tcp_skb_pcount(buff);
1618 __skb_header_release(buff);
1619 tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
1621 list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor);
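The matches at 1536-1621 look like the body of tcp_fragment(): one queued segment is split in two, skb keeping the first len bytes and the freshly allocated buff taking the rest, after which both halves are re-marked for TSO and buff is linked in right behind skb. The heart of it is the sequence-number bookkeeping at 1579-1581. A minimal, self-contained userspace model of just that arithmetic (the struct and helper below are hypothetical stand-ins, not kernel code):

        #include <inttypes.h>
        #include <stdio.h>

        /* Hypothetical stand-in for the seq/end_seq pair kept in TCP_SKB_CB(). */
        struct seg_range {
                uint32_t seq;           /* first sequence number covered */
                uint32_t end_seq;       /* one past the last sequence number */
        };

        /* Split 'first' at 'len' payload bytes, mirroring lines 1579-1581:
         * 'second' starts exactly where the shortened 'first' now ends.
         */
        static void split_at(struct seg_range *first, struct seg_range *second,
                             uint32_t len)
        {
                second->seq = first->seq + len;
                second->end_seq = first->end_seq;
                first->end_seq = second->seq;
        }

        int main(void)
        {
                struct seg_range skb = { .seq = 1000, .end_seq = 2400 };
                struct seg_range buff;

                split_at(&skb, &buff, 1000);
                printf("skb  [%" PRIu32 ", %" PRIu32 ")\n", skb.seq, skb.end_seq);
                printf("buff [%" PRIu32 ", %" PRIu32 ")\n", buff.seq, buff.end_seq);
                return 0;
        }

Run, this prints skb [1000, 2000) and buff [2000, 2400): the two ranges stay contiguous and together cover exactly what the original segment covered, which is what keeps the split invisible to the receiver.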
2121 struct sk_buff *buff;
2129 buff = sk_stream_alloc_skb(sk, 0, gfp, true);
2130 if (unlikely(!buff))
2132 skb_copy_decrypted(buff, skb);
2134 sk_wmem_queued_add(sk, buff->truesize);
2135 sk_mem_charge(sk, buff->truesize);
2136 buff->truesize += nlen;
2140 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
2141 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
2142 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
2147 TCP_SKB_CB(buff)->tcp_flags = flags;
2150 TCP_SKB_CB(buff)->sacked = 0;
2152 tcp_skb_fragment_eor(skb, buff);
2154 buff->ip_summed = CHECKSUM_PARTIAL;
2155 skb_split(skb, buff, len);
2156 tcp_fragment_tstamp(skb, buff);
2160 tcp_set_skb_tso_segs(buff, mss_now);
2163 __skb_header_release(buff);
2164 tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE);
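The matches at 2121-2164 look like the TSO-path variant of the same split (presumably tso_fragment()): no extra header size is requested for the new skb (2129), buff->sacked starts out clear (2150), and the result only ever goes on the write queue (2164). After either split, line 1601 / 2160 recomputes how many MSS-sized segments each half now represents. A rough userspace model of that recount, assuming the DIV_ROUND_UP behaviour of the kernel helper:

        /* Hypothetical model of the per-skb segment count recomputed by
         * tcp_set_skb_tso_segs(): small payloads count as one segment,
         * larger ones as the payload rounded up to whole mss_now units.
         */
        static unsigned int tso_segment_count(unsigned int payload_len,
                                              unsigned int mss_now)
        {
                if (payload_len <= mss_now)
                        return 1;
                return (payload_len + mss_now - 1) / mss_now;   /* DIV_ROUND_UP */
        }

For example, with mss_now = 1448 a 4500-byte skb counts as 4 segments, but after splitting it at 3000 bytes the pieces count as 3 and 2; the totals can differ, which is presumably why the already-sent case at 1609-1611 re-derives the per-piece counts and adjusts the in-flight accounting by any difference from the old total.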
3867 struct sk_buff *buff;
3882 buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
3883 if (unlikely(!buff))
3886 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
3889 tcp_connect_queue_skb(sk, buff);
3890 tcp_ecn_send_syn(sk, buff);
3891 tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
3894 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
3895 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
3904 buff = tcp_send_head(sk);
3905 if (unlikely(buff)) {
3906 WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq);
3907 tp->pushed_seq = TCP_SKB_CB(buff)->seq;
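The matches at 3867-3907 are from the active-open path that builds and sends the SYN (this looks like tcp_connect()). Read in order they suggest the following sequence; the glue between the visible fragments (error returns, timers, the surrounding connection setup) is assumed here, not taken from the listing:

        buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
        if (unlikely(!buff))
                return -ENOBUFS;                        /* assumed error path */

        /* A zero-payload skb carrying the SYN flag; the SYN itself consumes
         * one sequence number, hence write_seq++.
         */
        tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
        tcp_connect_queue_skb(sk, buff);
        tcp_ecn_send_syn(sk, buff);                     /* mark the SYN for ECN if configured */
        tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);    /* the SYN must stay retransmittable */

        /* TCP Fast Open sends data along with the SYN; otherwise send it bare. */
        err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
                                 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);

        /* If anything is still queued (e.g. Fast Open data that did not fit),
         * resync snd_nxt/pushed_seq to the new head of the write queue.
         */
        buff = tcp_send_head(sk);
        if (unlikely(buff)) {
                WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq);
                tp->pushed_seq = TCP_SKB_CB(buff)->seq;
        }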
3977 struct sk_buff *buff;
3987 buff = alloc_skb(MAX_TCP_HEADER,
3989 if (unlikely(!buff)) {
4003 skb_reserve(buff, MAX_TCP_HEADER);
4004 tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
4010 skb_set_tcp_pure_ack(buff);
4013 __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
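The final matches (3977-4013) trace the construction of a pure ACK, presumably in __tcp_send_ack(): a header-only skb that is never queued, just built and pushed out with the caller-supplied rcv_nxt. Skipping the allocation-failure branch at 3989 (which the kernel handles by falling back to the delayed-ACK machinery), the visible steps line up roughly as follows; the gfp mask is an assumption:

        buff = alloc_skb(MAX_TCP_HEADER,
                         sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));   /* gfp mask assumed */
        if (unlikely(!buff))
                return;                 /* real code re-arms the delayed-ACK timer here */

        skb_reserve(buff, MAX_TCP_HEADER);      /* headroom only: a pure ACK has no payload */
        tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
        skb_set_tcp_pure_ack(buff);             /* keep the tiny skb from skewing TSQ/pacing accounting */

        /* Sent directly, bypassing the queues; passing rcv_nxt lets callers
         * acknowledge a specific value rather than whatever tp->rcv_nxt holds.
         */
        __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);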