Lines matching refs:skb, i.e. the lines in net/ipv4/tcp_offload.c (Linux TCP GSO/GRO offload) that reference skb; each entry is prefixed with its line number in that file.
14 static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
17 while (skb) {
19 skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
20 skb_shinfo(skb)->tskey = ts_seq;
24 skb = skb->next;
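
tcp_gso_tstamp() walks the list of segments produced by segmentation and hands the pending software-timestamp request to the one segment that carries sequence ts_seq. A minimal userspace sketch of that walk, with a hypothetical struct seg standing in for sk_buff (the kernel sets skb_shinfo(skb)->tx_flags on the real segment list):

    /* Walk a singly linked segment list and tag the one segment whose
     * byte range covers ts_seq, mirroring how tcp_gso_tstamp() hands the
     * software-timestamp request to exactly one resulting segment. */
    #include <stdint.h>
    #include <stdio.h>

    struct seg {
        uint32_t seq;           /* first sequence number in this segment */
        uint32_t len;           /* payload bytes in this segment */
        unsigned int tx_flags;
        uint32_t tskey;
        struct seg *next;
    };

    #define SW_TSTAMP 0x1       /* stand-in for SKBTX_SW_TSTAMP */

    static void gso_tstamp(struct seg *s, uint32_t ts_seq)
    {
        while (s) {
            if (ts_seq - s->seq < s->len) {  /* wrap-safe range test */
                s->tx_flags |= SW_TSTAMP;
                s->tskey = ts_seq;
                return;                      /* tag one segment only */
            }
            s = s->next;
        }
    }

    int main(void)
    {
        struct seg c = { 2000, 1000, 0, 0, NULL };
        struct seg b = { 1000, 1000, 0, 0, &c };
        struct seg a = { 0,    1000, 0, 0, &b };

        gso_tstamp(&a, 2500);                /* falls into segment c */
        printf("flags=%u tskey=%u\n", c.tx_flags, (unsigned)c.tskey);
        return 0;
    }
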
29 static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
32 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
35 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
38 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
39 const struct iphdr *iph = ip_hdr(skb);
40 struct tcphdr *th = tcp_hdr(skb);
47 skb->ip_summed = CHECKSUM_PARTIAL;
48 __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
51 return tcp_gso_segment(skb, features);
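
When a GSO packet reaches tcp4_gso_segment() without CHECKSUM_PARTIAL set (line 38), the code re-seeds th->check from the IPv4 pseudo header via __tcp_v4_send_check() before segmenting. A userspace sketch of that seed, assuming host-order inputs and a hypothetical helper name:

    /* One's-complement sum of the IPv4 pseudo header {saddr, daddr,
     * proto, TCP length}. The kernel installs this value in th->check so
     * checksum completion (hardware or software) only has to fold in the
     * TCP header and payload bytes. */
    #include <stdint.h>
    #include <stdio.h>

    static uint16_t tcp4_pseudo_seed(uint32_t saddr, uint32_t daddr,
                                     uint16_t tcp_len)
    {
        uint32_t sum = 0;

        sum += (saddr >> 16) + (saddr & 0xffff);
        sum += (daddr >> 16) + (daddr & 0xffff);
        sum += 6;                            /* IPPROTO_TCP */
        sum += tcp_len;
        while (sum >> 16)                    /* fold carries */
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;                /* deliberately not inverted */
    }

    int main(void)
    {
        /* 192.0.2.1 -> 192.0.2.2, 1480 bytes of TCP header+payload */
        printf("seed=%04x\n",
               (unsigned)tcp4_pseudo_seed(0xc0000201, 0xc0000202, 1480));
        return 0;
    }
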
54 struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
65 struct sk_buff *gso_skb = skb;
69 th = tcp_hdr(skb);
74 if (!pskb_may_pull(skb, thlen))
77 oldlen = (u16)~skb->len;
78 __skb_pull(skb, thlen);
80 mss = skb_shinfo(skb)->gso_size;
81 if (unlikely(skb->len <= mss))
84 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
87 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
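
The fast path at lines 84-87: if the device can segment the packet itself (skb_gso_ok() with NETIF_F_GSO_ROBUST), no split happens and only the segment count is recorded. The arithmetic is a plain round-up division:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* 4000 payload bytes at mss 1448: two full segments plus a
         * 1104-byte tail still count as 3 for gso_segs. */
        printf("gso_segs=%d\n", DIV_ROUND_UP(4000, 1448));
        return 0;
    }
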
96 skb->ooo_okay = 0;
98 segs = skb_segment(skb, features);
107 * cases return a GSO skb. So update the mss now.
114 skb = segs;
115 th = tcp_hdr(skb);
124 while (skb->next) {
128 if (skb->ip_summed == CHECKSUM_PARTIAL)
129 gso_reset_checksum(skb, ~th->check);
131 th->check = gso_make_checksum(skb, ~th->check);
135 skb->destructor = gso_skb->destructor;
136 skb->sk = gso_skb->sk;
137 sum_truesize += skb->truesize;
139 skb = skb->next;
140 th = tcp_hdr(skb);
154 swap(gso_skb->sk, skb->sk);
155 swap(gso_skb->destructor, skb->destructor);
156 sum_truesize += skb->truesize;
162 refcount_add(delta, &skb->sk->sk_wmem_alloc);
164 WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
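
Lines 154-164 move socket ownership to the last segment and reconcile sk_wmem_alloc: the summed truesize of the segments normally exceeds the original gso_skb's truesize, but the delta can be negative in pathological cases, so both refcount_add() and refcount_sub_and_test() are needed on an unsigned refcount. A plain-integer sketch of that two-branch update (hypothetical names):

    #include <assert.h>
    #include <stdio.h>

    /* Refcounts are unsigned, so applying a signed delta takes an add or
     * a subtract depending on sign; dropping to zero here would be a
     * bug, hence the kernel's WARN_ON_ONCE() on the subtract path. */
    static void wmem_apply_delta(unsigned int *wmem, long delta)
    {
        if (delta >= 0) {
            *wmem += (unsigned int)delta;
        } else {
            assert(*wmem > (unsigned int)(-delta));
            *wmem -= (unsigned int)(-delta);
        }
    }

    int main(void)
    {
        unsigned int wmem = 10000;

        wmem_apply_delta(&wmem, 1536);       /* usual case: overhead */
        wmem_apply_delta(&wmem, -256);       /* pathological shrink */
        printf("wmem=%u\n", wmem);
        return 0;
    }
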
167 delta = htonl(oldlen + (skb_tail_pointer(skb) -
168 skb_transport_header(skb)) +
169 skb->data_len);
172 if (skb->ip_summed == CHECKSUM_PARTIAL)
173 gso_reset_checksum(skb, ~th->check);
175 th->check = gso_make_checksum(skb, ~th->check);
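
Lines 77 and 167-175 are two halves of one trick: (u16)~skb->len saves the one's complement of the old length so each segment's checksum can later be patched for the new length instead of being recomputed over the payload. This is the RFC 1624 incremental update HC' = ~(~HC + ~m + m'); the sketch below verifies that patching one changed word matches a full recompute:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fold(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return sum;
    }

    /* Internet checksum over 16-bit words. */
    static uint16_t csum16(const uint16_t *w, int n)
    {
        uint32_t sum = 0;

        while (n--)
            sum += *w++;
        return (uint16_t)~fold(sum);
    }

    /* RFC 1624 eqn. 3: HC' = ~(~HC + ~m + m') for one changed word. */
    static uint16_t csum_update(uint16_t check, uint16_t old_v,
                                uint16_t new_v)
    {
        uint32_t sum = (uint16_t)~check;

        sum += (uint16_t)~old_v;
        sum += new_v;
        return (uint16_t)~fold(sum);
    }

    int main(void)
    {
        uint16_t data[4] = { 0x1234, 0x0593 /* length word */,
                             0xbeef, 0x00ff };
        uint16_t check = csum16(data, 4);

        data[1] = 0x05b4;                    /* length changes to 1460 */
        printf("recomputed=%04x patched=%04x\n",
               (unsigned)csum16(data, 4),
               (unsigned)csum_update(check, 0x0593, 0x05b4));
        return 0;
    }
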
180 struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
195 off = skb_gro_offset(skb);
197 th = skb_gro_header_fast(skb, off);
198 if (skb_gro_header_hard(skb, hlen)) {
199 th = skb_gro_header_slow(skb, hlen, off);
209 if (skb_gro_header_hard(skb, hlen)) {
210 th = skb_gro_header_slow(skb, hlen, off);
215 skb_gro_pull(skb, thlen);
217 len = skb_gro_len(skb);
263 flush |= p->decrypted ^ skb->decrypted;
266 if (flush || skb_gro_receive(p, skb)) {
279 if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
283 NAPI_GRO_CB(skb)->flush |= (flush != 0);
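
tcp_gro_receive() decides whether the new skb can be merged into a held packet p: the flow keys must match, and any of several conditions (aggregate size, a sequence gap, flags such as PSH, or the TLS decrypted bit at line 263) forces a flush instead. A toy predicate over a hypothetical flattened tuple; the real code also compares TCP option bytes and sequence continuity:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct tuple {
        uint32_t saddr, daddr;
        uint16_t source, dest;
        bool psh;
        bool decrypted;                      /* TLS device offload state */
    };

    static bool can_merge(const struct tuple *p, const struct tuple *n)
    {
        bool same_flow = p->saddr == n->saddr && p->daddr == n->daddr &&
                         p->source == n->source && p->dest == n->dest;
        bool flush = n->psh || (p->decrypted != n->decrypted);

        return same_flow && !flush;
    }

    int main(void)
    {
        struct tuple p = { 1, 2, 80, 5555, false, false };
        struct tuple n = p;

        n.psh = true;                        /* PSH terminates aggregation */
        printf("merge=%d\n", can_merge(&p, &n));
        return 0;
    }
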
288 int tcp_gro_complete(struct sk_buff *skb)
290 struct tcphdr *th = tcp_hdr(skb);
292 skb->csum_start = (unsigned char *)th - skb->head;
293 skb->csum_offset = offsetof(struct tcphdr, check);
294 skb->ip_summed = CHECKSUM_PARTIAL;
296 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
299 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
301 if (skb->encapsulation)
302 skb->inner_transport_header = skb->transport_header;
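
tcp_gro_complete() leaves the merged packet in CHECKSUM_PARTIAL state: csum_start records where checksumming begins (the TCP header, relative to skb->head) and csum_offset where the 16-bit result is stored inside that header, so a later stage can finish the checksum without re-parsing headers. A flat-buffer restatement with hypothetical toy types:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical fixed-layout TCP header; only the position of the
     * check field matters for this sketch. */
    struct toy_tcphdr {
        uint16_t source, dest;
        uint32_t seq, ack_seq;
        uint16_t flags, window, check, urg_ptr;
    };

    struct toy_skb {
        unsigned char *head;
        unsigned char *transport_header;     /* points at the TCP header */
        unsigned int csum_start;
        unsigned int csum_offset;
    };

    static void gro_complete_csum(struct toy_skb *skb)
    {
        skb->csum_start = (unsigned int)(skb->transport_header - skb->head);
        skb->csum_offset = offsetof(struct toy_tcphdr, check);
        /* ip_summed = CHECKSUM_PARTIAL would be set here in-kernel */
    }

    int main(void)
    {
        unsigned char buf[64];
        struct toy_skb skb = { buf, buf + 34, 0, 0 }; /* 14 eth + 20 ip */

        gro_complete_csum(&skb);
        printf("start=%u offset=%u\n", skb.csum_start, skb.csum_offset);
        return 0;
    }
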
309 struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
312 if (!NAPI_GRO_CB(skb)->flush &&
313 skb_gro_checksum_validate(skb, IPPROTO_TCP,
315 NAPI_GRO_CB(skb)->flush = 1;
319 return tcp_gro_receive(head, skb);
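
On receive, the checksum is validated before aggregation: the one's-complement sum over the covered words, including the check field itself, must fold to 0xffff (equivalently, ~sum == 0); otherwise line 315 sets the flush flag and the packet bypasses GRO. Sketch of that validation:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool csum_ok(const uint16_t *w, int n)
    {
        uint32_t sum = 0;

        while (n--)
            sum += *w++;
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return sum == 0xffff;                /* i.e. ~sum == 0 */
    }

    int main(void)
    {
        /* Last word chosen so the whole buffer sums to 0xffff. */
        uint16_t pkt[3] = { 0x1234, 0xabcd, 0 };

        pkt[2] = (uint16_t)~(uint16_t)(0x1234 + 0xabcd);
        printf("valid=%d\n", csum_ok(pkt, 3));
        return 0;
    }
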
322 INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
324 const struct iphdr *iph = ip_hdr(skb);
325 struct tcphdr *th = tcp_hdr(skb);
327 th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
329 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
331 if (NAPI_GRO_CB(skb)->is_atomic)
332 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
334 return tcp_gro_complete(skb);
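
SKB_GSO_TCP_FIXEDID (line 332) tells a later re-segmentation to keep the IPv4 ID constant across segments rather than incrementing it; roughly, GRO treats a flow as atomic when DF is set and the IP ID does not advance between merged packets (RFC 6864 allows a constant ID for atomic datagrams; the precise bookkeeping lives in inet_gro_receive()). A toy version of the fixed-ID check:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Did every aggregated segment carry the same IP ID? If so, the
     * merged packet is marked so re-segmentation keeps the ID fixed. */
    static bool ip_ids_fixed(const uint16_t *ids, int n)
    {
        for (int i = 1; i < n; i++)
            if (ids[i] != ids[0])
                return false;
        return true;
    }

    int main(void)
    {
        uint16_t fixed[3] = { 42, 42, 42 };
        uint16_t counting[3] = { 42, 43, 44 };

        printf("fixed=%d counting=%d\n",
               ip_ids_fixed(fixed, 3), ip_ids_fixed(counting, 3));
        return 0;
    }
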