Lines matching references to the identifier skb, as reported by a source cross-referencer. The fragments below appear to come from the Linux kernel's generic receive offload (GRO) code, net/core/gro.c; each entry is prefixed with its line number in that file, and only lines that reference skb are shown, so function bodies are incomplete by design.

96 int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
98 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
99 unsigned int offset = skb_gro_offset(skb);
100 unsigned int headlen = skb_headlen(skb);
101 unsigned int len = skb_gro_len(skb);
114 if (p->pp_recycle != skb->pp_recycle)
122 if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
126 if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
133 segs = NAPI_GRO_CB(skb)->count;
160 new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
161 delta_truesize = skb->truesize - new_truesize;
163 skb->truesize = new_truesize;
164 skb->len -= skb->data_len;
165 skb->data_len = 0;
167 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
169 } else if (skb->head_frag) {
172 struct page *page = virt_to_head_page(skb->head);
179 first_offset = skb->data -
191 delta_truesize = skb->truesize - new_truesize;
192 skb->truesize = new_truesize;
193 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
199 skb->destructor = NULL;
200 delta_truesize = skb->truesize;
206 skb->data_len -= eat;
207 skb->len -= eat;
211 __skb_pull(skb, offset);
214 skb_shinfo(p)->frag_list = skb;
216 NAPI_GRO_CB(p)->last->next = skb;
217 NAPI_GRO_CB(p)->last = skb;
218 __skb_header_release(skb);
231 NAPI_GRO_CB(skb)->same_flow = 1;
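
The fragment above appears to be skb_gro_receive(), which merges a newly arrived segment (skb) into a held packet (p). It bails out early when the two skbs disagree on page-pool recycling (pp_recycle), when the merged length would hit gro_max_size, or when the flush flag is set; past the legacy 64KB limit, the IPPROTO_TCP test suggests only TCP is allowed to keep growing (BIG TCP). It then tries three strategies in order: append the donor's page fragments to p when the donor's headers fit entirely in its linear area (shrinking the donor's truesize), steal the donor's head page as a fragment when head_frag is set, or fall back to chaining the donor onto p's frag_list. A minimal userspace sketch of that decision; fake_skb and its fields are invented stand-ins, not kernel types:

    #include <stdio.h>

    /* Hypothetical stand-in for the few sk_buff fields the decision reads. */
    struct fake_skb {
        unsigned int headlen;   /* bytes in the linear area              */
        unsigned int offset;    /* current GRO offset (end of headers)   */
        unsigned int nr_frags;  /* page fragments carried by the skb     */
        int head_frag;          /* linear area was allocated from a page */
    };

    enum merge { MERGE_FRAGS, STEAL_HEAD, CHAIN_FRAG_LIST };

    /* Mirrors the order of strategies in skb_gro_receive(): cheapest first. */
    static enum merge pick_merge(const struct fake_skb *skb)
    {
        if (skb->nr_frags && skb->headlen <= skb->offset)
            return MERGE_FRAGS;      /* payload lives entirely in frags */
        if (skb->head_frag)
            return STEAL_HEAD;       /* turn the head page into a frag  */
        return CHAIN_FRAG_LIST;      /* last resort: frag_list chaining */
    }

    int main(void)
    {
        struct fake_skb a = { .headlen = 66, .offset = 66, .nr_frags = 2 };
        struct fake_skb b = { .headlen = 1400, .offset = 66, .head_frag = 1 };
        printf("%d %d\n", pick_merge(&a), pick_merge(&b)); /* prints: 0 1 */
        return 0;
    }
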
236 static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
239 __be16 type = skb->protocol;
243 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
245 if (NAPI_GRO_CB(skb)->count == 1) {
246 skb_shinfo(skb)->gso_size = 0;
257 skb, 0);
264 kfree_skb(skb);
269 gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
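
This next fragment looks like napi_gro_complete(), which finishes a held packet: if only one segment was ever merged (count == 1), gso_size is cleared and the protocol's gro_complete callback can be skipped; otherwise the callback fixes up the merged headers. A missing protocol handler leads to kfree_skb(), and gro_normal_one() then batches the packet toward the regular receive path. The BUILD_BUG_ON on line 243 guards that struct napi_gro_cb still fits inside the fixed skb->cb scratch area. The same compile-time guard can be written in portable C11 with _Static_assert; the struct below is illustrative, not the real napi_gro_cb:

    #include <stddef.h>

    /* skb->cb is a fixed 48-byte scratch area in struct sk_buff. */
    #define SKB_CB_SIZE 48

    /* Illustrative control block; the real napi_gro_cb has more fields. */
    struct demo_gro_cb {
        void *frag0;
        unsigned int frag0_len;
        int data_offset;
        unsigned short count;
        unsigned short flush;
    };

    /* Equivalent of BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb)):
     * fail the build, not the boot, if the overlay ever outgrows the area. */
    _Static_assert(sizeof(struct demo_gro_cb) <= SKB_CB_SIZE,
                   "GRO control block must fit in skb->cb");

    int main(void) { return 0; }
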
276 struct sk_buff *skb, *p;
278 list_for_each_entry_safe_reverse(skb, p, head, list) {
279 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
281 skb_list_del_init(skb);
282 napi_gro_complete(napi, skb);
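
The loop above is the per-bucket flush: it walks the hold list in reverse, oldest entry first, and when flush_old is set it stops as soon as it reaches packets stamped with the current jiffies tick, giving fresh packets another chance to merge; everything older is unlinked (skb_list_del_init) and completed. A small runnable sketch of the same age-based cutoff over a plain singly linked list; the names are made up:

    #include <stdio.h>

    struct held_pkt {
        unsigned long age;       /* tick when the packet was first held */
        struct held_pkt *next;   /* list runs from oldest to newest     */
    };

    /* Complete everything older than the current tick; keep the rest. */
    static struct held_pkt *flush_stale(struct held_pkt *oldest, unsigned long now)
    {
        while (oldest && oldest->age != now) {
            struct held_pkt *done = oldest;
            oldest = oldest->next;
            printf("flushing packet aged %lu\n", done->age);
            /* kernel: skb_list_del_init() + napi_gro_complete() here */
        }
        return oldest;           /* survivors from the current tick */
    }

    int main(void)
    {
        struct held_pkt c = { 5, 0 }, b = { 4, &c }, a = { 4, &b };
        flush_stale(&a, 5);      /* flushes the two age-4 packets */
        return 0;
    }
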
307 static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb,
315 skb_ext = skb_ext_find(skb, TC_SKB_EXT);
326 const struct sk_buff *skb)
328 unsigned int maclen = skb->dev->hard_header_len;
329 u32 hash = skb_get_hash_raw(skb);
342 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
343 diffs |= p->vlan_all ^ skb->vlan_all;
344 diffs |= skb_metadata_differs(p, skb);
347 skb_mac_header(skb));
350 skb_mac_header(skb),
358 if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
359 diffs |= p->sk != skb->sk;
360 diffs |= skb_metadata_dst_cmp(p, skb);
361 diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);
363 diffs |= gro_list_prepare_tc_ext(skb, p, diffs);
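
For every packet p already on the candidate list, gro_list_prepare() decides whether the new skb belongs to the same flow. Rather than branching per field, it XORs each field pair (device pointer, vlan_all, metadata, then maclen bytes of MAC header) and ORs the results into diffs, so one final zero test answers the question; the expensive comparisons (socket, dst metadata, conntrack, and the tc skb extension behind gro_list_prepare_tc_ext()) run only when either skb is flagged slow_gro. A runnable sketch of the accumulate-then-test idiom over a hypothetical flow key:

    #include <stdio.h>

    struct flow_key {
        const void *dev;         /* receiving device      */
        unsigned int vlan_all;   /* vlan proto + tag      */
        unsigned short mac[3];   /* abbreviated MAC bytes */
    };

    /* Branch-free match: any differing field makes diffs non-zero. */
    static int same_flow(const struct flow_key *p, const struct flow_key *q)
    {
        unsigned long diffs;

        diffs  = (unsigned long)p->dev ^ (unsigned long)q->dev;
        diffs |= p->vlan_all ^ q->vlan_all;
        diffs |= (p->mac[0] ^ q->mac[0]) | (p->mac[1] ^ q->mac[1]) |
                 (p->mac[2] ^ q->mac[2]);
        return diffs == 0;
    }

    int main(void)
    {
        struct flow_key a = { (void *)0x1000, 0x8100000a, { 1, 2, 3 } };
        struct flow_key b = a, c = a;
        c.vlan_all = 0x8100000b;
        printf("%d %d\n", same_flow(&a, &b), same_flow(&a, &c)); /* 1 0 */
        return 0;
    }
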
370 static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
372 const struct skb_shared_info *pinfo = skb_shinfo(skb);
375 NAPI_GRO_CB(skb)->data_offset = 0;
376 NAPI_GRO_CB(skb)->frag0 = NULL;
377 NAPI_GRO_CB(skb)->frag0_len = 0;
379 if (!skb_headlen(skb) && pinfo->nr_frags &&
382 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
383 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
385 skb->end - skb->tail);
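
skb_gro_reset_offset() sets up the frag0 fast path: if the skb has an empty linear area and its data begins in a page fragment (the full source adds suitability checks, e.g. not highmem), GRO keeps a direct pointer into that fragment so header parsing can avoid pulling bytes into the linear area first. frag0_len is capped by the linear tailroom (skb->end - skb->tail) because a later gro_pull_from_frag0() must be able to memcpy those header bytes into the tail. A sketch of how header accesses then pick their source; gro_state and its fields are stand-ins:

    #include <stddef.h>

    struct gro_state {
        unsigned char *frag0;       /* fast-path pointer into first frag */
        unsigned int frag0_len;     /* usable bytes behind frag0         */
        unsigned char *lin_data;    /* skb linear data                   */
        unsigned int data_offset;   /* current GRO parse offset          */
    };

    /* Read a header at the current offset: prefer frag0, else linear data.
     * Returns NULL when the caller must fall back to a slow pull. */
    static void *gro_header(struct gro_state *g, unsigned int hlen)
    {
        unsigned int off = g->data_offset;

        if (g->frag0 && off + hlen <= g->frag0_len)
            return g->frag0 + off;             /* fast path             */
        if (g->lin_data)
            return g->lin_data + off;          /* header already linear */
        return NULL;                           /* slow path needed      */
    }

    int main(void) { (void)gro_header; return 0; }
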
389 static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
391 struct skb_shared_info *pinfo = skb_shinfo(skb);
393 BUG_ON(skb->end - skb->tail < grow);
395 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
397 skb->data_len -= grow;
398 skb->tail += grow;
404 skb_frag_unref(skb, 0);
410 static void gro_try_pull_from_frag0(struct sk_buff *skb)
412 int grow = skb_gro_offset(skb) - skb_headlen(skb);
415 gro_pull_from_frag0(skb, grow);
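
gro_pull_from_frag0() undoes the fast path once headers have been parsed: the grow header bytes still sitting in frag0 are memcpy'd into the linear tail (the BUG_ON enforces the tailroom cap set up above), data_len and tail are adjusted, and the first fragment is shrunk in place, or dropped with skb_frag_unref() once empty. gro_try_pull_from_frag0() computes grow = skb_gro_offset() - skb_headlen() and only pulls when the parse offset has actually run past the linear area. A compact runnable model of the pull arithmetic; the buffer layout is invented for the demo:

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    struct lin_buf {
        unsigned char data[64];
        unsigned int tail, end;      /* tail grows toward end       */
        unsigned int data_len;       /* bytes still held in frags   */
    };

    /* Move `grow` header bytes from a fragment into the linear tail. */
    static void pull_from_frag(struct lin_buf *b, unsigned char **frag,
                               unsigned int *frag_len, unsigned int grow)
    {
        assert(b->end - b->tail >= grow);      /* kernel: BUG_ON()    */
        memcpy(b->data + b->tail, *frag, grow);
        b->tail += grow;
        b->data_len -= grow;                   /* bytes left in frags */
        *frag += grow;
        *frag_len -= grow;                     /* frag shrinks        */
    }

    int main(void)
    {
        unsigned char payload[] = "EEHHdata", *frag = payload;
        unsigned int frag_len = 8;
        struct lin_buf b = { .tail = 0, .end = 64, .data_len = 8 };
        pull_from_frag(&b, &frag, &frag_len, 4);  /* pull 4 header bytes */
        printf("linear=%u frag_left=%u\n", b.tail, frag_len); /* 4 4 */
        return 0;
    }
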
437 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
439 u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
443 __be16 type = skb->protocol;
448 if (netif_elide_gro(skb->dev))
451 gro_list_prepare(&gro_list->list, skb);
462 skb_set_network_header(skb, skb_gro_offset(skb));
463 skb_reset_mac_len(skb);
467 *(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
468 NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
469 NAPI_GRO_CB(skb)->is_atomic = 1;
470 NAPI_GRO_CB(skb)->count = 1;
471 if (unlikely(skb_is_gso(skb))) {
472 NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
474 if (!skb_is_gso_tcp(skb) ||
475 (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
476 NAPI_GRO_CB(skb)->flush = 1;
480 switch (skb->ip_summed) {
482 NAPI_GRO_CB(skb)->csum = skb->csum;
483 NAPI_GRO_CB(skb)->csum_valid = 1;
486 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
492 &gro_list->list, skb);
501 same_flow = NAPI_GRO_CB(skb)->same_flow;
502 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
513 if (NAPI_GRO_CB(skb)->flush)
521 /* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */
522 gro_try_pull_from_frag0(skb);
523 NAPI_GRO_CB(skb)->age = jiffies;
524 NAPI_GRO_CB(skb)->last = skb;
525 if (!skb_is_gso(skb))
526 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
527 list_add(&skb->list, &gro_list->list);
541 gro_try_pull_from_frag0(skb);
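
dev_gro_receive() is the dispatcher. It hashes the skb into one of GRO_HASH_BUCKETS hold lists, bails to the normal path when the device elides GRO (netif_elide_gro), marks same-flow candidates via gro_list_prepare(), and looks up the offload handler for skb->protocol. The control block is then seeded: a run of fields is cleared in one store through the zeroed alias, flush is preset for frag-listed skbs, count comes from gso_segs for already-GSO packets (non-TCP or SKB_GSO_DODGY GSO forces a flush), and checksum state follows ip_summed (CHECKSUM_COMPLETE stores the hardware csum; CHECKSUM_UNNECESSARY becomes a csum_cnt credit of csum_level + 1). After the protocol's gro_receive callback runs, a same_flow skb has been merged away, a flush-flagged one goes straight up the stack, and anything else is held: headers are pulled out of frag0 first (the comment on line 521 insists this precedes touching age and last, since frag0 aliases the cb fields being set), the age is stamped, and the skb joins the bucket list. Bucket selection itself is a single mask over the flow hash, sketched here:

    #include <stdio.h>

    #define GRO_HASH_BUCKETS 8   /* power of two, as in the kernel */

    /* Same idea as dev_gro_receive(): low bits of the flow hash
     * (skb_get_hash_raw()) pick the per-NAPI hold list. */
    static unsigned int gro_bucket(unsigned int flow_hash)
    {
        return flow_hash & (GRO_HASH_BUCKETS - 1);
    }

    int main(void)
    {
        unsigned int hashes[] = { 0x12345678, 0xdeadbeef, 0x00000007 };
        for (int i = 0; i < 3; i++)
            printf("hash %#x -> bucket %u\n", hashes[i], gro_bucket(hashes[i]));
        return 0;
    }
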
574 struct sk_buff *skb,
579 gro_normal_one(napi, skb, 1);
583 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
584 napi_skb_free_stolen_head(skb);
585 else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
586 __kfree_skb(skb);
588 __napi_kfree_skb(skb, SKB_CONSUMED);
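
napi_skb_finish() turns dev_gro_receive()'s verdict into an action. GRO_NORMAL delivers the skb to the stack as a batch of one via gro_normal_one(); GRO_MERGED_FREE releases the donor on the cheapest path available: napi_skb_free_stolen_head() when its head page was stolen during the merge, __kfree_skb() when the skb belongs to a clone pair (fclone != SKB_FCLONE_UNAVAILABLE), and otherwise the per-NAPI cache free __napi_kfree_skb(skb, SKB_CONSUMED). A sketch of the verdict switch with stand-in actions:

    #include <stdio.h>

    enum gro_result { GRO_MERGED, GRO_MERGED_FREE, GRO_HELD, GRO_NORMAL };

    /* Map a GRO verdict to what happens to the skb afterwards. */
    static const char *finish(enum gro_result ret, int stolen_head, int fclone)
    {
        switch (ret) {
        case GRO_NORMAL:
            return "gro_normal_one(): deliver to stack";
        case GRO_MERGED_FREE:
            if (stolen_head)
                return "free stolen-head skb (head page lives on in p)";
            return fclone ? "__kfree_skb(): clone-aware free"
                          : "__napi_kfree_skb(): recycle via NAPI cache";
        default:
            return "held or merged: nothing more to do here";
        }
    }

    int main(void)
    {
        printf("%s\n", finish(GRO_MERGED_FREE, 1, 0));
        printf("%s\n", finish(GRO_NORMAL, 0, 0));
        return 0;
    }
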
600 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
604 skb_mark_napi_id(skb, napi);
605 trace_napi_gro_receive_entry(skb);
607 skb_gro_reset_offset(skb, 0);
609 ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
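
napi_gro_receive() itself is thin plumbing: mark the skb with the NAPI id for busy polling, trace the entry, reset the GRO offsets (nhoff 0, since drivers hand in frames starting at the MAC header), then feed dev_gro_receive()'s verdict into napi_skb_finish(). A trivial runnable model of that staging, with stand-in functions:

    #include <stdio.h>

    /* Stand-ins for the stages napi_gro_receive() chains together. */
    static void reset_offset(void)  { puts("skb_gro_reset_offset(skb, 0)"); }
    static int  dispatch(void)      { puts("dev_gro_receive(napi, skb)"); return 3; }
    static void finish(int verdict) { printf("napi_skb_finish(verdict=%d)\n", verdict); }

    int main(void)
    {
        reset_offset();      /* forget any previous frag0 state */
        finish(dispatch());  /* the verdict drives the skb's fate */
        return 0;
    }
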
616 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
618 if (unlikely(skb->pfmemalloc)) {
619 consume_skb(skb);
622 __skb_pull(skb, skb_headlen(skb));
624 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
625 __vlan_hwaccel_clear_tag(skb);
626 skb->dev = napi->dev;
627 skb->skb_iif = 0;
630 skb->pkt_type = PACKET_HOST;
632 skb->encapsulation = 0;
633 skb_shinfo(skb)->gso_type = 0;
634 skb_shinfo(skb)->gso_size = 0;
635 if (unlikely(skb->slow_gro)) {
636 skb_orphan(skb);
637 skb_ext_reset(skb);
638 nf_reset_ct(skb);
639 skb->slow_gro = 0;
642 napi->skb = skb;
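
napi_reuse_skb() recycles the skb container between napi_gro_frags() packets instead of freeing and reallocating it: the old data is pulled off, headroom is restored to NET_SKB_PAD + NET_IP_ALIGN, the vlan tag, device binding, packet type, encapsulation, and GSO metadata are reset, and the rarely-set slow_gro state (socket, extensions, conntrack) is dropped. skbs backed by pfmemalloc emergency reserves are consumed rather than recycled, since they must not serve ordinary traffic. The reset-and-reuse pattern in miniature; rx_buf and its fields are a made-up subset:

    #include <stdio.h>

    struct rx_buf {
        char data[256];
        unsigned int len;
        int pkt_type, vlan_tag, pfmemalloc;
    };

    static struct rx_buf *cached;  /* plays the role of napi->skb */

    /* Reset a container for reuse, or refuse if it came from reserves. */
    static void reuse_buf(struct rx_buf *b)
    {
        if (b->pfmemalloc)
            return;            /* kernel: consume_skb() instead */
        b->len = 0;            /* __skb_pull() of everything    */
        b->vlan_tag = 0;       /* __vlan_hwaccel_clear_tag()    */
        b->pkt_type = 0;       /* back to PACKET_HOST           */
        cached = b;            /* napi->skb = skb               */
    }

    int main(void)
    {
        struct rx_buf b = { .len = 128, .vlan_tag = 10 };
        reuse_buf(&b);
        printf("cached=%d len=%u\n", cached == &b, b.len); /* 1 0 */
        return 0;
    }
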
647 struct sk_buff *skb = napi->skb;
649 if (!skb) {
650 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
651 if (skb) {
652 napi->skb = skb;
653 skb_mark_napi_id(skb, napi);
656 return skb;
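
napi_get_frags() is the producer side of that recycling: hand back the cached container if napi_reuse_skb() left one, otherwise allocate a fresh skb with GRO_MAX_HEAD of linear headroom and remember it. The cache-or-allocate pattern, reduced to userspace:

    #include <stdlib.h>

    static void *cached_skb;   /* napi->skb: container kept between calls */

    /* napi_get_frags() pattern: return the cached container,
     * or allocate a fresh one and remember it. */
    static void *get_frags(size_t head_size)
    {
        if (!cached_skb)
            cached_skb = malloc(head_size);   /* kernel: napi_alloc_skb() */
        return cached_skb;
    }

    int main(void)
    {
        /* Second call reuses the first allocation. */
        return get_frags(256) == get_frags(256) ? 0 : 1;
    }
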
661 struct sk_buff *skb,
667 __skb_push(skb, ETH_HLEN);
668 skb->protocol = eth_type_trans(skb, skb->dev);
670 gro_normal_one(napi, skb, 1);
674 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
675 napi_skb_free_stolen_head(skb);
677 napi_reuse_skb(napi, skb);
690 * We copy ethernet header into skb->data to have a common layout.
694 struct sk_buff *skb = napi->skb;
698 napi->skb = NULL;
700 skb_reset_mac_header(skb);
701 skb_gro_reset_offset(skb, hlen);
703 if (unlikely(skb_gro_header_hard(skb, hlen))) {
704 eth = skb_gro_header_slow(skb, hlen, 0);
706 net_warn_ratelimited("%s: dropping impossible skb from %s\n",
708 napi_reuse_skb(napi, skb);
712 eth = (const struct ethhdr *)skb->data;
713 gro_pull_from_frag0(skb, hlen);
714 NAPI_GRO_CB(skb)->frag0 += hlen;
715 NAPI_GRO_CB(skb)->frag0_len -= hlen;
717 __skb_pull(skb, hlen);
724 skb->protocol = eth->h_proto;
726 return skb;
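
napi_frags_skb() prepares a fragments-only skb (from napi_get_frags()) for GRO: it resets the MAC header, primes frag0 with an nhoff the size of the Ethernet header, and then reads that header, from frag0 on the fast path (copying it into the linear area with gro_pull_from_frag0() and advancing frag0 past it) or via skb_gro_header_slow() otherwise, dropping the packet with a ratelimited warning if even that fails. As the comment around line 690 notes, the header is copied into skb->data so every later layer sees a common layout, and skb->protocol is taken straight from eth->h_proto; napi_frags_finish() later redoes this properly with eth_type_trans(). Parsing that 14-byte header from a raw buffer looks like this in plain C (the struct mirrors the on-wire format; the frame bytes are invented):

    #include <arpa/inet.h>   /* ntohs() */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_HLEN 14

    struct eth_hdr {             /* on-wire Ethernet header */
        uint8_t  h_dest[6];
        uint8_t  h_source[6];
        uint16_t h_proto;        /* big-endian EtherType    */
    } __attribute__((packed));

    int main(void)
    {
        /* 14 header bytes as a driver might leave them in frag0 */
        uint8_t frame[ETH_HLEN] = {
            0xff,0xff,0xff,0xff,0xff,0xff,   /* dest   */
            0x02,0x00,0x00,0x00,0x00,0x01,   /* source */
            0x08,0x00                        /* IPv4   */
        };
        struct eth_hdr eth;
        memcpy(&eth, frame, ETH_HLEN);       /* the "pull into skb->data" */
        printf("ethertype: 0x%04x\n", ntohs(eth.h_proto)); /* 0x0800 */
        return 0;
    }
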
732 struct sk_buff *skb = napi_frags_skb(napi);
734 trace_napi_gro_frags_entry(skb);
736 ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
746 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
751 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
753 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
754 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
757 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
758 !skb->csum_complete_sw)
759 netdev_rx_csum_fault(skb->dev, skb);
762 NAPI_GRO_CB(skb)->csum = wsum;
763 NAPI_GRO_CB(skb)->csum_valid = 1;
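
__skb_gro_checksum_complete() validates a transport checksum over the whole GRO span: skb_checksum() sums the payload from the current GRO offset, csum_add() folds in the pseudo-header checksum previously stashed in NAPI_GRO_CB(skb)->csum, and csum_fold() reduces the 32-bit one's-complement sum to 16 bits, where zero means valid. When software validation succeeds after the device's CHECKSUM_COMPLETE value had disagreed, netdev_rx_csum_fault() flags the NIC. The raw payload sum is then cached in the control block and csum_valid is set for later protocol layers. The fold arithmetic is small enough to demonstrate end to end; the two input sums below are invented so the fold comes out zero:

    #include <stdint.h>
    #include <stdio.h>

    /* One's-complement fold, equivalent to the kernel's csum_fold():
     * collapse a 32-bit running sum into 16 bits and invert. */
    static uint16_t csum_fold(uint32_t csum)
    {
        csum = (csum & 0xffff) + (csum >> 16);
        csum = (csum & 0xffff) + (csum >> 16); /* absorb the carry */
        return (uint16_t)~csum;
    }

    /* csum_add(): one's-complement addition with end-around carry. */
    static uint32_t csum_add(uint32_t a, uint32_t b)
    {
        uint64_t s = (uint64_t)a + b;
        return (uint32_t)(s + (s >> 32));
    }

    int main(void)
    {
        uint32_t pseudo = 0x00001234;   /* pseudo-header checksum    */
        uint32_t wsum   = 0x0000edcb;   /* 0x1234 + 0xedcb = 0xffff  */
        /* Prints "fold = 0x0000", i.e. the checksum validates. */
        printf("fold = 0x%04x\n", csum_fold(csum_add(pseudo, wsum)));
        return 0;
    }
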