Lines Matching refs:skb

19  *		Ray VanTassle	:	Fixed --skb->lock in free
80 #include <trace/events/skb.h>
175 * @skb: buffer
185 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
189 msg, addr, skb->len, sz, skb->head, skb->data,
190 (unsigned long)skb->tail, (unsigned long)skb->end,
191 skb->dev ? skb->dev->name : "<NULL>");
195 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
197 skb_panic(skb, sz, addr, __func__);
200 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
202 skb_panic(skb, sz, addr, __func__);
277 * skb->head being backed by slab, not a page fragment.
284 struct sk_buff *skb;
287 skb = napi_get_frags(napi);
288 WARN_ON_ONCE(!NAPI_HAS_SMALL_PAGE_FRAG && skb && skb->head_frag);
327 struct sk_buff *skb;
338 skb = nc->skb_cache[--nc->skb_count];
339 kasan_unpoison_object_data(skbuff_cache, skb);
341 return skb;
344 static inline void __finalize_skb_around(struct sk_buff *skb, void *data,
352 skb->truesize = SKB_TRUESIZE(size);
353 refcount_set(&skb->users, 1);
354 skb->head = data;
355 skb->data = data;
356 skb_reset_tail_pointer(skb);
357 skb_set_end_offset(skb, size);
358 skb->mac_header = (typeof(skb->mac_header))~0U;
359 skb->transport_header = (typeof(skb->transport_header))~0U;
360 skb->alloc_cpu = raw_smp_processor_id();
362 shinfo = skb_shinfo(skb);
366 skb_set_kcov_handle(skb, kcov_common_handle());
369 static inline void *__slab_build_skb(struct sk_buff *skb, void *data,
394 struct sk_buff *skb;
397 skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
398 if (unlikely(!skb))
401 memset(skb, 0, offsetof(struct sk_buff, tail));
402 data = __slab_build_skb(skb, data, &size);
403 __finalize_skb_around(skb, data, size);
405 return skb;
410 static void __build_skb_around(struct sk_buff *skb, void *data,
419 data = __slab_build_skb(skb, data, &size);
421 __finalize_skb_around(skb, data, size);
434 * The return is the new skb buffer.
446 struct sk_buff *skb;
448 skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
449 if (unlikely(!skb))
452 memset(skb, 0, offsetof(struct sk_buff, tail));
453 __build_skb_around(skb, data, frag_size);
455 return skb;
459 * takes care of skb->head and skb->pfmemalloc
463 struct sk_buff *skb = __build_skb(data, frag_size);
465 if (likely(skb && frag_size)) {
466 skb->head_frag = 1;
467 skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
469 return skb;
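
The build_skb() cluster above (around lines 446-469) wraps an sk_buff around memory the caller already owns instead of allocating a new head. A minimal, hypothetical driver-style sketch of that pattern follows; the function name, the NET_SKB_PAD headroom convention and the buffer/length parameters are illustrative assumptions, not taken from the listing:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical RX completion: "data" is a page-fragment buffer the NIC
 * wrote a frame into, "buf_size" is the full size of that buffer.
 * build_skb() reuses the buffer as skb->head (no copy); its tail must
 * leave room for struct skb_shared_info, and the wrapper sets head_frag
 * and propagates pfmemalloc as lines 466-467 above show.
 */
static struct sk_buff *demo_wrap_rx_buffer(struct net_device *dev,
                                           void *data, unsigned int len,
                                           unsigned int buf_size)
{
        struct sk_buff *skb;

        skb = build_skb(data, buf_size);
        if (unlikely(!skb))
                return NULL;

        skb_reserve(skb, NET_SKB_PAD);  /* headroom the driver left */
        skb_put(skb, len);              /* expose the received bytes */
        skb->protocol = eth_type_trans(skb, dev);

        return skb;
}
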
474 * build_skb_around - build a network buffer around provided skb
475 * @skb: sk_buff provide by caller, must be memset cleared
479 struct sk_buff *build_skb_around(struct sk_buff *skb,
482 if (unlikely(!skb))
485 __build_skb_around(skb, data, frag_size);
488 skb->head_frag = 1;
489 skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
491 return skb;
507 struct sk_buff *skb;
509 skb = napi_skb_cache_get();
510 if (unlikely(!skb))
513 memset(skb, 0, offsetof(struct sk_buff, tail));
514 __build_skb_around(skb, data, frag_size);
516 return skb;
524 * Version of __napi_build_skb() that takes care of skb->head_frag
525 * and skb->pfmemalloc when the data is a page or page fragment.
531 struct sk_buff *skb = __napi_build_skb(data, frag_size);
533 if (likely(skb) && frag_size) {
534 skb->head_frag = 1;
535 skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
538 return skb;
609 * instead of head cache and allocate a cloned (child) skb.
625 struct sk_buff *skb;
638 skb = napi_skb_cache_get();
640 skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
641 if (unlikely(!skb))
643 prefetchw(skb);
648 * Both skb->head and skb_shared_info are cache line aligned.
664 memset(skb, 0, offsetof(struct sk_buff, tail));
665 __build_skb_around(skb, data, size);
666 skb->pfmemalloc = pfmemalloc;
671 fclones = container_of(skb, struct sk_buff_fclones, skb1);
673 skb->fclone = SKB_FCLONE_ORIG;
677 return skb;
680 kmem_cache_free(cache, skb);
702 struct sk_buff *skb;
709 * we use kmalloc() for skb->head allocation.
714 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
715 if (!skb)
740 skb = __build_skb(data, len);
741 if (unlikely(!skb)) {
747 skb->pfmemalloc = 1;
748 skb->head_frag = 1;
751 skb_reserve(skb, NET_SKB_PAD);
752 skb->dev = dev;
755 return skb;
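
__netdev_alloc_skb(), quoted above, already reserves NET_SKB_PAD of headroom and records the owning device (lines 751-752). A small hypothetical copy-based RX helper built on the public netdev_alloc_skb() wrapper might look like the sketch below; the helper name and the idea of copying out of a DMA buffer are assumptions for illustration:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical copy-mode receive: allocate an skb sized for the frame,
 * copy the payload in with skb_put_data() (skb_put() plus memcpy()),
 * and let eth_type_trans() set skb->protocol and pull the MAC header.
 */
static struct sk_buff *demo_copy_rx_frame(struct net_device *dev,
                                          const void *frame,
                                          unsigned int len)
{
        struct sk_buff *skb;

        skb = netdev_alloc_skb(dev, len);
        if (unlikely(!skb))
                return NULL;            /* caller accounts the drop */

        skb_put_data(skb, frame, len);
        skb->protocol = eth_type_trans(skb, dev);
        return skb;
}
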
776 struct sk_buff *skb;
784 * we use kmalloc() for skb->head allocation.
791 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
793 if (!skb)
828 skb = __napi_build_skb(data, len);
829 if (unlikely(!skb)) {
835 skb->pfmemalloc = 1;
836 skb->head_frag = 1;
839 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
840 skb->dev = napi->dev;
843 return skb;
847 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
850 skb_fill_page_desc(skb, i, page, off, size);
851 skb->len += size;
852 skb->data_len += size;
853 skb->truesize += truesize;
857 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
860 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
863 skb->len += size;
864 skb->data_len += size;
865 skb->truesize += truesize;
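
skb_add_rx_frag() and skb_coalesce_rx_frag() above grow len, data_len and truesize together when paged data is attached. A hedged sketch of a caller appending one received page fragment; the helper name and the overflow check are assumptions:

#include <linux/errno.h>
#include <linux/skbuff.h>

/* Hypothetical: attach a page fragment the device filled to an skb
 * under construction.  "truesize" is the buffer's real memory cost,
 * which may exceed "size".
 */
static int demo_attach_frag(struct sk_buff *skb, struct page *page,
                            unsigned int off, unsigned int size,
                            unsigned int truesize)
{
        int i = skb_shinfo(skb)->nr_frags;

        if (i >= MAX_SKB_FRAGS)
                return -ENOMEM;

        skb_add_rx_frag(skb, i, page, off, size, truesize);
        return 0;
}
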
875 static inline void skb_drop_fraglist(struct sk_buff *skb)
877 skb_drop_list(&skb_shinfo(skb)->frag_list);
880 static void skb_clone_fraglist(struct sk_buff *skb)
884 skb_walk_frags(skb, list)
933 static bool skb_pp_recycle(struct sk_buff *skb, void *data, bool napi_safe)
935 if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
948 static void skb_free_head(struct sk_buff *skb, bool napi_safe)
950 unsigned char *head = skb->head;
952 if (skb->head_frag) {
953 if (skb_pp_recycle(skb, head, napi_safe))
957 skb_kfree_head(head, skb_end_offset(skb));
961 static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason,
964 struct skb_shared_info *shinfo = skb_shinfo(skb);
967 if (skb->cloned &&
968 atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
972 if (skb_zcopy(skb)) {
975 skb_zcopy_clear(skb, true);
981 napi_frag_unref(&shinfo->frags[i], skb->pp_recycle, napi_safe);
987 skb_free_head(skb, napi_safe);
998 skb->pp_recycle = 0;
1004 static void kfree_skbmem(struct sk_buff *skb)
1008 switch (skb->fclone) {
1010 kmem_cache_free(skbuff_cache, skb);
1014 fclones = container_of(skb, struct sk_buff_fclones, skb1);
1016 /* We usually free the clone (TX completion) before original skb
1025 fclones = container_of(skb, struct sk_buff_fclones, skb2);
1034 void skb_release_head_state(struct sk_buff *skb)
1036 skb_dst_drop(skb);
1037 if (skb->destructor) {
1039 skb->destructor(skb);
1042 nf_conntrack_put(skb_nfct(skb));
1044 skb_ext_put(skb);
1048 static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason,
1051 skb_release_head_state(skb);
1052 if (likely(skb->head))
1053 skb_release_data(skb, reason, napi_safe);
1058 * @skb: buffer
1065 void __kfree_skb(struct sk_buff *skb)
1067 skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED, false);
1068 kfree_skbmem(skb);
1073 bool __kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
1075 if (unlikely(!skb_unref(skb)))
1084 trace_consume_skb(skb, __builtin_return_address(0));
1086 trace_kfree_skb(skb, __builtin_return_address(0), reason);
1092 * @skb: buffer to free
1093 * @reason: reason why this skb is dropped
1100 kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
1102 if (__kfree_skb_reason(skb, reason))
1103 __kfree_skb(skb);
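
kfree_skb_reason() above feeds the drop reason into the trace_kfree_skb() tracepoint (line 1086), which makes drops attributable. A minimal hypothetical receive-side check using it; the helper name, the minimum-length parameter and the chosen reason are assumptions:

#include <linux/skbuff.h>
#include <net/dropreason.h>

/* Hypothetical sanity check: drop undersized packets with a specific
 * reason rather than the catch-all SKB_DROP_REASON_NOT_SPECIFIED.
 */
static bool demo_pull_or_drop(struct sk_buff *skb, unsigned int min_len)
{
        if (!pskb_may_pull(skb, min_len)) {
                kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_SMALL);
                return false;
        }
        return true;
}
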
1114 static void kfree_skb_add_bulk(struct sk_buff *skb,
1119 if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) {
1120 __kfree_skb(skb);
1124 skb_release_all(skb, reason, false);
1125 sa->skb_array[sa->skb_count++] = skb;
1157 /* Dump skb information and contents.
1163 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
1165 struct skb_shared_info *sh = skb_shinfo(skb);
1166 struct net_device *dev = skb->dev;
1167 struct sock *sk = skb->sk;
1174 len = skb->len;
1176 len = min_t(int, skb->len, MAX_HEADER + 128);
1178 headroom = skb_headroom(skb);
1179 tailroom = skb_tailroom(skb);
1181 has_mac = skb_mac_header_was_set(skb);
1182 has_trans = skb_transport_header_was_set(skb);
1189 level, skb->len, headroom, skb_headlen(skb), tailroom,
1190 has_mac ? skb->mac_header : -1,
1191 has_mac ? skb_mac_header_len(skb) : -1,
1192 skb->network_header,
1193 has_trans ? skb_network_header_len(skb) : -1,
1194 has_trans ? skb->transport_header : -1,
1197 skb->csum, skb->ip_summed, skb->csum_complete_sw,
1198 skb->csum_valid, skb->csum_level,
1199 skb->hash, skb->sw_hash, skb->l4_hash,
1200 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);
1210 print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
1211 16, 1, skb->head, headroom, false);
1213 seg_len = min_t(int, skb_headlen(skb), len);
1215 print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET,
1216 16, 1, skb->data, seg_len, false);
1220 print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
1221 16, 1, skb_tail_pointer(skb), tailroom, false);
1223 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
1224 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1234 print_hex_dump(level, "skb frag: ",
1244 if (full_pkt && skb_has_frag_list(skb)) {
1245 printk("skb fraglist:\n");
1246 skb_walk_frags(skb, list_skb)
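
skb_dump() above prints the buffer's geometry (len, headroom, header offsets, checksum state) and optionally the payload. A tiny, hypothetical rate-limited wrapper for debugging malformed packets; the wrapper name and the choice of KERN_WARNING are assumptions:

#include <linux/net.h>
#include <linux/printk.h>
#include <linux/skbuff.h>

/* Hypothetical debug helper: dump layout (and the full packet when
 * "full" is set) using the skb_dump(level, skb, full_pkt) signature
 * shown at line 1163 above, rate limited to avoid log floods.
 */
static void demo_report_bad_skb(const struct sk_buff *skb, bool full)
{
        if (net_ratelimit())
                skb_dump(KERN_WARNING, skb, full);
}
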
1254 * @skb: buffer that triggered an error
1256 * Report xmit error if a device callback is tracking this skb.
1257 * skb must be freed afterwards.
1259 void skb_tx_error(struct sk_buff *skb)
1261 if (skb) {
1262 skb_zcopy_downgrade_managed(skb);
1263 skb_zcopy_clear(skb, true);
1271 * @skb: buffer to free
1277 void consume_skb(struct sk_buff *skb)
1279 if (!skb_unref(skb))
1282 trace_consume_skb(skb, __builtin_return_address(0));
1283 __kfree_skb(skb);
1290 * @skb: buffer to free
1293 * skb reference and all the head states have been already dropped
1295 void __consume_stateless_skb(struct sk_buff *skb)
1297 trace_consume_skb(skb, __builtin_return_address(0));
1298 skb_release_data(skb, SKB_CONSUMED, false);
1299 kfree_skbmem(skb);
1302 static void napi_skb_cache_put(struct sk_buff *skb)
1307 kasan_poison_object_data(skbuff_cache, skb);
1308 nc->skb_cache[nc->skb_count++] = skb;
1321 void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason)
1323 skb_release_all(skb, reason, true);
1324 napi_skb_cache_put(skb);
1327 void napi_skb_free_stolen_head(struct sk_buff *skb)
1329 if (unlikely(skb->slow_gro)) {
1330 nf_reset_ct(skb);
1331 skb_dst_drop(skb);
1332 skb_ext_put(skb);
1333 skb_orphan(skb);
1334 skb->slow_gro = 0;
1336 napi_skb_cache_put(skb);
1339 void napi_consume_skb(struct sk_buff *skb, int budget)
1343 dev_consume_skb_any(skb);
1349 if (!skb_unref(skb))
1353 trace_consume_skb(skb, __builtin_return_address(0));
1356 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
1357 __kfree_skb(skb);
1361 skb_release_all(skb, SKB_CONSUMED, !!budget);
1362 napi_skb_cache_put(skb);
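
napi_consume_skb() above falls back to dev_consume_skb_any() when called with a zero budget (non-NAPI context, line 1343), otherwise it recycles the sk_buff through the per-cpu skb cache. A hypothetical TX-completion loop showing the intended calling convention; the ring layout and helper name are assumptions:

#include <linux/skbuff.h>

/* Hypothetical TX clean-up run from a driver's NAPI poll routine:
 * "budget" is the poll budget (non-zero in NAPI context), which lets
 * freed skbs go back to the per-cpu cache instead of the slow path.
 */
static void demo_clean_tx_ring(struct sk_buff **ring, unsigned int count,
                               int budget)
{
        unsigned int i;

        for (i = 0; i < count; i++) {
                if (!ring[i])
                        continue;
                napi_consume_skb(ring[i], budget);
                ring[i] = NULL;
        }
}
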
1422 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
1424 #define C(x) n->x = skb->x
1428 __copy_skb_header(n, skb);
1433 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
1448 atomic_inc(&(skb_shinfo(skb)->dataref));
1449 skb->cloned = 1;
1481 * skb_morph - morph one skb into another
1482 * @dst: the skb to receive the contents
1483 * @src: the skb to supply the contents
1485 * This is identical to skb_clone except that the target skb is
1488 * The target skb is returned upon exit.
1543 struct sk_buff *skb;
1547 skb = sock_omalloc(sk, 0, GFP_KERNEL);
1548 if (!skb)
1551 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
1552 uarg = (void *)skb->cb;
1556 kfree_skb(skb);
1600 /* TCP can create new skb to attach new uarg */
1627 static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
1629 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
1649 struct sk_buff *tail, *skb = skb_from_uarg(uarg);
1651 struct sock *sk = skb->sk;
1671 serr = SKB_EXT_ERR(skb);
1685 __skb_queue_tail(q, skb);
1686 skb = NULL;
1693 consume_skb(skb);
1697 void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
1721 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
1725 struct ubuf_info *orig_uarg = skb_zcopy(skb);
1726 int err, orig_len = skb->len;
1728 /* An skb can only point to one uarg. This edge case happens when
1729 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
1734 err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len);
1735 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
1736 struct sock *save_sk = skb->sk;
1738 /* Streams do not free skb on error. Reset to prev state. */
1739 iov_iter_revert(&msg->msg_iter, skb->len - orig_len);
1740 skb->sk = sk;
1741 ___pskb_trim(skb, orig_len);
1742 skb->sk = save_sk;
1746 skb_zcopy_set(skb, uarg, NULL);
1747 return skb->len - orig_len;
1751 void __skb_zcopy_downgrade_managed(struct sk_buff *skb)
1755 skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS;
1756 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1757 skb_frag_ref(skb, i);
1782 * skb_copy_ubufs - copy userspace skb frags buffers to kernel
1783 * @skb: the skb to modify
1786 * This must be called on skb with SKBFL_ZEROCOPY_ENABLE.
1796 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
1798 int num_frags = skb_shinfo(skb)->nr_frags;
1803 if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
1813 while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb))
1817 new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order);
1835 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1860 /* skb frags release userspace buffers */
1862 skb_frag_unref(skb, i);
1864 /* skb frags point to kernel buffers */
1866 __skb_fill_page_desc(skb, i, head, 0, psize);
1869 __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
1870 skb_shinfo(skb)->nr_frags = new_frags;
1873 skb_zcopy_clear(skb, false);
1880 * @skb: buffer to clone
1892 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
1894 struct sk_buff_fclones *fclones = container_of(skb,
1899 if (skb_orphan_frags(skb, gfp_mask))
1902 if (skb->fclone == SKB_FCLONE_ORIG &&
1908 if (skb_pfmemalloc(skb))
1918 return __skb_clone(n, skb);
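
skb_clone() above duplicates only the struct sk_buff; the data buffer stays shared and skb_shinfo()->dataref is bumped in __skb_clone() (line 1448). A hedged sketch of a mirroring caller that relies on exactly that sharing; the queue-based design and the helper name are assumptions:

#include <linux/errno.h>
#include <linux/skbuff.h>

/* Hypothetical tap/mirror: queue a clone for a second consumer.  Both
 * skbs reference the same payload, so neither side may modify it
 * without first unsharing (e.g. skb_ensure_writable(), line 5936).
 */
static int demo_mirror_to_queue(struct sk_buff *skb,
                                struct sk_buff_head *queue)
{
        struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

        if (!clone)
                return -ENOMEM;

        skb_queue_tail(queue, clone);
        return 0;
}
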
1922 void skb_headers_offset_update(struct sk_buff *skb, int off)
1925 if (skb->ip_summed == CHECKSUM_PARTIAL)
1926 skb->csum_start += off;
1927 /* {transport,network,mac}_header and tail are relative to skb->head */
1928 skb->transport_header += off;
1929 skb->network_header += off;
1930 if (skb_mac_header_was_set(skb))
1931 skb->mac_header += off;
1932 skb->inner_transport_header += off;
1933 skb->inner_network_header += off;
1934 skb->inner_mac_header += off;
1948 static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
1950 if (skb_pfmemalloc(skb))
1957 * @skb: buffer to copy
1972 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
1974 int headerlen = skb_headroom(skb);
1975 unsigned int size = skb_end_offset(skb) + skb->data_len;
1977 skb_alloc_rx_flag(skb), NUMA_NO_NODE);
1985 skb_put(n, skb->len);
1987 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
1989 skb_copy_header(n, skb);
1996 * @skb: buffer to copy
1997 * @headroom: headroom of new skb
1999 * @fclone: if true allocate the copy of the skb from the fclone
2011 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
2014 unsigned int size = skb_headlen(skb) + headroom;
2015 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
2024 skb_put(n, skb_headlen(skb));
2026 skb_copy_from_linear_data(skb, n->data, n->len);
2028 n->truesize += skb->data_len;
2029 n->data_len = skb->data_len;
2030 n->len = skb->len;
2032 if (skb_shinfo(skb)->nr_frags) {
2035 if (skb_orphan_frags(skb, gfp_mask) ||
2036 skb_zerocopy_clone(n, skb, gfp_mask)) {
2041 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2042 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
2043 skb_frag_ref(skb, i);
2048 if (skb_has_frag_list(skb)) {
2049 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
2053 skb_copy_header(n, skb);
2061 * @skb: buffer to reallocate
2067 * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
2071 * All the pointers pointing into skb header may change and must be
2075 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
2078 unsigned int osize = skb_end_offset(skb);
2086 BUG_ON(skb_shared(skb));
2088 skb_zcopy_downgrade_managed(skb);
2090 if (skb_pfmemalloc(skb))
2101 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
2104 skb_shinfo(skb),
2105 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
2112 if (skb_cloned(skb)) {
2113 if (skb_orphan_frags(skb, gfp_mask))
2115 if (skb_zcopy(skb))
2116 refcount_inc(&skb_uarg(skb)->refcnt);
2117 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2118 skb_frag_ref(skb, i);
2120 if (skb_has_frag_list(skb))
2121 skb_clone_fraglist(skb);
2123 skb_release_data(skb, SKB_CONSUMED, false);
2125 skb_free_head(skb, false);
2127 off = (data + nhead) - skb->head;
2129 skb->head = data;
2130 skb->head_frag = 0;
2131 skb->data += off;
2133 skb_set_end_offset(skb, size);
2137 skb->tail += off;
2138 skb_headers_offset_update(skb, nhead);
2139 skb->cloned = 0;
2140 skb->hdr_len = 0;
2141 skb->nohdr = 0;
2142 atomic_set(&skb_shinfo(skb)->dataref, 1);
2144 skb_metadata_clear(skb);
2146 /* It is not generally safe to change skb->truesize.
2148 * when skb is orphaned (not attached to a socket).
2150 if (!skb->sk || skb->destructor == sock_edemux)
2151 skb->truesize += size - osize;
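
pskb_expand_head() above reallocates the head to gain headroom or tailroom and invalidates every cached pointer into it (comment at line 2071). Callers usually reach it through skb_cow_head(); a hypothetical encapsulation sketch follows, where the header contents and the helper name are assumptions:

#include <linux/skbuff.h>
#include <linux/string.h>

/* Hypothetical: prepend an hlen-byte outer header.  skb_cow_head()
 * calls pskb_expand_head() only when headroom is short or the header
 * area is shared with a clone; afterwards any previously cached
 * pointers into skb->head must be re-derived.
 */
static int demo_push_outer_header(struct sk_buff *skb,
                                  const void *hdr, unsigned int hlen)
{
        int err;

        err = skb_cow_head(skb, hlen);
        if (err)
                return err;

        memcpy(skb_push(skb, hlen), hdr, hlen);
        return 0;
}
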
2162 /* Make private copy of skb with writable head and some headroom */
2164 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
2167 int delta = headroom - skb_headroom(skb);
2170 skb2 = pskb_copy(skb, GFP_ATOMIC);
2172 skb2 = skb_clone(skb, GFP_ATOMIC);
2184 int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
2190 saved_end_offset = skb_end_offset(skb);
2191 saved_truesize = skb->truesize;
2193 res = pskb_expand_head(skb, 0, 0, pri);
2197 skb->truesize = saved_truesize;
2199 if (likely(skb_end_offset(skb) == saved_end_offset))
2202 /* We can not change skb->end if the original or new value
2206 skb_end_offset(skb) == SKB_SMALL_HEAD_HEADROOM) {
2211 saved_end_offset, skb_end_offset(skb));
2216 shinfo = skb_shinfo(skb);
2218 /* We are about to change back skb->end,
2221 memmove(skb->head + saved_end_offset,
2225 skb_set_end_offset(skb, saved_end_offset);
2232 * @skb: buffer to reallocate
2235 * Unlike skb_realloc_headroom, this one does not allocate a new skb
2236 * if possible; copies skb->sk to new skb as needed
2237 * and frees original skb in case of failures.
2242 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom)
2244 int delta = headroom - skb_headroom(skb);
2245 int osize = skb_end_offset(skb);
2246 struct sock *sk = skb->sk;
2250 return skb;
2253 /* pskb_expand_head() might crash, if skb is shared. */
2254 if (skb_shared(skb) || !is_skb_wmem(skb)) {
2255 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2262 consume_skb(skb);
2263 skb = nskb;
2265 if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC))
2268 if (sk && is_skb_wmem(skb)) {
2269 delta = skb_end_offset(skb) - osize;
2271 skb->truesize += delta;
2273 return skb;
2276 kfree_skb(skb);
2283 * @skb: buffer to copy
2299 struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
2306 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
2307 gfp_mask, skb_alloc_rx_flag(skb),
2309 int oldheadroom = skb_headroom(skb);
2318 skb_put(n, skb->len);
2328 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
2329 skb->len + head_copy_len));
2331 skb_copy_header(n, skb);
2340 * __skb_pad - zero pad the tail of an skb
2341 * @skb: buffer to pad
2349 * May return error in out of memory cases. The skb is freed on error
2353 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
2359 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
2360 memset(skb->data+skb->len, 0, pad);
2364 ntail = skb->data_len + pad - (skb->end - skb->tail);
2365 if (likely(skb_cloned(skb) || ntail > 0)) {
2366 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
2371 /* FIXME: The use of this function with non-linear skb's really needs
2374 err = skb_linearize(skb);
2378 memset(skb->data + skb->len, 0, pad);
2383 kfree_skb(skb);
2390 * @skb: start of the buffer to use
2395 * fragmented buffer. @tail must be the last fragment of @skb -- or
2396 * @skb itself. If this would exceed the total buffer size the kernel
2401 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
2403 if (tail != skb) {
2404 skb->data_len += len;
2405 skb->len += len;
2413 * @skb: buffer to use
2420 void *skb_put(struct sk_buff *skb, unsigned int len)
2422 void *tmp = skb_tail_pointer(skb);
2423 SKB_LINEAR_ASSERT(skb);
2424 skb->tail += len;
2425 skb->len += len;
2426 if (unlikely(skb->tail > skb->end))
2427 skb_over_panic(skb, len, __builtin_return_address(0));
2434 * @skb: buffer to use
2441 void *skb_push(struct sk_buff *skb, unsigned int len)
2443 skb->data -= len;
2444 skb->len += len;
2445 if (unlikely(skb->data < skb->head))
2446 skb_under_panic(skb, len, __builtin_return_address(0));
2447 return skb->data;
2453 * @skb: buffer to use
2461 void *skb_pull(struct sk_buff *skb, unsigned int len)
2463 return skb_pull_inline(skb, len);
2470 * @skb: buffer to use
2478 void *skb_pull_data(struct sk_buff *skb, size_t len)
2480 void *data = skb->data;
2482 if (skb->len < len)
2485 skb_pull(skb, len);
2493 * @skb: buffer to alter
2498 * The skb must be linear.
2500 void skb_trim(struct sk_buff *skb, unsigned int len)
2502 if (skb->len > len)
2503 __skb_trim(skb, len);
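
The skb_put()/skb_push()/skb_pull()/skb_trim() group above is the data-pointer API: put appends at the tail, push prepends in front of skb->data, pull strips from the front, trim shortens the tail, and overshooting tailroom or headroom trips skb_over_panic()/skb_under_panic() (lines 2427 and 2446). A hypothetical sketch that builds a bare UDP skeleton with these helpers; ports, addresses and the remaining headers are deliberately left out, and the helper name is an assumption:

#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/udp.h>

/* Hypothetical: reserve room for all headers, append the payload with
 * skb_put(), then prepend the UDP header with skb_push().  A receiver
 * would walk back down with skb_pull() in the opposite order.
 */
static struct sk_buff *demo_build_udp_skeleton(unsigned int payload_len)
{
        unsigned int hdrs = ETH_HLEN + sizeof(struct iphdr) +
                            sizeof(struct udphdr);
        struct sk_buff *skb;
        struct udphdr *uh;

        skb = alloc_skb(hdrs + payload_len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        skb_reserve(skb, hdrs);                 /* headroom for pushes */
        memset(skb_put(skb, payload_len), 0, payload_len);

        uh = skb_push(skb, sizeof(*uh));
        memset(uh, 0, sizeof(*uh));             /* ports left to caller */
        uh->len = htons(sizeof(*uh) + payload_len);

        /* IP and Ethernet headers would be pushed the same way. */
        return skb;
}
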
2507 /* Trims skb to length len. It can change skb pointers.
2510 int ___pskb_trim(struct sk_buff *skb, unsigned int len)
2514 int offset = skb_headlen(skb);
2515 int nfrags = skb_shinfo(skb)->nr_frags;
2519 if (skb_cloned(skb) &&
2520 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
2528 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2535 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
2538 skb_shinfo(skb)->nr_frags = i;
2541 skb_frag_unref(skb, i);
2543 if (skb_has_frag_list(skb))
2544 skb_drop_fraglist(skb);
2548 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
2580 if (len > skb_headlen(skb)) {
2581 skb->data_len -= skb->len - len;
2582 skb->len = len;
2584 skb->len = len;
2585 skb->data_len = 0;
2586 skb_set_tail_pointer(skb, len);
2589 if (!skb->sk || skb->destructor == sock_edemux)
2590 skb_condense(skb);
2597 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
2599 if (skb->ip_summed == CHECKSUM_COMPLETE) {
2600 int delta = skb->len - len;
2602 skb->csum = csum_block_sub(skb->csum,
2603 skb_checksum(skb, len, delta, 0),
2605 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2606 int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len;
2607 int offset = skb_checksum_start_offset(skb) + skb->csum_offset;
2612 return __pskb_trim(skb, len);
2617 * __pskb_pull_tail - advance tail of skb header
2618 * @skb: buffer to reallocate
2628 * or value of new tail of skb in the case of success.
2630 * All the pointers pointing into skb header may change and must be
2634 /* Moves tail of skb head forward, copying data from fragmented part,
2637 * 2. It may change skb pointers.
2641 void *__pskb_pull_tail(struct sk_buff *skb, int delta)
2643 /* If skb has not enough free space at tail, get new one
2645 * room at tail, reallocate without expansion only if skb is cloned.
2647 int i, k, eat = (skb->tail + delta) - skb->end;
2649 if (eat > 0 || skb_cloned(skb)) {
2650 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
2655 BUG_ON(skb_copy_bits(skb, skb_headlen(skb),
2656 skb_tail_pointer(skb), delta));
2661 if (!skb_has_frag_list(skb))
2666 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2667 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2675 * Certainly, it is possible to add an offset to skb data,
2678 * further bloating skb head and crucify ourselves here instead.
2682 struct sk_buff *list = skb_shinfo(skb)->frag_list;
2694 if (skb_is_gso(skb) && !list->head_frag &&
2696 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2719 while ((list = skb_shinfo(skb)->frag_list) != insp) {
2720 skb_shinfo(skb)->frag_list = list->next;
2726 skb_shinfo(skb)->frag_list = clone;
2729 /* Success! Now we may commit changes to skb data. */
2734 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2735 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2738 skb_frag_unref(skb, i);
2741 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2743 *frag = skb_shinfo(skb)->frags[i];
2754 skb_shinfo(skb)->nr_frags = k;
2757 skb->tail += delta;
2758 skb->data_len -= delta;
2760 if (!skb->data_len)
2761 skb_zcopy_clear(skb, false);
2763 return skb_tail_pointer(skb);
2768 * skb_copy_bits - copy bits from skb to kernel buffer
2769 * @skb: source skb
2774 * Copy the specified number of bytes from the source skb to the
2782 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
2784 int start = skb_headlen(skb);
2788 if (offset > (int)skb->len - len)
2795 skb_copy_from_linear_data_offset(skb, offset, to, copy);
2802 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2804 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
2833 skb_walk_frags(skb, frag_iter) {
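
skb_copy_bits() above walks the linear area, the page frags and the frag_list, so it can extract bytes that are not in skb->head. A one-function hypothetical use, copying the IPv4 header into a caller-provided buffer for inspection (the helper name is an assumption):

#include <linux/ip.h>
#include <linux/skbuff.h>

/* Hypothetical: fetch the IPv4 header regardless of how the skb's data
 * is scattered; returns 0 or -EFAULT from skb_copy_bits().
 */
static int demo_peek_ipv4_header(const struct sk_buff *skb,
                                 struct iphdr *iph)
{
        return skb_copy_bits(skb, skb_network_offset(skb),
                             iph, sizeof(*iph));
}
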
2964 * Map linear and fragment data from the skb to spd. It reports true if the
2967 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
2975 * If skb->head_frag is set, this 'linear' part is backed by a
2979 if (__splice_segment(virt_to_page(skb->data),
2980 (unsigned long) skb->data & (PAGE_SIZE - 1),
2981 skb_headlen(skb),
2983 skb_head_is_locked(skb),
2990 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
2991 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
2999 skb_walk_frags(skb, iter) {
3016 * Map data from the skb to a pipe. Should handle both the linear part,
3019 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
3034 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);
3067 static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
3071 struct sk_buff *head = skb;
3078 while (offset < skb_headlen(skb) && len) {
3082 slen = min_t(int, len, skb_headlen(skb) - offset);
3083 kv.iov_base = skb->data + offset;
3098 /* All the data was skb head? */
3103 offset -= skb_headlen(skb);
3106 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
3107 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
3115 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
3116 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
3147 if (skb == head) {
3148 if (skb_has_frag_list(skb)) {
3149 skb = skb_shinfo(skb)->frag_list;
3152 } else if (skb->next) {
3153 skb = skb->next;
3165 /* Send skb data on a socket. Socket must be locked. */
3166 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
3169 return __skb_send_sock(sk, skb, offset, len, sendmsg_locked);
3173 /* Send skb data on a socket. Socket must be unlocked. */
3174 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len)
3176 return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked);
3180 * skb_store_bits - store bits from kernel buffer to skb
3181 * @skb: destination buffer
3187 * destination skb. This function handles all the messy bits of
3191 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
3193 int start = skb_headlen(skb);
3197 if (offset > (int)skb->len - len)
3203 skb_copy_to_linear_data_offset(skb, offset, from, copy);
3210 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3211 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3241 skb_walk_frags(skb, frag_iter) {
3268 /* Checksum skb data. */
3269 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
3272 int start = skb_headlen(skb);
3282 skb->data + offset, copy, csum);
3289 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3291 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3326 skb_walk_frags(skb, frag_iter) {
3353 __wsum skb_checksum(const struct sk_buff *skb, int offset,
3361 return __skb_checksum(skb, offset, len, csum, &ops);
3367 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
3370 int start = skb_headlen(skb);
3380 csum = csum_partial_copy_nocheck(skb->data + offset, to,
3389 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3394 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
3396 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3425 skb_walk_frags(skb, frag_iter) {
3452 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
3456 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
3459 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
3460 !skb->csum_complete_sw)
3461 netdev_rx_csum_fault(skb->dev, skb);
3463 if (!skb_shared(skb))
3464 skb->csum_valid = !sum;
3469 /* This function assumes skb->csum already holds pseudo header's checksum,
3471 * __skb_checksum_validate_complete(). And, the original skb->csum must
3475 * zero. The new checksum is stored back into skb->csum unless the skb is
3478 __sum16 __skb_checksum_complete(struct sk_buff *skb)
3483 csum = skb_checksum(skb, 0, skb->len, 0);
3485 sum = csum_fold(csum_add(skb->csum, csum));
3489 * between the original skb->csum and skb_checksum(). This means either
3490 * the original hardware checksum is incorrect or we screw up skb->csum
3491 * when moving skb->data around.
3494 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
3495 !skb->csum_complete_sw)
3496 netdev_rx_csum_fault(skb->dev, skb);
3499 if (!skb_shared(skb)) {
3501 skb->csum = csum;
3502 skb->ip_summed = CHECKSUM_COMPLETE;
3503 skb->csum_complete_sw = 1;
3504 skb->csum_valid = !sum;
3541 * Calculates the amount of linear headroom needed in the 'to' skb passed
3565 * skb_zerocopy - Zero copy skb to skb
3580 * -EFAULT: skb_copy_bits() found some problem with skb geometry
3586 int plen = 0; /* length of skb->head fragment */
3641 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
3646 if (skb->ip_summed == CHECKSUM_PARTIAL)
3647 csstart = skb_checksum_start_offset(skb);
3649 csstart = skb_headlen(skb);
3651 BUG_ON(csstart > skb_headlen(skb));
3653 skb_copy_from_linear_data(skb, to, csstart);
3656 if (csstart != skb->len)
3657 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
3658 skb->len - csstart);
3660 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3661 long csstuff = csstart + skb->csum_offset;
3721 struct sk_buff *skb;
3723 while ((skb = skb_dequeue(list)) != NULL)
3724 kfree_skb_reason(skb, reason);
3729 * skb_rbtree_purge - empty a skb rbtree
3744 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
3747 rb_erase(&skb->rbnode, root);
3748 sum += skb->truesize;
3749 kfree_skb(skb);
3756 struct sk_buff *skb, *next;
3763 skb_queue_walk_safe(list, skb, next) {
3764 if (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY ||
3765 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING)
3767 __skb_unlink(skb, list);
3768 __skb_queue_tail(&kill, skb);
3819 * @skb: buffer to remove
3827 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
3832 __skb_unlink(skb, list);
3857 static inline void skb_split_inside_header(struct sk_buff *skb,
3863 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
3866 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
3867 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
3869 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
3870 skb_shinfo(skb)->nr_frags = 0;
3871 skb1->data_len = skb->data_len;
3873 skb->data_len = 0;
3874 skb->len = len;
3875 skb_set_tail_pointer(skb, len);
3878 static inline void skb_split_no_header(struct sk_buff *skb,
3883 const int nfrags = skb_shinfo(skb)->nr_frags;
3885 skb_shinfo(skb)->nr_frags = 0;
3886 skb1->len = skb1->data_len = skb->len - len;
3887 skb->len = len;
3888 skb->data_len = len - pos;
3891 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
3894 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
3905 skb_frag_ref(skb, i);
3908 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
3909 skb_shinfo(skb)->nr_frags++;
3913 skb_shinfo(skb)->nr_frags++;
3920 * skb_split - Split fragmented skb to two parts at length len.
3921 * @skb: the buffer to split
3923 * @len: new length for skb
3925 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
3927 int pos = skb_headlen(skb);
3930 skb_zcopy_downgrade_managed(skb);
3932 skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags;
3933 skb_zerocopy_clone(skb1, skb, 0);
3935 skb_split_inside_header(skb, skb1, len, pos);
3937 skb_split_no_header(skb, skb1, len, pos);
3941 /* Shifting from/to a cloned skb is a no-go.
3945 static int skb_prepare_for_shift(struct sk_buff *skb)
3947 return skb_unclone_keeptruesize(skb, GFP_ATOMIC);
3951 * skb_shift - Shifts paged data partially from skb to another
3953 * @skb: buffer from which the paged data comes from
3957 * the length of the skb, from skb to tgt. Returns number bytes shifted.
3958 * It's up to caller to free skb if everything was shifted.
3966 * specialized skb free'er to handle frags without up-to-date nr_frags.
3968 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
3973 BUG_ON(shiftlen > skb->len);
3975 if (skb_headlen(skb))
3977 if (skb_zcopy(tgt) || skb_zcopy(skb))
3983 fragfrom = &skb_shinfo(skb)->frags[from];
3997 if (skb_prepare_for_shift(skb) ||
4002 fragfrom = &skb_shinfo(skb)->frags[from];
4015 /* Skip full, not-fitting skb to avoid expensive operations */
4016 if ((shiftlen == skb->len) &&
4017 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
4020 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
4023 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
4027 fragfrom = &skb_shinfo(skb)->frags[from];
4055 fragfrom = &skb_shinfo(skb)->frags[0];
4059 __skb_frag_unref(fragfrom, skb->pp_recycle);
4062 /* Reposition in the original skb */
4064 while (from < skb_shinfo(skb)->nr_frags)
4065 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
4066 skb_shinfo(skb)->nr_frags = to;
4068 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
4071 /* Most likely the tgt won't ever need its checksum anymore, skb on
4075 skb->ip_summed = CHECKSUM_PARTIAL;
4077 skb_len_add(skb, -shiftlen);
4084 * skb_prepare_seq_read - Prepare a sequential read of skb data
4085 * @skb: the buffer to read
4093 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
4098 st->root_skb = st->cur_skb = skb;
4106 * skb_seq_read - Sequentially read skb data
4111 * Reads a block of skb data at @consumed relative to the
4114 * of the block or 0 if the end of the skb data or the upper
4215 * skb_abort_seq_read - Abort a sequential read of skb data
4243 * skb_find_text - Find a text pattern in skb data
4244 * @skb: the buffer to look in
4249 * Finds a pattern in the skb data according to the specified
4254 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
4266 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
4273 int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
4276 int i = skb_shinfo(skb)->nr_frags;
4278 if (skb_can_coalesce(skb, i, page, offset)) {
4279 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
4281 skb_zcopy_downgrade_managed(skb);
4283 skb_fill_page_desc_noacc(skb, i, page, offset, size);
4293 * skb_pull_rcsum - pull skb and update receive checksum
4294 * @skb: buffer to update
4303 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
4305 unsigned char *data = skb->data;
4307 BUG_ON(len > skb->len);
4308 __skb_pull(skb, len);
4309 skb_postpull_rcsum(skb, data, len);
4310 return skb->data;
4326 struct sk_buff *skb_segment_list(struct sk_buff *skb,
4330 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list;
4331 unsigned int tnl_hlen = skb_tnl_header_len(skb);
4338 skb_push(skb, -skb_network_offset(skb) + offset);
4341 err = skb_unclone(skb, GFP_ATOMIC);
4345 skb_shinfo(skb)->frag_list = NULL;
4365 skb->next = nskb;
4381 len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb);
4382 __copy_skb_header(nskb, skb);
4384 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
4386 skb_copy_from_linear_data_offset(skb, -tnl_hlen,
4395 skb->truesize = skb->truesize - delta_truesize;
4396 skb->data_len = skb->data_len - delta_len;
4397 skb->len = skb->len - delta_len;
4399 skb_gso_reset(skb);
4401 skb->prev = tail;
4403 if (skb_needs_linearize(skb, features) &&
4404 __skb_linearize(skb))
4407 skb_get(skb);
4409 return skb;
4412 kfree_skb_list(skb->next);
4413 skb->next = NULL;
4419 * skb_segment - Perform protocol segmentation on skb.
4423 * This function performs segmentation on the given skb. It returns
4854 * struct skb_shared_info is located at the end of skb->head,
4868 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
4871 int start = skb_headlen(skb);
4882 sg_set_buf(sg, skb->data + offset, copy);
4889 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4894 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
4896 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4912 skb_walk_frags(skb, frag_iter) {
4941 * @skb: Socket buffer containing the buffers to be mapped
4951 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
4953 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
4964 /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only map skb to given
4965 * sglist without mark the sg which contain last skb data as the end.
4983 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
4986 return __skb_to_sgvec(skb, sg, offset, len, 0);
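
skb_to_sgvec() above maps an skb's linear data, page frags and frag_list into a scatterlist, which is how IPsec-style code hands packets to crypto engines. A hedged sketch of that call sequence; sizing the table (typically via skb_cow_data(), line 5009 below) is left to the caller, and the helper name is an assumption:

#include <linux/scatterlist.h>
#include <linux/skbuff.h>

/* Hypothetical: map the whole packet for a crypto operation.  "nents"
 * must be large enough for the worst case; skb_to_sgvec() returns the
 * number of entries used or a negative error.
 */
static int demo_map_for_crypto(struct sk_buff *skb,
                               struct scatterlist *sg, int nents)
{
        int nsg;

        sg_init_table(sg, nents);
        nsg = skb_to_sgvec(skb, sg, 0, skb->len);

        return nsg < 0 ? nsg : 0;
}
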
4994 * @skb: The socket buffer to check.
4996 * @trailer: Returned pointer to the skb where the @tailbits space begins
5004 * set to point to the skb in which this space begins.
5009 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
5015 /* If skb is cloned or its head is paged, reallocate
5019 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
5020 !__pskb_pull_tail(skb, __skb_pagelen(skb)))
5024 if (!skb_has_frag_list(skb)) {
5030 if (skb_tailroom(skb) < tailbits &&
5031 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
5035 *trailer = skb;
5042 skb_p = &skb_shinfo(skb)->frag_list;
5055 /* If the skb is the last, worry about trailer. */
5086 * OK, link new skb, drop old one */
5102 static void sock_rmem_free(struct sk_buff *skb)
5104 struct sock *sk = skb->sk;
5106 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
5109 static void skb_set_err_queue(struct sk_buff *skb)
5114 skb->pkt_type = PACKET_OUTGOING;
5121 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
5123 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
5127 skb_orphan(skb);
5128 skb->sk = sk;
5129 skb->destructor = sock_rmem_free;
5130 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
5131 skb_set_err_queue(skb);
5134 skb_dst_force(skb);
5136 skb_queue_tail(&sk->sk_error_queue, skb);
5143 static bool is_icmp_err_skb(const struct sk_buff *skb)
5145 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
5146 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
5152 struct sk_buff *skb, *skb_next = NULL;
5157 skb = __skb_dequeue(q);
5158 if (skb && (skb_next = skb_peek(q))) {
5165 if (is_icmp_err_skb(skb) && !icmp_next)
5171 return skb;
5176 * skb_clone_sk - create clone of skb, and take reference to socket
5177 * @skb: the skb to clone
5188 struct sk_buff *skb_clone_sk(struct sk_buff *skb)
5190 struct sock *sk = skb->sk;
5196 clone = skb_clone(skb, GFP_ATOMIC);
5209 static void __skb_complete_tx_timestamp(struct sk_buff *skb,
5217 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
5219 serr = SKB_EXT_ERR(skb);
5225 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
5227 serr->ee.ee_data = skb_shinfo(skb)->tskey;
5232 err = sock_queue_err_skb(sk, skb);
5235 kfree_skb(skb);
5252 void skb_complete_tx_timestamp(struct sk_buff *skb,
5255 struct sock *sk = skb->sk;
5264 *skb_hwtstamps(skb) = *hwtstamps;
5265 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
5271 kfree_skb(skb);
5280 struct sk_buff *skb;
5300 skb = tcp_get_timestamping_opt_stats(sk, orig_skb,
5305 skb = alloc_skb(0, GFP_ATOMIC);
5307 skb = skb_clone(orig_skb, GFP_ATOMIC);
5309 if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) {
5310 kfree_skb(skb);
5314 if (!skb)
5318 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
5320 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
5324 *skb_hwtstamps(skb) = *hwtstamps;
5326 __net_timestamp(skb);
5328 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
5341 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
5343 struct sock *sk = skb->sk;
5347 skb->wifi_acked_valid = 1;
5348 skb->wifi_acked = acked;
5350 serr = SKB_EXT_ERR(skb);
5359 err = sock_queue_err_skb(sk, skb);
5363 kfree_skb(skb);
5370 * @skb: the skb to set
5371 * @start: the number of bytes after skb->data to start checksumming.
5375 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
5377 * This function checks and sets those values and skb->ip_summed: if this
5380 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
5383 u32 csum_start = skb_headroom(skb) + (u32)start;
5385 if (unlikely(csum_start >= U16_MAX || csum_end > skb_headlen(skb))) {
5387 start, off, skb_headroom(skb), skb_headlen(skb));
5390 skb->ip_summed = CHECKSUM_PARTIAL;
5391 skb->csum_start = csum_start;
5392 skb->csum_offset = off;
5393 skb->transport_header = csum_start;
5398 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
5401 if (skb_headlen(skb) >= len)
5407 if (max > skb->len)
5408 max = skb->len;
5410 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
5413 if (skb_headlen(skb) < len)
5421 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
5429 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
5431 if (!err && !skb_partial_csum_set(skb, off,
5435 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
5438 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
5440 if (!err && !skb_partial_csum_set(skb, off,
5444 return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
5455 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
5464 err = skb_maybe_pull_tail(skb,
5470 if (ip_is_fragment(ip_hdr(skb)))
5473 off = ip_hdrlen(skb);
5480 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
5485 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
5486 ip_hdr(skb)->daddr,
5487 skb->len - off,
5488 ip_hdr(skb)->protocol, 0);
5500 #define OPT_HDR(type, skb, off) \
5501 (type *)(skb_network_header(skb) + (off))
5503 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
5518 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
5522 nexthdr = ipv6_hdr(skb)->nexthdr;
5524 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
5532 err = skb_maybe_pull_tail(skb,
5539 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
5547 err = skb_maybe_pull_tail(skb,
5554 hp = OPT_HDR(struct ip_auth_hdr, skb, off);
5562 err = skb_maybe_pull_tail(skb,
5569 hp = OPT_HDR(struct frag_hdr, skb, off);
5589 csum = skb_checksum_setup_ip(skb, nexthdr, off);
5594 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
5595 &ipv6_hdr(skb)->daddr,
5596 skb->len - off, nexthdr, 0);
5605 * @skb: the skb to set up
5608 int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
5612 switch (skb->protocol) {
5614 err = skb_checksum_setup_ipv4(skb, recalculate);
5618 err = skb_checksum_setup_ipv6(skb, recalculate);
5631 * skb_checksum_maybe_trim - maybe trims the given skb
5632 * @skb: the skb to check
5635 * Checks whether the given skb has data beyond the given transport length.
5636 * If so, returns a cloned skb trimmed to this transport length.
5637 * Otherwise returns the provided skb. Returns NULL in error cases
5638 * (e.g. transport_len exceeds skb length or out-of-memory).
5640 * Caller needs to set the skb transport header and free any returned skb if it
5641 * differs from the provided skb.
5643 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
5647 unsigned int len = skb_transport_offset(skb) + transport_len;
5650 if (skb->len < len)
5652 else if (skb->len == len)
5653 return skb;
5655 skb_chk = skb_clone(skb, GFP_ATOMIC);
5669 * skb_checksum_trimmed - validate checksum of an skb
5670 * @skb: the skb to check
5674 * Applies the given checksum function skb_chkf to the provided skb.
5675 * Returns a checked and maybe trimmed skb. Returns NULL on error.
5677 * If the skb has data beyond the given transport length, then a
5678 * trimmed & cloned skb is checked and returned.
5680 * Caller needs to set the skb transport header and free any returned skb if it
5681 * differs from the provided skb.
5683 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
5685 __sum16(*skb_chkf)(struct sk_buff *skb))
5688 unsigned int offset = skb_transport_offset(skb);
5691 skb_chk = skb_checksum_maybe_trim(skb, transport_len);
5708 if (skb_chk && skb_chk != skb)
5716 void __skb_warn_lro_forwarding(const struct sk_buff *skb)
5719 skb->dev->name);
5723 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
5726 skb_release_head_state(skb);
5727 kmem_cache_free(skbuff_cache, skb);
5729 __kfree_skb(skb);
5735 * skb_try_coalesce - try to merge skb to prior one
5817 /* if the skb is not cloned this does nothing
5833 * skb_scrub_packet - scrub an skb
5835 * @skb: buffer to clean
5841 * skb_scrub_packet can also be used to clean a skb before injecting it in
5843 * skb that could impact namespace isolation.
5845 void skb_scrub_packet(struct sk_buff *skb, bool xnet)
5847 skb->pkt_type = PACKET_HOST;
5848 skb->skb_iif = 0;
5849 skb->ignore_df = 0;
5850 skb_dst_drop(skb);
5851 skb_ext_reset(skb);
5852 nf_reset_ct(skb);
5853 nf_reset_trace(skb);
5856 skb->offload_fwd_mark = 0;
5857 skb->offload_l3_fwd_mark = 0;
5863 ipvs_reset(skb);
5864 skb->mark = 0;
5865 skb_clear_tstamp(skb);
5869 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
5874 if (skb_cow(skb, skb_headroom(skb)) < 0) {
5875 kfree_skb(skb);
5879 mac_len = skb->data - skb_mac_header(skb);
5881 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
5885 meta_len = skb_metadata_len(skb);
5887 meta = skb_metadata_end(skb) - meta_len;
5891 skb->mac_header += VLAN_HLEN;
5892 return skb;
5895 struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
5900 if (unlikely(skb_vlan_tag_present(skb))) {
5902 return skb;
5905 skb = skb_share_check(skb, GFP_ATOMIC);
5906 if (unlikely(!skb))
5909 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short))))
5912 vhdr = (struct vlan_hdr *)skb->data;
5914 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
5916 skb_pull_rcsum(skb, VLAN_HLEN);
5917 vlan_set_encap_proto(skb, vhdr);
5919 skb = skb_reorder_vlan_header(skb);
5920 if (unlikely(!skb))
5923 skb_reset_network_header(skb);
5924 if (!skb_transport_header_was_set(skb))
5925 skb_reset_transport_header(skb);
5926 skb_reset_mac_len(skb);
5928 return skb;
5931 kfree_skb(skb);
5936 int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len)
5938 if (!pskb_may_pull(skb, write_len))
5941 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
5944 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
5949 * expects a non skb_vlan_tag_present skb with a vlan tag payload
5951 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
5953 int offset = skb->data - skb_mac_header(skb);
5957 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
5962 err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
5966 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
5968 vlan_remove_tag(skb, vlan_tci);
5970 skb->mac_header += VLAN_HLEN;
5972 if (skb_network_offset(skb) < ETH_HLEN)
5973 skb_set_network_header(skb, ETH_HLEN);
5975 skb_reset_mac_len(skb);
5982 * Expects skb->data at mac header.
5984 int skb_vlan_pop(struct sk_buff *skb)
5990 if (likely(skb_vlan_tag_present(skb))) {
5991 __vlan_hwaccel_clear_tag(skb);
5993 if (unlikely(!eth_type_vlan(skb->protocol)))
5996 err = __skb_vlan_pop(skb, &vlan_tci);
6001 if (likely(!eth_type_vlan(skb->protocol)))
6004 vlan_proto = skb->protocol;
6005 err = __skb_vlan_pop(skb, &vlan_tci);
6009 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
6015 * Expects skb->data at mac header.
6017 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
6019 if (skb_vlan_tag_present(skb)) {
6020 int offset = skb->data - skb_mac_header(skb);
6024 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
6029 err = __vlan_insert_tag(skb, skb->vlan_proto,
6030 skb_vlan_tag_get(skb));
6034 skb->protocol = skb->vlan_proto;
6035 skb->mac_len += VLAN_HLEN;
6037 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
6039 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
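
skb_vlan_pop() and skb_vlan_push() above both expect skb->data to sit at the mac header (comments at lines 5982 and 6015). A hypothetical retagging helper chaining the two; the fixed 802.1Q protocol and the helper name are assumptions:

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

/* Hypothetical forwarding hook: strip the existing VLAN tag (if any)
 * and push a new 802.1Q tag with the given VLAN id.
 */
static int demo_retag_vlan(struct sk_buff *skb, u16 new_vid)
{
        int err;

        err = skb_vlan_pop(skb);
        if (err)
                return err;

        return skb_vlan_push(skb, htons(ETH_P_8021Q), new_vid);
}
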
6047 * @skb: Socket buffer to modify
6049 * Drop the Ethernet header of @skb.
6051 * Expects that skb->data points to the mac header and that no VLAN tags are
6056 int skb_eth_pop(struct sk_buff *skb)
6058 if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) ||
6059 skb_network_offset(skb) < ETH_HLEN)
6062 skb_pull_rcsum(skb, ETH_HLEN);
6063 skb_reset_mac_header(skb);
6064 skb_reset_mac_len(skb);
6073 * @skb: Socket buffer to modify
6077 * Prepend @skb with a new Ethernet header.
6079 * Expects that skb->data points to the mac header, which must be empty.
6083 int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
6089 if (skb_network_offset(skb) || skb_vlan_tag_present(skb))
6092 err = skb_cow_head(skb, sizeof(*eth));
6096 skb_push(skb, sizeof(*eth));
6097 skb_reset_mac_header(skb);
6098 skb_reset_mac_len(skb);
6100 eth = eth_hdr(skb);
6103 eth->h_proto = skb->protocol;
6105 skb_postpush_rcsum(skb, eth, sizeof(*eth));
6111 /* Update the ethertype of hdr and the skb csum value if required. */
6112 static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
6115 if (skb->ip_summed == CHECKSUM_COMPLETE) {
6118 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
6128 * @skb: buffer
6135 * Expects skb->data at mac header.
6139 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
6149 if (skb->encapsulation)
6152 err = skb_cow_head(skb, MPLS_HLEN);
6156 if (!skb->inner_protocol) {
6157 skb_set_inner_network_header(skb, skb_network_offset(skb));
6158 skb_set_inner_protocol(skb, skb->protocol);
6161 skb_push(skb, MPLS_HLEN);
6162 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
6164 skb_reset_mac_header(skb);
6165 skb_set_network_header(skb, mac_len);
6166 skb_reset_mac_len(skb);
6168 lse = mpls_hdr(skb);
6170 skb_postpush_rcsum(skb, lse, MPLS_HLEN);
6173 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto);
6174 skb->protocol = mpls_proto;
6183 * @skb: buffer
6188 * Expects skb->data at mac header.
6192 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
6197 if (unlikely(!eth_p_mpls(skb->protocol)))
6200 err = skb_ensure_writable(skb, mac_len + MPLS_HLEN);
6204 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
6205 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
6208 __skb_pull(skb, MPLS_HLEN);
6209 skb_reset_mac_header(skb);
6210 skb_set_network_header(skb, mac_len);
6216 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
6217 skb_mod_eth_type(skb, hdr, next_proto);
6219 skb->protocol = next_proto;
6228 * @skb: buffer
6231 * Expects skb->data at mac header.
6235 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse)
6239 if (unlikely(!eth_p_mpls(skb->protocol)))
6242 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
6246 if (skb->ip_summed == CHECKSUM_COMPLETE) {
6247 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse };
6249 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
6252 mpls_hdr(skb)->label_stack_entry = mpls_lse;
6261 * @skb: buffer
6263 * Expects skb->data at mac header.
6267 int skb_mpls_dec_ttl(struct sk_buff *skb)
6272 if (unlikely(!eth_p_mpls(skb->protocol)))
6275 if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
6278 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
6286 return skb_mpls_update_lse(skb, cpu_to_be32(lse));
6291 * alloc_skb_with_frags - allocate skb with page frags
6299 * This can be used to allocate a paged skb, given a maximal order for frags.
6308 struct sk_buff *skb;
6317 skb = alloc_skb(header_len, gfp_mask);
6318 if (!skb)
6343 skb_fill_page_desc(skb, nr_frags, page, 0, chunk);
6345 skb->truesize += (PAGE_SIZE << order);
6348 return skb;
6351 kfree_skb(skb);
6356 /* carve out the first off bytes from skb when off < headlen */
6357 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
6361 unsigned int size = skb_end_offset(skb);
6365 if (skb_pfmemalloc(skb))
6374 skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
6375 skb->len -= off;
6378 skb_shinfo(skb),
6380 frags[skb_shinfo(skb)->nr_frags]));
6381 if (skb_cloned(skb)) {
6383 if (skb_orphan_frags(skb, gfp_mask)) {
6387 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
6388 skb_frag_ref(skb, i);
6389 if (skb_has_frag_list(skb))
6390 skb_clone_fraglist(skb);
6391 skb_release_data(skb, SKB_CONSUMED, false);
6396 skb_free_head(skb, false);
6399 skb->head = data;
6400 skb->data = data;
6401 skb->head_frag = 0;
6402 skb_set_end_offset(skb, size);
6403 skb_set_tail_pointer(skb, skb_headlen(skb));
6404 skb_headers_offset_update(skb, 0);
6405 skb->cloned = 0;
6406 skb->hdr_len = 0;
6407 skb->nohdr = 0;
6408 atomic_set(&skb_shinfo(skb)->dataref, 1);
6413 static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
6415 /* carve out the first eat bytes from skb's frag_list. May recurse into
6418 static int pskb_carve_frag_list(struct sk_buff *skb,
6469 /* carve off first len bytes from skb. Split line (off) is in the
6470 * non-linear part of skb
6472 static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
6476 unsigned int size = skb_end_offset(skb);
6478 const int nfrags = skb_shinfo(skb)->nr_frags;
6481 if (skb_pfmemalloc(skb))
6490 skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0]));
6491 if (skb_orphan_frags(skb, gfp_mask)) {
6497 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);
6500 shinfo->frags[k] = skb_shinfo(skb)->frags[i];
6514 skb_frag_ref(skb, i);
6520 if (skb_has_frag_list(skb))
6521 skb_clone_fraglist(skb);
6524 if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) {
6526 if (skb_has_frag_list(skb))
6527 kfree_skb_list(skb_shinfo(skb)->frag_list);
6531 skb_release_data(skb, SKB_CONSUMED, false);
6533 skb->head = data;
6534 skb->head_frag = 0;
6535 skb->data = data;
6536 skb_set_end_offset(skb, size);
6537 skb_reset_tail_pointer(skb);
6538 skb_headers_offset_update(skb, 0);
6539 skb->cloned = 0;
6540 skb->hdr_len = 0;
6541 skb->nohdr = 0;
6542 skb->len -= off;
6543 skb->data_len = skb->len;
6544 atomic_set(&skb_shinfo(skb)->dataref, 1);
6548 /* remove len bytes from the beginning of the skb */
6549 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
6551 int headlen = skb_headlen(skb);
6554 return pskb_carve_inside_header(skb, len, headlen, gfp);
6556 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
6559 /* Extract to_copy bytes starting at off from skb, and return this in
6560 * a new skb
6562 struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
6565 struct sk_buff *clone = skb_clone(skb, gfp);
6581 * @skb: buffer
6583 * Can be used to save memory before skb is added to a busy queue.
6584 * If packet has bytes in frags and enough tail room in skb->head,
6588 * We do not reallocate skb->head thus can not fail.
6589 * Caller must re-evaluate skb->truesize if needed.
6591 void skb_condense(struct sk_buff *skb)
6593 if (skb->data_len) {
6594 if (skb->data_len > skb->end - skb->tail ||
6595 skb_cloned(skb))
6599 __pskb_pull_tail(skb, skb->data_len);
6601 /* At this point, skb->truesize might be over estimated,
6602 * because skb had a fragment, and fragments do not tell
6604 * When we pulled its content into skb->head, fragment
6606 * adjust skb->truesize, not knowing the frag truesize.
6608 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
6619 * __skb_ext_alloc - allocate a new skb extensions storage
6624 * skb via __skb_ext_set().
6676 * __skb_ext_set - attach the specified extension storage to this skb
6677 * @skb: buffer
6685 void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
6690 skb_ext_put(skb);
6694 skb->extensions = ext;
6695 skb->active_extensions = 1 << id;
6701 * @skb: buffer
6708 * If the skb was cloned, COW applies and the returned memory can be
6713 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
6718 if (skb->active_extensions) {
6719 old = skb->extensions;
6721 new = skb_ext_maybe_cow(old, skb->active_extensions);
6741 skb->slow_gro = 1;
6742 skb->extensions = new;
6743 skb->active_extensions |= 1 << id;
6766 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
6768 struct skb_ext *ext = skb->extensions;
6770 skb->active_extensions &= ~(1 << id);
6771 if (skb->active_extensions == 0) {
6772 skb->extensions = NULL;
6812 * skb_attempt_defer_free - queue skb for remote freeing
6813 * @skb: buffer
6815 * Put @skb in a per-cpu list, using the cpu which
6816 * allocated the skb/pages to reduce false sharing
6819 void skb_attempt_defer_free(struct sk_buff *skb)
6821 int cpu = skb->alloc_cpu;
6829 nodefer: __kfree_skb(skb);
6833 DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
6834 DEBUG_NET_WARN_ON_ONCE(skb->destructor);
6847 skb->next = sd->defer_list;
6849 WRITE_ONCE(sd->defer_list, skb);
6859 static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,
6868 skb->csum = csum_block_add(skb->csum, csum, skb->len);
6873 * @skb: The buffer to add pages to
6886 ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter,
6899 space = frag_limit - skb_shinfo(skb)->nr_frags;
6921 ret = skb_append_pagefrags(skb, page, off, part,
6928 if (skb->ip_summed == CHECKSUM_NONE)
6929 skb_splice_csum_page(skb, page, off, part);
6942 skb_len_add(skb, spliced);