Lines matching defs:len (net/core/skbuff.c)
105 pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
106 msg, addr, skb->len, sz, skb->head, skb->data,
420 * @len: length to allocate
430 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
438 len += NET_SKB_PAD;
443 if (len <= SKB_WITH_OVERHEAD(1024) ||
444 len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
446 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
452 len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
453 len = SKB_DATA_ALIGN(len);
460 data = page_frag_alloc(nc, len, gfp_mask);
465 data = page_frag_alloc(nc, len, gfp_mask);
473 skb = __build_skb(data, len);
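
The block above (lines 430-473) picks between two allocation strategies: very small requests and requests larger than a page go through __alloc_skb(), while mid-size ones are carved from a per-CPU page-fragment cache. A minimal userspace sketch of that sizing arithmetic, assuming a 4 KiB page, 64-byte cache lines, and a stand-in SHINFO_SZ for sizeof(struct skb_shared_info):

    #include <stdio.h>

    /* Illustrative stand-ins for the kernel macros; all values assumed. */
    #define PAGE_SIZE         4096UL
    #define NET_SKB_PAD       32UL
    #define SMP_CACHE_BYTES   64UL
    #define SKB_DATA_ALIGN(x) (((x) + (SMP_CACHE_BYTES - 1)) & ~(SMP_CACHE_BYTES - 1))
    #define SHINFO_SZ         320UL  /* pretend sizeof(struct skb_shared_info) */
    #define SKB_WITH_OVERHEAD(x) ((x) - SKB_DATA_ALIGN(SHINFO_SZ))

    int main(void)
    {
        unsigned long len = 1200 + NET_SKB_PAD;  /* request plus driver headroom */

        if (len <= SKB_WITH_OVERHEAD(1024) || len > SKB_WITH_OVERHEAD(PAGE_SIZE)) {
            /* Too small to be worth a page fragment, or too big to fit one. */
            printf("slab path (__alloc_skb analogue)\n");
        } else {
            /* Page-fragment path: add room for shared info, then align. */
            len += SKB_DATA_ALIGN(SHINFO_SZ);
            len = SKB_DATA_ALIGN(len);
            printf("page-frag path: carve %lu bytes\n", len);
        }
        return 0;
    }
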
495 * @len: length to allocate
505 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
512 len += NET_SKB_PAD + NET_IP_ALIGN;
517 if (len <= SKB_WITH_OVERHEAD(1024) ||
518 len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
520 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
527 len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
528 len = SKB_DATA_ALIGN(len);
533 data = page_frag_alloc(&nc->page, len, gfp_mask);
537 skb = __build_skb(data, len);
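
__napi_alloc_skb (lines 505-537) follows the same pattern but reserves NET_IP_ALIGN on top of NET_SKB_PAD, so that after a 14-byte Ethernet header the IP header lands 4-byte aligned. A tiny sketch of the headroom math; the 2-byte NET_IP_ALIGN is the common value and an assumption here:

    #include <assert.h>

    #define NET_SKB_PAD  32u
    #define NET_IP_ALIGN 2u  /* typical value; arch-dependent in the kernel */

    /* Buffer length actually requested for a NAPI allocation of 'req' bytes. */
    static unsigned int napi_len(unsigned int req)
    {
        return req + NET_SKB_PAD + NET_IP_ALIGN;
    }

    int main(void)
    {
        /* 2-byte pad + 14-byte Ethernet header leaves IP 4-byte aligned. */
        assert((NET_IP_ALIGN + 14) % 4 == 0);
        assert(napi_len(1500) == 1534);
        return 0;
    }
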
560 skb->len += size;
572 skb->len += size;
739 int i, len, seg_len;
742 len = skb->len;
744 len = min_t(int, skb->len, MAX_HEADER + 128);
752 printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
757 level, skb->len, headroom, skb_headlen(skb), tailroom,
781 seg_len = min_t(int, skb_headlen(skb), len);
785 len -= seg_len;
791 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
800 seg_len = min_t(int, p_len, len);
806 len -= seg_len;
807 if (!len)
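
The skb_dump() excerpt (739-807) caps the dumped length unless a full dump is requested (the min_t against MAX_HEADER + 128), prints the linear head first, then walks the frags while decrementing len, stopping as soon as the budget is spent. A userspace analogue of that bounded walk over iovec-style segments; all names here are invented for illustration:

    #include <stdio.h>
    #include <sys/uio.h>

    /* Dump at most 'len' bytes spread across segments, skb_dump-style. */
    static void dump_segments(const struct iovec *seg, int nseg, size_t len)
    {
        for (int i = 0; len && i < nseg; i++) {
            size_t seg_len = seg[i].iov_len < len ? seg[i].iov_len : len;
            const unsigned char *p = seg[i].iov_base;

            for (size_t j = 0; j < seg_len; j++)
                printf("%02x%s", p[j], (j + 1) % 16 ? " " : "\n");
            printf("\n");
            len -= seg_len;   /* stop early once the budget is spent */
        }
    }

    int main(void)
    {
        unsigned char head[] = "linear head";
        unsigned char frag[] = "first frag";
        struct iovec segs[] = {
            { head, sizeof(head) - 1 },
            { frag, sizeof(frag) - 1 },
        };
        dump_segments(segs, 2, 16);   /* capped, like MAX_HEADER + 128 */
        return 0;
    }
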
997 C(len);
1033 n->len = first->len;
1034 n->data_len = first->len;
1125 uarg->len = 1;
1148 * so uarg->len and sk_zckey access is serialized
1156 if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) {
1164 if ((u32)(uarg->id + uarg->len) == next) {
1167 uarg->len++;
1184 static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
1192 sum_len = old_hi - old_lo + 1ULL + len;
1200 serr->ee.ee_data += len;
1212 u16 len;
1216 /* if !len, there was only 1 call, and it was aborted
1219 if (!uarg->len || sock_flag(sk, SOCK_DEAD))
1222 len = uarg->len;
1224 hi = uarg->id + len - 1;
1239 !skb_zerocopy_notify_extend(tail, lo, len)) {
1270 uarg->len--;
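
Lines 1125-1270 are the MSG_ZEROCOPY completion bookkeeping: each uarg tracks a contiguous id range [id, id+len), and a new completion is folded in only when uarg->id + uarg->len equals the next id, with u32 wraparound intentionally allowed. A sketch of that coalescing rule with hypothetical names:

    #include <stdint.h>
    #include <stdio.h>

    struct zc_range { uint32_t id; uint16_t len; };  /* pending [id, id+len) */

    /* Try to fold completion 'next' into the pending range; 1 on success. */
    static int zc_extend(struct zc_range *r, uint32_t next)
    {
        if (r->len == UINT16_MAX - 1)              /* counter would overflow */
            return 0;
        if ((uint32_t)(r->id + r->len) == next) {  /* contiguous, wrap-safe */
            r->len++;
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        struct zc_range r = { .id = 0xfffffffe, .len = 1 };
        printf("%d\n", zc_extend(&r, 0xffffffff)); /* 1: extends the range  */
        printf("%d\n", zc_extend(&r, 0x00000000)); /* 1: u32 arithmetic wraps */
        printf("%d\n", zc_extend(&r, 7));          /* 0: not contiguous     */
        return 0;
    }
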
1278 int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len)
1280 return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
1285 struct msghdr *msg, int len,
1290 int err, orig_len = skb->len;
1298 err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
1299 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
1311 return skb->len - orig_len;
1536 skb_put(n, skb->len);
1538 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
1577 skb_copy_from_linear_data(skb, n->data, n->len);
1581 n->len = skb->len;
1767 n = __alloc_skb(newheadroom + skb->len + newtailroom,
1776 skb_put(n, skb->len);
1787 skb->len + head_copy_len));
1818 memset(skb->data+skb->len, 0, pad);
1836 memset(skb->data + skb->len, 0, pad);
1850 * @len: amount of data to add
1859 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
1862 skb->data_len += len;
1863 skb->len += len;
1865 return skb_put(tail, len);
1872 * @len: amount of data to add
1878 void *skb_put(struct sk_buff *skb, unsigned int len)
1882 skb->tail += len;
1883 skb->len += len;
1885 skb_over_panic(skb, len, __builtin_return_address(0));
1893 * @len: amount of data to add
1899 void *skb_push(struct sk_buff *skb, unsigned int len)
1901 skb->data -= len;
1902 skb->len += len;
1904 skb_under_panic(skb, len, __builtin_return_address(0));
1912 * @len: amount of data to remove
1919 void *skb_pull(struct sk_buff *skb, unsigned int len)
1921 return skb_pull_inline(skb, len);
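
skb_put(), skb_push() and skb_pull() (1859-1921) are the canonical four-pointer dance: put grows the tail, push grows the front, pull consumes from the front, and the panic helpers fire when tail passes end or data passes head. A userspace model of the same arithmetic, with assert() standing in for the panics:

    #include <assert.h>
    #include <string.h>

    /* Minimal stand-in for the four skb data pointers. */
    struct buf {
        unsigned char *head, *data, *tail, *end;
        unsigned int len;
    };

    static void *buf_put(struct buf *b, unsigned int len)   /* append at tail */
    {
        void *old = b->tail;
        b->tail += len;
        b->len  += len;
        assert(b->tail <= b->end);   /* skb_over_panic() in the kernel */
        return old;
    }

    static void *buf_push(struct buf *b, unsigned int len)  /* prepend at data */
    {
        b->data -= len;
        b->len  += len;
        assert(b->data >= b->head);  /* skb_under_panic() in the kernel */
        return b->data;
    }

    static void *buf_pull(struct buf *b, unsigned int len)  /* consume header */
    {
        assert(len <= b->len);
        b->len  -= len;
        b->data += len;
        return b->data;
    }

    int main(void)
    {
        unsigned char mem[256];
        struct buf b = { mem, mem + 64, mem + 64, mem + 256, 0 }; /* 64B headroom */

        memcpy(buf_put(&b, 5), "hello", 5);   /* payload */
        buf_push(&b, 14);                     /* room for an Ethernet header */
        buf_pull(&b, 14);                     /* ...and strip it again */
        assert(b.len == 5 && b.data == mem + 64);
        return 0;
    }
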
1928 * @len: new length
1934 void skb_trim(struct sk_buff *skb, unsigned int len)
1936 if (skb->len > len)
1937 __skb_trim(skb, len);
1941 /* Trims skb to length len. It can change skb pointers.
1944 int ___pskb_trim(struct sk_buff *skb, unsigned int len)
1958 if (offset >= len)
1964 if (end < len) {
1969 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
1984 int end = offset + frag->len;
1999 if (end < len) {
2004 if (end > len &&
2005 unlikely((err = pskb_trim(frag, len - offset))))
2014 if (len > skb_headlen(skb)) {
2015 skb->data_len -= skb->len - len;
2016 skb->len = len;
2018 skb->len = len;
2020 skb_set_tail_pointer(skb, len);
2031 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
2034 int delta = skb->len - len;
2037 skb_checksum(skb, len, delta, 0),
2038 len);
2040 int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len;
2046 return __pskb_trim(skb, len);
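
___pskb_trim (1944-2020) shortens a buffer that may spill into frags and a frag_list, and pskb_trim_rcsum_slow (2031-2046) keeps a CHECKSUM_COMPLETE value honest by subtracting the checksum of the bytes being cut off instead of re-summing everything. A sketch of that fixup in plain ones'-complement arithmetic (even trim offset assumed; csum_block_sub handles the odd case in the kernel):

    #include <stdint.h>
    #include <stdio.h>

    /* 16-bit ones'-complement sum of a byte range (big-endian words). */
    static uint32_t csum(const uint8_t *p, size_t n, uint32_t sum)
    {
        for (size_t i = 0; i + 1 < n; i += 2)
            sum += (uint32_t)(p[i] << 8 | p[i + 1]);
        if (n & 1)
            sum += (uint32_t)(p[n - 1] << 8);
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return sum;
    }

    /* csum_sub() analogue: remove 'part' from 'whole' in ones' complement. */
    static uint32_t csum_sub(uint32_t whole, uint32_t part)
    {
        uint32_t s = whole + (~part & 0xffff);
        return (s & 0xffff) + (s >> 16);
    }

    int main(void)
    {
        uint8_t pkt[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        uint32_t full = csum(pkt, 8, 0);
        uint32_t tail = csum(pkt + 4, 4, 0);   /* bytes being trimmed away */

        /* Trimming to 4 bytes: fix up instead of re-summing from scratch. */
        printf("%s\n", csum_sub(full, tail) == csum(pkt, 4, 0) ? "ok" : "bad");
        return 0;
    }
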
2121 if (list->len <= eat) {
2123 eat -= list->len;
2206 * @len: number of bytes to copy
2216 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
2222 if (offset > (int)skb->len - len)
2227 if (copy > len)
2228 copy = len;
2230 if ((len -= copy) == 0)
2240 WARN_ON(start > offset + len);
2248 if (copy > len)
2249 copy = len;
2259 if ((len -= copy) == 0)
2270 WARN_ON(start > offset + len);
2272 end = start + frag_iter->len;
2274 if (copy > len)
2275 copy = len;
2278 if ((len -= copy) == 0)
2286 if (!len)
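
skb_copy_bits (2216-2286) is the template most routines in this file follow: take what you can from the linear head, then from each page frag, then recurse into the frag_list, and fail with -EFAULT if len is still nonzero at the end. A flattened userspace analogue over a simple segment chain:

    #include <stdio.h>
    #include <string.h>

    struct seg { const char *data; size_t len; struct seg *next; };

    /* Copy 'len' bytes starting at 'offset' from a segment chain into 'to'.
     * Returns 0, or -1 when the chain is too short (-EFAULT in the kernel). */
    static int chain_copy_bits(const struct seg *s, size_t offset,
                               char *to, size_t len)
    {
        for (; s; s = s->next) {
            if (offset < s->len) {
                size_t copy = s->len - offset;
                if (copy > len)
                    copy = len;            /* don't overrun the request */
                memcpy(to, s->data + offset, copy);
                to += copy;
                len -= copy;
                offset = 0;                /* later segments start at 0 */
                if (!len)
                    return 0;
            } else {
                offset -= s->len;          /* skip a whole segment */
            }
        }
        return -1;                         /* ran out of data */
    }

    int main(void)
    {
        struct seg c = { "world", 5, NULL };
        struct seg b = { " ",     1, &c };
        struct seg a = { "hello", 5, &b };
        char out[16] = { 0 };

        if (!chain_copy_bits(&a, 3, out, 6))
            printf("%s\n", out);           /* "lo wor" */
        return 0;
    }
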
2303 static struct page *linear_to_page(struct page *page, unsigned int *len,
2312 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
2315 page_address(page) + *offset, *len);
2317 pfrag->offset += *len;
2329 spd->partial[spd->nr_pages - 1].len == offset);
2337 unsigned int *len, unsigned int offset,
2345 page = linear_to_page(page, len, &offset, sk);
2350 spd->partial[spd->nr_pages - 1].len += *len;
2355 spd->partial[spd->nr_pages].len = *len;
2364 unsigned int *len,
2369 if (!*len)
2384 unsigned int flen = min(*len, plen);
2391 *len -= flen;
2392 } while (*len && plen);
2402 unsigned int *offset, unsigned int *len,
2416 offset, len, spd,
2429 offset, len, spd, false, sk, pipe))
2434 if (*offset >= iter->len) {
2435 *offset -= iter->len;
2442 if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
2479 int len)
2481 unsigned int orig_len = len;
2489 while (offset < skb_headlen(skb) && len) {
2493 slen = min_t(int, len, skb_headlen(skb) - offset);
2504 len -= ret;
2508 if (!len)
2524 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
2527 slen = min_t(size_t, len, skb_frag_size(frag) - offset);
2536 len -= ret;
2544 if (len) {
2559 return orig_len - len;
2562 return orig_len == len ? ret : orig_len - len;
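
The send loop at 2479-2562 walks the linear area and then the frags, retrying short sends: each successful partial transfer advances the offset and shrinks len, and the final line reports progress (orig_len - len) even after an error, provided something went out. A userspace analogue around write(2); send_all() is a made-up helper name:

    #include <unistd.h>
    #include <errno.h>

    /* Send 'len' bytes, tolerating short writes; returns bytes sent, or -1
     * with errno set if nothing at all could be sent. */
    static ssize_t send_all(int fd, const char *buf, size_t len)
    {
        size_t orig_len = len;

        while (len) {
            ssize_t ret = write(fd, buf, len);
            if (ret < 0) {
                if (errno == EINTR)
                    continue;              /* retry interrupted writes */
                break;                     /* report partial progress below */
            }
            buf += ret;
            len -= (size_t)ret;
        }
        /* Mirror the kernel's "orig_len == len ? ret : orig_len - len". */
        return orig_len == len ? -1 : (ssize_t)(orig_len - len);
    }

    int main(void)
    {
        return send_all(STDOUT_FILENO, "partial-write demo\n", 19) > 0 ? 0 : 1;
    }
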
2571 * @len: number of bytes to copy
2578 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
2584 if (offset > (int)skb->len - len)
2588 if (copy > len)
2589 copy = len;
2591 if ((len -= copy) == 0)
2601 WARN_ON(start > offset + len);
2609 if (copy > len)
2610 copy = len;
2620 if ((len -= copy) == 0)
2631 WARN_ON(start > offset + len);
2633 end = start + frag_iter->len;
2635 if (copy > len)
2636 copy = len;
2640 if ((len -= copy) == 0)
2647 if (!len)
2656 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
2666 if (copy > len)
2667 copy = len;
2670 if ((len -= copy) == 0)
2680 WARN_ON(start > offset + len);
2689 if (copy > len)
2690 copy = len;
2706 if (!(len -= copy))
2716 WARN_ON(start > offset + len);
2718 end = start + frag_iter->len;
2721 if (copy > len)
2722 copy = len;
2727 if ((len -= copy) == 0)
2734 BUG_ON(len);
2741 int len, __wsum csum)
2748 return __skb_checksum(skb, offset, len, csum, &ops);
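
__skb_checksum (2656-2748) accumulates the ones'-complement sum segment by segment with csum_block_add(), which has one subtlety: a segment whose absolute start offset is odd contributes its bytes in swapped lanes, so its partial sum must be byte-rotated before being added. A sketch demonstrating why the rotation makes split sums equal the straight sum:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fold(uint32_t s)
    {
        while (s >> 16)
            s = (s & 0xffff) + (s >> 16);
        return s;
    }

    /* Ones'-complement sum of a range, big-endian 16-bit words. */
    static uint32_t csum(const uint8_t *p, size_t n)
    {
        uint32_t s = 0;
        for (size_t i = 0; i + 1 < n; i += 2)
            s += (uint32_t)(p[i] << 8 | p[i + 1]);
        if (n & 1)
            s += (uint32_t)(p[n - 1] << 8);
        return fold(s);
    }

    /* csum_block_add() analogue: append a block's sum at byte 'offset'. */
    static uint32_t csum_block_add(uint32_t sum, uint32_t part, size_t offset)
    {
        if (offset & 1)   /* odd start: the block's bytes swap lanes */
            part = ((part & 0xff) << 8) | (part >> 8);
        return fold(sum + part);
    }

    int main(void)
    {
        uint8_t pkt[7] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
        /* Sum head [0,3) and tail [3,7) separately, then combine. */
        uint32_t combined = csum_block_add(csum(pkt, 3), csum(pkt + 3, 4), 3);
        printf("%s\n", combined == csum(pkt, 7) ? "ok" : "bad");
        return 0;
    }
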
2755 u8 *to, int len)
2765 if (copy > len)
2766 copy = len;
2769 if ((len -= copy) == 0)
2779 WARN_ON(start > offset + len);
2789 if (copy > len)
2790 copy = len;
2804 if (!(len -= copy))
2816 WARN_ON(start > offset + len);
2818 end = start + frag_iter->len;
2820 if (copy > len)
2821 copy = len;
2826 if ((len -= copy) == 0)
2834 BUG_ON(len);
2839 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
2843 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
2870 csum = skb_checksum(skb, 0, skb->len, 0);
2898 static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
2907 int offset, int len)
2941 hlen = from->len;
2945 hlen = from->len;
2955 * @len: number of bytes to copy from source buffer
2958 * Copies up to `len` bytes from `from` to `to` by creating references
2970 skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
2981 if (len <= skb_tailroom(to))
2982 return skb_copy_bits(from, 0, skb_put(to, len), len);
2988 len -= hlen;
2990 plen = min_t(int, skb_headlen(from), len);
2997 len -= plen;
3001 to->truesize += len + plen;
3002 to->len += len + plen;
3003 to->data_len += len + plen;
3014 if (!len)
3018 len);
3020 len -= size;
3045 if (csstart != skb->len)
3047 skb->len - csstart);
3224 const u32 len, const int pos)
3228 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
3229 pos - len);
3237 skb1->len += skb1->data_len;
3239 skb->len = len;
3240 skb_set_tail_pointer(skb, len);
3245 const u32 len, int pos)
3251 skb1->len = skb1->data_len = skb->len - len;
3252 skb->len = len;
3253 skb->data_len = len - pos;
3258 if (pos + size > len) {
3261 if (pos < len) {
3271 skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos);
3272 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
3273 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
3285 * skb_split - Split fragmented skb to two parts at length len.
3288 * @len: new length for skb
3290 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
3297 if (len < pos) /* Split line is inside header. */
3298 skb_split_inside_header(skb, skb1, len, pos);
3300 skb_split_no_header(skb, skb1, len, pos);
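
skb_split (3290-3300) dispatches on whether the split point lands inside the linear header: if so (3224-3240), the linear tail is copied into skb1 and the frag array moves wholesale; otherwise (3245-3273), frags before the split stay, frags after it move, and a straddling frag is shared between both skbs with adjusted offset and size. A sketch of that distribution over (off, size) descriptors:

    #include <stdio.h>

    struct frag { unsigned int off, size; };

    /* Split a frag array at absolute position 'len'; 'pos' is headlen. */
    static void split_frags(struct frag *a, unsigned int *an,
                            struct frag *b, unsigned int *bn,
                            unsigned int pos, unsigned int len)
    {
        unsigned int n = *an, na = 0, nb = 0;

        for (unsigned int i = 0; i < n; i++) {
            unsigned int end = pos + a[i].size;

            if (end <= len) {                  /* wholly before the split */
                na++;
            } else if (pos >= len) {           /* wholly after: move it */
                b[nb++] = a[i];
            } else {                           /* straddles: share the page */
                b[nb] = a[i];
                b[nb].off  += len - pos;
                b[nb].size -= len - pos;
                nb++;
                a[i].size = len - pos;
                na++;
            }
            pos = end;
        }
        *an = na;
        *bn = nb;
    }

    int main(void)
    {
        struct frag a[3] = { { 0, 100 }, { 0, 200 }, { 0, 300 } };
        struct frag b[3];
        unsigned int an = 3, bn = 0;

        split_frags(a, &an, b, &bn, 0, 150);   /* split mid-second-frag */
        printf("a: %u frags (last size %u), b: %u frags (first off %u)\n",
               an, a[an - 1].size, bn, b[0].off);
        return 0;
    }
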
3348 BUG_ON(shiftlen > skb->len);
3391 if ((shiftlen == skb->len) &&
3453 skb->len -= shiftlen;
3456 tgt->len += shiftlen;
3653 * @len: length of data pulled
3661 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
3665 BUG_ON(len > skb->len);
3666 __skb_pull(skb, len);
3667 skb_postpull_rcsum(skb, data, len);
3735 delta_len += nskb->len;
3754 skb->len = skb->len - delta_len;
3777 if (unlikely(p->len + skb->len >= 65536))
3789 p->data_len += skb->len;
3791 p->len += skb->len;
3819 unsigned int len = head_skb->len;
3874 frag_len = list_skb->len;
3876 if (frag_len != iter->len && iter->next)
3881 len -= iter->len;
3884 if (len != frag_len)
3891 * Cap len to not accidentally hit GSO_BY_FRAGS.
3893 partial_segs = min(len, GSO_BY_FRAGS - 1U) / mss;
3918 len = list_skb->len;
3920 len = head_skb->len - offset;
3921 if (len > mss)
3922 len = mss;
3928 if (hsize > len || !sg)
3929 hsize = len;
3932 (skb_headlen(list_skb) == len || sg)) {
3933 BUG_ON(skb_headlen(list_skb) > len);
3945 while (pos < offset + len) {
3949 if (pos + size > offset + len)
3959 if (unlikely(pskb_trim(nskb, len))) {
4000 if (nskb->len == len + doffset)
4010 len),
4011 len);
4015 if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len))
4032 while (pos < offset + len) {
4076 if (pos + size <= offset + len) {
4081 skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
4089 nskb->data_len = len - hsize;
4090 nskb->len += nskb->data_len;
4103 nskb->len - doffset, 0);
4107 } while ((offset += len) < head_skb->len);
4134 if (tail->len - doffset <= gso_size)
4137 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
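
skb_segment (3819-4137) slices a payload into mss-sized pieces, and the fixup at 4134-4137 recomputes the trailing segment's gso_segs with DIV_ROUND_UP. The arithmetic is worth making explicit:

    #include <assert.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int payload = 4000, mss = 1448;

        /* 4000 bytes at mss 1448 -> 3 segments: 1448 + 1448 + 1104. */
        assert(DIV_ROUND_UP(payload, mss) == 3);
        assert(payload - 2 * mss == 1104);
        return 0;
    }
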
4162 unsigned int len = skb_gro_len(skb);
4166 if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
4199 skb->len -= skb->data_len;
4240 skb->len -= eat;
4256 p->data_len += len;
4258 p->len += len;
4260 lp->data_len += len;
4262 lp->len += len;
4338 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
4350 if (copy > len)
4351 copy = len;
4354 if ((len -= copy) == 0)
4362 WARN_ON(start > offset + len);
4370 if (copy > len)
4371 copy = len;
4375 if (!(len -= copy))
4385 WARN_ON(start > offset + len);
4387 end = start + frag_iter->len;
4392 if (copy > len)
4393 copy = len;
4399 if ((len -= copy) == 0)
4405 BUG_ON(len);
4414 * @len: Length of buffer space to be mapped
4421 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
4423 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
4454 int offset, int len)
4456 return __skb_to_sgvec(skb, sg, offset, len, 0);
4864 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
4867 if (skb_headlen(skb) >= len)
4873 if (max > skb->len)
4874 max = skb->len;
4879 if (skb_headlen(skb) < len)
4953 skb->len - off,
4974 unsigned int len;
4990 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
4991 while (off <= len && !done) {
5062 skb->len - off, nexthdr, 0);
5113 unsigned int len = skb_transport_offset(skb) + transport_len;
5116 if (skb->len < len)
5118 else if (skb->len == len)
5125 ret = pskb_trim_rcsum(skb_chk, len);
5211 int i, delta, len = from->len;
5218 if (len <= skb_tailroom(to)) {
5219 if (len)
5220 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
5259 WARN_ON_ONCE(delta < len);
5276 to->len += len;
5277 to->data_len += len;
5451 * @len: length to validate against
5456 bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
5458 return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
5983 skb->len -= off;
6043 if (list->len <= eat) {
6045 eat -= list->len;
6081 /* carve off first len bytes from skb. Split line (off) is in the
6162 skb->len -= off;
6163 skb->data_len = skb->len;
6168 /* remove len bytes from the beginning of the skb */
6169 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
6173 if (len < headlen)
6174 return pskb_carve_inside_header(skb, len, headlen, gfp);
6176 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
6278 for (i = 0; i < sp->len; i++)
6363 for (i = 0; i < sp->len; i++)
6382 sp->len = 0;