Lines Matching refs:skb

16 			/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
32 /* This indicates where we are processing relative to skb->data. */
35 /* This is non-zero if the packet cannot be merged with the new skb. */
68 /* Free the skb? */
91 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
94 static inline int gro_recursion_inc_test(struct sk_buff *skb)
96 return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
102 struct sk_buff *skb)
104 if (unlikely(gro_recursion_inc_test(skb))) {
105 NAPI_GRO_CB(skb)->flush |= 1;
109 return cb(head, skb);
117 struct sk_buff *skb)
119 if (unlikely(gro_recursion_inc_test(skb))) {
120 NAPI_GRO_CB(skb)->flush |= 1;
124 return cb(sk, head, skb);
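
These matches appear to come from the kernel's <net/gro.h>. The fragments above are the GRO recursion guard: gro_recursion_inc_test() counts nested gro_receive dispatches per skb, and the two inline wrappers around a callback (call_gro_receive() and its socket-taking variant) set NAPI_GRO_CB(skb)->flush once GRO_RECURSION_LIMIT is reached instead of descending further. Below is a hedged sketch of how an encapsulation handler typically dispatches the inner protocol through that guard; the "foo" names are hypothetical, while gro_find_receive_by_type(), call_gro_receive() and skb_gro_flush_final() are the real helpers this listing refers to.

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <net/gro.h>

static struct sk_buff *foo_gro_receive(struct list_head *head,
				       struct sk_buff *skb)
{
	struct packet_offload *ptype;
	struct sk_buff *pp = NULL;
	__be16 type = htons(ETH_P_IP);	/* inner protocol, illustrative */
	int flush = 1;

	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out;

	flush = 0;
	/* call_gro_receive() bumps the per-skb recursion counter and sets
	 * NAPI_GRO_CB(skb)->flush once GRO_RECURSION_LIMIT is hit, so a
	 * deep stack of encapsulations cannot recurse without bound.
	 */
	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);

out:
	skb_gro_flush_final(skb, pp, flush);
	return pp;
}
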
127 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
129 return NAPI_GRO_CB(skb)->data_offset;
132 static inline unsigned int skb_gro_len(const struct sk_buff *skb)
134 return skb->len - NAPI_GRO_CB(skb)->data_offset;
137 static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
139 NAPI_GRO_CB(skb)->data_offset += len;
142 static inline void *skb_gro_header_fast(struct sk_buff *skb,
145 return NAPI_GRO_CB(skb)->frag0 + offset;
148 static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
150 return NAPI_GRO_CB(skb)->frag0_len < hlen;
153 static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
155 NAPI_GRO_CB(skb)->frag0 = NULL;
156 NAPI_GRO_CB(skb)->frag0_len = 0;
159 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
162 if (!pskb_may_pull(skb, hlen))
165 skb_gro_frag0_invalidate(skb);
166 return skb->data + offset;
169 static inline void *skb_gro_header(struct sk_buff *skb,
174 ptr = skb_gro_header_fast(skb, offset);
175 if (skb_gro_header_hard(skb, hlen))
176 ptr = skb_gro_header_slow(skb, hlen, offset);
180 static inline void *skb_gro_network_header(struct sk_buff *skb)
182 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
183 skb_network_offset(skb);
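
skb_gro_offset()/skb_gro_len() track a cursor into the packet relative to skb->data, and skb_gro_header() maps a header either out of the frag0 fast path or, when not enough bytes are available there, via the pskb_may_pull() slow path (which then invalidates frag0). A minimal hedged sketch of the usual header-mapping step in a gro_receive handler; struct foohdr and the handler name are hypothetical.

#include <net/gro.h>

/* Hypothetical fixed-size header, only to give the sketch a concrete size. */
struct foohdr {
	__be16	flags;
	__be16	proto;
};

static struct sk_buff *foo_hdr_gro_receive(struct list_head *head,
					   struct sk_buff *skb)
{
	unsigned int off = skb_gro_offset(skb);
	unsigned int hlen = off + sizeof(struct foohdr);
	struct foohdr *fh;

	/* Try frag0 first; fall back to linearizing just enough bytes. */
	fh = skb_gro_header(skb, hlen, off);
	if (unlikely(!fh)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	/* ... parse fh, match flows on "head", dispatch the inner layer ... */
	return NULL;
}
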
186 static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
188 const struct iphdr *iph = skb_gro_network_header(skb);
191 skb_gro_len(skb), proto, 0);
194 static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
197 if (NAPI_GRO_CB(skb)->csum_valid)
198 NAPI_GRO_CB(skb)->csum = wsum_negate(csum_partial(start, len,
199 wsum_negate(NAPI_GRO_CB(skb)->csum)));
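
skb_gro_postpull_rcsum() keeps NAPI_GRO_CB(skb)->csum consistent when a handler advances past bytes that a CHECKSUM_COMPLETE value already covers. A hedged helper showing the usual pairing with skb_gro_pull(), as the tunnel GRO paths do after parsing their header; the wrapper name is hypothetical.

#include <net/gro.h>

static inline void foo_gro_consume_hdr(struct sk_buff *skb, const void *hdr,
					unsigned int hlen)
{
	/* Advance the GRO cursor past the header just parsed ... */
	skb_gro_pull(skb, hlen);
	/* ... and subtract those bytes from the running complete checksum
	 * so later validation over the remaining payload still matches.
	 */
	skb_gro_postpull_rcsum(skb, hdr, hlen);
}
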
207 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
209 static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
211 return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
214 static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
218 return ((skb->ip_summed != CHECKSUM_PARTIAL ||
219 skb_checksum_start_offset(skb) <
220 skb_gro_offset(skb)) &&
221 !skb_at_gro_remcsum_start(skb) &&
222 NAPI_GRO_CB(skb)->csum_cnt == 0 &&
226 static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
229 if (NAPI_GRO_CB(skb)->csum_valid &&
230 !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
233 NAPI_GRO_CB(skb)->csum = psum;
235 return __skb_gro_checksum_complete(skb);
238 static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
240 if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
242 NAPI_GRO_CB(skb)->csum_cnt--;
244 /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
248 __skb_incr_checksum_unnecessary(skb);
252 #define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
256 if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
257 __ret = __skb_gro_checksum_validate_complete(skb, \
258 compute_pseudo(skb, proto)); \
260 skb_gro_incr_csum_unnecessary(skb); \
264 #define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
265 __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
267 #define skb_gro_checksum_validate_zero_check(skb, proto, check, \
269 __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
271 #define skb_gro_checksum_simple_validate(skb) \
272 __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
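
The three validation macros wrap the machinery above: they only compute anything when hardware has not already vouched for the checksum, and on success they either consume a CHECKSUM_UNNECESSARY level or record a verified complete checksum. A hedged sketch of the canonical call site, modeled on the shape of the IPv4 UDP/TCP GRO receive paths; "foo4" is hypothetical.

#include <linux/in.h>
#include <net/gro.h>

static struct sk_buff *foo4_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	/* Non-zero return: the checksum could not be validated, so give up
	 * on aggregation for this packet and flush it.
	 */
	if (skb_gro_checksum_validate(skb, IPPROTO_UDP,
				      inet_gro_compute_pseudo))
		goto flush;

	/* Checksum state is trusted from here on; flow matching against
	 * the packets already queued on "head" would follow.
	 */
	return NULL;

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}
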
274 static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
276 return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
277 !NAPI_GRO_CB(skb)->csum_valid);
280 static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
283 NAPI_GRO_CB(skb)->csum = ~pseudo;
284 NAPI_GRO_CB(skb)->csum_valid = 1;
287 #define skb_gro_checksum_try_convert(skb, proto, compute_pseudo) \
289 if (__skb_gro_checksum_convert_check(skb)) \
290 __skb_gro_checksum_convert(skb, \
291 compute_pseudo(skb, proto)); \
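
skb_gro_checksum_try_convert() covers a different case: when the outer checksum is already known to be good but no complete checksum value is recorded (csum_cnt == 0 and !csum_valid), it seeds NAPI_GRO_CB(skb)->csum from the pseudo header so later stages such as remote checksum offload can use it. A hedged one-line wrapper mirroring how the UDP tunnel GRO path calls it; the wrapper name is hypothetical.

#include <linux/in.h>
#include <net/gro.h>

static inline void foo_udp4_gro_convert_csum(struct sk_buff *skb)
{
	/* No-op unless nothing valid is recorded yet; otherwise derives a
	 * complete-checksum value from the IPv4 pseudo header.
	 */
	skb_gro_checksum_try_convert(skb, IPPROTO_UDP,
				     inet_gro_compute_pseudo);
}
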
305 static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
314 BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
317 NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
321 ptr = skb_gro_header(skb, off + plen, off);
325 delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
328 /* Adjust skb->csum since we changed the packet */
329 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
337 static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
346 ptr = skb_gro_header(skb, plen, grc->offset);
354 static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
357 NAPI_GRO_CB(skb)->flush |= flush;
359 static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
365 NAPI_GRO_CB(skb)->flush |= flush;
366 skb_gro_remcsum_cleanup(skb, grc);
367 skb->remcsum_offload = 0;
371 static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
373 NAPI_GRO_CB(skb)->flush |= flush;
375 static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
380 NAPI_GRO_CB(skb)->flush |= flush;
381 skb_gro_remcsum_cleanup(skb, grc);
382 skb->remcsum_offload = 0;
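
The remcsum helpers and the two flush_final variants (the second pair is the other build-config copy of the same inlines) form a small lifecycle: initialize a struct gro_remcsum, let skb_gro_remcsum_process() patch the inner checksum while recording the offset and delta, and finish with skb_gro_flush_final_remcsum(), which propagates the flush decision, restores the temporarily rewritten checksum bytes and clears skb->remcsum_offload. A hedged sketch loosely following the VXLAN/GUE shape; header parsing is elided and the "foo" names are illustrative.

#include <net/gro.h>

static struct sk_buff *foo_tnl_gro_receive(struct list_head *head,
					   struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct gro_remcsum grc;
	int flush = 1;

	skb_gro_remcsum_init(&grc);

	/* ... map the tunnel header with skb_gro_header(); if it signals
	 * remote checksum offload, skb_gro_remcsum_process() patches the
	 * inner checksum, records offset/delta in "grc" and updates
	 * NAPI_GRO_CB(skb)->csum; flow matching/dispatch would follow ...
	 */

	/* Propagate the flush decision, restore the checksum bytes recorded
	 * in "grc" and clear skb->remcsum_offload on the way out.
	 */
	skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
	return pp;
}
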
401 #define indirect_call_gro_receive_inet(cb, f2, f1, head, skb) \
403 unlikely(gro_recursion_inc_test(skb)) ? \
404 NAPI_GRO_CB(skb)->flush |= 1, NULL : \
405 INDIRECT_CALL_INET(cb, f2, f1, head, skb); \
408 struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
410 int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
412 static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
417 off = skb_gro_offset(skb);
419 uh = skb_gro_header(skb, hlen, off);
424 static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
426 const struct ipv6hdr *iph = skb_gro_network_header(skb);
429 skb_gro_len(skb), proto, 0));
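
ip6_gro_compute_pseudo() is the IPv6 counterpart of inet_gro_compute_pseudo() earlier in the listing: the same pseudo-header sum over skb_gro_len() bytes, taken from the IPv6 header instead. A hedged fragment showing that only the helper passed to the validation macro changes; "foo6" is hypothetical.

#include <linux/in.h>
#include <net/gro.h>

static bool foo6_gro_csum_ok(struct sk_buff *skb)
{
	/* Same pattern as the IPv4 sketch earlier, with the IPv6
	 * pseudo-header helper plugged in.
	 */
	if (skb_gro_checksum_validate(skb, IPPROTO_UDP,
				      ip6_gro_compute_pseudo))
		return false;
	return true;
}
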
432 int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
447 static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
449 list_add_tail(&skb->list, &napi->rx_list);
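
gro_normal_one() is the batching exit for packets GRO will not (or can no longer) coalesce: they are appended to napi->rx_list and handed to the stack in one netif_receive_skb_list() pass once roughly gro_normal_batch segments have accumulated. A minimal hedged sketch of the call the GRO_NORMAL completion path makes; the wrapper name is hypothetical.

#include <linux/netdevice.h>
#include <net/gro.h>

static void foo_deliver_uncoalesced(struct napi_struct *napi,
				    struct sk_buff *skb)
{
	/* Count this skb as a single segment toward the batch threshold. */
	gro_normal_one(napi, skb, 1);
}
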
458 * The caller must verify skb_valid_dst(skb) is false and skb->dev is initialized.
461 static inline void inet_get_iif_sdif(const struct sk_buff *skb, int *iif, int *sdif)
463 *iif = inet_iif(skb) ?: skb->dev->ifindex;
467 if (netif_is_l3_slave(skb->dev)) {
468 struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev);
479 * The caller must verify skb_valid_dst(skb) is false and skb->dev is initialized.
482 static inline void inet6_get_iif_sdif(const struct sk_buff *skb, int *iif, int *sdif)
484 /* using skb->dev->ifindex because skb_dst(skb) is not initialized */
485 *iif = skb->dev->ifindex;
489 if (netif_is_l3_slave(skb->dev)) {
490 struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev);
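
inet_get_iif_sdif() and its IPv6 twin exist because in the GRO path skb_dst() is not yet set up, so the incoming interface and the L3 master (VRF) slave index have to be recovered from skb->dev, as the comment at the IPv6 variant notes. A hedged sketch of how a GRO-time socket lookup gathers its keys; the in-tree UDP code feeds the same iif/sdif into __udp4_lib_lookup(), which is elided here, and the helper name is hypothetical.

#include <linux/ip.h>
#include <net/gro.h>

static void foo4_gro_lookup_keys(struct sk_buff *skb, __be32 *saddr,
				 __be32 *daddr, int *iif, int *sdif)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	/* skb_dst() is not valid here, so recover the input interface and
	 * the VRF (L3 slave) index from skb->dev.
	 */
	inet_get_iif_sdif(skb, iif, sdif);

	*saddr = iph->saddr;
	*daddr = iph->daddr;
}
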