Lines Matching defs:skb
115 #define NET_XMIT_DROP 0x01 /* skb dropped */
137 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
142 * Positive cases with an skb consumed by a driver:
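The two fragments above describe the transmit return-code contract: any return below NET_XMIT_MASK means the driver consumed the skb. A minimal sketch of an ndo_start_xmit() honouring that contract, assuming a hypothetical foo driver with hypothetical foo_tx_ring_has_room() and foo_post_to_hw() helpers:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical driver helpers, assumed for illustration only. */
bool foo_tx_ring_has_room(struct net_device *dev);
void foo_post_to_hw(struct net_device *dev, struct sk_buff *skb);

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (!foo_tx_ring_has_room(dev)) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;	/* skb NOT consumed; the stack requeues it */
	}

	foo_post_to_hw(dev, skb);	/* driver/hardware now own the buffer */
	return NETDEV_TX_OK;		/* rc < NET_XMIT_MASK: skb was consumed */
}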
312 int (*create) (struct sk_buff *skb, struct net_device *dev,
315 int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
321 __be16 (*parse_protocol)(const struct sk_buff *skb);
374 struct sk_buff *skb;
422 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
425 * case skb->dev was changed by rx_handler.
427 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
430 * special processing of the skb, prior to delivery to protocol handlers.
440 * do with the skb.
442 * If the rx_handler consumed the skb in some way, it should return
444 * the skb to be delivered in some other way.
446 * If the rx_handler changed skb->dev, to divert the skb to another
450 * If the rx_handler decides the skb should be ignored, it should return
451 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
452 * are registered on exact device (ptype->dev == skb->dev).
454 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
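The return-value contract described above is easiest to see in a handler body. A minimal sketch, assuming a hypothetical handler registered with netdev_rx_handler_register() and hypothetical foo_* predicates:

#include <linux/netdevice.h>

/* Hypothetical helpers, assumed for illustration only. */
bool foo_should_steal(const struct sk_buff *skb);
void foo_queue_for_userspace(struct sk_buff *skb);
bool foo_should_redirect(const struct sk_buff *skb);
struct net_device *foo_upper_dev(struct net_device *dev);
bool foo_only_exact_delivery(const struct sk_buff *skb);

static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (foo_should_steal(skb)) {
		foo_queue_for_userspace(skb);
		return RX_HANDLER_CONSUMED;	/* consumed, no further processing */
	}

	if (foo_should_redirect(skb)) {
		skb->dev = foo_upper_dev(skb->dev);	/* divert to another device */
		*pskb = skb;
		return RX_HANDLER_ANOTHER;	/* another round in the receive path */
	}

	if (foo_only_exact_delivery(skb))
		return RX_HANDLER_EXACT;	/* only ptype->dev == skb->dev handlers */

	return RX_HANDLER_PASS;			/* as if no rx_handler was called */
}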
880 struct sk_buff *skb,
1034 bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
1080 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
1089 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
1099 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
1198 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
1250 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
1264 * struct sk_buff *skb,
1294 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
1298 * entries to skb and update idx with the number of entries.
1306 * int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb,
1313 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
1355 * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
1356 * This function is used to get egress tunnel information for given skb.
1360 * This function is used to specify the headroom that the skb must
1361 * consider when allocating an skb during packet reception. Setting
1362 * appropriate rx headroom value allows avoiding skb head copy on
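A minimal sketch of the rx-headroom mechanism described above, assuming a hypothetical driver that stores the requested value in its private data and reserves it when building receive skbs:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_priv {			/* hypothetical private data */
	unsigned int rx_headroom;
};

static void foo_set_rx_headroom(struct net_device *dev, int new_hr)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* A negative value resets to this driver's default (none). */
	priv->rx_headroom = new_hr < 0 ? 0 : new_hr;
}

static struct sk_buff *foo_alloc_rx_skb(struct net_device *dev, unsigned int len)
{
	struct foo_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, len + priv->rx_headroom);
	if (skb)
		skb_reserve(skb, priv->rx_headroom);	/* avoids a head copy on forward */
	return skb;
}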
1415 netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
1417 netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
1421 struct sk_buff *skb,
1492 int vf, struct sk_buff *skb);
1532 const struct sk_buff *skb,
1542 struct sk_buff *skb,
1572 int (*ndo_fdb_dump)(struct sk_buff *skb,
1577 int (*ndo_fdb_get)(struct sk_buff *skb,
1591 struct sk_buff *skb,
1597 int (*ndo_bridge_getlink)(struct sk_buff *skb,
1623 struct sk_buff *skb);
1666 * release skb->dst
1696 * skb_headlen(skb) == 0 (data starts from frag0)
1875 * LL_MAX_HEADER instead to allocate the skb
2506 const struct sk_buff *skb)
2508 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2538 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
2541 struct sk_buff *skb,
2717 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
2720 struct sk_buff *skb);
2721 int (*gro_complete)(struct sk_buff *skb, int nhoff);
3079 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
3094 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
3096 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
3099 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev);
3100 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
3102 static inline int dev_queue_xmit(struct sk_buff *skb)
3104 return __dev_queue_xmit(skb, NULL);
3107 static inline int dev_queue_xmit_accel(struct sk_buff *skb,
3110 return __dev_queue_xmit(skb, sb_dev);
3113 static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
3117 ret = __dev_direct_xmit(skb, queue_id);
3119 kfree_skb(skb);
3137 struct sk_buff *skb,
3150 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
3158 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
3161 static inline int dev_parse_header(const struct sk_buff *skb,
3164 const struct net_device *dev = skb->dev;
3168 return dev->header_ops->parse(skb, haddr);
3171 static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
3173 const struct net_device *dev = skb->dev;
3177 return dev->header_ops->parse_protocol(skb);
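A minimal usage sketch of the header_ops wrappers above, assuming a hypothetical sender that pushes a link-layer header for an IPv4 payload and then hands the skb to dev_queue_xmit():

#include <linux/netdevice.h>
#include <linux/if_ether.h>

static int foo_send_ipv4(struct net_device *dev, struct sk_buff *skb,
			 const unsigned char *dest_mac)
{
	int err;

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/* Calls dev->header_ops->create() (eth_header() on Ethernet devices). */
	err = dev_hard_header(skb, dev, ETH_P_IP, dest_mac, dev->dev_addr, skb->len);
	if (err < 0)
		return err;

	return dev_queue_xmit(skb);
}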
3500 * skb of a batch.
3692 * @skb: sub queue buffer pointer
3697 struct sk_buff *skb)
3699 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
3849 void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason);
3850 void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason);
3859 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
3860 * replacing kfree_skb(skb)
3862 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
3863 * Typically used in place of consume_skb(skb) in TX completion path
3865 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
3866 * replacing kfree_skb(skb)
3868 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
3869 * and consumed a packet. Used in place of consume_skb(skb)
3871 static inline void dev_kfree_skb_irq(struct sk_buff *skb)
3873 dev_kfree_skb_irq_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
3876 static inline void dev_consume_skb_irq(struct sk_buff *skb)
3878 dev_kfree_skb_irq_reason(skb, SKB_CONSUMED);
3881 static inline void dev_kfree_skb_any(struct sk_buff *skb)
3883 dev_kfree_skb_any_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
3886 static inline void dev_consume_skb_any(struct sk_buff *skb)
3888 dev_kfree_skb_any_reason(skb, SKB_CONSUMED);
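The rules quoted above map directly onto a TX-completion handler. A minimal sketch, assuming a hypothetical driver whose completion routine runs in hard-IRQ context and uses hypothetical foo_* ring accessors:

#include <linux/netdevice.h>

/* Hypothetical ring accessors, assumed for illustration only. */
struct sk_buff *foo_next_completed_skb(struct net_device *dev);
bool foo_tx_had_error(const struct sk_buff *skb);

static void foo_clean_tx_irq(struct net_device *dev)
{
	struct sk_buff *skb;

	while ((skb = foo_next_completed_skb(dev)) != NULL) {
		if (foo_tx_had_error(skb))
			dev_kfree_skb_irq(skb);		/* dropped: visible to drop monitoring */
		else
			dev_consume_skb_irq(skb);	/* delivered: a normal consume */
	}

	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);			/* ring space was reclaimed */
}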
3891 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
3893 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
3894 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
3895 int netif_rx(struct sk_buff *skb);
3896 int __netif_rx(struct sk_buff *skb);
3898 int netif_receive_skb(struct sk_buff *skb);
3899 int netif_receive_skb_core(struct sk_buff *skb);
3902 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
3912 kfree_skb(napi->skb);
3913 napi->skb = NULL;
3965 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
3966 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
3973 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3974 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3975 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb);
3977 const struct sk_buff *skb);
3980 const struct sk_buff *skb,
3993 if (skb->len <= len)
3999 if (skb_is_gso(skb))
4033 struct sk_buff *skb,
4036 if (skb_orphan_frags(skb, GFP_ATOMIC) ||
4037 unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) {
4039 kfree_skb(skb);
4043 skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
4044 skb->priority = 0;
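The forwarding helpers above are typically used by paired virtual devices. A minimal sketch, assuming a hypothetical veth-like pair with a hypothetical foo_get_peer() lookup:

#include <linux/netdevice.h>

struct net_device *foo_get_peer(struct net_device *dev);	/* hypothetical */

static netdev_tx_t foo_pair_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *peer = foo_get_peer(dev);

	if (!peer) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* dev_forward_skb() checks forwardability, scrubs the skb and injects
	 * it into the peer's receive path; on failure it frees the skb itself. */
	if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
		dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}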
4049 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
4830 int skb_checksum_help(struct sk_buff *skb);
4831 int skb_crc32c_csum_help(struct sk_buff *skb);
4832 int skb_csum_hwoffload_help(struct sk_buff *skb,
4857 __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
4883 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
4886 struct sk_buff *skb)
4890 /* rx skb timestamps */
4907 struct sk_buff *skb, struct net_device *dev,
4911 return ops->ndo_start_xmit(skb, dev);
4919 static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
4925 rc = __netdev_start_xmit(ops, skb, dev, more);
4979 netdev_features_t passthru_features_check(struct sk_buff *skb,
4982 netdev_features_t netif_skb_features(struct sk_buff *skb);
4983 void skb_warn_bad_offload(const struct sk_buff *skb);
5013 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
5015 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
5016 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
5019 static inline bool netif_needs_gso(struct sk_buff *skb,
5022 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
5023 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
5024 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
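A minimal sketch of how netif_needs_gso() is typically used on the transmit path, assuming software fallback via skb_gso_segment() (declared in net/gso.h on recent kernels) when the device cannot offload the skb:

#include <linux/err.h>
#include <linux/netdevice.h>
#include <net/gso.h>

/* Returns the skb (or a segment list) ready for the device, or NULL on error. */
static struct sk_buff *foo_prepare_xmit(struct sk_buff *skb, struct net_device *dev)
{
	netdev_features_t features = netif_skb_features(skb);

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = skb_gso_segment(skb, features);

		if (IS_ERR(segs)) {
			kfree_skb(skb);
			return NULL;
		}
		if (segs) {
			consume_skb(skb);	/* original replaced by the segment list */
			skb = segs;
		}
	}
	return skb;
}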
5151 /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */