Lines Matching defs:skb in net/core/dev.c
134 #include <trace/events/skb.h>
164 static int netif_rx_internal(struct sk_buff *skb);
664 * @skb: The packet.
670 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
677 info = skb_tunnel_info_unclone(skb);
683 return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
2158 static inline void net_timestamp_set(struct sk_buff *skb)
2160 skb->tstamp = 0;
2161 skb->mono_delivery_time = 0;
2163 skb->tstamp = ktime_get_real();
2172 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
2174 return __is_skb_forwardable(dev, skb, true);
2178 static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb,
2181 int ret = ____dev_forward_skb(dev, skb, check_mtu);
2184 skb->protocol = eth_type_trans(skb, dev);
2185 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
2191 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2193 return __dev_forward_skb2(dev, skb, true);
2198 * dev_forward_skb - loopback an skb to another netif
2201 * @skb: buffer to forward
2207 * dev_forward_skb can be used for injecting an skb from the
2212 * we have to clear all information in the skb that could
2215 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2217 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
2221 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
2223 return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb);
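
The dev_forward_skb()/dev_forward_skb_nomtu() hits above are the helpers a virtual driver uses to inject a frame into a peer device's receive path: the skb is scrubbed, checked against the peer's MTU, and then handed to netif_rx_internal(). A minimal sketch of how a hypothetical paired device could use this from its ndo_start_xmit follows; pairdev_priv, pairdev_xmit and the peer pointer are illustrative and not taken from dev.c.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct pairdev_priv {				/* hypothetical private data */
	struct net_device __rcu *peer;		/* other end of the pair */
};

static netdev_tx_t pairdev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pairdev_priv *priv = netdev_priv(dev);
	struct net_device *peer;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer)) {
		rcu_read_unlock();
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* dev_forward_skb() scrubs the skb, checks the peer's MTU and feeds
	 * it to netif_rx_internal(); the skb is consumed on both success and
	 * failure, so it must not be touched afterwards.
	 */
	if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
		dev->stats.tx_dropped++;
	rcu_read_unlock();

	return NETDEV_TX_OK;
}
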
2226 static inline int deliver_skb(struct sk_buff *skb,
2230 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
2232 refcount_inc(&skb->users);
2233 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2236 static inline void deliver_ptype_list_skb(struct sk_buff *skb,
2248 deliver_skb(skb, pt_prev, orig_dev);
2254 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
2256 if (!ptype->af_packet_priv || !skb->sk)
2260 return ptype->id_match(ptype, skb->sk);
2261 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
2283 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
2299 if (skb_loop_sk(ptype, skb))
2303 deliver_skb(skb2, pt_prev, skb->dev);
2308 /* need to clone skb, done only once */
2309 skb2 = skb_clone(skb, GFP_ATOMIC);
2315 /* skb->nh should be correctly
2341 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
3045 * @size: max skb->len of a TSO frame
3140 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
3142 return (struct dev_kfree_skb_cb *)skb->cb;
3170 void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason)
3174 if (unlikely(!skb))
3177 if (likely(refcount_read(&skb->users) == 1)) {
3179 refcount_set(&skb->users, 0);
3180 } else if (likely(!refcount_dec_and_test(&skb->users))) {
3183 get_kfree_skb_cb(skb)->reason = reason;
3185 skb->next = __this_cpu_read(softnet_data.completion_queue);
3186 __this_cpu_write(softnet_data.completion_queue, skb);
3192 void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason)
3195 dev_kfree_skb_irq_reason(skb, reason);
3197 kfree_skb_reason(skb, reason);
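
dev_kfree_skb_irq_reason() parks the skb on softnet_data.completion_queue for net_tx_action() (the completion-queue drain shown further down in this listing) to free later, while dev_kfree_skb_any_reason() picks between that and an immediate kfree_skb_reason() depending on context. Drivers normally reach these through the dev_consume_skb_any()/dev_kfree_skb_any() wrappers, for example from a TX-completion handler that may run in hard-IRQ context. The ring structure and function below are an illustrative sketch only.

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct mydev_tx_ring {				/* illustrative TX ring state */
	struct sk_buff *queued[256];
	unsigned int tail;
};

static void mydev_clean_tx(struct mydev_tx_ring *ring, unsigned int done)
{
	while (done--) {
		struct sk_buff *skb = ring->queued[ring->tail];

		ring->queued[ring->tail] = NULL;
		ring->tail = (ring->tail + 1) % ARRAY_SIZE(ring->queued);

		/* Safe in any context: in hard IRQ (or with IRQs off) the
		 * skb goes onto the per-CPU completion_queue and is freed
		 * from net_tx_action(); otherwise it is freed right away.
		 */
		dev_consume_skb_any(skb);
	}
}
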
3239 struct sk_buff *skb)
3246 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
3258 if (skb_rx_queue_recorded(skb)) {
3260 hash = skb_get_rx_queue(skb);
3268 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
3271 void skb_warn_bad_offload(const struct sk_buff *skb)
3274 struct net_device *dev = skb->dev;
3286 skb_dump(KERN_WARNING, skb, false);
3289 skb->sk ? &skb->sk->sk_route_caps : &null_features);
3296 int skb_checksum_help(struct sk_buff *skb)
3301 if (skb->ip_summed == CHECKSUM_COMPLETE)
3304 if (unlikely(skb_is_gso(skb))) {
3305 skb_warn_bad_offload(skb);
3312 if (skb_has_shared_frag(skb)) {
3313 ret = __skb_linearize(skb);
3318 offset = skb_checksum_start_offset(skb);
3320 if (unlikely(offset >= skb_headlen(skb))) {
3321 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
3323 offset, skb_headlen(skb));
3326 csum = skb_checksum(skb, offset, skb->len - offset, 0);
3328 offset += skb->csum_offset;
3329 if (unlikely(offset + sizeof(__sum16) > skb_headlen(skb))) {
3330 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
3332 offset + sizeof(__sum16), skb_headlen(skb));
3335 ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
3339 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
3341 skb->ip_summed = CHECKSUM_NONE;
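
skb_checksum_help() is the software fallback for CHECKSUM_PARTIAL packets: it computes the checksum from csum_start/csum_offset, writes it into the packet, and downgrades ip_summed to CHECKSUM_NONE. A driver whose hardware cannot offload a particular packet can call it before queueing the frame. The sketch below is illustrative, with mydev_hw_can_csum() standing in for whatever capability check the hardware actually needs.

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative capability check: assume the NIC only checksums plain,
 * non-encapsulated IPv4 packets.
 */
static bool mydev_hw_can_csum(const struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP) && !skb->encapsulation;
}

/* Resolve the checksum in software when offload is not possible.
 * Returns 0 on success or a negative errno from skb_checksum_help().
 */
static int mydev_tx_csum_fallback(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;			/* no checksum requested */
	if (mydev_hw_can_csum(skb))
		return 0;			/* hardware will do it */
	return skb_checksum_help(skb);
}
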
3347 int skb_crc32c_csum_help(struct sk_buff *skb)
3352 if (skb->ip_summed != CHECKSUM_PARTIAL)
3355 if (unlikely(skb_is_gso(skb)))
3361 if (unlikely(skb_has_shared_frag(skb))) {
3362 ret = __skb_linearize(skb);
3366 start = skb_checksum_start_offset(skb);
3368 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
3373 ret = skb_ensure_writable(skb, offset + sizeof(__le32));
3377 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
3378 skb->len - start, ~(__u32)0,
3380 *(__le32 *)(skb->data + offset) = crc32c_csum;
3381 skb_reset_csum_not_inet(skb);
3386 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
3388 __be16 type = skb->protocol;
3394 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
3397 eth = (struct ethhdr *)skb->data;
3401 return vlan_get_protocol_and_depth(skb, type, depth);
3407 static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3410 skb_dump(KERN_ERR, skb, true);
3414 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3416 DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb);
3422 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
3428 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3429 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3443 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3448 features &= skb->dev->mpls_features;
3453 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3461 static netdev_features_t harmonize_features(struct sk_buff *skb,
3466 type = skb_network_protocol(skb, NULL);
3467 features = net_mpls_features(skb, features, type);
3469 if (skb->ip_summed != CHECKSUM_NONE &&
3473 if (illegal_highdma(skb->dev, skb))
3479 netdev_features_t passthru_features_check(struct sk_buff *skb,
3487 static netdev_features_t dflt_features_check(struct sk_buff *skb,
3491 return vlan_features_check(skb, features);
3494 static netdev_features_t gso_features_check(const struct sk_buff *skb,
3498 u16 gso_segs = skb_shinfo(skb)->gso_segs;
3503 if (unlikely(skb->len >= READ_ONCE(dev->gso_max_size)))
3506 if (!skb_shinfo(skb)->gso_type) {
3507 skb_warn_bad_offload(skb);
3517 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3523 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3524 struct iphdr *iph = skb->encapsulation ?
3525 inner_ip_hdr(skb) : ip_hdr(skb);
3534 netdev_features_t netif_skb_features(struct sk_buff *skb)
3536 struct net_device *dev = skb->dev;
3539 if (skb_is_gso(skb))
3540 features = gso_features_check(skb, dev, features);
3546 if (skb->encapsulation)
3549 if (skb_vlan_tagged(skb))
3556 features &= dev->netdev_ops->ndo_features_check(skb, dev,
3559 features &= dflt_features_check(skb, dev, features);
3561 return harmonize_features(skb, features);
3565 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
3572 dev_queue_xmit_nit(skb, dev);
3574 len = skb->len;
3575 trace_net_dev_start_xmit(skb, dev);
3576 rc = netdev_start_xmit(skb, dev, txq, more);
3577 trace_net_dev_xmit(skb, rc, dev, len);
3585 struct sk_buff *skb = first;
3588 while (skb) {
3589 struct sk_buff *next = skb->next;
3591 skb_mark_not_on_list(skb);
3592 rc = xmit_one(skb, dev, txq, next != NULL);
3594 skb->next = next;
3598 skb = next;
3599 if (netif_tx_queue_stopped(txq) && skb) {
3607 return skb;
3610 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3613 if (skb_vlan_tag_present(skb) &&
3614 !vlan_hw_offload_capable(features, skb->vlan_proto))
3615 skb = __vlan_hwaccel_push_inside(skb);
3616 return skb;
3619 int skb_csum_hwoffload_help(struct sk_buff *skb,
3622 if (unlikely(skb_csum_is_sctp(skb)))
3624 skb_crc32c_csum_help(skb);
3630 switch (skb->csum_offset) {
3637 return skb_checksum_help(skb);
3641 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
3645 features = netif_skb_features(skb);
3646 skb = validate_xmit_vlan(skb, features);
3647 if (unlikely(!skb))
3650 skb = sk_validate_xmit_skb(skb, dev);
3651 if (unlikely(!skb))
3654 if (netif_needs_gso(skb, features)) {
3657 segs = skb_gso_segment(skb, features);
3661 consume_skb(skb);
3662 skb = segs;
3665 if (skb_needs_linearize(skb, features) &&
3666 __skb_linearize(skb))
3673 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3674 if (skb->encapsulation)
3675 skb_set_inner_transport_header(skb,
3676 skb_checksum_start_offset(skb));
3678 skb_set_transport_header(skb,
3679 skb_checksum_start_offset(skb));
3680 if (skb_csum_hwoffload_help(skb, features))
3685 skb = validate_xmit_xfrm(skb, features, again);
3687 return skb;
3690 kfree_skb(skb);
3696 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
3700 for (; skb != NULL; skb = next) {
3701 next = skb->next;
3702 skb_mark_not_on_list(skb);
3704 /* in case skb won't be segmented, point to itself */
3705 skb->prev = skb;
3707 skb = validate_xmit_skb(skb, dev, again);
3708 if (!skb)
3712 head = skb;
3714 tail->next = skb;
3715 /* If skb was segmented, skb->prev points to
3716 * the last segment. If not, it still contains skb.
3718 tail = skb->prev;
3724 static void qdisc_pkt_len_init(struct sk_buff *skb)
3726 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3728 qdisc_skb_cb(skb)->pkt_len = skb->len;
3733 if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
3738 hdr_len = skb_transport_offset(skb);
3745 th = skb_header_pointer(skb, hdr_len,
3752 if (skb_header_pointer(skb, hdr_len,
3758 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3761 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3765 static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
3771 rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
3773 trace_qdisc_enqueue(q, txq, skb);
3777 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3786 qdisc_calculate_pkt_len(skb, q);
3795 rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3802 qdisc_bstats_cpu_update(q, skb);
3803 if (sch_direct_xmit(skb, q, dev, txq, NULL, true) &&
3811 rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3837 __qdisc_drop(skb, &to_free);
3844 * xmit the skb directly.
3847 qdisc_bstats_update(q, skb);
3849 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3860 rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3879 static void skb_update_prio(struct sk_buff *skb)
3885 if (skb->priority)
3887 map = rcu_dereference_bh(skb->dev->priomap);
3890 sk = skb_to_full_sk(skb);
3897 skb->priority = map->priomap[prioidx];
3900 #define skb_update_prio(skb)
3904 * dev_loopback_xmit - loop back @skb
3907 * @skb: buffer to transmit
3909 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3911 skb_reset_mac_header(skb);
3912 __skb_pull(skb, skb_network_offset(skb));
3913 skb->pkt_type = PACKET_LOOPBACK;
3914 if (skb->ip_summed == CHECKSUM_NONE)
3915 skb->ip_summed = CHECKSUM_UNNECESSARY;
3916 DEBUG_NET_WARN_ON_ONCE(!skb_dst(skb));
3917 skb_dst_force(skb);
3918 netif_rx(skb);
3925 netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb)
3927 int qm = skb_get_queue_mapping(skb);
3945 static int tc_run(struct tcx_entry *entry, struct sk_buff *skb)
3955 tc_skb_cb(skb)->mru = 0;
3956 tc_skb_cb(skb)->post_ct = false;
3958 mini_qdisc_bstats_cpu_update(miniq, skb);
3959 ret = tcf_classify(skb, miniq->block, miniq->filter_list, &res, false);
3967 skb->tc_index = TC_H_MIN(res.classid);
3987 tcx_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb,
3995 __skb_push(skb, skb->mac_len);
3997 bpf_compute_data_pointers(skb);
3998 ret = bpf_prog_run(prog, skb);
4003 __skb_pull(skb, skb->mac_len);
4004 return tcx_action_code(skb, ret);
4008 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4011 struct bpf_mprog_entry *entry = rcu_dereference_bh(skb->dev->tcx_ingress);
4015 return skb;
4017 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4021 qdisc_skb_cb(skb)->pkt_len = skb->len;
4022 tcx_set_ingress(skb, true);
4025 sch_ret = tcx_run(entry, skb, true);
4029 sch_ret = tc_run(tcx_entry(entry), skb);
4037 __skb_push(skb, skb->mac_len);
4038 if (skb_do_redirect(skb) == -EAGAIN) {
4039 __skb_pull(skb, skb->mac_len);
4046 kfree_skb_reason(skb, SKB_DROP_REASON_TC_INGRESS);
4053 consume_skb(skb);
4060 return skb;
4064 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
4070 return skb;
4072 /* qdisc_skb_cb(skb)->pkt_len & tcx_set_ingress() was
4076 sch_ret = tcx_run(entry, skb, false);
4080 sch_ret = tc_run(tcx_entry(entry), skb);
4084 /* No need to push/pop skb's mac_header here on egress! */
4085 skb_do_redirect(skb);
4089 kfree_skb_reason(skb, SKB_DROP_REASON_TC_EGRESS);
4096 consume_skb(skb);
4103 return skb;
4107 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4110 return skb;
4114 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
4116 return skb;
4121 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
4124 int tc = netdev_get_prio_tc_map(dev, skb->priority);
4140 skb_get_hash(skb), map->len)];
4149 struct sk_buff *skb)
4153 struct sock *sk = skb->sk;
4168 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4176 unsigned int tci = skb->sender_cpu - 1;
4178 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4190 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
4197 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
4204 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
4207 struct sock *sk = skb->sk;
4212 if (queue_index < 0 || skb->ooo_okay ||
4214 int new_index = get_xps_queue(dev, sb_dev, skb);
4217 new_index = skb_tx_hash(dev, sb_dev, skb);
4232 struct sk_buff *skb,
4238 u32 sender_cpu = skb->sender_cpu - 1;
4241 skb->sender_cpu = raw_smp_processor_id() + 1;
4248 queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
4250 queue_index = netdev_pick_tx(dev, skb, sb_dev);
4255 skb_set_queue_mapping(skb, queue_index);
4261 * @skb: buffer to transmit
4271 * Regardless of the return value, the skb is consumed, so it is currently
4280 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
4282 struct net_device *dev = skb->dev;
4288 skb_reset_mac_header(skb);
4289 skb_assert_len(skb);
4291 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
4292 __skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);
4299 skb_update_prio(skb);
4301 qdisc_pkt_len_init(skb);
4302 tcx_set_ingress(skb, false);
4306 skb = nf_hook_egress(skb, &rc, dev);
4307 if (!skb)
4313 nf_skip_egress(skb, true);
4314 skb = sch_handle_egress(skb, &rc, dev);
4315 if (!skb)
4317 nf_skip_egress(skb, false);
4320 txq = netdev_tx_queue_mapping(dev, skb);
4323 /* If device/qdisc don't need skb->dst, release it right now while
4327 skb_dst_drop(skb);
4329 skb_dst_force(skb);
4332 txq = netdev_core_pick_tx(dev, skb, sb_dev);
4336 trace_net_dev_queue(skb);
4338 rc = __dev_xmit_skb(skb, q, dev, txq);
4364 skb = validate_xmit_skb(skb, dev, &again);
4365 if (!skb)
4372 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
4396 kfree_skb_list(skb);
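
The __dev_queue_xmit() hits above cover the core transmit entry point; as the kernel-doc fragment above notes, the skb is consumed regardless of the return value, so callers must never touch it after the call. A self-contained sketch of a caller that builds a raw Ethernet frame and hands it to dev_queue_xmit() is shown below; mydev_send_frame() and the use of the local-experimental ethertype ETH_P_802_EX1 are illustrative, not part of dev.c.

#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int mydev_send_frame(struct net_device *dev, const u8 *dst_mac,
			    const void *payload, unsigned int len)
{
	struct sk_buff *skb;
	struct ethhdr *eth;

	skb = netdev_alloc_skb(dev, ETH_HLEN + len);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, ETH_HLEN);		/* room for the MAC header */
	skb_put_data(skb, payload, len);	/* copy the payload in */

	eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
	ether_addr_copy(eth->h_dest, dst_mac);
	ether_addr_copy(eth->h_source, dev->dev_addr);
	eth->h_proto = htons(ETH_P_802_EX1);	/* local experimental type */

	skb->dev = dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto;

	/* Goes through the device's qdisc (__dev_xmit_skb() above) or, for
	 * devices without a real qdisc (noqueue), straight to
	 * dev_hard_start_xmit().  The skb is consumed whatever the
	 * NET_XMIT_* return value is.
	 */
	return dev_queue_xmit(skb);
}
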
4404 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
4406 struct net_device *dev = skb->dev;
4407 struct sk_buff *orig_skb = skb;
4416 skb = validate_xmit_skb_list(skb, dev, &again);
4417 if (skb != orig_skb)
4420 skb_set_queue_mapping(skb, queue_id);
4421 txq = skb_get_tx_queue(dev, skb);
4428 ret = netdev_start_xmit(skb, dev, txq, false);
4436 kfree_skb_list(skb);
4511 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4524 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
4528 if (rxq_index == skb_get_rx_queue(skb))
4535 flow_id = skb_get_hash(skb) & flow_table->mask;
4536 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
4557 * CPU from the RPS map of the receiving queue for a given skb.
4560 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4571 if (skb_rx_queue_recorded(skb)) {
4572 u16 index = skb_get_rx_queue(skb);
4591 skb_reset_network_header(skb);
4592 hash = skb_get_hash(skb);
4633 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
4751 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
4766 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
4788 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
4791 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4803 if (!netif_running(skb->dev))
4806 if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) {
4809 __skb_queue_tail(&sd->input_pkt_queue, skb);
4828 dev_core_stats_rx_dropped_inc(skb->dev);
4829 kfree_skb_reason(skb, reason);
4833 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
4835 struct net_device *dev = skb->dev;
4840 if (skb_rx_queue_recorded(skb)) {
4841 u16 index = skb_get_rx_queue(skb);
4856 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
4871 mac_len = skb->data - skb_mac_header(skb);
4872 hard_start = skb->data - skb_headroom(skb);
4875 frame_sz = (void *)skb_end_pointer(skb) - hard_start;
4878 rxqueue = netif_get_rxqueue(skb);
4880 xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
4881 skb_headlen(skb) + mac_len, true);
4886 orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr);
4896 __skb_pull(skb, off);
4898 __skb_push(skb, -off);
4900 skb->mac_header += off;
4901 skb_reset_network_header(skb);
4907 skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
4908 skb->len += off; /* positive on grow, negative on shrink */
4915 skb->dev->dev_addr)) ||
4917 __skb_push(skb, ETH_HLEN);
4918 skb->pkt_type = PACKET_HOST;
4919 skb->protocol = eth_type_trans(skb, skb->dev);
4922 /* Redirect/Tx gives L2 packet, code that will reuse skb must __skb_pull
4926 * Caller is responsible for managing lifetime of skb (i.e. calling
4932 __skb_push(skb, mac_len);
4937 skb_metadata_set(skb, metalen);
4944 static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4953 if (skb_is_redirected(skb))
4960 if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
4961 skb_headroom(skb) < XDP_PACKET_HEADROOM) {
4962 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
4963 int troom = skb->tail + skb->data_len - skb->end;
4968 if (pskb_expand_head(skb,
4972 if (skb_linearize(skb))
4976 act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog);
4983 bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act);
4986 trace_xdp_exception(skb->dev, xdp_prog, act);
4990 kfree_skb(skb);
5003 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
5005 struct net_device *dev = skb->dev;
5010 txq = netdev_core_pick_tx(dev, skb, NULL);
5014 rc = netdev_start_xmit(skb, dev, txq, 0);
5022 kfree_skb(skb);
5028 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
5035 act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
5039 err = xdp_do_generic_redirect(skb->dev, skb,
5045 generic_xdp_tx(skb, xdp_prog);
5053 kfree_skb_reason(skb, SKB_DROP_REASON_XDP);
5058 static int netif_rx_internal(struct sk_buff *skb)
5062 net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
5064 trace_netif_rx(skb);
5073 cpu = get_rps_cpu(skb->dev, skb, &rflow);
5077 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5085 ret = enqueue_to_backlog(skb, smp_processor_id(), &qtail);
5092 * @skb: buffer to post
5098 int __netif_rx(struct sk_buff *skb)
5104 trace_netif_rx_entry(skb);
5105 ret = netif_rx_internal(skb);
5113 * @skb: buffer to post
5130 int netif_rx(struct sk_buff *skb)
5137 trace_netif_rx_entry(skb);
5138 ret = netif_rx_internal(skb);
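
netif_rx() only queues the skb onto the per-CPU backlog via enqueue_to_backlog(), and in this kernel it may be called from any context (__netif_rx() is the variant for callers already running in hard or soft interrupt context). A hypothetical legacy, non-NAPI receive routine might look like the sketch below; mydev_rx_frame() and the stats handling are illustrative only.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void mydev_rx_frame(struct net_device *dev, const void *buf,
			   unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(dev, len);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	skb_put_data(skb, buf, len);		/* frame incl. MAC header */
	skb->protocol = eth_type_trans(skb, dev); /* sets dev and pkt_type */

	/* Queues on the backlog (or the RPS-selected CPU) and returns
	 * NET_RX_SUCCESS or NET_RX_DROP; the skb is consumed either way.
	 */
	if (netif_rx(skb) == NET_RX_DROP)
		dev->stats.rx_dropped++;
	else
		dev->stats.rx_packets++;
}
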
5159 struct sk_buff *skb = clist;
5163 WARN_ON(refcount_read(&skb->users));
5164 if (likely(get_kfree_skb_cb(skb)->reason == SKB_CONSUMED))
5165 trace_consume_skb(skb, net_tx_action);
5167 trace_kfree_skb(skb, net_tx_action,
5168 get_kfree_skb_cb(skb)->reason);
5170 if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
5171 __kfree_skb(skb);
5173 __napi_kfree_skb(skb,
5174 get_kfree_skb_cb(skb)->reason);
5310 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
5312 switch (skb->protocol) {
5324 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
5327 if (nf_hook_ingress_active(skb)) {
5331 *ret = deliver_skb(skb, *pt_prev, orig_dev);
5336 ingress_retval = nf_hook_ingress(skb);
5348 struct sk_buff *skb = *pskb;
5354 net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb);
5356 trace_netif_receive_skb(skb);
5358 orig_dev = skb->dev;
5360 skb_reset_network_header(skb);
5361 if (!skb_transport_header_was_set(skb))
5362 skb_reset_transport_header(skb);
5363 skb_reset_mac_len(skb);
5368 skb->skb_iif = skb->dev->ifindex;
5376 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
5385 if (eth_type_vlan(skb->protocol)) {
5386 skb = skb_vlan_untag(skb);
5387 if (unlikely(!skb))
5391 if (skb_skip_tc_classify(skb))
5399 ret = deliver_skb(skb, pt_prev, orig_dev);
5403 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
5405 ret = deliver_skb(skb, pt_prev, orig_dev);
5414 nf_skip_egress(skb, true);
5415 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev,
5419 if (!skb)
5422 nf_skip_egress(skb, false);
5423 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
5427 skb_reset_redirect(skb);
5429 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
5432 if (skb_vlan_tag_present(skb)) {
5434 ret = deliver_skb(skb, pt_prev, orig_dev);
5437 if (vlan_do_receive(&skb))
5439 else if (unlikely(!skb))
5443 rx_handler = rcu_dereference(skb->dev->rx_handler);
5446 ret = deliver_skb(skb, pt_prev, orig_dev);
5449 switch (rx_handler(&skb)) {
5465 if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) {
5467 if (skb_vlan_tag_get_id(skb)) {
5471 skb->pkt_type = PACKET_OTHERHOST;
5472 } else if (eth_type_vlan(skb->protocol)) {
5477 __vlan_hwaccel_clear_tag(skb);
5478 skb = skb_vlan_untag(skb);
5479 if (unlikely(!skb))
5481 if (vlan_do_receive(&skb))
5486 else if (unlikely(!skb))
5496 * and set skb->priority like in vlan_do_receive()
5499 __vlan_hwaccel_clear_tag(skb);
5502 type = skb->protocol;
5506 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5511 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5514 if (unlikely(skb->dev != orig_dev)) {
5515 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5516 &skb->dev->ptype_specific);
5520 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
5526 dev_core_stats_rx_dropped_inc(skb->dev);
5528 dev_core_stats_rx_nohandler_inc(skb->dev);
5529 kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO);
5538 * then skb should also be non-NULL.
5541 * skb dereferencing near it.
5543 *pskb = skb;
5547 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
5549 struct net_device *orig_dev = skb->dev;
5553 ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5555 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
5556 skb->dev, pt_prev, orig_dev);
5562 * @skb: buffer to process
5575 int netif_receive_skb_core(struct sk_buff *skb)
5580 ret = __netif_receive_skb_one_core(skb, false);
5591 struct sk_buff *skb, *next;
5601 list_for_each_entry_safe(skb, next, head, list) {
5602 skb_list_del_init(skb);
5603 pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
5623 struct sk_buff *skb, *next;
5626 list_for_each_entry_safe(skb, next, head, list) {
5627 struct net_device *orig_dev = skb->dev;
5630 skb_list_del_init(skb);
5631 __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5642 list_add_tail(&skb->list, &sublist);
5649 static int __netif_receive_skb(struct sk_buff *skb)
5653 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
5666 ret = __netif_receive_skb_one_core(skb, true);
5669 ret = __netif_receive_skb_one_core(skb, false);
5677 struct sk_buff *skb, *next;
5680 list_for_each_entry_safe(skb, next, head, list) {
5681 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
5685 list_cut_before(&sublist, head, &skb->list);
5733 static int netif_receive_skb_internal(struct sk_buff *skb)
5737 net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
5739 if (skb_defer_rx_timestamp(skb))
5746 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5749 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5755 ret = __netif_receive_skb(skb);
5762 struct sk_buff *skb, *next;
5766 list_for_each_entry_safe(skb, next, head, list) {
5767 net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
5768 skb_list_del_init(skb);
5769 if (!skb_defer_rx_timestamp(skb))
5770 list_add_tail(&skb->list, &sublist);
5777 list_for_each_entry_safe(skb, next, head, list) {
5779 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5783 skb_list_del_init(skb);
5784 enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5795 * @skb: buffer to process
5808 int netif_receive_skb(struct sk_buff *skb)
5812 trace_netif_receive_skb_entry(skb);
5814 ret = netif_receive_skb_internal(skb);
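
netif_receive_skb() runs the full protocol processing synchronously in softirq/BH context, in contrast to netif_rx() above, which defers to the backlog; it is the entry point a NAPI driver uses from its poll callback (in practice most drivers call napi_gro_receive(), which feeds GRO before landing in the same path). A sketch of a hypothetical poll function follows; mydev_poll() and the mydev_pull_rx_skb() stub are illustrative stand-ins for the driver's ring handling.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative stub: dequeue the next completed RX buffer, NULL when the
 * ring is empty.  A real driver would unmap and refill descriptors here.
 */
static struct sk_buff *mydev_pull_rx_skb(struct net_device *dev)
{
	return NULL;
}

static int mydev_poll(struct napi_struct *napi, int budget)
{
	struct net_device *dev = napi->dev;
	int work = 0;

	while (work < budget) {
		struct sk_buff *skb = mydev_pull_rx_skb(dev);

		if (!skb)
			break;

		skb->protocol = eth_type_trans(skb, dev);
		netif_receive_skb(skb);		/* full RX path, runs now */
		work++;
	}

	if (work < budget)
		napi_complete_done(napi, work);

	return work;
}
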
5833 struct sk_buff *skb;
5838 list_for_each_entry(skb, head, list)
5839 trace_netif_receive_skb_list_entry(skb);
5851 struct sk_buff *skb, *tmp;
5858 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
5859 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5860 __skb_unlink(skb, &sd->input_pkt_queue);
5861 dev_kfree_skb_irq(skb);
5867 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
5868 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5869 __skb_unlink(skb, &sd->process_queue);
5870 kfree_skb(skb);
5993 struct sk_buff *skb;
5995 while ((skb = __skb_dequeue(&sd->process_queue))) {
5997 __netif_receive_skb(skb);
6440 napi->skb = NULL;
6517 struct sk_buff *skb, *n;
6519 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
6520 kfree_skb(skb);
6666 struct sk_buff *skb, *next;
6673 skb = sd->defer_list;
6678 while (skb != NULL) {
6679 next = skb->next;
6680 napi_consume_skb(skb, 1);
6681 skb = next;
8225 * @skb: The packet
8234 struct sk_buff *skb,
8241 return ops->ndo_get_xmit_slave(dev, skb, all_slaves);
9271 /* just one XDP mode bit should be set, zero defaults to drv/skb mode */
9276 /* avoid ambiguity if offload + drv/skb mode progs are both loaded */
11015 struct sk_buff *skb = NULL;
11032 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
11050 if (skb)
11051 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh);
11266 struct sk_buff *skb;
11317 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
11318 netif_rx(skb);
11321 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
11322 netif_rx(skb);