Lines Matching refs:skb

22 const struct sk_buff *skb)
27 return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
28 p->state == BR_STATE_FORWARDING && br_allowed_egress(vg, skb) &&
29 nbp_switchdev_allowed_egress(p, skb) &&
30 !br_skb_isolated(p, skb);
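
Taken together, matches 22-30 form the per-port egress predicate. The fragments read like net/bridge/br_forward.c in the Linux kernel; below is a sketch of how they fit, with the enclosing name should_deliver() and the vlan-group lookup filled in from context and therefore assumptions rather than matched output:

static inline int should_deliver(const struct net_bridge_port *p,
				 const struct sk_buff *skb)
{
	struct net_bridge_vlan_group *vg;

	/* assumed from context: the port's vlan group, under RCU */
	vg = nbp_vlan_group_rcu(p);
	/* transmit only if: hairpin mode or a different ingress port,
	 * the port is in forwarding state, vlan and switchdev egress
	 * allow it, and the two ports are not isolated from each other
	 */
	return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
		p->state == BR_STATE_FORWARDING && br_allowed_egress(vg, skb) &&
		nbp_switchdev_allowed_egress(p, skb) &&
		!br_skb_isolated(p, skb);
}
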
33 int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
35 skb_push(skb, ETH_HLEN);
36 if (!is_skb_forwardable(skb->dev, skb))
39 br_drop_fake_rtable(skb);
41 if (skb->ip_summed == CHECKSUM_PARTIAL &&
42 eth_type_vlan(skb->protocol)) {
45 if (!vlan_get_protocol_and_depth(skb, skb->protocol, &depth))
48 skb_set_network_header(skb, depth);
51 br_switchdev_frame_set_offload_fwd_mark(skb);
53 dev_queue_xmit(skb);
58 kfree_skb(skb);
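
Matches 33-58 span almost all of br_dev_queue_push_xmit(). A sketch; the drop label and the return values are assumptions inferred from the kfree_skb() match at line 58:

int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	/* restore the Ethernet header consumed on rx */
	skb_push(skb, ETH_HLEN);
	if (!is_skb_forwardable(skb->dev, skb))
		goto drop;	/* assumed label */

	br_drop_fake_rtable(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    eth_type_vlan(skb->protocol)) {
		int depth;

		/* re-point the network header past the vlan tags so
		 * checksum offload finds the right offset
		 */
		if (!vlan_get_protocol_and_depth(skb, skb->protocol, &depth))
			goto drop;

		skb_set_network_header(skb, depth);
	}

	br_switchdev_frame_set_offload_fwd_mark(skb);

	dev_queue_xmit(skb);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
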
63 int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
65 skb_clear_tstamp(skb);
67 net, sk, skb, NULL, skb->dev,
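
Matches 63-67 are br_forward_finish(); the NF_HOOK() call around the matched argument list at line 67 is an assumption from context:

int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	/* drop any stale rx timestamp before the frame is transmitted */
	skb_clear_tstamp(skb);
	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
		       net, sk, skb, NULL, skb->dev,
		       br_dev_queue_push_xmit);
}

This is the usual netfilter pattern: the frame traverses the NF_BR_POST_ROUTING hook and, if accepted, continues into br_dev_queue_push_xmit() above.
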
74 struct sk_buff *skb, bool local_orig)
81 /* Mark the skb for forwarding offload early so that br_handle_vlan()
84 nbp_switchdev_frame_mark_tx_fwd_offload(to, skb);
87 skb = br_handle_vlan(to->br, to, vg, skb);
88 if (!skb)
91 indev = skb->dev;
92 skb->dev = to->dev;
94 if (skb_warn_if_lro(skb)) {
95 kfree_skb(skb);
99 skb_forward_csum(skb);
103 skb_push(skb, ETH_HLEN);
104 if (!is_skb_forwardable(skb->dev, skb))
105 kfree_skb(skb);
107 br_netpoll_send_skb(to, skb);
111 net = dev_net(skb->dev);
116 net, NULL, skb, indev, skb->dev,
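
Matches 74-116 belong to __br_forward(), the single-port transmit path. A sketch; the local_orig/netpoll branching between the matched lines is reconstructed from context and is the least certain part:

static void __br_forward(const struct net_bridge_port *to,
			 struct sk_buff *skb, bool local_orig)
{
	struct net_bridge_vlan_group *vg;
	struct net_device *indev;
	struct net *net;
	int br_hook;

	/* Mark the skb for forwarding offload early so that br_handle_vlan()
	 * can know whether to pop the VLAN header on egress or keep it
	 */
	nbp_switchdev_frame_mark_tx_fwd_offload(to, skb);
	vg = nbp_vlan_group_rcu(to);	/* assumed from context */
	skb = br_handle_vlan(to->br, to, vg, skb);
	if (!skb)
		return;

	indev = skb->dev;
	skb->dev = to->dev;
	if (!local_orig) {
		/* forwarded frame: refuse LRO-aggregated skbs */
		if (skb_warn_if_lro(skb)) {
			kfree_skb(skb);
			return;
		}
		br_hook = NF_BR_FORWARD;
		skb_forward_csum(skb);
		net = dev_net(indev);
	} else {
		/* assumed from context: locally originated traffic */
		if (unlikely(netpoll_tx_running(to->br->dev))) {
			skb_push(skb, ETH_HLEN);
			if (!is_skb_forwardable(skb->dev, skb))
				kfree_skb(skb);
			else
				br_netpoll_send_skb(to, skb);
			return;
		}
		br_hook = NF_BR_LOCAL_OUT;
		net = dev_net(skb->dev);
		indev = NULL;
	}

	NF_HOOK(NFPROTO_BRIDGE, br_hook,
		net, NULL, skb, indev, skb->dev,
		br_forward_finish);
}
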
121 struct sk_buff *skb, bool local_orig)
123 struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
125 skb = skb_clone(skb, GFP_ATOMIC);
126 if (!skb) {
131 __br_forward(prev, skb, local_orig);
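
Matches 121-131 are deliver_clone(), which clones before handing off so the caller keeps its own reference. The drop accounting and return values are assumptions:

static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb, bool local_orig)
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		/* assumed: count the failed clone against the bridge device */
		DEV_STATS_INC(dev, tx_dropped);
		return -ENOMEM;
	}

	__br_forward(prev, skb, local_orig);
	return 0;
}
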
138 * @skb: packet being forwarded
145 struct sk_buff *skb, bool local_rcv, bool local_orig)
157 BR_INPUT_SKB_CB(skb)->backup_nhid = READ_ONCE(to->backup_nhid);
161 if (should_deliver(to, skb)) {
163 deliver_clone(to, skb, local_orig);
165 __br_forward(to, skb, local_orig);
171 kfree_skb(skb);
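
Matches 138-171 are br_forward(), the public single-destination entry point. A sketch; the backup-port redirect between lines 145 and 157 is filled in from context:

/**
 * br_forward - forward a packet to a specific port
 * @to: destination port
 * @skb: packet being forwarded
 * @local_rcv: packet will be received locally after forwarding
 * @local_orig: packet is locally originated
 *
 * Should be called with rcu_read_lock.
 */
void br_forward(const struct net_bridge_port *to,
		struct sk_buff *skb, bool local_rcv, bool local_orig)
{
	if (unlikely(!to))
		goto out;

	/* assumed from context: redirect to the backup port when the
	 * destination port has lost carrier
	 */
	if (rcu_access_pointer(to->backup_port) && !netif_carrier_ok(to->dev)) {
		struct net_bridge_port *backup_port;

		backup_port = rcu_dereference(to->backup_port);
		if (unlikely(!backup_port))
			goto out;
		BR_INPUT_SKB_CB(skb)->backup_nhid = READ_ONCE(to->backup_nhid);
		to = backup_port;
	}

	if (should_deliver(to, skb)) {
		if (local_rcv)
			deliver_clone(to, skb, local_orig);
		else
			__br_forward(to, skb, local_orig);
		return;
	}

out:
	if (!local_rcv)
		kfree_skb(skb);
}

Note the ownership rule: when local_rcv is set the skb is cloned, since the caller still needs it for local delivery; otherwise __br_forward() consumes it.
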
177 struct sk_buff *skb, bool local_orig)
179 u8 igmp_type = br_multicast_igmp_type(skb);
182 if (!should_deliver(p, skb))
185 nbp_switchdev_frame_mark_tx_fwd_to_hwdom(p, skb);
190 err = deliver_clone(prev, skb, local_orig);
194 br_multicast_count(p->br, p, skb, igmp_type, BR_MCAST_DIR_TX);
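
Matches 177-194 are maybe_deliver(), the flooding helper. It delivers one port behind: the current port p is only remembered, and the previously remembered port gets a clone, so the final port in the walk can take the original skb without a copy. A sketch with the control flow between the matches assumed:

static struct net_bridge_port *maybe_deliver(
	struct net_bridge_port *prev, struct net_bridge_port *p,
	struct sk_buff *skb, bool local_orig)
{
	u8 igmp_type = br_multicast_igmp_type(skb);
	int err;

	if (!should_deliver(p, skb))
		return prev;

	nbp_switchdev_frame_mark_tx_fwd_to_hwdom(p, skb);

	if (!prev)
		goto out;

	err = deliver_clone(prev, skb, local_orig);
	if (err)
		return ERR_PTR(err);
out:
	br_multicast_count(p->br, p, skb, igmp_type, BR_MCAST_DIR_TX);

	return p;
}
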
200 void br_flood(struct net_bridge *br, struct sk_buff *skb,
207 br_tc_skb_miss_set(skb, pkt_type != BR_PKT_BROADCAST);
219 if (!(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev)
223 if (!(p->flags & BR_BCAST_FLOOD) && skb->dev != br->dev)
231 if (BR_INPUT_SKB_CB(skb)->proxyarp_replied &&
236 prev = maybe_deliver(prev, p, skb, local_orig);
245 deliver_clone(prev, skb, local_orig);
247 __br_forward(prev, skb, local_orig);
252 kfree_skb(skb);
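
Matches 200-252 are br_flood(). A sketch; the signature tail (pkt_type, local_rcv, local_orig, vid) and the per-flag switch are reconstructed from context:

void br_flood(struct net_bridge *br, struct sk_buff *skb,
	      enum br_pkt_type pkt_type, bool local_rcv, bool local_orig,
	      u16 vid)
{
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port *p;

	br_tc_skb_miss_set(skb, pkt_type != BR_PKT_BROADCAST);

	list_for_each_entry_rcu(p, &br->port_list, list) {
		/* skip ports that opted out of flooding this packet type,
		 * except for traffic the bridge itself originates
		 */
		switch (pkt_type) {
		case BR_PKT_UNICAST:
			if (!(p->flags & BR_FLOOD))
				continue;
			break;
		case BR_PKT_MULTICAST:
			if (!(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev)
				continue;
			break;
		case BR_PKT_BROADCAST:
			if (!(p->flags & BR_BCAST_FLOOD) && skb->dev != br->dev)
				continue;
			break;
		}

		/* assumed from context: proxy-ARP/neigh suppression */
		if (p->flags & BR_PROXYARP)
			continue;
		if (BR_INPUT_SKB_CB(skb)->proxyarp_replied &&
		    ((p->flags & BR_PROXYARP_WIFI) ||
		     br_is_neigh_suppress_enabled(p, vid)))
			continue;

		prev = maybe_deliver(prev, p, skb, local_orig);
		if (IS_ERR(prev))
			goto out;
	}

	if (!prev)
		goto out;

	if (local_rcv)
		deliver_clone(prev, skb, local_orig);
	else
		__br_forward(prev, skb, local_orig);
	return;

out:
	if (!local_rcv)
		kfree_skb(skb);
}
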
256 static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
259 struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
260 const unsigned char *src = eth_hdr(skb)->h_source;
262 if (!should_deliver(p, skb))
266 if (skb->dev == p->dev && ether_addr_equal(src, addr))
269 skb = skb_copy(skb, GFP_ATOMIC);
270 if (!skb) {
276 memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN);
278 __br_forward(p, skb, local_orig);
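
Matches 256-278 are maybe_deliver_addr(), used for multicast-to-unicast delivery. It takes a full copy with skb_copy() rather than a clone because the destination MAC is rewritten in place and clones share the data buffer. The broadcast check and drop accounting are assumptions:

static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
			       const unsigned char *addr, bool local_orig)
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	const unsigned char *src = eth_hdr(skb)->h_source;

	if (!should_deliver(p, skb))
		return;

	/* even with hairpin, never echo a frame back to its own source
	 * address (this would break IPv6 DAD, among other things)
	 */
	if (skb->dev == p->dev && ether_addr_equal(src, addr))
		return;

	skb = skb_copy(skb, GFP_ATOMIC);
	if (!skb) {
		/* assumed: account the failed copy on the bridge device */
		DEV_STATS_INC(dev, tx_dropped);
		return;
	}

	/* assumed: leave a broadcast destination address untouched */
	if (!is_broadcast_ether_addr(addr))
		memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN);

	__br_forward(p, skb, local_orig);
}
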
283 struct sk_buff *skb,
292 rp = br_multicast_get_first_rport_node(brmctx, skb);
301 br_tc_skb_miss_set(skb, true);
308 rport = br_multicast_rport_from_node_skb(rp, skb);
314 maybe_deliver_addr(lport, skb, p->eth_addr,
326 prev = maybe_deliver(prev, port, skb, local_orig);
340 deliver_clone(prev, skb, local_orig);
342 __br_forward(prev, skb, local_orig);
347 kfree_skb(skb);
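
Matches 283-347 are br_multicast_flood(), which walks two sorted sets in parallel, the mdb entry's port-group list and the router-port list, delivering once per port. A sketch; the merge logic and the star-G/include-mode filtering between the matches are reconstructed from context and are the least certain part:

void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
			struct sk_buff *skb,
			struct net_bridge_mcast *brmctx,
			bool local_rcv, bool local_orig)
{
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port_group *p;
	bool allow_mode_include = true;
	struct hlist_node *rp;

	rp = br_multicast_get_first_rport_node(brmctx, skb);

	if (mdst) {
		p = rcu_dereference(mdst->ports);
		/* assumed: for (*,G) entries, INCLUDE-mode ports are
		 * filtered out below
		 */
		if (br_multicast_should_handle_mode(brmctx, mdst->addr.proto) &&
		    br_multicast_is_star_g(&mdst->addr))
			allow_mode_include = false;
	} else {
		p = NULL;
		br_tc_skb_miss_set(skb, true);
	}

	while (p || rp) {
		struct net_bridge_port *port, *lport, *rport;

		lport = p ? p->key.port : NULL;
		rport = br_multicast_rport_from_node_skb(rp, skb);

		/* merge the two lists by pointer order, deduplicating */
		if ((unsigned long)lport > (unsigned long)rport) {
			port = lport;

			if (port->flags & BR_MULTICAST_TO_UNICAST) {
				maybe_deliver_addr(lport, skb, p->eth_addr,
						   local_orig);
				goto delivered;
			}
			if ((!allow_mode_include &&
			     p->filter_mode == MCAST_INCLUDE) ||
			    (p->flags & MDB_PG_FLAGS_BLOCKED))
				goto delivered;
		} else {
			port = rport;
		}

		prev = maybe_deliver(prev, port, skb, local_orig);
		if (IS_ERR(prev))
			goto out;

delivered:
		if ((unsigned long)lport >= (unsigned long)port)
			p = rcu_dereference(p->next);
		if ((unsigned long)rport >= (unsigned long)port)
			rp = rcu_dereference(hlist_next_rcu(rp));
	}

	if (!prev)
		goto out;

	if (local_rcv)
		deliver_clone(prev, skb, local_orig);
	else
		__br_forward(prev, skb, local_orig);
	return;

out:
	if (!local_rcv)
		kfree_skb(skb);
}
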