Lines matching refs:skb (symbol references to skb in net/ipv6/ip6_input.c, the Linux IPv6 receive path)
49 struct sk_buff *skb)
52 !skb_dst(skb) && !skb->sk) {
53 switch (ipv6_hdr(skb)->nexthdr) {
56 tcp_v6_early_demux(skb);
60 udp_v6_early_demux(skb);
65 if (!skb_valid_dst(skb))
66 ip6_route_input(skb);
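
The fragments at 49-66 are from ip6_rcv_finish_core(): when early demux is enabled and the skb has neither a cached dst nor an owning socket, TCP/UDP get a chance to look up the destination socket before routing, and the route lookup then runs only if no valid dst got attached. A sketch of how the matched lines fit together; the sysctl checks on the unmatched lines are filled in from mainline as an assumption:

static void ip6_rcv_finish_core(struct net *net, struct sock *sk,
				struct sk_buff *skb)
{
	/* Early demux: resolve the destination socket before the route
	 * lookup so a dst cached on the socket can be reused.
	 */
	if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
	    !skb_dst(skb) && !skb->sk) {
		switch (ipv6_hdr(skb)->nexthdr) {
		case IPPROTO_TCP:
			if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux))
				tcp_v6_early_demux(skb);
			break;
		case IPPROTO_UDP:
			if (READ_ONCE(net->ipv4.sysctl_udp_early_demux))
				udp_v6_early_demux(skb);
			break;
		}
	}

	/* Only do the routing lookup if nothing above attached a dst. */
	if (!skb_valid_dst(skb))
		ip6_route_input(skb);
}
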
69 int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
72 * skb to its handler for processing
74 skb = l3mdev_ip6_rcv(skb);
75 if (!skb)
77 ip6_rcv_finish_core(net, sk, skb);
79 return dst_input(skb);
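
Lines 69-79 are ip6_rcv_finish() itself. Reconstructed from the matched lines, following the mainline shape (the return value on the l3mdev steal is an assumption):

int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	/* if ingress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip6_rcv(skb);
	if (!skb)
		return NET_RX_SUCCESS;	/* consumed by the l3mdev handler */

	ip6_rcv_finish_core(net, sk, skb);

	return dst_input(skb);
}
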
84 struct sk_buff *skb, *next;
86 list_for_each_entry_safe(skb, next, head, list) {
87 skb_list_del_init(skb);
88 dst_input(skb);
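
Lines 84-88 are the flush helper of the list variant (ip6_sublist_rcv_finish() in mainline): every skb on the sublist shares the same dst, so each is unlinked and handed to dst_input() in turn. Sketch:

static void ip6_sublist_rcv_finish(struct list_head *head)
{
	struct sk_buff *skb, *next;

	list_for_each_entry_safe(skb, next, head, list) {
		/* unlink first: dst_input() may free or requeue the skb */
		skb_list_del_init(skb);
		dst_input(skb);
	}
}
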
92 static bool ip6_can_use_hint(const struct sk_buff *skb,
95 return hint && !skb_dst(skb) &&
96 ipv6_addr_equal(&ipv6_hdr(hint)->daddr, &ipv6_hdr(skb)->daddr);
100 struct sk_buff *skb)
103 IP6CB(skb)->flags & IP6SKB_MULTIPATH)
106 return skb;
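
Lines 92-106 cover the two route-hint helpers. A previous packet can serve as a hint only when the new skb has no dst yet and both share a destination address; conversely, a packet is only usable as a hint when source-dependent routing, custom FIB rules, and multipath are all out of the picture. Reconstructed; the two fib6_*() conditions on the unmatched line are filled in from mainline as an assumption:

static bool ip6_can_use_hint(const struct sk_buff *skb,
			     const struct sk_buff *hint)
{
	return hint && !skb_dst(skb) &&
	       ipv6_addr_equal(&ipv6_hdr(hint)->daddr, &ipv6_hdr(skb)->daddr);
}

static struct sk_buff *ip6_extract_route_hint(const struct net *net,
					      struct sk_buff *skb)
{
	/* A route chosen by source address, policy rules or multipath
	 * hashing is not a function of daddr alone, so it must not be
	 * reused for other packets.
	 */
	if (fib6_routes_require_src(net) || fib6_has_custom_rules(net) ||
	    IP6CB(skb)->flags & IP6SKB_MULTIPATH)
		return NULL;

	return skb;
}
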
112 struct sk_buff *skb, *next, *hint = NULL;
117 list_for_each_entry_safe(skb, next, head, list) {
120 skb_list_del_init(skb);
122 * skb to its handler for processing
124 skb = l3mdev_ip6_rcv(skb);
125 if (!skb)
128 if (ip6_can_use_hint(skb, hint))
129 skb_dst_copy(skb, hint);
131 ip6_rcv_finish_core(net, sk, skb);
132 dst = skb_dst(skb);
134 hint = ip6_extract_route_hint(net, skb);
143 list_add_tail(&skb->list, &sublist);
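
Lines 112-143 belong to the batched finish path (ip6_list_rcv_finish() in mainline): packets are grouped into sublists sharing one dst, the hint short-circuits the per-packet route lookup, and each completed sublist is flushed through the helper above. A reconstruction; the sublist bookkeeping between the matched lines is taken from mainline and may differ across kernel versions:

static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
				struct list_head *head)
{
	struct sk_buff *skb, *next, *hint = NULL;
	struct dst_entry *curr_dst = NULL;
	LIST_HEAD(sublist);

	list_for_each_entry_safe(skb, next, head, list) {
		struct dst_entry *dst;

		skb_list_del_init(skb);
		/* if ingress device is enslaved to an L3 master device pass the
		 * skb to its handler for processing
		 */
		skb = l3mdev_ip6_rcv(skb);
		if (!skb)
			continue;

		if (ip6_can_use_hint(skb, hint))
			skb_dst_copy(skb, hint);	/* reuse cached route */
		else
			ip6_rcv_finish_core(net, sk, skb);
		dst = skb_dst(skb);
		if (curr_dst != dst) {
			hint = ip6_extract_route_hint(net, skb);

			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip6_sublist_rcv_finish(&sublist);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dst = dst;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	ip6_sublist_rcv_finish(&sublist);
}
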
149 static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
157 if (skb->pkt_type == PACKET_OTHERHOST) {
158 dev_core_stats_rx_otherhost_dropped_inc(skb->dev);
159 kfree_skb_reason(skb, SKB_DROP_REASON_OTHERHOST);
165 idev = __in6_dev_get(skb->dev);
167 __IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_IN, skb->len);
170 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
178 memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
182 * be queued, we cannot refer to skb->dev anymore.
186 * via the loopback interface (lo) here; skb->dev = loopback_dev.
191 IP6CB(skb)->iif = skb_valid_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;
193 if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
196 hdr = ipv6_hdr(skb);
206 max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
226 if (!(skb->pkt_type == PACKET_LOOPBACK ||
237 (skb->pkt_type == PACKET_BROADCAST ||
238 skb->pkt_type == PACKET_MULTICAST) &&
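
Lines 226-238 sit in ip6_rcv_core()'s address sanity checks. A sketch of the two drops these fragments belong to, reconstructed from mainline (the RFC 4291 loopback rule, and the optional drop of unicast carried inside link-layer broadcast/multicast frames):

	/* RFC 4291 2.5.3: the loopback address must never appear as
	 * source or destination on a real interface.
	 */
	if (!(skb->pkt_type == PACKET_LOOPBACK ||
	      dev->flags & IFF_LOOPBACK) &&
	    (ipv6_addr_loopback(&hdr->saddr) ||
	     ipv6_addr_loopback(&hdr->daddr)))
		goto err;

	/* If the per-device sysctl asks for it, drop unicast IPv6
	 * packets that arrived in L2 broadcast/multicast frames.
	 */
	if (!ipv6_addr_is_multicast(&hdr->daddr) &&
	    (skb->pkt_type == PACKET_BROADCAST ||
	     skb->pkt_type == PACKET_MULTICAST) &&
	    idev->cnf.drop_unicast_in_l2_multicast)
		goto err;
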
261 skb->transport_header = skb->network_header + sizeof(*hdr);
262 IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
268 if (pkt_len + sizeof(struct ipv6hdr) > skb->len) {
274 if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
276 hdr = ipv6_hdr(skb);
280 if (ipv6_parse_hopopts(skb) < 0) {
290 if (!skb_sk_is_prefetched(skb))
291 skb_orphan(skb);
293 return skb;
299 kfree_skb_reason(skb, reason);
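
Lines 261-293 form the tail of ip6_rcv_core(): set up the transport header, validate payload_len against the actual skb length (trimming link-layer padding), parse hop-by-hop options when present, and orphan the skb before it enters the stack proper. A condensed sketch with the SNMP counters and drop reasons omitted:

	u32 pkt_len;

	/* the transport header starts right after the fixed 40-byte
	 * IPv6 header; nhoff records where the Next Header byte lives
	 */
	skb->transport_header = skb->network_header + sizeof(*hdr);
	IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);

	/* pkt_len may legitimately be zero when a Jumbo payload
	 * hop-by-hop option is present
	 */
	pkt_len = ntohs(hdr->payload_len);
	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
		if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
			goto err;	/* truncated packet */
		/* trim link-layer padding; this can move the data, so
		 * the header pointer must be reloaded afterwards
		 */
		if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
			goto err;
		hdr = ipv6_hdr(skb);
	}

	if (hdr->nexthdr == NEXTHDR_HOP) {
		if (ipv6_parse_hopopts(skb) < 0)
			return NULL;	/* skb is consumed on failure */
	}

	/* drop stale socket references (tproxy), but keep sockets
	 * prefetched for us, e.g. by bpf sk_assign
	 */
	if (!skb_sk_is_prefetched(skb))
		skb_orphan(skb);

	return skb;
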
303 int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
305 struct net *net = dev_net(skb->dev);
307 skb = ip6_rcv_core(skb, dev, net);
308 if (skb == NULL)
311 net, NULL, skb, dev, NULL,
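
Lines 303-311 are ipv6_rcv(), the packet_type handler registered for ETH_P_IPV6: after core validation the packet traverses the netfilter PRE_ROUTING hook and, if accepted, continues in ip6_rcv_finish(). Reconstructed from the matched lines:

int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
	     struct packet_type *pt, struct net_device *orig_dev)
{
	struct net *net = dev_net(skb->dev);

	skb = ip6_rcv_core(skb, dev, net);
	if (skb == NULL)
		return NET_RX_DROP;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
		       net, NULL, skb, dev, NULL,
		       ip6_rcv_finish);
}
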
329 struct sk_buff *skb, *next;
333 list_for_each_entry_safe(skb, next, head, list) {
334 struct net_device *dev = skb->dev;
337 skb_list_del_init(skb);
338 skb = ip6_rcv_core(skb, dev, net);
339 if (skb == NULL)
351 list_add_tail(&skb->list, &sublist);
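
Lines 329-351 come from the list receive entry point (ipv6_list_rcv() in mainline): each packet is validated individually, then regrouped into sublists that share the same net and device so the netfilter hook and finish path can run once per batch. Reconstructed; the curr_dev/curr_net bookkeeping between the matched lines is taken from mainline:

void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
		   struct net_device *orig_dev)
{
	struct net_device *curr_dev = NULL;
	struct net *curr_net = NULL;
	struct sk_buff *skb, *next;
	LIST_HEAD(sublist);

	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *dev = skb->dev;
		struct net *net = dev_net(dev);

		skb_list_del_init(skb);
		skb = ip6_rcv_core(skb, dev, net);
		if (skb == NULL)
			continue;

		if (curr_dev != dev || curr_net != net) {
			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip6_sublist_rcv(&sublist, curr_dev, curr_net);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dev = dev;
			curr_net = net;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	if (!list_empty(&sublist))
		ip6_sublist_rcv(&sublist, curr_dev, curr_net);
}
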
363 void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr,
377 idev = ip6_dst_idev(skb_dst(skb));
378 nhoff = IP6CB(skb)->nhoff;
380 if (!pskb_pull(skb, skb_transport_offset(skb)))
382 nexthdr = skb_network_header(skb)[nhoff];
386 raw = raw6_local_deliver(skb, nexthdr);
402 int sdif = inet6_sdif(skb);
409 skb_postpull_rcsum(skb, skb_network_header(skb),
410 skb_network_header_len(skb));
411 hdr = ipv6_hdr(skb);
413 /* skb->dev passed may be master dev for vrfs. */
419 dev = skb->dev;
425 !ipv6_is_mld(skb, nexthdr, skb_network_header_len(skb))) {
431 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
435 nf_reset_ct(skb);
439 skb);
457 if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
460 icmpv6_send(skb, ICMPV6_PARAMPROB,
466 kfree_skb_reason(skb, reason);
469 consume_skb(skb);
476 kfree_skb_reason(skb, reason);
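
Lines 363-476 are from ip6_protocol_deliver_rcu(), which walks the extension-header chain: raw sockets get a copy first, the handler registered in inet6_protos[] is called, and a positive return value is the next protocol number, restarting the loop. A heavily condensed sketch of the dispatch step; the SNMP counters, VRF/sdif resolution, multicast membership check, and INET6_PROTO_FINAL bookkeeping visible in the matched lines are all omitted:

	const struct inet6_protocol *ipprot;
	struct inet6_dev *idev;
	unsigned int nhoff;
	bool raw;
	int ret;

resubmit:
	idev = ip6_dst_idev(skb_dst(skb));	/* for the omitted counters */
	nhoff = IP6CB(skb)->nhoff;
	if (!pskb_pull(skb, skb_transport_offset(skb)))
		goto discard;
	nexthdr = skb_network_header(skb)[nhoff];

	/* deliver a clone to any matching raw sockets first */
	raw = raw6_local_deliver(skb, nexthdr);

	ipprot = rcu_dereference(inet6_protos[nexthdr]);
	if (ipprot) {
		if (!(ipprot->flags & INET6_PROTO_NOPOLICY) &&
		    !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto discard;
		nf_reset_ct(skb);

		ret = ipprot->handler(skb);
		if (ret > 0) {
			/* an extension header was consumed: ret is the
			 * next protocol number
			 */
			nexthdr = ret;
			goto resubmit;
		}
	} else if (!raw) {
		/* nothing wanted the packet: point an ICMPv6 parameter
		 * problem at the unknown Next Header byte
		 */
		if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			icmpv6_send(skb, ICMPV6_PARAMPROB,
				    ICMPV6_UNK_NEXTHDR, nhoff);
		kfree_skb(skb);
	} else {
		consume_skb(skb);	/* raw sockets took it */
	}
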
479 static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
481 skb_clear_delivery_time(skb);
483 ip6_protocol_deliver_rcu(net, skb, 0, false);
490 int ip6_input(struct sk_buff *skb)
493 dev_net(skb->dev), NULL, skb, skb->dev, NULL,
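
Lines 479-493 are the local-delivery pair: ip6_input() runs the netfilter LOCAL_IN hook and, if the packet survives, ip6_input_finish() clears the delivery-time stamp and dispatches under RCU; the rcu_read_lock() bracket on the unmatched lines is filled in from mainline:

static int ip6_input_finish(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	skb_clear_delivery_time(skb);
	rcu_read_lock();
	ip6_protocol_deliver_rcu(net, skb, 0, false);
	rcu_read_unlock();

	return 0;
}

int ip6_input(struct sk_buff *skb)
{
	return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN,
		       dev_net(skb->dev), NULL, skb, skb->dev, NULL,
		       ip6_input_finish);
}
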
498 int ip6_mc_input(struct sk_buff *skb)
500 int sdif = inet6_sdif(skb);
505 __IP6_UPD_PO_STATS(dev_net(skb_dst(skb)->dev),
506 __in6_dev_get_safely(skb->dev), IPSTATS_MIB_INMCAST,
507 skb->len);
509 /* skb->dev passed may be master dev for vrfs. */
512 dev = dev_get_by_index_rcu(dev_net(skb->dev), sdif);
515 kfree_skb(skb);
519 dev = skb->dev;
522 hdr = ipv6_hdr(skb);
531 if (atomic_read(&dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding) &&
534 likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
540 struct inet6_skb_parm *opt = IP6CB(skb);
559 offset = ipv6_skip_exthdr(skb, sizeof(*hdr),
564 if (ipv6_is_mld(skb, nexthdr, offset))
573 skb2 = skb_clone(skb, GFP_ATOMIC);
575 skb2 = skb;
576 skb = NULL;
586 ip6_input(skb);
589 kfree_skb(skb);
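
Lines 498-589 come from ip6_mc_input(): after counting the packet and checking group membership on the (possibly VRF-resolved) device, the forwarding branch inspects a Router Alert option for MLD; when the packet must be both forwarded and delivered locally, forwarding gets a clone so local delivery keeps the original. A condensed sketch of that tail, reconstructed from the matched lines:

	/* 'deliver' was set earlier from ipv6_chk_mcast_addr() and may
	 * have been overridden by the MLD router-alert check above
	 */
	struct sk_buff *skb2;

	if (deliver)
		skb2 = skb_clone(skb, GFP_ATOMIC);	/* both paths need it */
	else {
		skb2 = skb;	/* forward-only: hand the skb over */
		skb = NULL;
	}

	if (skb2)
		ip6_mr_input(skb2);	/* multicast forwarding */

	if (likely(deliver))
		ip6_input(skb);		/* local delivery */
	else
		kfree_skb(skb);		/* no-op if forwarding took it */
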