Lines matching references to skb in the IPv4 receive path (net/ipv4/ip_input.c); the leading numbers are the source line numbers:
151 bool ip_call_ra_chain(struct sk_buff *skb)
154 u8 protocol = ip_hdr(skb)->protocol;
156 struct net_device *dev = skb->dev;
168 if (ip_is_fragment(ip_hdr(skb))) {
169 if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN))
173 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
182 raw_rcv(last, skb);
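
These lines are from ip_call_ra_chain(): packets carrying the IP Router Alert option are offered to every socket on the per-netns ra_chain whose protocol number matches the packet, fragments are reassembled first (IP_DEFRAG_CALL_RA_CHAIN), intermediate matches get a clone, and the last match receives the original skb via raw_rcv(). Userspace joins that chain with the IP_ROUTER_ALERT socket option on a raw socket; a minimal sketch (IPPROTO_RSVP is only an example protocol, and CAP_NET_RAW is required):

    /* Sketch: raw socket that asks the kernel to steer Router Alert
     * packets of its protocol to it via ip_call_ra_chain(). */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    int main(void)
    {
        int on = 1;
        char buf[2048];
        int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);

        if (fd < 0 || setsockopt(fd, IPPROTO_IP, IP_ROUTER_ALERT,
                                 &on, sizeof(on)) < 0) {
            perror("raw socket / IP_ROUTER_ALERT");
            return 1;
        }
        /* Each received datagram starts with the IP header. */
        for (;;) {
            ssize_t n = recv(fd, buf, sizeof(buf), 0);
            if (n > 0)
                printf("got %zd bytes with Router Alert set\n", n);
        }
        close(fd);
        return 0;
    }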
190 void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int protocol)
196 raw = raw_local_deliver(skb, protocol);
201 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
202 kfree_skb(skb);
205 nf_reset_ct(skb);
208 skb);
216 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
218 icmp_send(skb, ICMP_DEST_UNREACH,
221 kfree_skb(skb);
224 consume_skb(skb);
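
ip_protocol_deliver_rcu() first lets matching raw sockets see the packet (raw_local_deliver()), then dispatches to the net_protocol handler registered for ip_hdr(skb)->protocol; handlers without no_policy go through xfrm4_policy_check(), and if neither a raw socket nor a protocol handler claims the packet, an ICMP destination/protocol-unreachable is sent before the skb is freed. A minimal kernel-side sketch of how such a handler gets registered (the handler name and protocol number 253, reserved for experimentation by RFC 3692, are made up):

    /* Sketch of registering an L4 handler that ip_protocol_deliver_rcu()
     * would dispatch to. "my_proto_rcv" and protocol 253 are placeholders. */
    #include <linux/module.h>
    #include <linux/skbuff.h>
    #include <net/protocol.h>

    static int my_proto_rcv(struct sk_buff *skb)
    {
        kfree_skb(skb);        /* consume the packet */
        return 0;
    }

    static const struct net_protocol my_proto = {
        .handler   = my_proto_rcv,
        .no_policy = 1,        /* skip the xfrm4_policy_check() above */
    };

    static int __init my_proto_init(void)
    {
        return inet_add_protocol(&my_proto, 253);
    }

    static void __exit my_proto_exit(void)
    {
        inet_del_protocol(&my_proto, 253);
    }

    module_init(my_proto_init);
    module_exit(my_proto_exit);
    MODULE_LICENSE("GPL");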
229 static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
231 __skb_pull(skb, skb_network_header_len(skb));
234 ip_protocol_deliver_rcu(net, skb, ip_hdr(skb)->protocol);
243 int ip_local_deliver(struct sk_buff *skb)
248 struct net *net = dev_net(skb->dev);
253 if (ip_is_fragment(ip_hdr(skb))) {
254 if (ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER))
259 if (netfilter_bypass_enable(net, skb, ip_local_deliver_finish, &ret))
264 net, NULL, skb, skb->dev, NULL,
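
ip_local_deliver() reassembles fragments (IP_DEFRAG_LOCAL_DELIVER) and then passes the packet through the NF_INET_LOCAL_IN netfilter hook on its way to ip_local_deliver_finish(); note that netfilter_bypass_enable() at line 259 is not mainline code and appears to be a vendor fast path around that hook. A hedged sketch of a module observing packets at the same LOCAL_IN point (hook body and priority are illustrative):

    /* Sketch: a pass-through hook at NF_INET_LOCAL_IN, the hook point
     * ip_local_deliver() traverses via NF_HOOK(). */
    #include <linux/module.h>
    #include <linux/netfilter.h>
    #include <linux/netfilter_ipv4.h>
    #include <linux/ip.h>
    #include <net/net_namespace.h>

    static unsigned int local_in_watch(void *priv, struct sk_buff *skb,
                                       const struct nf_hook_state *state)
    {
        pr_debug("local-in: proto %u len %u\n",
                 ip_hdr(skb)->protocol, skb->len);
        return NF_ACCEPT;
    }

    static struct nf_hook_ops watch_ops = {
        .hook     = local_in_watch,
        .pf       = NFPROTO_IPV4,
        .hooknum  = NF_INET_LOCAL_IN,
        .priority = NF_IP_PRI_FILTER,
    };

    static int __init watch_init(void)
    {
        return nf_register_net_hook(&init_net, &watch_ops);
    }

    static void __exit watch_exit(void)
    {
        nf_unregister_net_hook(&init_net, &watch_ops);
    }

    module_init(watch_init);
    module_exit(watch_exit);
    MODULE_LICENSE("GPL");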
268 static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
280 if (skb_cow(skb, skb_headroom(skb))) {
285 iph = ip_hdr(skb);
286 opt = &(IPCB(skb)->opt);
289 if (ip_options_compile(dev_net(dev), opt, skb)) {
307 if (ip_options_rcv_srr(skb, dev))
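
ip_rcv_options() makes the header writable with skb_cow(), parses the options into IPCB(skb)->opt via ip_options_compile(), and handles source routing through ip_options_rcv_srr(). The sending side of such options is reachable from userspace; a small sketch that attaches a Router Alert option (RFC 2113: type 0x94, length 4, value 0) to a socket's outgoing packets so receivers exercise this path:

    /* Sketch: set a 4-byte Router Alert IP option on a socket, making
     * receivers run the option-parsing code shown above. */
    #include <sys/socket.h>
    #include <netinet/in.h>

    static int set_router_alert(int fd)
    {
        unsigned char ra[4] = { 0x94, 0x04, 0x00, 0x00 };

        return setsockopt(fd, IPPROTO_IP, IP_OPTIONS, ra, sizeof(ra));
    }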
316 static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph,
319 return hint && !skb_dst(skb) && ip_hdr(hint)->daddr == iph->daddr &&
323 int tcp_v4_early_demux(struct sk_buff *skb);
324 int udp_v4_early_demux(struct sk_buff *skb);
326 struct sk_buff *skb, struct net_device *dev,
329 const struct iphdr *iph = ip_hdr(skb);
333 if (ip_can_use_hint(skb, iph, hint)) {
334 err = ip_route_use_hint(skb, iph->daddr, iph->saddr, iph->tos,
341 !skb_dst(skb) &&
342 !skb->sk &&
347 tcp_v4_early_demux(skb);
349 /* must reload iph, skb->head might have changed */
350 iph = ip_hdr(skb);
355 err = udp_v4_early_demux(skb);
359 /* must reload iph, skb->head might have changed */
360 iph = ip_hdr(skb);
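
Lines 333-360 are the early steps of ip_rcv_finish_core(): if a previous packet's route hint applies (ip_can_use_hint()/ip_route_use_hint()) it is reused, otherwise, when the packet has no dst and no owning socket yet, tcp_v4_early_demux()/udp_v4_early_demux() may look up the destination socket before routing, and iph is reloaded afterwards because skb->head can change. The feature is gated by sysctls; a trivial sketch that reads the global toggle (net.ipv4.ip_early_demux, with tcp_early_demux/udp_early_demux alongside it):

    /* Sketch: read net.ipv4.ip_early_demux, which gates the
     * tcp_v4_early_demux()/udp_v4_early_demux() calls above. */
    #include <stdio.h>

    int main(void)
    {
        int on = -1;
        FILE *f = fopen("/proc/sys/net/ipv4/ip_early_demux", "r");

        if (f && fscanf(f, "%d", &on) == 1)
            printf("ip_early_demux = %d\n", on);
        if (f)
            fclose(f);
        return 0;
    }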
370 if (!skb_valid_dst(skb)) {
371 err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
379 IPCB(skb)->flags |= IPSKB_NOPOLICY;
383 if (unlikely(skb_dst(skb)->tclassid)) {
385 u32 idx = skb_dst(skb)->tclassid;
387 st[idx&0xFF].o_bytes += skb->len;
389 st[(idx>>16)&0xFF].i_bytes += skb->len;
393 if (iph->ihl > 5 && ip_rcv_options(skb, dev))
396 rt = skb_rtable(skb);
398 __IP_UPD_PO_STATS(net, IPSTATS_MIB_INMCAST, skb->len);
400 __IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len);
401 } else if (skb->pkt_type == PACKET_BROADCAST ||
402 skb->pkt_type == PACKET_MULTICAST) {
428 kfree_skb(skb);
437 static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
439 struct net_device *dev = skb->dev;
443 * skb to its handler for processing
445 skb = l3mdev_ip_rcv(skb);
446 if (!skb)
449 ret = ip_rcv_finish_core(net, sk, skb, dev, NULL);
451 ret = dst_input(skb);
458 static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
466 if (skb->pkt_type == PACKET_OTHERHOST)
469 __IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);
471 skb = skb_share_check(skb, GFP_ATOMIC);
472 if (!skb) {
477 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
480 iph = ip_hdr(skb);
501 max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
503 if (!pskb_may_pull(skb, iph->ihl*4))
506 iph = ip_hdr(skb);
512 if (skb->len < len) {
520 * Note this now means skb->len holds ntohs(iph->tot_len).
522 if (pskb_trim_rcsum(skb, len)) {
527 iph = ip_hdr(skb);
528 skb->transport_header = skb->network_header + iph->ihl*4;
531 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
532 IPCB(skb)->iif = skb->skb_iif;
535 if (!skb_sk_is_prefetched(skb))
536 skb_orphan(skb);
538 return skb;
545 kfree_skb(skb);
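
ip_rcv_core() is the sanity pass: drop PACKET_OTHERHOST frames, unshare the skb, pull the minimal and then the full header (pskb_may_pull()), check version and ihl, verify the header checksum, compare tot_len with skb->len, trim trailing padding with pskb_trim_rcsum() (after which skb->len equals ntohs(iph->tot_len)), set the transport header offset and zero the control block. A self-contained userspace sketch of the same field checks, using a plain one's-complement checksum (helper names are illustrative, not kernel API):

    /* Sketch: the header checks ip_rcv_core() applies, in userspace form.
     * Assumes the buffer starts at the IPv4 header and is 2-byte aligned. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    static uint16_t ip_checksum(const void *hdr, size_t words)
    {
        const uint16_t *p = hdr;
        uint32_t sum = 0;

        while (words--)
            sum += *p++;
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }

    static bool ipv4_header_ok(const uint8_t *pkt, size_t caplen)
    {
        size_t ihl;
        uint16_t tot_len;

        if (caplen < 20 || (pkt[0] >> 4) != 4)    /* version must be 4 */
            return false;
        ihl = (pkt[0] & 0x0f) * 4;                /* header length in bytes */
        if (ihl < 20 || caplen < ihl)
            return false;
        if (ip_checksum(pkt, ihl / 2) != 0)       /* valid header sums to 0 */
            return false;
        memcpy(&tot_len, pkt + 2, sizeof(tot_len));
        tot_len = ntohs(tot_len);
        if (tot_len < ihl || caplen < tot_len)    /* truncated packet */
            return false;
        return true;
    }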
553 int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
558 skb = ip_rcv_core(skb, net);
559 if (skb == NULL)
563 net, NULL, skb, dev, NULL,
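
ip_rcv() itself just runs ip_rcv_core() and then the NF_INET_PRE_ROUTING hook with ip_rcv_finish() as the continuation. It is attached to ETH_P_IP in af_inet.c through a struct packet_type whose .func is ip_rcv and whose .list_func is ip_list_rcv, registered with dev_add_pack(); a trimmed sketch of that registration pattern (handler and variable names are placeholders):

    /* Sketch: how an L3 receive entry point like ip_rcv() gets attached
     * to an EtherType, mirroring the ip_packet_type setup in af_inet.c. */
    #include <linux/netdevice.h>
    #include <linux/if_ether.h>
    #include <linux/skbuff.h>

    static int my_l3_rcv(struct sk_buff *skb, struct net_device *dev,
                         struct packet_type *pt, struct net_device *orig_dev)
    {
        kfree_skb(skb);            /* placeholder handler */
        return NET_RX_SUCCESS;
    }

    static struct packet_type my_l3_ptype = {
        .type = cpu_to_be16(ETH_P_IP),
        .func = my_l3_rcv,
        /* af_inet.c also sets .list_func = ip_list_rcv for batched receive */
    };

    static void my_l3_register(void)
    {
        dev_add_pack(&my_l3_ptype);
    }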
569 struct sk_buff *skb, *next;
571 list_for_each_entry_safe(skb, next, head, list) {
572 skb_list_del_init(skb);
573 dst_input(skb);
578 struct sk_buff *skb, int rt_type)
581 IPCB(skb)->flags & IPSKB_MULTIPATH)
584 return skb;
590 struct sk_buff *skb, *next, *hint = NULL;
595 list_for_each_entry_safe(skb, next, head, list) {
596 struct net_device *dev = skb->dev;
599 skb_list_del_init(skb);
601 * skb to its handler for processing
603 skb = l3mdev_ip_rcv(skb);
604 if (!skb)
606 if (ip_rcv_finish_core(net, sk, skb, dev, hint) == NET_RX_DROP)
609 dst = skb_dst(skb);
611 hint = ip_extract_route_hint(net, skb,
621 list_add_tail(&skb->list, &sublist);
641 struct sk_buff *skb, *next;
645 list_for_each_entry_safe(skb, next, head, list) {
646 struct net_device *dev = skb->dev;
649 skb_list_del_init(skb);
650 skb = ip_rcv_core(skb, net);
651 if (skb == NULL)
663 list_add_tail(&skb->list, &sublist);
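
The last fragments belong to the batched receive path: ip_list_rcv() runs ip_rcv_core() on each skb and groups consecutive packets from the same net/dev into sublists, ip_list_rcv_finish() routes each packet while caching a hint skb so that consecutive packets to the same destination can skip the full FIB lookup (ip_extract_route_hint()/ip_can_use_hint()), and ip_sublist_rcv_finish() ends by calling dst_input() on every packet. The hint is simply "remember the last lookup and reuse it while the key matches"; a tiny standalone illustration of that pattern (types and the lookup are made up):

    /* Sketch: the route-hint pattern used by ip_list_rcv_finish(), reduced
     * to a generic "reuse the previous lookup if the key matches" loop. */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct pkt { uint32_t daddr; };
    struct rt  { uint32_t daddr; int ifindex; };

    static struct rt slow_lookup(uint32_t daddr)
    {
        /* stand-in for the full FIB lookup */
        return (struct rt){ .daddr = daddr, .ifindex = (int)(daddr & 3) };
    }

    int main(void)
    {
        struct pkt batch[] = { {1}, {1}, {1}, {2}, {2}, {1} };
        struct rt hint = { 0 };
        int have_hint = 0, lookups = 0;

        for (size_t i = 0; i < sizeof(batch) / sizeof(batch[0]); i++) {
            struct rt rt;

            if (have_hint && hint.daddr == batch[i].daddr) {
                rt = hint;                     /* ip_can_use_hint() case */
            } else {
                rt = slow_lookup(batch[i].daddr);
                lookups++;
                hint = rt;                     /* ip_extract_route_hint() case */
                have_hint = 1;
            }
            printf("pkt %zu -> ifindex %d\n", i, rt.ifindex);
        }
        printf("%d lookups for %zu packets\n", lookups,
               sizeof(batch) / sizeof(batch[0]));
        return 0;
    }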