Lines matching refs: skb (IPv4 receive path, net/ipv4/ip_input.c)

148 bool ip_call_ra_chain(struct sk_buff *skb)
151 u8 protocol = ip_hdr(skb)->protocol;
153 struct net_device *dev = skb->dev;
165 if (ip_is_fragment(ip_hdr(skb))) {
166 if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN))
170 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
179 raw_rcv(last, skb);
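
The matches at lines 148-179 are from ip_call_ra_chain(), which hands packets carrying the IP Router Alert option to raw sockets that joined the per-netns ra_chain via the IP_ROUTER_ALERT socket option. A simplified sketch of the control flow these excerpts belong to (locking and statistics elided; details vary across kernel versions):

/* Simplified sketch: clone the skb for every matching socket except the
 * last one, which receives the original skb via raw_rcv().
 */
bool ip_call_ra_chain(struct sk_buff *skb)
{
    struct ip_ra_chain *ra;
    u8 protocol = ip_hdr(skb)->protocol;
    struct sock *last = NULL;
    struct net_device *dev = skb->dev;
    struct net *net = dev_net(dev);

    for (ra = rcu_dereference(net->ipv4.ra_chain); ra;
         ra = rcu_dereference(ra->next)) {
        struct sock *sk = ra->sk;

        /* only sockets bound to this protocol (and, if set, this device) */
        if (!sk || inet_sk(sk)->inet_num != protocol ||
            (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dev->ifindex))
            continue;

        if (ip_is_fragment(ip_hdr(skb)) &&
            ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN))
            return true;    /* queued for reassembly, skb consumed */

        if (last) {
            struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
            if (skb2)
                raw_rcv(last, skb2);
        }
        last = sk;
    }

    if (last) {
        raw_rcv(last, skb);     /* original skb to the last matching socket */
        return true;
    }
    return false;               /* nobody wanted it; caller keeps going */
}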
187 void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int protocol)
193 raw = raw_local_deliver(skb, protocol);
198 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
199 kfree_skb_reason(skb,
203 nf_reset_ct(skb);
206 skb);
214 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
216 icmp_send(skb, ICMP_DEST_UNREACH,
219 kfree_skb_reason(skb, SKB_DROP_REASON_IP_NOPROTO);
222 consume_skb(skb);
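
Lines 187-222 are from ip_protocol_deliver_rcu(), which dispatches a locally delivered packet to the upper-layer handler registered in inet_protos[] (e.g. tcp_v4_rcv() or udp_rcv()), after giving matching raw sockets a copy via raw_local_deliver() and enforcing XFRM input policy. A compressed sketch, with the resubmit loop and SNMP counters elided:

/* Simplified sketch of the branches the excerpts above belong to. */
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int protocol)
{
    const struct net_protocol *ipprot;
    int raw;

    /* matching raw sockets get their copy first */
    raw = raw_local_deliver(skb, protocol);

    ipprot = rcu_dereference(inet_protos[protocol]);
    if (ipprot) {
        if (!ipprot->no_policy) {
            if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                kfree_skb_reason(skb, SKB_DROP_REASON_XFRM_POLICY);
                return;
            }
            nf_reset_ct(skb);
        }
        ipprot->handler(skb);   /* e.g. tcp_v4_rcv() or udp_rcv() */
    } else if (!raw) {
        /* unknown protocol: tell the sender, then drop */
        if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
            icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0);
        kfree_skb_reason(skb, SKB_DROP_REASON_IP_NOPROTO);
    } else {
        /* a raw socket already took a copy; we are done with ours */
        consume_skb(skb);
    }
}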
227 static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
229 skb_clear_delivery_time(skb);
230 __skb_pull(skb, skb_network_header_len(skb));
233 ip_protocol_deliver_rcu(net, skb, ip_hdr(skb)->protocol);
242 int ip_local_deliver(struct sk_buff *skb)
247 struct net *net = dev_net(skb->dev);
249 if (ip_is_fragment(ip_hdr(skb))) {
250 if (ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER))
255 net, NULL, skb, skb->dev, NULL,
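
Lines 227-255 cover the local-delivery pair: ip_local_deliver() reassembles fragments with ip_defrag() and then traverses the NF_INET_LOCAL_IN netfilter hook, whose continuation ip_local_deliver_finish() strips the IP header and calls ip_protocol_deliver_rcu() under rcu_read_lock(). Roughly:

/* Sketch of the local-delivery pair shown above. */
static int ip_local_deliver_finish(struct net *net, struct sock *sk,
                                   struct sk_buff *skb)
{
    skb_clear_delivery_time(skb);
    __skb_pull(skb, skb_network_header_len(skb));   /* skip header + options */

    rcu_read_lock();
    ip_protocol_deliver_rcu(net, skb, ip_hdr(skb)->protocol);
    rcu_read_unlock();

    return 0;
}

int ip_local_deliver(struct sk_buff *skb)
{
    struct net *net = dev_net(skb->dev);

    /* reassemble; a nonzero ip_defrag() return means "queued, skb consumed" */
    if (ip_is_fragment(ip_hdr(skb)) &&
        ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER))
        return 0;

    return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN,
                   net, NULL, skb, skb->dev, NULL,
                   ip_local_deliver_finish);
}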
260 static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
272 if (skb_cow(skb, skb_headroom(skb))) {
277 iph = ip_hdr(skb);
278 opt = &(IPCB(skb)->opt);
281 if (ip_options_compile(dev_net(dev), opt, skb)) {
299 if (ip_options_rcv_srr(skb, dev))
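
Lines 260-299 are from ip_rcv_options(), which handles headers longer than 20 bytes: it makes the header writable with skb_cow() (options may be rewritten in place), parses them with ip_options_compile() into IPCB(skb)->opt, and defers source routing to ip_options_rcv_srr(); returning true tells the caller to drop the packet. A simplified sketch (SNMP counters and the source-route policy check elided):

/* Simplified sketch of ip_rcv_options(). */
static bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
{
    struct ip_options *opt;
    const struct iphdr *iph;

    /* options may be rewritten in place, so make the header private */
    if (skb_cow(skb, skb_headroom(skb)))
        return true;

    iph = ip_hdr(skb);
    opt = &(IPCB(skb)->opt);
    opt->optlen = iph->ihl * 4 - sizeof(struct iphdr);

    if (ip_options_compile(dev_net(dev), opt, skb))
        return true;    /* malformed options */

    if (unlikely(opt->srr) && ip_options_rcv_srr(skb, dev))
        return true;    /* source route refused or invalid */

    return false;       /* keep processing */
}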
308 static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph,
311 return hint && !skb_dst(skb) && ip_hdr(hint)->daddr == iph->daddr &&
315 int tcp_v4_early_demux(struct sk_buff *skb);
316 int udp_v4_early_demux(struct sk_buff *skb);
318 struct sk_buff *skb, struct net_device *dev,
321 const struct iphdr *iph = ip_hdr(skb);
327 if (ip_can_use_hint(skb, iph, hint)) {
328 err = ip_route_use_hint(skb, iph->daddr, iph->saddr, iph->tos,
335 !skb_dst(skb) &&
336 !skb->sk &&
341 tcp_v4_early_demux(skb);
343 /* must reload iph, skb->head might have changed */
344 iph = ip_hdr(skb);
349 err = udp_v4_early_demux(skb);
353 /* must reload iph, skb->head might have changed */
354 iph = ip_hdr(skb);
364 if (!skb_valid_dst(skb)) {
365 err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
373 IPCB(skb)->flags |= IPSKB_NOPOLICY;
377 if (unlikely(skb_dst(skb)->tclassid)) {
379 u32 idx = skb_dst(skb)->tclassid;
381 st[idx&0xFF].o_bytes += skb->len;
383 st[(idx>>16)&0xFF].i_bytes += skb->len;
387 if (iph->ihl > 5 && ip_rcv_options(skb, dev))
390 rt = skb_rtable(skb);
392 __IP_UPD_PO_STATS(net, IPSTATS_MIB_INMCAST, skb->len);
394 __IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len);
395 } else if (skb->pkt_type == PACKET_BROADCAST ||
396 skb->pkt_type == PACKET_MULTICAST) {
424 kfree_skb_reason(skb, drop_reason);
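
The matches at lines 308-424 cover ip_can_use_hint(), the early-demux prototypes and ip_rcv_finish_core(), the routing step of the receive path: reuse the previous packet's route when the hint's daddr/tos match, try TCP/UDP early demux (reloading iph afterwards because skb->head may have changed), fall back to a full ip_route_input_noref() lookup, then process IP options and bump multicast/broadcast counters. A condensed sketch of that ordering (sysctl guards, classid accounting and detailed drop reasons elided):

/* Condensed sketch of ip_rcv_finish_core(); not verbatim kernel code. */
static int ip_rcv_finish_core(struct net *net, struct sock *sk,
                              struct sk_buff *skb, struct net_device *dev,
                              const struct sk_buff *hint)
{
    const struct iphdr *iph = ip_hdr(skb);

    /* 1. reuse the previous packet's route if daddr/tos match the hint */
    if (ip_can_use_hint(skb, iph, hint) &&
        ip_route_use_hint(skb, iph->daddr, iph->saddr, iph->tos, dev, hint))
        goto drop;

    /* 2. early demux: find the socket (and its cached dst) before routing */
    if (!skb_dst(skb) && !skb->sk && !ip_is_fragment(iph)) {
        if (iph->protocol == IPPROTO_TCP)
            tcp_v4_early_demux(skb);
        else if (iph->protocol == IPPROTO_UDP)
            udp_v4_early_demux(skb);
        iph = ip_hdr(skb);      /* must reload: skb->head may have changed */
    }

    /* 3. full route lookup only if nothing above attached a dst */
    if (!skb_valid_dst(skb) &&
        ip_route_input_noref(skb, iph->daddr, iph->saddr, iph->tos, dev))
        goto drop;

    /* 4. IP options (multicast/broadcast stats updates elided) */
    if (iph->ihl > 5 && ip_rcv_options(skb, dev))
        goto drop;

    return NET_RX_SUCCESS;
drop:
    kfree_skb(skb);
    return NET_RX_DROP;
}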
435 static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
437 struct net_device *dev = skb->dev;
441 * skb to its handler for processing
443 skb = l3mdev_ip_rcv(skb);
444 if (!skb)
447 ret = ip_rcv_finish_core(net, sk, skb, dev, NULL);
449 ret = dst_input(skb);
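
Lines 435-449 are ip_rcv_finish(), the continuation run after the NF_INET_PRE_ROUTING hook: an ingress device enslaved to an L3 master (VRF) may take over the skb in l3mdev_ip_rcv(), otherwise ip_rcv_finish_core() runs without a hint and dst_input() invokes the routed destination, typically ip_local_deliver() or ip_forward(). Roughly:

/* Sketch of ip_rcv_finish(); matches the excerpted lines above. */
static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
    struct net_device *dev = skb->dev;
    int ret;

    /* an enslaved ingress device hands the skb to its L3 master (VRF) */
    skb = l3mdev_ip_rcv(skb);
    if (!skb)
        return NET_RX_SUCCESS;

    ret = ip_rcv_finish_core(net, sk, skb, dev, NULL);
    if (ret != NET_RX_DROP)
        ret = dst_input(skb);   /* ip_local_deliver() or ip_forward() */
    return ret;
}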
456 static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
465 if (skb->pkt_type == PACKET_OTHERHOST) {
466 dev_core_stats_rx_otherhost_dropped_inc(skb->dev);
471 __IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);
473 skb = skb_share_check(skb, GFP_ATOMIC);
474 if (!skb) {
480 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
483 iph = ip_hdr(skb);
504 max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
506 if (!pskb_may_pull(skb, iph->ihl*4))
509 iph = ip_hdr(skb);
514 len = iph_totlen(skb, iph);
515 if (skb->len < len) {
524 * Note this now means skb->len holds ntohs(iph->tot_len).
526 if (pskb_trim_rcsum(skb, len)) {
531 iph = ip_hdr(skb);
532 skb->transport_header = skb->network_header + iph->ihl*4;
535 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
536 IPCB(skb)->iif = skb->skb_iif;
539 if (!skb_sk_is_prefetched(skb))
540 skb_orphan(skb);
542 return skb;
552 kfree_skb_reason(skb, drop_reason);
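
Lines 456-552 come from ip_rcv_core(), the per-packet sanity pass shared by ip_rcv() and ip_list_rcv(): drop PACKET_OTHERHOST frames, unshare the skb, linearize the full IP header (pskb_may_pull() twice, reloading iph each time), validate version, header length, checksum and total length, trim link-layer padding with pskb_trim_rcsum(), set the transport header and reset the IP control block. A compressed sketch (drop reasons and SNMP counters elided):

/* Compressed sketch of ip_rcv_core(); not verbatim kernel code. */
static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
{
    const struct iphdr *iph;
    u32 len;

    if (skb->pkt_type == PACKET_OTHERHOST)
        goto drop;                          /* not addressed to us at L2 */

    skb = skb_share_check(skb, GFP_ATOMIC);
    if (!skb)
        goto out;

    if (!pskb_may_pull(skb, sizeof(struct iphdr)))
        goto drop;
    iph = ip_hdr(skb);

    if (iph->ihl < 5 || iph->version != 4)
        goto drop;

    if (!pskb_may_pull(skb, iph->ihl * 4))  /* header including options */
        goto drop;
    iph = ip_hdr(skb);                      /* reload: head may have moved */

    if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
        goto drop;

    len = iph_totlen(skb, iph);
    if (skb->len < len || len < iph->ihl * 4)
        goto drop;

    if (pskb_trim_rcsum(skb, len))          /* strip link-layer padding */
        goto drop;

    iph = ip_hdr(skb);
    skb->transport_header = skb->network_header + iph->ihl * 4;

    memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
    IPCB(skb)->iif = skb->skb_iif;

    if (!skb_sk_is_prefetched(skb))
        skb_orphan(skb);                    /* detach from any owning socket */

    return skb;
drop:
    kfree_skb(skb);
out:
    return NULL;
}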
560 int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
565 skb = ip_rcv_core(skb, net);
566 if (skb == NULL)
570 net, NULL, skb, dev, NULL,
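
Lines 560-570 are ip_rcv() itself, the packet_type handler for IPv4: it runs the ip_rcv_core() checks and then enters the NF_INET_PRE_ROUTING hook with ip_rcv_finish() as the continuation. Roughly:

/* Sketch of ip_rcv(); matches the excerpted lines above. */
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
           struct net_device *orig_dev)
{
    struct net *net = dev_net(dev);

    skb = ip_rcv_core(skb, net);
    if (skb == NULL)
        return NET_RX_DROP;

    return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
                   net, NULL, skb, dev, NULL,
                   ip_rcv_finish);
}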
576 struct sk_buff *skb, *next;
578 list_for_each_entry_safe(skb, next, head, list) {
579 skb_list_del_init(skb);
580 dst_input(skb);
585 struct sk_buff *skb, int rt_type)
588 IPCB(skb)->flags & IPSKB_MULTIPATH)
591 return skb;
597 struct sk_buff *skb, *next, *hint = NULL;
602 list_for_each_entry_safe(skb, next, head, list) {
603 struct net_device *dev = skb->dev;
606 skb_list_del_init(skb);
608 * skb to its handler for processing
610 skb = l3mdev_ip_rcv(skb);
611 if (!skb)
613 if (ip_rcv_finish_core(net, sk, skb, dev, hint) == NET_RX_DROP)
616 dst = skb_dst(skb);
618 hint = ip_extract_route_hint(net, skb,
628 list_add_tail(&skb->list, &sublist);
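
Lines 576-628 belong to the list-based (batched) receive path: ip_sublist_rcv_finish() feeds a finished sublist to dst_input() one skb at a time, ip_extract_route_hint() decides whether a just-routed skb may serve as a route hint for the next packet (it refuses when custom FIB rules, broadcast routes or multipath are involved), and ip_list_rcv_finish() groups consecutive packets that resolved to the same dst. A condensed sketch of the batching loop:

/* Condensed sketch of ip_list_rcv_finish(); mirrors the excerpts above. */
static void ip_list_rcv_finish(struct net *net, struct sock *sk,
                               struct list_head *head)
{
    struct sk_buff *skb, *next, *hint = NULL;
    struct dst_entry *curr_dst = NULL;
    LIST_HEAD(sublist);

    list_for_each_entry_safe(skb, next, head, list) {
        struct net_device *dev = skb->dev;
        struct dst_entry *dst;

        skb_list_del_init(skb);
        skb = l3mdev_ip_rcv(skb);       /* VRF may consume the skb */
        if (!skb)
            continue;
        if (ip_rcv_finish_core(net, sk, skb, dev, hint) == NET_RX_DROP)
            continue;

        dst = skb_dst(skb);
        if (curr_dst != dst) {
            /* route changed: flush the old sublist, start a new one */
            hint = ip_extract_route_hint(net, skb,
                                         ((struct rtable *)dst)->rt_type);
            if (!list_empty(&sublist))
                ip_sublist_rcv_finish(&sublist);
            INIT_LIST_HEAD(&sublist);
            curr_dst = dst;
        }
        list_add_tail(&skb->list, &sublist);
    }
    /* dispatch the final sublist */
    ip_sublist_rcv_finish(&sublist);
}

The hint is what pays for the batching: back-to-back packets of the same flow can take ip_route_use_hint() in ip_rcv_finish_core() instead of a full FIB lookup.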
648 struct sk_buff *skb, *next;
652 list_for_each_entry_safe(skb, next, head, list) {
653 struct net_device *dev = skb->dev;
656 skb_list_del_init(skb);
657 skb = ip_rcv_core(skb, net);
658 if (skb == NULL)
670 list_add_tail(&skb->list, &sublist);
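
Finally, lines 648-670 are from ip_list_rcv(), the list_func counterpart of ip_rcv(): each skb passes through ip_rcv_core(), consecutive packets from the same device and netns are collected into a sublist, and each sublist goes to ip_sublist_rcv(), which in the full source runs the PRE_ROUTING hook once over the batch before ip_list_rcv_finish(). Roughly:

/* Sketch of ip_list_rcv(); mirrors the excerpts above. */
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
                 struct net_device *orig_dev)
{
    struct net_device *curr_dev = NULL;
    struct net *curr_net = NULL;
    struct sk_buff *skb, *next;
    LIST_HEAD(sublist);

    list_for_each_entry_safe(skb, next, head, list) {
        struct net_device *dev = skb->dev;
        struct net *net = dev_net(dev);

        skb_list_del_init(skb);
        skb = ip_rcv_core(skb, net);
        if (skb == NULL)
            continue;

        if (curr_dev != dev || curr_net != net) {
            /* device or netns changed: flush the current batch */
            if (!list_empty(&sublist))
                ip_sublist_rcv(&sublist, curr_dev, curr_net);
            INIT_LIST_HEAD(&sublist);
            curr_dev = dev;
            curr_net = net;
        }
        list_add_tail(&skb->list, &sublist);
    }
    if (!list_empty(&sublist))
        ip_sublist_rcv(&sublist, curr_dev, curr_net);
}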