Lines matching refs:napi
129 #include <trace/events/napi.h>
1002 struct napi_struct *napi;
1009 napi = napi_by_id(napi_id);
1011 return napi ? napi->dev : NULL;
4285 struct napi_struct *napi)
4287 list_add_tail(&napi->poll_list, &sd->poll_list);
5778 static void gro_normal_list(struct napi_struct *napi)
5780 if (!napi->rx_count)
5782 netif_receive_skb_list_internal(&napi->rx_list);
5783 INIT_LIST_HEAD(&napi->rx_list);
5784 napi->rx_count = 0;
5790 static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
5792 list_add_tail(&skb->list, &napi->rx_list);
5793 napi->rx_count += segs;
5794 if (napi->rx_count >= gro_normal_batch)
5795 gro_normal_list(napi);
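
The gro_normal_one()/gro_normal_list() fragments above (5778-5795) show the GRO "normal" path batching finished skbs on napi->rx_list and handing the whole list to the stack once the queued segment count reaches gro_normal_batch. Below is a minimal userspace sketch of that batching pattern; the names (struct rx_batch, batch_one, batch_flush, batch_limit), the singly linked list, and the printf delivery step are stand-ins, not the kernel's types.

    #include <stdio.h>
    #include <stddef.h>

    /* Stand-in for a received packet; only the list linkage matters here. */
    struct pkt { struct pkt *next; int id; };

    struct rx_batch {
        struct pkt *head, **tail;   /* singly linked rx_list           */
        int count;                  /* segments queued, like rx_count  */
    };

    static const int batch_limit = 8;   /* plays the role of gro_normal_batch */

    /* Deliver the whole batch at once, then reset it (gro_normal_list()). */
    static void batch_flush(struct rx_batch *b)
    {
        if (!b->count)
            return;
        for (struct pkt *p = b->head; p; p = p->next)
            printf("deliver pkt %d\n", p->id);
        b->head = NULL;
        b->tail = &b->head;
        b->count = 0;
    }

    /* Queue one packet worth 'segs' segments (gro_normal_one()). */
    static void batch_one(struct rx_batch *b, struct pkt *p, int segs)
    {
        p->next = NULL;
        *b->tail = p;
        b->tail = &p->next;
        b->count += segs;
        if (b->count >= batch_limit)
            batch_flush(b);
    }

    int main(void)
    {
        struct rx_batch b = { .tail = &b.head };
        struct pkt pkts[5];

        for (int i = 0; i < 5; i++) {
            pkts[i].id = i;
            batch_one(&b, &pkts[i], 2);   /* each "skb" counts as 2 segments */
        }
        batch_flush(&b);                  /* flush the remainder at end of poll */
        return 0;
    }
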
5800 static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
5833 gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
5837 static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
5840 struct list_head *head = &napi->gro_hash[index].list;
5847 napi_gro_complete(napi, skb);
5848 napi->gro_hash[index].count--;
5851 if (!napi->gro_hash[index].count)
5852 __clear_bit(index, &napi->gro_bitmask);
5855 /* napi->gro_hash[].list contains packets ordered by age.
5859 void napi_gro_flush(struct napi_struct *napi, bool flush_old)
5861 unsigned long bitmask = napi->gro_bitmask;
5867 __napi_gro_flush_chain(napi, base, flush_old);
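
napi_gro_flush() (5859-5867) snapshots gro_bitmask and visits only the hash buckets whose bit is set, while __napi_gro_flush_chain() (5837-5852) clears a bucket's bit once its count drops to zero. A simplified model of that bitmask walk follows; it flushes unconditionally (the flush_old=false case), uses POSIX ffs(), and the per-bucket counters and the "complete one packet" step are placeholders.

    #include <stdio.h>
    #include <strings.h>   /* POSIX ffs() */

    #define BUCKETS 8

    static int           bucket_count[BUCKETS];
    static unsigned long bitmask;

    /* Complete every entry queued in one hash chain, then clear its bit
     * once the chain is empty (__napi_gro_flush_chain(), flush_old=false). */
    static void flush_chain(unsigned int idx)
    {
        while (bucket_count[idx]) {
            printf("complete one held packet in bucket %u\n", idx);
            bucket_count[idx]--;
        }
        bitmask &= ~(1UL << idx);
    }

    /* Visit only the buckets whose bit is set, lowest first (napi_gro_flush()). */
    static void flush_all(void)
    {
        unsigned long bits = bitmask;
        unsigned int i, base = ~0U;

        while ((i = ffs((int)bits)) != 0) {
            bits >>= i;
            base += i;
            flush_chain(base);
        }
    }

    int main(void)
    {
        bucket_count[1] = 3;
        bucket_count[5] = 1;
        bitmask = (1UL << 1) | (1UL << 5);

        flush_all();
        printf("bitmask afterwards: %#lx\n", bitmask);
        return 0;
    }
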
5872 static struct list_head *gro_list_prepare(struct napi_struct *napi,
5880 head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list;
5963 static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
5975 /* Do not adjust napi->gro_hash[].count, caller is adding a new
5979 napi_gro_complete(napi, oldest);
5986 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
6001 gro_head = gro_list_prepare(napi, skb);
6055 napi_gro_complete(napi, pp);
6056 napi->gro_hash[hash].count--;
6065 if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
6066 gro_flush_oldest(napi, gro_head);
6068 napi->gro_hash[hash].count++;
6082 if (napi->gro_hash[hash].count) {
6083 if (!test_bit(hash, &napi->gro_bitmask))
6084 __set_bit(hash, &napi->gro_bitmask);
6085 } else if (test_bit(hash, &napi->gro_bitmask)) {
6086 __clear_bit(hash, &napi->gro_bitmask);
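
The dev_gro_receive() fragments at 6065-6086 show two pieces of bookkeeping around holding a packet for merging: a bucket already at MAX_GRO_SKBS has its oldest entry completed so the newcomer takes its slot (the count is left unchanged, per the comment at 5975), and the bucket's bit in gro_bitmask is kept in step with its count. A compressed sketch of just that bookkeeping, with invented names (hold_packet, MAX_PER_BUCKET) standing in for the real ones:

    #include <stdio.h>

    #define BUCKETS        8
    #define MAX_PER_BUCKET 8     /* stands in for MAX_GRO_SKBS */

    static int           bucket_count[BUCKETS];
    static unsigned long bitmask;

    /* Complete the oldest held packet so the newcomer can take its slot;
     * the count is deliberately left alone (see the comment at 5975). */
    static void flush_oldest(unsigned int b)
    {
        printf("bucket %u full: completing its oldest packet\n", b);
    }

    /* The bookkeeping around holding a packet for merging, as in the
     * dev_gro_receive() fragments above. */
    static void hold_packet(unsigned int hash)
    {
        unsigned int b = hash % BUCKETS;

        if (bucket_count[b] >= MAX_PER_BUCKET)
            flush_oldest(b);          /* one out, one in: count unchanged */
        else
            bucket_count[b]++;

        /* Keep the bucket's bit in the bitmask in sync with its count. */
        if (bucket_count[b])
            bitmask |= 1UL << b;
        else
            bitmask &= ~(1UL << b);
    }

    int main(void)
    {
        for (int i = 0; i < 10; i++)
            hold_packet(3);
        printf("bucket 3 holds %d, bitmask %#lx\n", bucket_count[3], bitmask);
        return 0;
    }
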
6132 static gro_result_t napi_skb_finish(struct napi_struct *napi,
6138 gro_normal_one(napi, skb, 1);
6161 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
6165 skb_mark_napi_id(skb, napi);
6170 ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
6177 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
6187 skb->dev = napi->dev;
6199 napi->skb = skb;
6202 struct sk_buff *napi_get_frags(struct napi_struct *napi)
6204 struct sk_buff *skb = napi->skb;
6207 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
6209 napi->skb = skb;
6210 skb_mark_napi_id(skb, napi);
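
napi_get_frags() (6202-6210) hands back the skb cached in napi->skb, allocating only when the cache is empty, while napi_reuse_skb() (6177-6199) resets a recycled skb and parks it back in that slot; napi_frags_skb() (6253-6259, just below) clears the slot when a packet is actually consumed. A small userspace model of that cache-or-allocate pattern, with a stand-in buffer type and allocator:

    #include <stdio.h>
    #include <stdlib.h>

    struct buf { int len; };

    struct ctx {
        struct buf *cached;   /* plays the role of napi->skb */
    };

    /* Allocate a buffer only if nothing is cached (napi_get_frags()). */
    static struct buf *ctx_get_buf(struct ctx *c)
    {
        struct buf *b = c->cached;

        if (!b) {
            b = calloc(1, sizeof(*b));
            if (!b)
                return NULL;
            c->cached = b;
        }
        return b;
    }

    /* Return a buffer to the cache after resetting it (napi_reuse_skb()). */
    static void ctx_reuse_buf(struct ctx *c, struct buf *b)
    {
        b->len = 0;
        c->cached = b;
    }

    /* Hand a buffer to the consumer: the cache slot is cleared so the next
     * ctx_get_buf() allocates again (napi_frags_skb() clears napi->skb). */
    static struct buf *ctx_take_buf(struct ctx *c)
    {
        struct buf *b = c->cached;

        c->cached = NULL;
        return b;
    }

    int main(void)
    {
        struct ctx c = { 0 };
        struct buf *b = ctx_get_buf(&c);   /* allocates: cache was empty      */

        if (!b)
            return 1;
        ctx_take_buf(&c);                  /* consumed: cache is empty again  */
        ctx_reuse_buf(&c, b);              /* recycled back into the cache    */
        printf("cached buffer: %p\n", (void *)c.cached);
        free(c.cached);
        return 0;
    }
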
6217 static gro_result_t napi_frags_finish(struct napi_struct *napi,
6227 gro_normal_one(napi, skb, 1);
6231 napi_reuse_skb(napi, skb);
6238 napi_reuse_skb(napi, skb);
6253 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
6255 struct sk_buff *skb = napi->skb;
6259 napi->skb = NULL;
6268 __func__, napi->dev->name);
6269 napi_reuse_skb(napi, skb);
6290 gro_result_t napi_gro_frags(struct napi_struct *napi)
6293 struct sk_buff *skb = napi_frags_skb(napi);
6300 ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
6376 static int process_backlog(struct napi_struct *napi, int quota)
6378 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
6390 napi->weight = READ_ONCE(dev_rx_weight);
6409 * only current cpu owns and manipulates this napi,
6415 napi->state = 0;
6446 * napi_schedule_prep - check if napi can be scheduled
6447 * @n: napi context
6503 * 1) Don't let napi dequeue from the cpu poll list
6548 * because we will call napi->poll() one more time.
6571 struct napi_struct *napi;
6573 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
6574 if (napi->napi_id == napi_id)
6575 return napi;
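
napi_by_id() (6571-6575) resolves an id by walking a single chain of the napi_hash table, which napi_hash_add() (6695-6710, further down) populates by assigning an id and inserting the entry at the head of the matching bucket. A plain-C sketch of that id-to-entry hash; the fixed-size table and plain pointers here replace the kernel's RCU hlists, and the 16-bucket size is arbitrary.

    #include <stdio.h>
    #include <stddef.h>

    #define HASH_BUCKETS 16

    struct entry {
        unsigned int  id;
        struct entry *next;
    };

    static struct entry *table[HASH_BUCKETS];

    /* Record the id and publish the entry at the head of its bucket
     * (napi_hash_add() does this under a lock, with an RCU hlist). */
    static void hash_add(struct entry *e, unsigned int id)
    {
        unsigned int b = id % HASH_BUCKETS;

        e->id = id;
        e->next = table[b];
        table[b] = e;
    }

    /* Walk only the chain the id hashes to (napi_by_id()). */
    static struct entry *hash_lookup(unsigned int id)
    {
        for (struct entry *e = table[id % HASH_BUCKETS]; e; e = e->next)
            if (e->id == id)
                return e;
        return NULL;
    }

    int main(void)
    {
        struct entry a, b;

        hash_add(&a, 101);
        hash_add(&b, 117);   /* ids may share a bucket; the chain walk copes */
        printf("117 -> %s\n", hash_lookup(117) ? "found" : "missing");
        printf("999 -> %s\n", hash_lookup(999) ? "found" : "missing");
        return 0;
    }
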
6584 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
6591 * Since we are about to call napi->poll() once more, we can safely
6597 clear_bit(NAPI_STATE_MISSED, &napi->state);
6598 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
6605 rc = napi->poll(napi, BUSY_POLL_BUDGET);
6606 /* We can't gro_normal_list() here, because napi->poll() might have
6607 * rearmed the napi (napi_complete_done()) in which case it could
6610 trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
6613 /* As the whole budget was spent, we still own the napi so can
6616 gro_normal_list(napi);
6617 __napi_schedule(napi);
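
busy_poll_stop() (6584-6617) clears NAPI_STATE_MISSED and NAPI_STATE_IN_BUSY_POLL, runs napi->poll() one last time with BUSY_POLL_BUDGET, and only flushes the pending rx batch and reschedules when that poll consumed its whole budget; a shorter result means the poll completed and re-armed itself and may already be running elsewhere. A stubbed-out sketch of that control flow; the hooks (flush_pending, reschedule, fake_poll) and the flag names are placeholders, not kernel calls.

    #include <stdio.h>

    #define POLL_BUDGET 8   /* stands in for BUSY_POLL_BUDGET */

    enum { FLAG_MISSED = 1 << 0, FLAG_IN_BUSY_POLL = 1 << 1 };

    struct pollctx {
        unsigned int state;
        int (*poll)(struct pollctx *, int budget);
    };

    /* Placeholder hooks standing in for gro_normal_list()/__napi_schedule(). */
    static void flush_pending(struct pollctx *c) { (void)c; puts("flush rx batch"); }
    static void reschedule(struct pollctx *c)    { (void)c; puts("reschedule poll"); }

    static void busy_poll_stop_sketch(struct pollctx *c)
    {
        int done;

        /* About to poll once more, so the "missed a wakeup" and
         * "busy polling" flags can be cleared up front. */
        c->state &= ~(FLAG_MISSED | FLAG_IN_BUSY_POLL);

        done = c->poll(c, POLL_BUDGET);

        /* A poll that used less than its budget has completed and re-armed
         * itself, and may already be running elsewhere: leave it alone.
         * Only a fully spent budget means this caller still owns the
         * context and may flush its batch and reschedule it. */
        if (done == POLL_BUDGET) {
            flush_pending(c);
            reschedule(c);
        }
    }

    static int fake_poll(struct pollctx *c, int budget) { (void)c; return budget; }

    int main(void)
    {
        struct pollctx c = { .state = FLAG_MISSED | FLAG_IN_BUSY_POLL,
                             .poll  = fake_poll };

        busy_poll_stop_sketch(&c);
        return 0;
    }
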
6627 int (*napi_poll)(struct napi_struct *napi, int budget);
6629 struct napi_struct *napi;
6636 napi = napi_by_id(napi_id);
6637 if (!napi)
6646 unsigned long val = READ_ONCE(napi->state);
6648 /* If multiple threads are competing for this napi,
6649 * we avoid dirtying napi->state as much as we can.
6654 if (cmpxchg(&napi->state, val,
6658 have_poll_lock = netpoll_poll_lock(napi);
6659 napi_poll = napi->poll;
6661 work = napi_poll(napi, BUSY_POLL_BUDGET);
6662 trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
6663 gro_normal_list(napi);
6666 __NET_ADD_STATS(dev_net(napi->dev),
6675 busy_poll_stop(napi, have_poll_lock);
6686 busy_poll_stop(napi, have_poll_lock);
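
The busy-poll loop (6627-6686) takes ownership of the napi with as little cache traffic as possible: it reads napi->state once, bails out if the context is already scheduled, already busy-polled, or being disabled, and only then attempts a single cmpxchg (6654) that sets SCHED and IN_BUSY_POLL together. A C11 sketch of that "check first, then one compare-exchange" pattern over a stand-in state word:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum {
        ST_SCHED        = 1 << 0,   /* someone owns the poll              */
        ST_IN_BUSY_POLL = 1 << 1,   /* ...and it is a busy-polling thread */
        ST_DISABLE      = 1 << 2,   /* teardown in progress               */
    };

    static _Atomic unsigned long state;

    /* Try to become the busy-poll owner without dirtying the state word when
     * that cannot succeed: read it, bail out if it is owned or being disabled,
     * and only then attempt a single compare-exchange. */
    static bool try_grab(void)
    {
        unsigned long val = atomic_load(&state);

        if (val & (ST_DISABLE | ST_SCHED | ST_IN_BUSY_POLL))
            return false;

        return atomic_compare_exchange_strong(&state, &val,
                                               val | ST_SCHED | ST_IN_BUSY_POLL);
    }

    int main(void)
    {
        printf("first grab:  %s\n", try_grab() ? "got it" : "busy");
        printf("second grab: %s\n", try_grab() ? "got it" : "busy");
        return 0;
    }
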
6695 static void napi_hash_add(struct napi_struct *napi)
6697 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
6707 napi->napi_id = napi_gen_id;
6709 hlist_add_head_rcu(&napi->napi_hash_node,
6710 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
6716 * is respected before freeing memory containing @napi
6718 static void napi_hash_del(struct napi_struct *napi)
6722 hlist_del_init_rcu(&napi->napi_hash_node);
6729 struct napi_struct *napi;
6731 napi = container_of(timer, struct napi_struct, timer);
6736 if (!napi_disable_pending(napi) &&
6737 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
6738 __napi_schedule_irqoff(napi);
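
napi_watchdog() (6729-6738) reschedules the napi from its timer only if no disable is pending and it wins the test_and_set on NAPI_STATE_SCHED, so a context that is already scheduled or being torn down is left alone. A sketch of that guard with the timer machinery omitted; the flag names and schedule_poll() are stand-ins.

    #include <stdatomic.h>
    #include <stdio.h>

    enum { ST_SCHED = 1 << 0, ST_DISABLE = 1 << 1 };

    static _Atomic unsigned int state;

    static void schedule_poll(void) { puts("poll scheduled"); }

    /* Timer callback: kick the poller again, but only if no teardown is
     * pending and this caller wins the race for the SCHED bit. */
    static void watchdog_fire(void)
    {
        if (atomic_load(&state) & ST_DISABLE)
            return;                              /* disable pending: do nothing */

        if (!(atomic_fetch_or(&state, ST_SCHED) & ST_SCHED))
            schedule_poll();                     /* test-and-set won            */
    }

    int main(void)
    {
        watchdog_fire();   /* wins SCHED -> schedules                 */
        watchdog_fire();   /* SCHED already set -> someone else polls */
        return 0;
    }
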
6743 static void init_gro_hash(struct napi_struct *napi)
6748 INIT_LIST_HEAD(&napi->gro_hash[i].list);
6749 napi->gro_hash[i].count = 0;
6751 napi->gro_bitmask = 0;
6754 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
6757 if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
6760 INIT_LIST_HEAD(&napi->poll_list);
6761 INIT_HLIST_NODE(&napi->napi_hash_node);
6762 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6763 napi->timer.function = napi_watchdog;
6764 init_gro_hash(napi);
6765 napi->skb = NULL;
6766 INIT_LIST_HEAD(&napi->rx_list);
6767 napi->rx_count = 0;
6768 napi->poll = poll;
6772 napi->weight = weight;
6773 napi->dev = dev;
6775 napi->poll_owner = -1;
6777 set_bit(NAPI_STATE_SCHED, &napi->state);
6778 set_bit(NAPI_STATE_NPSVC, &napi->state);
6779 list_add_rcu(&napi->dev_list, &dev->napi_list);
6780 napi_hash_add(napi);
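
The netif_napi_add() fragments (6754-6780) show the registration order: refuse double registration via the LISTED bit, initialize the poll list, watchdog timer, GRO hash, cached skb and rx batch, record poll/weight/dev, start with SCHED and NPSVC set so the new napi stays inert until the driver enables it (those bits are cleared elsewhere, outside the fragments shown), then link it into the device list and the id hash. A condensed, non-kernel sketch of such an init routine over a simplified context; the struct layout and the ctx_register() name are invented for illustration.

    #include <stdbool.h>
    #include <stddef.h>

    #define GRO_BUCKETS 8

    struct gro_bucket { void *chain; int count; };

    struct poll_ctx {
        unsigned int      state;
        int             (*poll)(struct poll_ctx *, int budget);
        int               weight;
        void             *dev;
        void             *cached_skb;
        int               rx_count;
        struct gro_bucket gro[GRO_BUCKETS];
        unsigned long     gro_bitmask;
    };

    enum { ST_LISTED = 1 << 0, ST_SCHED = 1 << 1, ST_NPSVC = 1 << 2 };

    static bool ctx_register(struct poll_ctx *c, void *dev,
                             int (*poll)(struct poll_ctx *, int), int weight)
    {
        if (c->state & ST_LISTED)       /* refuse double registration */
            return false;
        c->state |= ST_LISTED;

        for (int i = 0; i < GRO_BUCKETS; i++) {
            c->gro[i].chain = NULL;     /* init_gro_hash() equivalent */
            c->gro[i].count = 0;
        }
        c->gro_bitmask = 0;
        c->cached_skb  = NULL;
        c->rx_count    = 0;
        c->poll        = poll;
        c->weight      = weight;
        c->dev         = dev;

        /* Start out "scheduled" and "in netpoll service" so nothing polls
         * the context until the driver explicitly enables it. */
        c->state |= ST_SCHED | ST_NPSVC;

        /* Linking into the device list and the id hash table is omitted. */
        return true;
    }

    int main(void)
    {
        struct poll_ctx c = { 0 };

        return ctx_register(&c, NULL, NULL, 64) ? 0 : 1;
    }
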
6800 static void flush_gro_hash(struct napi_struct *napi)
6807 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
6809 napi->gro_hash[i].count = 0;
6814 void __netif_napi_del(struct napi_struct *napi)
6816 if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
6819 napi_hash_del(napi);
6820 list_del_rcu(&napi->dev_list);
6821 napi_free_frags(napi);
6823 flush_gro_hash(napi);
6824 napi->gro_bitmask = 0;
6881 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
10960 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
10964 list_del_init(&napi->poll_list);
10965 if (napi->poll == process_backlog)
10966 napi->state = 0;
10968 ____napi_schedule(sd, napi);
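
The last fragments (10960-10968) come from CPU-hotplug cleanup: every napi still queued on the dead CPU's poll list is unlinked, the per-CPU backlog entry just has its state reset in place, and everything else is rescheduled on the CPU doing the cleanup. A minimal sketch of that "drain one list, special-case one entry" loop, with simplified list and scheduling stand-ins:

    #include <stdio.h>
    #include <stddef.h>

    struct node {
        struct node *next;
        int          is_backlog;   /* the per-CPU backlog gets special handling */
        unsigned int state;
        const char  *name;
    };

    /* Pop entries off the dead CPU's list; requeue real device napis on the
     * surviving CPU, but just reset the old CPU's backlog entry in place. */
    static void migrate(struct node **dead_list, struct node **live_list)
    {
        while (*dead_list) {
            struct node *n = *dead_list;

            *dead_list = n->next;   /* list_del_init() equivalent      */
            n->next = NULL;

            if (n->is_backlog) {
                n->state = 0;       /* backlog is per-CPU: don't move it */
                continue;
            }
            n->next = *live_list;   /* ____napi_schedule() equivalent  */
            *live_list = n;
            printf("moved %s\n", n->name);
        }
    }

    int main(void)
    {
        struct node backlog = { .is_backlog = 1, .name = "backlog" };
        struct node eth0rx  = { .name = "eth0-rx" };
        struct node *dead = &eth0rx, *live = NULL;

        eth0rx.next = &backlog;
        migrate(&dead, &live);
        return 0;
    }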