Lines Matching defs:head (net/core/dev.c)

300 struct hlist_head *head = dev_name_hash(net, name);
303 hlist_for_each_entry(name_node, head, hlist)
312 struct hlist_head *head = dev_name_hash(net, name);
315 hlist_for_each_entry_rcu(name_node, head, hlist)
339 /* The node that holds dev->name acts as the head of the per-device list. */
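
Lines 300-339 are the device-name lookup helpers (netdev_name_node_lookup() and its RCU twin): dev_name_hash() picks one bucket out of a per-netns array of hlist_head, and the walk touches only that chain. The comment at line 339 notes that the name node embedded in each device doubles as the head of that device's alias list. Below is a minimal sketch of the same bucket-walk pattern; name_buckets[], name_node_hash() and struct name_node are illustrative stand-ins, while the real code hashes with full_name_hash() into net->dev_name_head[]:

#include <linux/list.h>
#include <linux/string.h>
#include <linux/stringhash.h>		/* full_name_hash() */

#define NAME_HASH_BITS 8

static struct hlist_head name_buckets[1 << NAME_HASH_BITS];

struct name_node {			/* stand-in for netdev_name_node */
        struct hlist_node hlist;
        const char *name;
};

static struct hlist_head *name_node_hash(const char *name)
{
        unsigned int hash = full_name_hash(NULL, name, strlen(name));

        /* dev_name_hash() folds the hash with hash_32(); a mask does here. */
        return &name_buckets[hash & ((1 << NAME_HASH_BITS) - 1)];
}

/* Writers must be serialized by the caller (dev.c relies on RTNL). */
static struct name_node *name_node_lookup(const char *name)
{
        struct hlist_head *head = name_node_hash(name);
        struct name_node *node;

        hlist_for_each_entry(node, head, hlist) {
                if (!strcmp(node->name, name))
                        return node;
        }
        return NULL;
}
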
576 struct list_head *head = ptype_head(pt);
579 list_add_rcu(&pt->list, head);
599 struct list_head *head = ptype_head(pt);
604 list_for_each_entry(pt1, head, list) {
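
Lines 576-604 belong to dev_add_pack() and __dev_remove_pack(): ptype_head() maps a packet_type to the list it lives on, insertion uses list_add_rcu() so receive-path readers never block, and removal re-finds the entry under the writer lock with a plain list_for_each_entry(). A sketch of that add/remove pairing, assuming proto_list, proto_lock and struct proto_entry as stand-in names (dev.c uses ptype_lock, and follows removal with synchronize_net() before freeing):

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>

static LIST_HEAD(proto_list);		/* stand-in for a ptype list */
static DEFINE_SPINLOCK(proto_lock);	/* dev.c: ptype_lock */

struct proto_entry {			/* stand-in for packet_type */
        struct list_head list;
        int type;
};

static void proto_add(struct proto_entry *pe)
{
        spin_lock(&proto_lock);
        list_add_rcu(&pe->list, &proto_list);	/* readers see it at once */
        spin_unlock(&proto_lock);
}

static void proto_remove(struct proto_entry *pe)
{
        struct proto_entry *pe1;

        spin_lock(&proto_lock);
        /* Re-find the entry on the list, as __dev_remove_pack() does. */
        list_for_each_entry(pe1, &proto_list, list) {
                if (pe1 == pe) {
                        list_del_rcu(&pe1->list);
                        break;
                }
        }
        spin_unlock(&proto_lock);
        /* Wait for readers (synchronize_net()) before freeing pe. */
}
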
832 struct hlist_head *head = dev_index_hash(net, ifindex);
834 hlist_for_each_entry(dev, head, index_hlist)
856 struct hlist_head *head = dev_index_hash(net, ifindex);
858 hlist_for_each_entry_rcu(dev, head, index_hlist)
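
Lines 832-858 repeat the bucket walk for ifindex lookups: __dev_get_by_index() runs under RTNL with plain hlist_for_each_entry(), while dev_get_by_index_rcu() uses hlist_for_each_entry_rcu() and can run lock-free provided the caller is inside an RCU read-side critical section. A sketch of the lockless variant; struct idx_node stands in for the index_hlist side of net_device:

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct idx_node {
        struct hlist_node hlist;
        int ifindex;
};

/* Must be called between rcu_read_lock() and rcu_read_unlock(); the
 * result may be freed once the read-side section ends, so take a
 * reference before leaving it if the object must live longer.
 */
static struct idx_node *idx_lookup_rcu(struct hlist_head *head, int ifindex)
{
        struct idx_node *node;

        hlist_for_each_entry_rcu(node, head, hlist) {
                if (node->ifindex == ifindex)
                        return node;
        }
        return NULL;
}
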
1522 static void __dev_close_many(struct list_head *head)
1529 list_for_each_entry(dev, head, close_list) {
1546 dev_deactivate_many(head);
1548 list_for_each_entry(dev, head, close_list) {
1575 void dev_close_many(struct list_head *head, bool unlink)
1580 list_for_each_entry_safe(dev, tmp, head, close_list)
1584 __dev_close_many(head);
1586 list_for_each_entry_safe(dev, tmp, head, close_list) {
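
Lines 1522-1586 are the batched shutdown path. dev_close_many() first prunes devices that are already down off the caller's batch, hands the rest to __dev_close_many() (which calls dev_deactivate_many() once for all of them), then walks the batch again to finish each device, optionally unlinking it. Both outer walks use list_for_each_entry_safe() because the loop body can delete the entry it is standing on. A shape-preserving sketch, with struct item and close_many() as illustrative names:

#include <linux/list.h>
#include <linux/types.h>

struct item {
        struct list_head node;
        bool active;
};

/* Mirrors dev_close_many(): prune, act on the whole batch, then
 * optionally unlink each survivor. The _safe iterator caches the
 * next pointer, so list_del_init() in the body is harmless.
 */
static void close_many(struct list_head *head, bool unlink)
{
        struct item *it, *tmp;

        /* Pass 1: drop entries that need no work (cf. line 1580). */
        list_for_each_entry_safe(it, tmp, head, node) {
                if (!it->active)
                        list_del_init(&it->node);
        }

        /* ... act once on everything still queued (cf. line 1584) ... */

        /* Pass 2: finish and, if asked, detach each entry. */
        list_for_each_entry_safe(it, tmp, head, node) {
                it->active = false;
                if (unlink)
                        list_del_init(&it->node);
        }
}
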
3698 struct sk_buff *next, *head = NULL, *tail;
3711 if (!head)
3712 head = skb;
3720 return head;
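
Lines 3698-3720 (validate_xmit_skb_list()) show the head/tail idiom for skb chains, which are singly linked through skb->next rather than through a list_head: peel one skb at a time off the input chain and append the survivors in O(1) by tracking both the first kept skb (head, the return value) and the last one (tail, the append point). A sketch of the shape, with the per-skb validation step elided:

#include <linux/skbuff.h>

static struct sk_buff *rebuild_chain(struct sk_buff *skb)
{
        struct sk_buff *next, *head = NULL, *tail = NULL;

        for (; skb != NULL; skb = next) {
                next = skb->next;
                skb_mark_not_on_list(skb);	/* skb->next = NULL */

                /* ... validate/transform skb here; skip it to drop ... */

                if (!head)
                        head = skb;		/* first kept skb */
                else
                        tail->next = skb;	/* O(1) append */
                tail = skb;
        }
        return head;
}
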
4874 /* SKB "head" area always has tailroom for skb_shared_info */
5179 struct Qdisc *head;
5182 head = sd->output_queue;
5189 while (head) {
5190 struct Qdisc *q = head;
5193 head = head->next_sched;
5195 /* We need to make sure head->next_sched is read
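
Lines 5179-5195 are net_tx_action() draining the per-CPU qdisc queue, another singly linked list chained through ->next_sched. The idiom: detach the entire queue in O(1) under protection (dev.c disables IRQs, because the queue is appended to from hardirq context), then walk it with nothing held. The truncated comment at line 5195 is about ordering: the link must be read before the qdisc is released for re-scheduling, or its next pointer could be rewritten underneath the walker. A sketch with a spinlock standing in for the IRQ disabling, and struct work_node as an illustrative payload:

#include <linux/spinlock.h>

struct work_node {
        struct work_node *next;
        /* ... payload ... */
};

static struct work_node *pending;	/* stand-in for sd->output_queue */
static DEFINE_SPINLOCK(pending_lock);

static void drain_pending(void)
{
        struct work_node *head;

        /* Detach the whole queue in O(1), then walk it unlocked. */
        spin_lock(&pending_lock);
        head = pending;
        pending = NULL;
        spin_unlock(&pending_lock);

        while (head) {
                struct work_node *n = head;

                /* Read the link before handing n back: once released,
                 * n may be re-queued and n->next rewritten (this is
                 * what the comment at line 5195 guards against).
                 */
                head = n->next;
                n->next = NULL;
                /* ... process n ... */
        }
}
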
5587 static inline void __netif_receive_skb_list_ptype(struct list_head *head,
5595 if (list_empty(head))
5599 ip_list_rcv, head, pt_prev, orig_dev);
5601 list_for_each_entry_safe(skb, next, head, list) {
5607 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
5626 list_for_each_entry_safe(skb, next, head, list) {
5674 static void __netif_receive_skb_list(struct list_head *head)
5680 list_for_each_entry_safe(skb, next, head, list) {
5685 list_cut_before(&sublist, head, &skb->list);
5697 if (!list_empty(head))
5698 __netif_receive_skb_list_core(head, pfmemalloc);
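
Lines 5587-5698 are list-based receive. __netif_receive_skb_list() walks the batch with list_for_each_entry_safe() and, whenever the pfmemalloc property flips, uses list_cut_before() to split everything before the current skb onto a sublist and processes that run in one go; __netif_receive_skb_list_core() plays the same trick keyed on (pt_prev, orig_dev), so per-batch setup is paid once per run of like packets rather than once per packet. A sketch of the split-on-key-change pattern; struct pkt, dispatch() and dispatch_by_key() are illustrative, and real keys are assumed non-negative:

#include <linux/list.h>

struct pkt {
        struct list_head list;
        int key;		/* stand-in for skb_pfmemalloc(skb) etc. */
};

static void dispatch(struct list_head *batch, int key)
{
        /* ... handle one same-key batch ... */
}

static void dispatch_by_key(struct list_head *head)
{
        struct pkt *p, *next;
        int cur_key = -1;

        list_for_each_entry_safe(p, next, head, list) {
                if (p->key != cur_key) {
                        struct list_head sublist;

                        /* Cut [start, p) off head; sublist is
                         * initialized by list_cut_before() itself. */
                        list_cut_before(&sublist, head, &p->list);
                        if (!list_empty(&sublist))
                                dispatch(&sublist, cur_key);
                        cur_key = p->key;
                }
        }
        /* Whatever remains is one final same-key run. */
        if (!list_empty(head))
                dispatch(head, cur_key);
}
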
5760 void netif_receive_skb_list_internal(struct list_head *head)
5766 list_for_each_entry_safe(skb, next, head, list) {
5772 list_splice_init(&sublist, head);
5777 list_for_each_entry_safe(skb, next, head, list) {
5789 __netif_receive_skb_list(head);
5823 * @head: list of skbs to process.
5831 void netif_receive_skb_list(struct list_head *head)
5835 if (list_empty(head))
5838 list_for_each_entry(skb, head, list)
5841 netif_receive_skb_list_internal(head);
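
Lines 5760-5841 are the entry points. netif_receive_skb_list_internal() runs each filtering stage as one pass over the whole batch: a safe walk detaches every skb and re-adds the survivors to a private sublist (consumed skbs are simply never re-added), then list_splice_init() puts the survivors back on the caller's head so the next stage sees the same list. The public netif_receive_skb_list() just short-circuits on an empty list and traces each entry before delegating. A sketch of one such filter pass, with struct pkt and filter_batch() as illustrative names:

#include <linux/list.h>
#include <linux/types.h>

struct pkt {
        struct list_head list;
        bool drop;
};

static void filter_batch(struct list_head *head)
{
        struct pkt *p, *next;
        LIST_HEAD(sublist);

        list_for_each_entry_safe(p, next, head, list) {
                list_del(&p->list);
                if (!p->drop)
                        list_add_tail(&p->list, &sublist);
                /* dropped entries are simply not re-added */
        }
        /* Hand the survivors back on the caller's list head. */
        list_splice_init(&sublist, head);
}
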
10944 * @head: list
10948 * If head is not NULL, the device is queued to be unregistered later.
10954 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
10958 if (head) {
10959 list_move_tail(&dev->unreg_list, head);
10969 void unregister_netdevice_many_notify(struct list_head *head,
10978 if (list_empty(head))
10981 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
10999 list_for_each_entry(dev, head, unreg_list)
11003 list_for_each_entry(dev, head, unreg_list) {
11014 list_for_each_entry(dev, head, unreg_list) {
11067 list_for_each_entry(dev, head, unreg_list) {
11072 list_del(head);
11077 * @head: list of devices
11082 void unregister_netdevice_many(struct list_head *head)
11084 unregister_netdevice_many_notify(head, 0, NULL);
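
Lines 10944-11084 are batched unregistration. unregister_netdevice_queue() does nothing but move the device onto the caller's list with list_move_tail(); unregister_netdevice_many_notify() later pays the expensive parts (shutdown, notifiers, synchronization) once for the whole batch and finishes with list_del(head) at line 11072, which leaves the caller's list head poisoned rather than reinitialized. A usage sketch of the public pair under the RTNL lock they require; my_devs and n_devs are illustrative:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void remove_all(struct net_device **my_devs, int n_devs)
{
        LIST_HEAD(kill_list);
        int i;

        rtnl_lock();
        for (i = 0; i < n_devs; i++)
                unregister_netdevice_queue(my_devs[i], &kill_list);
        /* One batch teardown; kill_list is poisoned afterwards and
         * must be re-initialized before any reuse.
         */
        unregister_netdevice_many(&kill_list);
        rtnl_unlock();
}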