Lines Matching defs:queue
19 * Fixes in packet dropping, queue length setting and queue wakeup.
20 * Increased default tx queue length.
120 * to serve as one transmit queue for tuntap device. The sock_fprog and
122 * netdevice not for a specific queue (at least I didn't see the requirement for
228 struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
235 spin_lock(&queue->lock);
236 skb_queue_splice_tail_init(queue, &process_queue);
237 spin_unlock(&queue->lock);
245 spin_lock(&queue->lock);
246 skb_queue_splice(&process_queue, queue);
247 spin_unlock(&queue->lock);
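
Editor's note: the matches at 228-247 outline a drain-and-splice pattern: steal the whole shared queue under its lock in one operation, process packets lock-free, then splice any leftovers back to the front so ordering is preserved. A minimal sketch of that pattern follows; everything except the skb_queue_* and NAPI helpers is illustrative.

#include <linux/skbuff.h>
#include <linux/netdevice.h>

/* Drain the shared queue into a local list under the lock, deliver
 * packets without the lock held, and re-splice whatever exceeds the
 * budget back to the head of the shared queue.
 */
static int example_napi_receive(struct napi_struct *napi,
				struct sk_buff_head *queue, int budget)
{
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	/* Grab everything currently queued in one locked operation. */
	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	/* Deliver up to 'budget' packets, lock-free. */
	while (received < budget &&
	       (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		received++;
	}

	/* Anything over budget goes back to the front so ordering holds. */
	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}
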
487 /* TODO: keep queueing to old queue until it's empty? */
671 /* Drop read queue */
736 /* Drop read queue */
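
Editor's note: both "Drop read queue" comments (671, 736) mark per-queue cleanup on detach. A minimal sketch of what such a purge amounts to, assuming the queued skbs live on the socket write queue as the other matches show; purging sk_error_queue as well is an assumption here, and the function name is illustrative.

#include <linux/skbuff.h>
#include <net/sock.h>

/* Free any skbs still parked on the per-queue socket queues.
 * skb_queue_purge() takes the queue lock itself, so no caller
 * locking is needed here.
 */
static void example_drop_read_queue(struct sock *sk)
{
	skb_queue_purge(&sk->sk_write_queue);
	skb_queue_purge(&sk->sk_error_queue);	/* assumption, see note */
}
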
807 /* Setup XDP RX-queue info, for new tfile getting attached */
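
Editor's note: line 807 points at registering XDP RX-queue info when a new queue attaches. A hedged sketch of such a registration, assuming a recent kernel where xdp_rxq_info_reg() takes a napi_id argument (0 = none); the function name is illustrative.

#include <net/xdp.h>

/* Register a queue's xdp_rxq_info against the netdevice and declare
 * the memory model its buffers use. Plain pages, no page_pool, means
 * MEM_TYPE_PAGE_SHARED.
 */
static int example_register_xdp_rxq(struct xdp_rxq_info *xdp_rxq,
				    struct net_device *dev, u32 queue_index)
{
	int err;

	err = xdp_rxq_info_reg(xdp_rxq, dev, queue_index, 0);
	if (err < 0)
		return err;

	err = xdp_rxq_info_reg_mem_model(xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL);
	if (err < 0)
		xdp_rxq_info_unreg(xdp_rxq);

	return err;
}
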
1044 /* Select queue was not called for the skbuff, so we extract the
1076 struct netdev_queue *queue;
1139 queue = netdev_get_tx_queue(dev, txq);
1140 txq_trans_cond_update(queue);
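
Editor's note: lines 1139-1140 show the transmit path fetching its netdev_queue and refreshing trans_start. A driver that handles its own transmit locking must update this timestamp itself, since the core only does so for drivers using the shared xmit lock; otherwise the netdev watchdog can mistake a healthy queue for a stuck one. A small sketch, with an illustrative function name:

#include <linux/netdevice.h>

/* Refresh the tx queue's trans_start timestamp for the watchdog. */
static void example_update_trans_start(struct net_device *dev,
				       unsigned int txq_index)
{
	struct netdev_queue *queue;

	queue = netdev_get_tx_queue(dev, txq_index);
	/* Only writes jiffies when it actually changed, avoiding a
	 * shared-cacheline store on every packet.
	 */
	txq_trans_cond_update(queue);
}
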
1548 struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
1553 if (!rx_batched || (!more && skb_queue_empty(queue))) {
1561 spin_lock(&queue->lock);
1562 if (!more || skb_queue_len(queue) == rx_batched) {
1564 skb_queue_splice_tail_init(queue, &process_queue);
1567 __skb_queue_tail(queue, skb);
1569 spin_unlock(&queue->lock);
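
Editor's note: the cluster at 1548-1569 is receive-side batching: with batching disabled, or at the end of a burst with nothing pending, deliver immediately; otherwise park skbs on the queue and flush the whole batch once it fills or the burst ends ('more' is the burst hint, rx_batched the batch size, both visible in the matches). A sketch of that decision, with illustrative names:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

static void example_rx_batched(struct sk_buff_head *queue,
			       struct sk_buff *skb, u32 rx_batched, bool more)
{
	struct sk_buff_head process_queue;
	bool flush = false;

	/* Fast path: no batching wanted, or nothing to batch with. */
	if (!rx_batched || (!more && skb_queue_empty(queue))) {
		local_bh_disable();
		netif_receive_skb(skb);
		local_bh_enable();
		return;
	}

	spin_lock(&queue->lock);
	if (!more || skb_queue_len(queue) == rx_batched) {
		/* Batch complete: steal it for lock-free delivery below. */
		__skb_queue_head_init(&process_queue);
		skb_queue_splice_tail_init(queue, &process_queue);
		flush = true;
	} else {
		__skb_queue_tail(queue, skb);
	}
	spin_unlock(&queue->lock);

	if (flush) {
		struct sk_buff *nskb;

		local_bh_disable();
		while ((nskb = __skb_dequeue(&process_queue)))
			netif_receive_skb(nskb);
		netif_receive_skb(skb);	/* current packet goes last */
		local_bh_enable();
	}
}
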
1988 struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
1991 spin_lock_bh(&queue->lock);
1994 spin_unlock_bh(&queue->lock);
2000 __skb_queue_tail(queue, skb);
2001 queue_len = skb_queue_len(queue);
2002 spin_unlock(&queue->lock);
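
Editor's note: lines 1988-2002 show an intentional lock asymmetry worth calling out: the lock is taken with spin_lock_bh() (1991) yet released on the success path with plain spin_unlock() (2002), leaving bottom halves disabled so a NAPI kick can follow before they are re-enabled. A sketch of that shape, assuming a matching local_bh_enable() after the schedule; names other than the queue/NAPI helpers are illustrative.

#include <linux/skbuff.h>
#include <linux/netdevice.h>

static int example_napi_enqueue(struct sk_buff_head *queue,
				struct napi_struct *napi,
				struct sk_buff *skb, bool more,
				bool detached)
{
	int queue_len;

	spin_lock_bh(&queue->lock);

	if (unlikely(detached)) {
		/* Queue is going away: full unlock and drop the packet. */
		spin_unlock_bh(&queue->lock);
		kfree_skb(skb);
		return -EBUSY;
	}

	__skb_queue_tail(queue, skb);
	queue_len = skb_queue_len(queue);

	/* Release the lock but keep BHs off for napi_schedule(). */
	spin_unlock(&queue->lock);

	if (!more || queue_len > NAPI_POLL_WEIGHT)
		napi_schedule(napi);

	local_bh_enable();
	return 0;
}
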
2339 /* We prefer our own queue length */
2453 struct sk_buff_head *queue;
2533 queue = &tfile->sk.sk_write_queue;
2534 spin_lock(&queue->lock);
2537 spin_unlock(&queue->lock);
2542 __skb_queue_tail(queue, skb);
2543 spin_unlock(&queue->lock);
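
Editor's note: the second enqueue site (2533-2543) repeats the detached re-check under the queue lock, but with the plain lock variant, which only makes sense if the caller already runs with bottom halves disabled; that caller context is an assumption here. A compact sketch:

#include <linux/skbuff.h>

/* Assumes the caller has already done local_bh_disable(), e.g. around
 * a batched submission loop, so plain spin_lock() suffices.
 */
static int example_enqueue_bh_off(struct sk_buff_head *queue,
				  struct sk_buff *skb, bool detached)
{
	spin_lock(&queue->lock);
	if (unlikely(detached)) {
		/* Queue was detached after our lockless checks: bail. */
		spin_unlock(&queue->lock);
		kfree_skb(skb);
		return -EBUSY;
	}
	__skb_queue_tail(queue, skb);
	spin_unlock(&queue->lock);
	return 0;
}
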
2790 /* One or more queues have already been attached, no need