Lines Matching defs:queue

24  *		Alan Cox	:	Transmit queue code does relevant
25 * stunts to keep the queue safe.
62 * the backlog queue.
67 * J Hadi Salim : - Backlog queue sampling
439 * Device drivers call our routines to queue packets here. We empty the
440 * queue in the local softnet handler.
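A minimal sketch of the driver side of that contract, assuming a hypothetical my_driver_rx() handler; only netif_rx() is the real dev.c entry point that lands the skb on the per-CPU backlog:

        #include <linux/netdevice.h>
        #include <linux/skbuff.h>
        #include <linux/etherdevice.h>

        /* Hypothetical receive handler; netif_rx() does the actual queueing. */
        static void my_driver_rx(struct net_device *dev, const void *data,
                                 unsigned int len)
        {
                struct sk_buff *skb = netdev_alloc_skb(dev, len);

                if (!skb)
                        return;                 /* drop on allocation failure */
                skb_put_data(skb, data, len);
                skb->protocol = eth_type_trans(skb, dev);
                netif_rx(skb);                  /* queued; softnet empties it later */
        }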
2208 * start_xmit function of one device into the receive queue
2537 /* Need to add tx-queue to this CPU's/rx-queue's existing map */
2545 /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
2603 /* If queue belongs to subordinate dev use its map */
2636 /* allocate memory for queue storage */
2676 /* add tx-queue to CPU/rx-queue maps */
2733 /* update Tx queue numa node */
2741 /* removes tx-queue from unused CPUs/rx-queues */
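The map-growing and map-shrinking comments above come from the XPS rebuild logic. A hedged usage sketch of the exported helper that drives it; the wrapper name and the CPU/queue numbers are arbitrary examples:

        #include <linux/netdevice.h>
        #include <linux/cpumask.h>
        #include <linux/slab.h>

        /* Hypothetical example: steer tx-queue 0 to CPUs 0 and 1 via XPS. */
        static int example_setup_xps(struct net_device *dev)
        {
                cpumask_var_t mask;
                int err;

                if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                        return -ENOMEM;
                cpumask_set_cpu(0, mask);
                cpumask_set_cpu(1, mask);
                err = netif_set_xps_queue(dev, mask, 0);        /* tx-queue index 0 */
                free_cpumask_var(mask);
                return err;
        }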
2890 /* Provide a way for Tx queue to find the tc_to_txq map or
3842 * This is a work-conserving queue; there are no old skbs
4342 /* The device has no queue. Common case for software devices:
4380 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
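A hedged, simplified condensation of the branch these two fragments belong to: with no qdisc attached the packet is handed straight to the driver, and a virtual device that still asks to queue earns the rate-limited complaint. The function name is hypothetical and the error handling of the real __dev_queue_xmit() path is omitted:

        static int xmit_without_queue(struct sk_buff *skb, struct net_device *dev,
                                      struct netdev_queue *txq)
        {
                int rc = NETDEV_TX_BUSY;

                if (dev->flags & IFF_UP) {
                        /* no qdisc: call the driver's ndo_start_xmit directly */
                        skb = dev_hard_start_xmit(skb, dev, txq, &rc);
                        if (dev_xmit_complete(rc))
                                return rc;
                }
                net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
                                     dev->name);
                kfree_skb(skb);
                return rc;
        }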
4523 /* Should we steer this flow to a different hardware queue? */
4557 * CPU from the RPS map of the receiving queue for a given skb.
4576 "%s received packet on queue %u, but number "
4612 * we can look at the local (per receive queue) flow table
4619 * different from current CPU (one in the rx-queue flow
4623 * - The current CPU's queue tail has advanced beyond the
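The three fragments above belong to one comment in the RPS/RFS lookup describing when a flow may be moved to the CPU recorded in the rx-queue flow table. A hedged helper (hypothetical name) expressing that rule:

        /* Switch away from cur_cpu only when doing so cannot reorder the flow:
         * cur_cpu was never set, it went offline, or its backlog head has
         * advanced past the last packet enqueued through this table entry.
         */
        static bool rfs_may_switch_cpu(unsigned int cur_cpu,
                                       unsigned int queue_head,
                                       unsigned int last_qtail)
        {
                return cur_cpu >= nr_cpu_ids ||
                       !cpu_online(cur_cpu) ||
                       (int)(queue_head - last_qtail) >= 0;
        }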
4662 * @rxq_index: RX queue index
4719 * we need to make sure this queue is serviced soon.
4721 * - If this is another cpu queue, link it to our rps_ipi_list,
4724 * - If this is our own queue, NAPI schedule our backlog.
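A hedged sketch of the two cases this comment distinguishes, condensed from the RPS IPI handling in dev.c; the function name is hypothetical and details are omitted:

        static void schedule_backlog_sketch(struct softnet_data *sd)
        {
                struct softnet_data *mysd = this_cpu_ptr(&softnet_data);

                if (sd != mysd) {
                        /* another CPU's queue: chain it so net_rx_action()
                         * can IPI that CPU once local work is done
                         */
                        sd->rps_ipi_next = mysd->rps_ipi_list;
                        mysd->rps_ipi_list = sd;
                        __raise_softirq_irqoff(NET_RX_SOFTIRQ);
                        return;
                }
                /* our own queue: just schedule the backlog NAPI */
                __napi_schedule_irqoff(&mysd->backlog);
        }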
4788 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
4789 * queue (may be a remote CPU queue).
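A heavily simplified, hedged sketch of what that entails: append the skb to the chosen CPU's input_pkt_queue if there is room, otherwise drop it. The real enqueue_to_backlog() also handles flow limits, NAPI scheduling on an empty queue and the backlog lock; the limit value below merely stands in for the netdev_max_backlog sysctl:

        static int backlog_enqueue_sketch(struct sk_buff *skb, int cpu)
        {
                struct softnet_data *sd = &per_cpu(softnet_data, cpu);
                unsigned int max_backlog = 1000;        /* illustrative limit */
                unsigned long flags;
                int ret = NET_RX_DROP;

                local_irq_save(flags);
                if (skb_queue_len(&sd->input_pkt_queue) <= max_backlog) {
                        __skb_queue_tail(&sd->input_pkt_queue, skb);
                        ret = NET_RX_SUCCESS;
                } else {
                        kfree_skb(skb);
                }
                local_irq_restore(flags);
                return ret;
        }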
4816 * We can use non atomic operation since we own the queue lock
4845 "%s received packet on queue %u, but number "
8804 * dev_change_tx_queue_len - Change TX queue length of a netdevice
8806 * @new_len: new tx queue length
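A hedged usage sketch for the helper documented above, assuming (as its in-tree callers do) that RTNL is taken around the call; the wrapper name is hypothetical:

        #include <linux/netdevice.h>
        #include <linux/rtnetlink.h>

        static int example_set_txqueuelen(struct net_device *dev, unsigned long len)
        {
                int err;

                rtnl_lock();
                err = dev_change_tx_queue_len(dev, len);
                rtnl_unlock();
                return err;
        }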
9977 /* XDP RX-queue setup */
10008 struct netdev_queue *queue, void *_unused)
10010 /* Initialize queue lock */
10011 spin_lock_init(&queue->_xmit_lock);
10012 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
10013 queue->xmit_lock_owner = -1;
10014 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
10015 queue->dev = dev;
10017 dql_init(&queue->dql, HZ);
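The dql field is maintained only under CONFIG_BQL, which is likely why the guard line between 10015 and 10017 has no match above. For context, an initializer with this signature is applied to every tx queue through the real netdev_for_each_tx_queue() iterator; a minimal sketch (wrapper name hypothetical):

        static void init_all_tx_queues(struct net_device *dev)
        {
                /* run netdev_init_one_queue() on each struct netdev_queue */
                netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
        }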
10420 * happens, we simply run the queue
10677 struct netdev_queue *queue = dev_ingress_queue(dev);
10680 if (queue)
10681 return queue;
10682 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
10683 if (!queue)
10685 netdev_init_one_queue(dev, queue, NULL);
10686 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
10687 RCU_INIT_POINTER(queue->qdisc_sleeping, &noop_qdisc);
10688 rcu_assign_pointer(dev->ingress_queue, queue);
10690 return queue;
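Only the matching lines show above. A hedged reconstruction of the whole allocation path, assuming the enclosing function is dev_ingress_queue_create() and eliding the config guard the real function carries:

        struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
        {
                struct netdev_queue *queue = dev_ingress_queue(dev);

                if (queue)
                        return queue;           /* already allocated */
                queue = kzalloc(sizeof(*queue), GFP_KERNEL);
                if (!queue)
                        return NULL;            /* non-matching line elided above */
                netdev_init_one_queue(dev, queue, NULL);
                RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
                RCU_INIT_POINTER(queue->qdisc_sleeping, &noop_qdisc);
                rcu_assign_pointer(dev->ingress_queue, queue);
                return queue;
        }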
10739 * for each queue on the device.
11279 /* Append completion queue from offline CPU. */
11283 /* Append output queue from offline CPU. */
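These two comments come from the CPU-hotplug path. A hedged, self-contained sketch of the splice they describe, simplified from dev_cpu_dead() with a hypothetical function name:

        static void merge_offline_cpu_queues(struct softnet_data *sd,
                                             struct softnet_data *oldsd)
        {
                struct sk_buff **tail = &sd->completion_queue;

                /* Append completion queue from offline CPU so its skbs
                 * still get freed by the surviving CPU's softirq.
                 */
                while (*tail)
                        tail = &(*tail)->next;
                *tail = oldsd->completion_queue;
                oldsd->completion_queue = NULL;

                /* Append output (qdisc) queue from offline CPU. */
                if (oldsd->output_queue) {
                        *sd->output_queue_tailp = oldsd->output_queue;
                        sd->output_queue_tailp = oldsd->output_queue_tailp;
                        oldsd->output_queue = NULL;
                        oldsd->output_queue_tailp = &oldsd->output_queue;
                }
        }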