Lines Matching defs:queue

44 /* Number of bytes allowed on the internal guest Rx queue. */
52 void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
56 atomic_inc(&queue->inflight_packets);
59 void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
61 atomic_dec(&queue->inflight_packets);
67 wake_up(&queue->dealloc_wq);
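
The two entries above (lines 52-67) are the zerocopy accounting pair: prepare takes a count on inflight_packets, complete drops it and wakes the dealloc kthread. A minimal sketch of that pattern, assuming struct xenvif_queue from the driver's common.h; the SKBFL_ZEROCOPY_ENABLE update is an assumption about lines the search does not show.

#include <linux/skbuff.h>
#include "common.h"             /* struct xenvif_queue (assumed) */

void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
                                 struct sk_buff *skb)
{
        /* Ask the stack to report zerocopy completion back to us
         * (assumed; not visible in the matching lines).
         */
        skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_ENABLE;
        atomic_inc(&queue->inflight_packets);
}

void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
        atomic_dec(&queue->inflight_packets);

        /* Wake the dealloc kthread only after the decrement, so a thread
         * that is being stopped sees the final count and does not wait
         * forever.
         */
        wake_up(&queue->dealloc_wq);
}
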
77 static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
81 rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
83 napi_schedule(&queue->napi);
89 struct xenvif_queue *queue = dev_id;
92 old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
95 if (!xenvif_handle_tx_interrupt(queue)) {
96 atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending);
105 struct xenvif_queue *queue =
113 if (unlikely(queue->vif->disabled)) {
118 work_done = xenvif_tx_action(queue, budget);
122 /* If the queue is rate-limited, it shall be
125 if (likely(!queue->rate_limited))
126 xenvif_napi_schedule_or_enable_events(queue);
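
Lines 105-126 are the NAPI poll callback: do up to budget worth of TX work, and re-arm ring notifications only when the queue is not being rate-limited. A sketch of the flow they suggest (same assumed headers as the first sketch); the napi_complete()/napi_complete_done() calls and the disabled-vif handling are assumptions about the elided lines.

static int xenvif_poll(struct napi_struct *napi, int budget)
{
        struct xenvif_queue *queue =
                container_of(napi, struct xenvif_queue, napi);
        int work_done;

        /* A fatally errored vif does no TX work; just park NAPI. */
        if (unlikely(queue->vif->disabled)) {
                napi_complete(napi);
                return 0;
        }

        work_done = xenvif_tx_action(queue, budget);

        if (work_done < budget) {
                napi_complete_done(napi, work_done);
                /* If the queue is rate-limited, it shall be
                 * rescheduled from the credit timer callback.
                 */
                if (likely(!queue->rate_limited))
                        xenvif_napi_schedule_or_enable_events(queue);
        }

        return work_done;
}
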
132 static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue)
136 rc = xenvif_have_rx_work(queue, false);
138 xenvif_kick_thread(queue);
144 struct xenvif_queue *queue = dev_id;
147 old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending);
150 if (!xenvif_handle_rx_interrupt(queue)) {
151 atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending);
160 struct xenvif_queue *queue = dev_id;
164 old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
167 has_tx = xenvif_handle_tx_interrupt(queue);
168 has_rx = xenvif_handle_rx_interrupt(queue);
171 atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
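
Lines 77-171 are the three interrupt entry points (TX, RX, and the combined handler used when one event channel is shared); all follow the same lateeoi protocol: set a bit in eoi_pending before checking for work, and clear it again if the interrupt turns out to be spurious. A sketch of the TX variant; the WARN() and the xen_irq_lateeoi() call are assumptions about the elided lines.

#include <linux/interrupt.h>
#include <xen/events.h>

static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
{
        bool rc;

        /* Only schedule NAPI if the frontend actually queued requests. */
        rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
        if (rc)
                napi_schedule(&queue->napi);
        return rc;
}

static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
        struct xenvif_queue *queue = dev_id;
        int old;

        /* Record that an EOI is owed before looking at the ring. */
        old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
        WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n");

        if (!xenvif_handle_tx_interrupt(queue)) {
                /* Spurious: drop our pending bit and EOI right away. */
                atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending);
                xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
        }

        return IRQ_HANDLED;
}
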
208 struct xenvif_queue *queue = NULL;
223 /* Obtain the queue to be used to transmit this packet */
226 pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
230 queue = &vif->queues[index];
232 /* Drop the packet if queue is not ready */
233 if (queue->task == NULL ||
234 queue->dealloc_task == NULL ||
255 if (!xenvif_rx_queue_tail(queue, skb))
258 xenvif_kick_thread(queue);
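
Lines 208-258 belong to the transmit hook: map the skb onto one of the per-vif queues, drop it if that queue's kthreads are not yet running, otherwise append it to the internal guest-Rx queue and kick the RX kthread. A condensed sketch; the xenvif_start_xmit() name, the num_queues/index computation and the drop path are assumptions not present in the matching lines.

static netdev_tx_t xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues;
        u16 index;

        num_queues = READ_ONCE(vif->num_queues);
        if (num_queues < 1)
                goto drop;

        /* Obtain the queue to be used to transmit this packet */
        index = skb_get_queue_mapping(skb);
        if (index >= num_queues) {
                pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
                                    index, vif->dev->name);
                index %= num_queues;
        }
        queue = &vif->queues[index];

        /* Drop the packet if queue is not ready */
        if (queue->task == NULL || queue->dealloc_task == NULL)
                goto drop;

        if (!xenvif_rx_queue_tail(queue, skb))
                goto drop;

        xenvif_kick_thread(queue);
        return NETDEV_TX_OK;

drop:
        vif->dev->stats.tx_dropped++;
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}
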
271 struct xenvif_queue *queue = NULL;
282 /* Aggregate tx and rx stats from each queue */
284 queue = &vif->queues[index];
285 rx_bytes += queue->stats.rx_bytes;
286 rx_packets += queue->stats.rx_packets;
287 tx_bytes += queue->stats.tx_bytes;
288 tx_packets += queue->stats.tx_packets;
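
Lines 271-288 sum the per-queue counters into device-wide totals. A sketch of the surrounding function; the xenvif_get_stats() name, the RCU protection and the final copy into dev->stats are assumptions.

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues, index;
        u64 rx_bytes = 0, rx_packets = 0, tx_bytes = 0, tx_packets = 0;

        rcu_read_lock();
        num_queues = READ_ONCE(vif->num_queues);

        /* Aggregate tx and rx stats from each queue */
        for (index = 0; index < num_queues; ++index) {
                queue = &vif->queues[index];
                rx_bytes += queue->stats.rx_bytes;
                rx_packets += queue->stats.rx_packets;
                tx_bytes += queue->stats.tx_bytes;
                tx_packets += queue->stats.tx_packets;
        }
        rcu_read_unlock();

        vif->dev->stats.rx_bytes = rx_bytes;
        vif->dev->stats.rx_packets = rx_packets;
        vif->dev->stats.tx_bytes = tx_bytes;
        vif->dev->stats.tx_packets = tx_packets;

        return &vif->dev->stats;
}
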
303 struct xenvif_queue *queue = NULL;
308 queue = &vif->queues[queue_index];
309 napi_enable(&queue->napi);
310 enable_irq(queue->tx_irq);
311 if (queue->tx_irq != queue->rx_irq)
312 enable_irq(queue->rx_irq);
313 xenvif_napi_schedule_or_enable_events(queue);
319 struct xenvif_queue *queue = NULL;
324 queue = &vif->queues[queue_index];
325 disable_irq(queue->tx_irq);
326 if (queue->tx_irq != queue->rx_irq)
327 disable_irq(queue->rx_irq);
328 napi_disable(&queue->napi);
329 del_timer_sync(&queue->credit_timeout);
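
Lines 303-329 are the per-queue loops run when the interface is brought up or down: on "up", NAPI and the IRQs are enabled and pending work is kicked; on "down", the same steps are reversed and the credit timer is cancelled. A sketch of the "down" side; the xenvif_down() wrapper and the loop bounds are assumptions, while the per-queue body matches the lines above.

static void xenvif_down(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];
                /* Stop interrupts first, then NAPI, then the credit timer. */
                disable_irq(queue->tx_irq);
                if (queue->tx_irq != queue->rx_irq)
                        disable_irq(queue->rx_irq);
                napi_disable(&queue->napi);
                del_timer_sync(&queue->credit_timeout);
        }
}
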
558 int xenvif_init_queue(struct xenvif_queue *queue)
562 queue->credit_bytes = queue->remaining_credit = ~0UL;
563 queue->credit_usec = 0UL;
564 timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0);
565 queue->credit_window_start = get_jiffies_64();
567 queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;
569 skb_queue_head_init(&queue->rx_queue);
570 skb_queue_head_init(&queue->tx_queue);
572 queue->pending_cons = 0;
573 queue->pending_prod = MAX_PENDING_REQS;
575 queue->pending_ring[i] = i;
577 spin_lock_init(&queue->callback_lock);
578 spin_lock_init(&queue->response_lock);
585 queue->mmap_pages);
587 netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
592 queue->pending_tx_info[i].callback_struct = (struct ubuf_info_msgzc)
596 queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
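
Lines 558-596 set up a queue before it is connected: unlimited credit until the toolstack configures a rate, empty skb queues, a pending ring in which every slot starts out free, and a pool of pages for grant mappings of frontend TX buffers. A sketch of that function; the loop bounds, the gnttab_alloc_pages() error handling and the return values are assumptions, and the ubuf_info_msgzc callback setup from line 592 is omitted.

int xenvif_init_queue(struct xenvif_queue *queue)
{
        int err, i;

        /* Unlimited bandwidth credit until the toolstack sets a rate. */
        queue->credit_bytes = queue->remaining_credit = ~0UL;
        queue->credit_usec = 0UL;
        timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0);
        queue->credit_window_start = get_jiffies_64();

        queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

        skb_queue_head_init(&queue->rx_queue);
        skb_queue_head_init(&queue->tx_queue);

        /* Empty pending ring: every slot index is free. */
        queue->pending_cons = 0;
        queue->pending_prod = MAX_PENDING_REQS;
        for (i = 0; i < MAX_PENDING_REQS; i++)
                queue->pending_ring[i] = i;

        spin_lock_init(&queue->callback_lock);
        spin_lock_init(&queue->response_lock);

        /* Pages backing grant mappings of frontend TX buffers. */
        err = gnttab_alloc_pages(MAX_PENDING_REQS, queue->mmap_pages);
        if (err) {
                netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
                return -ENOMEM;
        }

        for (i = 0; i < MAX_PENDING_REQS; i++)
                queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;

        return 0;
}
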
668 static void xenvif_disconnect_queue(struct xenvif_queue *queue)
670 if (queue->task) {
671 kthread_stop(queue->task);
672 put_task_struct(queue->task);
673 queue->task = NULL;
676 if (queue->dealloc_task) {
677 kthread_stop(queue->dealloc_task);
678 queue->dealloc_task = NULL;
681 if (queue->napi.poll) {
682 netif_napi_del(&queue->napi);
683 queue->napi.poll = NULL;
686 if (queue->tx_irq) {
687 unbind_from_irqhandler(queue->tx_irq, queue);
688 if (queue->tx_irq == queue->rx_irq)
689 queue->rx_irq = 0;
690 queue->tx_irq = 0;
693 if (queue->rx_irq) {
694 unbind_from_irqhandler(queue->rx_irq, queue);
695 queue->rx_irq = 0;
698 xenvif_unmap_frontend_data_rings(queue);
701 int xenvif_connect_data(struct xenvif_queue *queue,
707 struct xenbus_device *dev = xenvif_to_xenbus_device(queue->vif);
711 BUG_ON(queue->tx_irq);
712 BUG_ON(queue->task);
713 BUG_ON(queue->dealloc_task);
715 err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
720 init_waitqueue_head(&queue->wq);
721 init_waitqueue_head(&queue->dealloc_wq);
722 atomic_set(&queue->inflight_packets, 0);
724 netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll);
726 queue->stalled = true;
728 task = kthread_run(xenvif_kthread_guest_rx, queue,
729 "%s-guest-rx", queue->name);
732 queue->task = task;
739 task = kthread_run(xenvif_dealloc_kthread, queue,
740 "%s-dealloc", queue->name);
743 queue->dealloc_task = task;
749 queue->name, queue);
752 queue->tx_irq = queue->rx_irq = err;
753 disable_irq(queue->tx_irq);
756 snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
757 "%s-tx", queue->name);
760 queue->tx_irq_name, queue);
763 queue->tx_irq = err;
764 disable_irq(queue->tx_irq);
766 snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
767 "%s-rx", queue->name);
770 queue->rx_irq_name, queue);
773 queue->rx_irq = err;
774 disable_irq(queue->rx_irq);
780 pr_warn("Could not allocate kthread for %s\n", queue->name);
783 xenvif_disconnect_queue(queue);
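
Lines 701-783 show xenvif_connect_data(): map the frontend rings, start the guest-RX and dealloc kthreads, then bind the event channel(s) to the lateeoi interrupt handlers, leaving the IRQs disabled until the interface is brought up. A condensed sketch showing only the shared-event-channel branch; the bind_interdomain_evtchn_to_irqhandler_lateeoi() helper and its argument order, the error labels, and the get_task_struct() call (inferred from the put_task_struct() at line 672) are assumptions about the elided lines.

int xenvif_connect_data(struct xenvif_queue *queue,
                        unsigned long tx_ring_ref,
                        unsigned long rx_ring_ref,
                        unsigned int tx_evtchn,
                        unsigned int rx_evtchn)
{
        struct xenbus_device *dev = xenvif_to_xenbus_device(queue->vif);
        struct task_struct *task;
        int err;

        BUG_ON(queue->tx_irq);
        BUG_ON(queue->task);
        BUG_ON(queue->dealloc_task);

        err = xenvif_map_frontend_data_rings(queue, tx_ring_ref, rx_ring_ref);
        if (err)
                return err;

        init_waitqueue_head(&queue->wq);
        init_waitqueue_head(&queue->dealloc_wq);
        atomic_set(&queue->inflight_packets, 0);

        netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll);

        queue->stalled = true;

        task = kthread_run(xenvif_kthread_guest_rx, queue,
                           "%s-guest-rx", queue->name);
        if (IS_ERR(task))
                goto kthread_err;
        queue->task = task;
        /* Keep a reference; dropped by put_task_struct() at disconnect. */
        get_task_struct(task);

        task = kthread_run(xenvif_dealloc_kthread, queue,
                           "%s-dealloc", queue->name);
        if (IS_ERR(task))
                goto kthread_err;
        queue->dealloc_task = task;

        /* Shared event channel: one handler services both directions. */
        err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
                dev, tx_evtchn, xenvif_interrupt, 0, queue->name, queue);
        if (err < 0)
                goto err;
        queue->tx_irq = queue->rx_irq = err;
        disable_irq(queue->tx_irq);     /* enabled again when the vif goes up */

        return 0;

kthread_err:
        pr_warn("Could not allocate kthread for %s\n", queue->name);
        err = PTR_ERR(task);
err:
        xenvif_disconnect_queue(queue);
        return err;
}
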
802 struct xenvif_queue *queue = NULL;
809 queue = &vif->queues[queue_index];
811 xenvif_disconnect_queue(queue);
833 * Used for queue teardown from xenvif_free(), and on the
836 void xenvif_deinit_queue(struct xenvif_queue *queue)
838 gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);