Lines matching defs:queue — uses of struct xenvif_queue *queue in the Xen netback interface code
46 /* Number of bytes allowed on the internal guest Rx queue. */
54 void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
58 atomic_inc(&queue->inflight_packets);
61 void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
63 atomic_dec(&queue->inflight_packets);
69 wake_up(&queue->dealloc_wq);
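The two callbacks above (lines 54-69) keep a per-queue count of packets whose zerocopy pages are still outstanding, and wake the dealloc wait queue after each decrement. Below is a minimal userspace sketch of that pattern, assuming C11 atomics and a pthread condition variable in place of the kernel's atomic_t and wait queue; all type and function names here are illustrative, not the driver's.

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

struct model_queue {
    atomic_int inflight_packets;     /* stands in for the kernel atomic_t */
    pthread_mutex_t lock;
    pthread_cond_t dealloc_wq;       /* stands in for the dealloc wait queue */
};

static void zerocopy_prepare(struct model_queue *q)
{
    atomic_fetch_add(&q->inflight_packets, 1);
}

static void zerocopy_complete(struct model_queue *q)
{
    atomic_fetch_sub(&q->inflight_packets, 1);

    /* Signal after every decrement, mirroring the driver's unconditional
     * wake_up() on dealloc_wq once the count has been dropped. */
    pthread_mutex_lock(&q->lock);
    pthread_cond_signal(&q->dealloc_wq);
    pthread_mutex_unlock(&q->lock);
}

int main(void)
{
    struct model_queue q = { 0, PTHREAD_MUTEX_INITIALIZER,
                             PTHREAD_COND_INITIALIZER };

    zerocopy_prepare(&q);
    zerocopy_complete(&q);
    printf("inflight packets: %d\n", atomic_load(&q.inflight_packets));
    return 0;
}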
79 static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
83 rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
85 napi_schedule(&queue->napi);
91 struct xenvif_queue *queue = dev_id;
94 old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
97 if (!xenvif_handle_tx_interrupt(queue)) {
98 atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending);
107 struct xenvif_queue *queue =
115 if (unlikely(queue->vif->disabled)) {
120 work_done = xenvif_tx_action(queue, budget);
124 /* If the queue is rate-limited, it shall be
127 if (likely(!queue->rate_limited))
128 xenvif_napi_schedule_or_enable_events(queue);
134 static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue)
138 rc = xenvif_have_rx_work(queue, false);
140 xenvif_kick_thread(queue);
146 struct xenvif_queue *queue = dev_id;
149 old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending);
152 if (!xenvif_handle_rx_interrupt(queue)) {
153 atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending);
162 struct xenvif_queue *queue = dev_id;
166 old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
169 has_tx = xenvif_handle_tx_interrupt(queue);
170 has_rx = xenvif_handle_rx_interrupt(queue);
173 atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
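Lines 79-173 show the interrupt paths: each handler sets its bit in eoi_pending with atomic_fetch_or, and clears it again with atomic_andnot when no work was found, so the event can be acknowledged immediately instead of the EOI being deferred to the NAPI poll or rx kthread. A minimal sketch of that bookkeeping, assuming C11 atomics; the flag values, the work check, and the EOI step are stand-ins, not the driver's.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Flag bits modelled on NETBK_TX_EOI / NETBK_RX_EOI (values illustrative). */
#define EOI_TX 0x1u
#define EOI_RX 0x2u

static atomic_uint eoi_pending;

/* Stand-in for the real "is there work?" check (unconsumed ring requests,
 * pending rx work, ...). */
static bool tx_has_work(void)
{
    return false;
}

static void tx_interrupt(void)
{
    unsigned int old = atomic_fetch_or(&eoi_pending, EOI_TX);

    if (old & EOI_TX)
        fprintf(stderr, "interrupt while EOI still pending\n");

    if (!tx_has_work()) {
        /* Nothing was scheduled that would acknowledge the event later, so
         * retract the flag and ack right away (the driver issues
         * xen_irq_lateeoi() at this point). */
        atomic_fetch_and(&eoi_pending, ~EOI_TX);
        puts("no work found: EOI issued immediately");
    }
}

int main(void)
{
    tx_interrupt();
    return 0;
}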
210 struct xenvif_queue *queue = NULL;
225 /* Obtain the queue to be used to transmit this packet */
228 pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
232 queue = &vif->queues[index];
234 /* Drop the packet if queue is not ready */
235 if (queue->task == NULL ||
236 queue->dealloc_task == NULL ||
257 if (!xenvif_rx_queue_tail(queue, skb))
260 xenvif_kick_thread(queue);
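The transmit path (lines 210-260) looks up the per-packet queue index, drops the packet with a rate-limited warning when the index is out of range, drops it again if the queue's kthreads are not running yet, and otherwise queues the skb for the guest-RX thread and kicks it. A compact sketch of that select-validate-enqueue shape, with hypothetical types and without the real skb and kthread machinery:

#include <stdbool.h>
#include <stdio.h>

struct pkt { int len; };

struct txq {
    bool ready;        /* true once both kthreads are running */
    int  backlog;
};

/* Returns 0 if the packet was handed to a queue, -1 if it had to be dropped. */
static int start_xmit(struct txq *queues, unsigned int num_queues,
                      unsigned int index, struct pkt *p)
{
    struct txq *q;

    if (index >= num_queues) {
        fprintf(stderr, "invalid queue %u for packet\n", index);
        return -1;                      /* drop, as the driver does */
    }

    q = &queues[index];
    if (!q->ready)
        return -1;                      /* queue not ready yet: drop */

    q->backlog += p->len;               /* stands in for queuing the skb */
    /* here the driver would kick the guest-rx kthread */
    return 0;
}

int main(void)
{
    struct txq queues[2] = { { true, 0 }, { false, 0 } };
    struct pkt p = { 1500 };

    printf("%d %d %d\n",
           start_xmit(queues, 2, 0, &p),    /*  0: accepted      */
           start_xmit(queues, 2, 1, &p),    /* -1: not ready     */
           start_xmit(queues, 2, 5, &p));   /* -1: bad index     */
    return 0;
}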
273 struct xenvif_queue *queue = NULL;
284 /* Aggregate tx and rx stats from each queue */
286 queue = &vif->queues[index];
287 rx_bytes += queue->stats.rx_bytes;
288 rx_packets += queue->stats.rx_packets;
289 tx_bytes += queue->stats.tx_bytes;
290 tx_packets += queue->stats.tx_packets;
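The stats callback (lines 273-290) only sums the per-queue counters into the totals reported to the networking core. A trivial model of that aggregation loop, with an illustrative stats struct:

#include <stdio.h>

struct vif_stats {
    unsigned long rx_bytes, rx_packets, tx_bytes, tx_packets;
};

/* Sum the per-queue counters into one aggregate, as the driver does
 * across vif->queues[]. */
static struct vif_stats sum_stats(const struct vif_stats *qs, unsigned int n)
{
    struct vif_stats total = { 0, 0, 0, 0 };

    for (unsigned int i = 0; i < n; i++) {
        total.rx_bytes   += qs[i].rx_bytes;
        total.rx_packets += qs[i].rx_packets;
        total.tx_bytes   += qs[i].tx_bytes;
        total.tx_packets += qs[i].tx_packets;
    }
    return total;
}

int main(void)
{
    struct vif_stats per_queue[2] = { { 100, 1, 200, 2 }, { 50, 1, 70, 1 } };
    struct vif_stats t = sum_stats(per_queue, 2);

    printf("rx %lu bytes / %lu pkts, tx %lu bytes / %lu pkts\n",
           t.rx_bytes, t.rx_packets, t.tx_bytes, t.tx_packets);
    return 0;
}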
305 struct xenvif_queue *queue = NULL;
310 queue = &vif->queues[queue_index];
311 napi_enable(&queue->napi);
312 enable_irq(queue->tx_irq);
313 if (queue->tx_irq != queue->rx_irq)
314 enable_irq(queue->rx_irq);
315 xenvif_napi_schedule_or_enable_events(queue);
321 struct xenvif_queue *queue = NULL;
326 queue = &vif->queues[queue_index];
327 disable_irq(queue->tx_irq);
328 if (queue->tx_irq != queue->rx_irq)
329 disable_irq(queue->rx_irq);
330 napi_disable(&queue->napi);
331 del_timer_sync(&queue->credit_timeout);
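xenvif_up()/xenvif_down() (lines 305-331) walk every queue, enabling or disabling its NAPI instance and interrupts; when tx and rx share a single event channel the two irq numbers are equal, so the rx irq is only touched when it differs. A sketch of just that shared-versus-split irq handling, assuming stub enable/disable helpers and omitting the NAPI and credit-timer calls:

#include <stdio.h>

struct mq {
    int tx_irq;
    int rx_irq;   /* equals tx_irq when the frontend uses one event channel */
};

static void enable_irq_model(int irq)  { printf("enable irq %d\n", irq); }
static void disable_irq_model(int irq) { printf("disable irq %d\n", irq); }

static void vif_up(struct mq *queues, unsigned int n)
{
    for (unsigned int i = 0; i < n; i++) {
        enable_irq_model(queues[i].tx_irq);
        if (queues[i].tx_irq != queues[i].rx_irq)
            enable_irq_model(queues[i].rx_irq);
    }
}

static void vif_down(struct mq *queues, unsigned int n)
{
    for (unsigned int i = 0; i < n; i++) {
        disable_irq_model(queues[i].tx_irq);
        if (queues[i].tx_irq != queues[i].rx_irq)
            disable_irq_model(queues[i].rx_irq);
    }
}

int main(void)
{
    struct mq queues[2] = { { 10, 10 }, { 11, 12 } };

    vif_up(queues, 2);
    vif_down(queues, 2);
    return 0;
}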
558 int xenvif_init_queue(struct xenvif_queue *queue)
562 queue->credit_bytes = queue->remaining_credit = ~0UL;
563 queue->credit_usec = 0UL;
564 timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0);
565 queue->credit_window_start = get_jiffies_64();
567 queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;
569 skb_queue_head_init(&queue->rx_queue);
570 skb_queue_head_init(&queue->tx_queue);
572 queue->pending_cons = 0;
573 queue->pending_prod = MAX_PENDING_REQS;
575 queue->pending_ring[i] = i;
577 spin_lock_init(&queue->callback_lock);
578 spin_lock_init(&queue->response_lock);
585 queue->mmap_pages);
587 netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
592 queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
596 queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
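xenvif_init_queue() (lines 558-596) starts the queue with effectively unlimited credit, initialises the skb queues and locks, and seeds the pending ring with the identity mapping so that every one of the MAX_PENDING_REQS slots begins life free. A sketch of that pending-ring setup, assuming a ring size of 256 (the driver derives the real value from the shared ring size):

#include <stdint.h>
#include <stdio.h>

#define MAX_PENDING_REQS 256          /* illustrative; see the driver header */

struct pending_ring {
    uint16_t ring[MAX_PENDING_REQS];  /* free slot indices */
    unsigned int cons;                /* next free slot to hand out */
    unsigned int prod;                /* one past the last returned slot */
};

static void pending_ring_init(struct pending_ring *pr)
{
    pr->cons = 0;
    pr->prod = MAX_PENDING_REQS;      /* everything is free at start */
    for (unsigned int i = 0; i < MAX_PENDING_REQS; i++)
        pr->ring[i] = i;              /* identity mapping: slot i is free */
}

int main(void)
{
    struct pending_ring pr;

    pending_ring_init(&pr);
    printf("free slots: %u\n", pr.prod - pr.cons);
    return 0;
}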
669 static void xenvif_disconnect_queue(struct xenvif_queue *queue)
671 if (queue->task) {
672 kthread_stop(queue->task);
673 put_task_struct(queue->task);
674 queue->task = NULL;
677 if (queue->dealloc_task) {
678 kthread_stop(queue->dealloc_task);
679 queue->dealloc_task = NULL;
682 if (queue->napi.poll) {
683 netif_napi_del(&queue->napi);
684 queue->napi.poll = NULL;
687 if (queue->tx_irq) {
688 unbind_from_irqhandler(queue->tx_irq, queue);
689 if (queue->tx_irq == queue->rx_irq)
690 queue->rx_irq = 0;
691 queue->tx_irq = 0;
694 if (queue->rx_irq) {
695 unbind_from_irqhandler(queue->rx_irq, queue);
696 queue->rx_irq = 0;
699 xenvif_unmap_frontend_data_rings(queue);
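xenvif_disconnect_queue() (lines 669-699) releases resources in roughly the reverse order of connect and relies on NULL/0 sentinels, so it is safe on a partially connected queue and doubles as the error path of xenvif_connect_data() (line 784 below). A minimal sketch of that check-release-clear idiom, with stub helpers in place of kthread_stop()/unbind_from_irqhandler():

#include <stddef.h>
#include <stdio.h>

struct conn {
    void *task;          /* kthread handles in the driver */
    void *dealloc_task;
    int   tx_irq;
    int   rx_irq;        /* may alias tx_irq */
};

static void stop_task(void *t)   { (void)t; puts("kthread_stop"); }
static void unbind_irq(int irq)  { printf("unbind irq %d\n", irq); }

static void disconnect(struct conn *c)
{
    if (c->task) {
        stop_task(c->task);
        c->task = NULL;            /* clear so a second call is a no-op */
    }
    if (c->dealloc_task) {
        stop_task(c->dealloc_task);
        c->dealloc_task = NULL;
    }
    if (c->tx_irq) {
        unbind_irq(c->tx_irq);
        if (c->tx_irq == c->rx_irq)
            c->rx_irq = 0;         /* shared event channel: one unbind only */
        c->tx_irq = 0;
    }
    if (c->rx_irq) {
        unbind_irq(c->rx_irq);
        c->rx_irq = 0;
    }
    /* the driver also unmaps the frontend data rings here */
}

int main(void)
{
    struct conn c = { (void *)1, (void *)1, 7, 7 };

    disconnect(&c);
    disconnect(&c);                /* second call: nothing left to release */
    return 0;
}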
702 int xenvif_connect_data(struct xenvif_queue *queue,
711 BUG_ON(queue->tx_irq);
712 BUG_ON(queue->task);
713 BUG_ON(queue->dealloc_task);
715 err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
720 init_waitqueue_head(&queue->wq);
721 init_waitqueue_head(&queue->dealloc_wq);
722 atomic_set(&queue->inflight_packets, 0);
724 netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
727 queue->stalled = true;
729 task = kthread_run(xenvif_kthread_guest_rx, queue,
730 "%s-guest-rx", queue->name);
733 queue->task = task;
740 task = kthread_run(xenvif_dealloc_kthread, queue,
741 "%s-dealloc", queue->name);
744 queue->dealloc_task = task;
749 queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
750 queue->name, queue);
753 queue->tx_irq = queue->rx_irq = err;
754 disable_irq(queue->tx_irq);
757 snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
758 "%s-tx", queue->name);
760 queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
761 queue->tx_irq_name, queue);
764 queue->tx_irq = err;
765 disable_irq(queue->tx_irq);
767 snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
768 "%s-rx", queue->name);
770 queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
771 queue->rx_irq_name, queue);
774 queue->rx_irq = err;
775 disable_irq(queue->rx_irq);
781 pr_warn("Could not allocate kthread for %s\n", queue->name);
784 xenvif_disconnect_queue(queue);
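xenvif_connect_data() (lines 702-784) maps the frontend rings, starts the guest-rx and dealloc kthreads, and then binds the event channel(s): with a single channel (tx_evtchn == rx_evtchn) one combined handler serves both directions and tx_irq == rx_irq, otherwise tx and rx each get their own handler named "<queue>-tx" and "<queue>-rx". The sketch below models only that branch; bind_evtchn() is a stand-in for the driver's interdomain event-channel binding helper, and the irq numbers it returns are fake.

#include <stdio.h>

struct irqs { int tx_irq, rx_irq; };

/* Stand-in for the real interdomain event-channel binding call. */
static int bind_evtchn(unsigned int evtchn, const char *name)
{
    printf("bind evtchn %u as \"%s\"\n", evtchn, name);
    return (int)evtchn + 100;          /* fake irq number */
}

static void connect_irqs(struct irqs *q, unsigned int tx_evtchn,
                         unsigned int rx_evtchn, const char *name)
{
    if (tx_evtchn == rx_evtchn) {
        /* one event channel: a combined handler covers tx and rx */
        q->tx_irq = q->rx_irq = bind_evtchn(tx_evtchn, name);
    } else {
        /* split channels: separate tx and rx handlers */
        char buf[32];

        snprintf(buf, sizeof(buf), "%s-tx", name);
        q->tx_irq = bind_evtchn(tx_evtchn, buf);
        snprintf(buf, sizeof(buf), "%s-rx", name);
        q->rx_irq = bind_evtchn(rx_evtchn, buf);
    }
}

int main(void)
{
    struct irqs q;

    connect_irqs(&q, 5, 5, "vif1.0-q0");   /* shared event channel */
    connect_irqs(&q, 5, 6, "vif1.0-q0");   /* split event channels */
    return 0;
}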
803 struct xenvif_queue *queue = NULL;
810 queue = &vif->queues[queue_index];
812 xenvif_disconnect_queue(queue);
834 * Used for queue teardown from xenvif_free(), and on the
837 void xenvif_deinit_queue(struct xenvif_queue *queue)
839 gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);