Lines matching refs: queue
(Cross-reference listing of every line in the Xen netfront driver, drivers/net/xen-netfront.c, that references "queue"; the leading number on each entry is its line number in the source file.)
96 /* IRQ name is queue name with "-tx" or "-rx" appended */
168 /* Multi-queue support */
220 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
224 struct sk_buff *skb = queue->rx_skbs[i];
225 queue->rx_skbs[i] = NULL;
229 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
233 grant_ref_t ref = queue->grant_rx_ref[i];
234 queue->grant_rx_ref[i] = GRANT_INVALID_REF;
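xennet_get_rx_skb() and xennet_get_rx_ref() hand a slot's contents to the caller and immediately poison the slot (NULL, GRANT_INVALID_REF) so a stale skb or grant reference can never be consumed twice. A minimal userspace sketch of the same take-and-poison move; the take() helper is hypothetical, not from the driver:

#include <stddef.h>
#include <stdio.h>

static void *take(void **slot)
{
        void *v = *slot;

        *slot = NULL;   /* poison: the queue no longer owns the entry */
        return v;
}

int main(void)
{
        int payload = 42;
        void *slot = &payload;

        printf("first take:  %p\n", take(&slot));
        printf("second take: %p (slot already poisoned)\n", take(&slot));
        return 0;
}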
250 struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
251 napi_schedule(&queue->napi);
254 static int netfront_tx_slot_available(struct netfront_queue *queue)
256 return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
260 static void xennet_maybe_wake_tx(struct netfront_queue *queue)
262 struct net_device *dev = queue->info->netdev;
263 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
266 netfront_tx_slot_available(queue) &&
268 netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
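netfront_tx_slot_available() compares the unsigned difference req_prod_pvt - rsp_cons against a bound, which counts in-flight requests correctly even when the free-running RING_IDX counters wrap past 2^32. A small userspace model; MAX_IN_FLIGHT is a made-up stand-in for the driver's real threshold:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t RING_IDX;

#define MAX_IN_FLIGHT 256u      /* stand-in for the real bound */

static int slot_available(RING_IDX req_prod_pvt, RING_IDX rsp_cons)
{
        /* Unsigned subtraction yields the number of outstanding
         * requests even after req_prod_pvt has wrapped. */
        return (req_prod_pvt - rsp_cons) < MAX_IN_FLIGHT;
}

int main(void)
{
        /* Near the wrap point: producer has wrapped, consumer has not. */
        RING_IDX prod = 5, cons = 0xFFFFFFF0u;

        printf("in flight: %u, available: %d\n",
               prod - cons, slot_available(prod, cons));   /* 21, 1 */
        return 0;
}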
272 static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
277 skb = __netdev_alloc_skb(queue->info->netdev,
283 page = page_pool_alloc_pages(queue->page_pool,
293 skb->dev = queue->info->netdev;
299 static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
301 RING_IDX req_prod = queue->rx.req_prod_pvt;
305 if (unlikely(!netif_carrier_ok(queue->info->netdev)))
308 for (req_prod = queue->rx.req_prod_pvt;
309 req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
317 skb = xennet_alloc_one_rx_buffer(queue);
325 BUG_ON(queue->rx_skbs[id]);
326 queue->rx_skbs[id] = skb;
328 ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
330 queue->grant_rx_ref[id] = ref;
334 req = RING_GET_REQUEST(&queue->rx, req_prod);
336 queue->info->xbdev->otherend_id,
343 queue->rx.req_prod_pvt = req_prod;
350 if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
352 mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
356 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
358 notify_remote_via_irq(queue->rx_irq);
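xennet_alloc_rx_buffers() batches all new rx requests under one ring push and only interrupts the backend when RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() reports that the backend's wakeup mark (req_event) falls inside the pushed batch. Below is a single-threaded model of that macro's published semantics (see Xen's io/ring.h); the memory barriers the real macro issues are omitted since this demo has one thread:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t RING_IDX;

struct shared { RING_IDX req_prod, req_event; };
struct front  { struct shared *sring; RING_IDX req_prod_pvt; };

static int push_and_check_notify(struct front *r)
{
        RING_IDX old = r->sring->req_prod;
        RING_IDX new = r->req_prod_pvt;

        r->sring->req_prod = new;       /* publish the whole batch */
        /* Notify only if req_event lies in (old, new]. */
        return (RING_IDX)(new - r->sring->req_event) <
               (RING_IDX)(new - old);
}

int main(void)
{
        struct shared s = { .req_prod = 10, .req_event = 11 };
        struct front  f = { .sring = &s, .req_prod_pvt = 14 };

        printf("notify backend: %d\n", push_and_check_notify(&f)); /* 1 */
        return 0;
}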
366 struct netfront_queue *queue = NULL;
372 queue = &np->queues[i];
373 napi_enable(&queue->napi);
375 spin_lock_bh(&queue->rx_lock);
377 xennet_alloc_rx_buffers(queue);
378 queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
379 if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
380 napi_schedule(&queue->napi);
382 spin_unlock_bh(&queue->rx_lock);
390 static bool xennet_tx_buf_gc(struct netfront_queue *queue)
397 const struct device *dev = &queue->info->netdev->dev;
399 BUG_ON(!netif_carrier_ok(queue->info->netdev));
402 prod = queue->tx.sring->rsp_prod;
403 if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
405 prod - queue->tx.rsp_cons);
410 for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
415 RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
420 if (id >= RING_SIZE(&queue->tx)) {
426 if (queue->tx_link[id] != TX_PENDING) {
432 queue->tx_link[id] = TX_LINK_NONE;
433 skb = queue->tx_skbs[id];
434 queue->tx_skbs[id] = NULL;
436 queue->grant_tx_ref[id], GNTMAP_readonly))) {
442 &queue->gref_tx_head, queue->grant_tx_ref[id]);
443 queue->grant_tx_ref[id] = GRANT_INVALID_REF;
444 queue->grant_tx_page[id] = NULL;
445 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
449 queue->tx.rsp_cons = prod;
451 RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
454 xennet_maybe_wake_tx(queue);
459 queue->info->broken = true;
466 struct netfront_queue *queue;
483 struct netfront_queue *queue = info->queue;
486 id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
487 tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
488 ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
491 gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
494 queue->tx_skbs[id] = skb;
495 queue->grant_tx_page[id] = page;
496 queue->grant_tx_ref[id] = ref;
507 * Put the request in the pending queue, it will be set to be pending
510 add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
593 /* First, check if there is only one queue */
604 static void xennet_mark_tx_pending(struct netfront_queue *queue)
608 while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
610 queue->tx_link[i] = TX_PENDING;
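get_id_from_list()/add_id_to_list() thread free and pending slot ids through the single queue->tx_link[] array, with TX_LINK_NONE terminating a list and TX_PENDING marking ids handed to the backend; xennet_mark_tx_pending() drains the pending list right before the ring push. The sketch below reconstructs that mechanism from the call sites in this listing; the helper bodies are an assumption, not verbatim kernel code:

#include <stdio.h>

#define RING_SIZE    8
#define TX_LINK_NONE 0xffffu
#define TX_PENDING   0xfffeu

static unsigned short tx_link[RING_SIZE];
static unsigned int freelist, pend_queue = TX_LINK_NONE;

static void add_id(unsigned int *head, unsigned short id)
{
        tx_link[id] = *head;            /* push onto the list */
        *head = id;
}

static unsigned short get_id(unsigned int *head)
{
        unsigned short id = *head;

        if (id != TX_LINK_NONE)
                *head = tx_link[id];    /* pop from the list */
        return id;
}

int main(void)
{
        int i;

        /* Mirror xennet_init_queue(): chain every slot onto the free list. */
        for (i = 0; i < RING_SIZE; i++)
                tx_link[i] = i + 1;
        tx_link[RING_SIZE - 1] = TX_LINK_NONE;
        freelist = 0;

        /* Take two slots for requests, park them on the pending queue. */
        add_id(&pend_queue, get_id(&freelist));
        add_id(&pend_queue, get_id(&freelist));

        /* Like xennet_mark_tx_pending(): flip queued ids to TX_PENDING. */
        while ((i = get_id(&pend_queue)) != TX_LINK_NONE)
                tx_link[i] = TX_PENDING;

        printf("slot 0 state: %#x, slot 1 state: %#x\n",
               tx_link[0], tx_link[1]);        /* both 0xfffe */
        return 0;
}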
614 struct netfront_queue *queue,
620 .queue = queue,
630 xennet_mark_tx_pending(queue);
632 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
634 notify_remote_via_irq(queue->tx_irq);
641 xennet_tx_buf_gc(queue);
651 struct netfront_queue *queue = NULL;
661 queue = &np->queues[smp_processor_id() % num_queues];
663 spin_lock_irqsave(&queue->tx_lock, irq_flags);
669 err = xennet_xdp_xmit_one(dev, queue, xdpf);
675 spin_unlock_irqrestore(&queue->tx_lock, irq_flags);
722 struct netfront_queue *queue = NULL;
733 /* Determine which queue to transmit this SKB on */
735 queue = &np->queues[queue_index];
777 spin_lock_irqsave(&queue->tx_lock, flags);
782 spin_unlock_irqrestore(&queue->tx_lock, flags);
787 info.queue = queue;
811 RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
843 xennet_mark_tx_pending(queue);
845 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
847 notify_remote_via_irq(queue->tx_irq);
855 xennet_tx_buf_gc(queue);
857 if (!netfront_tx_slot_available(queue))
858 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
860 spin_unlock_irqrestore(&queue->tx_lock, flags);
875 struct netfront_queue *queue;
878 queue = &np->queues[i];
879 napi_disable(&queue->napi);
889 struct netfront_queue *queue = &info->queues[i];
892 napi_disable(&queue->napi);
893 netif_napi_del(&queue->napi);
906 static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
910 spin_lock_irqsave(&queue->rx_cons_lock, flags);
911 queue->rx.rsp_cons = val;
912 queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
913 spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
916 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
919 int new = xennet_rxidx(queue->rx.req_prod_pvt);
921 BUG_ON(queue->rx_skbs[new]);
922 queue->rx_skbs[new] = skb;
923 queue->grant_rx_ref[new] = ref;
924 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
925 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
926 queue->rx.req_prod_pvt++;
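xennet_move_rx_slot() re-posts an unconsumed buffer at the ring's current producer position; the xennet_rxidx() mapping it relies on masks a free-running RING_IDX with the ring size, which works because the ring size is a power of two. A tiny demo of that mapping:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t RING_IDX;

#define NET_RX_RING_SIZE 256    /* must be a power of two for the mask */

static int rxidx(RING_IDX idx)
{
        return idx & (NET_RX_RING_SIZE - 1);
}

int main(void)
{
        /* Indices NET_RX_RING_SIZE apart land in the same slot,
         * and the mask stays correct across the 32-bit wrap. */
        printf("%d %d %d\n", rxidx(5), rxidx(5 + NET_RX_RING_SIZE),
               rxidx(0xFFFFFFFFu));     /* 5 5 255 */
        return 0;
}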
929 static int xennet_get_extras(struct netfront_queue *queue,
935 struct device *dev = &queue->info->netdev->dev;
936 RING_IDX cons = queue->rx.rsp_cons;
950 RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
962 skb = xennet_get_rx_skb(queue, cons);
963 ref = xennet_get_rx_ref(queue, cons);
964 xennet_move_rx_slot(queue, skb, ref);
967 xennet_set_rx_rsp_cons(queue, cons);
971 static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
984 xdp->rxq = &queue->xdp_rxq;
992 err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
994 trace_xdp_exception(queue->info->netdev, prog, act);
998 err = xdp_do_redirect(queue->info->netdev, xdp, prog);
1001 trace_xdp_exception(queue->info->netdev, prog, act);
1008 trace_xdp_exception(queue->info->netdev, prog, act);
1018 static int xennet_get_responses(struct netfront_queue *queue,
1025 RING_IDX cons = queue->rx.rsp_cons;
1026 struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
1028 grant_ref_t ref = xennet_get_rx_ref(queue, cons);
1029 struct device *dev = &queue->info->netdev->dev;
1037 err = xennet_get_extras(queue, extras, rp);
1046 cons = queue->rx.rsp_cons;
1055 xennet_move_rx_slot(queue, skb, ref);
1076 queue->info->broken = true;
1081 gnttab_release_grant_reference(&queue->gref_rx_head, ref);
1084 xdp_prog = rcu_dereference(queue->xdp_prog);
1088 verdict = xennet_run_xdp(queue,
1113 RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
1115 skb = xennet_get_rx_skb(queue, cons + slots);
1116 ref = xennet_get_rx_ref(queue, cons + slots);
1127 xennet_set_rx_rsp_cons(queue, cons + slots);
1161 static int xennet_fill_frags(struct netfront_queue *queue,
1165 RING_IDX cons = queue->rx.rsp_cons;
1172 RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
1181 xennet_set_rx_rsp_cons(queue,
1195 xennet_set_rx_rsp_cons(queue, cons);
1224 static int handle_incoming_queue(struct netfront_queue *queue,
1227 struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
1238 skb->protocol = eth_type_trans(skb, queue->info->netdev);
1241 if (checksum_setup(queue->info->netdev, skb)) {
1244 queue->info->netdev->stats.rx_errors++;
1254 napi_gro_receive(&queue->napi, skb);
1262 struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
1263 struct net_device *dev = queue->info->netdev;
1276 spin_lock(&queue->rx_lock);
1282 rp = queue->rx.sring->rsp_prod;
1283 if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
1285 rp - queue->rx.rsp_cons);
1286 queue->info->broken = true;
1287 spin_unlock(&queue->rx_lock);
1292 i = queue->rx.rsp_cons;
1295 RING_COPY_RESPONSE(&queue->rx, i, rx);
1298 err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
1302 if (queue->info->broken) {
1303 spin_unlock(&queue->rx_lock);
1310 i = queue->rx.rsp_cons;
1322 xennet_set_rx_rsp_cons(queue,
1323 queue->rx.rsp_cons +
1338 if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
1348 i = queue->rx.rsp_cons + 1;
1349 xennet_set_rx_rsp_cons(queue, i);
1357 work_done -= handle_incoming_queue(queue, &rxq);
1359 xennet_alloc_rx_buffers(queue);
1366 RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1371 spin_unlock(&queue->rx_lock);
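xennet_poll() follows the standard NAPI contract: consume at most budget responses per invocation, and only complete the poll (re-arming interrupts) when fewer than budget were processed. A stripped-down userspace model of that contract; fetch_packet() is a stand-in for the RING_COPY_RESPONSE() work above:

#include <stdio.h>

static int pending = 70;        /* pretend responses waiting on the ring */

static int fetch_packet(void)   /* one response's worth of work */
{
        if (pending == 0)
                return 0;
        pending--;
        return 1;
}

static int poll(int budget)
{
        int work_done = 0;

        while (work_done < budget && fetch_packet())
                work_done++;

        if (work_done < budget)
                printf("complete: re-enable interrupts\n");
        else
                printf("budget exhausted: stay on the poll list\n");

        return work_done;
}

int main(void)
{
        printf("work_done=%d\n", poll(64));     /* exhausts budget */
        printf("work_done=%d\n", poll(64));     /* drains the rest */
        return 0;
}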
1420 static void xennet_release_tx_bufs(struct netfront_queue *queue)
1427 if (!queue->tx_skbs[i])
1430 skb = queue->tx_skbs[i];
1431 queue->tx_skbs[i] = NULL;
1432 get_page(queue->grant_tx_page[i]);
1433 gnttab_end_foreign_access(queue->grant_tx_ref[i],
1435 (unsigned long)page_address(queue->grant_tx_page[i]));
1436 queue->grant_tx_page[i] = NULL;
1437 queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1438 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
1443 static void xennet_release_rx_bufs(struct netfront_queue *queue)
1447 spin_lock_bh(&queue->rx_lock);
1453 skb = queue->rx_skbs[id];
1457 ref = queue->grant_rx_ref[id];
1469 queue->grant_rx_ref[id] = GRANT_INVALID_REF;
1474 spin_unlock_bh(&queue->rx_lock);
1513 static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
1517 if (unlikely(queue->info->broken))
1520 spin_lock_irqsave(&queue->tx_lock, flags);
1521 if (xennet_tx_buf_gc(queue))
1523 spin_unlock_irqrestore(&queue->tx_lock, flags);
1538 static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
1543 if (unlikely(queue->info->broken))
1546 spin_lock_irqsave(&queue->rx_cons_lock, flags);
1547 work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
1548 if (work_queued > queue->rx_rsp_unconsumed) {
1549 queue->rx_rsp_unconsumed = work_queued;
1551 } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
1552 const struct device *dev = &queue->info->netdev->dev;
1554 spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1557 queue->info->broken = true;
1560 spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1562 if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
1563 napi_schedule(&queue->napi);
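xennet_handle_rx() caches how many rx responses were unconsumed at the last interrupt. That count may only shrink when xennet_set_rx_rsp_cons() advances the consumer, so a smaller count at interrupt time means the backend moved its producer index backwards, and the device is marked broken. A compact model of that check, with the rx_cons_lock locking elided:

#include <stdbool.h>
#include <stdio.h>

static unsigned int rx_rsp_unconsumed;
static bool broken;

static bool handle_rx(unsigned int work_queued)
{
        if (work_queued > rx_rsp_unconsumed) {
                rx_rsp_unconsumed = work_queued;  /* new responses arrived */
        } else if (work_queued < rx_rsp_unconsumed) {
                broken = true;          /* producer index went backwards */
                return false;
        }
        return true;    /* equal counts: spurious but harmless interrupt */
}

int main(void)
{
        bool ok;

        handle_rx(4);           /* 4 responses arrive */
        ok = handle_rx(2);      /* count shrank without any consumption */
        printf("ok=%d broken=%d\n", ok, broken);        /* ok=0 broken=1 */
        return 0;
}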
1592 /* Poll each queue */
1820 struct netfront_queue *queue = &info->queues[i];
1822 del_timer_sync(&queue->rx_refill_timer);
1824 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1825 unbind_from_irqhandler(queue->tx_irq, queue);
1826 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1827 unbind_from_irqhandler(queue->tx_irq, queue);
1828 unbind_from_irqhandler(queue->rx_irq, queue);
1830 queue->tx_evtchn = queue->rx_evtchn = 0;
1831 queue->tx_irq = queue->rx_irq = 0;
1834 napi_synchronize(&queue->napi);
1836 xennet_release_tx_bufs(queue);
1837 xennet_release_rx_bufs(queue);
1838 gnttab_free_grant_references(queue->gref_tx_head);
1839 gnttab_free_grant_references(queue->gref_rx_head);
1842 xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1843 xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1845 queue->tx_ring_ref = GRANT_INVALID_REF;
1846 queue->rx_ring_ref = GRANT_INVALID_REF;
1847 queue->tx.sring = NULL;
1848 queue->rx.sring = NULL;
1850 page_pool_destroy(queue->page_pool);
1902 static int setup_netfront_single(struct netfront_queue *queue)
1906 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1910 err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1912 queue->info->netdev->name,
1913 queue);
1916 queue->rx_evtchn = queue->tx_evtchn;
1917 queue->rx_irq = queue->tx_irq = err;
1922 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1923 queue->tx_evtchn = 0;
1928 static int setup_netfront_split(struct netfront_queue *queue)
1932 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1935 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1939 snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1940 "%s-tx", queue->name);
1941 err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1943 queue->tx_irq_name, queue);
1946 queue->tx_irq = err;
1948 snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1949 "%s-rx", queue->name);
1950 err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
1952 queue->rx_irq_name, queue);
1955 queue->rx_irq = err;
1960 unbind_from_irqhandler(queue->tx_irq, queue);
1961 queue->tx_irq = 0;
1963 xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1964 queue->rx_evtchn = 0;
1966 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1967 queue->tx_evtchn = 0;
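setup_netfront_split()'s fail_* ladder is the usual kernel unwind idiom: each failure jumps to a label that releases, in reverse order, exactly what was acquired so far (tx irq, then rx event channel, then tx event channel). A userspace model with stand-in acquire/release steps:

#include <stdio.h>

static int acquire(const char *what, int fail)
{
        if (fail) {
                printf("acquire %s: failed\n", what);
                return -1;
        }
        printf("acquire %s\n", what);
        return 0;
}

static void release(const char *what)
{
        printf("release %s\n", what);
}

static int setup(int fail_last_step)
{
        if (acquire("tx evtchn", 0))
                goto fail;
        if (acquire("rx evtchn", 0))
                goto free_tx_evtchn;
        if (acquire("tx irq", 0))
                goto free_rx_evtchn;
        if (acquire("rx irq", fail_last_step))
                goto free_tx_irq;
        return 0;

free_tx_irq:                    /* unwind in reverse acquisition order */
        release("tx irq");
free_rx_evtchn:
        release("rx evtchn");
free_tx_evtchn:
        release("tx evtchn");
fail:
        return -1;
}

int main(void)
{
        return setup(1) ? 1 : 0;
}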
1973 struct netfront_queue *queue, unsigned int feature_split_evtchn)
1980 queue->tx_ring_ref = GRANT_INVALID_REF;
1981 queue->rx_ring_ref = GRANT_INVALID_REF;
1982 queue->rx.sring = NULL;
1983 queue->tx.sring = NULL;
1992 FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1997 queue->tx_ring_ref = gref;
2006 FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
2011 queue->rx_ring_ref = gref;
2014 err = setup_netfront_split(queue);
2020 err = setup_netfront_single(queue);
2031 if (queue->rx_ring_ref != GRANT_INVALID_REF) {
2032 gnttab_end_foreign_access(queue->rx_ring_ref, 0,
2034 queue->rx_ring_ref = GRANT_INVALID_REF;
2038 if (queue->tx_ring_ref != GRANT_INVALID_REF) {
2039 gnttab_end_foreign_access(queue->tx_ring_ref, 0,
2041 queue->tx_ring_ref = GRANT_INVALID_REF;
2050 * be run per-queue.
2052 static int xennet_init_queue(struct netfront_queue *queue)
2058 spin_lock_init(&queue->tx_lock);
2059 spin_lock_init(&queue->rx_lock);
2060 spin_lock_init(&queue->rx_cons_lock);
2062 timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
2064 devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
2065 snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
2066 devid, queue->id);
2069 queue->tx_skb_freelist = 0;
2070 queue->tx_pend_queue = TX_LINK_NONE;
2072 queue->tx_link[i] = i + 1;
2073 queue->grant_tx_ref[i] = GRANT_INVALID_REF;
2074 queue->grant_tx_page[i] = NULL;
2076 queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
2080 queue->rx_skbs[i] = NULL;
2081 queue->grant_rx_ref[i] = GRANT_INVALID_REF;
2086 &queue->gref_tx_head) < 0) {
2094 &queue->gref_rx_head) < 0) {
2103 gnttab_free_grant_references(queue->gref_tx_head);
2108 static int write_queue_xenstore_keys(struct netfront_queue *queue,
2111 /* Write the queue-specific keys into XenStore in the traditional
2112 * way for a single queue, or in per-queue subkeys for multiple
2115 struct xenbus_device *dev = queue->info->xbdev;
2130 snprintf(path, pathsize, "%s/queue-%u",
2131 dev->nodename, queue->id);
2138 queue->tx_ring_ref);
2145 queue->rx_ring_ref);
2154 if (queue->tx_evtchn == queue->rx_evtchn) {
2157 "event-channel", "%u", queue->tx_evtchn);
2165 "event-channel-tx", "%u", queue->tx_evtchn);
2172 "event-channel-rx", "%u", queue->rx_evtchn);
2192 static int xennet_create_page_pool(struct netfront_queue *queue)
2200 .dev = &queue->info->netdev->dev,
2205 queue->page_pool = page_pool_create(&pp_params);
2206 if (IS_ERR(queue->page_pool)) {
2207 err = PTR_ERR(queue->page_pool);
2208 queue->page_pool = NULL;
2212 err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev,
2213 queue->id);
2215 netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n");
2219 err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
2220 MEM_TYPE_PAGE_POOL, queue->page_pool);
2222 netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n");
2228 xdp_rxq_info_unreg(&queue->xdp_rxq);
2230 page_pool_destroy(queue->page_pool);
2231 queue->page_pool = NULL;
2247 struct netfront_queue *queue = &info->queues[i];
2249 queue->id = i;
2250 queue->info = info;
2252 ret = xennet_init_queue(queue);
2261 ret = xennet_create_page_pool(queue);
2268 netif_napi_add(queue->info->netdev, &queue->napi,
2271 napi_enable(&queue->napi);
2293 struct netfront_queue *queue = NULL;
2304 "multi-queue-max-queues", 1);
2345 /* Create shared ring, alloc event channel -- for each queue */
2347 queue = &info->queues[i];
2348 err = setup_netfront(dev, queue, feature_split_evtchn);
2361 info->xbdev->otherend, "multi-queue-max-queues")) {
2364 "multi-queue-num-queues", "%u", num_queues);
2366 message = "writing multi-queue-num-queues";
2376 /* Write the keys for each queue */
2378 queue = &info->queues[i];
2379 err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
2385 /* The remaining keys are not queue-specific */
2455 struct netfront_queue *queue = NULL;
2500 queue = &np->queues[j];
2502 notify_remote_via_irq(queue->tx_irq);
2503 if (queue->tx_irq != queue->rx_irq)
2504 notify_remote_via_irq(queue->rx_irq);
2506 spin_lock_irq(&queue->tx_lock);
2507 xennet_tx_buf_gc(queue);
2508 spin_unlock_irq(&queue->tx_lock);
2510 spin_lock_bh(&queue->rx_lock);
2511 xennet_alloc_rx_buffers(queue);
2512 spin_unlock_bh(&queue->rx_lock);