Lines matching refs:queue in drivers/net/xen-netfront.c

94 /* IRQ name is queue name with "-tx" or "-rx" appended */
166 /* Multi-queue support */
218 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
222 struct sk_buff *skb = queue->rx_skbs[i];
223 queue->rx_skbs[i] = NULL;
227 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
231 grant_ref_t ref = queue->grant_rx_ref[i];
232 queue->grant_rx_ref[i] = INVALID_GRANT_REF;
248 struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
249 napi_schedule(&queue->napi);
252 static int netfront_tx_slot_available(struct netfront_queue *queue)
254 return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
258 static void xennet_maybe_wake_tx(struct netfront_queue *queue)
260 struct net_device *dev = queue->info->netdev;
261 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
264 netfront_tx_slot_available(queue) &&
266 netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
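
The TX path above relies on free-running ring indices: req_prod_pvt and rsp_cons only ever increase, so their unsigned difference counts the requests still in flight even across 32-bit wraparound. A minimal user-space sketch of the check (RING_SIZE and SLOTS_MIN are stand-in constants, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 256u
    #define SLOTS_MIN 18u   /* stand-in for XEN_NETIF_NR_SLOTS_MIN */

    /* Mirrors the shape of netfront_tx_slot_available(). */
    static int tx_slot_available(uint32_t req_prod_pvt, uint32_t rsp_cons)
    {
        return (req_prod_pvt - rsp_cons) < (RING_SIZE - SLOTS_MIN - 1);
    }

    int main(void)
    {
        /* Indices straddling the 32-bit wrap point still count correctly. */
        printf("%d\n", tx_slot_available(0xfffffff0u, 0xffffffe0u)); /* 16 in flight  -> 1 */
        printf("%d\n", tx_slot_available(0x000000d0u, 0xffffffe0u)); /* 240 in flight -> 0 */
        return 0;
    }

xennet_maybe_wake_tx() pairs this availability test with carrier and queue-state checks so the per-queue netdev queue is only woken once enough slots have been reclaimed.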
270 static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
275 skb = __netdev_alloc_skb(queue->info->netdev,
281 page = page_pool_alloc_pages(queue->page_pool,
291 skb->dev = queue->info->netdev;
297 static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
299 RING_IDX req_prod = queue->rx.req_prod_pvt;
303 if (unlikely(!netif_carrier_ok(queue->info->netdev)))
306 for (req_prod = queue->rx.req_prod_pvt;
307 req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
315 skb = xennet_alloc_one_rx_buffer(queue);
323 BUG_ON(queue->rx_skbs[id]);
324 queue->rx_skbs[id] = skb;
326 ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
328 queue->grant_rx_ref[id] = ref;
332 req = RING_GET_REQUEST(&queue->rx, req_prod);
334 queue->info->xbdev->otherend_id,
341 queue->rx.req_prod_pvt = req_prod;
348 if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
350 mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
354 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
356 notify_remote_via_irq(queue->rx_irq);
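
xennet_alloc_rx_buffers() is the producer side of the RX ring: fill every free slot, publish the new producer index, and fall back to the refill timer when too few buffers could be posted. A user-space sketch of that shape (the allocator and the NET_RX_SLOTS_MIN threshold are stand-ins; the real code also claims a grant reference per slot and checks whether a notification is needed):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NET_RX_RING_SIZE 256u
    #define NET_RX_SLOTS_MIN 9u             /* stand-in threshold */

    struct rx_ring { uint32_t req_prod_pvt, rsp_cons, req_prod; };

    static bool alloc_buffer(void) { return true; }   /* stand-in allocator */

    static void alloc_rx_buffers(struct rx_ring *r)
    {
        uint32_t req_prod;

        for (req_prod = r->req_prod_pvt;
             req_prod - r->rsp_cons < NET_RX_RING_SIZE;
             req_prod++) {
            if (!alloc_buffer())
                break;                      /* out of memory: retry via timer */
            /* ...store skb, claim grant ref, fill the request slot... */
        }
        r->req_prod_pvt = req_prod;

        if (req_prod - r->rsp_cons < NET_RX_SLOTS_MIN) {
            printf("would mod_timer(rx_refill_timer, jiffies + HZ/10)\n");
            return;                         /* not enough posted; try again later */
        }
        r->req_prod = req_prod;             /* RING_PUSH_REQUESTS_AND_CHECK_NOTIFY */
    }

    int main(void)
    {
        struct rx_ring r = { 0, 0, 0 };
        alloc_rx_buffers(&r);
        printf("posted %u requests\n", r.req_prod_pvt - r.rsp_cons);
        return 0;
    }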
364 struct netfront_queue *queue = NULL;
370 queue = &np->queues[i];
371 napi_enable(&queue->napi);
373 spin_lock_bh(&queue->rx_lock);
375 xennet_alloc_rx_buffers(queue);
376 queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
377 if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
378 napi_schedule(&queue->napi);
380 spin_unlock_bh(&queue->rx_lock);
388 static bool xennet_tx_buf_gc(struct netfront_queue *queue)
395 const struct device *dev = &queue->info->netdev->dev;
397 BUG_ON(!netif_carrier_ok(queue->info->netdev));
400 prod = queue->tx.sring->rsp_prod;
401 if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
403 prod - queue->tx.rsp_cons);
408 for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
413 RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
418 if (id >= RING_SIZE(&queue->tx)) {
424 if (queue->tx_link[id] != TX_PENDING) {
430 queue->tx_link[id] = TX_LINK_NONE;
431 skb = queue->tx_skbs[id];
432 queue->tx_skbs[id] = NULL;
434 queue->grant_tx_ref[id]))) {
440 &queue->gref_tx_head, queue->grant_tx_ref[id]);
441 queue->grant_tx_ref[id] = INVALID_GRANT_REF;
442 queue->grant_tx_page[id] = NULL;
443 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
447 queue->tx.rsp_cons = prod;
449 RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
452 xennet_maybe_wake_tx(queue);
457 queue->info->broken = true;
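
xennet_tx_buf_gc() trusts nothing the backend writes: the producer snapshot is range-checked (RING_RESPONSE_PROD_OVERFLOW), each response id must be in range and actually pending, and any violation marks the device broken instead of touching freed state. A simplified user-space model of that validation (the id here is derived from the consumer index; the real code copies it out of the response):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 256u

    struct tx_ring {
        uint32_t rsp_prod;        /* backend-written (a shared page in reality) */
        uint32_t rsp_cons;        /* frontend-private consumer index */
        bool in_use[RING_SIZE];
    };

    static bool tx_buf_gc(struct tx_ring *r)
    {
        uint32_t prod = r->rsp_prod;        /* one snapshot, barrier in real code */

        if (prod - r->rsp_cons > RING_SIZE) /* backend claims too many responses */
            return false;                   /* -> mark device broken */

        for (uint32_t cons = r->rsp_cons; cons != prod; cons++) {
            uint32_t id = cons % RING_SIZE; /* stand-in; real code reads txrsp.id */
            if (id >= RING_SIZE || !r->in_use[id])
                return false;               /* malicious or buggy backend */
            r->in_use[id] = false;          /* release skb, grant ref, page */
        }
        r->rsp_cons = prod;
        return true;
    }

    int main(void)
    {
        struct tx_ring r = { .rsp_prod = 3, .rsp_cons = 0,
                             .in_use = { true, true, true } };
        printf("gc ok: %d\n", tx_buf_gc(&r));
        return 0;
    }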
464 struct netfront_queue *queue;
481 struct netfront_queue *queue = info->queue;
484 id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
485 tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
486 ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
489 gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
492 queue->tx_skbs[id] = skb;
493 queue->grant_tx_page[id] = page;
494 queue->grant_tx_ref[id] = ref;
505 * Put the request in the pending queue, it will be set to be pending
508 add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
591 /* First, check if there is only one queue */
602 static void xennet_mark_tx_pending(struct netfront_queue *queue)
606 while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
608 queue->tx_link[i] = TX_PENDING;
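
The tx_link[] array threads each ring id through three states: on the free list (tx_skb_freelist), on the pending queue (tx_pend_queue) while requests are being built, and TX_PENDING once xennet_mark_tx_pending() runs just before the producer index is exposed to the backend. A small user-space model of those list operations (the kernel versions take the link array as a parameter; sizes and sentinel values here are illustrative):

    #include <stdio.h>

    #define RING_SIZE  8
    #define LINK_NONE  0xffffu   /* stand-in for TX_LINK_NONE */
    #define TX_PENDING 0xfffeu   /* stand-in for TX_PENDING */

    static unsigned short tx_link[RING_SIZE];

    static void add_id_to_list(unsigned *head, unsigned id)
    {
        tx_link[id] = *head;
        *head = id;
    }

    static unsigned get_id_from_list(unsigned *head)
    {
        unsigned id = *head;
        if (id != LINK_NONE)
            *head = tx_link[id];
        return id;
    }

    int main(void)
    {
        unsigned freelist = LINK_NONE, pend = LINK_NONE, id, i;

        for (i = RING_SIZE; i-- > 0; )        /* init: every id starts free */
            add_id_to_list(&freelist, i);

        id = get_id_from_list(&freelist);     /* claim an id for a request */
        add_id_to_list(&pend, id);            /* parked until the ring push */

        while ((i = get_id_from_list(&pend)) != LINK_NONE)
            tx_link[i] = TX_PENDING;          /* xennet_mark_tx_pending() */

        printf("id %u is now TX_PENDING\n", id);
        return 0;
    }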
612 struct netfront_queue *queue,
618 .queue = queue,
628 xennet_mark_tx_pending(queue);
630 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
632 notify_remote_via_irq(queue->tx_irq);
639 xennet_tx_buf_gc(queue);
649 struct netfront_queue *queue = NULL;
659 queue = &np->queues[smp_processor_id() % num_queues];
661 spin_lock_irqsave(&queue->tx_lock, irq_flags);
667 if (xennet_xdp_xmit_one(dev, queue, xdpf))
671 spin_unlock_irqrestore(&queue->tx_lock, irq_flags);
718 struct netfront_queue *queue = NULL;
729 /* Determine which queue to transmit this SKB on */
731 queue = &np->queues[queue_index];
773 spin_lock_irqsave(&queue->tx_lock, flags);
778 spin_unlock_irqrestore(&queue->tx_lock, flags);
783 info.queue = queue;
807 RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
839 xennet_mark_tx_pending(queue);
841 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
843 notify_remote_via_irq(queue->tx_irq);
851 xennet_tx_buf_gc(queue);
853 if (!netfront_tx_slot_available(queue))
854 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
856 spin_unlock_irqrestore(&queue->tx_lock, flags);
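
Both transmit paths end with the same push-and-notify sequence. RING_PUSH_REQUESTS_AND_CHECK_NOTIFY only requests an event-channel notification when the backend's req_event marker falls inside the range just published, which suppresses interrupts while the backend is actively polling. A user-space model of that check (memory barriers around the store are omitted; the real macro issues them):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct sring { uint32_t req_prod, req_event; };  /* shared-page fields */

    static bool push_requests_and_check_notify(struct sring *s, uint32_t req_prod_pvt)
    {
        uint32_t old = s->req_prod;
        uint32_t new = req_prod_pvt;

        s->req_prod = new;   /* wmb() before, mb() after in the real macro */
        return (uint32_t)(new - s->req_event) < (uint32_t)(new - old);
    }

    int main(void)
    {
        struct sring s = { .req_prod = 10, .req_event = 11 };

        /* Backend asked to be woken at entry 11; pushing 11..13 notifies. */
        printf("notify: %d\n", push_requests_and_check_notify(&s, 13));

        /* Backend is still polling (req_event already passed): stay quiet. */
        s.req_event = 5;
        printf("notify: %d\n", push_requests_and_check_notify(&s, 15));
        return 0;
    }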
871 struct netfront_queue *queue;
874 queue = &np->queues[i];
875 napi_disable(&queue->napi);
885 struct netfront_queue *queue = &info->queues[i];
888 napi_disable(&queue->napi);
889 netif_napi_del(&queue->napi);
902 static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
906 spin_lock_irqsave(&queue->rx_cons_lock, flags);
907 queue->rx.rsp_cons = val;
908 queue->rx_rsp_unconsumed = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
909 spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
912 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
915 int new = xennet_rxidx(queue->rx.req_prod_pvt);
917 BUG_ON(queue->rx_skbs[new]);
918 queue->rx_skbs[new] = skb;
919 queue->grant_rx_ref[new] = ref;
920 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
921 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
922 queue->rx.req_prod_pvt++;
925 static int xennet_get_extras(struct netfront_queue *queue,
931 struct device *dev = &queue->info->netdev->dev;
932 RING_IDX cons = queue->rx.rsp_cons;
946 RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
958 skb = xennet_get_rx_skb(queue, cons);
959 ref = xennet_get_rx_ref(queue, cons);
960 xennet_move_rx_slot(queue, skb, ref);
963 xennet_set_rx_rsp_cons(queue, cons);
967 static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
977 &queue->xdp_rxq);
986 err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
990 trace_xdp_exception(queue->info->netdev, prog, act);
994 err = xdp_do_redirect(queue->info->netdev, xdp, prog);
997 trace_xdp_exception(queue->info->netdev, prog, act);
1004 trace_xdp_exception(queue->info->netdev, prog, act);
1008 bpf_warn_invalid_xdp_action(queue->info->netdev, prog, act);
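
xennet_run_xdp() maps the program's verdict onto driver actions: XDP_TX re-queues the frame on this device's own TX ring, XDP_REDIRECT hands it to another device, XDP_PASS continues up the normal receive path, and anything else drops the buffer. A sketch of that dispatch shape (the action constants are defined locally for a standalone build, mirroring the uapi values rather than including kernel headers):

    #include <stdio.h>

    enum xdp_action { XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX, XDP_REDIRECT };

    static enum xdp_action run_xdp(enum xdp_action act)
    {
        switch (act) {
        case XDP_TX:        /* xennet_xdp_xmit() back out of this device */
            printf("tx\n");
            break;
        case XDP_REDIRECT:  /* xdp_do_redirect() to another device */
            printf("redirect\n");
            break;
        case XDP_PASS:      /* continue up the normal receive path */
        case XDP_DROP:      /* recycle the page without building an skb */
            break;
        default:            /* unknown verdict: warn, treat as aborted */
            printf("invalid XDP action\n");
            act = XDP_ABORTED;
            break;
        }
        return act;
    }

    int main(void)
    {
        run_xdp(XDP_TX);
        run_xdp(XDP_REDIRECT);
        return 0;
    }

As the matches above show, the real function also calls trace_xdp_exception() on the TX and REDIRECT failure paths.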
1014 static int xennet_get_responses(struct netfront_queue *queue,
1021 RING_IDX cons = queue->rx.rsp_cons;
1022 struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
1024 grant_ref_t ref = xennet_get_rx_ref(queue, cons);
1025 struct device *dev = &queue->info->netdev->dev;
1033 err = xennet_get_extras(queue, extras, rp);
1042 cons = queue->rx.rsp_cons;
1064 xennet_move_rx_slot(queue, skb, ref);
1072 queue->info->broken = true;
1077 gnttab_release_grant_reference(&queue->gref_rx_head, ref);
1080 xdp_prog = rcu_dereference(queue->xdp_prog);
1084 verdict = xennet_run_xdp(queue,
1109 RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
1111 skb = xennet_get_rx_skb(queue, cons + slots);
1112 ref = xennet_get_rx_ref(queue, cons + slots);
1123 xennet_set_rx_rsp_cons(queue, cons + slots);
1157 static int xennet_fill_frags(struct netfront_queue *queue,
1161 RING_IDX cons = queue->rx.rsp_cons;
1168 RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
1177 xennet_set_rx_rsp_cons(queue,
1191 xennet_set_rx_rsp_cons(queue, cons);
1220 static int handle_incoming_queue(struct netfront_queue *queue,
1223 struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
1234 skb->protocol = eth_type_trans(skb, queue->info->netdev);
1237 if (checksum_setup(queue->info->netdev, skb)) {
1240 queue->info->netdev->stats.rx_errors++;
1250 napi_gro_receive(&queue->napi, skb);
1258 struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
1259 struct net_device *dev = queue->info->netdev;
1272 spin_lock(&queue->rx_lock);
1278 rp = queue->rx.sring->rsp_prod;
1279 if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
1281 rp - queue->rx.rsp_cons);
1282 queue->info->broken = true;
1283 spin_unlock(&queue->rx_lock);
1288 i = queue->rx.rsp_cons;
1291 RING_COPY_RESPONSE(&queue->rx, i, rx);
1294 err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
1298 if (queue->info->broken) {
1299 spin_unlock(&queue->rx_lock);
1306 i = queue->rx.rsp_cons;
1318 xennet_set_rx_rsp_cons(queue,
1319 queue->rx.rsp_cons +
1334 if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
1344 i = queue->rx.rsp_cons + 1;
1345 xennet_set_rx_rsp_cons(queue, i);
1353 work_done -= handle_incoming_queue(queue, &rxq);
1355 xennet_alloc_rx_buffers(queue);
1362 RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1367 spin_unlock(&queue->rx_lock);
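
xennet_poll() follows the standard NAPI contract: consume at most the budget, and only complete (re-arming the event channel via a final check for responses) when the poll finished under budget. A user-space model of that loop structure (packet delivery is reduced to an index increment):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t rsp_cons, rsp_prod = 40;   /* 40 responses pending */

    static int poll(int budget)
    {
        int work_done = 0;

        while (rsp_cons != rsp_prod && work_done < budget) {
            rsp_cons++;     /* xennet_get_responses() + deliver one packet */
            work_done++;
        }

        if (work_done < budget)
            printf("complete: re-arm event channel, final response check\n");
        else
            printf("budget exhausted: stay scheduled\n");

        return work_done;
    }

    int main(void)
    {
        while (poll(16) == 16)   /* the core re-polls while budget is used up */
            ;
        return 0;
    }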
1416 static void xennet_release_tx_bufs(struct netfront_queue *queue)
1423 if (!queue->tx_skbs[i])
1426 skb = queue->tx_skbs[i];
1427 queue->tx_skbs[i] = NULL;
1428 get_page(queue->grant_tx_page[i]);
1429 gnttab_end_foreign_access(queue->grant_tx_ref[i],
1430 queue->grant_tx_page[i]);
1431 queue->grant_tx_page[i] = NULL;
1432 queue->grant_tx_ref[i] = INVALID_GRANT_REF;
1433 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
1438 static void xennet_release_rx_bufs(struct netfront_queue *queue)
1442 spin_lock_bh(&queue->rx_lock);
1448 skb = queue->rx_skbs[id];
1452 ref = queue->grant_rx_ref[id];
1463 queue->grant_rx_ref[id] = INVALID_GRANT_REF;
1468 spin_unlock_bh(&queue->rx_lock);
1507 static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
1511 if (unlikely(queue->info->broken))
1514 spin_lock_irqsave(&queue->tx_lock, flags);
1515 if (xennet_tx_buf_gc(queue))
1517 spin_unlock_irqrestore(&queue->tx_lock, flags);
1532 static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
1537 if (unlikely(queue->info->broken))
1540 spin_lock_irqsave(&queue->rx_cons_lock, flags);
1541 work_queued = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
1542 if (work_queued > queue->rx_rsp_unconsumed) {
1543 queue->rx_rsp_unconsumed = work_queued;
1545 } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
1546 const struct device *dev = &queue->info->netdev->dev;
1548 spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1551 queue->info->broken = true;
1554 spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1556 if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
1557 napi_schedule(&queue->napi);
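
The interrupt-side check in xennet_handle_rx() is another rogue-backend defense: the consumer path updates rx_rsp_unconsumed under the same rx_cons_lock whenever it advances rsp_cons, so the count seen here may grow (the backend produced more) but never shrink on its own. A shrink means the backend moved its producer backwards. A compact model:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Last count seen; in the driver this is queue->rx_rsp_unconsumed,
     * only ever touched under rx_cons_lock. */
    static uint32_t rx_rsp_unconsumed;

    static bool handle_rx(uint32_t work_queued)
    {
        if (work_queued > rx_rsp_unconsumed) {
            rx_rsp_unconsumed = work_queued;  /* backend produced more: fine */
        } else if (work_queued < rx_rsp_unconsumed) {
            printf("Illegal number of responses -> device is broken\n");
            return false;
        }
        return true;   /* and schedule NAPI if there is queued work */
    }

    int main(void)
    {
        handle_rx(4);   /* ok */
        handle_rx(7);   /* ok: count grew */
        handle_rx(2);   /* producer went backwards: rogue backend */
        return 0;
    }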
1586 /* Poll each queue */
1816 struct netfront_queue *queue = &info->queues[i];
1818 del_timer_sync(&queue->rx_refill_timer);
1820 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1821 unbind_from_irqhandler(queue->tx_irq, queue);
1822 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1823 unbind_from_irqhandler(queue->tx_irq, queue);
1824 unbind_from_irqhandler(queue->rx_irq, queue);
1826 queue->tx_evtchn = queue->rx_evtchn = 0;
1827 queue->tx_irq = queue->rx_irq = 0;
1830 napi_synchronize(&queue->napi);
1832 xennet_release_tx_bufs(queue);
1833 xennet_release_rx_bufs(queue);
1834 gnttab_free_grant_references(queue->gref_tx_head);
1835 gnttab_free_grant_references(queue->gref_rx_head);
1838 xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1839 xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1841 queue->tx_ring_ref = INVALID_GRANT_REF;
1842 queue->rx_ring_ref = INVALID_GRANT_REF;
1843 queue->tx.sring = NULL;
1844 queue->rx.sring = NULL;
1846 page_pool_destroy(queue->page_pool);
1898 static int setup_netfront_single(struct netfront_queue *queue)
1902 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1906 err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1908 queue->info->netdev->name,
1909 queue);
1912 queue->rx_evtchn = queue->tx_evtchn;
1913 queue->rx_irq = queue->tx_irq = err;
1918 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1919 queue->tx_evtchn = 0;
1924 static int setup_netfront_split(struct netfront_queue *queue)
1928 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1931 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1935 snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1936 "%s-tx", queue->name);
1937 err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1939 queue->tx_irq_name, queue);
1942 queue->tx_irq = err;
1944 snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1945 "%s-rx", queue->name);
1946 err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
1948 queue->rx_irq_name, queue);
1951 queue->rx_irq = err;
1956 unbind_from_irqhandler(queue->tx_irq, queue);
1957 queue->tx_irq = 0;
1959 xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1960 queue->rx_evtchn = 0;
1962 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1963 queue->tx_evtchn = 0;
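
setup_netfront_split() is a textbook goto-unwind ladder: two event-channel allocations and two IRQ bindings, each failure jumping to the label that releases everything acquired so far, in reverse order. A user-space model of the idiom (the alloc/bind stubs are stand-ins):

    #include <stdio.h>

    static int alloc_evtchn(int *ch, int fail) { if (fail) return -1; *ch = 1; return 0; }
    static int bind_irq(int ch) { (void)ch; return 0; }

    static int setup_split(int fail_rx)
    {
        int tx_evtchn = 0, rx_evtchn = 0, err;

        if ((err = alloc_evtchn(&tx_evtchn, 0)) < 0)
            goto fail;
        if ((err = alloc_evtchn(&rx_evtchn, fail_rx)) < 0)
            goto alloc_rx_evtchn_fail;
        if ((err = bind_irq(tx_evtchn)) < 0)
            goto bind_tx_fail;
        if ((err = bind_irq(rx_evtchn)) < 0)
            goto bind_rx_fail;
        return 0;

    bind_rx_fail:
        printf("unbind tx irq\n");
    bind_tx_fail:
        printf("free rx evtchn\n");
    alloc_rx_evtchn_fail:
        printf("free tx evtchn\n");
    fail:
        return err;
    }

    int main(void)
    {
        printf("err=%d\n", setup_split(1));  /* rx alloc fails: tx is released */
        return 0;
    }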
1969 struct netfront_queue *queue, unsigned int feature_split_evtchn)
1975 queue->tx_ring_ref = INVALID_GRANT_REF;
1976 queue->rx_ring_ref = INVALID_GRANT_REF;
1977 queue->rx.sring = NULL;
1978 queue->tx.sring = NULL;
1981 1, &queue->tx_ring_ref);
1985 XEN_FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1988 1, &queue->rx_ring_ref);
1992 XEN_FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
1995 err = setup_netfront_split(queue);
2001 err = setup_netfront_single(queue);
2009 xenbus_teardown_ring((void **)&queue->rx.sring, 1, &queue->rx_ring_ref);
2010 xenbus_teardown_ring((void **)&queue->tx.sring, 1, &queue->tx_ring_ref);
2017 * be run per-queue.
2019 static int xennet_init_queue(struct netfront_queue *queue)
2025 spin_lock_init(&queue->tx_lock);
2026 spin_lock_init(&queue->rx_lock);
2027 spin_lock_init(&queue->rx_cons_lock);
2029 timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
2031 devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
2032 snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
2033 devid, queue->id);
2036 queue->tx_skb_freelist = 0;
2037 queue->tx_pend_queue = TX_LINK_NONE;
2039 queue->tx_link[i] = i + 1;
2040 queue->grant_tx_ref[i] = INVALID_GRANT_REF;
2041 queue->grant_tx_page[i] = NULL;
2043 queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
2047 queue->rx_skbs[i] = NULL;
2048 queue->grant_rx_ref[i] = INVALID_GRANT_REF;
2053 &queue->gref_tx_head) < 0) {
2061 &queue->gref_rx_head) < 0) {
2070 gnttab_free_grant_references(queue->gref_tx_head);
2075 static int write_queue_xenstore_keys(struct netfront_queue *queue,
2078 /* Write the queue-specific keys into XenStore in the traditional
2079 * way for a single queue, or in a queue subkeys for multiple
2082 struct xenbus_device *dev = queue->info->xbdev;
2097 snprintf(path, pathsize, "%s/queue-%u",
2098 dev->nodename, queue->id);
2105 queue->tx_ring_ref);
2112 queue->rx_ring_ref);
2121 if (queue->tx_evtchn == queue->rx_evtchn) {
2124 "event-channel", "%u", queue->tx_evtchn);
2132 "event-channel-tx", "%u", queue->tx_evtchn);
2139 "event-channel-rx", "%u", queue->rx_evtchn);
2159 static int xennet_create_page_pool(struct netfront_queue *queue)
2167 .dev = &queue->info->netdev->dev,
2172 queue->page_pool = page_pool_create(&pp_params);
2173 if (IS_ERR(queue->page_pool)) {
2174 err = PTR_ERR(queue->page_pool);
2175 queue->page_pool = NULL;
2179 err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev,
2180 queue->id, 0);
2182 netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n");
2186 err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
2187 MEM_TYPE_PAGE_POOL, queue->page_pool);
2189 netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n");
2195 xdp_rxq_info_unreg(&queue->xdp_rxq);
2197 page_pool_destroy(queue->page_pool);
2198 queue->page_pool = NULL;
2214 struct netfront_queue *queue = &info->queues[i];
2216 queue->id = i;
2217 queue->info = info;
2219 ret = xennet_init_queue(queue);
2228 ret = xennet_create_page_pool(queue);
2235 netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll);
2237 napi_enable(&queue->napi);
2259 struct netfront_queue *queue = NULL;
2271 "multi-queue-max-queues", 1);
2313 /* Create shared ring, alloc event channel -- for each queue */
2315 queue = &info->queues[i];
2316 err = setup_netfront(dev, queue, feature_split_evtchn);
2329 info->xbdev->otherend, "multi-queue-max-queues")) {
2332 "multi-queue-num-queues", "%u", num_queues);
2334 message = "writing multi-queue-num-queues";
2344 /* Write the keys for each queue */
2346 queue = &info->queues[i];
2347 err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
2353 /* The remaining keys are not queue-specific */
2423 struct netfront_queue *queue = NULL;
2468 queue = &np->queues[j];
2470 notify_remote_via_irq(queue->tx_irq);
2471 if (queue->tx_irq != queue->rx_irq)
2472 notify_remote_via_irq(queue->rx_irq);
2474 spin_lock_bh(&queue->rx_lock);
2475 xennet_alloc_rx_buffers(queue);
2476 spin_unlock_bh(&queue->rx_lock);