Lines matching defs:queue (drivers/net/xen-netback/netback.c, the Xen network backend TX path)

58 /* The time that packets can stay on the guest Rx internal queue
106 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
109 static void make_tx_response(struct xenvif_queue *queue,
114 static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
116 static inline int tx_work_todo(struct xenvif_queue *queue);
118 static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
121 return page_to_pfn(queue->mmap_pages[idx]);
124 static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
127 return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
160 void xenvif_kick_thread(struct xenvif_queue *queue)
162 wake_up(&queue->wq);
165 void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
169 RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
172 napi_schedule(&queue->napi);
174 &queue->eoi_pending) &
176 xen_irq_lateeoi(queue->tx_irq, 0);
179 static void tx_add_credit(struct xenvif_queue *queue)
187 max_burst = max(131072UL, queue->credit_bytes);
190 max_credit = queue->remaining_credit + queue->credit_bytes;
191 if (max_credit < queue->remaining_credit)
194 queue->remaining_credit = min(max_credit, max_burst);
195 queue->rate_limited = false;
200 struct xenvif_queue *queue = from_timer(queue, t, credit_timeout);
201 tx_add_credit(queue);
202 xenvif_napi_schedule_or_enable_events(queue);
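
The tx_add_credit() fragments above are the per-queue credit refill that the credit timer triggers. Below is a minimal user-space model of that arithmetic, assuming only the field names visible above (remaining_credit, credit_bytes, rate_limited); the struct and helper names are illustrative, not the kernel's.

    /* Minimal model of the credit refill: add one credit_bytes chunk,
     * clamp on unsigned wrap, and cap the burst at 128 KiB or one window. */
    #include <stdio.h>
    #include <limits.h>

    struct credit_state {
        unsigned long remaining_credit;   /* bytes still allowed this window */
        unsigned long credit_bytes;       /* bytes granted per window */
        int rate_limited;
    };

    static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }
    static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }

    static void tx_add_credit_model(struct credit_state *q)
    {
        /* Allow a burst of at least 128 KiB, or one full window if larger. */
        unsigned long max_burst = max_ul(131072UL, q->credit_bytes);

        /* Add a chunk; if the addition wraps, clamp instead of wrapping to zero. */
        unsigned long max_credit = q->remaining_credit + q->credit_bytes;
        if (max_credit < q->remaining_credit)
            max_credit = ULONG_MAX;

        q->remaining_credit = min_ul(max_credit, max_burst);
        q->rate_limited = 0;
    }

    int main(void)
    {
        struct credit_state q = { .remaining_credit = 1000, .credit_bytes = 65536 };
        tx_add_credit_model(&q);
        printf("remaining_credit = %lu\n", q.remaining_credit); /* prints 66536 */
        return 0;
    }
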
205 static void xenvif_tx_err(struct xenvif_queue *queue,
209 RING_IDX cons = queue->tx.req_cons;
212 make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
215 RING_COPY_REQUEST(&queue->tx, cons++, txp);
218 queue->tx.req_cons = cons;
225 /* Disable the vif from queue 0's kthread */
230 static int xenvif_count_requests(struct xenvif_queue *queue,
236 RING_IDX cons = queue->tx.req_cons;
248 netdev_err(queue->vif->dev,
251 xenvif_fatal_tx_err(queue->vif);
259 netdev_err(queue->vif->dev,
262 xenvif_fatal_tx_err(queue->vif);
275 netdev_dbg(queue->vif->dev,
284 RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
297 netdev_dbg(queue->vif->dev,
307 netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
309 xenvif_fatal_tx_err(queue->vif);
321 xenvif_tx_err(queue, first, extra_count, cons + slots);
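
xenvif_count_requests() walks a chain of TX requests linked by a more-data flag and bounds how many ring slots one packet may consume, failing the vif when the chain is implausibly long. A rough standalone sketch of that walk follows; the flag value, the slot cap, and the request layout are illustrative stand-ins, not the Xen netif ABI.

    /* Rough sketch of counting chained TX request slots against a hard cap. */
    #include <stdio.h>

    #define MORE_DATA     0x1   /* stand-in for the "more data follows" flag */
    #define HARD_SLOT_CAP 18    /* illustrative per-packet slot limit */

    struct fake_txreq {
        unsigned int flags;
        unsigned int size;
    };

    /* Returns the slot count, or -1 when the chain exceeds the hard cap
     * (the kernel treats that as a fatal error on the vif). */
    static int count_requests_model(const struct fake_txreq *ring,
                                    unsigned int cons, unsigned int ring_size)
    {
        const struct fake_txreq *txp;
        int slots = 0;

        do {
            if (slots >= HARD_SLOT_CAP)
                return -1;
            txp = &ring[(cons + slots) % ring_size];
            slots++;
        } while (txp->flags & MORE_DATA);

        return slots;
    }

    int main(void)
    {
        struct fake_txreq ring[8] = {
            { MORE_DATA, 1000 }, { MORE_DATA, 500 }, { 0, 200 },
        };
        printf("slots = %d\n", count_requests_model(ring, 0, 8)); /* prints 3 */
        return 0;
    }
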
339 static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
345 queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
346 gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
348 txp->gref, queue->vif->domid);
350 memcpy(&queue->pending_tx_info[pending_idx].req, txp,
352 queue->pending_tx_info[pending_idx].extra_count = extra_count;
374 static void xenvif_get_requests(struct xenvif_queue *queue,
390 struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
391 struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
406 cop->source.domid = queue->vif->domid;
426 index = pending_index(queue->pending_cons);
427 pending_idx = queue->pending_ring[index];
428 callback_param(queue, pending_idx).ctx = NULL;
439 memcpy(&queue->pending_tx_info[pending_idx].req,
441 queue->pending_tx_info[pending_idx].extra_count =
448 queue->pending_cons++;
463 make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
468 index = pending_index(queue->pending_cons++);
469 pending_idx = queue->pending_ring[index];
470 xenvif_tx_create_map_op(queue, pending_idx, txp,
489 make_tx_response(queue, txp, 0,
494 index = pending_index(queue->pending_cons++);
495 pending_idx = queue->pending_ring[index];
496 xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
518 (*copy_ops) = cop - queue->tx_copy_ops;
519 (*map_ops) = gop - queue->tx_map_ops;
522 static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
526 if (unlikely(queue->grant_tx_handle[pending_idx] !=
528 netdev_err(queue->vif->dev,
533 queue->grant_tx_handle[pending_idx] = handle;
536 static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
539 if (unlikely(queue->grant_tx_handle[pending_idx] ==
541 netdev_err(queue->vif->dev,
546 queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
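
xenvif_grant_handle_set()/_reset() guard the per-slot grant handle with an invalid-handle sentinel so that a double map or a double unmap is caught and logged rather than corrupting state. A self-contained model of that guard, with a made-up sentinel value standing in for NETBACK_INVALID_HANDLE:

    /* Sentinel-based double-map/double-unmap guard: a slot must be invalid
     * before it is set and valid before it is cleared. */
    #include <stdio.h>
    #include <stdint.h>

    #define INVALID_HANDLE   0xFFFFu   /* stand-in for NETBACK_INVALID_HANDLE */
    #define MAX_PENDING_REQS 256

    static uint16_t grant_handle[MAX_PENDING_REQS];

    static int handle_set(unsigned int idx, uint16_t handle)
    {
        if (grant_handle[idx] != INVALID_HANDLE) {
            fprintf(stderr, "slot %u: trying to overwrite an active handle\n", idx);
            return -1;
        }
        grant_handle[idx] = handle;
        return 0;
    }

    static int handle_reset(unsigned int idx)
    {
        if (grant_handle[idx] == INVALID_HANDLE) {
            fprintf(stderr, "slot %u: trying to unmap an inactive handle\n", idx);
            return -1;
        }
        grant_handle[idx] = INVALID_HANDLE;
        return 0;
    }

    int main(void)
    {
        for (unsigned int i = 0; i < MAX_PENDING_REQS; i++)
            grant_handle[i] = INVALID_HANDLE;

        handle_set(5, 42);
        handle_set(5, 43);   /* rejected: slot already active */
        handle_reset(5);
        handle_reset(5);     /* rejected: slot already inactive */
        return 0;
    }
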
549 static int xenvif_tx_check_gop(struct xenvif_queue *queue,
587 xenvif_idx_release(queue, pending_idx,
592 netdev_dbg(queue->vif->dev,
599 xenvif_idx_release(queue, pending_idx,
615 xenvif_grant_handle_set(queue,
620 xenvif_idx_unmap(queue, pending_idx);
626 xenvif_idx_release(queue, pending_idx,
629 xenvif_idx_release(queue, pending_idx,
637 netdev_dbg(queue->vif->dev,
644 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
653 xenvif_idx_unmap(queue, pending_idx);
654 xenvif_idx_release(queue, pending_idx,
664 xenvif_idx_unmap(queue, pending_idx);
665 xenvif_idx_release(queue, pending_idx,
686 static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
704 &callback_param(queue, pending_idx);
706 callback_param(queue, prev_pending_idx).ctx =
707 &callback_param(queue, pending_idx);
709 callback_param(queue, pending_idx).ctx = NULL;
712 txp = &queue->pending_tx_info[pending_idx].req;
713 page = virt_to_page(idx_to_kaddr(queue, pending_idx));
720 get_page(queue->mmap_pages[pending_idx]);
724 static int xenvif_get_extras(struct xenvif_queue *queue,
730 RING_IDX cons = queue->tx.req_cons;
734 netdev_err(queue->vif->dev, "Missing extra info\n");
735 xenvif_fatal_tx_err(queue->vif);
739 RING_COPY_REQUEST(&queue->tx, cons, &extra);
741 queue->tx.req_cons = ++cons;
746 netdev_err(queue->vif->dev,
748 xenvif_fatal_tx_err(queue->vif);
787 static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
797 queue->stats.rx_gso_checksum_fixup++;
809 static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
812 u64 next_credit = queue->credit_window_start +
813 msecs_to_jiffies(queue->credit_usec / 1000);
816 if (timer_pending(&queue->credit_timeout)) {
817 queue->rate_limited = true;
823 queue->credit_window_start = now;
824 tx_add_credit(queue);
828 if (size > queue->remaining_credit) {
829 mod_timer(&queue->credit_timeout,
831 queue->credit_window_start = next_credit;
832 queue->rate_limited = true;
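
tx_credit_exceeded() implements the credit window: if the refill timer is already armed the packet waits, if the window has elapsed the credit is refilled, and if the packet still does not fit the timer is armed for the next window and the queue is marked rate-limited. The sketch below simulates that decision with plain counters in place of jiffies and kernel timers; any field not visible in the fragments above is illustrative.

    /* Simulated credit-window check: refill when the window elapses,
     * otherwise defer oversized packets until a (simulated) timer fires. */
    #include <stdio.h>
    #include <stdbool.h>

    struct q_model {
        unsigned long remaining_credit;
        unsigned long credit_bytes;        /* refill amount per window */
        unsigned long credit_window_len;   /* window length, in ticks */
        unsigned long credit_window_start;
        unsigned long timer_deadline;
        bool timer_pending;
        bool rate_limited;
    };

    static void add_credit(struct q_model *q)
    {
        unsigned long max_burst = q->credit_bytes > 131072UL ? q->credit_bytes : 131072UL;
        unsigned long c = q->remaining_credit + q->credit_bytes;

        q->remaining_credit = c < max_burst ? c : max_burst;
        q->rate_limited = false;
    }

    static bool credit_exceeded(struct q_model *q, unsigned long now, unsigned int size)
    {
        unsigned long next_credit = q->credit_window_start + q->credit_window_len;

        if (q->timer_pending) {            /* refill timer already armed */
            q->rate_limited = true;
            return true;
        }
        if (now >= next_credit) {          /* window elapsed: refill now */
            q->credit_window_start = now;
            add_credit(q);
        }
        if (size > q->remaining_credit) {  /* still does not fit: defer */
            q->timer_pending = true;
            q->timer_deadline = next_credit;
            q->credit_window_start = next_credit;
            q->rate_limited = true;
            return true;
        }
        return false;
    }

    int main(void)
    {
        struct q_model q = { .credit_bytes = 10000, .credit_window_len = 100 };

        add_credit(&q);                    /* credit for the first window */
        printf("t=0,  4000B: %s\n", credit_exceeded(&q, 0, 4000) ? "deferred" : "ok");
        q.remaining_credit -= 4000;        /* mirrors remaining_credit -= txreq.size */
        printf("t=10, 8000B: %s\n", credit_exceeded(&q, 10, 8000) ? "deferred" : "ok");
        return 0;
    }
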
899 /* No need for locking or RCU here. NAPI poll and TX queue
914 static void xenvif_tx_build_gops(struct xenvif_queue *queue,
923 while (skb_queue_len(&queue->tx_queue) < budget) {
934 if (queue->tx.sring->req_prod - queue->tx.req_cons >
936 netdev_err(queue->vif->dev,
939 queue->tx.sring->req_prod, queue->tx.req_cons,
941 xenvif_fatal_tx_err(queue->vif);
945 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
949 idx = queue->tx.req_cons;
951 RING_COPY_REQUEST(&queue->tx, idx, &txreq);
954 if (txreq.size > queue->remaining_credit &&
955 tx_credit_exceeded(queue, txreq.size))
958 queue->remaining_credit -= txreq.size;
961 queue->tx.req_cons = ++idx;
966 work_to_do = xenvif_get_extras(queue, extras,
969 idx = queue->tx.req_cons;
978 ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
980 make_tx_response(queue, &txreq, extra_count,
991 xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
993 make_tx_response(queue, &txreq, extra_count,
1001 ret = xenvif_count_requests(queue, &txreq, extra_count,
1010 netdev_dbg(queue->vif->dev,
1012 xenvif_tx_err(queue, &txreq, extra_count, idx);
1018 netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n",
1020 xenvif_fatal_tx_err(queue->vif);
1024 index = pending_index(queue->pending_cons);
1025 pending_idx = queue->pending_ring[index];
1032 netdev_dbg(queue->vif->dev,
1034 xenvif_tx_err(queue, &txreq, extra_count, idx);
1052 xenvif_tx_err(queue, &txreq, extra_count, idx);
1054 netdev_err(queue->vif->dev,
1064 if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
1100 xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
1104 __skb_queue_tail(&queue->tx_queue, skb);
1106 queue->tx.req_cons = idx;
1108 if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) ||
1109 (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
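
xenvif_tx_build_gops() pulls requests off the shared TX ring while the queued-skb budget allows and stops early once either scratch array of grant copy/map operations is close to full. The toy loop below shows only that batching pattern; the array sizes and the ops-per-packet split are invented for the example.

    /* Batching pattern: stop at the budget or when scratch space runs low. */
    #include <stdio.h>

    #define BUDGET        64
    #define MAX_COPY_OPS 128
    #define MAX_MAP_OPS  128

    struct fake_op { int dummy; };

    int main(void)
    {
        struct fake_op copy_ops[MAX_COPY_OPS], map_ops[MAX_MAP_OPS];
        unsigned int ncopy = 0, nmap = 0, queued = 0;

        while (queued < BUDGET) {
            /* Pretend each packet needs one copy op for the linear part
             * and two map ops for its frags (purely illustrative). */
            copy_ops[ncopy++] = (struct fake_op){ 0 };
            map_ops[nmap++]   = (struct fake_op){ 0 };
            map_ops[nmap++]   = (struct fake_op){ 0 };
            queued++;

            if (ncopy >= MAX_COPY_OPS - 1 || nmap >= MAX_MAP_OPS - 2)
                break;                     /* scratch arrays nearly exhausted */
        }
        printf("queued %u packets, %u copy ops, %u map ops\n", queued, ncopy, nmap);
        return 0;
    }
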
1119 static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
1127 queue->stats.tx_zerocopy_sent += 2;
1128 queue->stats.tx_frag_overflow++;
1130 xenvif_fill_frags(queue, nskb);
1169 atomic_inc(&queue->inflight_packets);
1181 static int xenvif_tx_submit(struct xenvif_queue *queue)
1183 struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
1184 struct gnttab_copy *gop_copy = queue->tx_copy_ops;
1188 while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
1193 txp = &queue->pending_tx_info[pending_idx].req;
1196 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1216 xenvif_fill_frags(queue, skb);
1220 xenvif_skb_zerocopy_prepare(queue, nskb);
1221 if (xenvif_handle_frag_list(queue, skb)) {
1223 netdev_err(queue->vif->dev,
1225 xenvif_skb_zerocopy_prepare(queue, skb);
1234 skb->dev = queue->vif->dev;
1238 if (checksum_setup(queue, skb)) {
1239 netdev_dbg(queue->vif->dev,
1243 xenvif_skb_zerocopy_prepare(queue, skb);
1273 queue->stats.rx_bytes += skb->len;
1274 queue->stats.rx_packets++;
1284 xenvif_skb_zerocopy_prepare(queue, skb);
1285 queue->stats.tx_zerocopy_sent++;
1298 struct xenvif_queue *queue = ubuf_to_queue(ubuf);
1303 spin_lock_irqsave(&queue->callback_lock, flags);
1307 BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
1309 index = pending_index(queue->dealloc_prod);
1310 queue->dealloc_ring[index] = pending_idx;
1315 queue->dealloc_prod++;
1317 spin_unlock_irqrestore(&queue->callback_lock, flags);
1320 queue->stats.tx_zerocopy_success++;
1322 queue->stats.tx_zerocopy_fail++;
1323 xenvif_skb_zerocopy_complete(queue);
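
The zerocopy completion callback pushes finished pending slots onto the per-queue dealloc ring under callback_lock and advances dealloc_prod for the dealloc kthread to pick up. A single-threaded model of the producer side, with the lock and the memory barrier reduced to comments:

    /* Producer side of the dealloc ring: publish a slot, then bump the index. */
    #include <stdio.h>
    #include <assert.h>

    #define MAX_PENDING_REQS 256
    #define pending_index(x) ((x) & (MAX_PENDING_REQS - 1))

    static unsigned short dealloc_ring[MAX_PENDING_REQS];
    static unsigned int dealloc_prod, dealloc_cons;

    static void dealloc_push(unsigned short pending_idx)
    {
        /* In the kernel this runs under callback_lock with IRQs saved. */
        assert(dealloc_prod - dealloc_cons < MAX_PENDING_REQS);  /* never overfills */
        dealloc_ring[pending_index(dealloc_prod)] = pending_idx;
        /* A write barrier here publishes the entry before the new producer. */
        dealloc_prod++;
    }

    int main(void)
    {
        dealloc_push(7);
        dealloc_push(12);
        printf("entries waiting for dealloc: %u\n", dealloc_prod - dealloc_cons);
        return 0;
    }
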
1326 static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
1333 dc = queue->dealloc_cons;
1334 gop = queue->tx_unmap_ops;
1338 dp = queue->dealloc_prod;
1346 BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
1348 queue->dealloc_ring[pending_index(dc++)];
1350 pending_idx_release[gop - queue->tx_unmap_ops] =
1352 queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
1353 queue->mmap_pages[pending_idx];
1355 idx_to_kaddr(queue, pending_idx),
1357 queue->grant_tx_handle[pending_idx]);
1358 xenvif_grant_handle_reset(queue, pending_idx);
1362 } while (dp != queue->dealloc_prod);
1364 queue->dealloc_cons = dc;
1366 if (gop - queue->tx_unmap_ops > 0) {
1368 ret = gnttab_unmap_refs(queue->tx_unmap_ops,
1370 queue->pages_to_unmap,
1371 gop - queue->tx_unmap_ops);
1373 netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
1374 gop - queue->tx_unmap_ops, ret);
1375 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
1377 netdev_err(queue->vif->dev,
1387 for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
1388 xenvif_idx_release(queue, pending_idx_release[i],
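
xenvif_tx_dealloc_action() is the consumer: it drains dealloc_cons up to a re-read dealloc_prod, batches every slot into one unmap array, issues a single unmap for the whole batch, and then releases the slots back to the pending ring. A simplified single-threaded model of that drain, with the grant unmap replaced by a stub:

    /* Consumer side of the dealloc ring: drain, unmap in one batch, release. */
    #include <stdio.h>

    #define MAX_PENDING_REQS 256
    #define pending_index(x) ((x) & (MAX_PENDING_REQS - 1))

    static unsigned short dealloc_ring[MAX_PENDING_REQS];
    static unsigned int dealloc_prod, dealloc_cons;

    static int fake_unmap_batch(const unsigned short *idx, unsigned int n)
    {
        printf("unmapping %u slots in one batch (first pending_idx %u)\n", n, idx[0]);
        return 0;                          /* 0 == success, like gnttab_unmap_refs() */
    }

    static void dealloc_drain(void)
    {
        unsigned short batch[MAX_PENDING_REQS];
        unsigned int n = 0, dc = dealloc_cons, dp;

        do {
            dp = dealloc_prod;             /* re-read: producer may have advanced */
            /* A read barrier here pairs with the producer's write barrier. */
            while (dc != dp)
                batch[n++] = dealloc_ring[pending_index(dc++)];
        } while (dp != dealloc_prod);
        dealloc_cons = dc;

        if (n && fake_unmap_batch(batch, n) == 0)
            for (unsigned int i = 0; i < n; i++)
                printf("release pending_idx %u back to the free ring\n", batch[i]);
    }

    int main(void)
    {
        dealloc_ring[pending_index(dealloc_prod++)] = 7;
        dealloc_ring[pending_index(dealloc_prod++)] = 12;
        dealloc_drain();
        return 0;
    }
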
1394 int xenvif_tx_action(struct xenvif_queue *queue, int budget)
1399 if (unlikely(!tx_work_todo(queue)))
1402 xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
1407 gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
1409 ret = gnttab_map_refs(queue->tx_map_ops,
1411 queue->pages_to_map,
1416 netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n",
1419 WARN_ON_ONCE(queue->tx_map_ops[i].status ==
1424 work_done = xenvif_tx_submit(queue);
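
xenvif_tx_action() ties the pieces together: build the batched grant ops up to the NAPI budget, issue the copies, issue the maps, then submit the completed skbs to the stack. The stub-based sketch below shows only that sequencing; the early-return condition and the map-failure handling are simplified.

    /* Sequencing of one TX poll: build ops, copy, map, submit. */
    #include <stdio.h>

    static void build_gops(int budget, unsigned int *ncop, unsigned int *nmop)
    {
        *ncop = budget > 0 ? 3 : 0;        /* pretend we built a few ops */
        *nmop = budget > 0 ? 5 : 0;
    }
    static void batch_copy(unsigned int n) { printf("copy %u ops\n", n); }
    static int  map_refs(unsigned int n)   { printf("map %u refs\n", n); return 0; }
    static int  submit(void)               { return 8; /* packets handed up */ }

    static int tx_action_model(int budget)
    {
        unsigned int nr_cops = 0, nr_mops = 0;

        build_gops(budget, &nr_cops, &nr_mops);
        if (nr_cops == 0)
            return 0;                      /* nothing to do on this poll */

        batch_copy(nr_cops);               /* stands in for gnttab_batch_copy() */
        if (nr_mops != 0 && map_refs(nr_mops) != 0)
            printf("map failure: logged; bad slots are cleaned up at submit\n");

        return submit();                   /* stands in for xenvif_tx_submit() */
    }

    int main(void)
    {
        printf("work done: %d\n", tx_action_model(64));
        return 0;
    }
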
1429 static void _make_tx_response(struct xenvif_queue *queue,
1434 RING_IDX i = queue->tx.rsp_prod_pvt;
1437 resp = RING_GET_RESPONSE(&queue->tx, i);
1442 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1444 queue->tx.rsp_prod_pvt = ++i;
1447 static void push_tx_responses(struct xenvif_queue *queue)
1451 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1453 notify_remote_via_irq(queue->tx_irq);
1456 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1463 pending_tx_info = &queue->pending_tx_info[pending_idx];
1465 spin_lock_irqsave(&queue->response_lock, flags);
1467 _make_tx_response(queue, &pending_tx_info->req,
1474 index = pending_index(queue->pending_prod++);
1475 queue->pending_ring[index] = pending_idx;
1477 push_tx_responses(queue);
1479 spin_unlock_irqrestore(&queue->response_lock, flags);
1482 static void make_tx_response(struct xenvif_queue *queue,
1489 spin_lock_irqsave(&queue->response_lock, flags);
1491 _make_tx_response(queue, txp, extra_count, status);
1492 push_tx_responses(queue);
1494 spin_unlock_irqrestore(&queue->response_lock, flags);
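
_make_tx_response() writes one response per consumed request at the private producer index, and push_tx_responses() publishes it and decides whether the frontend needs an event-channel kick. The model below reflects my reading of the ring macros' event-index check; the shared ring and the notification are simulated, and the simulated field names follow the fragments above.

    /* Response producer with event-index based notification suppression. */
    #include <stdio.h>
    #include <stdbool.h>

    #define RING_SIZE 256

    struct resp { unsigned short id; signed char status; };

    static struct resp ring[RING_SIZE];
    static unsigned int rsp_prod_pvt;   /* backend-private producer index */
    static unsigned int rsp_prod;       /* index published to the frontend */
    static unsigned int rsp_event = 1;  /* frontend: "notify me at this index" */

    static void make_response(unsigned short id, signed char status)
    {
        struct resp *r = &ring[rsp_prod_pvt % RING_SIZE];

        r->id = id;
        r->status = status;
        rsp_prod_pvt++;
    }

    static void push_responses(void)
    {
        unsigned int old = rsp_prod;
        bool notify;

        /* A write barrier here makes responses visible before the index. */
        rsp_prod = rsp_prod_pvt;
        notify = (rsp_prod - rsp_event) < (rsp_prod - old);
        printf(notify ? "notify frontend (event index %u reached)\n"
                      : "no notification needed (event index %u not reached)\n",
               rsp_event);
    }

    int main(void)
    {
        make_response(0, 0 /* OKAY */);
        push_responses();
        return 0;
    }
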
1497 static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1503 idx_to_kaddr(queue, pending_idx),
1505 queue->grant_tx_handle[pending_idx]);
1506 xenvif_grant_handle_reset(queue, pending_idx);
1509 &queue->mmap_pages[pending_idx], 1);
1511 netdev_err(queue->vif->dev,
1522 static inline int tx_work_todo(struct xenvif_queue *queue)
1524 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
1530 static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
1532 return queue->dealloc_cons != queue->dealloc_prod;
1535 void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
1537 if (queue->tx.sring)
1538 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1539 queue->tx.sring);
1540 if (queue->rx.sring)
1541 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1542 queue->rx.sring);
1545 int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
1555 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1564 BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE);
1567 if (req_prod - rsp_prod > RING_SIZE(&queue->tx))
1570 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1579 BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE);
1582 if (req_prod - rsp_prod > RING_SIZE(&queue->rx))
1588 xenvif_unmap_frontend_data_rings(queue);
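
xenvif_map_frontend_data_rings() attaches to the frontend's TX and RX shared rings and then sanity-checks the published indices: with free-running counters, req_prod may never be more than one ring size ahead of rsp_prod, so anything larger means corrupted or hostile indices. A small demonstration of that wrap-safe check:

    /* Wrap-safe "is the frontend within one ring of us?" check. */
    #include <stdio.h>
    #include <stdbool.h>

    #define RING_SIZE 256u

    static bool indices_sane(unsigned int req_prod, unsigned int rsp_prod)
    {
        /* Unsigned subtraction copes with index wraparound. */
        return req_prod - rsp_prod <= RING_SIZE;
    }

    int main(void)
    {
        printf("%d\n", indices_sane(300, 100));      /* 1: 200 outstanding, ok    */
        printf("%d\n", indices_sane(1000, 100));     /* 0: 900 > ring size, bad   */
        printf("%d\n", indices_sane(5, 0xFFFFFFF0)); /* 1: wrapped, 21 outstanding */
        return 0;
    }
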
1592 static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
1598 !atomic_read(&queue->inflight_packets);
1603 struct xenvif_queue *queue = data;
1606 wait_event_interruptible(queue->dealloc_wq,
1607 tx_dealloc_work_todo(queue) ||
1608 xenvif_dealloc_kthread_should_stop(queue));
1609 if (xenvif_dealloc_kthread_should_stop(queue))
1612 xenvif_tx_dealloc_action(queue);
1617 if (tx_dealloc_work_todo(queue))
1618 xenvif_tx_dealloc_action(queue);