Lines matching references to txreq:
924 struct xen_netif_tx_request txreq;
951 RING_COPY_REQUEST(&queue->tx, idx, &txreq);
954 if (txreq.size > queue->remaining_credit &&
955 tx_credit_exceeded(queue, txreq.size))
958 queue->remaining_credit -= txreq.size;
965 if (txreq.flags & XEN_NETTXF_extra_info) {
980 make_tx_response(queue, &txreq, extra_count,
993 make_tx_response(queue, &txreq, extra_count,
998 data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
999 XEN_NETBACK_TX_COPY_LEN : txreq.size;
1001 ret = xenvif_count_requests(queue, &txreq, extra_count,
1009 if (unlikely(txreq.size < ETH_HLEN)) {
1011 "Bad packet size: %d\n", txreq.size);
1012 xenvif_tx_err(queue, &txreq, extra_count, idx);
1017 if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
1018 netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n",
1019 txreq.offset, txreq.size);
1027 if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
1028 data_len = txreq.size;
1034 xenvif_tx_err(queue, &txreq, extra_count, idx);
1052 xenvif_tx_err(queue, &txreq, extra_count, idx);
1100 xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
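The listing above follows one TX request through the xen-netback transmit path: the slot is copied out of the shared ring into a local struct xen_netif_tx_request (RING_COPY_REQUEST, line 951), charged against the queue's credit (lines 954-958), and then validated (minimum size at line 1009, page-boundary check at line 1017) before the grant copy/map operations are built (line 1100). All checks operate on the local copy, so a misbehaving frontend cannot change the fields between validation and use. Below is a minimal, self-contained userspace C sketch of that copy-then-validate pattern; it is an illustration, not the kernel code. The stand-in names (struct tx_request, validate_tx_request) and the ETH_HLEN / XEN_PAGE_SIZE values are assumptions made for the example.

    /*
     * Illustrative sketch of the pattern behind the lines above:
     * snapshot the request from (conceptually) shared memory, then
     * validate only the snapshot.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_HLEN      14      /* assumed minimum Ethernet header length */
    #define XEN_PAGE_SIZE 4096u   /* assumed grant page size */

    struct tx_request {           /* stand-in for xen_netif_tx_request */
            uint32_t gref;
            uint16_t offset;
            uint16_t flags;
            uint16_t id;
            uint16_t size;
    };

    /* Copy the ring slot into a local struct, then check the copy. */
    static int validate_tx_request(const struct tx_request *ring_slot,
                                   struct tx_request *local)
    {
            memcpy(local, ring_slot, sizeof(*local));

            if (local->size < ETH_HLEN) {
                    fprintf(stderr, "Bad packet size: %u\n",
                            (unsigned int)local->size);
                    return -1;
            }
            if ((uint32_t)local->offset + local->size > XEN_PAGE_SIZE) {
                    fprintf(stderr,
                            "Cross page boundary, offset: %u, size: %u\n",
                            (unsigned int)local->offset,
                            (unsigned int)local->size);
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            /* offset + size exceeds the page, so this request is rejected. */
            struct tx_request slot = { .gref = 1, .offset = 4000, .size = 200 };
            struct tx_request txreq;

            if (validate_tx_request(&slot, &txreq))
                    puts("request dropped");
            return 0;
    }

The same local-copy discipline covers the rest of the listing: the credit accounting, the extra-info handling (line 965), and the slot counting in xenvif_count_requests() all read the snapshot taken at line 951, never the live ring entry.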