Lines matching references to tq:
22 struct vmxnet3_tx_queue *tq;
29 tq = &adapter->tx_queue[cpu];
31 tq = &adapter->tx_queue[reciprocal_scale(cpu, tq_number)];
33 return tq;
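
The matches at lines 22-33 belong to the per-CPU transmit-queue selector, vmxnet3_xdp_get_tq(), which is called at lines 208 and 235 below. A minimal, hedged reconstruction of how these lines likely fit together is sketched here; the static qualifier and the exact local declarations outside the listed lines are assumptions.

/* Hedged sketch: pick a TX queue for XDP based on the current CPU,
 * assuming the standard kernel helpers smp_processor_id() and
 * reciprocal_scale().
 */
static struct vmxnet3_tx_queue *
vmxnet3_xdp_get_tq(struct vmxnet3_adapter *adapter)
{
	struct vmxnet3_tx_queue *tq;			/* line 22 */
	int tq_number = adapter->num_tx_queues;
	int cpu = smp_processor_id();

	if (likely(cpu < tq_number))
		tq = &adapter->tx_queue[cpu];		/* line 29: 1:1 CPU-to-queue mapping */
	else
		/* line 31: more CPUs than queues, scale the CPU id into range */
		tq = &adapter->tx_queue[reciprocal_scale(cpu, tq_number)];

	return tq;					/* line 33 */
}
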
117 struct vmxnet3_tx_queue *tq, bool dma_map)
127 dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
129 ctx.sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
133 tbi = tq->buf_info + tq->tx_ring.next2fill;
135 if (vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) == 0) {
136 tq->stats.tx_ring_full++;
159 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
160 WARN_ON_ONCE(gdesc->txd.gen == tq->tx_ring.gen);
173 tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
174 le32_add_cpu(&tq->shared->txNumDeferred, 1);
177 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
180 tbi->sop_idx = ctx.sop_txd - tq->tx_ring.base;
188 * tq->shared->txNumDeferred to 0.
190 if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
191 tq->shared->txNumDeferred = 0;
193 VMXNET3_REG_TXPROD + tq->qid * 8,
194 tq->tx_ring.next2fill);
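
The matches at lines 117-194 trace a single-frame transmit: select the descriptor generation bit, reserve a descriptor and buffer-info slot, bail out when the ring is full, account the frame in the shared deferred counter, advance the fill pointer, and ring the TXPROD doorbell once the deferred count crosses the device threshold. A condensed, hedged sketch follows; the DMA mapping and full descriptor fill are omitted, and the error code, the register-write macro name VMXNET3_WRITE_BAR0_REG, and the internal type names are assumptions inferred from context rather than shown in the listing.

/* Hedged, condensed sketch of the flow the listed lines walk through. */
static int
vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter, struct xdp_frame *xdpf,
		       struct vmxnet3_tx_queue *tq, bool dma_map)	/* line 117 */
{
	struct vmxnet3_tx_buf_info *tbi;	/* assumed type names */
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_ctx ctx;
	u32 tx_num_deferred;
	u32 dw2;

	/* line 127: new descriptors carry the inverted generation bit so the
	 * device does not consume them before ownership is flipped.
	 */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

	ctx.sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;	/* line 129 */
	tbi = tq->buf_info + tq->tx_ring.next2fill;		/* line 133 */

	if (vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) == 0) {	/* line 135 */
		tq->stats.tx_ring_full++;			/* line 136 */
		return -ENOSPC;			/* assumed error code */
	}

	/* ... DMA-map the frame and fill the descriptor (not shown) ... */

	gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;	/* line 159 */
	WARN_ON_ONCE(gdesc->txd.gen == tq->tx_ring.gen);	/* line 160 */

	tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred); /* line 173 */
	le32_add_cpu(&tq->shared->txNumDeferred, 1);		   /* line 174 */

	vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);		/* line 177 */
	tbi->sop_idx = ctx.sop_txd - tq->tx_ring.base;		/* line 180 */

	/* lines 190-194: only write the TXPROD doorbell once enough packets
	 * have been deferred, then reset the shared deferred counter.
	 */
	if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter,		/* assumed macro name */
				       VMXNET3_REG_TXPROD + tq->qid * 8,
				       tq->tx_ring.next2fill);
	}

	return 0;
}
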
204 struct vmxnet3_tx_queue *tq;
208 tq = vmxnet3_xdp_get_tq(adapter);
209 if (tq->stopped)
212 nq = netdev_get_tx_queue(adapter->netdev, tq->qid);
215 err = vmxnet3_xdp_xmit_frame(adapter, xdpf, tq, false);
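
The matches at lines 204-215 cover the single-frame bounce-back path used when an XDP program returns XDP_TX: the buffer is already DMA-mapped, so vmxnet3_xdp_xmit_frame() is called with dma_map set to false. A hedged sketch follows; the function name vmxnet3_xdp_xmit_back, the locking calls, and the error code are assumptions not present in the listing.

/* Hedged sketch of the XDP_TX bounce-back path traced by lines 204-215. */
static int
vmxnet3_xdp_xmit_back(struct vmxnet3_adapter *adapter, struct xdp_frame *xdpf)
{
	struct vmxnet3_tx_queue *tq;		/* line 204 */
	struct netdev_queue *nq;
	int err;

	tq = vmxnet3_xdp_get_tq(adapter);	/* line 208 */
	if (tq->stopped)			/* line 209 */
		return -ENETDOWN;		/* assumed error code */

	nq = netdev_get_tx_queue(adapter->netdev, tq->qid);	/* line 212 */

	__netif_tx_lock(nq, smp_processor_id());		/* assumed locking */
	err = vmxnet3_xdp_xmit_frame(adapter, xdpf, tq, false);	/* line 215 */
	__netif_tx_unlock(nq);

	return err;
}
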
227 struct vmxnet3_tx_queue *tq;
235 tq = vmxnet3_xdp_get_tq(adapter);
236 if (tq->stopped)
240 if (vmxnet3_xdp_xmit_frame(adapter, frames[i], tq, true)) {
241 tq->stats.xdp_xmit_err++;
245 tq->stats.xdp_xmit += i;
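
The matches at lines 227-245 belong to the batch transmit path of the .ndo_xdp_xmit style: frames redirected from another device must be freshly DMA-mapped, hence dma_map is true, failures bump the xdp_xmit_err counter and stop the loop, and the number of frames actually queued is added to the xdp_xmit counter. A hedged sketch follows; the function name vmxnet3_xdp_xmit, its signature, the locking, and the early state checks are assumptions not shown in the listing.

/* Hedged sketch of the batch XDP transmit path traced by lines 227-245. */
static int
vmxnet3_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		 u32 flags)			/* assumed signature */
{
	struct vmxnet3_adapter *adapter = netdev_priv(dev);
	struct vmxnet3_tx_queue *tq;		/* line 227 */
	struct netdev_queue *nq;
	int i;

	tq = vmxnet3_xdp_get_tq(adapter);	/* line 235 */
	if (tq->stopped)			/* line 236 */
		return -ENETDOWN;		/* assumed error code */

	nq = netdev_get_tx_queue(adapter->netdev, tq->qid);
	__netif_tx_lock(nq, smp_processor_id());	/* assumed locking */

	for (i = 0; i < n; i++) {
		/* line 240: redirected frames need a fresh DMA mapping */
		if (vmxnet3_xdp_xmit_frame(adapter, frames[i], tq, true)) {
			tq->stats.xdp_xmit_err++;	/* line 241 */
			break;
		}
	}
	tq->stats.xdp_xmit += i;	/* line 245: count frames actually queued */

	__netif_tx_unlock(nq);
	return i;
}
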