Lines Matching refs:nq
1834 struct netdev_queue *nq, bool napi)
1864 netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
1871 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1878 mvneta_txq_bufs_free(pp, txq, tx_done, nq, true);
1882 if (netif_tx_queue_stopped(nq)) {
1884 netif_tx_wake_queue(nq);
2090 struct netdev_queue *nq;
2101 nq = netdev_get_tx_queue(pp->dev, txq->id);
2103 __netif_tx_lock(nq, cpu);
2118 __netif_tx_unlock(nq);
2132 struct netdev_queue *nq;
2142 nq = netdev_get_tx_queue(pp->dev, txq->id);
2144 __netif_tx_lock(nq, cpu);
2157 __netif_tx_unlock(nq);
2830 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
2833 netdev_tx_sent_queue(nq, len);
2837 netif_tx_stop_queue(nq);
2839 if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
2863 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
2866 mvneta_txq_bufs_free(pp, txq, tx_done, nq, false);
2880 struct netdev_queue *nq;
2886 nq = netdev_get_tx_queue(pp->dev, txq->id);
2887 __netif_tx_lock(nq, cpu);
2892 __netif_tx_unlock(nq);
3457 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
3470 netdev_tx_reset_queue(nq);
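
For context, the matches above follow the usual BQL accounting and tx-queue control pattern (netdev_tx_sent_queue / netdev_tx_completed_queue plus stop/wake under the per-queue lock). Below is a minimal sketch of that pattern, not mvneta's actual code: the function names, the "ring_full"/"free_descs" parameters and MY_TX_WAKE_THRESHOLD are hypothetical placeholders.

#include <linux/netdevice.h>

#define MY_TX_WAKE_THRESHOLD	16	/* hypothetical refill threshold */

/* xmit path: account bytes with BQL, stop the queue when the ring fills up */
static void my_tx_queue_sent(struct net_device *dev, int txq_id,
			     unsigned int len, bool ring_full)
{
	struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

	netdev_tx_sent_queue(nq, len);		/* BQL: bytes handed to HW */

	if (ring_full)
		netif_tx_stop_queue(nq);	/* no room for another frame */

	/* kick the HW unless more frames are coming and the queue is open */
	if (!netdev_xmit_more() || netif_xmit_stopped(nq))
		;	/* write the doorbell / tail pointer here */
}

/* completion path: take the tx queue lock, return bytes to BQL, wake queue */
static void my_tx_queue_done(struct net_device *dev, int txq_id,
			     unsigned int pkts, unsigned int bytes,
			     unsigned int free_descs)
{
	struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

	__netif_tx_lock(nq, smp_processor_id());

	netdev_tx_completed_queue(nq, pkts, bytes);	/* BQL: bytes freed */

	if (netif_tx_queue_stopped(nq) && free_descs >= MY_TX_WAKE_THRESHOLD)
		netif_tx_wake_queue(nq);	/* let the stack transmit again */

	__netif_tx_unlock(nq);
}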