Lines Matching defs:txq (include/linux/netdevice.h)

839 /* HW offloaded queuing disciplines txq count and offset maps */
2459 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
3304 void netif_schedule_queue(struct netdev_queue *txq);
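
The two declarations above (kernel lines 2459 and 3304) expose the core's txq-to-traffic-class mapping and qdisc rescheduling entry points. As a sketch of the first one, a hypothetical multi-TC driver could account completed bytes per traffic class like this (my_priv, my_tx_complete, and tc_bytes are illustrative names, not from the listing):

    #include <linux/netdevice.h>

    struct my_priv {
            u64 tc_bytes[TC_MAX_QUEUE];     /* per-traffic-class byte counts */
    };

    static void my_tx_complete(struct net_device *dev, unsigned int queue,
                               u32 bytes)
    {
            struct my_priv *priv = netdev_priv(dev);
            int tc = netdev_txq_to_tc(dev, queue); /* -1 if queue maps to no TC */

            if (tc >= 0)
                    priv->tc_bytes[tc] += bytes;
    }
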
3335 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3336 netif_tx_start_queue(txq);
3359 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3360 netif_tx_wake_queue(txq);
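
Kernel lines 3335-3336 and 3359-3360 are the loop bodies of netif_tx_start_all_queues() and netif_tx_wake_all_queues(). The usual call sites, sketched with hypothetical open/resume handlers:

    #include <linux/netdevice.h>

    /* Hypothetical ndo_open: once the hardware is ready, allow the stack
     * to queue packets on every TX queue. */
    static int my_open(struct net_device *dev)
    {
            /* ... allocate rings, enable interrupts ... */
            netif_tx_start_all_queues(dev);
            return 0;
    }

    /* Hypothetical resume path: the queues were stopped earlier, so wake
     * them, which also reschedules any qdisc with pending packets. */
    static void my_resume(struct net_device *dev)
    {
            /* ... restore hardware state ... */
            netif_tx_wake_all_queues(dev);
    }
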
3656 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3658 netif_tx_start_queue(txq);
3670 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3671 netif_tx_stop_queue(txq);
3684 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3686 return netif_tx_queue_stopped(txq);
3711 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3713 netif_tx_wake_queue(txq);
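
Kernel lines 3656-3713 are the per-queue (subqueue) helpers: netif_start_subqueue(), netif_stop_subqueue(), __netif_subqueue_stopped(), and netif_wake_subqueue(). A minimal sketch of the flow-control pattern they enable, with illustrative ring bookkeeping (my_ring, my_priv, and free_descs are assumptions):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    struct my_ring {
            unsigned int free_descs;        /* illustrative descriptor count */
    };

    struct my_priv {
            struct my_ring rings[16];
    };

    static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            unsigned int q = skb_get_queue_mapping(skb);
            struct my_priv *priv = netdev_priv(dev);
            struct my_ring *ring = &priv->rings[q];

            /* ... post skb to the hardware ring ... */

            if (ring->free_descs < MAX_SKB_FRAGS + 1)
                    netif_stop_subqueue(dev, q);    /* ring nearly full */
            return NETDEV_TX_OK;
    }

    static void my_tx_clean(struct net_device *dev, unsigned int q)
    {
            struct my_priv *priv = netdev_priv(dev);
            struct my_ring *ring = &priv->rings[q];

            /* ... reclaim completed descriptors ... */

            if (__netif_subqueue_stopped(dev, q) &&
                ring->free_descs > MAX_SKB_FRAGS + 1)
                    netif_wake_subqueue(dev, q);    /* resume this queue */
    }
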
3832 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
3845 unsigned int txq, unsigned int rxq);
3967 struct netdev_queue *txq, int *ret);
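
Kernel lines 3832 and 3845 declare netif_set_real_num_tx_queues() and netif_set_real_num_queues() (the latter sets TX and RX counts together, available since roughly v5.15); line 3967 is part of the dev_hard_start_xmit() declaration, which is core-internal. A sketch of sizing queues at probe time, with illustrative counts:

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>

    #define MY_MAX_QUEUES 16                /* illustrative hardware maximum */

    static int my_probe_netdev(void)
    {
            struct net_device *dev;
            int err;

            /* Allocate for the maximum, then tell the stack how many
             * queues are actually usable in this configuration. */
            dev = alloc_etherdev_mq(0, MY_MAX_QUEUES);
            if (!dev)
                    return -ENOMEM;

            err = netif_set_real_num_queues(dev, 4, 4);
            if (err)
                    goto out_free;

            err = register_netdev(dev);
            if (err)
                    goto out_free;
            return 0;

    out_free:
            free_netdev(dev);
            return err;
    }
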
4372 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
4374 spin_lock(&txq->_xmit_lock);
4376 WRITE_ONCE(txq->xmit_lock_owner, cpu);
4379 static inline bool __netif_tx_acquire(struct netdev_queue *txq)
4381 __acquire(&txq->_xmit_lock);
4385 static inline void __netif_tx_release(struct netdev_queue *txq)
4387 __release(&txq->_xmit_lock);
4390 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
4392 spin_lock_bh(&txq->_xmit_lock);
4394 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
4397 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
4399 bool ok = spin_trylock(&txq->_xmit_lock);
4403 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
4408 static inline void __netif_tx_unlock(struct netdev_queue *txq)
4411 WRITE_ONCE(txq->xmit_lock_owner, -1);
4412 spin_unlock(&txq->_xmit_lock);
4415 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
4418 WRITE_ONCE(txq->xmit_lock_owner, -1);
4419 spin_unlock_bh(&txq->_xmit_lock);
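
Kernel lines 4372-4419 implement the per-queue xmit lock. The owning CPU is recorded in xmit_lock_owner (with WRITE_ONCE so lockless readers see consistent values), which lets txq_trans_update() below tell whether a transmit is in flight; __netif_tx_acquire()/__netif_tx_release() are no-op sparse annotations used for drivers that do their own TX locking. A sketch of direct use from process context (most code instead goes through the HARD_TX_LOCK macros further down; my_kick_hw is illustrative):

    #include <linux/netdevice.h>

    static void my_kick_hw(struct net_device *dev, unsigned int i)
    {
            /* illustrative: write the doorbell register for queue i */
    }

    /* Serialize a doorbell write against concurrent ndo_start_xmit; the
     * _bh variant is needed because the xmit path runs in softirq. */
    static void my_kick_queue(struct net_device *dev, unsigned int i)
    {
            struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

            __netif_tx_lock_bh(txq);
            my_kick_hw(dev, i);
            __netif_tx_unlock_bh(txq);
    }
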
4423 * txq->trans_start can be read locklessly from dev_watchdog()
4425 static inline void txq_trans_update(struct netdev_queue *txq)
4427 if (txq->xmit_lock_owner != -1)
4428 WRITE_ONCE(txq->trans_start, jiffies);
4431 static inline void txq_trans_cond_update(struct netdev_queue *txq)
4435 if (READ_ONCE(txq->trans_start) != now)
4436 WRITE_ONCE(txq->trans_start, now);
4439 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
4442 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
4444 txq_trans_cond_update(txq);
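
Kernel lines 4423-4444 keep txq->trans_start fresh for the TX watchdog: txq_trans_update() stamps it only when the caller holds the xmit lock (xmit_lock_owner != -1), txq_trans_cond_update() skips the store when the value already equals the current jiffies, and the line-4442 fragment is from netif_trans_update(), the queue-0 helper kept for legacy single-queue drivers. On the consumer side, dev_watchdog() reads the stamp locklessly, which is why a timeout handler should do the same; a sketch (the ndo_tx_timeout signature shown matches kernels since about v5.6):

    #include <linux/jiffies.h>
    #include <linux/netdevice.h>

    /* Hypothetical ndo_tx_timeout: report how long the queue has been
     * stalled, then recover it. */
    static void my_tx_timeout(struct net_device *dev, unsigned int txqueue)
    {
            struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
            unsigned long stamp = READ_ONCE(txq->trans_start);

            netdev_warn(dev, "queue %u stuck for %u ms\n", txqueue,
                        jiffies_to_msecs(jiffies - stamp));
            /* ... reset the ring, then netif_tx_wake_queue(txq) ... */
    }
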
4469 #define HARD_TX_LOCK(dev, txq, cpu) { \
4471 __netif_tx_lock(txq, cpu); \
4473 __netif_tx_acquire(txq); \
4477 #define HARD_TX_TRYLOCK(dev, txq) \
4479 __netif_tx_trylock(txq) : \
4480 __netif_tx_acquire(txq))
4482 #define HARD_TX_UNLOCK(dev, txq) { \
4484 __netif_tx_unlock(txq); \
4486 __netif_tx_release(txq); \
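
Kernel lines 4469-4486 are the macros the core transmit path wraps around ndo_start_xmit: for ordinary devices they take the xmit lock, while for drivers that do their own TX locking (LLTX) they only satisfy sparse via __netif_tx_acquire()/__netif_tx_release(). A sketch of the canonical caller shape, modeled loosely on the core's direct-transmit path (error handling trimmed; the caller runs with bottom halves disabled, as the core path does, so smp_processor_id() is safe):

    #include <linux/netdevice.h>

    static netdev_tx_t my_direct_xmit(struct sk_buff *skb,
                                      struct net_device *dev,
                                      struct netdev_queue *txq)
    {
            netdev_tx_t rc = NETDEV_TX_BUSY;

            HARD_TX_LOCK(dev, txq, smp_processor_id());
            if (!netif_xmit_frozen_or_stopped(txq))
                    rc = netdev_start_xmit(skb, dev, txq, false);
            HARD_TX_UNLOCK(dev, txq);

            return rc;      /* NETDEV_TX_BUSY: caller must requeue skb */
    }
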
4499 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
4501 __netif_tx_lock(txq, cpu);
4502 netif_tx_stop_queue(txq);
4503 __netif_tx_unlock(txq);
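
Kernel lines 4499-4503 are the body of netif_tx_disable(): every queue is stopped under its xmit lock, so the stop cannot race with an in-flight ndo_start_xmit. The typical call site, sketched:

    #include <linux/netdevice.h>

    /* Hypothetical ndo_stop: after netif_tx_disable() returns, no new
     * transmits can be running, so the rings are safe to tear down. */
    static int my_stop(struct net_device *dev)
    {
            netif_tx_disable(dev);
            /* ... disable interrupts, drain and free TX rings ... */
            return 0;
    }
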
4920 struct netdev_queue *txq, bool more)
4927 txq_trans_update(txq);
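
Kernel lines 4920 and 4927 are from netdev_start_xmit(), which calls the driver's ndo_start_xmit and, when the driver returns NETDEV_TX_OK, stamps trans_start via txq_trans_update(). The `more` parameter tells the driver whether further packets are about to follow; inside ndo_start_xmit it is read back with netdev_xmit_more(), typically to batch doorbell writes. A sketch (my_ring_doorbell is illustrative):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static void my_ring_doorbell(struct net_device *dev)
    {
            /* illustrative: flush batched descriptors to the NIC */
    }

    static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            /* ... write TX descriptors for skb ... */

            /* Defer the doorbell while more packets are coming, but never
             * leave a stopped queue with an unflushed doorbell. */
            if (!netdev_xmit_more() ||
                netif_xmit_stopped(skb_get_tx_queue(dev, skb)))
                    my_ring_doorbell(dev);

            return NETDEV_TX_OK;
    }
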