Lines Matching defs:txq

1234 netif_tx_stop_queue(q->txq);
1425 if (!q->in_use || !__netif_tx_trylock(eq->txq))
1441 if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) {
1442 netif_tx_wake_queue(eq->txq);
1446 __netif_tx_unlock(eq->txq);
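
The cluster at 1425-1446 above is the stopped-queue restart path: reclaim happens only under __netif_tx_trylock(), and the netdev queue is re-woken once hardware occupancy drops below half the ring (line 1441). A minimal userspace model of that wake hysteresis follows; the names (txq_model, hw_in_use, stopped) are illustrative, not the driver's structs, and the half-full threshold is taken directly from line 1441.

/* Model of the wake check at sge.c:1441-1442. Hypothetical types;
 * the real driver tests netif_tx_queue_stopped() under the tx lock. */
#include <stdbool.h>
#include <stdio.h>

struct txq_model {
	unsigned int size;	/* ring entries */
	bool stopped;		/* netdev queue state */
};

static void maybe_wake(struct txq_model *q, unsigned int hw_in_use)
{
	/* Wake only once hardware has drained below half the ring, so a
	 * nearly-full queue does not bounce stop/wake on every packet. */
	if (q->stopped && hw_in_use < q->size / 2) {
		q->stopped = false;
		printf("queue woken at %u/%u in use\n", hw_in_use, q->size);
	}
}

int main(void)
{
	struct txq_model q = { .size = 1024, .stopped = true };

	maybe_wake(&q, 900);	/* still too full: stays stopped */
	maybe_wake(&q, 500);	/* below 512: wakes */
	return 0;
}
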
1690 * txq, reset the current to point to start of the queue
1832 struct sge_eth_txq *txq;
1858 txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
1863 reclaim_completed_tx(adapter, &txq->q, -1, true);
1871 credits = txq_avail(&txq->q) - ndesc;
1879 eth_txq_stop(txq);
1886 last_desc = txq->q.pidx + ndesc - 1;
1887 if (last_desc >= txq->q.size)
1888 last_desc -= txq->q.size;
1889 sgl_sdesc = &txq->q.sdesc[last_desc];
1899 txq->mapping_err++;
1914 eth_txq_stop(txq);
1925 wr = (void *)&txq->q.desc[txq->q.pidx];
1977 txq->tso++;
1978 txq->tx_cso += ssi->gso_segs;
1996 txq->tx_cso++;
2006 txq->vlan_ins++;
2025 cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1);
2065 struct sge_txq *tq = &txq->q;
2087 txq_advance(&txq->q, ndesc);
2089 cxgb4_ring_tx_db(adapter, &txq->q, ndesc);
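
Lines 1832-2089 are the hot Ethernet transmit path: reclaim completed descriptors (1863), compute remaining credits (1871), stop the queue if the work request will not fit (1879, 1914), then locate the last descriptor with a manual wrap (1886-1888) before advancing pidx and ringing the doorbell (2087-2089). A compilable sketch of just the index arithmetic, with hypothetical names, mirroring the wrap shown at 1886-1888:

#include <assert.h>
#include <stdio.h>

/* Last descriptor touched by a work request spanning ndesc descriptors;
 * pidx and q_size are illustrative stand-ins for txq->q.pidx/size. */
static unsigned int last_desc(unsigned int pidx, unsigned int ndesc,
			      unsigned int q_size)
{
	unsigned int last = pidx + ndesc - 1;

	if (last >= q_size)
		last -= q_size;	/* wrapped past the end of the ring */
	return last;
}

int main(void)
{
	assert(last_desc(10, 4, 1024) == 13);	/* no wrap */
	assert(last_desc(1022, 4, 1024) == 1);	/* wraps to ring start */
	printf("wrap arithmetic ok\n");
	return 0;
}
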
2363 * txq, reset the current to point to start of the queue
2687 __netif_tx_lock(q->txq, smp_processor_id());
2692 __netif_tx_unlock(q->txq);
2727 __netif_tx_unlock(q->txq);
2954 struct sge_txq *txq;
3004 txq = &q->q;
3009 left = (u8 *)end - (u8 *)txq->stat;
3010 end = (void *)txq->desc + left;
3017 if (pos == (u64 *)txq->stat) {
3018 left = (u8 *)end - (u8 *)txq->stat;
3019 end = (void *)txq->desc + left;
3020 pos = (void *)txq->desc;
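
The fragment at 2954-3020 handles a work request whose write runs into the status page (txq->stat) sitting just past the last real descriptor: the overshoot is carried over and both the end pointer (3009-3010) and the write cursor (3017-3020) restart at txq->desc, the beginning of the ring. A standalone model of that carry-over, assuming a byte buffer stands in for the descriptor ring and its trailing status page:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define RING_BYTES 256	/* descriptor area, illustrative size */

int main(void)
{
	/* desc[0..RING_BYTES) is the ring; stat marks one past its end.
	 * The extra tail bytes stand in for the status page itself. */
	unsigned char ring[RING_BYTES + 64];
	unsigned char *desc = ring;
	unsigned char *stat = ring + RING_BYTES;

	/* A WR whose end overshoots the status page by 16 bytes... */
	unsigned char *end = stat + 16;
	ptrdiff_t left = end - stat;	/* sge.c:3009 */

	end = desc + left;		/* sge.c:3010: wrap the end pointer */
	assert(end == ring + 16);

	/* ...and a cursor landing exactly on stat restarts at desc */
	unsigned char *pos = stat;
	if (pos == stat)		/* sge.c:3017-3020 */
		pos = desc;
	assert(pos == ring);
	printf("status-page wrap ok\n");
	return 0;
}
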
3134 struct sge_uld_txq *txq;
3151 txq = &txq_info->uldtxq[idx];
3152 return ofld_xmit(txq, skb);
3264 struct sge_uld_txq *txq;
3277 txq = &txq_info->uldtxq[idx];
3279 ret = ofld_xmit_direct(txq, src, len);
3598 struct sge_eth_txq *txq;
3619 txq = &s->ethtxq[pi->first_qset + rspq->idx];
3636 WRITE_ONCE(txq->q.stat->cidx, egr->cidx);
3639 t4_sge_eth_txq_egress_update(adapter, txq, -1);
4293 struct sge_uld_txq *txq = s->egr_map[id];
4296 tasklet_schedule(&txq->qresume_tsk);
4584 * @txq: the SGE Ethernet TX Queue to initialize
4590 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
4601 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
4603 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
4605 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
4607 if (!txq->q.desc)
4646 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
4657 FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix));
4661 kfree(txq->q.sdesc);
4662 txq->q.sdesc = NULL;
4665 txq->q.desc, txq->q.phys_addr);
4666 txq->q.desc = NULL;
4670 txq->q.q_type = CXGB4_TXQ_ETH;
4671 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
4672 txq->txq = netdevq;
4673 txq->tso = 0;
4674 txq->uso = 0;
4675 txq->tx_cso = 0;
4676 txq->vlan_ins = 0;
4677 txq->mapping_err = 0;
4678 txq->dbqt = dbqt;
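
t4_sge_alloc_eth_txq (4584-4678 above) sizes the ring as the requested descriptor count plus enough whole descriptors to hold the SGE status page: nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc) at 4601. A small model of that sizing, assuming placeholder values of a 64-byte status page and 64-byte descriptors (the real values come from the adapter's SGE parameters):

#include <assert.h>
#include <stdio.h>

struct tx_desc { unsigned char raw[64]; };	/* placeholder layout */

int main(void)
{
	unsigned int q_size = 1024;	/* requested descriptors */
	unsigned int stat_len = 64;	/* status page bytes, per-adapter */

	/* Mirror of sge.c:4601: reserve whole descriptors for the
	 * status page appended after the ring proper. */
	unsigned int nentries = q_size + stat_len / sizeof(struct tx_desc);

	assert(nentries == 1025);
	printf("ring sized at %u entries (%u + status page)\n",
	       nentries, q_size);
	return 0;
}

Note the two allocation styles visible in the listing: the Ethernet queue passes txq->q.size to alloc_ring with s->stat_len as a separate argument (4603-4605), while the control queue folds the status page into nentries and passes that count directly (4694-4697).
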
4683 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
4694 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
4696 txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
4697 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
4699 if (!txq->q.desc)
4721 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
4727 txq->q.desc, txq->q.phys_addr);
4728 txq->q.desc = NULL;
4732 txq->q.q_type = CXGB4_TXQ_CTRL;
4733 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
4734 txq->adap = adap;
4735 skb_queue_head_init(&txq->sendq);
4736 tasklet_setup(&txq->qresume_tsk, restart_ctrlq);
4737 txq->full = 0;
4809 int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
4819 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid);
4823 txq->q.q_type = CXGB4_TXQ_ULD;
4824 txq->adap = adap;
4825 skb_queue_head_init(&txq->sendq);
4826 tasklet_setup(&txq->qresume_tsk, restart_ofldq);
4827 txq->full = 0;
4828 txq->mapping_err = 0;
4832 int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
4837 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid);
4841 txq->q.q_type = CXGB4_TXQ_ULD;
4842 spin_lock_init(&txq->lock);
4843 txq->adap = adap;
4844 txq->tso = 0;
4845 txq->uso = 0;
4846 txq->tx_cso = 0;
4847 txq->vlan_ins = 0;
4848 txq->mapping_err = 0;
4907 void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq)
4909 if (txq->q.desc) {
4911 txq->q.cntxt_id);
4912 free_tx_desc(adap, &txq->q, txq->q.in_use, false);
4913 kfree(txq->q.sdesc);
4914 free_txq(adap, &txq->q);
4956 __netif_tx_lock_bh(etq->txq);
4958 __netif_tx_unlock_bh(etq->txq);
5038 struct sge_uld_txq *txq = txq_info->uldtxq;
5041 if (txq->q.desc)
5042 tasklet_kill(&txq->qresume_tsk);
5052 struct sge_uld_txq *txq = txq_info->uldtxq;
5055 if (txq->q.desc)
5056 tasklet_kill(&txq->qresume_tsk);
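
The final clusters (5038-5056) quiesce ULD queues on release: the qresume tasklet is killed only for queues whose descriptor ring was actually allocated, using q.desc as the "was this queue ever set up" guard. A trivial model of that guard with hypothetical types; in the real driver, tasklet_kill() additionally waits for a running tasklet to finish before returning:

#include <stdbool.h>
#include <stdio.h>

/* Model of the release loops at sge.c:5038-5056: only queues with an
 * allocated ring (desc != NULL) have a resume tasklet to kill. */
struct uld_txq_model {
	void *desc;		/* NULL if the ring was never allocated */
	bool tasklet_alive;
};

static void quiesce(struct uld_txq_model *q, int n)
{
	for (int i = 0; i < n; i++, q++)
		if (q->desc)
			q->tasklet_alive = false; /* stands in for tasklet_kill() */
}

int main(void)
{
	int dummy;
	struct uld_txq_model txq[2] = {
		{ .desc = &dummy, .tasklet_alive = true },
		{ .desc = NULL,  .tasklet_alive = false },
	};

	quiesce(txq, 2);
	printf("allocated queue quiesced: %s\n",
	       txq[0].tasklet_alive ? "no" : "yes");
	return 0;
}
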