Lines Matching defs:txq

1234 netif_tx_stop_queue(q->txq);
1425 if (!q->in_use || !__netif_tx_trylock(eq->txq))
1441 if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) {
1442 netif_tx_wake_queue(eq->txq);
1446 __netif_tx_unlock(eq->txq);
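
The cluster above (source lines 1234-1446) is the standard netdev flow-control handshake: the xmit path stops the queue when descriptors run low, and the egress-update path wakes it once enough have completed. A minimal sketch of that handshake, assuming a hypothetical my_txq wrapper in place of the driver's sge_eth_txq (kernel C, a sketch rather than buildable driver code):

    #include <linux/netdevice.h>

    /* Sketch only: hypothetical state, standing in for sge_eth_txq. */
    struct my_txq {
    	struct netdev_queue *txq;	/* the stack-visible queue */
    	unsigned int size;		/* ring size in descriptors */
    	unsigned int in_use;		/* descriptors owned by hardware */
    };

    /* xmit path: no room left, ask the stack to stop feeding us */
    static void my_txq_stop(struct my_txq *q)
    {
    	netif_tx_stop_queue(q->txq);
    }

    /* completion path: may race with xmit, hence only a trylock;
     * if xmit holds the lock it will re-check occupancy itself.
     */
    static void my_txq_service(struct my_txq *q)
    {
    	if (!q->in_use || !__netif_tx_trylock(q->txq))
    		return;

    	/* wake once at least half the ring has drained */
    	if (netif_tx_queue_stopped(q->txq) && q->in_use < (q->size / 2))
    		netif_tx_wake_queue(q->txq);

    	__netif_tx_unlock(q->txq);
    }
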
1689 * txq, reset the current to point to start of the queue
1831 struct sge_eth_txq *txq;
1854 txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
1859 reclaim_completed_tx(adapter, &txq->q, -1, true);
1867 credits = txq_avail(&txq->q) - ndesc;
1875 eth_txq_stop(txq);
1882 last_desc = txq->q.pidx + ndesc - 1;
1883 if (last_desc >= txq->q.size)
1884 last_desc -= txq->q.size;
1885 sgl_sdesc = &txq->q.sdesc[last_desc];
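
Source lines 1882-1885 place a packet's last descriptor with a single compare instead of a modulo; that is valid only because the credit check above guarantees ndesc never exceeds the ring size. The same arithmetic as a standalone, runnable helper (hypothetical names, plain C):

    #include <assert.h>

    /* Wrap pidx + ndesc - 1 into [0, size) with one compare; correct
     * only while 1 <= ndesc <= size and pidx < size.
     */
    static unsigned int last_desc_idx(unsigned int pidx, unsigned int ndesc,
    				      unsigned int size)
    {
    	unsigned int last = pidx + ndesc - 1;

    	assert(ndesc >= 1 && ndesc <= size && pidx < size);
    	if (last >= size)
    		last -= size;
    	return last;
    }

    int main(void)
    {
    	/* ring of 8: a 3-descriptor packet starting at 6 ends at index 0 */
    	assert(last_desc_idx(6, 3, 8) == 0);
    	return 0;
    }
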
1895 txq->mapping_err++;
1909 eth_txq_stop(txq);
1919 wr = (void *)&txq->q.desc[txq->q.pidx];
1971 txq->tso++;
1972 txq->tx_cso += ssi->gso_segs;
1990 txq->tx_cso++;
2000 txq->vlan_ins++;
2019 cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1);
2059 struct sge_txq *tq = &txq->q;
2081 txq_advance(&txq->q, ndesc);
2083 cxgb4_ring_tx_db(adapter, &txq->q, ndesc);
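
Lines 2081-2083 fix the producer-side ordering: the work request is written into the ring first, the software producer index advances, and only then is the doorbell rung, so hardware never sees a stale descriptor. A hedged sketch of that ordering (hypothetical my_ring helpers, not the cxgb4 txq_advance/cxgb4_ring_tx_db):

    #include <linux/io.h>

    /* Sketch only: minimal producer state for the ordering argument. */
    struct my_ring {
    	unsigned int pidx, in_use, size;
    	void __iomem *db;	/* doorbell register */
    };

    static void my_ring_advance(struct my_ring *q, unsigned int ndesc)
    {
    	q->in_use += ndesc;
    	q->pidx += ndesc;
    	if (q->pidx >= q->size)
    		q->pidx -= q->size;
    }

    static void my_ring_doorbell(struct my_ring *q)
    {
    	wmb();			/* descriptor writes before the doorbell */
    	writel(q->pidx, q->db);	/* hypothetical doorbell encoding */
    }
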
2357 * txq, reset the current to point to start of the queue
2681 __netif_tx_lock(q->txq, smp_processor_id());
2686 __netif_tx_unlock(q->txq);
2721 __netif_tx_unlock(q->txq);
2948 struct sge_txq *txq;
2998 txq = &q->q;
3003 left = (u8 *)end - (u8 *)txq->stat;
3004 end = (void *)txq->desc + left;
3011 if (pos == (u64 *)txq->stat) {
3012 left = (u8 *)end - (u8 *)txq->stat;
3013 end = (void *)txq->desc + left;
3014 pos = (void *)txq->desc;
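
Source lines 2998-3014 handle a work request whose inline payload would run into txq->stat, the status page at the end of the ring: the remaining bytes wrap around and continue at txq->desc. The same split-copy, reduced to a runnable helper (hypothetical layout, plain C):

    #include <string.h>

    /* Copy len bytes at pos in a ring whose usable area ends at stat
     * (the status page); bytes that would spill past stat wrap to base.
     */
    static void ring_copy_wrapped(void *base, void *stat, void *pos,
    			          const void *src, size_t len)
    {
    	size_t to_end = (char *)stat - (char *)pos;

    	if (len <= to_end) {
    		memcpy(pos, src, len);
    	} else {
    		memcpy(pos, src, to_end);
    		memcpy(base, (const char *)src + to_end, len - to_end);
    	}
    }
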
3128 struct sge_uld_txq *txq;
3145 txq = &txq_info->uldtxq[idx];
3146 return ofld_xmit(txq, skb);
3258 struct sge_uld_txq *txq;
3271 txq = &txq_info->uldtxq[idx];
3273 ret = ofld_xmit_direct(txq, src, len);
3592 struct sge_eth_txq *txq;
3613 txq = &s->ethtxq[pi->first_qset + rspq->idx];
3614 t4_sge_eth_txq_egress_update(adapter, txq, -1);
4268 struct sge_uld_txq *txq = s->egr_map[id];
4271 tasklet_schedule(&txq->qresume_tsk);
4559 * @txq: the SGE Ethernet TX Queue to initialize
4565 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
4576 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
4578 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
4580 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
4582 if (!txq->q.desc)
4616 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
4627 FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix));
4631 kfree(txq->q.sdesc);
4632 txq->q.sdesc = NULL;
4635 txq->q.desc, txq->q.phys_addr);
4636 txq->q.desc = NULL;
4640 txq->q.q_type = CXGB4_TXQ_ETH;
4641 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
4642 txq->txq = netdevq;
4643 txq->tso = 0;
4644 txq->uso = 0;
4645 txq->tx_cso = 0;
4646 txq->vlan_ins = 0;
4647 txq->mapping_err = 0;
4648 txq->dbqt = dbqt;
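
t4_sge_alloc_eth_txq (source lines 4559-4648) follows an alloc/commit/unwind shape: allocate the DMA ring, issue the firmware EQ-create command, unwind both the software descriptors and the ring if the command fails, and only initialize the statistics counters once the queue exists. The shape in outline, with every my_* name hypothetical:

    /* Sketch only: my_alloc_ring/my_fw_create_eq/my_free_ring are
     * hypothetical stand-ins for alloc_ring and the FW_EQ_ETH command.
     */
    static int my_alloc_txq(struct my_dev *dev, struct my_eth_txq *txq)
    {
    	int ret;

    	txq->desc = my_alloc_ring(dev, txq->size, &txq->phys, &txq->sdesc);
    	if (!txq->desc)
    		return -ENOMEM;

    	ret = my_fw_create_eq(dev, txq->phys);	/* hand the ring to firmware */
    	if (ret) {
    		kfree(txq->sdesc);		/* unwind in reverse order */
    		txq->sdesc = NULL;
    		my_free_ring(dev, txq->desc, txq->phys);
    		txq->desc = NULL;
    		return ret;
    	}

    	txq->tso = txq->tx_cso = txq->vlan_ins = txq->mapping_err = 0;
    	return 0;
    }
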
4653 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
4664 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
4666 txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
4667 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
4669 if (!txq->q.desc)
4691 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
4697 txq->q.desc, txq->q.phys_addr);
4698 txq->q.desc = NULL;
4702 txq->q.q_type = CXGB4_TXQ_CTRL;
4703 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
4704 txq->adap = adap;
4705 skb_queue_head_init(&txq->sendq);
4706 tasklet_setup(&txq->qresume_tsk, restart_ctrlq);
4707 txq->full = 0;
4779 int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
4789 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid);
4793 txq->q.q_type = CXGB4_TXQ_ULD;
4794 txq->adap = adap;
4795 skb_queue_head_init(&txq->sendq);
4796 tasklet_setup(&txq->qresume_tsk, restart_ofldq);
4797 txq->full = 0;
4798 txq->mapping_err = 0;
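
The control and ULD queues above (source lines 4704-4706 and 4794-4797) share one pattern: a backlog sendq for skbs that arrive while the ring is full, plus a qresume tasklet that drains it later (scheduled from the timer path at source line 4271). A minimal sketch of that setup, assuming a hypothetical my_uld_txq:

    #include <linux/interrupt.h>
    #include <linux/skbuff.h>

    struct my_uld_txq {
    	struct sk_buff_head sendq;	/* skbs held while the ring is full */
    	struct tasklet_struct qresume_tsk;
    	bool full;
    };

    /* tasklet callback: the ring has drained, push the backlog again */
    static void my_restart_queue(struct tasklet_struct *t)
    {
    	struct my_uld_txq *q = from_tasklet(q, t, qresume_tsk);

    	q->full = false;
    	/* ... move skbs from q->sendq back into the hardware ring ... */
    }

    static void my_uld_txq_init(struct my_uld_txq *q)
    {
    	skb_queue_head_init(&q->sendq);
    	tasklet_setup(&q->qresume_tsk, my_restart_queue);
    	q->full = false;
    }
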
4802 int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
4807 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid);
4811 txq->q.q_type = CXGB4_TXQ_ULD;
4812 spin_lock_init(&txq->lock);
4813 txq->adap = adap;
4814 txq->tso = 0;
4815 txq->uso = 0;
4816 txq->tx_cso = 0;
4817 txq->vlan_ins = 0;
4818 txq->mapping_err = 0;
4877 void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq)
4879 if (txq->q.desc) {
4881 txq->q.cntxt_id);
4882 free_tx_desc(adap, &txq->q, txq->q.in_use, false);
4883 kfree(txq->q.sdesc);
4884 free_txq(adap, &txq->q);
4926 __netif_tx_lock_bh(etq->txq);
4928 __netif_tx_unlock_bh(etq->txq);
5008 struct sge_uld_txq *txq = txq_info->uldtxq;
5011 if (txq->q.desc)
5012 tasklet_kill(&txq->qresume_tsk);
5022 struct sge_uld_txq *txq = txq_info->uldtxq;
5025 if (txq->q.desc)
5026 tasklet_kill(&txq->qresume_tsk);
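
Teardown (source lines 5008-5026) is the mirror image: the driver kills the resume tasklet before releasing a queue, and only for queues whose q.desc shows they were actually allocated. A sketch of the ordering, continuing the hypothetical my_uld_txq above:

    /* Quiesce deferred work before freeing anything it might touch. */
    static void my_uld_txq_free(struct my_uld_txq *q)
    {
    	tasklet_kill(&q->qresume_tsk);	/* waits out a running callback */
    	__skb_queue_purge(&q->sendq);	/* drop any backlogged skbs */
    	/* the descriptor ring is safe to free from here on */
    }
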