Lines matching defs:txq (definitions and uses of the txq field in the cxgb3 SGE code; each entry begins with its source line number)
179 return container_of(q, struct sge_qset, txq[qidx]);
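
Line 179 is the body of a container_of() accessor that maps a Tx queue pointer back to its owning queue set. A minimal sketch of the enclosing helper; the function name follows the usual cxgb3 naming and is an assumption, since the match shows only the return statement:

    static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
    {
        /* txq[] is embedded in sge_qset, so given the queue's index,
         * pointer arithmetic recovers the containing structure. */
        return container_of(q, struct sge_qset, txq[qidx]);
    }
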
658 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
695 if (q->txq[i].desc) {
697 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
699 if (q->txq[i].sdesc) {
700 free_tx_desc(adapter, &q->txq[i],
701 q->txq[i].in_use);
702 kfree(q->txq[i].sdesc);
705 q->txq[i].size *
707 q->txq[i].desc, q->txq[i].phys_addr);
708 __skb_queue_purge(&q->txq[i].sendq);
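
Lines 695-708 are the Tx half of queue-set teardown: disable each egress context, free any software descriptor state, release the DMA ring, and purge the software send queue. A sketch with the non-matching lines (loop header, locking, the dma_free_coherent() size argument) filled in as assumptions:

    for (i = 0; i < SGE_TXQ_PER_SET; i++)
        if (q->txq[i].desc) {
            /* Stop the hardware from touching this ring. */
            t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
            if (q->txq[i].sdesc) {
                /* Unmap and free descriptors still in flight. */
                free_tx_desc(adapter, &q->txq[i], q->txq[i].in_use);
                kfree(q->txq[i].sdesc);
            }
            dma_free_coherent(&adapter->pdev->dev,
                              q->txq[i].size * sizeof(struct tx_desc),
                              q->txq[i].desc, q->txq[i].phys_addr);
            /* Drop packets still queued in software. */
            __skb_queue_purge(&q->txq[i].sendq);
        }
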
735 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
736 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
737 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
738 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
739 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
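
Lines 735-739 hand out firmware egress-context IDs and tokens: each queue-set id indexes one tunnel (Ethernet), one offload, and one control context, so the three FW_*_SGEEC_START bases partition the context-ID space. Sketched as a helper (the function name is an assumption):

    static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
    {
        qs->txq[TXQ_ETH].cntxt_id  = FW_TUNNEL_SGEEC_START + id;
        qs->txq[TXQ_ETH].token     = FW_TUNNEL_TID_START + id;
        qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
        qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
        qs->txq[TXQ_CTRL].token    = FW_CTRL_TID_START + id;
    }

Note that the offload queue gets no token here; per lines 3152 and 3162 below, it registers its egress context with token 0 while the control queue passes its own token.
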
1250 static inline void t3_stop_tx_queue(struct netdev_queue *txq,
1253 netif_tx_stop_queue(txq);
1271 struct netdev_queue *txq;
1287 q = &qs->txq[TXQ_ETH];
1288 txq = netdev_get_tx_queue(dev, qidx);
1296 t3_stop_tx_queue(txq, qs, q);
1313 t3_stop_tx_queue(txq, qs, q);
1318 netif_tx_start_queue(txq);
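
Lines 1250-1318 show the Ethernet xmit path's flow control: when descriptors run low the netdev queue is stopped and a per-qset stopped bit is recorded, then the queue is immediately re-woken if a concurrent reclaim already freed enough space, closing the stop/wake race. A sketch; the stopped-bit bookkeeping, the credits computation, and the ndesc checks are assumptions around the matched calls:

    static inline void t3_stop_tx_queue(struct netdev_queue *txq,
                                        struct sge_qset *qs, struct sge_txq *q)
    {
        netif_tx_stop_queue(txq);
        set_bit(TXQ_ETH, &qs->txq_stopped);
        q->stops++;
    }

    /* In the xmit handler, with ndesc descriptors needed for this skb: */
    if (unlikely(q->in_use + ndesc >= q->size)) {
        t3_stop_tx_queue(txq, qs, q);
        return NETDEV_TX_BUSY;
    }

    /* ... post the descriptors; credits = q->size - q->in_use ... */

    if (unlikely(credits - ndesc < q->stop_thres)) {
        t3_stop_tx_queue(txq, qs, q);
        /* Recheck after stopping: reclaim may have freed space meanwhile. */
        if (should_restart_tx(q) &&
            test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
            q->restarts++;
            netif_tx_start_queue(txq);
        }
    }
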
1528 struct sge_qset *qs = from_tasklet(qs, t, txq[TXQ_CTRL].qresume_tsk);
1529 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1569 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
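
Lines 1528-1529 open the control-queue resume tasklet: from_tasklet() is container_of() keyed on the qresume_tsk member, so the callback recovers its sge_qset without a separate context pointer. Line 1569 then routes adapter control messages through qset 0's control queue. Entry sketch (the body is elided; the function name restart_ctrlq is taken from the tasklet_setup() call at line 3089):

    static void restart_ctrlq(struct tasklet_struct *t)
    {
        struct sge_qset *qs = from_tasklet(qs, t, txq[TXQ_CTRL].qresume_tsk);
        struct sge_txq *q = &qs->txq[TXQ_CTRL];

        /* ... drain q->sendq now that descriptors are available again ... */
    }
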
1746 struct sge_qset *qs = from_tasklet(qs, t, txq[TXQ_OFLD].qresume_tsk);
1747 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1841 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1843 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
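
Lines 1841-1843 are the offload transmit dispatch: control-plane packets take the control queue, all other offload traffic takes the offload queue. Sketched with an assumed is_ctrl_pkt() predicate:

    if (unlikely(is_ctrl_pkt(skb)))
        return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);

    return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
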
1990 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1992 qs->txq[TXQ_ETH].restarts++;
1998 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
2000 qs->txq[TXQ_OFLD].restarts++;
2001 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
2004 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2006 qs->txq[TXQ_CTRL].restarts++;
2007 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
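
Lines 1990-2007 re-arm stopped queues once reclaim has freed enough descriptors: the Ethernet queue is woken directly, while the offload and control queues are scheduled through their resume tasklets so the backlog in sendq can be drained in softirq context. One branch, with the stopped-bit test filled in as an assumption:

    if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
        should_restart_tx(&qs->txq[TXQ_OFLD]) &&
        test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
        qs->txq[TXQ_OFLD].restarts++;
        tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
    }
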
2224 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2229 qs->txq[TXQ_ETH].processed += credits;
2233 qs->txq[TXQ_CTRL].processed += credits;
2237 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2241 qs->txq[TXQ_OFLD].processed += credits;
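
Lines 2224-2241 consume the Tx-credit fields of a response entry: a per-queue GTS flag clears that queue's TXQ_RUNNING bit, and any returned credits are accumulated into its processed count. For the Ethernet queue the pattern looks like this (the F_/G_ response-word accessors are assumptions):

    if (flags & F_RSPD_TXQ0_GTS)
        clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);

    credits = G_RSPD_TXQ0_CR(flags);
    if (credits)
        qs->txq[TXQ_ETH].processed += credits;
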
2258 struct sge_txq *txq = &qs->txq[TXQ_ETH];
2260 if (txq->cleaned + txq->in_use != txq->processed &&
2261 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2262 set_bit(TXQ_RUNNING, &txq->flags);
2264 V_EGRCNTX(txq->cntxt_id));
2269 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2271 if (txq->cleaned + txq->in_use != txq->processed &&
2272 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2273 set_bit(TXQ_RUNNING, &txq->flags);
2275 V_EGRCNTX(txq->cntxt_id));
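
Lines 2258-2275 gate the egress doorbell: ring only if work remains outstanding (cleaned + in_use != processed) and no last-packet doorbell is already pending, then mark the queue running again. For one queue (the doorbell register write is an assumption):

    struct sge_txq *txq = &qs->txq[TXQ_ETH];

    if (txq->cleaned + txq->in_use != txq->processed &&
        !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
        set_bit(TXQ_RUNNING, &txq->flags);
        t3_write_reg(adap, A_SG_KDOORBELL,
                     F_SELEGRCNTX | V_EGRCNTX(txq->cntxt_id));
    }
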
2931 tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
2936 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2937 tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
2939 spin_unlock(&qs->txq[TXQ_OFLD].lock);
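
Lines 2931-2939 are the periodic Tx reclaim from the qset timer: the Ethernet ring is reclaimed under Tx locking taken outside the match, and the offload ring only via spin_trylock(), so the timer never blocks against a busy xmit path; a contended tick simply skips the offload reclaim until the next run. Assembled from the matched lines, with the chunk-size argument filled in as an assumption:

    tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
                                        TX_RECLAIM_TIMER_CHUNK);
    if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
        tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
                                             TX_RECLAIM_TIMER_CHUNK);
        spin_unlock(&qs->txq[TXQ_OFLD].lock);
    }
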
3075 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
3077 &q->txq[i].phys_addr,
3078 &q->txq[i].sdesc);
3079 if (!q->txq[i].desc)
3082 q->txq[i].gen = 1;
3083 q->txq[i].size = p->txq_size[i];
3084 spin_lock_init(&q->txq[i].lock);
3085 skb_queue_head_init(&q->txq[i].sendq);
3088 tasklet_setup(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq);
3089 tasklet_setup(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq);
3100 q->txq[TXQ_ETH].stop_thres = nports *
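
Lines 3075-3100 allocate and initialize each Tx ring, wire up the resume tasklets (lines 3088-3089), and set the Ethernet queue's stop threshold scaled by the port count (line 3100). A sketch of the loop, with alloc_ring()'s element-size arguments and the control-queue special case (no software descriptors) filled in as assumptions:

    for (i = 0; i < SGE_TXQ_PER_SET; ++i) {
        /* The control queue carries raw work requests, no sk_buff state. */
        size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);

        q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
                                    sizeof(struct tx_desc), sz,
                                    &q->txq[i].phys_addr,
                                    &q->txq[i].sdesc);
        if (!q->txq[i].desc)
            goto err;

        q->txq[i].gen = 1;      /* generation bit for ring wrap detection */
        q->txq[i].size = p->txq_size[i];
        spin_lock_init(&q->txq[i].lock);
        skb_queue_head_init(&q->txq[i].sendq);
    }

    tasklet_setup(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq);
    tasklet_setup(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq);
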
3141 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
3142 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
3143 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
3149 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
3151 q->txq[TXQ_OFLD].phys_addr,
3152 q->txq[TXQ_OFLD].size, 0, 1, 0);
3158 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
3160 q->txq[TXQ_CTRL].phys_addr,
3161 q->txq[TXQ_CTRL].size,
3162 q->txq[TXQ_CTRL].token, 1, 0);
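
Lines 3141-3162 register the three rings with the SGE as egress contexts. The Ethernet context enables GTS and passes its tunnel token; the offload context passes token 0 (line 3152) and the control context its own token (line 3162). The Ethernet call, assembled from the matched lines with the trailing arguments and error label assumed:

    ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
                             SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
                             q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
                             1, 0);
    if (ret)
        goto err_unlock;
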
3306 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
3307 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
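
Lines 3306-3307 kill both resume tasklets during SGE shutdown so that no qresume_tsk can fire after its queues are torn down. Sketched over all queue sets (loop bounds assumed):

    for (i = 0; i < SGE_QSETS; ++i) {
        struct sge_qset *qs = &adap->sge.qs[i];

        tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
        tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
    }
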