Lines matching refs:txq in the alx Ethernet driver (drivers/net/ethernet/atheros/alx/main.c):
53 static void alx_free_txbuf(struct alx_tx_queue *txq, int entry)
55 struct alx_buffer *txb = &txq->bufs[entry];
58 dma_unmap_single(txq->dev,
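
The fragments at lines 53-58 come from alx_free_txbuf(), which releases one TX ring entry: it unmaps the DMA buffer recorded for that slot and frees the attached skb, if any. Below is a hedged, kernel-style sketch of that shape; struct tx_buf and tx_buf_free() are illustrative names rather than the driver's own, but the dma_unmap_*() accessors are the standard kernel API the listing shows.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Illustrative bookkeeping entry; the driver keeps the equivalent
 * fields in struct alx_buffer (txq->bufs[entry]). */
struct tx_buf {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(size);
};

static void tx_buf_free(struct device *dev, struct tx_buf *txb)
{
	/* Unmap only if this slot actually holds a DMA mapping. */
	if (dma_unmap_len(txb, size)) {
		dma_unmap_single(dev, dma_unmap_addr(txb, dma),
				 dma_unmap_len(txb, size), DMA_TO_DEVICE);
		dma_unmap_len_set(txb, size, 0);
	}

	/* Only the last descriptor of a packet carries the skb pointer. */
	if (txb->skb) {
		dev_kfree_skb_any(txb->skb);
		txb->skb = NULL;
	}
}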
149 return alx->qnapi[r_idx]->txq;
152 static struct netdev_queue *alx_get_tx_queue(const struct alx_tx_queue *txq)
154 return netdev_get_tx_queue(txq->netdev, txq->queue_idx);
157 static inline int alx_tpd_avail(struct alx_tx_queue *txq)
159 if (txq->write_idx >= txq->read_idx)
160 return txq->count + txq->read_idx - txq->write_idx - 1;
161 return txq->read_idx - txq->write_idx - 1;
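
alx_tpd_avail() at lines 157-161 is the usual producer/consumer ring accounting: the number of free descriptors is (read_idx - write_idx - 1) modulo the ring size, with one slot intentionally left unused so a full ring can be told apart from an empty one. A minimal standalone sketch of the same arithmetic (struct ring and the values below are illustrative):

#include <assert.h>

struct ring {
	unsigned int count;	/* number of descriptors in the ring */
	unsigned int read_idx;	/* consumer: hardware has finished up to here */
	unsigned int write_idx;	/* producer: next descriptor to fill */
};

/* Free descriptors, keeping one slot unused so "full" != "empty". */
static unsigned int ring_avail(const struct ring *r)
{
	if (r->write_idx >= r->read_idx)
		return r->count + r->read_idx - r->write_idx - 1;
	return r->read_idx - r->write_idx - 1;
}

int main(void)
{
	struct ring r = { .count = 8, .read_idx = 0, .write_idx = 0 };

	assert(ring_avail(&r) == 7);	/* empty: count - 1 slots usable */
	r.write_idx = 5;
	assert(ring_avail(&r) == 2);	/* 5 descriptors in flight */
	r.read_idx = 6;
	assert(ring_avail(&r) == 0);	/* write_idx + 1 == read_idx: full */
	return 0;
}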
164 static bool alx_clean_tx_irq(struct alx_tx_queue *txq)
172 alx = netdev_priv(txq->netdev);
173 tx_queue = alx_get_tx_queue(txq);
175 sw_read_idx = txq->read_idx;
176 hw_read_idx = alx_read_mem16(&alx->hw, txq->c_reg);
182 skb = txq->bufs[sw_read_idx].skb;
189 alx_free_txbuf(txq, sw_read_idx);
191 if (++sw_read_idx == txq->count)
194 txq->read_idx = sw_read_idx;
200 alx_tpd_avail(txq) > txq->count / 4)
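
The alx_clean_tx_irq() fragments (lines 164-200) show the TX completion path: the hardware's consumer index is read from txq->c_reg (line 176), every descriptor between the software read index and that value is released via alx_free_txbuf(), and the queue is restarted once at least a quarter of the ring is free again (line 200). A small standalone sketch of the catch-up loop, with an illustrative release() callback standing in for alx_free_txbuf():

#include <stdio.h>

/* Advance the software read index up to the hardware consumer index,
 * releasing one buffer per completed descriptor. */
static unsigned int reclaim_completed(unsigned int *sw_read_idx,
				      unsigned int hw_read_idx,
				      unsigned int ring_count,
				      void (*release)(unsigned int entry))
{
	unsigned int sw = *sw_read_idx, freed = 0;

	while (sw != hw_read_idx) {
		release(sw);		/* stands in for alx_free_txbuf() */
		if (++sw == ring_count)	/* wrap at the end of the ring */
			sw = 0;
		freed++;
	}
	*sw_read_idx = sw;
	return freed;
}

static void release_entry(unsigned int entry)
{
	printf("freeing descriptor %u\n", entry);
}

int main(void)
{
	unsigned int sw_read_idx = 6;

	/* Hardware has consumed descriptors 6, 7, 0 and 1 of an 8-entry ring. */
	reclaim_completed(&sw_read_idx, 2, 8, release_entry);
	return 0;
}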
305 if (np->txq)
306 tx_complete = alx_clean_tx_irq(np->txq);
459 if (np->txq) {
460 np->txq->read_idx = 0;
461 np->txq->write_idx = 0;
463 txring_header_reg[np->txq->queue_idx],
464 np->txq->tpd_dma);
488 static void alx_free_txring_buf(struct alx_tx_queue *txq)
492 if (!txq->bufs)
495 for (i = 0; i < txq->count; i++)
496 alx_free_txbuf(txq, i);
498 memset(txq->bufs, 0, txq->count * sizeof(struct alx_buffer));
499 memset(txq->tpd, 0, txq->count * sizeof(struct alx_txd));
500 txq->write_idx = 0;
501 txq->read_idx = 0;
503 netdev_tx_reset_queue(alx_get_tx_queue(txq));
538 if (alx->qnapi[i] && alx->qnapi[i]->txq)
539 alx_free_txring_buf(alx->qnapi[i]->txq);
616 static int alx_alloc_tx_ring(struct alx_priv *alx, struct alx_tx_queue *txq,
619 txq->bufs = kcalloc(txq->count, sizeof(struct alx_buffer), GFP_KERNEL);
620 if (!txq->bufs)
623 txq->tpd = alx->descmem.virt + offset;
624 txq->tpd_dma = alx->descmem.dma + offset;
625 offset += sizeof(struct alx_txd) * txq->count;
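
alx_alloc_tx_ring() (lines 616-625) shows how each TX queue gets its descriptors: the per-entry software bookkeeping (txq->bufs) is ordinary kcalloc() memory, while the hardware-visible descriptor array (txq->tpd) is carved out of one shared DMA-coherent block (alx->descmem) at a running offset that the function advances and hands back to the caller. A hedged sketch of that carving step; the types and names below are illustrative, not the driver's:

struct shared_descmem {
	void *virt;			/* CPU address of the shared block */
	unsigned long long dma;		/* matching device (DMA) address */
};

struct txq_sketch {
	void *tpd;			/* this queue's descriptor slice */
	unsigned long long tpd_dma;	/* DMA address of the same slice */
	unsigned int count;		/* descriptors in the ring */
};

/* Give one queue its slice at the current offset and return the
 * advanced offset, so the next queue starts right after it. */
static unsigned int carve_tx_ring(struct shared_descmem *mem,
				  struct txq_sketch *q,
				  unsigned int offset,
				  unsigned int desc_size)
{
	q->tpd = (char *)mem->virt + offset;
	q->tpd_dma = mem->dma + offset;
	return offset + desc_size * q->count;
}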
673 offset = alx_alloc_tx_ring(alx, alx->qnapi[i]->txq, offset);
696 if (alx->qnapi[i] && alx->qnapi[i]->txq)
697 kfree(alx->qnapi[i]->txq->bufs);
720 kfree(np->txq);
742 struct alx_tx_queue *txq;
761 txq = kzalloc(sizeof(*txq), GFP_KERNEL);
762 if (!txq)
765 np->txq = txq;
766 txq->p_reg = tx_pidx_reg[i];
767 txq->c_reg = tx_cidx_reg[i];
768 txq->queue_idx = i;
769 txq->count = alx->tx_ringsz;
770 txq->netdev = alx->dev;
771 txq->dev = &alx->hw.pdev->dev;
867 if (np->txq && np->rxq)
869 np->txq->queue_idx);
870 else if (np->txq)
872 np->txq->queue_idx);
1460 static int alx_map_tx_skb(struct alx_tx_queue *txq, struct sk_buff *skb)
1464 int maplen, f, first_idx = txq->write_idx;
1466 first_tpd = &txq->tpd[txq->write_idx];
1470 if (++txq->write_idx == txq->count)
1471 txq->write_idx = 0;
1473 tpd = &txq->tpd[txq->write_idx];
1480 dma = dma_map_single(txq->dev, skb->data, maplen,
1482 if (dma_mapping_error(txq->dev, dma))
1485 dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
1486 dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
1494 if (++txq->write_idx == txq->count)
1495 txq->write_idx = 0;
1496 tpd = &txq->tpd[txq->write_idx];
1501 dma = skb_frag_dma_map(txq->dev, frag, 0,
1503 if (dma_mapping_error(txq->dev, dma))
1505 dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
1506 dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
1514 txq->bufs[txq->write_idx].skb = skb;
1516 if (++txq->write_idx == txq->count)
1517 txq->write_idx = 0;
1523 while (f != txq->write_idx) {
1524 alx_free_txbuf(txq, f);
1525 if (++f == txq->count)
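
alx_map_tx_skb() (lines 1460-1525) builds one descriptor for the linear part of the skb via dma_map_single() and one per page fragment via skb_frag_dma_map(), recording address and length with the dma_unmap_*_set() accessors so the completion path can unmap them later; the skb pointer is attached only to the last descriptor (line 1514), and on a mapping failure the entries already written between first_idx and write_idx are unwound with alx_free_txbuf() (lines 1523-1525). A hedged kernel-style sketch of the mapping half, reusing the illustrative struct tx_buf from the alx_free_txbuf() sketch above; it skips the descriptor writes and the index wrap-around, and reduces error unwinding to a comment:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

static int map_skb_sketch(struct device *dev, struct sk_buff *skb,
			  struct tx_buf *bufs, unsigned int idx)
{
	unsigned int len = skb_headlen(skb);
	dma_addr_t dma;
	int f;

	/* Linear (header) part of the skb. */
	dma = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;
	dma_unmap_len_set(&bufs[idx], size, len);
	dma_unmap_addr_set(&bufs[idx], dma, dma);
	idx++;				/* a real ring would wrap idx here */

	/* One descriptor per page fragment. */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		len = skb_frag_size(frag);
		dma = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			return -ENOMEM;	/* the driver unwinds prior mappings here */
		dma_unmap_len_set(&bufs[idx], size, len);
		dma_unmap_addr_set(&bufs[idx], dma, dma);
		idx++;
	}

	/* Attach the skb to the last entry so completion frees it exactly once. */
	bufs[idx - 1].skb = skb;
	return 0;
}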
1532 struct alx_tx_queue *txq)
1538 alx = netdev_priv(txq->netdev);
1540 if (alx_tpd_avail(txq) < alx_tpd_req(skb)) {
1541 netif_tx_stop_queue(alx_get_tx_queue(txq));
1545 first = &txq->tpd[txq->write_idx];
1554 if (alx_map_tx_skb(txq, skb) < 0)
1557 netdev_tx_sent_queue(alx_get_tx_queue(txq), skb->len);
1561 alx_write_mem16(&alx->hw, txq->p_reg, txq->write_idx);
1563 if (alx_tpd_avail(txq) < txq->count / 8)
1564 netif_tx_stop_queue(alx_get_tx_queue(txq));
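
alx_start_xmit_ring() (lines 1532-1564) is the transmit-side policy around those helpers: if alx_tpd_avail() cannot cover the descriptors the skb needs, the queue is stopped and the packet left for the stack to retry; otherwise the skb is mapped, the byte count is reported to BQL with netdev_tx_sent_queue(), the new write index is written to txq->p_reg as the doorbell, and the queue is stopped pre-emptively once fewer than an eighth of the ring remains free. A hedged sketch of that flow only, with descriptor setup and DMA mapping elided and illustrative parameters standing in for the driver's txq state:

#include <linux/netdevice.h>

static netdev_tx_t xmit_flow_sketch(struct sk_buff *skb,
				    struct netdev_queue *nq,
				    unsigned int needed,	/* descriptors required */
				    unsigned int avail,		/* free descriptors */
				    unsigned int ring_count)
{
	if (avail < needed) {
		netif_tx_stop_queue(nq);	/* back-pressure the stack */
		return NETDEV_TX_BUSY;
	}

	/* ... descriptor setup and DMA mapping would happen here ... */

	netdev_tx_sent_queue(nq, skb->len);	/* BQL accounting */

	/* ... memory barrier plus producer-index write to p_reg (doorbell) ... */

	if (avail - needed < ring_count / 8)
		netif_tx_stop_queue(nq);	/* stop before the ring runs dry */

	return NETDEV_TX_OK;
}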