Lines matching refs: txq

54 static void alx_free_txbuf(struct alx_tx_queue *txq, int entry)
56 struct alx_buffer *txb = &txq->bufs[entry];
59 dma_unmap_single(txq->dev,
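
Lines 54-59 above are the per-entry teardown helper. A sketch of what it does, assuming struct alx_buffer records each mapping through DEFINE_DMA_UNMAP_ADDR(dma) / DEFINE_DMA_UNMAP_LEN(size), which matches the dma_unmap_len_set()/dma_unmap_addr_set() calls near line 1462 further down; this is a reconstruction, not verbatim driver code.

/* Sketch: release one TX ring entry (unmap its DMA buffer, free its skb). */
static void alx_free_txbuf_sketch(struct alx_tx_queue *txq, int entry)
{
	struct alx_buffer *txb = &txq->bufs[entry];

	if (dma_unmap_len(txb, size)) {
		dma_unmap_single(txq->dev,
				 dma_unmap_addr(txb, dma),
				 dma_unmap_len(txb, size),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(txb, size, 0);
	}

	if (txb->skb) {
		dev_kfree_skb_any(txb->skb);
		txb->skb = NULL;
	}
}
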
150 return alx->qnapi[r_idx]->txq;
153 static struct netdev_queue *alx_get_tx_queue(const struct alx_tx_queue *txq)
155 return netdev_get_tx_queue(txq->netdev, txq->queue_idx);
158 static inline int alx_tpd_avail(struct alx_tx_queue *txq)
160 if (txq->write_idx >= txq->read_idx)
161 return txq->count + txq->read_idx - txq->write_idx - 1;
162 return txq->read_idx - txq->write_idx - 1;
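
Lines 158-162 are the complete free-space computation for the descriptor ring: one slot is deliberately kept unused so that write_idx == read_idx always means an empty ring rather than a full one. A small standalone illustration of the same arithmetic; the type and names below are invented for the example, not taken from the driver.

#include <assert.h>

/* Illustrative ring with 'count' slots; one slot stays unused so a full
 * ring can never look identical to an empty one. */
struct tx_ring {
	unsigned int count;     /* total descriptor slots          */
	unsigned int read_idx;  /* next slot the hardware consumes */
	unsigned int write_idx; /* next slot software fills        */
};

/* Same computation as alx_tpd_avail(): number of free descriptors. */
static unsigned int tx_ring_avail(const struct tx_ring *r)
{
	if (r->write_idx >= r->read_idx)
		return r->count + r->read_idx - r->write_idx - 1;
	return r->read_idx - r->write_idx - 1;
}

int main(void)
{
	struct tx_ring r = { .count = 256, .read_idx = 0, .write_idx = 0 };

	assert(tx_ring_avail(&r) == 255); /* empty ring, one slot reserved */

	r.write_idx = 200;
	r.read_idx = 10;                  /* producer ahead of the consumer */
	assert(tx_ring_avail(&r) == 65);  /* 256 + 10 - 200 - 1 */

	r.write_idx = 5;
	r.read_idx = 100;                 /* producer has wrapped, consumer has not */
	assert(tx_ring_avail(&r) == 94);  /* 100 - 5 - 1 */

	return 0;
}
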
165 static bool alx_clean_tx_irq(struct alx_tx_queue *txq)
173 alx = netdev_priv(txq->netdev);
174 tx_queue = alx_get_tx_queue(txq);
176 sw_read_idx = txq->read_idx;
177 hw_read_idx = alx_read_mem16(&alx->hw, txq->c_reg);
183 skb = txq->bufs[sw_read_idx].skb;
190 alx_free_txbuf(txq, sw_read_idx);
192 if (++sw_read_idx == txq->count)
195 txq->read_idx = sw_read_idx;
201 alx_tpd_avail(txq) > txq->count / 4)
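
Lines 165-201 are the completion side: read the hardware consumer index from txq->c_reg, free every buffer the NIC has finished with, publish the new read_idx, and wake the stack's queue once more than a quarter of the ring is free again. A kernel-style sketch of that walk; it is a reconstruction that assumes the byte/packet totals are tracked for BQL as shown, and it reuses the driver helpers visible elsewhere in this listing.

/* Sketch of the TX completion walk (reconstruction, not verbatim). */
static bool alx_clean_tx_irq_sketch(struct alx_tx_queue *txq)
{
	struct alx_priv *alx = netdev_priv(txq->netdev);
	struct netdev_queue *tx_queue = alx_get_tx_queue(txq);
	unsigned int total_bytes = 0, total_packets = 0;
	u16 hw_read_idx, sw_read_idx = txq->read_idx;

	hw_read_idx = alx_read_mem16(&alx->hw, txq->c_reg);

	while (sw_read_idx != hw_read_idx) {
		struct sk_buff *skb = txq->bufs[sw_read_idx].skb;

		if (skb) {
			total_bytes += skb->len;
			total_packets++;
		}

		alx_free_txbuf(txq, sw_read_idx);

		if (++sw_read_idx == txq->count)
			sw_read_idx = 0;
	}
	txq->read_idx = sw_read_idx;

	/* Tell BQL how much was completed. */
	netdev_tx_completed_queue(tx_queue, total_packets, total_bytes);

	/* Re-enable the queue once a quarter of the ring is free again. */
	if (netif_tx_queue_stopped(tx_queue) && netif_running(alx->dev) &&
	    alx_tpd_avail(txq) > txq->count / 4)
		netif_tx_wake_queue(tx_queue);

	/* True when the walk caught up with the hardware index. */
	return sw_read_idx == hw_read_idx;
}
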
306 if (np->txq)
307 tx_complete = alx_clean_tx_irq(np->txq);
460 if (np->txq) {
461 np->txq->read_idx = 0;
462 np->txq->write_idx = 0;
464 txring_header_reg[np->txq->queue_idx],
465 np->txq->tpd_dma);
489 static void alx_free_txring_buf(struct alx_tx_queue *txq)
493 if (!txq->bufs)
496 for (i = 0; i < txq->count; i++)
497 alx_free_txbuf(txq, i);
499 memset(txq->bufs, 0, txq->count * sizeof(struct alx_buffer));
500 memset(txq->tpd, 0, txq->count * sizeof(struct alx_txd));
501 txq->write_idx = 0;
502 txq->read_idx = 0;
504 netdev_tx_reset_queue(alx_get_tx_queue(txq));
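
Lines 489-504 tear down a whole ring: every entry goes through alx_free_txbuf(), the buffer and descriptor arrays are zeroed, both indices are reset, and netdev_tx_reset_queue() clears the byte-queue-limit state so it matches the now-empty ring. Assembled into one readable sketch (a reconstruction of the fragments above):

/* Sketch: drop everything still queued on a TX ring and reset it. */
static void alx_free_txring_buf_sketch(struct alx_tx_queue *txq)
{
	int i;

	if (!txq->bufs)
		return;

	for (i = 0; i < txq->count; i++)
		alx_free_txbuf(txq, i);

	memset(txq->bufs, 0, txq->count * sizeof(struct alx_buffer));
	memset(txq->tpd, 0, txq->count * sizeof(struct alx_txd));
	txq->write_idx = 0;
	txq->read_idx = 0;

	/* Keep BQL accounting consistent with the emptied ring. */
	netdev_tx_reset_queue(alx_get_tx_queue(txq));
}
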
539 if (alx->qnapi[i] && alx->qnapi[i]->txq)
540 alx_free_txring_buf(alx->qnapi[i]->txq);
617 static int alx_alloc_tx_ring(struct alx_priv *alx, struct alx_tx_queue *txq,
620 txq->bufs = kcalloc(txq->count, sizeof(struct alx_buffer), GFP_KERNEL);
621 if (!txq->bufs)
624 txq->tpd = alx->descmem.virt + offset;
625 txq->tpd_dma = alx->descmem.dma + offset;
626 offset += sizeof(struct alx_txd) * txq->count;
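
Lines 617-626 show the allocation pattern: the per-entry bookkeeping array comes from kcalloc(), while the descriptors themselves are carved out of one preallocated DMA-coherent block (alx->descmem) at a running offset, so all queues share a single coherent allocation. A sketch of that carving, returning the advanced offset to the caller as line 674 suggests; it assumes descmem.virt and descmem.dma point at the shared block (reconstruction):

/* Sketch: set up one TX ring inside the shared descriptor block. */
static int alx_alloc_tx_ring_sketch(struct alx_priv *alx,
				    struct alx_tx_queue *txq, int offset)
{
	txq->bufs = kcalloc(txq->count, sizeof(struct alx_buffer), GFP_KERNEL);
	if (!txq->bufs)
		return -ENOMEM;

	/* CPU and device views of the same slice of the shared block. */
	txq->tpd = alx->descmem.virt + offset;
	txq->tpd_dma = alx->descmem.dma + offset;

	/* Advance past this queue's descriptors for the next caller. */
	offset += sizeof(struct alx_txd) * txq->count;

	return offset;
}
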
674 offset = alx_alloc_tx_ring(alx, alx->qnapi[i]->txq, offset);
697 if (alx->qnapi[i] && alx->qnapi[i]->txq)
698 kfree(alx->qnapi[i]->txq->bufs);
721 kfree(np->txq);
743 struct alx_tx_queue *txq;
762 txq = kzalloc(sizeof(*txq), GFP_KERNEL);
763 if (!txq)
766 np->txq = txq;
767 txq->p_reg = tx_pidx_reg[i];
768 txq->c_reg = tx_cidx_reg[i];
769 txq->queue_idx = i;
770 txq->count = alx->tx_ringsz;
771 txq->netdev = alx->dev;
772 txq->dev = &alx->hw.pdev->dev;
868 if (np->txq && np->rxq)
870 np->txq->queue_idx);
871 else if (np->txq)
873 np->txq->queue_idx);
1437 static int alx_map_tx_skb(struct alx_tx_queue *txq, struct sk_buff *skb)
1441 int maplen, f, first_idx = txq->write_idx;
1443 first_tpd = &txq->tpd[txq->write_idx];
1447 if (++txq->write_idx == txq->count)
1448 txq->write_idx = 0;
1450 tpd = &txq->tpd[txq->write_idx];
1457 dma = dma_map_single(txq->dev, skb->data, maplen,
1459 if (dma_mapping_error(txq->dev, dma))
1462 dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
1463 dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
1471 if (++txq->write_idx == txq->count)
1472 txq->write_idx = 0;
1473 tpd = &txq->tpd[txq->write_idx];
1478 dma = skb_frag_dma_map(txq->dev, frag, 0,
1480 if (dma_mapping_error(txq->dev, dma))
1482 dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
1483 dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
1491 txq->bufs[txq->write_idx].skb = skb;
1493 if (++txq->write_idx == txq->count)
1494 txq->write_idx = 0;
1500 while (f != txq->write_idx) {
1501 alx_free_txbuf(txq, f);
1502 if (++f == txq->count)
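
Lines 1437-1502 are the mapping path: the linear part of the skb goes through dma_map_single(), each page fragment through skb_frag_dma_map(), every mapping's address and length is recorded in txq->bufs[] for later unmapping, the skb pointer is stored only in the last entry it used, and any mapping error unwinds everything from first_idx up to write_idx (the loop at 1500-1502). A condensed kernel-style sketch of that pattern; the descriptor fields tpd->adrl.addr and tpd->len are assumptions for the example, and the offload/end-of-packet flag handling is omitted.

/* Condensed sketch: map skb head plus fragments, unwind on error.
 * Descriptor field names here (len, adrl.addr) are assumptions. */
static int alx_map_tx_skb_sketch(struct alx_tx_queue *txq, struct sk_buff *skb)
{
	struct alx_txd *tpd;
	int maplen, f, first_idx = txq->write_idx;
	dma_addr_t dma;

	tpd = &txq->tpd[txq->write_idx];

	/* Map the linear (header) part of the skb. */
	maplen = skb_headlen(skb);
	dma = dma_map_single(txq->dev, skb->data, maplen, DMA_TO_DEVICE);
	if (dma_mapping_error(txq->dev, dma))
		goto err_dma;

	dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
	dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

	tpd->adrl.addr = cpu_to_le64(dma);
	tpd->len = cpu_to_le16(maplen);

	/* One descriptor per page fragment. */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		if (++txq->write_idx == txq->count)
			txq->write_idx = 0;
		tpd = &txq->tpd[txq->write_idx];

		maplen = skb_frag_size(frag);
		dma = skb_frag_dma_map(txq->dev, frag, 0, maplen,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(txq->dev, dma))
			goto err_dma;

		dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
		dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

		tpd->adrl.addr = cpu_to_le64(dma);
		tpd->len = cpu_to_le16(maplen);
	}

	/* The skb hangs off the last descriptor it used. */
	txq->bufs[txq->write_idx].skb = skb;

	if (++txq->write_idx == txq->count)
		txq->write_idx = 0;

	return 0;

err_dma:
	/* Unwind: release every entry mapped for this skb so far. */
	f = first_idx;
	while (f != txq->write_idx) {
		alx_free_txbuf(txq, f);
		if (++f == txq->count)
			f = 0;
	}
	return -ENOMEM;
}
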
1509 struct alx_tx_queue *txq)
1515 alx = netdev_priv(txq->netdev);
1517 if (alx_tpd_avail(txq) < alx_tpd_req(skb)) {
1518 netif_tx_stop_queue(alx_get_tx_queue(txq));
1522 first = &txq->tpd[txq->write_idx];
1531 if (alx_map_tx_skb(txq, skb) < 0)
1534 netdev_tx_sent_queue(alx_get_tx_queue(txq), skb->len);
1538 alx_write_mem16(&alx->hw, txq->p_reg, txq->write_idx);
1540 if (alx_tpd_avail(txq) < txq->count / 8)
1541 netif_tx_stop_queue(alx_get_tx_queue(txq));
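
Lines 1509-1541 tie the transmit path together: check that enough descriptors are free (stopping the queue if not), map the skb, account the bytes for byte-queue limits, publish the new producer index to the NIC through txq->p_reg, and stop the queue early once fewer than an eighth of the descriptors remain free so the next packet never meets a full ring. A sketch of that sequence; what the driver returns when the ring is full is not visible in the listing, so the sketch uses the common stop-and-return-busy pattern, and the checksum/TSO descriptor setup around line 1522 is reduced to a placeholder comment.

/* Sketch of the transmit fast path (reconstruction, not verbatim). */
static netdev_tx_t alx_start_xmit_ring_sketch(struct sk_buff *skb,
					      struct alx_tx_queue *txq)
{
	struct alx_priv *alx = netdev_priv(txq->netdev);
	struct netdev_queue *ndq = alx_get_tx_queue(txq);

	/* Not enough free descriptors: stop the queue and let the
	 * completion walk wake it once the ring drains. */
	if (alx_tpd_avail(txq) < alx_tpd_req(skb)) {
		netif_tx_stop_queue(ndq);
		return NETDEV_TX_BUSY;
	}

	/* Checksum/TSO setup on the first descriptor would go here. */

	if (alx_map_tx_skb(txq, skb) < 0)
		goto drop;

	/* Byte-queue-limit accounting before ringing the doorbell. */
	netdev_tx_sent_queue(ndq, skb->len);

	/* Make the descriptor writes visible before the doorbell. */
	wmb();
	alx_write_mem16(&alx->hw, txq->p_reg, txq->write_idx);

	/* Stop early while a little headroom remains. */
	if (alx_tpd_avail(txq) < txq->count / 8)
		netif_tx_stop_queue(ndq);

	return NETDEV_TX_OK;

drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
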