Lines Matching defs:txq

337 #define IS_TSO_HEADER(txq, addr) \
338 ((addr >= txq->tso_hdrs_dma) && \
339 (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
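
The macro above appears to classify a descriptor's DMA address: if it falls inside the coherent block holding the per-descriptor TSO headers, the buffer did not come from dma_map_single() and the cleanup paths skip the unmap for it. A minimal user-space model of the same range check (struct name, field names and TSO_HEADER_SIZE value here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TSO_HEADER_SIZE 256   /* assumed per-descriptor header slot size */

struct txq_model {
    uintptr_t tso_hdrs_dma;   /* start of the coherent TSO header block */
    unsigned int ring_size;   /* number of descriptors in the ring      */
};

/* True when addr lies in [tso_hdrs_dma, tso_hdrs_dma + ring_size * TSO_HEADER_SIZE) */
static bool is_tso_header(const struct txq_model *txq, uintptr_t addr)
{
    return addr >= txq->tso_hdrs_dma &&
           addr <  txq->tso_hdrs_dma + (uintptr_t)txq->ring_size * TSO_HEADER_SIZE;
}

int main(void)
{
    struct txq_model txq = { .tso_hdrs_dma = 0x10000, .ring_size = 512 };

    printf("%d\n", is_tso_header(&txq, 0x10000 + 5 * TSO_HEADER_SIZE)); /* 1: inside */
    printf("%d\n", is_tso_header(&txq, 0x0f000));                       /* 0: below  */
    return 0;
}
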
363 static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
367 entries = (((const char *)txq->dirty_tx -
368 (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;
370 return entries >= 0 ? entries : entries + txq->bd.ring_size;
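
fec_enet_get_free_txdesc_num() derives the number of free descriptors from the byte distance between dirty_tx and bd.cur, shifting by dsize_log2 instead of dividing, and folding negative results back in by adding ring_size. A self-contained model of that arithmetic (the struct and the values in main are assumptions for illustration; like the driver, it relies on an arithmetic right shift of the signed difference):

#include <stdio.h>

struct bd_ring_model {
    char *base;          /* first descriptor                         */
    char *cur;           /* next descriptor software will fill       */
    char *dirty_tx;      /* last descriptor reclaimed by completion  */
    int   ring_size;     /* number of descriptors                    */
    int   dsize_log2;    /* log2 of the descriptor size in bytes     */
};

/* Free entries = distance from cur to dirty_tx, minus one so a full ring
 * is never confused with an empty one.
 */
static int get_free_txdesc_num(const struct bd_ring_model *r)
{
    int entries = (int)((r->dirty_tx - r->cur) >> r->dsize_log2) - 1;

    return entries >= 0 ? entries : entries + r->ring_size;
}

int main(void)
{
    char ring[8 * 16];   /* 8 descriptors of 16 bytes each */
    struct bd_ring_model r = {
        .base = ring, .ring_size = 8, .dsize_log2 = 4,
        .cur = ring + 2 * 16,      /* software is at descriptor 2 */
        .dirty_tx = ring + 6 * 16, /* cleanup stopped at descriptor 6 */
    };

    printf("free = %d\n", get_free_txdesc_num(&r)); /* (6 - 2) - 1 = 3 */
    return 0;
}
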
386 struct fec_enet_priv_tx_q *txq;
392 txq = fep->tx_queue[0];
393 bdp = txq->bd.base;
398 bdp == txq->bd.cur ? 'S' : ' ',
399 bdp == txq->dirty_tx ? 'H' : ' ',
403 txq->tx_buf[index].buf_p);
404 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
406 } while (bdp != txq->bd.base);
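
The dump loop above walks the ring from bd.base via fec_enet_get_nextdesc() until it wraps back to base, flagging bd.cur with 'S' and dirty_tx with 'H'. A sketch of that traversal pattern over an array-backed ring (all names and the descriptor layout are illustrative):

#include <stdio.h>

#define RING_SIZE 8

struct desc { unsigned short status; };

/* Advance to the next descriptor, wrapping at the end of the ring. */
static struct desc *next_desc(struct desc *bdp, struct desc *base)
{
    return (bdp + 1 < base + RING_SIZE) ? bdp + 1 : base;
}

static void dump_ring(struct desc *base, struct desc *cur, struct desc *dirty)
{
    struct desc *bdp = base;
    int index = 0;

    do {
        printf("%3d %c%c 0x%04x\n", index,
               bdp == cur   ? 'S' : ' ',   /* producer position        */
               bdp == dirty ? 'H' : ' ',   /* cleanup position marker  */
               (unsigned)bdp->status);
        bdp = next_desc(bdp, base);
        index++;
    } while (bdp != base);
}

int main(void)
{
    struct desc ring[RING_SIZE] = { 0 };

    dump_ring(ring, &ring[3], &ring[1]);
    return 0;
}
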
475 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
480 struct bufdesc *bdp = txq->bd.cur;
494 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
515 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
525 index = fec_enet_get_bd_index(bdp, &txq->bd);
528 memcpy(txq->tx_bounce[index], bufaddr, frag_len);
529 bufaddr = txq->tx_bounce[index];
554 bdp = txq->bd.cur;
556 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
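
The copies into tx_bounce[index] in this fragment path (and again in the linear, TSO data and TSO header paths below) appear to handle buffers that do not meet the controller's alignment requirements: the data is first copied into a preallocated, suitably aligned bounce buffer and that buffer is mapped instead. A toy version of the decision (BOUNCE_SIZE, the mask and maybe_bounce() are made up for illustration):

#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BOUNCE_SIZE 2048   /* illustrative bounce buffer size */

/* Use the caller's buffer when it satisfies align_mask, otherwise copy it
 * into the per-descriptor bounce buffer and hand that out instead.
 */
static void *maybe_bounce(void *buf, size_t len, void *bounce,
                          unsigned long align_mask)
{
    if ((uintptr_t)buf & align_mask) {
        memcpy(bounce, buf, len);
        return bounce;
    }
    return buf;
}

int main(void)
{
    static char bounce[BOUNCE_SIZE];
    alignas(16) char data[64] = "payload";

    /* Force a misaligned source address to exercise the copy path. */
    void *src = data + 1;
    void *tx  = maybe_bounce(src, 16, bounce, 0xf);

    printf("bounced: %s\n", tx == bounce ? "yes" : "no");   /* yes */
    return 0;
}
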
563 static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
577 entries_free = fec_enet_get_free_txdesc_num(txq);
592 bdp = txq->bd.cur;
601 index = fec_enet_get_bd_index(bdp, &txq->bd);
604 memcpy(txq->tx_bounce[index], skb->data, buflen);
605 bufaddr = txq->tx_bounce[index];
621 last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
649 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
658 index = fec_enet_get_bd_index(last_bdp, &txq->bd);
660 txq->tx_buf[index].buf_p = skb;
674 bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
678 /* Make sure the update to bdp is performed before txq->bd.cur. */
680 txq->bd.cur = bdp;
683 writel(0, txq->bd.reg_desc_active);
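
The comment and the two stores above show the publication order on submit: finish writing the descriptor, issue a write barrier, advance txq->bd.cur, then poke reg_desc_active so the controller rescans the ring. A sketch of that ordering under stub definitions (wmb() and writel() here are no-op placeholders for the kernel primitives, and the struct layout is invented):

#include <stdint.h>

/* Stand-ins for the kernel's wmb()/writel(); on real hardware these are a
 * write memory barrier and an MMIO store, here only compiler-level stubs.
 */
#define wmb()               __asm__ __volatile__("" ::: "memory")
#define writel(val, addr)   (*(volatile uint32_t *)(addr) = (val))

struct desc { uint32_t bufaddr; uint16_t len; uint16_t status; };

struct ring {
    struct desc *cur;                    /* published producer position */
    volatile uint32_t *reg_desc_active;  /* "descriptor active" doorbell */
};

static void publish(struct ring *r, struct desc *next_free)
{
    /* Descriptor fields were written before this point; the barrier makes
     * them visible before the new producer position is published.
     */
    wmb();
    r->cur = next_free;
    /* Kick the controller so it re-reads the ring. */
    writel(0, r->reg_desc_active);
}

int main(void)
{
    static struct desc ring_mem[4];
    static uint32_t fake_reg;
    struct ring r = { .cur = ring_mem, .reg_desc_active = &fake_reg };

    ring_mem[0].len = 64;           /* fill a descriptor ...   */
    ring_mem[0].status = 0x8000;    /* ... then publish it     */
    publish(&r, &ring_mem[1]);
    return 0;
}
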
689 fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
707 memcpy(txq->tx_bounce[index], data, size);
708 data = txq->tx_bounce[index];
727 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
749 fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
765 bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
766 dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
769 memcpy(txq->tx_bounce[index], skb->data, hdr_len);
770 bufaddr = txq->tx_bounce[index];
790 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
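
The TSO header path appears to carve one TSO_HEADER_SIZE slot per descriptor out of a single coherent allocation (tso_hdrs / tso_hdrs_dma), so the CPU pointer and the DMA address of a slot are both index * TSO_HEADER_SIZE from their respective bases. A small model of that slot arithmetic (the struct, the slot size and the addresses are assumptions):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TSO_HEADER_SIZE 256   /* assumed slot size, one per descriptor */
#define RING_SIZE       16

struct tso_hdr_block {
    uint8_t  *cpu_base;   /* virtual base of the coherent block        */
    uintptr_t dma_base;   /* matching device-visible base address      */
};

/* CPU pointer of the header slot that belongs to descriptor 'index'. */
static uint8_t *hdr_cpu(const struct tso_hdr_block *b, unsigned int index)
{
    return b->cpu_base + (size_t)index * TSO_HEADER_SIZE;
}

/* DMA address of the same slot: identical offset from the DMA base. */
static uintptr_t hdr_dma(const struct tso_hdr_block *b, unsigned int index)
{
    return b->dma_base + (uintptr_t)index * TSO_HEADER_SIZE;
}

int main(void)
{
    struct tso_hdr_block b = {
        .cpu_base = malloc(RING_SIZE * TSO_HEADER_SIZE),
        .dma_base = 0x20000000,   /* pretend bus address */
    };

    printf("slot 3: cpu=%p dma=0x%lx\n",
           (void *)hdr_cpu(&b, 3), (unsigned long)hdr_dma(&b, 3));
    free(b.cpu_base);
    return 0;
}
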
802 static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
808 struct bufdesc *bdp = txq->bd.cur;
813 if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
833 index = fec_enet_get_bd_index(bdp, &txq->bd);
838 hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
840 ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
848 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
849 index = fec_enet_get_bd_index(bdp, &txq->bd);
850 ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
862 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
866 txq->tx_buf[index].buf_p = skb;
869 txq->bd.cur = bdp;
873 !readl(txq->bd.reg_desc_active) ||
874 !readl(txq->bd.reg_desc_active) ||
875 !readl(txq->bd.reg_desc_active) ||
876 !readl(txq->bd.reg_desc_active))
877 writel(0, txq->bd.reg_desc_active);
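
The chain of repeated readl() calls before the final writel() looks like a hardware-erratum workaround gated on a quirk flag (not shown in the matched lines): the active register is sampled several times back to back, and the doorbell is rewritten only if the quirk is absent or one of the reads observed the register already clear. A structural sketch of that short-circuit logic with placeholder accessors (reg_read/reg_write and the flag name are made up; the real driver uses readl()/writel()):

#include <stdbool.h>
#include <stdint.h>

static volatile uint32_t fake_desc_active;

static uint32_t reg_read(volatile uint32_t *reg)            { return *reg; }
static void     reg_write(volatile uint32_t *reg, uint32_t v) { *reg = v; }

/* Re-arm the transmit "descriptor active" register, but when the erratum
 * workaround applies, only if one of several consecutive reads sees it
 * already clear (mirroring the short-circuit || chain above).
 */
static void kick_tx(volatile uint32_t *reg, bool erratum_workaround)
{
    if (!erratum_workaround ||
        !reg_read(reg) ||
        !reg_read(reg) ||
        !reg_read(reg) ||
        !reg_read(reg))
        reg_write(reg, 0);
}

int main(void)
{
    fake_desc_active = 1;               /* DMA still armed: no rewrite      */
    kick_tx(&fake_desc_active, true);

    fake_desc_active = 0;               /* DMA went idle: doorbell rewritten */
    kick_tx(&fake_desc_active, true);
    return 0;
}
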
892 struct fec_enet_priv_tx_q *txq;
897 txq = fep->tx_queue[queue];
901 ret = fec_enet_txq_submit_tso(txq, skb, ndev);
903 ret = fec_enet_txq_submit_skb(txq, skb, ndev);
907 entries_free = fec_enet_get_free_txdesc_num(txq);
908 if (entries_free <= txq->tx_stop_threshold)
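
After queuing, the transmit path stops the queue once the free-descriptor count drops to tx_stop_threshold, and the completion path further down wakes it again only once tx_wake_threshold entries are free, which gives the flow control some hysteresis (likely via netif_tx_stop_queue()/netif_tx_wake_queue()). A toy model of that decision; the threshold values and the stopped flag are illustrative:

#include <stdbool.h>
#include <stdio.h>

struct txq_flowctl {
    int  stop_threshold;   /* stop queuing at or below this many free slots */
    int  wake_threshold;   /* wake only once at least this many are free    */
    bool stopped;
};

static void after_enqueue(struct txq_flowctl *q, int entries_free)
{
    if (entries_free <= q->stop_threshold)
        q->stopped = true;              /* queue stopped                  */
}

static void after_completion(struct txq_flowctl *q, int entries_free)
{
    if (q->stopped && entries_free >= q->wake_threshold)
        q->stopped = false;             /* enough headroom: queue woken   */
}

int main(void)
{
    struct txq_flowctl q = { .stop_threshold = 17, .wake_threshold = 51 };

    after_enqueue(&q, 17);              /* hits the stop threshold         */
    after_completion(&q, 30);           /* still below wake: stays stopped */
    after_completion(&q, 60);           /* room again: wakes               */
    printf("stopped=%d\n", q.stopped);  /* 0 */
    return 0;
}
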
919 struct fec_enet_priv_tx_q *txq;
949 txq = fep->tx_queue[q];
950 bdp = txq->bd.base;
951 txq->bd.cur = bdp;
953 for (i = 0; i < txq->bd.ring_size; i++) {
956 if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
958 !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
963 if (txq->tx_buf[i].buf_p)
964 dev_kfree_skb_any(txq->tx_buf[i].buf_p);
965 } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
972 if (txq->tx_buf[i].buf_p)
973 xdp_return_frame(txq->tx_buf[i].buf_p);
975 struct page *page = txq->tx_buf[i].buf_p;
981 txq->tx_buf[i].buf_p = NULL;
983 txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
985 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
989 bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
991 txq->dirty_tx = bdp;
1007 struct fec_enet_priv_tx_q *txq;
1023 txq = fep->tx_queue[i];
1024 writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));
1386 struct fec_enet_priv_tx_q *txq;
1395 txq = fep->tx_queue[queue_id];
1398 bdp = txq->dirty_tx;
1401 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1403 while (bdp != READ_ONCE(txq->bd.cur)) {
1410 index = fec_enet_get_bd_index(bdp, &txq->bd);
1412 if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
1413 skb = txq->tx_buf[index].buf_p;
1415 !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
1432 if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
1433 xdpf = txq->tx_buf[index].buf_p;
1440 page = txq->tx_buf[index].buf_p;
1444 if (unlikely(!txq->tx_buf[index].buf_p)) {
1445 txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
1470 if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
1482 if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
1498 } else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
1505 txq->tx_buf[index].buf_p = NULL;
1507 txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
1514 txq->dirty_tx = bdp;
1517 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1522 entries_free = fec_enet_get_free_txdesc_num(txq);
1523 if (entries_free >= txq->tx_wake_threshold)
1529 if (bdp != txq->bd.cur &&
1530 readl(txq->bd.reg_desc_active) == 0)
1531 writel(0, txq->bd.reg_desc_active);
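
The completion walk starts one descriptor past dirty_tx, runs up to (but not including) the producer position read with READ_ONCE(), reclaims each buffer, and leaves dirty_tx on the last entry it cleaned. The sketch below shows only that pointer bookkeeping; it omits the hardware-ownership/status check and the per-type unmap/free work the driver performs for each entry (names and layout are illustrative):

#include <stdio.h>

#define RING_SIZE 8

struct desc { int in_use; };

struct tx_ring {
    struct desc *base;
    struct desc *cur;        /* producer: next descriptor to fill     */
    struct desc *dirty_tx;   /* consumer: last descriptor reclaimed   */
};

static struct desc *next_desc(struct desc *bdp, struct desc *base)
{
    return (bdp + 1 < base + RING_SIZE) ? bdp + 1 : base;
}

/* Clean completed descriptors: start one past dirty_tx, stop at cur,
 * and leave dirty_tx pointing at the last entry that was reclaimed.
 */
static int tx_clean(struct tx_ring *r)
{
    struct desc *bdp = next_desc(r->dirty_tx, r->base);
    int cleaned = 0;

    while (bdp != r->cur) {
        bdp->in_use = 0;          /* driver would unmap/free the buffer here */
        r->dirty_tx = bdp;
        bdp = next_desc(bdp, r->base);
        cleaned++;
    }
    return cleaned;
}

int main(void)
{
    struct desc ring[RING_SIZE] = { 0 };
    struct tx_ring r = { .base = ring, .cur = &ring[5], .dirty_tx = &ring[1] };

    printf("cleaned %d descriptors\n", tx_clean(&r));   /* 3: slots 2, 3, 4 */
    return 0;
}
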
3239 struct fec_enet_priv_tx_q *txq;
3258 txq = fep->tx_queue[q];
3259 for (i = 0; i < txq->bd.ring_size; i++) {
3260 kfree(txq->tx_bounce[i]);
3261 txq->tx_bounce[i] = NULL;
3263 if (!txq->tx_buf[i].buf_p) {
3264 txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
3268 if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
3269 dev_kfree_skb(txq->tx_buf[i].buf_p);
3270 } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
3271 xdp_return_frame(txq->tx_buf[i].buf_p);
3273 struct page *page = txq->tx_buf[i].buf_p;
3278 txq->tx_buf[i].buf_p = NULL;
3279 txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
3288 struct fec_enet_priv_tx_q *txq;
3292 txq = fep->tx_queue[i];
3294 txq->bd.ring_size * TSO_HEADER_SIZE,
3295 txq->tso_hdrs,
3296 txq->tso_hdrs_dma);
3310 struct fec_enet_priv_tx_q *txq;
3313 txq = kzalloc(sizeof(*txq), GFP_KERNEL);
3314 if (!txq) {
3319 fep->tx_queue[i] = txq;
3320 txq->bd.ring_size = TX_RING_SIZE;
3323 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
3324 txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS;
3326 txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
3327 txq->bd.ring_size * TSO_HEADER_SIZE,
3328 &txq->tso_hdrs_dma,
3330 if (!txq->tso_hdrs) {
3409 struct fec_enet_priv_tx_q *txq;
3411 txq = fep->tx_queue[queue];
3412 bdp = txq->bd.base;
3413 for (i = 0; i < txq->bd.ring_size; i++) {
3414 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
3415 if (!txq->tx_bounce[i])
3426 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
3430 bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
3796 struct fec_enet_priv_tx_q *txq,
3806 entries_free = fec_enet_get_free_txdesc_num(txq);
3813 bdp = txq->bd.cur;
3817 index = fec_enet_get_bd_index(bdp, &txq->bd);
3828 txq->tx_buf[index].buf_p = xdpf;
3829 txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
3840 txq->tx_buf[index].buf_p = page;
3841 txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX;
3855 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
3873 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
3875 /* Make sure the update to bdp is performed before txq->bd.cur. */
3878 txq->bd.cur = bdp;
3881 writel(0, txq->bd.reg_desc_active);
3890 struct fec_enet_priv_tx_q *txq;
3895 txq = fep->tx_queue[queue];
3902 ret = fec_enet_txq_xmit_frame(fep, txq, xdp, dma_sync_len, false);
3915 struct fec_enet_priv_tx_q *txq;
3923 txq = fep->tx_queue[queue];
3931 if (fec_enet_txq_xmit_frame(fep, txq, frames[i], 0, true) < 0)
4073 struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
4074 unsigned size = dsize * txq->bd.ring_size;
4076 txq->bd.qid = i;
4077 txq->bd.base = cbd_base;
4078 txq->bd.cur = cbd_base;
4079 txq->bd.dma = bd_dma;
4080 txq->bd.dsize = dsize;
4081 txq->bd.dsize_log2 = dsize_log2;
4082 txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
4085 txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
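
The setup at the end records each ring's geometry: base and cur start at the ring's first descriptor, dma holds the bus address, dsize/dsize_log2 describe the (possibly extended) descriptor size, and last ends up at the final descriptor, i.e. base + (ring_size - 1) * dsize. A compact model of that layout arithmetic (struct names and the descriptor layout are assumed; the driver computes last by stepping back one dsize from the end of the block):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct bufdesc_model { uint32_t data; };   /* stand-in descriptor layout */

struct bd_geom {
    void     *base, *cur, *last;
    uintptr_t dma;
    unsigned  dsize, dsize_log2, ring_size;
};

static void bd_geom_init(struct bd_geom *g, void *cpu, uintptr_t dma,
                         unsigned dsize, unsigned ring_size)
{
    g->base = g->cur = cpu;
    g->dma = dma;
    g->dsize = dsize;
    g->ring_size = ring_size;

    /* dsize is a power of two, so its log2 can replace a division when
     * converting byte distances into descriptor counts.
     */
    g->dsize_log2 = 0;
    while ((1u << g->dsize_log2) < dsize)
        g->dsize_log2++;

    /* Last descriptor sits (ring_size - 1) descriptors past the base. */
    g->last = (char *)cpu + (size_t)(ring_size - 1) * dsize;
}

int main(void)
{
    unsigned dsize = sizeof(struct bufdesc_model), ring_size = 512;
    void *cpu = calloc(ring_size, dsize);

    struct bd_geom g;
    bd_geom_init(&g, cpu, 0x30000000, dsize, ring_size);
    printf("dsize_log2=%u last-base=%td\n",
           g.dsize_log2, (char *)g.last - (char *)g.base);
    free(cpu);
    return 0;
}
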