Lines matching refs: tx_chn
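
(These matches appear to come from the TI K3 AM65x CPSW NUSS Ethernet driver, drivers/net/ethernet/ti/am65-cpsw-nuss.c; the leading numbers are that file's line numbers. The sketches interleaved below are hedged reconstructions of the surrounding functions, not verbatim source.)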

283 struct am65_cpsw_tx_chn *tx_chn;
288 tx_chn = &common->tx_chns[txqueue];
296 k3_cppi_desc_pool_avail(tx_chn->desc_pool));
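
Lines 283-296 sit in the TX timeout handler, which reports how many pool descriptors are still free when the stack declares a queue stuck. A minimal sketch, assuming the usual ndo_tx_timeout shape and an am65_ndev_to_common() accessor:

static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
					       unsigned int txqueue)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct netdev_queue *netif_txq;
	struct am65_cpsw_tx_chn *tx_chn;

	netif_txq = netdev_get_tx_queue(ndev, txqueue);
	tx_chn = &common->tx_chns[txqueue];

	netdev_err(ndev, "txq:%u stopped:%d free desc:%zu\n", txqueue,
		   netif_tx_queue_stopped(netif_txq),
		   k3_cppi_desc_pool_avail(tx_chn->desc_pool));
}
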
457 ret = k3_udma_glue_enable_tx_chn(common->tx_chns[i].tx_chn);
493 k3_udma_glue_tdown_tx_chn(common->tx_chns[i].tx_chn, false);
503 k3_udma_glue_reset_tx_chn(common->tx_chns[i].tx_chn,
506 k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn);
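
Lines 457-506 are the glue-layer channel lifecycle: enable on open, and on stop a teardown request, a drain through a cleanup callback, then disable. A sketch of the assumed stop ordering (the wait for the teardown-complete notification between the two loops is elided; tx_ch_num is the driver's channel-count field):

	for (i = 0; i < common->tx_ch_num; i++)
		k3_udma_glue_tdown_tx_chn(common->tx_chns[i].tx_chn, false);

	/* ... wait for the teardown-complete notification ... */

	for (i = 0; i < common->tx_ch_num; i++) {
		k3_udma_glue_reset_tx_chn(common->tx_chns[i].tx_chn,
					  &common->tx_chns[i],
					  am65_cpsw_nuss_tx_cleanup);
		k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn);
	}
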
843 static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
854 k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
856 dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, DMA_TO_DEVICE);
859 k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
861 next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
864 k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
866 dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
870 k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
872 k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
875 k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
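
Lines 843-875 are am65_cpsw_nuss_xmit_free(), which releases an entire descriptor chain: unmap the head buffer with dma_unmap_single(), then follow the next-descriptor links, unmapping each fragment page and returning every descriptor to the pool. The k3_udma_glue_tx_cppi5_to_dma_addr() calls translate addresses back from the channel-specific encoding stored in the CPPI5 descriptor. A reconstruction consistent with the quoted lines (the cppi5_hdesc_get_obuf()/cppi5_hdesc_get_next_hbdesc() accessors are assumptions):

static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
				     struct cppi5_host_desc_t *desc)
{
	struct cppi5_host_desc_t *first_desc, *next_desc;
	dma_addr_t buf_dma, next_desc_dma;
	u32 buf_dma_len;

	first_desc = desc;
	next_desc = first_desc;

	/* head buffer was mapped with dma_map_single() */
	cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
	dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len,
			 DMA_TO_DEVICE);

	next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
	while (next_desc_dma) {
		next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						       next_desc_dma);
		/* fragments were mapped with skb_frag_dma_map() */
		cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
		dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
			       DMA_TO_DEVICE);

		next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn,
						  &next_desc_dma);

		k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
	}

	k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}
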
880 struct am65_cpsw_tx_chn *tx_chn = data;
885 desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
888 am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
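
Lines 880-888 form the cleanup callback handed to k3_udma_glue_reset_tx_chn() above: each descriptor still queued at teardown is mapped back to its virtual address, the owning skb is recovered from the descriptor's software data, and both are freed. Assumed shape:

static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct am65_cpsw_tx_chn *tx_chn = data;
	struct cppi5_host_desc_t *desc_tx;
	struct sk_buff *skb;
	void **swdata;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_tx);
	skb = *(swdata);
	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);

	dev_kfree_skb_any(skb);
}
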
894 am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
904 desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
908 am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
912 am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
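
Lines 894-912 are the per-packet completion helper: translate the popped DMA address to a descriptor, pull the skb out of its swdata, free the chain, then pass the skb to the CPTS block so a pending TX timestamp can be delivered. Assumed shape (per-CPU stats accounting elided):

static struct sk_buff *
am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
			       dma_addr_t desc_dma)
{
	struct cppi5_host_desc_t *desc_tx;
	struct sk_buff *skb;
	void **swdata;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_tx);
	skb = *(swdata);
	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);

	/* CPTS ignores skbs with no timestamp request pending */
	am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);

	return skb;
}
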
924 static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
934 (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= MAX_SKB_FRAGS))
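
Lines 924-934 wake a stopped queue only once at least MAX_SKB_FRAGS descriptors are free again, so the next maximally fragmented skb is guaranteed to fit. A sketch; the __netif_tx_lock() pairing is an assumption:

static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn,
				   struct net_device *ndev,
				   struct netdev_queue *netif_txq)
{
	if (netif_tx_queue_stopped(netif_txq)) {
		/* re-check under the tx queue lock to avoid racing
		 * with the xmit path stopping the queue */
		__netif_tx_lock(netif_txq, smp_processor_id());
		if (netif_running(ndev) &&
		    (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		     MAX_SKB_FRAGS))
			netif_tx_wake_queue(netif_txq);

		__netif_tx_unlock(netif_txq);
	}
}
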
945 struct am65_cpsw_tx_chn *tx_chn;
953 tx_chn = &common->tx_chns[chn];
956 spin_lock(&tx_chn->lock);
957 res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
958 spin_unlock(&tx_chn->lock);
968 skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
978 am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
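
Lines 945-978 are the NAPI completion loop for the multi-port case: only the descriptor pop is serialized by tx_chn->lock, everything after it runs lockless. A simplified sketch (teardown-marker handling and batched byte-queue-limit accounting elided):

static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
					   int chn, unsigned int budget)
{
	struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[chn];
	struct netdev_queue *netif_txq;
	struct net_device *ndev;
	struct sk_buff *skb;
	dma_addr_t desc_dma;
	unsigned int num_tx = 0;
	int res;

	while (num_tx < budget) {
		spin_lock(&tx_chn->lock);
		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
		spin_unlock(&tx_chn->lock);
		if (res == -ENODATA)
			break;

		skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
		ndev = skb->dev;
		num_tx++;

		netif_txq = netdev_get_tx_queue(ndev, chn);
		netdev_tx_completed_queue(netif_txq, 1, skb->len);
		napi_consume_skb(skb, budget);

		am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
	}

	return num_tx;
}
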
990 struct am65_cpsw_tx_chn *tx_chn;
998 tx_chn = &common->tx_chns[chn];
1001 res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
1011 skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
1026 am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
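
Lines 990-1026 are the CPSW2G variant of the same loop: functionally identical, but the k3_udma_glue_pop_tx_chn() call is not taken under tx_chn->lock, presumably because a CPSW2G instance has a single port and each TX channel is completed from exactly one NAPI context, so pops cannot race.
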
1035 struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx);
1038 if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
1039 num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id, budget);
1041 num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id, budget);
1047 enable_irq(tx_chn->irq);
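
Lines 1035-1047 are the TX NAPI poll routine, dispatching to the locked or lockless completion loop and re-arming the channel interrupt once the budget is no longer exhausted. The napi_complete_done() gating is an assumption:

static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx);
	int num_tx;

	if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
		num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common,
							    tx_chn->id,
							    budget);
	else
		num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common,
							 tx_chn->id, budget);

	if (num_tx >= budget)
		return budget;

	if (napi_complete_done(napi_tx, num_tx))
		enable_irq(tx_chn->irq);

	return num_tx;
}
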
1065 struct am65_cpsw_tx_chn *tx_chn = dev_id;
1068 napi_schedule(&tx_chn->napi_tx);
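
Lines 1065-1068 are the hard IRQ handler. The disable_irq_nosync() half of the pairing with enable_irq() in the poll routine is an assumption:

static irqreturn_t am65_cpsw_nuss_tx_irq(int irq, void *dev_id)
{
	struct am65_cpsw_tx_chn *tx_chn = dev_id;

	disable_irq_nosync(irq);
	napi_schedule(&tx_chn->napi_tx);

	return IRQ_HANDLED;
}
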
1080 struct am65_cpsw_tx_chn *tx_chn;
1098 tx_chn = &common->tx_chns[q_idx];
1102 buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len,
1104 if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
1110 first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
1113 dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len,
1124 k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
1154 next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
1160 buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
1162 if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
1164 k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
1170 k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
1174 desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
1176 k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
1191 desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
1193 ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
1195 spin_lock_bh(&tx_chn->lock);
1196 ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
1197 spin_unlock_bh(&tx_chn->lock);
1207 if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
1214 if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
1224 am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
1231 am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
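
Lines 1080-1231 are the xmit path: dma_map_single() the linear part into the first pool descriptor, skb_frag_dma_map() each fragment into a linked descriptor (lines 1154-1176), then push the chain and apply flow control; both error paths (lines 1224 and 1231) funnel through am65_cpsw_nuss_xmit_free(). The tail is the subtle part; a sketch of its assumed shape:

	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
	if (AM65_CPSW_IS_CPSW2G(common)) {
		/* one netdev queue per channel: no cross-CPU contention */
		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc,
					       desc_dma);
	} else {
		spin_lock_bh(&tx_chn->lock);
		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc,
					       desc_dma);
		spin_unlock_bh(&tx_chn->lock);
	}
	if (ret) {
		dev_err(common->dev, "can't push desc %d\n", ret);
		goto drop_free;
	}

	if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
		netif_tx_stop_queue(netif_txq);
		/* barrier: make the stop visible before re-checking */
		smp_mb__after_atomic();

		/* re-check: the completion path may have freed
		 * descriptors between the avail() check and the stop */
		if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		    MAX_SKB_FRAGS)
			netif_tx_wake_queue(netif_txq);
	}

	return NETDEV_TX_OK;

drop_free:
	am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
	/* ... drop accounting, dev_kfree_skb_any(skb) ... */
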
1631 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
1633 if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
1634 k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
1636 if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
1637 k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
1639 memset(tx_chn, 0, sizeof(*tx_chn));
1652 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
1654 if (tx_chn->irq)
1655 devm_free_irq(dev, tx_chn->irq, tx_chn);
1657 netif_napi_del(&tx_chn->napi_tx);
1659 if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
1660 k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
1662 if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
1663 k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
1665 memset(tx_chn, 0, sizeof(*tx_chn));
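
Lines 1631-1639 and 1652-1665 are two variants of the same teardown: the first (a devm-style cleanup covering probe failure) only destroys the descriptor pool and releases the glue channel, while the second (full remove) additionally frees the per-channel IRQ and deletes the NAPI context first. Both end with memset(tx_chn, 0, sizeof(*tx_chn)) so the IS_ERR_OR_NULL() guards cannot release a stale handle twice.
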
1675 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
1677 netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
1680 ret = devm_request_irq(dev, tx_chn->irq,
1683 tx_chn->tx_chn_name, tx_chn);
1686 tx_chn->id, tx_chn->irq, ret);
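
Lines 1675-1686 register the TX NAPI context and request the per-channel IRQ, using the long "dev_name-txN" string built at line 1758 so /proc/interrupts entries stay unique across devices. Assumed shape (the IRQF_TRIGGER_HIGH flag is an assumption):

	netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
			  am65_cpsw_nuss_tx_poll);

	ret = devm_request_irq(dev, tx_chn->irq,
			       am65_cpsw_nuss_tx_irq,
			       IRQF_TRIGGER_HIGH,
			       tx_chn->tx_chn_name, tx_chn);
	if (ret) {
		dev_err(dev, "failure requesting tx%u irq %u, %d\n",
			tx_chn->id, tx_chn->irq, ret);
		goto err;
	}
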
1718 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
1720 snprintf(tx_chn->tx_chn_name,
1721 sizeof(tx_chn->tx_chn_name), "tx%d", i);
1723 spin_lock_init(&tx_chn->lock);
1724 tx_chn->common = common;
1725 tx_chn->id = i;
1726 tx_chn->descs_num = max_desc_num;
1728 tx_chn->tx_chn =
1730 tx_chn->tx_chn_name,
1732 if (IS_ERR(tx_chn->tx_chn)) {
1733 ret = dev_err_probe(dev, PTR_ERR(tx_chn->tx_chn),
1737 tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
1739 tx_chn->desc_pool = k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
1740 tx_chn->descs_num,
1742 tx_chn->tx_chn_name);
1743 if (IS_ERR(tx_chn->desc_pool)) {
1744 ret = PTR_ERR(tx_chn->desc_pool);
1749 tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
1750 if (tx_chn->irq < 0) {
1752 tx_chn->irq);
1753 ret = tx_chn->irq;
1757 snprintf(tx_chn->tx_chn_name,
1758 sizeof(tx_chn->tx_chn_name), "%s-tx%d",
1759 dev_name(dev), tx_chn->id);
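
Lines 1718-1759 are per-channel init. Note the two snprintf() calls into the same buffer: the short "txN" name is the DMA channel name looked up through the device-tree binding by k3_udma_glue_request_tx_chn(), and only after the IRQ number is known is the buffer rewritten to the longer, device-unique IRQ name. A condensed sketch; tx_cfg, hdesc_size and max_desc_num stand in for setup code not quoted above:

	struct k3_udma_glue_tx_channel_cfg tx_cfg = { 0 };

	/* ... fill tx_cfg, compute hdesc_size ... */

	snprintf(tx_chn->tx_chn_name,
		 sizeof(tx_chn->tx_chn_name), "tx%d", i);

	spin_lock_init(&tx_chn->lock);
	tx_chn->common = common;
	tx_chn->id = i;
	tx_chn->descs_num = max_desc_num;

	tx_chn->tx_chn =
		k3_udma_glue_request_tx_chn(dev,
					    tx_chn->tx_chn_name,
					    &tx_cfg);
	if (IS_ERR(tx_chn->tx_chn)) {
		ret = dev_err_probe(dev, PTR_ERR(tx_chn->tx_chn),
				    "Failed to request tx dma channel\n");
		goto err;
	}
	tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);

	tx_chn->desc_pool =
		k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
					      tx_chn->descs_num,
					      hdesc_size,
					      tx_chn->tx_chn_name);
	if (IS_ERR(tx_chn->desc_pool)) {
		ret = PTR_ERR(tx_chn->desc_pool);
		goto err;
	}

	tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
	if (tx_chn->irq < 0) {
		dev_err(dev, "Failed to get tx dma irq %d\n",
			tx_chn->irq);
		ret = tx_chn->irq;
		goto err;
	}

	/* rewrite the name buffer to the unique IRQ name */
	snprintf(tx_chn->tx_chn_name,
		 sizeof(tx_chn->tx_chn_name), "%s-tx%d",
		 dev_name(dev), tx_chn->id);
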