Lines matching refs: tx_chn
327 struct am65_cpsw_tx_chn *tx_chn;
332 tx_chn = &common->tx_chns[txqueue];
340 k3_cppi_desc_pool_avail(tx_chn->desc_pool));
491 ret = k3_udma_glue_enable_tx_chn(common->tx_chns[i].tx_chn);
523 k3_udma_glue_tdown_tx_chn(common->tx_chns[i].tx_chn, false);
533 k3_udma_glue_reset_tx_chn(common->tx_chns[i].tx_chn,
536 k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn);
864 static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
882 next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
891 k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
894 k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
899 struct am65_cpsw_tx_chn *tx_chn = data;
904 desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
907 am65_cpsw_nuss_xmit_free(tx_chn, tx_chn->common->dev, desc_tx);
917 struct am65_cpsw_tx_chn *tx_chn;
926 tx_chn = &common->tx_chns[chn];
932 res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
942 desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
946 am65_cpsw_nuss_xmit_free(tx_chn, dev, desc_tx);
978 (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
991 struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx);
994 num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id,
999 enable_irq(tx_chn->irq);
1017 struct am65_cpsw_tx_chn *tx_chn = dev_id;
1020 napi_schedule(&tx_chn->napi_tx);
1032 struct am65_cpsw_tx_chn *tx_chn;
1050 tx_chn = &common->tx_chns[q_idx];
1062 first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
1104 next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
1114 k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
1123 desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
1139 desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
1140 ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
1149 if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
1156 if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
1166 am65_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
1173 am65_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
1406 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
1408 if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
1409 k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
1411 if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
1412 k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
1414 memset(tx_chn, 0, sizeof(*tx_chn));
1426 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
1428 if (tx_chn->irq)
1429 devm_free_irq(dev, tx_chn->irq, tx_chn);
1431 netif_napi_del(&tx_chn->napi_tx);
1433 if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
1434 k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
1436 if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
1437 k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
1439 memset(tx_chn, 0, sizeof(*tx_chn));
1466 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
1468 snprintf(tx_chn->tx_chn_name,
1469 sizeof(tx_chn->tx_chn_name), "tx%d", i);
1471 tx_chn->common = common;
1472 tx_chn->id = i;
1473 tx_chn->descs_num = max_desc_num;
1474 tx_chn->desc_pool =
1476 tx_chn->descs_num,
1478 tx_chn->tx_chn_name);
1479 if (IS_ERR(tx_chn->desc_pool)) {
1480 ret = PTR_ERR(tx_chn->desc_pool);
1485 tx_chn->tx_chn =
1487 tx_chn->tx_chn_name,
1489 if (IS_ERR(tx_chn->tx_chn)) {
1490 ret = dev_err_probe(dev, PTR_ERR(tx_chn->tx_chn),
1495 tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
1496 if (tx_chn->irq <= 0) {
1498 tx_chn->irq);
1499 ret = tx_chn->irq ?: -ENXIO;
1503 snprintf(tx_chn->tx_chn_name,
1504 sizeof(tx_chn->tx_chn_name), "%s-tx%d",
1505 dev_name(dev), tx_chn->id);
1898 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
1900 netif_tx_napi_add(port->ndev, &tx_chn->napi_tx,
1903 ret = devm_request_irq(dev, tx_chn->irq,
1906 tx_chn->tx_chn_name, tx_chn);
1909 tx_chn->id, tx_chn->irq, ret);