Lines matching refs: ud (struct udma_dev *ud)
280 struct udma_dev *ud;
402 static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
404 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
412 static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
415 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
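A minimal sketch of what navss_psil_pair() above can do with the tisci_rm handle: forward the pairing request to System Firmware through the TISCI PSI-L ops. The destination-thread offset flag and the exact op signature are taken from the TISCI/PSI-L headers and are assumptions here, not quoted from this file.

	static int navss_psil_pair_sketch(struct udma_dev *ud, u32 src_thread,
					  u32 dst_thread)
	{
		struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

		/* Destination threads carry the PSI-L destination marker bit. */
		dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;

		return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
						      tisci_rm->tisci_navss_dev_id,
						      src_thread, dst_thread);
	}

navss_psil_unpair() would be the mirror image, calling the unpair op with the same arguments.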
470 struct device *dev = uc->ud->dev;
557 struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
562 spin_lock_irqsave(&ud->lock, flags);
563 list_splice_tail_init(&ud->desc_to_purge, &head);
564 spin_unlock_irqrestore(&ud->lock, flags);
576 if (!list_empty(&ud->desc_to_purge))
577 schedule_work(&ud->purge_work);
582 struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
596 spin_lock_irqsave(&ud->lock, flags);
597 list_add_tail(&vd->node, &ud->desc_to_purge);
598 spin_unlock_irqrestore(&ud->lock, flags);
600 schedule_work(&ud->purge_work);
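The lines above outline the deferred descriptor-free path: udma_desc_free() queues the virt-dma descriptor on ud->desc_to_purge under ud->lock and schedules purge_work, and the worker frees everything outside atomic context. A sketch of that worker, assuming driver helpers named to_udma_chan(), to_udma_desc() and udma_free_hwdesc():

	static void udma_purge_desc_work_sketch(struct work_struct *work)
	{
		struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
		struct virt_dma_desc *vd, *_vd;
		unsigned long flags;
		LIST_HEAD(head);

		/* Take the whole pending list in one short critical section. */
		spin_lock_irqsave(&ud->lock, flags);
		list_splice_tail_init(&ud->desc_to_purge, &head);
		spin_unlock_irqrestore(&ud->lock, flags);

		list_for_each_entry_safe(vd, _vd, &head, node) {
			struct udma_chan *uc = to_udma_chan(vd->tx.chan);
			struct udma_desc *d = to_udma_desc(&vd->tx);

			udma_free_hwdesc(uc, d);	/* assumed helper */
			list_del(&vd->node);
			kfree(d);
		}

		/* More descriptors may have been queued meanwhile: run again. */
		if (!list_empty(&ud->desc_to_purge))
			schedule_work(&ud->purge_work);
	}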
648 return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
840 uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);
844 ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
867 if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode &&
929 uc->ud->match_data;
1180 dev_err(uc->ud->dev, "not matching descriptors!\n");
1240 * @ud: UDMA device
1254 static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1261 tmp_from = ud->rchan_cnt;
1263 if (tmp_from < ud->rchan_cnt)
1266 if (tmp_from + cnt > ud->rflow_cnt)
1269 bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
1270 ud->rflow_cnt);
1273 ud->rflow_cnt,
1275 if (start >= ud->rflow_cnt)
1281 bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
1285 static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1287 if (from < ud->rchan_cnt)
1289 if (from + cnt > ud->rflow_cnt)
1292 bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
1296 static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
1305 if (id < 0 || id >= ud->rflow_cnt)
1308 if (test_bit(id, ud->rflow_in_use))
1311 if (ud->rflow_gp_map) {
1313 if (!test_bit(id, ud->rflow_gp_map) &&
1314 !test_bit(id, ud->rflow_gp_map_allocated))
1318 dev_dbg(ud->dev, "get rflow%d\n", id);
1319 set_bit(id, ud->rflow_in_use);
1320 return &ud->rflows[id];
1323 static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
1325 if (!test_bit(rflow->id, ud->rflow_in_use)) {
1326 dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
1330 dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
1331 clear_bit(rflow->id, ud->rflow_in_use);
1335 static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
1340 if (test_bit(id, ud->res##_map)) { \
1341 dev_err(ud->dev, "%s%d is in use\n", #res, id); \
1347 if (tpl >= ud->res##_tpl.levels) \
1348 tpl = ud->res##_tpl.levels - 1; \
1350 start = ud->res##_tpl.start_idx[tpl]; \
1352 id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
1354 if (id == ud->res##_cnt) { \
1359 set_bit(id, ud->res##_map); \
1360 return &ud->res##s[id]; \
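The lines above come from a token-pasting macro (one body generating __udma_reserve_bchan/tchan/rchan). An illustrative expansion for res = tchan, not the preprocessor output verbatim, showing how the ## pasting turns the generic body into per-resource bitmap lookups:

	static struct udma_tchan *__udma_reserve_tchan_sketch(struct udma_dev *ud,
							       enum udma_tp_level tpl,
							       int id)
	{
		if (id >= 0) {
			/* Explicit request: the channel must still be free. */
			if (test_bit(id, ud->tchan_map)) {
				dev_err(ud->dev, "tchan%d is in use\n", id);
				return ERR_PTR(-ENOENT);
			}
		} else {
			int start;

			/* Clamp the requested throughput level to what exists. */
			if (tpl >= ud->tchan_tpl.levels)
				tpl = ud->tchan_tpl.levels - 1;

			start = ud->tchan_tpl.start_idx[tpl];

			id = find_next_zero_bit(ud->tchan_map, ud->tchan_cnt, start);
			if (id == ud->tchan_cnt)
				return ERR_PTR(-ENOENT);
		}

		set_bit(id, ud->tchan_map);
		return &ud->tchans[id];
	}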
1369 struct udma_dev *ud = uc->ud;
1374 dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n",
1386 tpl = ud->bchan_tpl.levels - 1;
1388 uc->bchan = __udma_reserve_bchan(ud, tpl, -1);
1402 struct udma_dev *ud = uc->ud;
1406 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
1416 uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl,
1424 if (ud->tflow_cnt) {
1433 if (test_bit(tflow_id, ud->tflow_map)) {
1434 dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
1435 clear_bit(uc->tchan->id, ud->tchan_map);
1441 set_bit(tflow_id, ud->tflow_map);
1451 struct udma_dev *ud = uc->ud;
1455 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
1465 uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl,
1478 struct udma_dev *ud = uc->ud;
1482 dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
1488 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
1492 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
1498 end = min(ud->tchan_cnt, ud->rchan_cnt);
1503 chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1];
1505 if (!test_bit(chan_id, ud->tchan_map) &&
1506 !test_bit(chan_id, ud->rchan_map))
1513 set_bit(chan_id, ud->tchan_map);
1514 set_bit(chan_id, ud->rchan_map);
1515 uc->tchan = &ud->tchans[chan_id];
1516 uc->rchan = &ud->rchans[chan_id];
1526 struct udma_dev *ud = uc->ud;
1530 dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
1535 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
1540 uc->rflow = __udma_get_rflow(ud, flow_id);
1552 struct udma_dev *ud = uc->ud;
1555 dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
1557 clear_bit(uc->bchan->id, ud->bchan_map);
1565 struct udma_dev *ud = uc->ud;
1568 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
1570 clear_bit(uc->rchan->id, ud->rchan_map);
1577 struct udma_dev *ud = uc->ud;
1580 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
1582 clear_bit(uc->tchan->id, ud->tchan_map);
1585 clear_bit(uc->tchan->tflow_id, ud->tflow_map);
1593 struct udma_dev *ud = uc->ud;
1596 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
1598 __udma_put_rflow(ud, uc->rflow);
1620 struct udma_dev *ud = uc->ud;
1627 ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
1640 k3_configure_chan_coherency(&uc->vc.chan, ud->asel);
1641 ring_cfg.asel = ud->asel;
1678 struct udma_dev *ud = uc->ud;
1690 ring_idx = ud->bchan_cnt + tchan->id;
1692 ret = k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
1703 if (ud->match_data->type == DMA_TYPE_UDMA) {
1753 struct udma_dev *ud = uc->ud;
1778 if (ud->tflow_cnt)
1779 fd_ring_id = ud->tflow_cnt + rflow->id;
1781 fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
1784 ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
1794 if (ud->match_data->type == DMA_TYPE_UDMA) {
1867 struct udma_dev *ud = uc->ud;
1868 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1881 if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
1882 tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id);
1884 burst_size = ud->match_data->burst_size[tpl];
1893 req_tx.tx_atype = ud->atype;
1901 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1911 req_rx.rx_atype = ud->atype;
1919 dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);
1926 struct udma_dev *ud = uc->ud;
1927 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1935 if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
1936 tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id);
1938 burst_size = ud->match_data->burst_size[tpl];
1952 dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);
1959 struct udma_dev *ud = uc->ud;
1960 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1986 ud->match_data->flags & UDMA_FLAG_TDTYPE) {
1995 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
2002 struct udma_dev *ud = uc->ud;
2003 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2013 if (ud->match_data->flags & UDMA_FLAG_TDTYPE) {
2022 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
2031 struct udma_dev *ud = uc->ud;
2032 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2061 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
2105 dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
2112 struct udma_dev *ud = uc->ud;
2113 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2125 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
2132 struct udma_dev *ud = uc->ud;
2133 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2145 dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
2170 dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
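The udma_tisci_*_channel_config() lines above all follow the same shape: build a ti_sci_msg_rm_udmap_*_ch_cfg request and hand it to the rm_udmap ops. A sketch of the TX side, using struct and flag names from the TISCI headers; only a small subset of valid_params is shown and the fetch size is the simplified TR-mode value:

	static int udma_tisci_tx_cfg_sketch(struct udma_chan *uc)
	{
		struct udma_dev *ud = uc->ud;
		struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
		struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
		int ret;

		req_tx.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
				      TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
				      TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
		req_tx.nav_id = tisci_rm->tisci_dev_id;
		req_tx.index = uc->tchan->id;
		req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
		req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
		req_tx.tx_atype = ud->atype;

		ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
		if (ret)
			dev_err(ud->dev, "tchan%d cfg failed %d\n", uc->tchan->id, ret);

		return ret;
	}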
2179 struct udma_dev *ud = to_udma_dev(chan->device);
2180 const struct udma_soc_data *soc_data = ud->soc_data;
2185 uc->dma_dev = ud->dev;
2198 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
2200 ud->desc_align,
2203 dev_err(ud->ddev.dev,
2222 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2241 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2242 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2252 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2259 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2270 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2278 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2288 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2300 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2303 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2310 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2312 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2321 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2330 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2336 uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
2338 dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
2348 dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
2365 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
2384 struct udma_dev *ud = to_udma_dev(chan->device);
2385 const struct udma_oes_offsets *oes = &ud->soc_data->oes;
2403 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2417 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2426 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2437 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2447 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2457 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2467 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2470 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2481 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
2483 ud->desc_align,
2486 dev_err(ud->ddev.dev,
2496 ret = navss_psil_pair(ud, uc->config.src_thread,
2499 dev_err(ud->dev,
2508 uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
2510 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2519 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2525 uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
2527 dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n",
2537 dev_err(ud->dev, "chan%d: BCDMA irq request failed\n",
2557 navss_psil_unpair(ud, uc->config.src_thread,
2587 trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset;
2596 struct udma_dev *ud = to_udma_dev(chan->device);
2597 const struct udma_oes_offsets *oes = &ud->soc_data->oes;
2612 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2621 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2631 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2641 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2650 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2660 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2663 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2671 uc->config.hdesc_size, ud->desc_align,
2674 dev_err(ud->ddev.dev,
2684 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2686 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2693 uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
2695 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2704 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2716 dev_dbg(ud->dev,
2721 dev_dbg(ud->dev,
2730 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
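The three *_alloc_chan_resources() variants referenced above share a common tail: pair the PSI-L threads, resolve the MSI index to a Linux IRQ, and request the ring completion interrupt, unpairing again on any failure. A sketch of that tail; the handler name udma_ring_irq_handler is an assumption for illustration:

	static int udma_chan_attach_irq_sketch(struct udma_chan *uc, u32 irq_ring_idx)
	{
		struct udma_dev *ud = uc->ud;
		int ret;

		ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
		if (ret) {
			dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
				uc->config.src_thread, uc->config.dst_thread);
			return ret;
		}

		uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
		if (uc->irq_num_ring <= 0) {
			dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
				irq_ring_idx);
			ret = -EINVAL;
			goto err_unpair;
		}

		ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
				  IRQF_TRIGGER_HIGH, uc->name, uc);
		if (ret) {
			dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
			goto err_unpair;
		}

		return 0;

	err_unpair:
		navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
		return ret;
	}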
2771 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
2795 uc->ud->desc_align);
2796 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
2906 if (uc->ud->match_data->type == DMA_TYPE_UDMA)
2918 dev_err(uc->ud->dev, "size %u is not supported\n",
2990 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
2999 dev_err(uc->ud->dev,
3017 dev_err(uc->ud->dev,
3037 if (uc->ud->match_data->type == DMA_TYPE_UDMA) {
3054 dev_err(uc->ud->dev, "size %zu is not supported\n",
3203 d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
3236 if (uc->ud->match_data->type == DMA_TYPE_UDMA)
3251 dev_err(uc->ud->dev,
3283 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA ||
3289 dev_err(uc->ud->dev,
3452 dev_err(uc->ud->dev,
3483 dev_err(uc->ud->dev, "size %zu is not supported\n",
3495 if (uc->ud->match_data->type == DMA_TYPE_UDMA)
3565 if (uc->ud->match_data->type != DMA_TYPE_UDMA)
3577 dev_err(uc->ud->dev,
3633 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
3657 dev_err(uc->ud->dev,
3696 dev_err(uc->ud->dev, "size %zu is not supported\n",
3710 if (uc->ud->match_data->type != DMA_TYPE_UDMA) {
3711 src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3712 dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3950 dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
3959 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
4047 struct udma_dev *ud = to_udma_dev(chan->device);
4070 navss_psil_unpair(ud, uc->config.src_thread,
4106 struct udma_dev *ud;
4115 ud = uc->ud;
4119 dev_err(ud->dev, "Invalid channel atype: %u\n",
4125 dev_err(ud->dev, "Invalid channel asel: %u\n",
4146 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
4155 if (ud->match_data->type == DMA_TYPE_BCDMA &&
4157 dev_err(ud->dev,
4172 if (ud->match_data->type == DMA_TYPE_PKTDMA &&
4182 const struct udma_match_data *match_data = ud->match_data;
4198 ucc->metadata_size, ud->desc_align);
4200 dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
4206 dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n", uc->id,
4216 struct udma_dev *ud = ofdma->of_dma_data;
4217 dma_cap_mask_t mask = ud->ddev.cap_mask;
4221 if (ud->match_data->type == DMA_TYPE_BCDMA) {
4236 if (ud->match_data->type == DMA_TYPE_UDMA) {
4252 dev_err(ud->dev, "get channel fail in %s.\n", __func__);
4453 static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
4458 ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]);
4459 if (IS_ERR(ud->mmrs[MMR_GCFG]))
4460 return PTR_ERR(ud->mmrs[MMR_GCFG]);
4462 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
4463 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4465 switch (ud->match_data->type) {
4467 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
4468 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
4469 ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
4470 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
4473 ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
4474 ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
4475 ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
4476 ud->rflow_cnt = ud->rchan_cnt;
4479 cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4480 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
4481 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
4482 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
4483 ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4);
4490 if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
4492 if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
4494 if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
4497 ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
4498 if (IS_ERR(ud->mmrs[i]))
4499 return PTR_ERR(ud->mmrs[i]);
4505 static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map,
4511 dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name,
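A plausible body for udma_mark_resource_ranges() above, assuming the standard ti_sci_resource_desc layout (start/num plus a secondary start_sec/num_sec range): the ranges granted by System Firmware are cleared out of an all-set bitmap, i.e. made available to this host.

	static void udma_mark_resource_ranges_sketch(struct udma_dev *ud,
						     unsigned long *map,
						     struct ti_sci_resource_desc *rm_desc,
						     char *name)
	{
		/* Clear (= mark usable) both ranges owned by this host. */
		bitmap_clear(map, rm_desc->start, rm_desc->num);
		bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec);

		dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n",
			name, rm_desc->start, rm_desc->num,
			rm_desc->start_sec, rm_desc->num_sec);
	}

This matches the setup_resources callers above, which first bitmap_fill() the map (everything reserved) and then carve out the ranges this host may use.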
4524 static int udma_setup_resources(struct udma_dev *ud)
4527 struct device *dev = ud->dev;
4529 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4533 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4536 ud->tchan_tpl.levels = 2;
4537 ud->tchan_tpl.start_idx[0] = 8;
4540 ud->tchan_tpl.levels = 2;
4541 ud->tchan_tpl.start_idx[0] = 2;
4543 ud->tchan_tpl.levels = 3;
4544 ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
4545 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4547 ud->tchan_tpl.levels = 2;
4548 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4550 ud->tchan_tpl.levels = 1;
4553 ud->rchan_tpl.levels = ud->tchan_tpl.levels;
4554 ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
4555 ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
4557 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4559 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4561 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4563 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4565 ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
4568 ud->rflow_gp_map_allocated = devm_kcalloc(dev,
4569 BITS_TO_LONGS(ud->rflow_cnt),
4572 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
4575 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
4578 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
4579 !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
4580 !ud->rflows || !ud->rflow_in_use)
4588 bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
4591 bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
4607 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4610 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4612 udma_mark_resource_ranges(ud, ud->tchan_map,
4620 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4623 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4625 udma_mark_resource_ranges(ud, ud->rchan_map,
4636 irq_res.desc[0].num = ud->tchan_cnt;
4649 irq_res.desc[i].num = ud->rchan_cnt;
4654 ud->soc_data->oes.udma_rchan;
4659 ud->soc_data->oes.udma_rchan;
4664 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4667 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4675 bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
4676 ud->rflow_cnt - ud->rchan_cnt);
4679 udma_mark_resource_ranges(ud, ud->rflow_gp_map,
4686 static int bcdma_setup_resources(struct udma_dev *ud)
4689 struct device *dev = ud->dev;
4691 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4692 const struct udma_oes_offsets *oes = &ud->soc_data->oes;
4696 cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4698 ud->bchan_tpl.levels = 3;
4699 ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap);
4700 ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
4702 ud->bchan_tpl.levels = 2;
4703 ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
4705 ud->bchan_tpl.levels = 1;
4708 cap = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4710 ud->rchan_tpl.levels = 3;
4711 ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap);
4712 ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
4714 ud->rchan_tpl.levels = 2;
4715 ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
4717 ud->rchan_tpl.levels = 1;
4721 ud->tchan_tpl.levels = 3;
4722 ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap);
4723 ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
4725 ud->tchan_tpl.levels = 2;
4726 ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
4728 ud->tchan_tpl.levels = 1;
4731 ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
4733 ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
4735 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4737 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4739 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4741 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4744 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
4747 ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
4750 if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
4751 !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans ||
4752 !ud->rflows)
4759 if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0)
4761 if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0)
4763 if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0)
4775 if (ud->bchan_cnt) {
4778 bitmap_zero(ud->bchan_map, ud->bchan_cnt);
4781 bitmap_fill(ud->bchan_map, ud->bchan_cnt);
4783 udma_mark_resource_ranges(ud, ud->bchan_map,
4791 if (ud->tchan_cnt) {
4794 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4797 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4799 udma_mark_resource_ranges(ud, ud->tchan_map,
4807 if (ud->rchan_cnt) {
4810 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4813 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4815 udma_mark_resource_ranges(ud, ud->rchan_map,
4825 if (ud->bchan_cnt) {
4829 irq_res.desc[0].num = ud->bchan_cnt;
4842 if (ud->tchan_cnt) {
4846 irq_res.desc[i].num = ud->tchan_cnt;
4848 irq_res.desc[i + 1].num = ud->tchan_cnt;
4862 if (ud->rchan_cnt) {
4866 irq_res.desc[i].num = ud->rchan_cnt;
4868 irq_res.desc[i + 1].num = ud->rchan_cnt;
4883 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4886 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4893 static int pktdma_setup_resources(struct udma_dev *ud)
4896 struct device *dev = ud->dev;
4898 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4899 const struct udma_oes_offsets *oes = &ud->soc_data->oes;
4903 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4905 ud->tchan_tpl.levels = 3;
4906 ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
4907 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4909 ud->tchan_tpl.levels = 2;
4910 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4912 ud->tchan_tpl.levels = 1;
4915 ud->rchan_tpl.levels = ud->tchan_tpl.levels;
4916 ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
4917 ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
4919 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4921 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4923 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4925 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4927 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
4930 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
4932 ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
4935 if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
4936 !ud->rchans || !ud->rflows || !ud->rflow_in_use)
4953 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4955 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4957 udma_mark_resource_ranges(ud, ud->tchan_map,
4964 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4966 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4968 udma_mark_resource_ranges(ud, ud->rchan_map,
4976 bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
4979 bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
4981 udma_mark_resource_ranges(ud, ud->rflow_in_use,
4990 bitmap_zero(ud->tflow_map, ud->tflow_cnt);
4993 bitmap_fill(ud->tflow_map, ud->tflow_cnt);
4995 udma_mark_resource_ranges(ud, ud->tflow_map,
5006 irq_res.desc[0].num = ud->tflow_cnt;
5018 irq_res.desc[i].num = ud->rflow_cnt;
5026 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
5029 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
5036 static int setup_resources(struct udma_dev *ud)
5038 struct device *dev = ud->dev;
5041 switch (ud->match_data->type) {
5043 ret = udma_setup_resources(ud);
5046 ret = bcdma_setup_resources(ud);
5049 ret = pktdma_setup_resources(ud);
5058 ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
5059 if (ud->bchan_cnt)
5060 ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
5061 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
5062 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
5066 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
5068 if (!ud->channels)
5071 switch (ud->match_data->type) {
5076 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
5077 ud->tchan_cnt),
5078 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
5079 ud->rchan_cnt),
5080 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
5081 ud->rflow_cnt));
5087 ud->bchan_cnt - bitmap_weight(ud->bchan_map,
5088 ud->bchan_cnt),
5089 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
5090 ud->tchan_cnt),
5091 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
5092 ud->rchan_cnt));
5098 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
5099 ud->tchan_cnt),
5100 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
5101 ud->rchan_cnt));
5110 static int udma_setup_rx_flush(struct udma_dev *ud)
5112 struct udma_rx_flush *rx_flush = &ud->rx_flush;
5116 struct device *dev = ud->dev;
5138 ud->desc_align);
5178 ud->desc_align);
5222 if (uc->ud->match_data->type == DMA_TYPE_BCDMA) {
5233 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
5239 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
5278 static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud)
5280 const struct udma_match_data *match_data = ud->match_data;
5287 if (ud->bchan_cnt)
5288 tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0);
5289 else if (ud->tchan_cnt)
5290 tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0);
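A sketch of how udma_get_copy_align() above can derive the dmaengine copy alignment from the burst size of the highest-throughput channel available for memcpy; the mapping to DMAENGINE_ALIGN_* values is illustrative:

	static enum dmaengine_alignment udma_get_copy_align_sketch(struct udma_dev *ud)
	{
		const struct udma_match_data *match_data = ud->match_data;
		u8 tpl;

		if (!match_data->enable_memcpy_support)
			return DMAENGINE_ALIGN_8_BYTES;

		/* Pick the highest TPL level the device has channels for. */
		if (ud->bchan_cnt)
			tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0);
		else if (ud->tchan_cnt)
			tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0);
		else
			return DMAENGINE_ALIGN_8_BYTES;

		switch (match_data->burst_size[tpl]) {
		case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES:
			return DMAENGINE_ALIGN_256_BYTES;
		case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES:
			return DMAENGINE_ALIGN_128_BYTES;
		case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES:
		default:
			return DMAENGINE_ALIGN_64_BYTES;
		}
	}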
5317 struct udma_dev *ud;
5326 ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
5327 if (!ud)
5335 ud->match_data = match->data;
5337 ud->soc_data = ud->match_data->soc_data;
5338 if (!ud->soc_data) {
5344 ud->soc_data = soc->data;
5347 ret = udma_get_mmrs(pdev, ud);
5351 ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
5352 if (IS_ERR(ud->tisci_rm.tisci))
5353 return PTR_ERR(ud->tisci_rm.tisci);
5356 &ud->tisci_rm.tisci_dev_id);
5361 pdev->id = ud->tisci_rm.tisci_dev_id;
5364 &ud->tisci_rm.tisci_navss_dev_id);
5370 if (ud->match_data->type == DMA_TYPE_UDMA) {
5372 &ud->atype);
5373 if (!ret && ud->atype > 2) {
5374 dev_err(dev, "Invalid atype: %u\n", ud->atype);
5379 &ud->asel);
5380 if (!ret && ud->asel > 15) {
5381 dev_err(dev, "Invalid asel: %u\n", ud->asel);
5386 ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
5387 ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
5389 if (ud->match_data->type == DMA_TYPE_UDMA) {
5390 ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
5394 ring_init_data.tisci = ud->tisci_rm.tisci;
5395 ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
5396 if (ud->match_data->type == DMA_TYPE_BCDMA) {
5397 ring_init_data.num_rings = ud->bchan_cnt +
5398 ud->tchan_cnt +
5399 ud->rchan_cnt;
5401 ring_init_data.num_rings = ud->rflow_cnt +
5402 ud->tflow_cnt;
5405 ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data);
5408 if (IS_ERR(ud->ringacc))
5409 return PTR_ERR(ud->ringacc);
5417 dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
5419 if (ud->match_data->type != DMA_TYPE_PKTDMA) {
5420 dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
5421 ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
5424 ud->ddev.device_config = udma_slave_config;
5425 ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
5426 ud->ddev.device_issue_pending = udma_issue_pending;
5427 ud->ddev.device_tx_status = udma_tx_status;
5428 ud->ddev.device_pause = udma_pause;
5429 ud->ddev.device_resume = udma_resume;
5430 ud->ddev.device_terminate_all = udma_terminate_all;
5431 ud->ddev.device_synchronize = udma_synchronize;
5433 ud->ddev.dbg_summary_show = udma_dbg_summary_show;
5436 switch (ud->match_data->type) {
5438 ud->ddev.device_alloc_chan_resources =
5442 ud->ddev.device_alloc_chan_resources =
5444 ud->ddev.device_router_config = bcdma_router_config;
5447 ud->ddev.device_alloc_chan_resources =
5453 ud->ddev.device_free_chan_resources = udma_free_chan_resources;
5455 ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
5456 ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
5457 ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
5458 ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
5459 ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
5461 if (ud->match_data->enable_memcpy_support &&
5462 !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) {
5463 dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
5464 ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
5465 ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
5468 ud->ddev.dev = dev;
5469 ud->dev = dev;
5470 ud->psil_base = ud->match_data->psil_base;
5472 INIT_LIST_HEAD(&ud->ddev.channels);
5473 INIT_LIST_HEAD(&ud->desc_to_purge);
5475 ch_count = setup_resources(ud);
5479 spin_lock_init(&ud->lock);
5480 INIT_WORK(&ud->purge_work, udma_purge_desc_work);
5482 ud->desc_align = 64;
5483 if (ud->desc_align < dma_get_cache_alignment())
5484 ud->desc_align = dma_get_cache_alignment();
5486 ret = udma_setup_rx_flush(ud);
5490 for (i = 0; i < ud->bchan_cnt; i++) {
5491 struct udma_bchan *bchan = &ud->bchans[i];
5494 bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
5497 for (i = 0; i < ud->tchan_cnt; i++) {
5498 struct udma_tchan *tchan = &ud->tchans[i];
5501 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
5504 for (i = 0; i < ud->rchan_cnt; i++) {
5505 struct udma_rchan *rchan = &ud->rchans[i];
5508 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
5511 for (i = 0; i < ud->rflow_cnt; i++) {
5512 struct udma_rflow *rflow = &ud->rflows[i];
5518 struct udma_chan *uc = &ud->channels[i];
5520 uc->ud = ud;
5533 vchan_init(&uc->vc, &ud->ddev);
5541 ud->ddev.copy_align = udma_get_copy_align(ud);
5543 ret = dma_async_device_register(&ud->ddev);
5549 platform_set_drvdata(pdev, ud);
5551 ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
5554 dma_async_device_unregister(&ud->ddev);
5562 struct udma_dev *ud = dev_get_drvdata(dev);
5563 struct dma_device *dma_dev = &ud->ddev;
5575 ud->ddev.device_free_chan_resources(chan);
5584 struct udma_dev *ud = dev_get_drvdata(dev);
5585 struct dma_device *dma_dev = &ud->ddev;
5598 ret = ud->ddev.device_alloc_chan_resources(chan);
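The last lines above belong to the system PM hooks: on suspend, channels that still have clients give back their hardware resources; on resume they are re-allocated through the same ddev callbacks. A simplified sketch with hypothetical function names; saving and restoring the per-channel configuration around this is omitted for brevity:

	static int udma_pm_suspend_sketch(struct device *dev)
	{
		struct udma_dev *ud = dev_get_drvdata(dev);
		struct dma_device *dma_dev = &ud->ddev;
		struct dma_chan *chan;

		list_for_each_entry(chan, &dma_dev->channels, device_node) {
			if (chan->client_count)
				ud->ddev.device_free_chan_resources(chan);
		}

		return 0;
	}

	static int udma_pm_resume_sketch(struct device *dev)
	{
		struct udma_dev *ud = dev_get_drvdata(dev);
		struct dma_device *dma_dev = &ud->ddev;
		struct dma_chan *chan;
		int ret;

		list_for_each_entry(chan, &dma_dev->channels, device_node) {
			if (chan->client_count) {
				ret = ud->ddev.device_alloc_chan_resources(chan);
				if (ret < 0)
					return ret;
			}
		}

		return 0;
	}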