Lines matching refs: ud

215 	struct udma_dev *ud;
335 static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
337 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
345 static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
348 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
365 struct device *dev = uc->ud->dev;
442 struct udma_dev *ud = uc->ud;
444 dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size,
454 struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
459 spin_lock_irqsave(&ud->lock, flags);
460 list_splice_tail_init(&ud->desc_to_purge, &head);
461 spin_unlock_irqrestore(&ud->lock, flags);
473 if (!list_empty(&ud->desc_to_purge))
474 schedule_work(&ud->purge_work);
479 struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
493 spin_lock_irqsave(&ud->lock, flags);
494 list_add_tail(&vd->node, &ud->desc_to_purge);
495 spin_unlock_irqrestore(&ud->lock, flags);
497 schedule_work(&ud->purge_work);
545 return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
722 uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);
726 ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
805 uc->ud->match_data;
1052 dev_err(uc->ud->dev, "not matching descriptors!\n");
1113 * @ud: UDMA device
1127 static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1134 tmp_from = ud->rchan_cnt;
1136 if (tmp_from < ud->rchan_cnt)
1139 if (tmp_from + cnt > ud->rflow_cnt)
1142 bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
1143 ud->rflow_cnt);
1146 ud->rflow_cnt,
1148 if (start >= ud->rflow_cnt)
1154 bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
1158 static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1160 if (from < ud->rchan_cnt)
1162 if (from + cnt > ud->rflow_cnt)
1165 bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
1169 static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
1178 if (id < 0 || id >= ud->rflow_cnt)
1181 if (test_bit(id, ud->rflow_in_use))
1185 if (!test_bit(id, ud->rflow_gp_map) &&
1186 !test_bit(id, ud->rflow_gp_map_allocated))
1189 dev_dbg(ud->dev, "get rflow%d\n", id);
1190 set_bit(id, ud->rflow_in_use);
1191 return &ud->rflows[id];
1194 static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
1196 if (!test_bit(rflow->id, ud->rflow_in_use)) {
1197 dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
1201 dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
1202 clear_bit(rflow->id, ud->rflow_in_use);
1206 static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
1211 if (test_bit(id, ud->res##_map)) { \
1212 dev_err(ud->dev, "res##%d is in use\n", id); \
1218 if (tpl >= ud->tpl_levels) \
1219 tpl = ud->tpl_levels - 1; \
1221 start = ud->tpl_start_idx[tpl]; \
1223 id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
1225 if (id == ud->res##_cnt) { \
1230 set_bit(id, ud->res##_map); \
1231 return &ud->res##s[id]; \
1239 struct udma_dev *ud = uc->ud;
1242 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
1247 uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
1254 struct udma_dev *ud = uc->ud;
1257 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
1262 uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
1269 struct udma_dev *ud = uc->ud;
1273 dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
1279 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
1283 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
1289 end = min(ud->tchan_cnt, ud->rchan_cnt);
1291 chan_id = ud->tpl_start_idx[ud->tpl_levels - 1];
1293 if (!test_bit(chan_id, ud->tchan_map) &&
1294 !test_bit(chan_id, ud->rchan_map))
1301 set_bit(chan_id, ud->tchan_map);
1302 set_bit(chan_id, ud->rchan_map);
1303 uc->tchan = &ud->tchans[chan_id];
1304 uc->rchan = &ud->rchans[chan_id];
1311 struct udma_dev *ud = uc->ud;
1314 dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
1319 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
1324 uc->rflow = __udma_get_rflow(ud, flow_id);
1331 struct udma_dev *ud = uc->ud;
1334 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
1336 clear_bit(uc->rchan->id, ud->rchan_map);
1343 struct udma_dev *ud = uc->ud;
1346 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
1348 clear_bit(uc->tchan->id, ud->tchan_map);
1355 struct udma_dev *ud = uc->ud;
1358 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
1360 __udma_put_rflow(ud, uc->rflow);
1381 struct udma_dev *ud = uc->ud;
1388 ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->tchan->id, -1,
1441 struct udma_dev *ud = uc->ud;
1462 fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
1463 ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
1525 struct udma_dev *ud = uc->ud;
1526 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1543 req_tx.tx_atype = ud->atype;
1547 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1557 req_rx.rx_atype = ud->atype;
1561 dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);
1568 struct udma_dev *ud = uc->ud;
1569 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1597 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1604 struct udma_dev *ud = uc->ud;
1605 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1634 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
1678 dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
1686 struct udma_dev *ud = to_udma_dev(chan->device);
1687 const struct udma_soc_data *soc_data = ud->soc_data;
1703 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
1705 ud->desc_align,
1708 dev_err(ud->ddev.dev,
1727 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
1746 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1747 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
1757 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
1764 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1775 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
1783 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
1793 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
1805 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
1808 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
1815 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
1817 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
1826 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
1835 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
1841 uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
1844 dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
1854 dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
1871 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
1914 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
1938 uc->ud->desc_align);
1939 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
2055 dev_err(uc->ud->dev, "size %u is not supported\n",
2140 d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
2182 dev_err(uc->ud->dev,
2218 dev_err(uc->ud->dev,
2375 dev_err(uc->ud->dev,
2406 dev_err(uc->ud->dev, "size %zu is not supported\n",
2492 dev_err(uc->ud->dev,
2548 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
2572 dev_err(uc->ud->dev,
2610 dev_err(uc->ud->dev, "size %zu is not supported\n",
2859 dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
2868 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
2956 struct udma_dev *ud = to_udma_dev(chan->device);
2979 navss_psil_unpair(ud, uc->config.src_thread,
3010 struct udma_dev *ud;
3017 ud = uc->ud;
3021 dev_err(ud->dev, "Invalid channel atype: %u\n",
3036 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
3050 const struct udma_match_data *match_data = ud->match_data;
3066 ucc->metadata_size, ud->desc_align);
3068 dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
3077 struct udma_dev *ud = ofdma->of_dma_data;
3078 dma_cap_mask_t mask = ud->ddev.cap_mask;
3094 dev_err(ud->dev, "get channel fail in %s.\n", __func__);
3164 static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
3169 ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
3170 if (IS_ERR(ud->mmrs[i]))
3171 return PTR_ERR(ud->mmrs[i]);
3177 static int udma_setup_resources(struct udma_dev *ud)
3179 struct device *dev = ud->dev;
3184 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
3189 cap2 = udma_read(ud->mmrs[MMR_GCFG], UDMA_CAP_REG(2));
3190 cap3 = udma_read(ud->mmrs[MMR_GCFG], UDMA_CAP_REG(3));
3192 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
3193 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
3194 ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
3195 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
3196 ch_count = ud->tchan_cnt + ud->rchan_cnt;
3201 ud->tpl_levels = 2;
3202 ud->tpl_start_idx[0] = 8;
3205 ud->tpl_levels = 2;
3206 ud->tpl_start_idx[0] = 2;
3208 ud->tpl_levels = 3;
3209 ud->tpl_start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
3210 ud->tpl_start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
3212 ud->tpl_levels = 2;
3213 ud->tpl_start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
3215 ud->tpl_levels = 1;
3218 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
3220 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
3222 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
3224 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
3226 ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
3229 ud->rflow_gp_map_allocated = devm_kcalloc(dev,
3230 BITS_TO_LONGS(ud->rflow_cnt),
3233 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
3236 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
3239 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
3240 !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
3241 !ud->rflows || !ud->rflow_in_use)
3249 bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
3252 bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
3264 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
3266 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
3269 bitmap_clear(ud->tchan_map, rm_desc->start,
3280 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
3282 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
3285 bitmap_clear(ud->rchan_map, rm_desc->start,
3302 ud->soc_data->rchan_oes_offset;
3305 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
3308 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
3316 bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
3317 ud->rflow_cnt - ud->rchan_cnt);
3321 bitmap_clear(ud->rflow_gp_map, rm_desc->start,
3328 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
3329 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
3333 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
3335 if (!ud->channels)
3340 ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
3341 ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
3342 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
3343 ud->rflow_cnt));
3348 static int udma_setup_rx_flush(struct udma_dev *ud)
3350 struct udma_rx_flush *rx_flush = &ud->rx_flush;
3354 struct device *dev = ud->dev;
3376 ud->desc_align);
3416 ud->desc_align);
3514 struct udma_dev *ud;
3523 ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
3524 if (!ud)
3527 ret = udma_get_mmrs(pdev, ud);
3531 ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
3532 if (IS_ERR(ud->tisci_rm.tisci))
3533 return PTR_ERR(ud->tisci_rm.tisci);
3536 &ud->tisci_rm.tisci_dev_id);
3541 pdev->id = ud->tisci_rm.tisci_dev_id;
3544 &ud->tisci_rm.tisci_navss_dev_id);
3550 ret = of_property_read_u32(dev->of_node, "ti,udma-atype", &ud->atype);
3551 if (!ret && ud->atype > 2) {
3552 dev_err(dev, "Invalid atype: %u\n", ud->atype);
3556 ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
3557 ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
3559 ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
3560 if (IS_ERR(ud->ringacc))
3561 return PTR_ERR(ud->ringacc);
3575 ud->match_data = match->data;
3582 ud->soc_data = soc->data;
3584 dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
3585 dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
3587 ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources;
3588 ud->ddev.device_config = udma_slave_config;
3589 ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
3590 ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
3591 ud->ddev.device_issue_pending = udma_issue_pending;
3592 ud->ddev.device_tx_status = udma_tx_status;
3593 ud->ddev.device_pause = udma_pause;
3594 ud->ddev.device_resume = udma_resume;
3595 ud->ddev.device_terminate_all = udma_terminate_all;
3596 ud->ddev.device_synchronize = udma_synchronize;
3598 ud->ddev.dbg_summary_show = udma_dbg_summary_show;
3601 ud->ddev.device_free_chan_resources = udma_free_chan_resources;
3602 ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
3603 ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
3604 ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
3605 ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
3606 ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
3607 ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
3609 if (ud->match_data->enable_memcpy_support) {
3610 dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
3611 ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
3612 ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
3615 ud->ddev.dev = dev;
3616 ud->dev = dev;
3617 ud->psil_base = ud->match_data->psil_base;
3619 INIT_LIST_HEAD(&ud->ddev.channels);
3620 INIT_LIST_HEAD(&ud->desc_to_purge);
3622 ch_count = udma_setup_resources(ud);
3626 spin_lock_init(&ud->lock);
3627 INIT_WORK(&ud->purge_work, udma_purge_desc_work);
3629 ud->desc_align = 64;
3630 if (ud->desc_align < dma_get_cache_alignment())
3631 ud->desc_align = dma_get_cache_alignment();
3633 ret = udma_setup_rx_flush(ud);
3637 for (i = 0; i < ud->tchan_cnt; i++) {
3638 struct udma_tchan *tchan = &ud->tchans[i];
3641 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
3644 for (i = 0; i < ud->rchan_cnt; i++) {
3645 struct udma_rchan *rchan = &ud->rchans[i];
3648 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
3651 for (i = 0; i < ud->rflow_cnt; i++) {
3652 struct udma_rflow *rflow = &ud->rflows[i];
3658 struct udma_chan *uc = &ud->channels[i];
3660 uc->ud = ud;
3670 vchan_init(&uc->vc, &ud->ddev);
3677 ret = dma_async_device_register(&ud->ddev);
3683 platform_set_drvdata(pdev, ud);
3685 ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
3688 dma_async_device_unregister(&ud->ddev);
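
The entries around lines 454-497 show the deferred descriptor purge: terminated descriptors are queued on ud->desc_to_purge under ud->lock and the purge work is scheduled, while the work handler splices the whole list off under the lock, frees the descriptors with the lock dropped, and reschedules itself if new entries arrived in the meantime. As a rough userspace illustration of that splice-then-free pattern (not driver code; purge_node, defer_free and the pthread mutex are stand-ins invented for this sketch):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct purge_node {
	int id;
	struct purge_node *next;
};

static struct purge_node *purge_list;		/* shared pending list */
static pthread_mutex_t purge_lock = PTHREAD_MUTEX_INITIALIZER;

/* Producer side: queue a node for deferred freeing (cf. lines 493-497). */
static void defer_free(struct purge_node *node)
{
	pthread_mutex_lock(&purge_lock);
	node->next = purge_list;
	purge_list = node;
	pthread_mutex_unlock(&purge_lock);
	/* the driver would now schedule_work(&ud->purge_work) */
}

/* Worker side: detach the whole list under the lock, free outside it. */
static void purge_work(void)
{
	struct purge_node *head, *next;

	pthread_mutex_lock(&purge_lock);
	head = purge_list;		/* cf. list_splice_tail_init(), line 460 */
	purge_list = NULL;
	pthread_mutex_unlock(&purge_lock);

	for (; head; head = next) {	/* no lock held while freeing */
		next = head->next;
		printf("freeing node %d\n", head->id);
		free(head);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct purge_node *n = malloc(sizeof(*n));

		n->id = i;
		defer_free(n);
	}
	purge_work();
	return 0;
}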
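
The macro-generated reservation helpers at lines 1206-1231 act as a small bitmap allocator: an explicitly requested id is claimed only if its bit is clear, otherwise the search begins at the start index recorded for the requested throughput level (clamped if out of range) and the first free bit is taken. A rough userspace sketch of that logic (not driver code; reserve_chan, MAX_CHANS, tpl_start and the level layout are invented for the example):

#include <stdbool.h>
#include <stdio.h>

#define MAX_CHANS	32
#define TPL_LEVELS	2

static bool chan_map[MAX_CHANS];		/* true = in use */
static int tpl_start[TPL_LEVELS] = { 0, 8 };	/* first id per level */

/* Returns the reserved id, or -1 if the request cannot be satisfied. */
static int reserve_chan(int tpl, int id)
{
	if (id >= 0) {
		/* explicit request, cf. the test_bit() check on line 1211 */
		if (id >= MAX_CHANS || chan_map[id])
			return -1;
	} else {
		if (tpl >= TPL_LEVELS)	/* clamp, cf. lines 1218-1219 */
			tpl = TPL_LEVELS - 1;

		/* find_next_zero_bit() equivalent, cf. line 1223 */
		for (id = tpl_start[tpl]; id < MAX_CHANS; id++)
			if (!chan_map[id])
				break;
		if (id == MAX_CHANS)
			return -1;
	}

	chan_map[id] = true;		/* cf. set_bit() on line 1230 */
	return id;
}

int main(void)
{
	printf("level 1  -> chan %d\n", reserve_chan(1, -1));	/* 8 */
	printf("level 0  -> chan %d\n", reserve_chan(0, -1));	/* 0 */
	printf("explicit -> chan %d\n", reserve_chan(0, 8));	/* -1, busy */
	return 0;
}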