Lines Matching refs:nbpf

179  * @nbpf:	DMAC
205 struct nbpf_device *nbpf;
321 static inline u32 nbpf_read(struct nbpf_device *nbpf,
324 u32 data = ioread32(nbpf->base + offset);
325 dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
326 __func__, nbpf->base, offset, data);
330 static inline void nbpf_write(struct nbpf_device *nbpf,
333 iowrite32(data, nbpf->base + offset);
334 dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
335 __func__, nbpf->base, offset, data);
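The two MMIO accessors appear above only as the lines referencing the nbpf symbol (the listing is from the "dma-nbpf" platform driver, per the .name on the last match). Filling in the elided pieces, they presumably read as follows; the `unsigned int offset` parameter and the closing `return data;` are inferences from the matched lines, not search output:

/*
 * Reconstruction of the accessors around lines 321-335; needs
 * <linux/io.h> for ioread32()/iowrite32(). The offset parameter
 * name is an assumption.
 */
static inline u32 nbpf_read(struct nbpf_device *nbpf,
			    unsigned int offset)
{
	u32 data = ioread32(nbpf->base + offset);

	dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
		__func__, nbpf->base, offset, data);
	return data;
}

static inline void nbpf_write(struct nbpf_device *nbpf,
			      unsigned int offset, u32 data)
{
	iowrite32(data, nbpf->base + offset);
	dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
		__func__, nbpf->base, offset, data);
}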
345 u32 status = nbpf_read(chan->nbpf, NBPF_DSTAT_END);
347 return status & BIT(chan - chan->nbpf->chan);
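The `chan - chan->nbpf->chan` on line 347 is pointer subtraction: a channel pointer minus the base of the chan[] array yields the channel's index, and BIT() turns that index into the channel's flag in NBPF_DSTAT_END. A small illustration with a hypothetical index:

/* chan == &nbpf->chan[3] (hypothetical): the subtraction yields 3,
 * BIT(3) == 0x8, so the test isolates channel 3's END status bit. */
struct nbpf_channel *chan = &nbpf->chan[3];
ptrdiff_t index = chan - nbpf->chan;		/* == 3 */
bool done = nbpf_read(nbpf, NBPF_DSTAT_END) & BIT(index);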
355 static u32 nbpf_error_get(struct nbpf_device *nbpf)
357 return nbpf_read(nbpf, NBPF_DSTAT_ER);
360 static struct nbpf_channel *nbpf_error_get_channel(struct nbpf_device *nbpf, u32 error)
362 return nbpf->chan + __ffs(error);
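__ffs() returns the index of the least significant set bit, so line 362 maps the lowest flagged bit in DSTAT_ER to the matching entry of the chan[] array; the error IRQ handler further down (lines 1234-1238) calls it repeatedly until the register reads clean. A worked example with a hypothetical error mask:

/* Hypothetical DSTAT_ER value 0x14 (bits 2 and 4 set): __ffs(0x14) == 2,
 * so this resolves to &nbpf->chan[2]; once channel 2's error is cleared
 * the next read yields 0x10, and __ffs(0x10) == 4 selects chan[4]. */
u32 error = 0x14;
struct nbpf_channel *chan = nbpf_error_get_channel(nbpf, error);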
400 dev_dbg(chan->nbpf->dma_dev.dev, "%s(): next 0x%x, cur 0x%x\n", __func__,
433 static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size,
436 int max_burst = nbpf->config->buffer_size * 8;
438 if (nbpf->max_burst_mem_read || nbpf->max_burst_mem_write) {
441 max_burst = min_not_zero(nbpf->max_burst_mem_read,
442 nbpf->max_burst_mem_write);
445 if (nbpf->max_burst_mem_read)
446 max_burst = nbpf->max_burst_mem_read;
449 if (nbpf->max_burst_mem_write)
450 max_burst = nbpf->max_burst_mem_write;
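The refs search drops the switch and case lines between the fragments at 436-450, which makes the burst clamping hard to follow. A sketch of the likely structure, assuming the branches select on enum dma_transfer_direction: memory-to-memory must honour both DT limits (min_not_zero() picks the smaller while treating 0, i.e. property absent, as "no limit"), while the device-facing directions each apply only the memory-side limit they involve:

/* Sketch of nbpf_xfer_ds() around lines 433-450; the case labels and
 * the final return are assumptions bridging the matched fragments. */
static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size,
			enum dma_transfer_direction direction)
{
	int max_burst = nbpf->config->buffer_size * 8;

	if (nbpf->max_burst_mem_read || nbpf->max_burst_mem_write) {
		switch (direction) {
		case DMA_MEM_TO_MEM:
			/* both ends are memory: take the stricter limit */
			max_burst = min_not_zero(nbpf->max_burst_mem_read,
						 nbpf->max_burst_mem_write);
			break;
		case DMA_MEM_TO_DEV:
			if (nbpf->max_burst_mem_read)
				max_burst = nbpf->max_burst_mem_read;
			break;
		case DMA_DEV_TO_MEM:
			if (nbpf->max_burst_mem_write)
				max_burst = nbpf->max_burst_mem_write;
			break;
		default:
			break;
		}
	}

	/* the clamp combining `size` with max_burst is not visible in the
	 * matches; returning the burst limit alone keeps the sketch simple */
	return max_burst;
}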
462 static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
490 return nbpf_xfer_ds(nbpf, size, DMA_TRANS_NONE);
539 mem_xfer = nbpf_xfer_ds(chan->nbpf, size, direction);
593 static void nbpf_configure(struct nbpf_device *nbpf)
595 nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT);
856 dev_dbg(chan->nbpf->dma_dev.dev, "%s(): force-free desc %p cookie %d\n",
904 chan->slave_dst_width = nbpf_xfer_size(chan->nbpf,
906 chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf,
910 chan->slave_src_width = nbpf_xfer_size(chan->nbpf,
912 chan->slave_src_burst = nbpf_xfer_size(chan->nbpf,
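Lines 904-912 sit in the driver's dma_slave_config handler: bus widths and bursts requested by a peripheral driver are normalized through nbpf_xfer_size() and cached on the channel for later descriptor setup. The client side that feeds this is the standard dmaengine call; a minimal consumer sketch (FIFO address, width, and burst values are hypothetical):

/* Hypothetical dmaengine client; dmaengine_slave_config() delivers
 * this into the controller driver's .device_config callback, which
 * is where the slave width/burst lines above execute. */
#include <linux/dmaengine.h>

static int example_cfg(struct dma_chan *dchan, dma_addr_t fifo)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,
	};

	return dmaengine_slave_config(dchan, &cfg);
}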
1090 struct nbpf_device *nbpf = ofdma->of_dma_data;
1097 dchan = dma_get_any_slave_channel(&nbpf->dma_dev);
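Lines 1090 and 1097 come from the driver's OF translation callback: the nbpf_device pointer handed to of_dma_controller_register() on line 1443 returns as ofdma->of_dma_data, and dma_get_any_slave_channel() picks any free channel of that controller. A sketch of the callback's shape; the decoding of dma_spec->args[] is summarized as a comment because those lines don't reference the nbpf symbol:

/* Sketch of the xlate callback around lines 1090-1097; needs
 * <linux/of_dma.h>. */
static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct nbpf_device *nbpf = ofdma->of_dma_data;
	struct dma_chan *dchan;

	dchan = dma_get_any_slave_channel(&nbpf->dma_dev);
	if (!dchan)
		return NULL;

	/* the real callback also decodes dma_spec->args[] (per the DT
	 * binding) into the chosen channel before returning it */
	return dchan;
}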
1225 struct nbpf_device *nbpf = dev;
1226 u32 error = nbpf_error_get(nbpf);
1228 dev_warn(nbpf->dma_dev.dev, "DMA error IRQ %u\n", irq);
1234 struct nbpf_channel *chan = nbpf_error_get_channel(nbpf, error);
1238 error = nbpf_error_get(nbpf);
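Lines 1225-1238 outline the error interrupt handler: read DSTAT_ER, warn, then repeatedly resolve and recover the lowest flagged channel until the register reads back zero. The handler's own name and the per-channel recovery calls don't reference the nbpf symbol, so they are missing above; a reconstruction with those pieces hedged as stubs:

/* Hypothetical stand-in for the per-channel recovery (clearing the
 * error and idling the channel) that the matches elide. */
static void nbpf_chan_recover(struct nbpf_channel *chan)
{
}

/* Sketch of the handler around lines 1225-1238; the name and the
 * do/while shape are assumptions consistent with line 1238's re-read. */
static irqreturn_t nbpf_err_irq(int irq, void *dev)
{
	struct nbpf_device *nbpf = dev;
	u32 error = nbpf_error_get(nbpf);

	dev_warn(nbpf->dma_dev.dev, "DMA error IRQ %u\n", irq);

	if (!error)
		return IRQ_NONE;

	do {
		struct nbpf_channel *chan = nbpf_error_get_channel(nbpf, error);

		nbpf_chan_recover(chan);
		error = nbpf_error_get(nbpf);
	} while (error);

	return IRQ_HANDLED;
}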
1244 static int nbpf_chan_probe(struct nbpf_device *nbpf, int n)
1246 struct dma_device *dma_dev = &nbpf->dma_dev;
1247 struct nbpf_channel *chan = nbpf->chan + n;
1250 chan->nbpf = nbpf;
1251 chan->base = nbpf->base + NBPF_REG_CHAN_OFFSET + NBPF_REG_CHAN_SIZE * n;
1260 snprintf(chan->name, sizeof(chan->name), "nbpf %d", n);
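Line 1251 places each channel's register window at a fixed offset past the common registers plus a per-channel stride, and line 1260 gives the channel a printable name for its IRQ. The window arithmetic, with hypothetical values for the two constants:

/* Hypothetical constants for illustration only; the real values live
 * in the driver's register definitions. */
#define EXAMPLE_CHAN_OFFSET	0x80
#define EXAMPLE_CHAN_SIZE	0x40

/* mirroring line 1251: channel 3 maps at base + 0x80 + 0x40 * 3,
 * i.e. base + 0x140 */
void __iomem *chan_base = nbpf->base + EXAMPLE_CHAN_OFFSET +
			  EXAMPLE_CHAN_SIZE * 3;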
1294 struct nbpf_device *nbpf;
1311 nbpf = devm_kzalloc(dev, struct_size(nbpf, chan, num_channels),
1313 if (!nbpf)
1316 dma_dev = &nbpf->dma_dev;
1319 nbpf->base = devm_platform_ioremap_resource(pdev, 0);
1320 if (IS_ERR(nbpf->base))
1321 return PTR_ERR(nbpf->base);
1323 nbpf->clk = devm_clk_get(dev, NULL);
1324 if (IS_ERR(nbpf->clk))
1325 return PTR_ERR(nbpf->clk);
1328 &nbpf->max_burst_mem_read);
1330 &nbpf->max_burst_mem_write);
1332 nbpf->config = cfg;
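Lines 1311-1332 are the managed-resource probe prologue: allocate the device structure with its flexible chan[] array via struct_size(), map the MMIO region, take the functional clock, and read two optional DT burst limits. Condensed into sequence (the DT property names are assumptions, since the matches show only the destination fields; everything else is read straight off the matched lines):

/* Condensed probe prologue, lines 1311-1332 */
nbpf = devm_kzalloc(dev, struct_size(nbpf, chan, num_channels),
		    GFP_KERNEL);
if (!nbpf)
	return -ENOMEM;

nbpf->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(nbpf->base))
	return PTR_ERR(nbpf->base);

nbpf->clk = devm_clk_get(dev, NULL);
if (IS_ERR(nbpf->clk))
	return PTR_ERR(nbpf->clk);

/* optional limits; absent properties leave the fields 0 == unlimited */
of_property_read_u32(np, "max-burst-mem-read",		/* name assumed */
		     &nbpf->max_burst_mem_read);
of_property_read_u32(np, "max-burst-mem-write",		/* name assumed */
		     &nbpf->max_burst_mem_write);

nbpf->config = cfg;	/* per-SoC config, e.g. from the OF match data */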
1355 nbpf->chan[i].irq = irqbuf[0];
1364 for (i = 0, chan = nbpf->chan; i <= num_channels;
1372 if (chan != nbpf->chan + num_channels)
1382 nbpf->chan[i].irq = irq;
1387 IRQF_SHARED, "dma error", nbpf);
1390 nbpf->eirq = eirq;
1396 ret = nbpf_chan_probe(nbpf, i);
1431 platform_set_drvdata(pdev, nbpf);
1433 ret = clk_prepare_enable(nbpf->clk);
1437 nbpf_configure(nbpf);
1443 ret = of_dma_controller_register(np, nbpf_of_xlate, nbpf);
1452 clk_disable_unprepare(nbpf->clk);
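Lines 1431-1452 are the probe epilogue plus its unwind: set the driver data, enable the clock (nbpf_configure() on line 1437 writes NBPF_CTRL and therefore needs it running), register with the dmaengine core and the OF DMA framework, and disable the clock again on any failure. A condensed sketch; the dma_async_device_register() step and the unwind labels are assumptions, since those lines don't reference the nbpf symbol:

/* Condensed probe epilogue, lines 1431-1452 */
platform_set_drvdata(pdev, nbpf);

ret = clk_prepare_enable(nbpf->clk);
if (ret < 0)
	return ret;

nbpf_configure(nbpf);

ret = dma_async_device_register(dma_dev);	/* assumed step */
if (ret < 0)
	goto e_clk_off;

ret = of_dma_controller_register(np, nbpf_of_xlate, nbpf);
if (ret)
	goto e_dma_unreg;

return 0;

e_dma_unreg:	/* label names hypothetical */
dma_async_device_unregister(dma_dev);
e_clk_off:
clk_disable_unprepare(nbpf->clk);
return ret;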
1459 struct nbpf_device *nbpf = platform_get_drvdata(pdev);
1462 devm_free_irq(&pdev->dev, nbpf->eirq, nbpf);
1464 for (i = 0; i < nbpf->config->num_channels; i++) {
1465 struct nbpf_channel *chan = nbpf->chan + i;
1473 dma_async_device_unregister(&nbpf->dma_dev);
1474 clk_disable_unprepare(nbpf->clk);
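Removal mirrors probe in reverse, and line 1462 frees the shared error IRQ explicitly up front so its handler can't fire against a half-torn-down device (devm alone would only release it after remove() returns). The per-channel loop body and the OF deregistration don't reference the nbpf symbol; a sketch with those gaps filled as assumptions:

/* Condensed teardown, lines 1459-1474; the loop body and the
 * of_dma_controller_free() call are inferred, not matched. */
devm_free_irq(&pdev->dev, nbpf->eirq, nbpf);

for (i = 0; i < nbpf->config->num_channels; i++) {
	struct nbpf_channel *chan = nbpf->chan + i;

	devm_free_irq(&pdev->dev, chan->irq, chan);
	tasklet_kill(&chan->tasklet);
}

of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&nbpf->dma_dev);
clk_disable_unprepare(nbpf->clk);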
1496 struct nbpf_device *nbpf = dev_get_drvdata(dev);
1497 clk_disable_unprepare(nbpf->clk);
1503 struct nbpf_device *nbpf = dev_get_drvdata(dev);
1504 return clk_prepare_enable(nbpf->clk);
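Lines 1496-1504 form the runtime PM pair: suspend gates the functional clock, resume turns it back on, with clk_prepare_enable()'s return value propagated as the resume result. Wired into dev_pm_ops they would look roughly like this; the function and ops names are assumptions, SET_RUNTIME_PM_OPS is the stock kernel macro:

/* Sketch of the runtime PM hookup around lines 1496-1504 */
static int nbpf_runtime_suspend(struct device *dev)
{
	struct nbpf_device *nbpf = dev_get_drvdata(dev);

	clk_disable_unprepare(nbpf->clk);
	return 0;
}

static int nbpf_runtime_resume(struct device *dev)
{
	struct nbpf_device *nbpf = dev_get_drvdata(dev);

	return clk_prepare_enable(nbpf->clk);
}

static const struct dev_pm_ops nbpf_pm_ops = {
	SET_RUNTIME_PM_OPS(nbpf_runtime_suspend, nbpf_runtime_resume, NULL)
};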
1514 .name = "dma-nbpf",