Lines matching refs: nbpf (cross-reference listing; the leading number on each line is its line number in the source file, apparently the Linux "dma-nbpf" AXI DMAC driver)

180  * @nbpf:	DMAC
206 struct nbpf_device *nbpf;
322 static inline u32 nbpf_read(struct nbpf_device *nbpf,
325 u32 data = ioread32(nbpf->base + offset);
326 dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
327 __func__, nbpf->base, offset, data);
331 static inline void nbpf_write(struct nbpf_device *nbpf,
334 iowrite32(data, nbpf->base + offset);
335 dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
336 __func__, nbpf->base, offset, data);
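Most of the references below funnel through the two MMIO accessors quoted above. A minimal sketch reassembled from those fragments; the offset/data parameter names are an assumption, only the bodies appear in the listing.

    /* 32-bit register accessors relative to nbpf->base, traced via dev_dbg(). */
    static inline u32 nbpf_read(struct nbpf_device *nbpf,
                                unsigned int offset)
    {
            u32 data = ioread32(nbpf->base + offset);

            dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
                    __func__, nbpf->base, offset, data);
            return data;
    }

    static inline void nbpf_write(struct nbpf_device *nbpf,
                                  unsigned int offset, u32 data)
    {
            iowrite32(data, nbpf->base + offset);
            dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
                    __func__, nbpf->base, offset, data);
    }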
346 u32 status = nbpf_read(chan->nbpf, NBPF_DSTAT_END);
348 return status & BIT(chan - chan->nbpf->chan);
356 static u32 nbpf_error_get(struct nbpf_device *nbpf)
358 return nbpf_read(nbpf, NBPF_DSTAT_ER);
361 static struct nbpf_channel *nbpf_error_get_channel(struct nbpf_device *nbpf, u32 error)
363 return nbpf->chan + __ffs(error);
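Taken together, the two error helpers show how failures are reported: NBPF_DSTAT_ER reads back as a per-channel error bitmask, and the lowest set bit indexes straight into the nbpf->chan[] array. A sketch using only the quoted lines:

    static u32 nbpf_error_get(struct nbpf_device *nbpf)
    {
            return nbpf_read(nbpf, NBPF_DSTAT_ER);
    }

    /* Lowest set error bit selects the first faulting channel. */
    static struct nbpf_channel *nbpf_error_get_channel(struct nbpf_device *nbpf,
                                                       u32 error)
    {
            return nbpf->chan + __ffs(error);
    }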
401 dev_dbg(chan->nbpf->dma_dev.dev, "%s(): next 0x%x, cur 0x%x\n", __func__,
434 static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size,
437 int max_burst = nbpf->config->buffer_size * 8;
439 if (nbpf->max_burst_mem_read || nbpf->max_burst_mem_write) {
442 max_burst = min_not_zero(nbpf->max_burst_mem_read,
443 nbpf->max_burst_mem_write);
446 if (nbpf->max_burst_mem_read)
447 max_burst = nbpf->max_burst_mem_read;
450 if (nbpf->max_burst_mem_write)
451 max_burst = nbpf->max_burst_mem_write;
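The nbpf_xfer_ds() fragments show how the burst limit is chosen: the default is derived from the controller's buffer size, and optional memory-side limits (from DT properties) override it depending on transfer direction. Below is a hypothetical helper isolating just that selection; in the driver it sits at the top of nbpf_xfer_ds(), and the direction mapping (memory reads capped by max_burst_mem_read, memory writes by max_burst_mem_write) is inferred from the field names. The rest of nbpf_xfer_ds(), which encodes the size, is not part of the listing.

    static int nbpf_max_burst(struct nbpf_device *nbpf,
                              enum dma_transfer_direction direction)
    {
            int max_burst = nbpf->config->buffer_size * 8;

            if (nbpf->max_burst_mem_read || nbpf->max_burst_mem_write) {
                    switch (direction) {
                    case DMA_MEM_TO_MEM:
                            max_burst = min_not_zero(nbpf->max_burst_mem_read,
                                                     nbpf->max_burst_mem_write);
                            break;
                    case DMA_MEM_TO_DEV:
                            if (nbpf->max_burst_mem_read)
                                    max_burst = nbpf->max_burst_mem_read;
                            break;
                    case DMA_DEV_TO_MEM:
                            if (nbpf->max_burst_mem_write)
                                    max_burst = nbpf->max_burst_mem_write;
                            break;
                    default:
                            break;
                    }
            }

            return max_burst;
    }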
463 static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
491 return nbpf_xfer_ds(nbpf, size, DMA_TRANS_NONE);
540 mem_xfer = nbpf_xfer_ds(chan->nbpf, size, direction);
594 static void nbpf_configure(struct nbpf_device *nbpf)
596 nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT);
857 dev_dbg(chan->nbpf->dma_dev.dev, "%s(): force-free desc %p cookie %d\n",
905 chan->slave_dst_width = nbpf_xfer_size(chan->nbpf,
907 chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf,
911 chan->slave_src_width = nbpf_xfer_size(chan->nbpf,
913 chan->slave_src_burst = nbpf_xfer_size(chan->nbpf,
1091 struct nbpf_device *nbpf = ofdma->of_dma_data;
1098 dchan = dma_get_any_slave_channel(&nbpf->dma_dev);
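The two of_dma lines show the OF translation path: the nbpf_device is recovered from ofdma->of_dma_data and a free channel is claimed with dma_get_any_slave_channel(). A sketch of that callback; the function name, the two-cell argument check and the terminal-assignment step are assumptions, only the two quoted lines come from the listing.

    static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
                                          struct of_dma *ofdma)
    {
            struct nbpf_device *nbpf = ofdma->of_dma_data;
            struct dma_chan *dchan;

            if (dma_spec->args_count != 2)      /* assumed #dma-cells */
                    return NULL;

            dchan = dma_get_any_slave_channel(&nbpf->dma_dev);
            if (!dchan)
                    return NULL;

            /* The real callback then stores the request terminal taken
             * from dma_spec->args[] in the channel; omitted here. */
            return dchan;
    }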
1226 struct nbpf_device *nbpf = dev;
1227 u32 error = nbpf_error_get(nbpf);
1229 dev_warn(nbpf->dma_dev.dev, "DMA error IRQ %u\n", irq);
1235 struct nbpf_channel *chan = nbpf_error_get_channel(nbpf, error);
1239 error = nbpf_error_get(nbpf);
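The fragments around 1226-1239 belong to the error interrupt handler: read the error bitmask, warn, then loop resolving each set bit to its channel and re-reading NBPF_DSTAT_ER until no errors remain. A sketch of that loop; the handler name and the per-channel clear/idle helpers are assumptions, only the reads, the warning and the channel lookup appear in the listing.

    static irqreturn_t nbpf_err_irq(int irq, void *dev)
    {
            struct nbpf_device *nbpf = dev;
            u32 error = nbpf_error_get(nbpf);

            dev_warn(nbpf->dma_dev.dev, "DMA error IRQ %u\n", irq);

            if (!error)
                    return IRQ_NONE;

            do {
                    struct nbpf_channel *chan = nbpf_error_get_channel(nbpf, error);

                    /* Assumed helper names: clear the channel's error state
                     * and abort its queued transfers. */
                    nbpf_error_clear(chan);
                    nbpf_chan_idle(chan);

                    error = nbpf_error_get(nbpf);
            } while (error);

            return IRQ_HANDLED;
    }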
1245 static int nbpf_chan_probe(struct nbpf_device *nbpf, int n)
1247 struct dma_device *dma_dev = &nbpf->dma_dev;
1248 struct nbpf_channel *chan = nbpf->chan + n;
1251 chan->nbpf = nbpf;
1252 chan->base = nbpf->base + NBPF_REG_CHAN_OFFSET + NBPF_REG_CHAN_SIZE * n;
1261 snprintf(chan->name, sizeof(chan->name), "nbpf %d", n);
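The nbpf_chan_probe() fragments show the per-channel setup: a back-pointer to the device, a register window computed from the channel stride, and a printable name. A sketch limited to what the listing quotes; the dma_chan member name and the list hookup are assumptions, and the IRQ/tasklet initialisation done by the real function is omitted.

    static int nbpf_chan_probe(struct nbpf_device *nbpf, int n)
    {
            struct dma_device *dma_dev = &nbpf->dma_dev;
            struct nbpf_channel *chan = nbpf->chan + n;
            struct dma_chan *dchan = &chan->dma_chan;    /* assumed member */

            chan->nbpf = nbpf;
            chan->base = nbpf->base + NBPF_REG_CHAN_OFFSET +
                         NBPF_REG_CHAN_SIZE * n;
            snprintf(chan->name, sizeof(chan->name), "nbpf %d", n);

            /* Link the channel into the dmaengine device. */
            dchan->device = dma_dev;
            list_add_tail(&dchan->device_node, &dma_dev->channels);

            return 0;
    }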
1295 struct nbpf_device *nbpf;
1313 nbpf = devm_kzalloc(dev, struct_size(nbpf, chan, num_channels),
1315 if (!nbpf)
1318 dma_dev = &nbpf->dma_dev;
1322 nbpf->base = devm_ioremap_resource(dev, iomem);
1323 if (IS_ERR(nbpf->base))
1324 return PTR_ERR(nbpf->base);
1326 nbpf->clk = devm_clk_get(dev, NULL);
1327 if (IS_ERR(nbpf->clk))
1328 return PTR_ERR(nbpf->clk);
1331 &nbpf->max_burst_mem_read);
1333 &nbpf->max_burst_mem_write);
1335 nbpf->config = cfg;
1360 nbpf->chan[i].irq = irqbuf[0];
1369 for (i = 0, chan = nbpf->chan; i <= num_channels;
1377 if (chan != nbpf->chan + num_channels)
1387 nbpf->chan[i].irq = irq;
1392 IRQF_SHARED, "dma error", nbpf);
1395 nbpf->eirq = eirq;
1401 ret = nbpf_chan_probe(nbpf, i);
1436 platform_set_drvdata(pdev, nbpf);
1438 ret = clk_prepare_enable(nbpf->clk);
1442 nbpf_configure(nbpf);
1448 ret = of_dma_controller_register(np, nbpf_of_xlate, nbpf);
1457 clk_disable_unprepare(nbpf->clk);
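The probe fragments from 1436 onward give the bring-up order at the end of probe: stash the driver data, enable the clock, program the global control register via nbpf_configure(), then register the OF translation hook, unwinding the clock on failure. A sketch of that tail; the dma_async_device_register() step and the error-label structure are assumptions not shown in the listing.

            platform_set_drvdata(pdev, nbpf);

            ret = clk_prepare_enable(nbpf->clk);
            if (ret < 0)
                    return ret;

            nbpf_configure(nbpf);

            /* Assumed: the dmaengine device must be registered before
             * the OF controller hook is installed. */
            ret = dma_async_device_register(dma_dev);
            if (ret < 0)
                    goto e_clk_off;

            ret = of_dma_controller_register(np, nbpf_of_xlate, nbpf);
            if (ret < 0)
                    goto e_dma_dev_unreg;

            return 0;

    e_dma_dev_unreg:
            dma_async_device_unregister(dma_dev);
    e_clk_off:
            clk_disable_unprepare(nbpf->clk);

            return ret;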
1464 struct nbpf_device *nbpf = platform_get_drvdata(pdev);
1467 devm_free_irq(&pdev->dev, nbpf->eirq, nbpf);
1469 for (i = 0; i < nbpf->config->num_channels; i++) {
1470 struct nbpf_channel *chan = nbpf->chan + i;
1478 dma_async_device_unregister(&nbpf->dma_dev);
1479 clk_disable_unprepare(nbpf->clk);
1501 struct nbpf_device *nbpf = dev_get_drvdata(dev);
1502 clk_disable_unprepare(nbpf->clk);
1508 struct nbpf_device *nbpf = dev_get_drvdata(dev);
1509 return clk_prepare_enable(nbpf->clk);
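The last code fragments (1501-1509) are the runtime-PM hooks: suspend gates the functional clock, resume re-enables it. A sketch; the callback names are assumptions, only the bodies appear in the listing.

    static int nbpf_runtime_suspend(struct device *dev)
    {
            struct nbpf_device *nbpf = dev_get_drvdata(dev);

            clk_disable_unprepare(nbpf->clk);
            return 0;
    }

    static int nbpf_runtime_resume(struct device *dev)
    {
            struct nbpf_device *nbpf = dev_get_drvdata(dev);

            return clk_prepare_enable(nbpf->clk);
    }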
1519 .name = "dma-nbpf",