Lines Matching refs:imxdma
144 struct imxdma_engine *imxdma;
190 struct imxdma_engine *imxdma;
207 static inline int is_imx1_dma(struct imxdma_engine *imxdma)
209 return imxdma->devtype == IMX1_DMA;
212 static inline int is_imx27_dma(struct imxdma_engine *imxdma)
214 return imxdma->devtype == IMX27_DMA;
237 static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
240 __raw_writel(val, imxdma->base + offset);
243 static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
245 return __raw_readl(imxdma->base + offset);
250 struct imxdma_engine *imxdma = imxdmac->imxdma;
252 if (is_imx27_dma(imxdma))
264 struct imxdma_engine *imxdma = imxdmac->imxdma;
273 imx_dmav1_writel(imxdma, sg->dma_address,
276 imx_dmav1_writel(imxdma, sg->dma_address,
279 imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));
281 dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
283 imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
284 imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
285 imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));
291 struct imxdma_engine *imxdma = imxdmac->imxdma;
295 dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);
299 imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
300 imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
302 imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
305 if (!is_imx1_dma(imxdma) &&
311 tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
312 imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
322 struct imxdma_engine *imxdma = imxdmac->imxdma;
326 dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);
332 imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
334 imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
336 imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
343 struct imxdma_engine *imxdma = imxdmac->imxdma;
346 imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));
350 dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
356 struct imxdma_engine *imxdma = dev_id;
361 disr = imx_dmav1_readl(imxdma, DMA_DISR);
363 err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
364 imx_dmav1_readl(imxdma, DMA_DRTOSR) |
365 imx_dmav1_readl(imxdma, DMA_DSESR) |
366 imx_dmav1_readl(imxdma, DMA_DBOSR);
371 imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);
378 if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
379 imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
382 if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
383 imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
386 if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
387 imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
390 if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
391 imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
395 tasklet_schedule(&imxdma->channel[i].dma_tasklet);
397 dev_warn(imxdma->dev,
409 struct imxdma_engine *imxdma = imxdmac->imxdma;
414 spin_lock_irqsave(&imxdma->lock, flags);
416 spin_unlock_irqrestore(&imxdma->lock, flags);
423 spin_unlock_irqrestore(&imxdma->lock, flags);
432 tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));
442 imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
444 imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
449 imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
465 imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
472 struct imxdma_engine *imxdma = dev_id;
475 if (!is_imx1_dma(imxdma))
478 disr = imx_dmav1_readl(imxdma, DMA_DISR);
480 dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);
482 imx_dmav1_writel(imxdma, disr, DMA_DISR);
485 dma_irq_handle_channel(&imxdma->channel[i]);
494 struct imxdma_engine *imxdma = imxdmac->imxdma;
503 if ((imxdma->slots_2d[i].count > 0) &&
504 ((imxdma->slots_2d[i].xsr != d->x) ||
505 (imxdma->slots_2d[i].ysr != d->y) ||
506 (imxdma->slots_2d[i].wsr != d->w)))
514 imxdma->slots_2d[slot].xsr = d->x;
515 imxdma->slots_2d[slot].ysr = d->y;
516 imxdma->slots_2d[slot].wsr = d->w;
517 imxdma->slots_2d[slot].count++;
525 imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
526 imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
527 imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
531 imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
532 imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
533 imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
541 imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
542 imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
543 imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
546 imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));
548 dev_dbg(imxdma->dev,
559 imx_dmav1_writel(imxdma, imxdmac->per_address,
561 imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
564 dev_dbg(imxdma->dev,
570 imx_dmav1_writel(imxdma, imxdmac->per_address,
572 imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
575 dev_dbg(imxdma->dev,
581 dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
599 struct imxdma_engine *imxdma = imxdmac->imxdma;
603 spin_lock_irqsave(&imxdma->lock, flags);
607 spin_unlock_irqrestore(&imxdma->lock, flags);
623 imxdma->slots_2d[imxdmac->slot_2d].count--;
634 dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
638 spin_unlock_irqrestore(&imxdma->lock, flags);
646 struct imxdma_engine *imxdma = imxdmac->imxdma;
651 spin_lock_irqsave(&imxdma->lock, flags);
654 spin_unlock_irqrestore(&imxdma->lock, flags);
663 struct imxdma_engine *imxdma = imxdmac->imxdma;
697 imx_dmav1_writel(imxdma, imxdmac->dma_request,
701 imx_dmav1_writel(imxdma, imxdmac->watermark_level *
727 struct imxdma_engine *imxdma = imxdmac->imxdma;
731 spin_lock_irqsave(&imxdma->lock, flags);
734 spin_unlock_irqrestore(&imxdma->lock, flags);
772 struct imxdma_engine *imxdma = imxdmac->imxdma;
776 spin_lock_irqsave(&imxdma->lock, flags);
782 spin_unlock_irqrestore(&imxdma->lock, flags);
853 struct imxdma_engine *imxdma = imxdmac->imxdma;
858 dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
910 struct imxdma_engine *imxdma = imxdmac->imxdma;
913 dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
941 struct imxdma_engine *imxdma = imxdmac->imxdma;
944 dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n"
983 struct imxdma_engine *imxdma = imxdmac->imxdma;
987 spin_lock_irqsave(&imxdma->lock, flags);
994 dev_warn(imxdma->dev,
1002 spin_unlock_irqrestore(&imxdma->lock, flags);
1010 if (chan->device->dev != fdata->imxdma->dev)
1023 struct imxdma_engine *imxdma = ofdma->of_dma_data;
1025 .imxdma = imxdma,
1033 return dma_request_channel(imxdma->dma_device.cap_mask,
1039 struct imxdma_engine *imxdma;
1043 imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
1044 if (!imxdma)
1047 imxdma->dev = &pdev->dev;
1048 imxdma->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);
1050 imxdma->base = devm_platform_ioremap_resource(pdev, 0);
1051 if (IS_ERR(imxdma->base))
1052 return PTR_ERR(imxdma->base);
1058 imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
1059 if (IS_ERR(imxdma->dma_ipg))
1060 return PTR_ERR(imxdma->dma_ipg);
1062 imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
1063 if (IS_ERR(imxdma->dma_ahb))
1064 return PTR_ERR(imxdma->dma_ahb);
1066 ret = clk_prepare_enable(imxdma->dma_ipg);
1069 ret = clk_prepare_enable(imxdma->dma_ahb);
1074 imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);
1076 if (is_imx1_dma(imxdma)) {
1078 dma_irq_handler, 0, "DMA", imxdma);
1080 dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
1083 imxdma->irq = irq;
1092 imxdma_err_handler, 0, "DMA", imxdma);
1094 dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
1097 imxdma->irq_err = irq_err;
1101 imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);
1104 imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);
1107 imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);
1109 INIT_LIST_HEAD(&imxdma->dma_device.channels);
1111 dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
1112 dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
1113 dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
1114 dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);
1118 imxdma->slots_2d[i].count = 0;
1120 spin_lock_init(&imxdma->lock);
1124 struct imxdma_channel *imxdmac = &imxdma->channel[i];
1126 if (!is_imx1_dma(imxdma)) {
1128 dma_irq_handler, 0, "DMA", imxdma);
1130 dev_warn(imxdma->dev, "Can't register IRQ %d "
1140 imxdmac->imxdma = imxdma;
1147 imxdmac->chan.device = &imxdma->dma_device;
1153 &imxdma->dma_device.channels);
1156 imxdma->dma_device.dev = &pdev->dev;
1158 imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
1159 imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
1160 imxdma->dma_device.device_tx_status = imxdma_tx_status;
1161 imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
1162 imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
1163 imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
1164 imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
1165 imxdma->dma_device.device_config = imxdma_config;
1166 imxdma->dma_device.device_terminate_all = imxdma_terminate_all;
1167 imxdma->dma_device.device_issue_pending = imxdma_issue_pending;
1169 platform_set_drvdata(pdev, imxdma);
1171 imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;
1172 dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
1174 ret = dma_async_device_register(&imxdma->dma_device);
1182 imxdma_xlate, imxdma);
1192 dma_async_device_unregister(&imxdma->dma_device);
1194 clk_disable_unprepare(imxdma->dma_ahb);
1196 clk_disable_unprepare(imxdma->dma_ipg);
1200 static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *imxdma)
1204 if (is_imx1_dma(imxdma)) {
1205 disable_irq(imxdma->irq);
1206 disable_irq(imxdma->irq_err);
1210 struct imxdma_channel *imxdmac = &imxdma->channel[i];
1212 if (!is_imx1_dma(imxdma))
1221 struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
1223 imxdma_free_irq(pdev, imxdma);
1225 dma_async_device_unregister(&imxdma->dma_device);
1230 clk_disable_unprepare(imxdma->dma_ipg);
1231 clk_disable_unprepare(imxdma->dma_ahb);
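
Nearly every matched line above (apparently the Linux i.MX DMA engine driver, drivers/dma/imx-dma.c) funnels through the same two accessors, imx_dmav1_writel() and imx_dmav1_readl(), which add a register offset to the engine's mapped base (lines 237-245), while the per-channel code first recovers the engine via imxdmac->imxdma. Below is a minimal standalone sketch of that accessor pattern only. It is a userspace model, not driver code: a plain array stands in for the ioremap()ed register window and for __raw_writel()/__raw_readl(), the model_* names and the numeric offsets are illustrative inventions, and only the register macro names and the "clear then mask all channel interrupts" sequence mirror the listing (lines 1104 and 1107).

/* Standalone model of the base + offset accessor pattern in the listing. */
#include <stdint.h>
#include <stdio.h>

#define DMA_DISR	0x08		/* placeholder offsets, not the hardware map */
#define DMA_DIMR	0x0c
#define IMX_DMA_CHANNELS 16		/* the driver's channel count */

struct imxdma_engine_model {
	uint8_t *base;			/* stands in for imxdma->base */
};

/* ~ imx_dmav1_writel(): one place where base + offset is computed */
static void model_writel(struct imxdma_engine_model *e, uint32_t val,
			 unsigned offset)
{
	*(volatile uint32_t *)(e->base + offset) = val;
}

/* ~ imx_dmav1_readl() */
static uint32_t model_readl(struct imxdma_engine_model *e, unsigned offset)
{
	return *(volatile uint32_t *)(e->base + offset);
}

int main(void)
{
	static uint32_t regs[0x40];	/* fake register window */
	struct imxdma_engine_model e = { .base = (uint8_t *)regs };

	/* mirrors probe(): clear all channel interrupts, then mask them */
	model_writel(&e, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);
	model_writel(&e, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	printf("DIMR = 0x%08x\n", (unsigned)model_readl(&e, DMA_DIMR));
	return 0;
}

Keeping the base + offset arithmetic inside one read/write pair is why the rest of the matched lines only ever pass the engine pointer plus a register macro such as DMA_CCR(channel) or DMA_DISR, rather than raw addresses.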