Lines matching refs: imxdma

144 	struct imxdma_engine		*imxdma;
190 struct imxdma_engine *imxdma;
226 static inline int is_imx1_dma(struct imxdma_engine *imxdma)
228 return imxdma->devtype == IMX1_DMA;
231 static inline int is_imx27_dma(struct imxdma_engine *imxdma)
233 return imxdma->devtype == IMX27_DMA;
256 static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
259 __raw_writel(val, imxdma->base + offset);
262 static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
264 return __raw_readl(imxdma->base + offset);
269 struct imxdma_engine *imxdma = imxdmac->imxdma;
271 if (is_imx27_dma(imxdma))
283 struct imxdma_engine *imxdma = imxdmac->imxdma;
292 imx_dmav1_writel(imxdma, sg->dma_address,
295 imx_dmav1_writel(imxdma, sg->dma_address,
298 imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));
300 dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
302 imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
303 imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
304 imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));
310 struct imxdma_engine *imxdma = imxdmac->imxdma;
314 dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);
318 imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
319 imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
321 imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
324 if (!is_imx1_dma(imxdma) &&
330 tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
331 imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
341 struct imxdma_engine *imxdma = imxdmac->imxdma;
345 dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);
351 imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
353 imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
355 imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
362 struct imxdma_engine *imxdma = imxdmac->imxdma;
365 imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));
369 dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
375 struct imxdma_engine *imxdma = dev_id;
380 disr = imx_dmav1_readl(imxdma, DMA_DISR);
382 err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
383 imx_dmav1_readl(imxdma, DMA_DRTOSR) |
384 imx_dmav1_readl(imxdma, DMA_DSESR) |
385 imx_dmav1_readl(imxdma, DMA_DBOSR);
390 imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);
397 if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
398 imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
401 if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
402 imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
405 if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
406 imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
409 if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
410 imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
414 tasklet_schedule(&imxdma->channel[i].dma_tasklet);
416 dev_warn(imxdma->dev,
428 struct imxdma_engine *imxdma = imxdmac->imxdma;
433 spin_lock_irqsave(&imxdma->lock, flags);
435 spin_unlock_irqrestore(&imxdma->lock, flags);
442 spin_unlock_irqrestore(&imxdma->lock, flags);
451 tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));
461 imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
463 imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
468 imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
484 imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
491 struct imxdma_engine *imxdma = dev_id;
494 if (!is_imx1_dma(imxdma))
497 disr = imx_dmav1_readl(imxdma, DMA_DISR);
499 dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);
501 imx_dmav1_writel(imxdma, disr, DMA_DISR);
504 dma_irq_handle_channel(&imxdma->channel[i]);
513 struct imxdma_engine *imxdma = imxdmac->imxdma;
522 if ((imxdma->slots_2d[i].count > 0) &&
523 ((imxdma->slots_2d[i].xsr != d->x) ||
524 (imxdma->slots_2d[i].ysr != d->y) ||
525 (imxdma->slots_2d[i].wsr != d->w)))
533 imxdma->slots_2d[slot].xsr = d->x;
534 imxdma->slots_2d[slot].ysr = d->y;
535 imxdma->slots_2d[slot].wsr = d->w;
536 imxdma->slots_2d[slot].count++;
544 imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
545 imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
546 imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
550 imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
551 imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
552 imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
560 imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
561 imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
562 imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
565 imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));
567 dev_dbg(imxdma->dev,
578 imx_dmav1_writel(imxdma, imxdmac->per_address,
580 imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
583 dev_dbg(imxdma->dev,
589 imx_dmav1_writel(imxdma, imxdmac->per_address,
591 imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
594 dev_dbg(imxdma->dev,
600 dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
618 struct imxdma_engine *imxdma = imxdmac->imxdma;
622 spin_lock_irqsave(&imxdma->lock, flags);
626 spin_unlock_irqrestore(&imxdma->lock, flags);
642 imxdma->slots_2d[imxdmac->slot_2d].count--;
653 dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
657 spin_unlock_irqrestore(&imxdma->lock, flags);
665 struct imxdma_engine *imxdma = imxdmac->imxdma;
670 spin_lock_irqsave(&imxdma->lock, flags);
673 spin_unlock_irqrestore(&imxdma->lock, flags);
682 struct imxdma_engine *imxdma = imxdmac->imxdma;
716 imx_dmav1_writel(imxdma, imxdmac->dma_request,
720 imx_dmav1_writel(imxdma, imxdmac->watermark_level *
746 struct imxdma_engine *imxdma = imxdmac->imxdma;
750 spin_lock_irqsave(&imxdma->lock, flags);
753 spin_unlock_irqrestore(&imxdma->lock, flags);
792 struct imxdma_engine *imxdma = imxdmac->imxdma;
796 spin_lock_irqsave(&imxdma->lock, flags);
802 spin_unlock_irqrestore(&imxdma->lock, flags);
873 struct imxdma_engine *imxdma = imxdmac->imxdma;
878 dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
930 struct imxdma_engine *imxdma = imxdmac->imxdma;
933 dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
961 struct imxdma_engine *imxdma = imxdmac->imxdma;
964 dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n"
1003 struct imxdma_engine *imxdma = imxdmac->imxdma;
1007 spin_lock_irqsave(&imxdma->lock, flags);
1014 dev_warn(imxdma->dev,
1022 spin_unlock_irqrestore(&imxdma->lock, flags);
1030 if (chan->device->dev != fdata->imxdma->dev)
1043 struct imxdma_engine *imxdma = ofdma->of_dma_data;
1045 .imxdma = imxdma,
1053 return dma_request_channel(imxdma->dma_device.cap_mask,
1059 struct imxdma_engine *imxdma;
1069 imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
1070 if (!imxdma)
1073 imxdma->dev = &pdev->dev;
1074 imxdma->devtype = pdev->id_entry->driver_data;
1077 imxdma->base = devm_ioremap_resource(&pdev->dev, res);
1078 if (IS_ERR(imxdma->base))
1079 return PTR_ERR(imxdma->base);
1085 imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
1086 if (IS_ERR(imxdma->dma_ipg))
1087 return PTR_ERR(imxdma->dma_ipg);
1089 imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
1090 if (IS_ERR(imxdma->dma_ahb))
1091 return PTR_ERR(imxdma->dma_ahb);
1093 ret = clk_prepare_enable(imxdma->dma_ipg);
1096 ret = clk_prepare_enable(imxdma->dma_ahb);
1101 imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);
1103 if (is_imx1_dma(imxdma)) {
1105 dma_irq_handler, 0, "DMA", imxdma);
1107 dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
1110 imxdma->irq = irq;
1119 imxdma_err_handler, 0, "DMA", imxdma);
1121 dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
1124 imxdma->irq_err = irq_err;
1128 imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);
1131 imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);
1134 imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);
1136 INIT_LIST_HEAD(&imxdma->dma_device.channels);
1138 dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
1139 dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
1140 dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
1141 dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);
1145 imxdma->slots_2d[i].count = 0;
1147 spin_lock_init(&imxdma->lock);
1151 struct imxdma_channel *imxdmac = &imxdma->channel[i];
1153 if (!is_imx1_dma(imxdma)) {
1155 dma_irq_handler, 0, "DMA", imxdma);
1157 dev_warn(imxdma->dev, "Can't register IRQ %d "
1167 imxdmac->imxdma = imxdma;
1174 imxdmac->chan.device = &imxdma->dma_device;
1180 &imxdma->dma_device.channels);
1183 imxdma->dma_device.dev = &pdev->dev;
1185 imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
1186 imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
1187 imxdma->dma_device.device_tx_status = imxdma_tx_status;
1188 imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
1189 imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
1190 imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
1191 imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
1192 imxdma->dma_device.device_config = imxdma_config;
1193 imxdma->dma_device.device_terminate_all = imxdma_terminate_all;
1194 imxdma->dma_device.device_issue_pending = imxdma_issue_pending;
1196 platform_set_drvdata(pdev, imxdma);
1198 imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;
1199 dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
1201 ret = dma_async_device_register(&imxdma->dma_device);
1209 imxdma_xlate, imxdma);
1219 dma_async_device_unregister(&imxdma->dma_device);
1221 clk_disable_unprepare(imxdma->dma_ahb);
1223 clk_disable_unprepare(imxdma->dma_ipg);
1227 static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *imxdma)
1231 if (is_imx1_dma(imxdma)) {
1232 disable_irq(imxdma->irq);
1233 disable_irq(imxdma->irq_err);
1237 struct imxdma_channel *imxdmac = &imxdma->channel[i];
1239 if (!is_imx1_dma(imxdma))
1248 struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
1250 imxdma_free_irq(pdev, imxdma);
1252 dma_async_device_unregister(&imxdma->dma_device);
1257 clk_disable_unprepare(imxdma->dma_ipg);
1258 clk_disable_unprepare(imxdma->dma_ahb);
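
Nearly every match above follows the same two conventions: each imxdma_channel keeps a back-pointer to the shared imxdma_engine, and all v1 register access goes through the imx_dmav1_writel()/imx_dmav1_readl() wrappers around imxdma->base. The condensed sketch below restates that relationship only as an illustration, not as the driver source: the structures are trimmed to the fields visible in this listing, the DMA_DISR offset and channel count are placeholders rather than the driver's real values, and imxdma_ack_irq() is a hypothetical helper standing in for the real call sites.

/* Condensed sketch of the pattern seen in the matches above; not the driver itself. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/spinlock.h>

#define IMX_DMA_CHANNELS	16	/* placeholder channel count */
#define DMA_DISR		0x08	/* placeholder register offset */

struct imxdma_engine;

struct imxdma_channel {
	int			channel;
	struct imxdma_engine	*imxdma;	/* back-pointer used at every call site */
};

struct imxdma_engine {
	struct device		*dev;
	void __iomem		*base;		/* MMIO base used by the v1 accessors */
	spinlock_t		lock;
	struct imxdma_channel	channel[IMX_DMA_CHANNELS];
};

/* All v1 register traffic funnels through these two helpers. */
static inline void imx_dmav1_writel(struct imxdma_engine *imxdma,
				    unsigned val, unsigned offset)
{
	__raw_writel(val, imxdma->base + offset);
}

static inline unsigned imx_dmav1_readl(struct imxdma_engine *imxdma,
				       unsigned offset)
{
	return __raw_readl(imxdma->base + offset);
}

/*
 * Hypothetical call site: recover the engine from the channel's back-pointer,
 * then clear that channel's bit in the interrupt status register.
 */
static void imxdma_ack_irq(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;

	imx_dmav1_writel(imxdma, 1 << imxdmac->channel, DMA_DISR);
}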