Lines matching refs: mxs_dma

39 #define dma_is_apbh(mxs_dma)	((mxs_dma)->type == MXS_DMA_APBH)
40 #define apbh_is_old(mxs_dma) ((mxs_dma)->dev_id == IMX23_DMA)
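
For context, a minimal sketch of the definitions the two predicates above test against (the enum names are the driver's; the struct is abbreviated, not a complete definition):

enum mxs_dma_devtype {
	MXS_DMA_APBH,		/* AHB-to-APBH bridge DMA */
	MXS_DMA_APBX,		/* AHB-to-APBX bridge DMA */
};

enum mxs_dma_id {
	IMX23_DMA,		/* "old" APBH: per-channel bits live in CTRL0 */
	IMX28_DMA,
};

struct mxs_dma_engine {
	enum mxs_dma_devtype	type;	/* tested by dma_is_apbh() */
	enum mxs_dma_id		dev_id;	/* tested by apbh_is_old() */
	/* ... base, clk, pdev, dma_device, nr_channels, mxs_chans[] ... */
};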
110 struct mxs_dma_engine *mxs_dma;
187 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
200 } else if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) {
202 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
206 void __iomem *reg_dbg1 = mxs_dma->base +
207 HW_APBX_CHn_DEBUG1(mxs_dma, chan_id);
222 dev_err(&mxs_chan->mxs_dma->pdev->dev,
227 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
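
The recurring + STMP_OFFSET_REG_SET / + STMP_OFFSET_REG_CLR arithmetic (lines 202, 227, and throughout) is the STMP write-1-to-set/clear idiom: each register has aliases at small offsets that set or clear only the written bits, so read-modify-write cycles are unnecessary. A self-contained model of the idiom (offsets as in include/linux/stmp_device.h; a plain variable stands in for the memory-mapped register, and real hardware additionally self-clears some bits, which this does not model):

#include <stdint.h>
#include <stdio.h>

#define STMP_OFFSET_REG_SET	0x4	/* write-1-to-set alias of a register */
#define STMP_OFFSET_REG_CLR	0x8	/* write-1-to-clear alias */

static uint32_t reg;	/* stand-in for one memory-mapped STMP register */

/* Model of writel(val, base + offset) against that register's aliases. */
static void stmp_writel(uint32_t val, uint32_t offset)
{
	if (offset == STMP_OFFSET_REG_SET)
		reg |= val;		/* only the written bits get set */
	else if (offset == STMP_OFFSET_REG_CLR)
		reg &= ~val;		/* only the written bits get cleared */
	else
		reg = val;		/* plain write at offset 0 */
}

int main(void)
{
	stmp_writel(1 << 3, STMP_OFFSET_REG_SET);	/* e.g. freeze channel 3 */
	printf("after SET: %#x\n", reg);		/* 0x8 */
	stmp_writel(1 << 3, STMP_OFFSET_REG_CLR);	/* unfreeze it again */
	printf("after CLR: %#x\n", reg);		/* 0 */
	return 0;
}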
236 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
241 mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));
249 writel(2, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
251 writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
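
Lines 236-251 are the channel kick-off: point NXTCMDAR at the physical address of the first command word, then bump the channel semaphore to start execution. A hedged reconstruction of the surrounding function (MXS_DMA_SG_LOOP and MXS_DMA_USE_SEMAPHORE are the driver's own flags; the exact condition may vary by kernel version):

static void mxs_dma_enable_chan(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* point the channel at the first command of the chain */
	writel(mxs_chan->ccw_phys,
		mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));

	/*
	 * Bump the semaphore to start execution. A semaphore-driven
	 * cyclic chain starts at 2 so the IRQ handler has time to
	 * re-arm it (the writel(1, ...) at line 377).
	 */
	if (mxs_chan->flags & MXS_DMA_SG_LOOP &&
	    mxs_chan->flags & MXS_DMA_USE_SEMAPHORE)
		writel(2, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
	else
		writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
}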
266 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
270 if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
272 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
275 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
284 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
288 if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
290 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR);
293 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);
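
Lines 266-293 are a symmetric pause/resume pair: pause sets the channel's freeze bit through the SET alias, resume clears the same bit through the CLR alias, with only the register differing between old APBH (CTRL0) and later blocks (CHANNEL_CTRL). A reconstruction of the pause side, matching lines 266-275 (resume is identical with STMP_OFFSET_REG_CLR):

static int mxs_dma_pause_chan(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* freeze the channel: bit chan_id selects it */
	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	else
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);

	mxs_chan->status = DMA_PAUSED;
	return 0;
}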
311 static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq)
315 for (i = 0; i != mxs_dma->nr_channels; ++i)
316 if (mxs_dma->mxs_chans[i].chan_irq == irq)
324 struct mxs_dma_engine *mxs_dma = dev_id;
328 int chan = mxs_dma_irq_to_chan(mxs_dma, irq);
334 completed = readl(mxs_dma->base + HW_APBHX_CTRL1);
339 mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);
342 err = readl(mxs_dma->base + HW_APBHX_CTRL2);
354 mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);
365 mxs_chan = &mxs_dma->mxs_chans[chan];
368 dev_dbg(mxs_dma->dma_device.dev,
377 writel(1, mxs_dma->base +
378 HW_APBHX_CHn_SEMA(mxs_dma, chan));
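
Lines 311-378 form the interrupt path: map the IRQ back to a channel index, read and acknowledge completion (CTRL1) and error (CTRL2) status through the CLR aliases, then for semaphore-driven cyclic channels re-arm the semaphore (lines 377-378). The lookup itself is a plain linear scan over the channel table; a self-contained model:

#include <stdio.h>

#define NR_CHANNELS 16

struct chan { int chan_irq; };
static struct chan chans[NR_CHANNELS];

/* Mirror of mxs_dma_irq_to_chan() at line 311: linear scan. */
static int irq_to_chan(int irq)
{
	int i;

	for (i = 0; i != NR_CHANNELS; i++)
		if (chans[i].chan_irq == irq)
			return i;
	return -1;	/* the driver returns -EINVAL here */
}

int main(void)
{
	int i;

	for (i = 0; i != NR_CHANNELS; i++)
		chans[i].chan_irq = 32 + i;	/* arbitrary demo IRQ numbers */
	printf("irq 35 -> channel %d\n", irq_to_chan(35));	/* -> 3 */
	return 0;
}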
399 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
402 mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
411 0, "mxs-dma", mxs_dma);
415 ret = clk_prepare_enable(mxs_dma->clk);
430 free_irq(mxs_chan->chan_irq, mxs_dma);
432 dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
441 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
445 free_irq(mxs_chan->chan_irq, mxs_dma);
447 dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
450 clk_disable_unprepare(mxs_dma->clk);
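
Lines 399-450 pair per-channel setup with its mirror-image teardown: setup allocates a coherent block for the command words (line 402), requests the channel IRQ (line 411), and enables the engine clock (line 415); teardown undoes all three. A hedged reconstruction of the teardown (mxs_dma_disable_chan() and to_mxs_dma_chan() are the driver's own helpers):

static void mxs_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;

	mxs_dma_disable_chan(chan);		/* quiesce the hardware first */

	free_irq(mxs_chan->chan_irq, mxs_dma);	/* line 445 */

	dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
			  mxs_chan->ccw, mxs_chan->ccw_phys);	/* line 447 */

	clk_disable_unprepare(mxs_dma->clk);	/* line 450 */
}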
481 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
492 dev_err(mxs_dma->dma_device.dev,
537 dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
580 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
592 dev_err(mxs_dma->dma_device.dev,
599 dev_err(mxs_dma->dma_device.dev,
651 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
662 bar = readl(mxs_dma->base +
663 HW_APBHX_CHn_BAR(mxs_dma, chan->chan_id));
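
Lines 662-663 feed the residue calculation in device_tx_status: for an in-flight cyclic transfer, the hardware's current byte address (BAR) is subtracted from the end of the last descriptor's buffer. In miniature, with made-up addresses standing in for the driver's ccw fields:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical values standing in for the last CCW descriptor. */
	uint32_t bufaddr    = 0x40000000;	/* ccw->bufaddr */
	uint32_t xfer_bytes = 4096;		/* ccw->xfer_bytes */
	uint32_t bar        = 0x40000300;	/* read from HW_APBHX_CHn_BAR */

	/* residue = bytes the hardware has not yet transferred */
	uint32_t residue = bufaddr + xfer_bytes - bar;

	printf("residue = %u bytes\n", residue);	/* 3328 */
	return 0;
}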
673 static int mxs_dma_init(struct mxs_dma_engine *mxs_dma)
677 ret = clk_prepare_enable(mxs_dma->clk);
681 ret = stmp_reset_block(mxs_dma->base);
686 if (dma_is_apbh(mxs_dma)) {
688 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
690 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
695 mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET);
698 clk_disable_unprepare(mxs_dma->clk);
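
Lines 673-698 give the bring-up order: clock on, soft-reset the block, enable APBH bursts through the CTRL0 SET alias, unmask the per-channel completion IRQs through CTRL1, then drop the clock again on the way out, since each channel re-takes it in alloc_chan_resources (line 415). A hedged reconstruction (the bit and mask names follow the driver's defines):

static int mxs_dma_init(struct mxs_dma_engine *mxs_dma)
{
	int ret;

	ret = clk_prepare_enable(mxs_dma->clk);
	if (ret)
		return ret;

	ret = stmp_reset_block(mxs_dma->base);	/* soft-reset, wait for ready */
	if (ret)
		goto err_out;

	/* enable apbh burst modes */
	if (dma_is_apbh(mxs_dma)) {
		writel(BM_APBH_CTRL0_APB_BURST_EN,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
		writel(BM_APBH_CTRL0_APB_BURST8_EN,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	}

	/* enable the completion irq for all channels */
	writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
		mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET);

err_out:
	/* dropped even on success; channels re-enable it on demand */
	clk_disable_unprepare(mxs_dma->clk);
	return ret;
}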
710 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
716 chan_irq = platform_get_irq(mxs_dma->pdev, param->chan_id);
728 struct mxs_dma_engine *mxs_dma = ofdma->of_dma_data;
729 dma_cap_mask_t mask = mxs_dma->dma_device.cap_mask;
737 if (param.chan_id >= mxs_dma->nr_channels)
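
Lines 710-737 implement the DT lookup: the single #dma-cells argument is a channel index, bounds-checked against nr_channels (line 737), and the filter function verifies the candidate belongs to this engine and fetches its IRQ with platform_get_irq() (line 716). A hedged reconstruction of the xlate half (struct mxs_dma_filter_param and mxs_dma_filter_fn are the driver's; the trailing of_node argument of __dma_request_channel() exists only on newer kernels):

static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct mxs_dma_engine *mxs_dma = ofdma->of_dma_data;
	dma_cap_mask_t mask = mxs_dma->dma_device.cap_mask;
	struct mxs_dma_filter_param param;

	if (dma_spec->args_count != 1)
		return NULL;

	param.chan_id = dma_spec->args[0];

	/* reject channel indices this controller does not have */
	if (param.chan_id >= mxs_dma->nr_channels)
		return NULL;

	return __dma_request_channel(&mask, mxs_dma_filter_fn, &param,
				     ofdma->of_node);
}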
748 struct mxs_dma_engine *mxs_dma;
752 mxs_dma = devm_kzalloc(&pdev->dev, sizeof(*mxs_dma), GFP_KERNEL);
753 if (!mxs_dma)
756 ret = of_property_read_u32(np, "dma-channels", &mxs_dma->nr_channels);
763 mxs_dma->type = dma_type->type;
764 mxs_dma->dev_id = dma_type->id;
767 mxs_dma->base = devm_ioremap_resource(&pdev->dev, iores);
768 if (IS_ERR(mxs_dma->base))
769 return PTR_ERR(mxs_dma->base);
771 mxs_dma->clk = devm_clk_get(&pdev->dev, NULL);
772 if (IS_ERR(mxs_dma->clk))
773 return PTR_ERR(mxs_dma->clk);
775 dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask);
776 dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask);
778 INIT_LIST_HEAD(&mxs_dma->dma_device.channels);
782 struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i];
784 mxs_chan->mxs_dma = mxs_dma;
785 mxs_chan->chan.device = &mxs_dma->dma_device;
793 &mxs_dma->dma_device.channels);
796 ret = mxs_dma_init(mxs_dma);
800 mxs_dma->pdev = pdev;
801 mxs_dma->dma_device.dev = &pdev->dev;
803 /* mxs_dma gets 65535 bytes maximum sg size */
804 dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES);
806 mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
807 mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources;
808 mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
809 mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
810 mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
811 mxs_dma->dma_device.device_pause = mxs_dma_pause_chan;
812 mxs_dma->dma_device.device_resume = mxs_dma_resume_chan;
813 mxs_dma->dma_device.device_terminate_all = mxs_dma_terminate_all;
814 mxs_dma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
815 mxs_dma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
816 mxs_dma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
817 mxs_dma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
818 mxs_dma->dma_device.device_issue_pending = mxs_dma_enable_chan;
820 ret = dmaenginem_async_device_register(&mxs_dma->dma_device);
822 dev_err(mxs_dma->dma_device.dev, "unable to register\n");
826 ret = of_dma_controller_register(np, mxs_dma_xlate, mxs_dma);
828 dev_err(mxs_dma->dma_device.dev,
832 dev_info(mxs_dma->dma_device.dev, "initialized\n");
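
Once probe completes, the controller advertises DMA_SLAVE and DMA_CYCLIC with 4-byte bus widths in both directions (lines 775-776 and 814-816). A hypothetical consumer sketch using only the generic dmaengine API (the device, channel name, and FIFO address here are illustrative, not from this driver):

#include <linux/dmaengine.h>

/* Hypothetical client: drive one MEM_TO_DEV transfer through a channel
 * of this controller via the generic dmaengine API. */
static int demo_start_tx(struct device *dev, dma_addr_t buf, size_t len,
			 dma_addr_t fifo_addr)
{
	struct dma_chan *chan;
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		/* the only width the controller advertises (line 815) */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_async_tx_descriptor *desc;

	chan = dma_request_chan(dev, "tx");	/* matches a "dmas"/"dma-names" DT entry */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	if (dmaengine_slave_config(chan, &cfg))
		goto err;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		goto err;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);	/* lands in mxs_dma_enable_chan(), line 818 */
	return 0;
err:
	dma_release_channel(chan);
	return -EINVAL;
}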