Lines matching refs: fsl_edma

37 struct fsl_edma_engine *fsl_edma = dev_id;
39 struct edma_regs *regs = &fsl_edma->regs;
41 intr = edma_readl(fsl_edma, regs->intl);
45 for (ch = 0; ch < fsl_edma->n_chans; ch++) {
47 edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);
48 fsl_edma_tx_chan_handler(&fsl_edma->chans[ch]);
72 struct fsl_edma_engine *fsl_edma = dev_id;
74 struct edma_regs *regs = &fsl_edma->regs;
76 err = edma_readl(fsl_edma, regs->errl);
80 for (ch = 0; ch < fsl_edma->n_chans; ch++) {
82 fsl_edma_disable_request(&fsl_edma->chans[ch]);
83 edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), regs->cerr);
84 fsl_edma_err_chan_handler(&fsl_edma->chans[ch]);
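
A condensed sketch of the per-channel dispatch that the tx/err handler lines above come from: read the pending-interrupt bitmap, then for each set bit acknowledge the channel and run its handler. Register helpers and handler names are taken from the matched lines; surrounding details may differ in the tree.

/* Sketch of the IRQ dispatch loop; not a verbatim copy of the driver. */
static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	struct edma_regs *regs = &fsl_edma->regs;
	unsigned int intr, ch;

	intr = edma_readl(fsl_edma, regs->intl);	/* pending-interrupt bitmap */
	if (!intr)
		return IRQ_NONE;

	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		if (intr & BIT(ch)) {
			/* clear the channel interrupt, then complete its work */
			edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);
			fsl_edma_tx_chan_handler(&fsl_edma->chans[ch]);
		}
	}
	return IRQ_HANDLED;
}

The error handler follows the same shape, but reads regs->errl, disables the channel's hardware request, clears via EDMA_CERR_CERR() and calls fsl_edma_err_chan_handler().
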
101 struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
104 u32 dmamux_nr = fsl_edma->drvdata->dmamuxs;
105 unsigned long chans_per_mux = fsl_edma->n_chans / dmamux_nr;
110 mutex_lock(&fsl_edma->fsl_edma_mutex);
111 list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
122 mutex_unlock(&fsl_edma->fsl_edma_mutex);
127 mutex_unlock(&fsl_edma->fsl_edma_mutex);
134 struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
143 b_chmux = !!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHMUX);
145 mutex_lock(&fsl_edma->fsl_edma_mutex);
146 list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels,
153 i = fsl_chan - fsl_edma->chans;
169 mutex_unlock(&fsl_edma->fsl_edma_mutex);
176 mutex_unlock(&fsl_edma->fsl_edma_mutex);
180 mutex_unlock(&fsl_edma->fsl_edma_mutex);
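
Both of_dma translate callbacks above share one lookup pattern: walk dma_dev.channels under fsl_edma_mutex and claim the first idle channel that satisfies the DT specifier. A minimal sketch of that pattern, assuming a two-cell specifier (mux index, request source) as the dmamux/chans_per_mux arithmetic suggests; the real callbacks also program the mux or channel mux (CHMUX) before returning.

/* Sketch of the xlate lookup; helper names follow the listing, details differ. */
static struct dma_chan *example_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
	unsigned long chans_per_mux = fsl_edma->n_chans / fsl_edma->drvdata->dmamuxs;
	struct dma_chan *chan, *_chan;

	if (dma_spec->args_count != 2)
		return NULL;

	mutex_lock(&fsl_edma->fsl_edma_mutex);
	list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
		if (chan->client_count)
			continue;	/* already claimed */
		if (chan->chan_id / chans_per_mux == dma_spec->args[0]) {
			chan = dma_get_slave_channel(chan);
			if (chan) {
				mutex_unlock(&fsl_edma->fsl_edma_mutex);
				return chan;	/* driver sets up the request mux here */
			}
		}
	}
	mutex_unlock(&fsl_edma->fsl_edma_mutex);
	return NULL;
}
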
185 fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
189 edma_writel(fsl_edma, ~0, fsl_edma->regs.intl);
191 fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
192 if (fsl_edma->txirq < 0)
193 return fsl_edma->txirq;
195 fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err");
196 if (fsl_edma->errirq < 0)
197 return fsl_edma->errirq;
199 if (fsl_edma->txirq == fsl_edma->errirq) {
200 ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
201 fsl_edma_irq_handler, 0, "eDMA", fsl_edma);
207 ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
208 fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma);
214 ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
215 fsl_edma_err_handler, 0, "eDMA err", fsl_edma);
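
fsl_edma_irq_init() registers the interrupts in two ways depending on the wiring: when "edma-tx" and "edma-err" resolve to the same interrupt line, a single combined handler is installed; otherwise each line gets its own handler. A trimmed sketch with error reporting abbreviated:

/* Sketch of the IRQ registration logic; dev_err() reporting omitted. */
static int example_irq_init(struct platform_device *pdev,
			    struct fsl_edma_engine *fsl_edma)
{
	int ret;

	edma_writel(fsl_edma, ~0, fsl_edma->regs.intl);	/* clear stale interrupts */

	fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
	if (fsl_edma->txirq < 0)
		return fsl_edma->txirq;

	fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err");
	if (fsl_edma->errirq < 0)
		return fsl_edma->errirq;

	if (fsl_edma->txirq == fsl_edma->errirq) {
		/* one shared line: the combined handler checks both causes */
		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
				       fsl_edma_irq_handler, 0, "eDMA", fsl_edma);
	} else {
		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
				       fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma);
		if (ret)
			return ret;
		ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
				       fsl_edma_err_handler, 0, "eDMA err", fsl_edma);
	}
	return ret;
}
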
225 static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
230 for (i = 0; i < fsl_edma->n_chans; i++) {
232 struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
234 if (fsl_edma->chan_masked & BIT(i))
258 struct fsl_edma_engine *fsl_edma)
263 edma_writel(fsl_edma, ~0, fsl_edma->regs.intl);
286 0, "eDMA2-ERR", fsl_edma);
290 fsl_edma->chans[i].chan_name,
291 fsl_edma);
300 struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
302 if (fsl_edma->txirq == fsl_edma->errirq) {
303 devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
305 devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
306 devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma);
310 static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
315 clk_disable_unprepare(fsl_edma->muxclk[i]);
382 static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
392 for (i = 0; i < fsl_edma->n_chans; i++) {
393 if (fsl_edma->chan_masked & BIT(i))
396 fsl_chan = &fsl_edma->chans[i];
427 struct fsl_edma_engine *fsl_edma;
447 fsl_edma = devm_kzalloc(&pdev->dev, struct_size(fsl_edma, chans, chans),
449 if (!fsl_edma)
452 fsl_edma->drvdata = drvdata;
453 fsl_edma->n_chans = chans;
454 mutex_init(&fsl_edma->fsl_edma_mutex);
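
The probe sizes the engine with struct_size() so the engine and its per-channel array come from one overflow-checked allocation. A self-contained sketch of the idiom with a simplified structure (names here are illustrative, not the driver's):

/* struct_size(p, member, count) evaluates to sizeof(*p) plus
 * count * sizeof(*p->member), with overflow checking (include/linux/overflow.h). */
struct engine_example {
	u32 n_chans;
	struct fsl_edma_chan chans[];	/* flexible array member, one per channel */
};

static struct engine_example *engine_alloc(struct device *dev, u32 chans)
{
	struct engine_example *e;

	e = devm_kzalloc(dev, struct_size(e, chans, chans), GFP_KERNEL);
	if (!e)
		return NULL;
	e->n_chans = chans;
	return e;
}
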
456 fsl_edma->membase = devm_platform_ioremap_resource(pdev, 0);
457 if (IS_ERR(fsl_edma->membase))
458 return PTR_ERR(fsl_edma->membase);
461 fsl_edma_setup_regs(fsl_edma);
462 regs = &fsl_edma->regs;
466 fsl_edma->dmaclk = devm_clk_get_enabled(&pdev->dev, "dma");
467 if (IS_ERR(fsl_edma->dmaclk)) {
469 return PTR_ERR(fsl_edma->dmaclk);
474 fsl_edma->chclk = devm_clk_get_enabled(&pdev->dev, "mp");
475 if (IS_ERR(fsl_edma->chclk)) {
477 return PTR_ERR(fsl_edma->chclk);
484 fsl_edma->chan_masked = chan_mask[1];
485 fsl_edma->chan_masked <<= 32;
486 fsl_edma->chan_masked |= chan_mask[0];
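
The three lines above build a 64-bit channel mask from two 32-bit words: the high word is placed in the upper half, then the low word is OR'd in. A small sketch of that assembly; the chan_mask[] source and the property name below are assumptions for illustration:

/* Sketch only: combine two u32 mask cells (low word first) into a u64. */
static u64 example_read_chan_mask(struct device_node *np)
{
	u32 chan_mask[2] = { 0, 0 };
	u64 masked;

	of_property_read_u32_array(np, "dma-channel-mask", chan_mask, 2);	/* property name assumed */

	masked = chan_mask[1];		/* channels 32..63 */
	masked <<= 32;
	masked |= chan_mask[0];		/* channels 0..31 */

	return masked;
}
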
489 for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
496 fsl_edma->muxbase[i] = devm_platform_ioremap_resource(pdev,
498 if (IS_ERR(fsl_edma->muxbase[i])) {
500 fsl_disable_clocks(fsl_edma, i);
501 return PTR_ERR(fsl_edma->muxbase[i]);
505 fsl_edma->muxclk[i] = devm_clk_get_enabled(&pdev->dev, clkname);
506 if (IS_ERR(fsl_edma->muxclk[i])) {
509 return PTR_ERR(fsl_edma->muxclk[i]);
513 fsl_edma->big_endian = of_property_read_bool(np, "big-endian");
516 ret = fsl_edma3_attach_pd(pdev, fsl_edma);
521 INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
522 for (i = 0; i < fsl_edma->n_chans; i++) {
523 struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
526 if (fsl_edma->chan_masked & BIT(i))
532 fsl_chan->edma = fsl_edma;
541 fsl_chan->tcd = fsl_edma->membase
545 vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
551 ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma);
555 dma_cap_set(DMA_PRIVATE, fsl_edma->dma_dev.cap_mask);
556 dma_cap_set(DMA_SLAVE, fsl_edma->dma_dev.cap_mask);
557 dma_cap_set(DMA_CYCLIC, fsl_edma->dma_dev.cap_mask);
558 dma_cap_set(DMA_MEMCPY, fsl_edma->dma_dev.cap_mask);
560 fsl_edma->dma_dev.dev = &pdev->dev;
561 fsl_edma->dma_dev.device_alloc_chan_resources
563 fsl_edma->dma_dev.device_free_chan_resources
565 fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
566 fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
567 fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
568 fsl_edma->dma_dev.device_prep_dma_memcpy = fsl_edma_prep_memcpy;
569 fsl_edma->dma_dev.device_config = fsl_edma_slave_config;
570 fsl_edma->dma_dev.device_pause = fsl_edma_pause;
571 fsl_edma->dma_dev.device_resume = fsl_edma_resume;
572 fsl_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
573 fsl_edma->dma_dev.device_synchronize = fsl_edma_synchronize;
574 fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
576 fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
577 fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
580 fsl_edma->dma_dev.src_addr_widths |= BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
581 fsl_edma->dma_dev.dst_addr_widths |= BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
584 fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
586 fsl_edma->dma_dev.directions |= BIT(DMA_DEV_TO_DEV);
588 fsl_edma->dma_dev.copy_align = drvdata->flags & FSL_EDMA_DRV_ALIGN_64BYTE ?
593 dma_set_max_seg_size(fsl_edma->dma_dev.dev,
596 fsl_edma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
598 platform_set_drvdata(pdev, fsl_edma);
600 ret = dma_async_device_register(&fsl_edma->dma_dev);
609 fsl_edma);
613 dma_async_device_unregister(&fsl_edma->dma_dev);
619 edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
627 struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);
629 fsl_edma_irq_exit(pdev, fsl_edma);
630 fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
632 dma_async_device_unregister(&fsl_edma->dma_dev);
633 fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
640 struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
645 for (i = 0; i < fsl_edma->n_chans; i++) {
646 fsl_chan = &fsl_edma->chans[i];
647 if (fsl_edma->chan_masked & BIT(i))
666 struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
668 struct edma_regs *regs = &fsl_edma->regs;
671 for (i = 0; i < fsl_edma->n_chans; i++) {
672 fsl_chan = &fsl_edma->chans[i];
673 if (fsl_edma->chan_masked & BIT(i))
681 if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
682 edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
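
The late suspend/resume callbacks repeat the skip-masked-channel loop seen in probe, and on resume a controller without split registers gets round-robin group/channel arbitration re-enabled in the control register. A trimmed sketch of the resume side, with the per-channel restore elided:

/* Sketch of the resume shape only; per-channel TCD/mux restore is elided. */
static int example_resume_late(struct device *dev)
{
	struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
	struct edma_regs *regs = &fsl_edma->regs;
	int i;

	for (i = 0; i < fsl_edma->n_chans; i++) {
		if (fsl_edma->chan_masked & BIT(i))
			continue;	/* channel not used on this SoC */
		/* restore the channel's registers here */
	}

	if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
		edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);

	return 0;
}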