Lines matching defs:fsl_chan — drivers/dma/fsl-edma-common.c (Freescale/NXP eDMA driver)

45 void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan)
47 spin_lock(&fsl_chan->vchan.lock);
49 if (!fsl_chan->edesc) {
51 spin_unlock(&fsl_chan->vchan.lock);
55 if (!fsl_chan->edesc->iscyclic) {
56 list_del(&fsl_chan->edesc->vdesc.node);
57 vchan_cookie_complete(&fsl_chan->edesc->vdesc);
58 fsl_chan->edesc = NULL;
59 fsl_chan->status = DMA_COMPLETE;
60 fsl_chan->idle = true;
62 vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
65 if (!fsl_chan->edesc)
66 fsl_edma_xfer_desc(fsl_chan);
68 spin_unlock(&fsl_chan->vchan.lock);
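
Context: this is the completion half, called from the per-SoC IRQ glue (fsl-edma-main.c in mainline), which reads the interrupt-status register, acks each pending channel, and dispatches to it. A sketch close to the mainline legacy-layout handler, not a verbatim copy:

    static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
    {
        struct fsl_edma_engine *fsl_edma = dev_id;
        struct edma_regs *regs = &fsl_edma->regs;
        unsigned int intr, ch;

        intr = edma_readl(fsl_edma, regs->intl);
        if (!intr)
            return IRQ_NONE;

        for (ch = 0; ch < fsl_edma->n_chans; ch++) {
            if (intr & BIT(ch)) {
                /* ack this channel's interrupt, then complete/advance it */
                edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);
                fsl_edma_tx_chan_handler(&fsl_edma->chans[ch]);
            }
        }
        return IRQ_HANDLED;
    }
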
71 static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
75 flags = fsl_edma_drvflags(fsl_chan);
76 val = edma_readl_chreg(fsl_chan, ch_sbr);
79 if (!fsl_chan->is_rxchan)
84 if (fsl_chan->is_rxchan)
90 if (fsl_chan->is_remote)
93 edma_writel_chreg(fsl_chan, val, ch_sbr);
100 if (!edma_readl_chreg(fsl_chan, ch_mux))
101 edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);
104 val = edma_readl_chreg(fsl_chan, ch_csr);
106 edma_writel_chreg(fsl_chan, val, ch_csr);
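
The listing skips lines that do not mention fsl_chan; the one omitted between 104 and 106 sets the enable-request bit inside this read-modify-write:

    val |= EDMA_V3_CH_CSR_ERQ;   /* eDMA v3: enable the hardware request */
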
109 static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
111 struct edma_regs *regs = &fsl_chan->edma->regs;
112 u32 ch = fsl_chan->vchan.chan.chan_id;
114 if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
115 return fsl_edma3_enable_request(fsl_chan);
117 if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
118 edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
119 edma_writeb(fsl_chan->edma, ch, regs->serq);
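
The matching else branch (no FSL_EDMA_DRV_WRAP_IO) does not touch fsl_chan and so is unlisted; in mainline it writes the same values through plain iowrite8(), roughly:

    } else {
        /* ColdFire parts access these registers natively big-endian */
        iowrite8(EDMA_SEEI_SEEI(ch), regs->seei); /* set error irq enable */
        iowrite8(ch, regs->serq);                 /* set enable request  */
    }
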
129 static void fsl_edma3_disable_request(struct fsl_edma_chan *fsl_chan)
131 u32 val = edma_readl_chreg(fsl_chan, ch_csr);
134 flags = fsl_edma_drvflags(fsl_chan);
137 edma_writel_chreg(fsl_chan, 0, ch_mux);
140 edma_writel_chreg(fsl_chan, val, ch_csr);
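
The unlisted line between 137 and 140 clears the request-enable bit that fsl_edma3_enable_request() set:

    val &= ~EDMA_V3_CH_CSR_ERQ;
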
143 void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
145 struct edma_regs *regs = &fsl_chan->edma->regs;
146 u32 ch = fsl_chan->vchan.chan.chan_id;
148 if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
149 return fsl_edma3_disable_request(fsl_chan);
151 if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
152 edma_writeb(fsl_chan->edma, ch, regs->cerq);
153 edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
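
As with enable, the non-WRAP_IO else branch is unlisted; it issues the same clears through iowrite8(), roughly:

    } else {
        iowrite8(ch, regs->cerq);                 /* clear enable request   */
        iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei); /* clear error irq enable */
    }
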
163 static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
176 static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
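
Only the helper signatures reference fsl_chan; each body writes one DMAMUX channel-configuration register, 8- or 32-bit wide depending on the SoC. A sketch matching the mainline helpers:

    static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
                               u32 off, u32 slot, bool enable)
    {
        u8 val8 = enable ? (EDMAMUX_CHCFG_ENBL | slot) : EDMAMUX_CHCFG_DIS;

        iowrite8(val8, addr + off);
    }

    static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
                                u32 off, u32 slot, bool enable)
    {
        u32 val = enable ? (EDMAMUX_CHCFG_ENBL << 24 | slot) : EDMAMUX_CHCFG_DIS;

        iowrite32(val, addr + off * 4);
    }
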
189 void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
192 u32 ch = fsl_chan->vchan.chan.chan_id;
196 u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;
201 chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
202 ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
204 if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_MUX_SWAP)
207 muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
210 if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_CONFIG32)
211 mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
213 mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
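
The unlisted glue in fsl_edma_chan_mux(): an early return when dmamux_nr is zero, the body of the MUX_SWAP test at line 204, and the source-id mask before the final write; roughly:

    int endian_diff[4] = {3, 1, -1, -3};

    if (!dmamux_nr)
        return;                            /* SoC without a DMAMUX */
    /* ... */
    ch_off += endian_diff[ch_off % 4];     /* body of the line-204 test */
    /* ... */
    slot = EDMAMUX_CHCFG_SOURCE(slot);     /* mask request source id */
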
241 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
245 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
246 fsl_edma_disable_request(fsl_chan);
247 fsl_chan->edesc = NULL;
248 fsl_chan->idle = true;
249 vchan_get_all_descriptors(&fsl_chan->vchan, &head);
250 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
251 vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
253 if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_PD)
254 pm_runtime_allow(fsl_chan->pd_dev);
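
This is the stock virt-dma terminate sequence: quiesce the hardware and detach fsl_chan->edesc under the lock, harvest every queued descriptor, free them outside the lock, and on power-domain parts let the domain suspend again. A client reaches it through the generic dmaengine wrapper:

    /* client side: abort everything queued on the channel */
    dmaengine_terminate_sync(chan);
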
261 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
264 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
265 if (fsl_chan->edesc) {
266 fsl_edma_disable_request(fsl_chan);
267 fsl_chan->status = DMA_PAUSED;
268 fsl_chan->idle = true;
270 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
276 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
279 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
280 if (fsl_chan->edesc) {
281 fsl_edma_enable_request(fsl_chan);
282 fsl_chan->status = DMA_IN_PROGRESS;
283 fsl_chan->idle = false;
285 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
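
Pause only clears the hardware request and leaves the current TCD intact, so resume just re-enables the request and the engine continues where it stopped. Client-side usage of the generic wrappers:

    if (dmaengine_pause(chan) == 0) {
        /* channel is quiescent here (status == DMA_PAUSED) */
        dmaengine_resume(chan);
    }
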
289 static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
291 if (fsl_chan->dma_dir != DMA_NONE)
292 dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
293 fsl_chan->dma_dev_addr,
294 fsl_chan->dma_dev_size,
295 fsl_chan->dma_dir, 0);
296 fsl_chan->dma_dir = DMA_NONE;
299 static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
302 struct device *dev = fsl_chan->vchan.chan.device->dev;
310 addr = fsl_chan->cfg.dst_addr;
311 size = fsl_chan->cfg.dst_maxburst;
315 addr = fsl_chan->cfg.src_addr;
316 size = fsl_chan->cfg.src_maxburst;
324 if (fsl_chan->dma_dir == dma_dir)
327 fsl_edma_unprep_slave_dma(fsl_chan);
329 fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
330 if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
332 fsl_chan->dma_dev_size = size;
333 fsl_chan->dma_dir = dma_dir;
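
The unlisted head of fsl_edma_prep_slave_dma() maps the dmaengine transfer direction onto a dma_data_direction and picks the device-side address and burst size from the channel config; the listed tail then caches the dma_map_resource() mapping so repeated preps in the same direction skip remapping. A sketch of the selection switch:

    enum dma_data_direction dma_dir;
    phys_addr_t addr = 0;
    u32 size = 0;

    switch (dir) {
    case DMA_MEM_TO_DEV:
        dma_dir = DMA_TO_DEVICE;
        addr = fsl_chan->cfg.dst_addr;      /* line 310 */
        size = fsl_chan->cfg.dst_maxburst;  /* line 311 */
        break;
    case DMA_DEV_TO_MEM:
        dma_dir = DMA_FROM_DEVICE;
        addr = fsl_chan->cfg.src_addr;      /* line 315 */
        size = fsl_chan->cfg.src_maxburst;  /* line 316 */
        break;
    default:
        dma_dir = DMA_NONE;
        break;
    }
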
341 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
343 memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
344 fsl_edma_unprep_slave_dma(fsl_chan);
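
fsl_edma_slave_config() just snapshots the caller's dma_slave_config (line 343) and invalidates any cached device-address mapping. A typical client call, with fifo_phys standing in for a real peripheral FIFO address:

    struct dma_slave_config cfg = {
        .dst_addr       = fifo_phys,        /* hypothetical FIFO address */
        .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        .dst_maxburst   = 4,
    };
    int ret = dmaengine_slave_config(chan, &cfg);
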
349 static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
352 struct fsl_edma_desc *edesc = fsl_chan->edesc;
360 for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) {
371 cur_addr = edma_read_tcdreg(fsl_chan, saddr);
373 cur_addr = edma_read_tcdreg(fsl_chan, daddr);
376 for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
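
The residue logic first sums the full length of every TCD (the loop at line 360: nbytes × biter per TCD), then, if the transfer is in flight, reads the engine's current source or destination address (lines 371/373) and walks the TCDs again to find the one containing it. A sketch of that second loop, with tcd_len(i) and tcd_addr(i) as shorthand for the per-TCD field reads:

    /* second pass: subtract finished TCDs, add back the unfinished tail */
    for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
        size = tcd_len(i);                       /* nbytes * biter */
        dma_addr = tcd_addr(i);                  /* saddr or daddr, by direction */

        len -= size;                             /* assume TCD i completed */
        if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
            len += dma_addr + size - cur_addr;   /* partially done: add back */
            break;
        }
    }
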
401 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
411 return fsl_chan->status;
413 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
414 vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
415 if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
417 fsl_edma_desc_residue(fsl_chan, vdesc, true);
420 fsl_edma_desc_residue(fsl_chan, vdesc, false);
424 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
426 return fsl_chan->status;
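
On the client side the residue computed above comes back through dma_tx_state:

    struct dma_tx_state state;
    enum dma_status status;

    status = dmaengine_tx_status(chan, cookie, &state);
    if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
        pr_debug("bytes left: %u\n", state.residue);
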
429 static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
440 edma_write_tcdreg(fsl_chan, 0, csr);
442 edma_write_tcdreg(fsl_chan, tcd->saddr, saddr);
443 edma_write_tcdreg(fsl_chan, tcd->daddr, daddr);
445 edma_write_tcdreg(fsl_chan, tcd->attr, attr);
446 edma_write_tcdreg(fsl_chan, tcd->soff, soff);
448 edma_write_tcdreg(fsl_chan, tcd->nbytes, nbytes);
449 edma_write_tcdreg(fsl_chan, tcd->slast, slast);
451 edma_write_tcdreg(fsl_chan, tcd->citer, citer);
452 edma_write_tcdreg(fsl_chan, tcd->biter, biter);
453 edma_write_tcdreg(fsl_chan, tcd->doff, doff);
455 edma_write_tcdreg(fsl_chan, tcd->dlast_sga, dlast_sga);
459 if (fsl_chan->is_sw) {
469 if (((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_SG) &&
471 ((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_LINK) &&
473 edma_writel_chreg(fsl_chan, edma_readl_chreg(fsl_chan, ch_csr), ch_csr);
476 edma_write_tcdreg(fsl_chan, tcd->csr, csr);
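
Ordering note: csr is cleared first (line 440) so a stale E_SG/E_LINK bit cannot send the engine through a half-programmed TCD, and the real csr value is committed last (line 476). The read-back write at line 473 clears CHn_CSR[DONE], which eDMAv3 requires before TCDn_CSR[E_SG] may be set (and both v3 and v4 require before changing minor-link settings). Condensed, with clear_done_needed as hypothetical shorthand for the flag test at lines 469-471:

    edma_write_tcdreg(fsl_chan, 0, csr);          /* neutralize E_SG/E_LINK */
    /* ... program saddr/daddr/attr/soff/nbytes/slast/citer/biter/doff ... */
    if (clear_done_needed)                        /* hypothetical shorthand */
        edma_writel_chreg(fsl_chan,
                          edma_readl_chreg(fsl_chan, ch_csr), ch_csr);
    edma_write_tcdreg(fsl_chan, tcd->csr, csr);   /* commit; may start if is_sw */
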
480 void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
486 struct dma_slave_config *cfg = &fsl_chan->cfg;
503 if (fsl_chan->is_multi_fifo) {
536 if (fsl_chan->is_rxchan)
539 if (fsl_chan->is_sw)
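
fsl_edma_fill_tcd() builds the TCD image in memory, always little-endian (fsl_edma_set_tcd_regs() converts for the engine's endianness when loading it). The listed fragments are the channel-specific CSR tweaks; taking mainline as the reference, the two tests at lines 536 and 539 amount to roughly:

    if (fsl_chan->is_rxchan)
        csr |= EDMA_TCD_CSR_ACTIVE;   /* receive channel: mark active */

    if (fsl_chan->is_sw)
        csr |= EDMA_TCD_CSR_START;    /* software start, used by memcpy */
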
545 static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
555 fsl_desc->echan = fsl_chan;
558 fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
567 dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
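
fsl_edma_alloc_desc() allocates the descriptor plus its TCD pointer array in one kzalloc and then draws each hardware TCD from the channel's DMA pool, unwinding on failure. A sketch close to the mainline body:

    fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
    if (!fsl_desc)
        return NULL;

    fsl_desc->echan = fsl_chan;
    fsl_desc->n_tcds = sg_len;
    for (i = 0; i < sg_len; i++) {
        fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
                                GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
        if (!fsl_desc->tcd[i].vtcd)
            goto err;
    }
    return fsl_desc;

    err:
    while (--i >= 0)
        dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
                      fsl_desc->tcd[i].ptcd);
    kfree(fsl_desc);
    return NULL;
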
578 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
589 if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
593 fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
601 fsl_chan->attr =
602 fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
603 nbytes = fsl_chan->cfg.dst_addr_width *
604 fsl_chan->cfg.dst_maxburst;
606 fsl_chan->attr =
607 fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
608 nbytes = fsl_chan->cfg.src_addr_width *
609 fsl_chan->cfg.src_maxburst;
623 dst_addr = fsl_chan->dma_dev_addr;
624 soff = fsl_chan->cfg.dst_addr_width;
625 doff = fsl_chan->is_multi_fifo ? 4 : 0;
627 src_addr = fsl_chan->dma_dev_addr;
629 soff = fsl_chan->is_multi_fifo ? 4 : 0;
630 doff = fsl_chan->cfg.src_addr_width;
633 src_addr = fsl_chan->cfg.src_addr;
634 dst_addr = fsl_chan->cfg.dst_addr;
639 fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
640 fsl_chan->attr, soff, nbytes, 0, iter,
645 return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
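
A client drives this through the generic slave-sg API; a minimal use, with tx_done as a hypothetical completion callback:

    struct dma_async_tx_descriptor *desc;
    dma_cookie_t cookie;

    desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
                                   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    if (!desc)
        return -ENOMEM;
    desc->callback = tx_done;          /* hypothetical */
    cookie = dmaengine_submit(desc);
    dma_async_issue_pending(chan);
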
653 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
663 if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
666 fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
673 fsl_chan->attr =
674 fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
675 nbytes = fsl_chan->cfg.dst_addr_width *
676 fsl_chan->cfg.dst_maxburst;
678 fsl_chan->attr =
679 fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
680 nbytes = fsl_chan->cfg.src_addr_width *
681 fsl_chan->cfg.src_maxburst;
687 dst_addr = fsl_chan->dma_dev_addr;
688 soff = fsl_chan->cfg.dst_addr_width;
691 src_addr = fsl_chan->dma_dev_addr;
694 doff = fsl_chan->cfg.src_addr_width;
697 src_addr = fsl_chan->cfg.src_addr;
698 dst_addr = fsl_chan->cfg.dst_addr;
711 fsl_chan->cfg.src_maxburst :
712 fsl_chan->cfg.dst_maxburst;
728 fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
729 dst_addr, fsl_chan->attr, soff,
734 fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
735 dst_addr, fsl_chan->attr, soff,
741 return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
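
The cyclic variant is the audio/UART ring-buffer path: one TCD per period, linked back to the first, with vchan_cyclic_callback() fired per period from the completion handler at line 62. Client side:

    desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
                                     DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
    if (desc) {
        desc->callback = period_done;  /* hypothetical, fires once per period */
        dmaengine_submit(desc);
        dma_async_issue_pending(chan);
    }
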
748 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
751 fsl_desc = fsl_edma_alloc_desc(fsl_chan, 1);
756 fsl_chan->is_sw = true;
759 fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
763 return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
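
The memcpy prep marks the channel is_sw (line 756), which later makes the TCD carry EDMA_TCD_CSR_START so the transfer kicks off by software rather than by a hardware request. Client side:

    desc = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
                                     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
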
766 void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
770 lockdep_assert_held(&fsl_chan->vchan.lock);
772 vdesc = vchan_next_desc(&fsl_chan->vchan);
775 fsl_chan->edesc = to_fsl_edma_desc(vdesc);
776 fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
777 fsl_edma_enable_request(fsl_chan);
778 fsl_chan->status = DMA_IN_PROGRESS;
779 fsl_chan->idle = false;
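
The only unlisted logic in fsl_edma_xfer_desc() is the empty-queue bail-out between lines 772 and 775:

    if (!vdesc)
        return;      /* nothing queued; channel stays idle */
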
784 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
787 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
789 if (unlikely(fsl_chan->pm_state != RUNNING)) {
790 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
795 if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
796 fsl_edma_xfer_desc(fsl_chan);
798 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
803 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
805 fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
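
The continuation of this dma_pool_create() call does not mention fsl_chan and is unlisted; in mainline the pool elements are hardware TCDs with 32-byte alignment (newer kernels pick the 64-bit TCD size when FSL_EDMA_DRV_TCD64 is set), roughly:

    fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
                                         sizeof(struct fsl_edma_hw_tcd),
                                         32, 0);
    return 0;
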
813 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
814 struct fsl_edma_engine *edma = fsl_chan->edma;
818 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
819 fsl_edma_disable_request(fsl_chan);
821 fsl_edma_chan_mux(fsl_chan, 0, false);
822 fsl_chan->edesc = NULL;
823 vchan_get_all_descriptors(&fsl_chan->vchan, &head);
824 fsl_edma_unprep_slave_dma(fsl_chan);
825 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
827 vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
828 dma_pool_destroy(fsl_chan->tcd_pool);
829 fsl_chan->tcd_pool = NULL;
830 fsl_chan->is_sw = false;
831 fsl_chan->srcid = 0;
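
One unlisted guard: the mux teardown at line 821 only runs on SoCs that actually have a DMAMUX, i.e. line 820 is roughly:

    if (edma->drvdata->dmamuxs)
        fsl_edma_chan_mux(fsl_chan, 0, false);

Teardown is otherwise the inverse of alloc: quiesce the hardware under the lock, reclaim descriptors and unmap the device address, then destroy the TCD pool and reset the per-channel software state.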