Lines Matching defs:fsl_chan

45 static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
47 struct edma_regs *regs = &fsl_chan->edma->regs;
48 u32 ch = fsl_chan->vchan.chan.chan_id;
50 if (fsl_chan->edma->drvdata->version == v1) {
51 edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
52 edma_writeb(fsl_chan->edma, ch, regs->serq);
62 void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
64 struct edma_regs *regs = &fsl_chan->edma->regs;
65 u32 ch = fsl_chan->vchan.chan.chan_id;
67 if (fsl_chan->edma->drvdata->version == v1) {
68 edma_writeb(fsl_chan->edma, ch, regs->cerq);
69 edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
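
fsl_edma_enable_request and fsl_edma_disable_request never read-modify-write a shared enable word; they write the channel number (or a SEEI/CEEI encoding of it) into dedicated set/clear registers, enabling the error interrupt before the request and disabling in the reverse order, as the fragments show. A standalone C model of that idiom; the field names and the 32-channel width are assumptions for illustration only:

#include <stdio.h>

/* Toy model: the controller keeps one request-enable bit and one
 * error-interrupt bit per channel, and software writes a channel number
 * to "set"/"clear" registers instead of touching the shared word. */
struct edma_model {
    unsigned int erq;   /* request-enable bits, one per channel */
    unsigned int eei;   /* error-interrupt-enable bits */
};

static void model_enable_request(struct edma_model *e, unsigned int ch)
{
    e->eei |= 1u << ch;     /* SEEI first: arm the error interrupt */
    e->erq |= 1u << ch;     /* SERQ: let the channel accept requests */
}

static void model_disable_request(struct edma_model *e, unsigned int ch)
{
    e->erq &= ~(1u << ch);  /* CERQ first: stop taking requests */
    e->eei &= ~(1u << ch);  /* CEEI: drop the error interrupt */
}

int main(void)
{
    struct edma_model e = { 0, 0 };

    model_enable_request(&e, 3);
    printf("erq=%#x eei=%#x\n", e.erq, e.eei);  /* erq=0x8 eei=0x8 */
    model_disable_request(&e, 3);
    printf("erq=%#x eei=%#x\n", e.erq, e.eei);  /* erq=0 eei=0 */
    return 0;
}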
80 static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
93 static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
106 void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
109 u32 ch = fsl_chan->vchan.chan.chan_id;
113 u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;
115 chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
116 ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
118 if (fsl_chan->edma->drvdata->mux_swap)
121 muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
124 if (fsl_chan->edma->drvdata->version == v3)
125 mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
127 mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
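
fsl_edma_chan_mux derives which DMAMUX instance serves a channel, and the slot offset inside that mux, from plain integer arithmetic on the channel id. A standalone calculation of that mapping; the 32-channel, 2-mux figures below are example values, not taken from the listing:

#include <stdio.h>

int main(void)
{
    unsigned int n_chans = 32, dmamux_nr = 2;       /* example values only */
    unsigned int chans_per_mux = n_chans / dmamux_nr;

    for (unsigned int ch = 0; ch < n_chans; ch += 7) {
        unsigned int mux = ch / chans_per_mux;      /* muxbase[] index */
        unsigned int ch_off = ch % chans_per_mux;   /* slot within that mux */

        printf("chan %2u -> mux %u, slot %2u\n", ch, mux, ch_off);
    }
    return 0;
}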
162 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
166 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
167 fsl_edma_disable_request(fsl_chan);
168 fsl_chan->edesc = NULL;
169 fsl_chan->idle = true;
170 vchan_get_all_descriptors(&fsl_chan->vchan, &head);
171 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
172 vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
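
fsl_edma_terminate_all follows the usual virt-dma teardown shape: stop the hardware and detach every queued descriptor while holding the channel lock, then free the detached list only after the lock is dropped, so the potentially slow cleanup never runs under the spinlock. A minimal pthread model of that shape, with invented structures standing in for the vchan types:

#include <pthread.h>
#include <stdlib.h>

struct desc {
    struct desc *next;
};

struct chan {
    pthread_mutex_t lock;
    struct desc *issued;    /* descriptors queued on the channel */
    struct desc *active;    /* descriptor currently on the hardware */
};

static void model_terminate_all(struct chan *c)
{
    struct desc *head, *d;

    pthread_mutex_lock(&c->lock);
    /* the real driver disables the hardware request here */
    c->active = NULL;
    head = c->issued;       /* detach the whole pending list... */
    c->issued = NULL;
    pthread_mutex_unlock(&c->lock);

    while ((d = head) != NULL) {    /* ...and free it outside the lock */
        head = d->next;
        free(d);
    }
}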
179 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
182 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
183 if (fsl_chan->edesc) {
184 fsl_edma_disable_request(fsl_chan);
185 fsl_chan->status = DMA_PAUSED;
186 fsl_chan->idle = true;
188 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
195 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
198 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
199 if (fsl_chan->edesc) {
200 fsl_edma_enable_request(fsl_chan);
201 fsl_chan->status = DMA_IN_PROGRESS;
202 fsl_chan->idle = false;
204 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
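
fsl_edma_pause and fsl_edma_resume only touch the channel when a descriptor is actually active; with nothing on the hardware they do no more than take and release the lock. A trivial model of that guard, with stand-in names for DMA_PAUSED and DMA_IN_PROGRESS:

#include <stdbool.h>

enum chan_status { STATUS_IDLE, STATUS_IN_PROGRESS, STATUS_PAUSED };

struct chan_state {
    bool has_active_desc;   /* models fsl_chan->edesc != NULL */
    enum chan_status status;
    bool idle;
};

/* The real pause also disables the hardware request before flagging
 * the channel paused; resume re-enables it. */
static void model_pause(struct chan_state *c)
{
    if (!c->has_active_desc)
        return;
    c->status = STATUS_PAUSED;
    c->idle = true;
}

static void model_resume(struct chan_state *c)
{
    if (!c->has_active_desc)
        return;
    c->status = STATUS_IN_PROGRESS;
    c->idle = false;
}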
209 static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
211 if (fsl_chan->dma_dir != DMA_NONE)
212 dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
213 fsl_chan->dma_dev_addr,
214 fsl_chan->dma_dev_size,
215 fsl_chan->dma_dir, 0);
216 fsl_chan->dma_dir = DMA_NONE;
219 static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
222 struct device *dev = fsl_chan->vchan.chan.device->dev;
230 addr = fsl_chan->cfg.dst_addr;
231 size = fsl_chan->cfg.dst_maxburst;
235 addr = fsl_chan->cfg.src_addr;
236 size = fsl_chan->cfg.src_maxburst;
244 if (fsl_chan->dma_dir == dma_dir)
247 fsl_edma_unprep_slave_dma(fsl_chan);
249 fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
250 if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
252 fsl_chan->dma_dev_size = size;
253 fsl_chan->dma_dir = dma_dir;
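
fsl_edma_prep_slave_dma keeps one cached mapping of the device address per channel: a transfer in the same direction reuses it, while a direction change unmaps and remaps (the driver does this with dma_map_resource()/dma_unmap_resource()). The slave_config fragment just below also drops the cache, since a new configuration may change the address. A self-contained model of the caching decision, with stub counters instead of real mappings:

#include <stdbool.h>
#include <stdio.h>

enum dir { DIR_NONE, DEV_TO_MEM, MEM_TO_DEV };

struct chan_map {
    enum dir cur_dir;   /* models fsl_chan->dma_dir */
    int maps, unmaps;   /* instrumentation for the example */
};

static void model_unprep(struct chan_map *c)
{
    if (c->cur_dir != DIR_NONE)
        c->unmaps++;    /* dma_unmap_resource() in the driver */
    c->cur_dir = DIR_NONE;
}

static bool model_prep(struct chan_map *c, enum dir want)
{
    if (c->cur_dir == want)
        return true;    /* same direction: reuse the mapping */
    model_unprep(c);
    c->maps++;          /* dma_map_resource() in the driver */
    c->cur_dir = want;
    return true;
}

int main(void)
{
    struct chan_map c = { DIR_NONE, 0, 0 };

    model_prep(&c, MEM_TO_DEV);
    model_prep(&c, MEM_TO_DEV);     /* reused, no new mapping */
    model_prep(&c, DEV_TO_MEM);     /* direction change: unmap + remap */
    printf("maps=%d unmaps=%d\n", c.maps, c.unmaps);    /* maps=2 unmaps=1 */
    return 0;
}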
261 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
263 memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
264 fsl_edma_unprep_slave_dma(fsl_chan);
270 static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
273 struct fsl_edma_desc *edesc = fsl_chan->edesc;
274 struct edma_regs *regs = &fsl_chan->edma->regs;
275 u32 ch = fsl_chan->vchan.chan.chan_id;
282 for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
290 cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
292 cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);
295 for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
316 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
326 return fsl_chan->status;
328 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
329 vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
330 if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
332 fsl_edma_desc_residue(fsl_chan, vdesc, true);
335 fsl_edma_desc_residue(fsl_chan, vdesc, false);
339 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
341 return fsl_chan->status;
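
The residue path in fsl_edma_tx_status first sums the byte count of every TCD in the descriptor; when the queried cookie is the one currently running it walks the TCDs again, dropping the ones already completed and using the engine's current source (or destination) address to work out how much of the in-flight TCD remains. A standalone model of that walk; the two-field TCD below is an invented simplification of the real hardware TCD:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct tcd {
    uint32_t addr;  /* bus address this TCD reads from (or writes to) */
    uint32_t len;   /* bytes moved by this TCD */
};

/* Remaining bytes, given the address the engine is currently working on. */
static size_t model_residue(const struct tcd *tcds, size_t n, uint32_t cur)
{
    size_t len = 0, i;

    for (i = 0; i < n; i++)
        len += tcds[i].len;     /* total descriptor size */

    for (i = 0; i < n; i++) {
        len -= tcds[i].len;     /* assume this TCD is done... */
        if (cur >= tcds[i].addr && cur < tcds[i].addr + tcds[i].len) {
            /* ...unless the engine is inside it: add back what is left */
            len += tcds[i].addr + tcds[i].len - cur;
            break;
        }
    }
    return len;
}

int main(void)
{
    struct tcd t[] = { { 0x1000, 64 }, { 0x2000, 64 }, { 0x3000, 64 } };

    /* engine is 16 bytes into the second TCD: 48 + 64 bytes remain */
    printf("%zu\n", model_residue(t, 3, 0x2010));   /* prints 112 */
    return 0;
}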
345 static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
348 struct fsl_edma_engine *edma = fsl_chan->edma;
349 struct edma_regs *regs = &fsl_chan->edma->regs;
350 u32 ch = fsl_chan->vchan.chan.chan_id;
421 static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
431 fsl_desc->echan = fsl_chan;
434 fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
443 dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
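
fsl_edma_alloc_desc allocates one TCD per segment from the channel's tcd_pool and, if any allocation fails part-way, returns the already-allocated TCDs to the pool before giving up. A user-space sketch of that unwind pattern, with malloc/free standing in for dma_pool_alloc()/dma_pool_free():

#include <stdlib.h>

struct tcd_slot {
    void *vtcd;             /* CPU pointer; dma_pool_alloc() result in the driver */
};

struct desc {
    size_t n_tcds;
    struct tcd_slot tcd[];  /* one slot per hardware TCD */
};

/* Allocate n TCDs, unwinding everything already allocated on failure. */
static struct desc *model_alloc_desc(size_t n)
{
    struct desc *d = malloc(sizeof(*d) + n * sizeof(d->tcd[0]));
    size_t i;

    if (!d)
        return NULL;
    d->n_tcds = n;

    for (i = 0; i < n; i++) {
        d->tcd[i].vtcd = malloc(32);    /* stands in for a pool TCD */
        if (!d->tcd[i].vtcd)
            goto err;
    }
    return d;

err:
    while (i--)
        free(d->tcd[i].vtcd);           /* dma_pool_free() in the driver */
    free(d);
    return NULL;
}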
454 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
464 if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
468 fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
476 fsl_chan->attr =
477 fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
478 nbytes = fsl_chan->cfg.dst_addr_width *
479 fsl_chan->cfg.dst_maxburst;
481 fsl_chan->attr =
482 fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
483 nbytes = fsl_chan->cfg.src_addr_width *
484 fsl_chan->cfg.src_maxburst;
498 dst_addr = fsl_chan->dma_dev_addr;
499 soff = fsl_chan->cfg.dst_addr_width;
502 src_addr = fsl_chan->dma_dev_addr;
505 doff = fsl_chan->cfg.src_addr_width;
509 fsl_chan->attr, soff, nbytes, 0, iter,
514 return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
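
Both prep routines derive the per-request byte count from the slave configuration, nbytes = addr_width * maxburst, and, as far as the surrounding code suggests, size each TCD's iteration count as the segment length divided by nbytes. A quick standalone calculation with example numbers (the 4-byte width, 8-beat burst and 1 KiB segment are illustrative only):

#include <stddef.h>
#include <stdio.h>

int main(void)
{
    size_t addr_width = 4;      /* bytes per bus access (example) */
    size_t maxburst   = 8;      /* accesses per DMA request (example) */
    size_t seg_len    = 1024;   /* bytes per segment or period (example) */

    /* bytes moved each time the peripheral raises a request */
    size_t nbytes = addr_width * maxburst;
    /* minor-loop iterations needed to cover one segment */
    size_t iter = seg_len / nbytes;

    printf("nbytes=%zu iter=%zu\n", nbytes, iter);  /* nbytes=32 iter=32 */
    return 0;
}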
523 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
533 if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
536 fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
543 fsl_chan->attr =
544 fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
545 nbytes = fsl_chan->cfg.dst_addr_width *
546 fsl_chan->cfg.dst_maxburst;
548 fsl_chan->attr =
549 fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
550 nbytes = fsl_chan->cfg.src_addr_width *
551 fsl_chan->cfg.src_maxburst;
560 dst_addr = fsl_chan->dma_dev_addr;
561 soff = fsl_chan->cfg.dst_addr_width;
564 src_addr = fsl_chan->dma_dev_addr;
567 doff = fsl_chan->cfg.src_addr_width;
574 dst_addr, fsl_chan->attr, soff,
580 dst_addr, fsl_chan->attr, soff,
586 return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
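
In both prep routines the device side of the transfer is the cached dma_dev_addr and only the memory side strides: for mem-to-dev the source offset is the configured destination width, for dev-to-mem the destination offset is the source width. The zero offsets on the device side are my reading of the fragments, stated here as an assumption:

#include <stdint.h>

enum dir { MEM_TO_DEV, DEV_TO_MEM };

struct xfer {
    uint32_t src, dst;  /* bus addresses */
    int16_t soff, doff; /* per-access address increments */
};

/* dev_addr: mapped device FIFO/register; mem_addr: buffer in memory;
 * width: configured slave address width in bytes. */
static struct xfer model_setup(enum dir d, uint32_t dev_addr,
                               uint32_t mem_addr, int16_t width)
{
    struct xfer x;

    if (d == MEM_TO_DEV) {
        x.src = mem_addr;
        x.dst = dev_addr;
        x.soff = width;     /* walk through memory... */
        x.doff = 0;         /* ...while the device address stays fixed */
    } else {
        x.src = dev_addr;
        x.dst = mem_addr;
        x.soff = 0;
        x.doff = width;
    }
    return x;
}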
590 void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
594 lockdep_assert_held(&fsl_chan->vchan.lock);
596 vdesc = vchan_next_desc(&fsl_chan->vchan);
599 fsl_chan->edesc = to_fsl_edma_desc(vdesc);
600 fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
601 fsl_edma_enable_request(fsl_chan);
602 fsl_chan->status = DMA_IN_PROGRESS;
603 fsl_chan->idle = false;
609 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
612 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
614 if (unlikely(fsl_chan->pm_state != RUNNING)) {
615 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
620 if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
621 fsl_edma_xfer_desc(fsl_chan);
623 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
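
fsl_edma_issue_pending is where transfers actually start: under the channel lock it bails out if the channel's PM state is not RUNNING, and otherwise, if nothing is already on the hardware, calls fsl_edma_xfer_desc, which takes the next issued descriptor, programs its first TCD and enables the request. A compact model of that hand-off; the vchan_issue_pending() step that moves freshly submitted descriptors onto the issued list is folded away, and a pthread lock plus a plain queue stand in for the vchan machinery:

#include <pthread.h>
#include <stddef.h>

struct vdesc {
    struct vdesc *next;
};

struct vchan {
    pthread_mutex_t lock;
    struct vdesc *issued;   /* descriptors ready for the hardware */
    struct vdesc *active;   /* models fsl_chan->edesc */
    int running;            /* models fsl_chan->status / idle */
};

/* Models fsl_edma_xfer_desc(): caller must hold the lock. */
static void model_xfer_desc(struct vchan *c)
{
    struct vdesc *v = c->issued;

    if (!v)
        return;
    c->issued = v->next;
    c->active = v;          /* program the TCD, then enable the request */
    c->running = 1;
}

/* Models fsl_edma_issue_pending(): start only if nothing is active. */
static void model_issue_pending(struct vchan *c)
{
    pthread_mutex_lock(&c->lock);
    if (!c->active)
        model_xfer_desc(c);
    pthread_mutex_unlock(&c->lock);
}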
629 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
631 fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
640 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
644 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
645 fsl_edma_disable_request(fsl_chan);
646 fsl_edma_chan_mux(fsl_chan, 0, false);
647 fsl_chan->edesc = NULL;
648 vchan_get_all_descriptors(&fsl_chan->vchan, &head);
649 fsl_edma_unprep_slave_dma(fsl_chan);
650 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
652 vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
653 dma_pool_destroy(fsl_chan->tcd_pool);
654 fsl_chan->tcd_pool = NULL;
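
The tcd_pool created in fsl_edma_alloc_chan_resources lives exactly as long as a client owns the channel: free_chan_resources tears down the hardware side, frees any remaining descriptors, and only then destroys the pool, NULLing the pointer afterwards. A stub model of that pairing; pool_create/pool_destroy are invented stand-ins for dma_pool_create()/dma_pool_destroy():

#include <stdlib.h>

struct pool;    /* opaque stand-in for struct dma_pool */

static struct pool *pool_create(void)   { return malloc(1); }
static void pool_destroy(struct pool *p) { free(p); }

struct chan {
    struct pool *tcd_pool;
};

/* Models fsl_edma_alloc_chan_resources(): the pool exists only while a
 * client owns the channel. */
static int model_alloc_chan_resources(struct chan *c)
{
    c->tcd_pool = pool_create();
    return c->tcd_pool ? 0 : -1;
}

/* Models fsl_edma_free_chan_resources(): all TCDs must be back in the
 * pool before this runs; clearing the pointer keeps later misuse obvious. */
static void model_free_chan_resources(struct chan *c)
{
    pool_destroy(c->tcd_pool);
    c->tcd_pool = NULL;
}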