Lines Matching defs:mchan
129 void (*enable_chan)(struct mdc_chan *mchan);
130 void (*disable_chan)(struct mdc_chan *mchan);
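The two function pointers above live in a per-SoC ops table, so the core driver stays SoC-agnostic while glue code (the Pistachio route-register hooks further down this listing) supplies channel enable/disable. A minimal standalone sketch of that indirection, with illustrative names only:

#include <stdio.h>

struct chan; /* forward declaration so the ops can take a channel */

struct soc_ops {
    void (*enable_chan)(struct chan *c);
    void (*disable_chan)(struct chan *c);
};

struct chan {
    const struct soc_ops *ops;
    int nr;
};

static void demo_enable(struct chan *c)  { printf("enable chan %d\n", c->nr); }
static void demo_disable(struct chan *c) { printf("disable chan %d\n", c->nr); }

static const struct soc_ops demo_ops = {
    .enable_chan  = demo_enable,
    .disable_chan = demo_disable,
};

int main(void)
{
    struct chan c = { .ops = &demo_ops, .nr = 0 };

    c.ops->enable_chan(&c);  /* core code never names the SoC directly */
    c.ops->disable_chan(&c);
    return 0;
}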
159 static inline u32 mdc_chan_readl(struct mdc_chan *mchan, u32 reg)
161 return mdc_readl(mchan->mdma, mchan->chan_nr * 0x040 + reg);
164 static inline void mdc_chan_writel(struct mdc_chan *mchan, u32 val, u32 reg)
166 mdc_writel(mchan->mdma, val, mchan->chan_nr * 0x040 + reg);
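These helpers fold the channel number into the register offset, which implies each channel owns a 0x40-byte window inside one shared MMIO block. A runnable sketch of just that offset arithmetic, assuming the 0x40 stride seen above:

#include <stdint.h>
#include <stdio.h>

#define MDC_CHAN_STRIDE 0x040 /* per-channel window size, from the helpers */

/* hypothetical helper: offset of register 'reg' in channel 'chan_nr' */
static uint32_t mdc_chan_reg(unsigned int chan_nr, uint32_t reg)
{
    return chan_nr * MDC_CHAN_STRIDE + reg;
}

int main(void)
{
    /* register 0x10 of channel 3 lands at 3 * 0x40 + 0x10 = 0xd0 */
    printf("%#x\n", mdc_chan_reg(3, 0x10));
    return 0;
}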
205 static void mdc_list_desc_config(struct mdc_chan *mchan,
210 struct mdc_dma *mdma = mchan->mdma;
217 (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
218 (mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
219 (mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
239 mdc_set_write_width(ldesc, mchan->config.dst_addr_width);
240 burst_size = min(max_burst, mchan->config.dst_maxburst *
241 mchan->config.dst_addr_width);
245 mdc_set_read_width(ldesc, mchan->config.src_addr_width);
247 burst_size = min(max_burst, mchan->config.src_maxburst *
248 mchan->config.src_addr_width);
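The burst computation above caps the hardware burst at the controller maximum and otherwise derives it from the slave's maxburst (in beats) multiplied by the address width (in bytes). A standalone model of that clamp, with hypothetical values:

#include <stdint.h>
#include <stdio.h>

/* hypothetical clamp mirroring the min() in the listing */
static uint32_t clamp_burst(uint32_t max_burst, uint32_t maxburst_beats,
                            uint32_t addr_width_bytes)
{
    uint32_t want = maxburst_beats * addr_width_bytes; /* bytes per burst */

    return want < max_burst ? want : max_burst;
}

int main(void)
{
    /* 16 beats of 4-byte words ask for 64 bytes; a 32-byte cap wins */
    printf("%u\n", clamp_burst(32, 16, 4));
    return 0;
}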
289 struct mdc_chan *mchan = to_mdc_chan(chan);
290 struct mdc_dma *mdma = mchan->mdma;
301 mdesc->chan = mchan;
321 mdc_list_desc_config(mchan, curr, DMA_MEM_TO_MEM, src, dest,
332 return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);
340 static int mdc_check_slave_width(struct mdc_chan *mchan,
346 width = mchan->config.dst_addr_width;
348 width = mchan->config.src_addr_width;
360 if (width > mchan->mdma->bus_width)
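mdc_check_slave_width picks the configured width for the transfer direction and rejects anything wider than the controller's bus. A userspace model of that validation; the accepted widths (1/2/4/8 bytes) are an assumption based on the usual dma_slave_buswidth values, not confirmed by this listing:

#include <stdbool.h>
#include <stdio.h>

static bool slave_width_ok(unsigned int width_bytes,
                           unsigned int bus_width_bytes)
{
    switch (width_bytes) {
    case 1:
    case 2:
    case 4:
    case 8:
        break;              /* plausible slave bus widths */
    default:
        return false;       /* unsupported width */
    }
    return width_bytes <= bus_width_bytes; /* must fit on the bus */
}

int main(void)
{
    printf("4-on-4: %d, 8-on-4: %d\n",
           slave_width_ok(4, 4), slave_width_ok(8, 4));
    return 0;
}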
371 struct mdc_chan *mchan = to_mdc_chan(chan);
372 struct mdc_dma *mdma = mchan->mdma;
383 if (mdc_check_slave_width(mchan, dir) < 0)
389 mdesc->chan = mchan;
418 mdc_list_desc_config(mchan, curr, dir,
420 mchan->config.dst_addr,
423 mdc_list_desc_config(mchan, curr, dir,
424 mchan->config.src_addr,
439 return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);
452 struct mdc_chan *mchan = to_mdc_chan(chan);
453 struct mdc_dma *mdma = mchan->mdma;
466 if (mdc_check_slave_width(mchan, dir) < 0)
472 mdesc->chan = mchan;
498 mdc_list_desc_config(mchan, curr, dir, buf,
499 mchan->config.dst_addr,
502 mdc_list_desc_config(mchan, curr, dir,
503 mchan->config.src_addr,
516 return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);
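Across the slave_sg and cyclic prep paths above, the direction decides which side is the fixed device address: config.dst_addr for mem-to-dev, config.src_addr for dev-to-mem, with the memory side advancing per segment. A toy sketch of that split, using stand-in types for the dmaengine enum:

#include <stdint.h>
#include <stdio.h>

enum xfer_dir { MEM_TO_DEV, DEV_TO_MEM }; /* stand-ins, not the kernel enum */

static void pick_addrs(enum xfer_dir dir, uint64_t mem, uint64_t dev_fifo,
                       uint64_t *src, uint64_t *dst)
{
    if (dir == MEM_TO_DEV) {
        *src = mem;       /* memory side advances per segment */
        *dst = dev_fifo;  /* device FIFO stays fixed */
    } else {
        *src = dev_fifo;
        *dst = mem;
    }
}

int main(void)
{
    uint64_t src, dst;

    pick_addrs(MEM_TO_DEV, 0x80000000ull, 0x1000ull, &src, &dst);
    printf("src=%#llx dst=%#llx\n",
           (unsigned long long)src, (unsigned long long)dst);
    return 0;
}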
524 static void mdc_issue_desc(struct mdc_chan *mchan)
526 struct mdc_dma *mdma = mchan->mdma;
531 vd = vchan_next_desc(&mchan->vc);
538 mchan->desc = mdesc;
541 mchan->chan_nr);
543 mdma->soc->enable_chan(mchan);
545 val = mdc_chan_readl(mchan, MDC_GENERAL_CONFIG);
549 mdc_chan_writel(mchan, val, MDC_GENERAL_CONFIG);
550 val = (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
551 (mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
552 (mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
553 mdc_chan_writel(mchan, val, MDC_READ_PORT_CONFIG);
554 mdc_chan_writel(mchan, mdesc->list_phys, MDC_LIST_NODE_ADDRESS);
555 val = mdc_chan_readl(mchan, MDC_CONTROL_AND_STATUS);
557 mdc_chan_writel(mchan, val, MDC_CONTROL_AND_STATUS);
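mdc_issue_desc programs the channel in a fixed order: SoC enable hook, GENERAL_CONFIG and READ_PORT_CONFIG (the thread ID replicated into the S/R/W fields), the physical address of the first list node, and finally a read-modify-write of CONTROL_AND_STATUS to start the list. A toy model of that ordering against a fake register file; the offsets and bit positions below are placeholders, not the real MDC layout:

#include <stdint.h>
#include <stdio.h>

#define REG_READ_PORT_CONFIG   0x0
#define REG_LIST_NODE_ADDRESS  0x4
#define REG_CONTROL_AND_STATUS 0x8
#define CTRL_LIST_EN           (1u << 0) /* assumed "start list" bit */

static uint32_t regs[3]; /* stand-in for the channel's MMIO window */

static void chan_writel(uint32_t val, uint32_t reg) { regs[reg / 4] = val; }
static uint32_t chan_readl(uint32_t reg)            { return regs[reg / 4]; }

static void issue(uint32_t thread, uint32_t list_phys)
{
    uint32_t val;

    /* thread ID replicated into three fields; shifts are illustrative */
    chan_writel((thread << 0) | (thread << 8) | (thread << 16),
                REG_READ_PORT_CONFIG);
    chan_writel(list_phys, REG_LIST_NODE_ADDRESS);

    /* start bit set last, and via read-modify-write, so the channel only
     * runs once the descriptor address is already programmed */
    val = chan_readl(REG_CONTROL_AND_STATUS);
    chan_writel(val | CTRL_LIST_EN, REG_CONTROL_AND_STATUS);
}

int main(void)
{
    issue(2, 0x1000);
    printf("list=%#x ctrl=%#x\n", chan_readl(REG_LIST_NODE_ADDRESS),
           chan_readl(REG_CONTROL_AND_STATUS));
    return 0;
}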
562 struct mdc_chan *mchan = to_mdc_chan(chan);
565 spin_lock_irqsave(&mchan->vc.lock, flags);
566 if (vchan_issue_pending(&mchan->vc) && !mchan->desc)
567 mdc_issue_desc(mchan);
568 spin_unlock_irqrestore(&mchan->vc.lock, flags);
574 struct mdc_chan *mchan = to_mdc_chan(chan);
588 spin_lock_irqsave(&mchan->vc.lock, flags);
589 vd = vchan_find_desc(&mchan->vc, cookie);
593 } else if (mchan->desc && mchan->desc->vd.tx.cookie == cookie) {
598 mdesc = mchan->desc;
605 val1 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
607 residue = mdc_chan_readl(mchan,
609 val2 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
642 spin_unlock_irqrestore(&mchan->vc.lock, flags);
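The residue read above brackets the transfer-size register with two reads of MDC_CMDS_PROCESSED and retries until they match, so the residue is guaranteed to belong to a stable command count. A runnable model of that snapshot loop, with fake registers standing in for the MMIO:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_cmds_processed = 3; /* stands in for MDC_CMDS_PROCESSED */
static uint32_t fake_transfer_size = 128; /* stands in for the residue register */

static uint32_t read_cmds_processed(void) { return fake_cmds_processed; }
static uint32_t read_residue(void)        { return fake_transfer_size; }

static uint32_t stable_residue(void)
{
    uint32_t before, after, residue;

    do {
        before  = read_cmds_processed();
        residue = read_residue();
        after   = read_cmds_processed();
    } while (before != after); /* a command completed mid-read: retry */

    return residue;
}

int main(void)
{
    printf("residue=%u\n", stable_residue());
    return 0;
}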
649 static unsigned int mdc_get_new_events(struct mdc_chan *mchan)
654 val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
663 val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
674 mdc_chan_writel(mchan, val, MDC_CMDS_PROCESSED);
676 val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
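mdc_get_new_events reads the done counter, acknowledges it with a write-back, then re-reads and loops in case more commands completed during the acknowledge. A sketch of that harvest loop, assuming a free-running hardware counter:

#include <stdint.h>
#include <stdio.h>

static uint32_t hw_done;      /* hardware's commands-done counter (fake) */
static uint32_t sw_processed; /* count we last acknowledged */

static uint32_t read_done(void)           { return hw_done; }
static void ack_processed(uint32_t count) { sw_processed = count; }

static unsigned int get_new_events(void)
{
    uint32_t old = sw_processed;
    uint32_t done1, done2;

    do {
        done1 = read_done();
        ack_processed(done1); /* catch the bookkeeping up to the hardware */
        done2 = read_done();  /* more commands may have finished meanwhile */
    } while (done1 != done2);

    return done1 - old; /* unsigned subtraction tolerates counter wrap */
}

int main(void)
{
    hw_done = 5;
    sw_processed = 3;
    printf("new events: %u\n", get_new_events()); /* prints 2 */
    return 0;
}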
693 struct mdc_chan *mchan = to_mdc_chan(chan);
697 spin_lock_irqsave(&mchan->vc.lock, flags);
699 mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL,
702 if (mchan->desc) {
703 vchan_terminate_vdesc(&mchan->desc->vd);
704 mchan->desc = NULL;
706 vchan_get_all_descriptors(&mchan->vc, &head);
708 mdc_get_new_events(mchan);
710 spin_unlock_irqrestore(&mchan->vc.lock, flags);
712 vchan_dma_desc_free_list(&mchan->vc, &head);
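The terminate path follows the usual virt-dma discipline: cancel the hardware and unlink every descriptor while vc.lock is held, then free them only after the lock is dropped. A minimal pthread model of that detach-then-free ordering, with a plain linked list standing in for the vchan lists:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct desc { struct desc *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct desc *queue; /* stands in for the channel's descriptor lists */

static void terminate_all(void)
{
    struct desc *head, *d;

    pthread_mutex_lock(&lock);
    head  = queue;  /* detach everything while the lock is held */
    queue = NULL;
    pthread_mutex_unlock(&lock);

    while ((d = head) != NULL) { /* free only after unlocking */
        head = d->next;
        free(d);
    }
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        struct desc *d = malloc(sizeof(*d));

        d->next = queue;
        queue = d;
    }
    terminate_all();
    printf("queue empty: %d\n", queue == NULL);
    return 0;
}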
719 struct mdc_chan *mchan = to_mdc_chan(chan);
721 vchan_synchronize(&mchan->vc);
727 struct mdc_chan *mchan = to_mdc_chan(chan);
730 spin_lock_irqsave(&mchan->vc.lock, flags);
731 mchan->config = *config;
732 spin_unlock_irqrestore(&mchan->vc.lock, flags);
739 struct mdc_chan *mchan = to_mdc_chan(chan);
740 struct device *dev = mdma2dev(mchan->mdma);
747 struct mdc_chan *mchan = to_mdc_chan(chan);
748 struct mdc_dma *mdma = mchan->mdma;
752 mdma->soc->disable_chan(mchan);
758 struct mdc_chan *mchan = (struct mdc_chan *)dev_id;
762 spin_lock(&mchan->vc.lock);
764 dev_dbg(mdma2dev(mchan->mdma), "IRQ on channel %d\n", mchan->chan_nr);
766 new_events = mdc_get_new_events(mchan);
771 mdesc = mchan->desc;
773 dev_warn(mdma2dev(mchan->mdma),
775 mchan->chan_nr);
796 mchan->desc = NULL;
798 mdc_issue_desc(mchan);
803 spin_unlock(&mchan->vc.lock);
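The interrupt handler's shape: take the channel lock, harvest new events (warning if the IRQ turned out to be spurious), retire the completed descriptor, and immediately issue the next one so the channel never idles between descriptors. A control-flow-only model, ignoring locking and the cyclic case:

#include <stdbool.h>
#include <stdio.h>

struct chan {
    bool active;  /* a descriptor is on the hardware */
    int  queued;  /* descriptors waiting behind it */
};

static void issue_next(struct chan *c)
{
    if (c->queued > 0) {
        c->queued--;
        c->active = true;
    }
}

static void chan_irq(struct chan *c, unsigned int new_events)
{
    if (!new_events) {
        fprintf(stderr, "spurious IRQ\n"); /* the driver dev_warn()s here */
        return;
    }
    c->active = false; /* current descriptor finished */
    issue_next(c);     /* keep the channel busy */
}

int main(void)
{
    struct chan c = { .active = true, .queued = 1 };

    chan_irq(&c, 1);
    printf("active=%d queued=%d\n", c.active, c.queued);
    return 0;
}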
818 struct mdc_chan *mchan = to_mdc_chan(chan);
820 if (!(dma_spec->args[1] & BIT(mchan->chan_nr)))
823 mchan->periph = dma_spec->args[0];
824 mchan->thread = dma_spec->args[2];
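The xlate fragment implies a three-cell binding: cell 0 selects the peripheral, cell 1 is a bitmask of channels the peripheral may use, and cell 2 is the thread ID. Treat those cell meanings as inferred from this listing rather than the documented binding. A sketch of the mask test:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dma_args { uint32_t periph, chan_mask, thread; }; /* assumed cells */

static bool chan_usable(const struct dma_args *a, unsigned int chan_nr)
{
    return (a->chan_mask & (1u << chan_nr)) != 0;
}

int main(void)
{
    struct dma_args a = { .periph = 5, .chan_mask = 0x0c, .thread = 1 };

    printf("chan 2: %d, chan 0: %d\n",
           chan_usable(&a, 2), chan_usable(&a, 0)); /* 1, 0 */
    return 0;
}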
836 static void pistachio_mdc_enable_chan(struct mdc_chan *mchan)
838 struct mdc_dma *mdma = mchan->mdma;
841 PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
843 PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
844 mchan->periph <<
845 PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr));
848 static void pistachio_mdc_disable_chan(struct mdc_chan *mchan)
850 struct mdc_dma *mdma = mchan->mdma;
853 PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
855 PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
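The Pistachio hooks do a masked update of a shared route register, touching only this channel's field so routes programmed for other channels survive. A standalone read-modify-write model of that update; the 4-bit field width is an assumption, not the real register layout:

#include <stdint.h>
#include <stdio.h>

#define ROUTE_FIELD_BITS 4 /* assumed bits per channel field */

static uint32_t route_reg; /* stands in for the shared route register */

static void set_route(unsigned int chan_nr, uint32_t periph)
{
    unsigned int shift = chan_nr * ROUTE_FIELD_BITS;
    uint32_t mask = ((1u << ROUTE_FIELD_BITS) - 1) << shift;

    /* clear only this channel's field, then install the new route */
    route_reg = (route_reg & ~mask) | ((periph << shift) & mask);
}

int main(void)
{
    set_route(0, 0x3);
    set_route(1, 0x7);
    printf("route=%#x\n", route_reg); /* 0x73: both fields intact */
    return 0;
}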
970 struct mdc_chan *mchan = &mdma->channels[i];
972 mchan->mdma = mdma;
973 mchan->chan_nr = i;
974 mchan->irq = platform_get_irq(pdev, i);
975 if (mchan->irq < 0)
976 return mchan->irq;
978 ret = devm_request_irq(&pdev->dev, mchan->irq, mdc_chan_irq,
980 dev_name(&pdev->dev), mchan);
984 mchan->vc.desc_free = mdc_desc_free;
985 vchan_init(&mchan->vc, &mdma->dma_dev);
1026 struct mdc_chan *mchan, *next;
1031 list_for_each_entry_safe(mchan, next, &mdma->dma_dev.channels,
1033 list_del(&mchan->vc.chan.device_node);
1035 devm_free_irq(&pdev->dev, mchan->irq, mchan);
1037 tasklet_kill(&mchan->vc.task);
1055 struct mdc_chan *mchan = &mdma->channels[i];
1057 if (unlikely(mchan->desc))