Lines matching refs:bchan
344 #define IS_BUSY(chan) (CIRC_SPACE(bchan->tail, bchan->head,\
460 * @bchan: bam channel
464 static void bam_reset_channel(struct bam_chan *bchan)
466 struct bam_device *bdev = bchan->bdev;
468 lockdep_assert_held(&bchan->vc.lock);
471 writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST));
472 writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST));
478 bchan->initialized = 0;
483 * @bchan: bam channel
488 static void bam_chan_init_hw(struct bam_chan *bchan,
491 struct bam_device *bdev = bchan->bdev;
495 bam_reset_channel(bchan);
501 writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
502 bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR));
504 bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES));
508 bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
512 val |= BIT(bchan->id);
523 writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL));
525 bchan->initialized = 1;
528 bchan->head = 0;
529 bchan->tail = 0;
540 struct bam_chan *bchan = to_bam_chan(chan);
541 struct bam_device *bdev = bchan->bdev;
543 if (bchan->fifo_virt)
547 bchan->fifo_virt = dma_alloc_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
548 &bchan->fifo_phys, GFP_KERNEL);
550 if (!bchan->fifo_virt) {
570 struct bam_chan *bchan = to_bam_chan(chan);
571 struct bam_device *bdev = bchan->bdev;
582 if (!list_empty(&bchan->desc_list)) {
583 dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
587 spin_lock_irqsave(&bchan->vc.lock, flags);
588 bam_reset_channel(bchan);
589 spin_unlock_irqrestore(&bchan->vc.lock, flags);
591 dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
592 bchan->fifo_phys);
593 bchan->fifo_virt = NULL;
597 val &= ~BIT(bchan->id);
601 writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
626 struct bam_chan *bchan = to_bam_chan(chan);
629 spin_lock_irqsave(&bchan->vc.lock, flag);
630 memcpy(&bchan->slave, cfg, sizeof(*cfg));
631 bchan->reconfigure = 1;
632 spin_unlock_irqrestore(&bchan->vc.lock, flag);
652 struct bam_chan *bchan = to_bam_chan(chan);
653 struct bam_device *bdev = bchan->bdev;
714 return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
727 struct bam_chan *bchan = to_bam_chan(chan);
733 spin_lock_irqsave(&bchan->vc.lock, flag);
747 if (!list_empty(&bchan->desc_list)) {
748 async_desc = list_first_entry(&bchan->desc_list,
750 bam_chan_init_hw(bchan, async_desc->dir);
754 &bchan->desc_list, desc_node) {
755 list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
759 vchan_get_all_descriptors(&bchan->vc, &head);
760 spin_unlock_irqrestore(&bchan->vc.lock, flag);
762 vchan_dma_desc_free_list(&bchan->vc, &head);
774 struct bam_chan *bchan = to_bam_chan(chan);
775 struct bam_device *bdev = bchan->bdev;
783 spin_lock_irqsave(&bchan->vc.lock, flag);
784 writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
785 bchan->paused = 1;
786 spin_unlock_irqrestore(&bchan->vc.lock, flag);
800 struct bam_chan *bchan = to_bam_chan(chan);
801 struct bam_device *bdev = bchan->bdev;
809 spin_lock_irqsave(&bchan->vc.lock, flag);
810 writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
811 bchan->paused = 0;
812 spin_unlock_irqrestore(&bchan->vc.lock, flag);
839 struct bam_chan *bchan = &bdev->channels[i];
849 spin_lock_irqsave(&bchan->vc.lock, flags);
856 avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1);
858 if (offset < bchan->head)
862 &bchan->desc_list, desc_node) {
868 bchan->head += async_desc->xfer_len;
869 bchan->head %= MAX_DESCRIPTORS;
884 &bchan->vc.desc_issued);
889 spin_unlock_irqrestore(&bchan->vc.lock, flags);
947 struct bam_chan *bchan = to_bam_chan(chan);
960 return bchan->paused ? DMA_PAUSED : ret;
962 spin_lock_irqsave(&bchan->vc.lock, flags);
963 vd = vchan_find_desc(&bchan->vc, cookie);
967 list_for_each_entry(async_desc, &bchan->desc_list, desc_node) {
977 spin_unlock_irqrestore(&bchan->vc.lock, flags);
981 if (ret == DMA_IN_PROGRESS && bchan->paused)
989 * @bchan: bam dma channel
992 static void bam_apply_new_config(struct bam_chan *bchan,
995 struct bam_device *bdev = bchan->bdev;
1000 maxburst = bchan->slave.src_maxburst;
1002 maxburst = bchan->slave.dst_maxburst;
1008 bchan->reconfigure = 0;
1013 * @bchan: bam dma channel
1015 static void bam_start_dma(struct bam_chan *bchan)
1017 struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
1018 struct bam_device *bdev = bchan->bdev;
1021 struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
1027 lockdep_assert_held(&bchan->vc.lock);
1036 while (vd && !IS_BUSY(bchan)) {
1042 if (!bchan->initialized)
1043 bam_chan_init_hw(bchan, async_desc->dir);
1046 if (bchan->reconfigure)
1047 bam_apply_new_config(bchan, async_desc->dir);
1050 avail = CIRC_SPACE(bchan->tail, bchan->head,
1063 vd = vchan_next_desc(&bchan->vc);
1081 if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
1082 u32 partial = MAX_DESCRIPTORS - bchan->tail;
1084 memcpy(&fifo[bchan->tail], desc,
1090 memcpy(&fifo[bchan->tail], desc,
1095 bchan->tail += async_desc->xfer_len;
1096 bchan->tail %= MAX_DESCRIPTORS;
1097 list_add_tail(&async_desc->desc_node, &bchan->desc_list);
1102 writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
1103 bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));
1118 struct bam_chan *bchan;
1124 bchan = &bdev->channels[i];
1125 spin_lock_irqsave(&bchan->vc.lock, flags);
1127 if (!list_empty(&bchan->vc.desc_issued) && !IS_BUSY(bchan))
1128 bam_start_dma(bchan);
1129 spin_unlock_irqrestore(&bchan->vc.lock, flags);
1142 struct bam_chan *bchan = to_bam_chan(chan);
1145 spin_lock_irqsave(&bchan->vc.lock, flags);
1148 if (vchan_issue_pending(&bchan->vc) && !IS_BUSY(bchan))
1149 bam_start_dma(bchan);
1151 spin_unlock_irqrestore(&bchan->vc.lock, flags);
1216 static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
1219 bchan->id = index;
1220 bchan->bdev = bdev;
1222 vchan_init(&bchan->vc, &bdev->common);
1223 bchan->vc.desc_free = bam_dma_free_desc;
1224 INIT_LIST_HEAD(&bchan->desc_list);
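The recurring pattern in the matches above is a software-managed descriptor ring: the driver produces descriptors at bchan->tail (lines 1081-1096), the hardware consumes them up to bchan->head (lines 856-869), free space and fill level are computed with CIRC_SPACE()/CIRC_CNT() (lines 344, 856, 1050), and a write that would run past the end of the FIFO is split into two memcpy() calls (lines 1081-1090). The following standalone sketch illustrates only that arithmetic, assuming a power-of-two ring size; RING_SIZE, desc_ring and ring_push are invented names for this illustration and are not part of the driver.

    /*
     * Illustrative ring-index arithmetic, not driver code.
     * CIRC_CNT/CIRC_SPACE follow the usual circular-buffer convention
     * (power-of-two size); names and constants here are hypothetical.
     */
    #include <stdint.h>
    #include <string.h>

    #define RING_SIZE 4096                            /* power of two */
    #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
    #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

    struct desc { uint64_t data; };

    struct desc desc_ring[RING_SIZE];

    /*
     * Producer side: copy xfer_len descriptors into the ring starting at
     * tail, splitting the copy at the wrap point, and return the new tail.
     * The caller is expected to have checked CIRC_SPACE() first, as the
     * driver does before queueing descriptors.
     */
    unsigned int ring_push(unsigned int tail, const struct desc *src,
    		       unsigned int xfer_len)
    {
    	if (tail + xfer_len > RING_SIZE) {
    		unsigned int partial = RING_SIZE - tail;

    		memcpy(&desc_ring[tail], src, partial * sizeof(*src));
    		memcpy(desc_ring, src + partial,
    		       (xfer_len - partial) * sizeof(*src));
    	} else {
    		memcpy(&desc_ring[tail], src, xfer_len * sizeof(*src));
    	}

    	/* Consumer advances its index the same way once work completes. */
    	return (tail + xfer_len) % RING_SIZE;
    }

In the driver itself the ring lives in the write-combined FIFO allocated at lines 547-548, the producer index is advanced at lines 1095-1096 and published to the peripheral via BAM_P_EVNT_REG (lines 1102-1103), and the consumer index is advanced from the interrupt path at lines 868-869.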