Lines Matching refs:bchan

344 #define IS_BUSY(chan)	(CIRC_SPACE(bchan->tail, bchan->head,\
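The IS_BUSY() test above is circular-buffer arithmetic: bchan->tail is the producer index (advanced in bam_start_dma()) and bchan->head the consumer index (advanced on completion), compared with the CIRC_SPACE()/CIRC_CNT() helpers from <linux/circ_buf.h>, which assume a power-of-two ring size. A minimal user-space sketch of that accounting, with RING_SIZE as a made-up stand-in for MAX_DESCRIPTORS + 1:

#include <stdio.h>

#define RING_SIZE 16	/* hypothetical; must be a power of two */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

int main(void)
{
	unsigned int prod = 5, cons = 0;	/* 5 descriptors queued, none retired */

	printf("queued=%u free=%u full=%d\n",
	       CIRC_CNT(prod, cons, RING_SIZE),
	       CIRC_SPACE(prod, cons, RING_SIZE),
	       CIRC_SPACE(prod, cons, RING_SIZE) == 0);
	return 0;
}

With prod=5 and cons=0 this prints queued=5 free=10 full=0; the channel counts as busy exactly when the free count reaches zero.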
420 * @bchan: bam channel
424 static void bam_reset_channel(struct bam_chan *bchan)
426 struct bam_device *bdev = bchan->bdev;
428 lockdep_assert_held(&bchan->vc.lock);
431 writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST));
432 writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST));
438 bchan->initialized = 0;
443 * @bchan: bam channel
448 static void bam_chan_init_hw(struct bam_chan *bchan,
451 struct bam_device *bdev = bchan->bdev;
455 bam_reset_channel(bchan);
461 writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
462 bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR));
464 bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES));
468 bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
472 val |= BIT(bchan->id);
483 writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL));
485 bchan->initialized = 1;
488 bchan->head = 0;
489 bchan->tail = 0;
500 struct bam_chan *bchan = to_bam_chan(chan);
501 struct bam_device *bdev = bchan->bdev;
503 if (bchan->fifo_virt)
507 bchan->fifo_virt = dma_alloc_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
508 &bchan->fifo_phys, GFP_KERNEL);
510 if (!bchan->fifo_virt) {
535 struct bam_chan *bchan = to_bam_chan(chan);
536 struct bam_device *bdev = bchan->bdev;
547 if (!list_empty(&bchan->desc_list)) {
548 dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
552 spin_lock_irqsave(&bchan->vc.lock, flags);
553 bam_reset_channel(bchan);
554 spin_unlock_irqrestore(&bchan->vc.lock, flags);
556 dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
557 bchan->fifo_phys);
558 bchan->fifo_virt = NULL;
562 val &= ~BIT(bchan->id);
566 writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
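The alloc/free fragments above pair dma_alloc_wc() with dma_free_wc() for the descriptor FIFO, resetting the channel under vc.lock before the memory is released. A hedged sketch of that pairing; FIFO_BYTES and struct fifo_ring are illustrative stand-ins, not the driver's BAM_DESC_FIFO_SIZE or fields:

#include <linux/dma-mapping.h>

#define FIFO_BYTES 32768	/* placeholder size for the descriptor ring */

struct fifo_ring {
	void *virt;
	dma_addr_t phys;
};

static int fifo_alloc(struct device *dev, struct fifo_ring *ring)
{
	/* write-combined mapping suitable for device-visible descriptors */
	ring->virt = dma_alloc_wc(dev, FIFO_BYTES, &ring->phys, GFP_KERNEL);
	if (!ring->virt)
		return -ENOMEM;
	return 0;
}

static void fifo_free(struct device *dev, struct fifo_ring *ring)
{
	dma_free_wc(dev, FIFO_BYTES, ring->virt, ring->phys);
	ring->virt = NULL;
}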
584 struct bam_chan *bchan = to_bam_chan(chan);
587 spin_lock_irqsave(&bchan->vc.lock, flag);
588 memcpy(&bchan->slave, cfg, sizeof(*cfg));
589 bchan->reconfigure = 1;
590 spin_unlock_irqrestore(&bchan->vc.lock, flag);
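The device_config fragments above cache the incoming dma_slave_config under the channel lock and set a reconfigure flag so the settings are applied on the next transfer. A sketch of that pattern, assuming a hypothetical my_chan wrapper around virt_dma_chan (using the in-tree "../virt-dma.h" header shared by drivers under drivers/dma/):

#include <linux/dmaengine.h>
#include "../virt-dma.h"	/* in-tree private header for drivers/dma/ */

struct my_chan {
	struct virt_dma_chan vc;
	struct dma_slave_config slave_cfg;
	unsigned int reconfigure;
};

static inline struct my_chan *to_my_chan(struct dma_chan *chan)
{
	return container_of(chan, struct my_chan, vc.chan);
}

static int my_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
	struct my_chan *mchan = to_my_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&mchan->vc.lock, flags);
	mchan->slave_cfg = *cfg;	/* applied when the next descriptor starts */
	mchan->reconfigure = 1;
	spin_unlock_irqrestore(&mchan->vc.lock, flags);

	return 0;
}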
610 struct bam_chan *bchan = to_bam_chan(chan);
611 struct bam_device *bdev = bchan->bdev;
672 return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
689 struct bam_chan *bchan = to_bam_chan(chan);
695 spin_lock_irqsave(&bchan->vc.lock, flag);
709 if (!list_empty(&bchan->desc_list)) {
710 async_desc = list_first_entry(&bchan->desc_list,
712 bam_chan_init_hw(bchan, async_desc->dir);
716 &bchan->desc_list, desc_node) {
717 list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
721 vchan_get_all_descriptors(&bchan->vc, &head);
722 spin_unlock_irqrestore(&bchan->vc.lock, flag);
724 vchan_dma_desc_free_list(&bchan->vc, &head);
736 struct bam_chan *bchan = to_bam_chan(chan);
737 struct bam_device *bdev = bchan->bdev;
745 spin_lock_irqsave(&bchan->vc.lock, flag);
746 writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
747 bchan->paused = 1;
748 spin_unlock_irqrestore(&bchan->vc.lock, flag);
762 struct bam_chan *bchan = to_bam_chan(chan);
763 struct bam_device *bdev = bchan->bdev;
771 spin_lock_irqsave(&bchan->vc.lock, flag);
772 writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
773 bchan->paused = 0;
774 spin_unlock_irqrestore(&bchan->vc.lock, flag);
801 struct bam_chan *bchan = &bdev->channels[i];
811 spin_lock_irqsave(&bchan->vc.lock, flags);
818 avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1);
820 if (offset < bchan->head)
824 &bchan->desc_list, desc_node) {
830 bchan->head += async_desc->xfer_len;
831 bchan->head %= MAX_DESCRIPTORS;
846 &bchan->vc.desc_issued);
851 spin_unlock_irqrestore(&bchan->vc.lock, flags);
909 struct bam_chan *bchan = to_bam_chan(chan);
922 return bchan->paused ? DMA_PAUSED : ret;
924 spin_lock_irqsave(&bchan->vc.lock, flags);
925 vd = vchan_find_desc(&bchan->vc, cookie);
929 list_for_each_entry(async_desc, &bchan->desc_list, desc_node) {
939 spin_unlock_irqrestore(&bchan->vc.lock, flags);
943 if (ret == DMA_IN_PROGRESS && bchan->paused)
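The tx_status fragments above follow the usual virt-dma pattern: resolve the cookie with dma_cookie_status(), then walk the channel's descriptors under vc.lock to compute a residue, reporting DMA_PAUSED while the channel is halted. A sketch assuming hypothetical my_chan/my_desc types; the single 'remaining' field stands in for the driver's per-scatterlist accounting:

#include <linux/dmaengine.h>
#include "../dmaengine.h"	/* dma_cookie_status()/dma_set_residue() */
#include "../virt-dma.h"

struct my_desc {
	struct virt_dma_desc vd;
	size_t remaining;		/* illustrative residue bookkeeping */
};

struct my_chan {
	struct virt_dma_chan vc;
	bool paused;
};

static inline struct my_chan *to_my_chan(struct dma_chan *chan)
{
	return container_of(chan, struct my_chan, vc.chan);
}

static enum dma_status my_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	struct my_chan *mchan = to_my_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	size_t residue = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&mchan->vc.lock, flags);
	vd = vchan_find_desc(&mchan->vc, cookie);
	if (vd)
		residue = container_of(vd, struct my_desc, vd)->remaining;
	spin_unlock_irqrestore(&mchan->vc.lock, flags);

	dma_set_residue(txstate, residue);

	if (ret == DMA_IN_PROGRESS && mchan->paused)
		ret = DMA_PAUSED;
	return ret;
}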
951 * @bchan: bam dma channel
954 static void bam_apply_new_config(struct bam_chan *bchan,
957 struct bam_device *bdev = bchan->bdev;
962 maxburst = bchan->slave.src_maxburst;
964 maxburst = bchan->slave.dst_maxburst;
970 bchan->reconfigure = 0;
975 * @bchan: bam dma channel
977 static void bam_start_dma(struct bam_chan *bchan)
979 struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
980 struct bam_device *bdev = bchan->bdev;
983 struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
989 lockdep_assert_held(&bchan->vc.lock);
998 while (vd && !IS_BUSY(bchan)) {
1004 if (!bchan->initialized)
1005 bam_chan_init_hw(bchan, async_desc->dir);
1008 if (bchan->reconfigure)
1009 bam_apply_new_config(bchan, async_desc->dir);
1012 avail = CIRC_SPACE(bchan->tail, bchan->head,
1025 vd = vchan_next_desc(&bchan->vc);
1043 if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
1044 u32 partial = MAX_DESCRIPTORS - bchan->tail;
1046 memcpy(&fifo[bchan->tail], desc,
1052 memcpy(&fifo[bchan->tail], desc,
1057 bchan->tail += async_desc->xfer_len;
1058 bchan->tail %= MAX_DESCRIPTORS;
1059 list_add_tail(&async_desc->desc_node, &bchan->desc_list);
1064 writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
1065 bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));
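The bam_start_dma() fragments above write a run of hardware descriptors into the FIFO, splitting the memcpy in two when the run would cross the end of the ring, then advance the tail modulo the ring depth. A self-contained sketch of that wraparound copy; RING_ENTRIES and struct desc are placeholders, not the driver's MAX_DESCRIPTORS or bam_desc_hw:

#include <string.h>

#define RING_ENTRIES 4096	/* placeholder ring depth */

struct desc { unsigned int word[2]; };

static void ring_push(struct desc *fifo, unsigned int *tail,
		      const struct desc *src, unsigned int count)
{
	if (*tail + count > RING_ENTRIES) {
		unsigned int partial = RING_ENTRIES - *tail;

		/* fill to the end of the ring, then wrap to the start */
		memcpy(&fifo[*tail], src, partial * sizeof(*src));
		memcpy(fifo, src + partial, (count - partial) * sizeof(*src));
	} else {
		memcpy(&fifo[*tail], src, count * sizeof(*src));
	}

	*tail = (*tail + count) % RING_ENTRIES;
}

int main(void)
{
	static struct desc fifo[RING_ENTRIES];
	struct desc batch[8] = { { { 0 } } };
	unsigned int tail = RING_ENTRIES - 3;	/* force a wrap */

	ring_push(fifo, &tail, batch, 8);	/* copies 3 + 5 entries */
	return tail == 5 ? 0 : 1;
}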
1080 struct bam_chan *bchan;
1086 bchan = &bdev->channels[i];
1087 spin_lock_irqsave(&bchan->vc.lock, flags);
1089 if (!list_empty(&bchan->vc.desc_issued) && !IS_BUSY(bchan))
1090 bam_start_dma(bchan);
1091 spin_unlock_irqrestore(&bchan->vc.lock, flags);
1104 struct bam_chan *bchan = to_bam_chan(chan);
1107 spin_lock_irqsave(&bchan->vc.lock, flags);
1110 if (vchan_issue_pending(&bchan->vc) && !IS_BUSY(bchan))
1111 bam_start_dma(bchan);
1113 spin_unlock_irqrestore(&bchan->vc.lock, flags);
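issue_pending above is the standard virt-dma kick: move submitted descriptors onto the issued list with vchan_issue_pending() and start the hardware only when it is idle. A sketch using a hypothetical busy flag in place of the driver's IS_BUSY() ring test:

#include <linux/dmaengine.h>
#include "../virt-dma.h"

struct my_chan {
	struct virt_dma_chan vc;
	bool busy;			/* illustrative "hardware active" flag */
};

static inline struct my_chan *to_my_chan(struct dma_chan *chan)
{
	return container_of(chan, struct my_chan, vc.chan);
}

static void my_start_dma(struct my_chan *mchan)
{
	/* program the next issued descriptor into hardware (omitted) */
}

static void my_issue_pending(struct dma_chan *chan)
{
	struct my_chan *mchan = to_my_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&mchan->vc.lock, flags);
	if (vchan_issue_pending(&mchan->vc) && !mchan->busy)
		my_start_dma(mchan);
	spin_unlock_irqrestore(&mchan->vc.lock, flags);
}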
1206 static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
1209 bchan->id = index;
1210 bchan->bdev = bdev;
1212 vchan_init(&bchan->vc, &bdev->common);
1213 bchan->vc.desc_free = bam_dma_free_desc;
1214 INIT_LIST_HEAD(&bchan->desc_list);