Lines matching refs: fsl_qdma
(references to the engine state, struct fsl_qdma_engine, in the Freescale/NXP qDMA dmaengine driver; the number leading each match is its line in the source file)
308 struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
349 fsl_qdma->desc_allocated--;
483 struct fsl_qdma_engine *fsl_qdma)
490 queue_num = fsl_qdma->n_queues;
491 block_number = fsl_qdma->block_number;
524 queue_temp->block_base = fsl_qdma->block_base +
525 FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
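
Lines 483-525 are from fsl_qdma_alloc_queue_resources(): the engine exposes block_number register blocks, each owning n_queues command queues, and every queue gets a pointer to its block's register window. A minimal sketch of that indexing, assuming the offset macro is a simple stride (its body is outside this match set):

        /* Assumed macro body: register blocks sit at a fixed stride. */
        #define FSL_QDMA_BLOCK_BASE_OFFSET(qdma, x)  ((qdma)->block_offset * (x))

        /* queue_head: array of block_number * n_queues queues, allocated earlier */
        for (j = 0; j < fsl_qdma->block_number; j++)
                for (i = 0; i < fsl_qdma->n_queues; i++)
                        queue_head[j * fsl_qdma->n_queues + i].block_base =
                                fsl_qdma->block_base +
                                FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
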
583 static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
587 void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
590 reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
592 qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
593 for (j = 0; j < fsl_qdma->block_number; j++) {
594 block = fsl_qdma->block_base +
595 FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
597 qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
600 reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
608 for (j = 0; j < fsl_qdma->block_number; j++) {
609 block = fsl_qdma->block_base +
610 FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
613 qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
619 qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
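
fsl_qdma_halt() (lines 583-619) follows a disable-then-poll pattern: set the dispatch-disable bit in the mode register, spin until the status register reports the engine idle, then zero each block's queue mode registers so nothing can restart. A condensed sketch, taking FSL_QDMA_DMR_DQD (dispatch disable) and FSL_QDMA_DSR_DB (DMA busy) on trust as the relevant bits:

        int count = 1000;                       /* assumed poll budget */
        u32 reg;

        reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
        reg |= FSL_QDMA_DMR_DQD;                /* stop dispatching commands */
        qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);

        while (qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR) & FSL_QDMA_DSR_DB) {
                if (count-- < 0)
                        return -EBUSY;          /* engine never went idle */
                udelay(100);
        }
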
627 fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
637 struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
638 struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];
644 reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
656 id * fsl_qdma->n_queues;
680 reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
687 qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
695 reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
701 qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
723 dev_err(fsl_qdma->dma_dev.dev,
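
fsl_qdma_queue_transfer_complete() (lines 627-723) reaps the per-block status queue: BSQSR says whether entries are pending, and each consumed entry is handed back by setting the dequeue bit in BSQMR (the read-modify-write pairs at 680-701). The shape of that loop, with FSL_QDMA_BSQSR_QE (queue empty) and FSL_QDMA_BSQMR_DI (dequeue increment) assumed from the register usage above:

        while (1) {
                u32 reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);

                if (reg & FSL_QDMA_BSQSR_QE)    /* status queue drained */
                        return 0;

                /* ... map the status entry back to its command queue
                 *     (the id * fsl_qdma->n_queues indexing, line 656)
                 *     and complete the descriptor ... */

                reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
                reg |= FSL_QDMA_BSQMR_DI;       /* return the entry to hw */
                qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
        }
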
741 struct fsl_qdma_engine *fsl_qdma = dev_id;
742 void __iomem *status = fsl_qdma->status_base;
748 intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
751 decfdw0r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW0R);
752 decfdw1r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW1R);
753 decfdw2r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW2R);
754 decfdw3r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW3R);
755 dev_err(fsl_qdma->dma_dev.dev,
760 qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
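
Lines 741-760 are the error interrupt handler: DEDR latches the error cause, DECFDW0R through DECFDW3R capture the four words of the faulting command descriptor for the log, and writing FSL_QDMA_DEDR_CLEAR re-arms detection. Condensed into a sketch (the log text is illustrative):

        static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
        {
                struct fsl_qdma_engine *fsl_qdma = dev_id;
                void __iomem *status = fsl_qdma->status_base;
                u32 intr;

                intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
                if (intr)
                        dev_err(fsl_qdma->dma_dev.dev,
                                "DMA transaction error! DEDR = 0x%x\n", intr);

                qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
                return IRQ_HANDLED;
        }
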
768 struct fsl_qdma_engine *fsl_qdma = dev_id;
769 void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
771 id = irq - fsl_qdma->irq_base;
772 if (id < 0 || id >= fsl_qdma->block_number) {
773 dev_err(fsl_qdma->dma_dev.dev,
775 irq, fsl_qdma->irq_base);
778 block = fsl_qdma->block_base +
779 FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
781 intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
784 intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id);
787 reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
789 qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
790 qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
791 dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
795 qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
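
Lines 768-795 are the per-block completion handler: recover the block index from the IRQ number (hence the range check at line 772), reap that block's status queue, and acknowledge through BCQIDR; on a status error it sets the DMR disable bit and masks BCQIER (787-791). A sketch of the happy path, error branch elided:

        static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
        {
                struct fsl_qdma_engine *fsl_qdma = dev_id;
                void __iomem *block;
                int id = irq - fsl_qdma->irq_base;

                if (id < 0 || id >= fsl_qdma->block_number)
                        return IRQ_NONE;        /* not one of our IRQs */

                block = fsl_qdma->block_base +
                        FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);

                if (qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0)))
                        fsl_qdma_queue_transfer_complete(fsl_qdma, block, id);

                qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
                            block + FSL_QDMA_BCQIDR(0)); /* ack the interrupt */
                return IRQ_HANDLED;
        }
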
803 struct fsl_qdma_engine *fsl_qdma)
810 fsl_qdma->error_irq =
812 if (fsl_qdma->error_irq < 0)
813 return fsl_qdma->error_irq;
815 ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
817 "qDMA error", fsl_qdma);
823 for (i = 0; i < fsl_qdma->block_number; i++) {
825 fsl_qdma->queue_irq[i] =
828 if (fsl_qdma->queue_irq[i] < 0)
829 return fsl_qdma->queue_irq[i];
832 fsl_qdma->queue_irq[i],
836 fsl_qdma);
844 ret = irq_set_affinity_hint(fsl_qdma->queue_irq[i],
850 fsl_qdma->queue_irq[i]);
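
fsl_qdma_irq_init() (lines 803-850) resolves each interrupt by its devicetree name, installs a managed handler, and hints the scheduler to spread the per-block queue IRQs across CPUs. The lookup-and-request pattern, with the "qdma-queueN" naming assumed from line 1208:

        char irq_name[32];

        for (i = 0; i < fsl_qdma->block_number; i++) {
                snprintf(irq_name, sizeof(irq_name), "qdma-queue%d", i);

                fsl_qdma->queue_irq[i] = platform_get_irq_byname(pdev, irq_name);
                if (fsl_qdma->queue_irq[i] < 0)
                        return fsl_qdma->queue_irq[i];

                ret = devm_request_irq(&pdev->dev, fsl_qdma->queue_irq[i],
                                       fsl_qdma_queue_handler, 0,
                                       "qDMA queue", fsl_qdma);
                if (ret)
                        return ret;

                /* best effort: steer each block's IRQ to a different CPU */
                ret = irq_set_affinity_hint(fsl_qdma->queue_irq[i],
                                            get_cpu_mask(i % num_online_cpus()));
                if (ret)
                        return ret;
        }

Because the IRQs are devm-managed, fsl_qdma_irq_exit() (lines 859-865) only has to run early in remove so that no handler fires after the engine state is torn down.
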
859 struct fsl_qdma_engine *fsl_qdma)
863 devm_free_irq(&pdev->dev, fsl_qdma->error_irq, fsl_qdma);
864 for (i = 0; i < fsl_qdma->block_number; i++)
865 devm_free_irq(&pdev->dev, fsl_qdma->queue_irq[i], fsl_qdma);
868 static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
873 void __iomem *status = fsl_qdma->status_base;
874 void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
875 struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
878 ret = fsl_qdma_halt(fsl_qdma);
880 dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!\n");
884 for (i = 0; i < fsl_qdma->block_number; i++) {
890 block = fsl_qdma->block_base +
891 FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, i);
892 qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
896 for (j = 0; j < fsl_qdma->block_number; j++) {
897 block = fsl_qdma->block_base +
898 FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
899 for (i = 0; i < fsl_qdma->n_queues; i++) {
900 temp = fsl_queue + i + (j * fsl_qdma->n_queues);
909 qdma_writel(fsl_qdma, temp->bus_addr,
911 qdma_writel(fsl_qdma, temp->bus_addr,
918 qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
927 qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
937 qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
939 qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
942 qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
944 qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN |
947 qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE |
954 (fsl_qdma->status[j]->n_cq) - 6);
956 qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
957 reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
961 qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
962 qdma_writel(fsl_qdma, FSL_QDMA_DEIER_CLEAR, status + FSL_QDMA_DEIER);
964 reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
966 qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
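
fsl_qdma_reg_init() (lines 868-966) halts the engine, programs each command queue's ring base into both the dequeue and enqueue address registers (909-911), sizes and enables the queue through BCQMR, brings up the per-block status queues with interrupt coalescing (942-957), clears stale error state (961-962), and finally drops the DMR disable bit (964-966). The per-queue enable step, with the register and bit names taken on trust from the driver:

        /* producer and consumer both start at the ring base */
        qdma_writel(fsl_qdma, temp->bus_addr, block + FSL_QDMA_BCQDPA_SADDR(i));
        qdma_writel(fsl_qdma, temp->bus_addr, block + FSL_QDMA_BCQEPA_SADDR(i));

        reg = FSL_QDMA_BCQMR_EN;                              /* enable the queue */
        reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6); /* log2-coded size */
        qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));

The same log2-minus-6 size encoding shows up for the status queue at line 954.
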
1070 struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
1074 return fsl_qdma->desc_allocated;
1107 fsl_qdma->desc_allocated++;
1108 return fsl_qdma->desc_allocated;
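
Lines 1070-1108 (alloc) pair with 308-349 (free): desc_allocated counts how many channels currently hold descriptor pools, and the alloc path returns early with the current count if the pools already exist. The counter is engine-global, so updates need serialization; a sketch assuming fsl_qdma_mutex (initialized at line 1179) guards both sites:

        /* alloc_chan_resources (line 1107) */
        mutex_lock(&fsl_qdma->fsl_qdma_mutex);
        fsl_qdma->desc_allocated++;
        mutex_unlock(&fsl_qdma->fsl_qdma_mutex);

        /* free_chan_resources (line 349) */
        mutex_lock(&fsl_qdma->fsl_qdma_mutex);
        fsl_qdma->desc_allocated--;
        mutex_unlock(&fsl_qdma->fsl_qdma_mutex);
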
1124 struct fsl_qdma_engine *fsl_qdma;
1147 len = sizeof(*fsl_qdma);
1148 fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
1149 if (!fsl_qdma)
1153 fsl_qdma->chans = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
1154 if (!fsl_qdma->chans)
1158 fsl_qdma->status = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
1159 if (!fsl_qdma->status)
1163 fsl_qdma->queue_irq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
1164 if (!fsl_qdma->queue_irq)
1173 fsl_qdma->desc_allocated = 0;
1174 fsl_qdma->n_chans = chans;
1175 fsl_qdma->n_queues = queues;
1176 fsl_qdma->block_number = blk_num;
1177 fsl_qdma->block_offset = blk_off;
1179 mutex_init(&fsl_qdma->fsl_qdma_mutex);
1181 for (i = 0; i < fsl_qdma->block_number; i++) {
1182 fsl_qdma->status[i] = fsl_qdma_prep_status_queue(pdev);
1183 if (!fsl_qdma->status[i])
1187 fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
1188 if (IS_ERR(fsl_qdma->ctrl_base))
1189 return PTR_ERR(fsl_qdma->ctrl_base);
1192 fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
1193 if (IS_ERR(fsl_qdma->status_base))
1194 return PTR_ERR(fsl_qdma->status_base);
1197 fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
1198 if (IS_ERR(fsl_qdma->block_base))
1199 return PTR_ERR(fsl_qdma->block_base);
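
Lines 1187-1199 map the three register regions (control, status, block) with the managed ioremap helper; devm unwinds the mappings automatically on any later probe failure, which is why none of the error paths free anything by hand. The pattern for one region, resource index assumed:

        struct resource *res;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(fsl_qdma->ctrl_base))
                return PTR_ERR(fsl_qdma->ctrl_base);
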
1200 fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma);
1201 if (!fsl_qdma->queue)
1204 ret = fsl_qdma_irq_init(pdev, fsl_qdma);
1208 fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
1209 if (fsl_qdma->irq_base < 0)
1210 return fsl_qdma->irq_base;
1212 fsl_qdma->feature = of_property_read_bool(np, "big-endian");
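
The "big-endian" property read at line 1212 is what every qdma_readl()/qdma_writel() call in this listing consults; the accessor bodies themselves fall outside this match set. A plausible shape for them, offered as an assumption:

        static inline u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
        {
                return qdma->feature ? ioread32be(addr) : ioread32(addr);
        }

        static inline void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
                                       void __iomem *addr)
        {
                if (qdma->feature)
                        iowrite32be(val, addr);
                else
                        iowrite32(val, addr);
        }
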
1213 INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
1215 for (i = 0; i < fsl_qdma->n_chans; i++) {
1216 struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
1218 fsl_chan->qdma = fsl_qdma;
1219 fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
1220 fsl_qdma->block_number);
1222 vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
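
Lines 1215-1222 fan n_chans virtual channels out over the hardware queues with a modulo, so several DMA channels can share one queue: with, say, 2 blocks of 8 queues and 32 channels, channels 0 and 16 both land on queue 0 of block 0.

        /* round-robin: channel i shares hardware queue i mod (queues * blocks) */
        fsl_chan->queue = fsl_qdma->queue +
                          i % (fsl_qdma->n_queues * fsl_qdma->block_number);
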
1225 dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
1227 fsl_qdma->dma_dev.dev = &pdev->dev;
1228 fsl_qdma->dma_dev.device_free_chan_resources =
1230 fsl_qdma->dma_dev.device_alloc_chan_resources =
1232 fsl_qdma->dma_dev.device_tx_status = dma_cookie_status;
1233 fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
1234 fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
1235 fsl_qdma->dma_dev.device_synchronize = fsl_qdma_synchronize;
1236 fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all;
1244 platform_set_drvdata(pdev, fsl_qdma);
1246 ret = dma_async_device_register(&fsl_qdma->dma_dev);
1253 ret = fsl_qdma_reg_init(fsl_qdma);
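
Lines 1244-1253 finish probe: stash the driver state, register with the dmaengine core, then program the hardware via fsl_qdma_reg_init(). A sketch of that tail (error messages illustrative):

        platform_set_drvdata(pdev, fsl_qdma);

        ret = dma_async_device_register(&fsl_qdma->dma_dev);
        if (ret) {
                dev_err(&pdev->dev, "Can't register qDMA engine.\n");
                return ret;
        }

        ret = fsl_qdma_reg_init(fsl_qdma);
        if (ret) {
                dev_err(&pdev->dev, "Can't initialize qDMA registers.\n");
                return ret;
        }

        return 0;
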
1276 struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
1278 fsl_qdma_irq_exit(pdev, fsl_qdma);
1279 fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
1281 dma_async_device_unregister(&fsl_qdma->dma_dev);
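
Removal (lines 1276-1281) runs in reverse of probe: quiesce interrupts first so no handler touches state mid-teardown, drain the virtual channels, then unregister from the dmaengine core. Reassembled from just the lines above (any further cleanup falls outside this match set):

        static int fsl_qdma_remove(struct platform_device *pdev)
        {
                struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);

                fsl_qdma_irq_exit(pdev, fsl_qdma);           /* stop IRQ delivery */
                fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);  /* drain vchans */
                dma_async_device_unregister(&fsl_qdma->dma_dev);

                return 0;
        }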