Lines matching refs: cqdma (references to the cqdma identifier in the MediaTek CQDMA DMA engine driver, drivers/dma/mediatek/mtk-cqdma.c)

163 static struct device *cqdma2dev(struct mtk_cqdma_device *cqdma)
165 return cqdma->ddev.dev;
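
The two lines above are a small accessor that maps the driver-private structure back to its struct device. A minimal sketch of the usual pattern, assuming the device structure embeds a struct dma_device named ddev and that the to_cqdma_dev() seen in the dev_err() calls below is a container_of() wrapper around the channel's dma_device (the field layout is inferred from the listing, not authoritative):

	#include <linux/dmaengine.h>

	/* Sketch only: layout inferred from the lines above. */
	struct mtk_cqdma_device {
		struct dma_device ddev;
		/* ... clk, pc[], vc[], dma_channels, dma_requests ... */
	};

	/* Recover the driver structure from a generic DMA channel. */
	static inline struct mtk_cqdma_device *to_cqdma_dev(struct dma_chan *chan)
	{
		return container_of(chan->device, struct mtk_cqdma_device, ddev);
	}

	/* Accessor used by the dev_err() calls throughout the driver. */
	static struct device *cqdma2dev(struct mtk_cqdma_device *cqdma)
	{
		return cqdma->ddev.dev;
	}
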
235 dev_err(cqdma2dev(to_cqdma_dev(cvd->ch)), "cqdma wait transaction timeout\n");
240 dev_err(cqdma2dev(to_cqdma_dev(cvd->ch)), "cqdma warm reset timeout\n");
387 struct mtk_cqdma_device *cqdma = devid;
393 for (i = 0; i < cqdma->dma_channels; ++i, schedule_tasklet = false) {
394 spin_lock(&cqdma->pc[i]->lock);
395 if (mtk_dma_read(cqdma->pc[i],
398 mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_FLAG,
404 spin_unlock(&cqdma->pc[i]->lock);
408 disable_irq_nosync(cqdma->pc[i]->irq);
411 tasklet_schedule(&cqdma->pc[i]->tasklet);
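
Lines 387-411 are the shared interrupt handler: it walks every physical channel, acknowledges a pending completion under the per-channel lock, then defers the real work to that channel's tasklet with the interrupt line masked. A condensed sketch of that flow, assuming a per-channel structure with lock, irq and tasklet fields; MTK_CQDMA_INT_FLAG_BIT is an assumed bit name not shown in the listing:

	static irqreturn_t mtk_cqdma_irq(int irq, void *devid)
	{
		struct mtk_cqdma_device *cqdma = devid;
		irqreturn_t ret = IRQ_NONE;
		bool schedule_tasklet = false;
		u32 i;

		for (i = 0; i < cqdma->dma_channels; ++i, schedule_tasklet = false) {
			spin_lock(&cqdma->pc[i]->lock);
			if (mtk_dma_read(cqdma->pc[i], MTK_CQDMA_INT_FLAG) &
			    MTK_CQDMA_INT_FLAG_BIT) {
				/* Acknowledge while still holding the channel lock. */
				mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_FLAG,
					    MTK_CQDMA_INT_FLAG_BIT);
				schedule_tasklet = true;
				ret = IRQ_HANDLED;
			}
			spin_unlock(&cqdma->pc[i]->lock);

			if (schedule_tasklet) {
				/* Mask the line; the tasklet is expected to re-enable it. */
				disable_irq_nosync(cqdma->pc[i]->irq);
				tasklet_schedule(&cqdma->pc[i]->tasklet);
			}
		}

		return ret;
	}
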
614 struct mtk_cqdma_device *cqdma = to_cqdma_dev(c);
621 for (i = 0; i < cqdma->dma_channels; ++i) {
622 refcnt = refcount_read(&cqdma->pc[i]->refcnt);
624 pc = cqdma->pc[i];
674 dev_err(cqdma2dev(to_cqdma_dev(c)), "cqdma flush timeout\n");
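
Lines 614-624 run when a virtual channel is allocated: the driver scans the physical channels and attaches the new user to the one with the lowest reference count, while line 674 reports a flush that did not finish when a channel is released. A sketch of the selection loop, assuming a per-channel refcount_t named refcnt; mtk_cqdma_pick_pchan() is a hypothetical helper name used only for illustration:

	/* Hypothetical helper: pick the least-shared physical channel. */
	static struct mtk_cqdma_pchan *mtk_cqdma_pick_pchan(struct mtk_cqdma_device *cqdma)
	{
		struct mtk_cqdma_pchan *pc = NULL;
		u32 i, min_refcnt = U32_MAX, refcnt;

		for (i = 0; i < cqdma->dma_channels; ++i) {
			refcnt = refcount_read(&cqdma->pc[i]->refcnt);
			if (refcnt < min_refcnt) {
				pc = cqdma->pc[i];
				min_refcnt = refcnt;
			}
		}

		return pc;
	}
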
688 static int mtk_cqdma_hw_init(struct mtk_cqdma_device *cqdma)
694 pm_runtime_enable(cqdma2dev(cqdma));
695 pm_runtime_get_sync(cqdma2dev(cqdma));
697 err = clk_prepare_enable(cqdma->clk);
700 pm_runtime_put_sync(cqdma2dev(cqdma));
701 pm_runtime_disable(cqdma2dev(cqdma));
706 for (i = 0; i < cqdma->dma_channels; ++i) {
707 spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
708 if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0) {
709 dev_err(cqdma2dev(cqdma), "cqdma hard reset timeout\n");
710 spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);
712 clk_disable_unprepare(cqdma->clk);
713 pm_runtime_put_sync(cqdma2dev(cqdma));
714 pm_runtime_disable(cqdma2dev(cqdma));
717 spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);
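
Lines 688-717 bring the hardware up: enable runtime PM and take a PM reference, enable the cqdma clock, then hard-reset every physical channel under its lock, unwinding everything if any step fails. A condensed sketch of that sequence; the -EINVAL return for a reset timeout is an assumption, the rest follows the calls shown above:

	static int mtk_cqdma_hw_init(struct mtk_cqdma_device *cqdma)
	{
		unsigned long flags;
		int err;
		u32 i;

		pm_runtime_enable(cqdma2dev(cqdma));
		pm_runtime_get_sync(cqdma2dev(cqdma));

		err = clk_prepare_enable(cqdma->clk);
		if (err) {
			/* Drop the PM reference taken above before bailing out. */
			pm_runtime_put_sync(cqdma2dev(cqdma));
			pm_runtime_disable(cqdma2dev(cqdma));
			return err;
		}

		/* Reset every physical channel before it is handed any work. */
		for (i = 0; i < cqdma->dma_channels; ++i) {
			spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
			if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0) {
				dev_err(cqdma2dev(cqdma), "cqdma hard reset timeout\n");
				spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);

				clk_disable_unprepare(cqdma->clk);
				pm_runtime_put_sync(cqdma2dev(cqdma));
				pm_runtime_disable(cqdma2dev(cqdma));
				return -EINVAL;
			}
			spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);
		}

		return 0;
	}
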
723 static void mtk_cqdma_hw_deinit(struct mtk_cqdma_device *cqdma)
729 for (i = 0; i < cqdma->dma_channels; ++i) {
730 spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
731 if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0)
732 dev_err(cqdma2dev(cqdma), "cqdma hard reset timeout\n");
733 spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);
736 clk_disable_unprepare(cqdma->clk);
738 pm_runtime_put_sync(cqdma2dev(cqdma));
739 pm_runtime_disable(cqdma2dev(cqdma));
743 { .compatible = "mediatek,mt6765-cqdma" },
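
Line 743 is the only entry in the OF match table. A minimal sketch of how such a table is typically declared and exported so the module autoloads on a matching device tree node (the table name and the MODULE_DEVICE_TABLE() line are assumptions):

	static const struct of_device_id mtk_cqdma_match[] = {
		{ .compatible = "mediatek,mt6765-cqdma" },
		{ /* sentinel */ }
	};
	MODULE_DEVICE_TABLE(of, mtk_cqdma_match);
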
750 struct mtk_cqdma_device *cqdma;
756 cqdma = devm_kzalloc(&pdev->dev, sizeof(*cqdma), GFP_KERNEL);
757 if (!cqdma)
760 dd = &cqdma->ddev;
762 cqdma->clk = devm_clk_get(&pdev->dev, "cqdma");
763 if (IS_ERR(cqdma->clk)) {
766 return PTR_ERR(cqdma->clk);
787 &cqdma->dma_requests)) {
792 cqdma->dma_requests = MTK_CQDMA_NR_VCHANS;
797 &cqdma->dma_channels)) {
802 cqdma->dma_channels = MTK_CQDMA_NR_PCHANS;
805 cqdma->pc = devm_kcalloc(&pdev->dev, cqdma->dma_channels,
806 sizeof(*cqdma->pc), GFP_KERNEL);
807 if (!cqdma->pc)
811 for (i = 0; i < cqdma->dma_channels; ++i) {
812 cqdma->pc[i] = devm_kcalloc(&pdev->dev, 1,
813 sizeof(**cqdma->pc), GFP_KERNEL);
814 if (!cqdma->pc[i])
817 INIT_LIST_HEAD(&cqdma->pc[i]->queue);
818 spin_lock_init(&cqdma->pc[i]->lock);
819 refcount_set(&cqdma->pc[i]->refcnt, 0);
820 cqdma->pc[i]->base = devm_platform_ioremap_resource(pdev, i);
821 if (IS_ERR(cqdma->pc[i]->base))
822 return PTR_ERR(cqdma->pc[i]->base);
828 cqdma->pc[i]->irq = err;
830 err = devm_request_irq(&pdev->dev, cqdma->pc[i]->irq,
832 cqdma);
841 cqdma->vc = devm_kcalloc(&pdev->dev, cqdma->dma_requests,
842 sizeof(*cqdma->vc), GFP_KERNEL);
843 if (!cqdma->vc)
846 for (i = 0; i < cqdma->dma_requests; i++) {
847 vc = &cqdma->vc[i];
858 of_dma_xlate_by_chan_id, cqdma);
865 err = mtk_cqdma_hw_init(cqdma);
872 platform_set_drvdata(pdev, cqdma);
875 for (i = 0; i < cqdma->dma_channels; ++i)
876 tasklet_setup(&cqdma->pc[i]->tasklet, mtk_cqdma_tasklet_cb);
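
Lines 750-876 are the probe path: allocate the device structure, take the "cqdma" clock, read dma-requests and dma-channels from the device tree with the driver defaults (MTK_CQDMA_NR_VCHANS, MTK_CQDMA_NR_PCHANS) as fallback, set up each physical channel, register the virtual channels and OF DMA translation, initialize the hardware, and finally start the per-channel tasklets. A sketch of the per-channel loop, assuming one MMIO window and one interrupt per channel in the platform resources; the mtk_cqdma_irq handler name and the dev_name() IRQ label are assumptions beyond what the listing shows:

	for (i = 0; i < cqdma->dma_channels; ++i) {
		cqdma->pc[i] = devm_kcalloc(&pdev->dev, 1,
					    sizeof(**cqdma->pc), GFP_KERNEL);
		if (!cqdma->pc[i])
			return -ENOMEM;

		INIT_LIST_HEAD(&cqdma->pc[i]->queue);
		spin_lock_init(&cqdma->pc[i]->lock);
		refcount_set(&cqdma->pc[i]->refcnt, 0);

		/* One register window per physical channel. */
		cqdma->pc[i]->base = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(cqdma->pc[i]->base))
			return PTR_ERR(cqdma->pc[i]->base);

		/* One interrupt line per physical channel. */
		err = platform_get_irq(pdev, i);
		if (err < 0)
			return err;
		cqdma->pc[i]->irq = err;

		err = devm_request_irq(&pdev->dev, cqdma->pc[i]->irq,
				       mtk_cqdma_irq, 0,
				       dev_name(&pdev->dev), cqdma);
		if (err)
			return err;
	}
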
890 struct mtk_cqdma_device *cqdma = platform_get_drvdata(pdev);
896 for (i = 0; i < cqdma->dma_requests; i++) {
897 vc = &cqdma->vc[i];
904 for (i = 0; i < cqdma->dma_channels; i++) {
905 spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
906 mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_EN,
908 spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);
911 synchronize_irq(cqdma->pc[i]->irq);
913 tasklet_kill(&cqdma->pc[i]->tasklet);
917 mtk_cqdma_hw_deinit(cqdma);
919 dma_async_device_unregister(&cqdma->ddev);
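
Lines 890-919 are the remove path, which undoes probe in reverse: unhook and kill the virtual-channel tasklets, mask each physical channel's interrupt enable bit under its lock, wait for any running handler with synchronize_irq(), kill the channel tasklets, power the hardware down via mtk_cqdma_hw_deinit(), and unregister from the DMA engine core. A condensed sketch of the per-channel teardown; MTK_CQDMA_INT_EN_BIT and the trailing of_dma_controller_free() call are assumptions not visible in the listing:

	/* Stop the hardware from raising further completions. */
	for (i = 0; i < cqdma->dma_channels; i++) {
		spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
		mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_EN,
			    MTK_CQDMA_INT_EN_BIT);
		spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);

		/* Wait out any handler already running on another CPU. */
		synchronize_irq(cqdma->pc[i]->irq);

		tasklet_kill(&cqdma->pc[i]->tasklet);
	}

	mtk_cqdma_hw_deinit(cqdma);
	dma_async_device_unregister(&cqdma->ddev);
	of_dma_controller_free(pdev->dev.of_node);
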