Lines matching refs: cqdma (MediaTek CQDMA DMA engine driver)

164 static struct device *cqdma2dev(struct mtk_cqdma_device *cqdma)
166 return cqdma->ddev.dev;
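
Lines 164 and 166 are the whole of a small accessor; reconstructed (only the braces are missing from the listing), it maps the driver context to the struct device embedded in its dma_device:

static struct device *cqdma2dev(struct mtk_cqdma_device *cqdma)
{
        /* the DMA engine core's struct device doubles as the log/PM device */
        return cqdma->ddev.dev;
}
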
236 dev_err(cqdma2dev(to_cqdma_dev(cvd->ch)), "cqdma wait transaction timeout\n");
241 dev_err(cqdma2dev(to_cqdma_dev(cvd->ch)), "cqdma warm reset timeout\n");
388 struct mtk_cqdma_device *cqdma = devid;
394 for (i = 0; i < cqdma->dma_channels; ++i, schedule_tasklet = false) {
395 spin_lock(&cqdma->pc[i]->lock);
396 if (mtk_dma_read(cqdma->pc[i],
399 mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_FLAG,
405 spin_unlock(&cqdma->pc[i]->lock);
409 disable_irq_nosync(cqdma->pc[i]->irq);
412 tasklet_schedule(&cqdma->pc[i]->tasklet);
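
Lines 388-412 all come from one interrupt handler. A hedged reconstruction of how they fit together follows; the handler name, the MTK_CQDMA_INT_FLAG_BIT mask and the irqreturn_t bookkeeping are assumptions, while the lock/read/clear and disable_irq_nosync()/tasklet_schedule() sequence is taken directly from the quoted lines.

/* Hypothetical handler name; registered via devm_request_irq() at line 835. */
static irqreturn_t mtk_cqdma_irq(int irq, void *devid)
{
        struct mtk_cqdma_device *cqdma = devid;
        irqreturn_t ret = IRQ_NONE;
        bool schedule_tasklet = false;
        u32 i;

        /* each physical channel has its own IRQ line but shares this handler */
        for (i = 0; i < cqdma->dma_channels; ++i, schedule_tasklet = false) {
                spin_lock(&cqdma->pc[i]->lock);
                if (mtk_dma_read(cqdma->pc[i], MTK_CQDMA_INT_FLAG) &
                    MTK_CQDMA_INT_FLAG_BIT) {   /* assumed flag mask */
                        /* acknowledge the interrupt on this channel */
                        mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_FLAG,
                                    MTK_CQDMA_INT_FLAG_BIT);
                        schedule_tasklet = true;
                        ret = IRQ_HANDLED;
                }
                spin_unlock(&cqdma->pc[i]->lock);

                if (schedule_tasklet) {
                        /* keep the line masked until the tasklet drains the PC */
                        disable_irq_nosync(cqdma->pc[i]->irq);
                        tasklet_schedule(&cqdma->pc[i]->tasklet);
                }
        }

        return ret;
}
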
615 struct mtk_cqdma_device *cqdma = to_cqdma_dev(c);
622 for (i = 0; i < cqdma->dma_channels; ++i) {
623 refcnt = refcount_read(&cqdma->pc[i]->refcnt);
625 pc = cqdma->pc[i];
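
Lines 615-625 come from the channel-allocation path: to_cqdma_dev(c) resolves the controller from the dma_chan, then every physical channel's refcount is scanned before a virtual channel is attached to one. A hedged sketch of that selection step follows, pulled out into a hypothetical helper; the "least-referenced PC wins" policy is inferred, only the refcount_read()/assignment pair is quoted above.

static struct mtk_cqdma_pchan *mtk_cqdma_pick_pchan(struct mtk_cqdma_device *cqdma)
{
        struct mtk_cqdma_pchan *pc = NULL;
        u32 i, refcnt, min_refcnt = U32_MAX;

        for (i = 0; i < cqdma->dma_channels; ++i) {
                refcnt = refcount_read(&cqdma->pc[i]->refcnt);
                if (refcnt < min_refcnt) {
                        /* remember the least-loaded PC seen so far (assumed policy) */
                        pc = cqdma->pc[i];
                        min_refcnt = refcnt;
                }
        }

        return pc;
}
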
675 dev_err(cqdma2dev(to_cqdma_dev(c)), "cqdma flush timeout\n");
689 static int mtk_cqdma_hw_init(struct mtk_cqdma_device *cqdma)
695 pm_runtime_enable(cqdma2dev(cqdma));
696 pm_runtime_get_sync(cqdma2dev(cqdma));
698 err = clk_prepare_enable(cqdma->clk);
701 pm_runtime_put_sync(cqdma2dev(cqdma));
702 pm_runtime_disable(cqdma2dev(cqdma));
707 for (i = 0; i < cqdma->dma_channels; ++i) {
708 spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
709 if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0) {
710 dev_err(cqdma2dev(cqdma), "cqdma hard reset timeout\n");
711 spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);
713 clk_disable_unprepare(cqdma->clk);
714 pm_runtime_put_sync(cqdma2dev(cqdma));
715 pm_runtime_disable(cqdma2dev(cqdma));
718 spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);
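
Lines 689-718 sketch controller bring-up: runtime PM, clock, then a hard reset of every physical channel under its lock, with a full unwind if any step fails. A reconstruction is below; the exact error codes and the final return 0 are assumptions, the call ordering is from the quoted lines.

static int mtk_cqdma_hw_init(struct mtk_cqdma_device *cqdma)
{
        unsigned long flags;
        int err;
        u32 i;

        pm_runtime_enable(cqdma2dev(cqdma));
        pm_runtime_get_sync(cqdma2dev(cqdma));

        err = clk_prepare_enable(cqdma->clk);
        if (err) {
                /* clock failed: drop the runtime PM reference taken above */
                pm_runtime_put_sync(cqdma2dev(cqdma));
                pm_runtime_disable(cqdma2dev(cqdma));
                return err;
        }

        /* reset all physical channels */
        for (i = 0; i < cqdma->dma_channels; ++i) {
                spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
                if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0) {
                        dev_err(cqdma2dev(cqdma), "cqdma hard reset timeout\n");
                        spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);

                        /* unwind clock and runtime PM before bailing out */
                        clk_disable_unprepare(cqdma->clk);
                        pm_runtime_put_sync(cqdma2dev(cqdma));
                        pm_runtime_disable(cqdma2dev(cqdma));
                        return -EINVAL;         /* assumed error code */
                }
                spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);
        }

        return 0;
}
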
724 static void mtk_cqdma_hw_deinit(struct mtk_cqdma_device *cqdma)
730 for (i = 0; i < cqdma->dma_channels; ++i) {
731 spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
732 if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0)
733 dev_err(cqdma2dev(cqdma), "cqdma hard reset timeout\n");
734 spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);
737 clk_disable_unprepare(cqdma->clk);
739 pm_runtime_put_sync(cqdma2dev(cqdma));
740 pm_runtime_disable(cqdma2dev(cqdma));
744 { .compatible = "mediatek,mt6765-cqdma" },
751 struct mtk_cqdma_device *cqdma;
758 cqdma = devm_kzalloc(&pdev->dev, sizeof(*cqdma), GFP_KERNEL);
759 if (!cqdma)
762 dd = &cqdma->ddev;
764 cqdma->clk = devm_clk_get(&pdev->dev, "cqdma");
765 if (IS_ERR(cqdma->clk)) {
768 return PTR_ERR(cqdma->clk);
789 &cqdma->dma_requests)) {
794 cqdma->dma_requests = MTK_CQDMA_NR_VCHANS;
799 &cqdma->dma_channels)) {
804 cqdma->dma_channels = MTK_CQDMA_NR_PCHANS;
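
Lines 789-804 read the optional dma-requests and dma-channels properties from the device node and fall back to the driver defaults MTK_CQDMA_NR_VCHANS / MTK_CQDMA_NR_PCHANS when they are absent. A hedged sketch (the of_node guard and the debug messages are illustrative, not quoted):

        if (pdev->dev.of_node &&
            of_property_read_u32(pdev->dev.of_node, "dma-requests",
                                 &cqdma->dma_requests)) {
                dev_dbg(&pdev->dev, "no dma-requests property, using default\n");
                cqdma->dma_requests = MTK_CQDMA_NR_VCHANS;
        }

        if (pdev->dev.of_node &&
            of_property_read_u32(pdev->dev.of_node, "dma-channels",
                                 &cqdma->dma_channels)) {
                dev_dbg(&pdev->dev, "no dma-channels property, using default\n");
                cqdma->dma_channels = MTK_CQDMA_NR_PCHANS;
        }
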
807 cqdma->pc = devm_kcalloc(&pdev->dev, cqdma->dma_channels,
808 sizeof(*cqdma->pc), GFP_KERNEL);
809 if (!cqdma->pc)
813 for (i = 0; i < cqdma->dma_channels; ++i) {
814 cqdma->pc[i] = devm_kcalloc(&pdev->dev, 1,
815 sizeof(**cqdma->pc), GFP_KERNEL);
816 if (!cqdma->pc[i])
819 INIT_LIST_HEAD(&cqdma->pc[i]->queue);
820 spin_lock_init(&cqdma->pc[i]->lock);
821 refcount_set(&cqdma->pc[i]->refcnt, 0);
822 cqdma->pc[i]->base = devm_platform_ioremap_resource(pdev, i);
823 if (IS_ERR(cqdma->pc[i]->base))
824 return PTR_ERR(cqdma->pc[i]->base);
833 cqdma->pc[i]->irq = res->start;
835 err = devm_request_irq(&pdev->dev, cqdma->pc[i]->irq,
837 cqdma);
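
Lines 813-837 are the per-physical-channel setup loop in probe. A hedged sketch follows, pulled out into a hypothetical helper; the IRQ lookup via platform_get_resource(), the IRQ flags and the request name are assumptions, while the allocation, list/lock/refcount init, devm_platform_ioremap_resource() and devm_request_irq() with the cqdma device as cookie come from the quoted lines.

static int mtk_cqdma_setup_pchans(struct platform_device *pdev,
                                  struct mtk_cqdma_device *cqdma)
{
        struct resource *res;
        int err;
        u32 i;

        for (i = 0; i < cqdma->dma_channels; ++i) {
                cqdma->pc[i] = devm_kcalloc(&pdev->dev, 1,
                                            sizeof(**cqdma->pc), GFP_KERNEL);
                if (!cqdma->pc[i])
                        return -ENOMEM;

                INIT_LIST_HEAD(&cqdma->pc[i]->queue);
                spin_lock_init(&cqdma->pc[i]->lock);
                refcount_set(&cqdma->pc[i]->refcnt, 0);

                cqdma->pc[i]->base = devm_platform_ioremap_resource(pdev, i);
                if (IS_ERR(cqdma->pc[i]->base))
                        return PTR_ERR(cqdma->pc[i]->base);

                /* assumed lookup: the i-th IRQ resource belongs to this PC */
                res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
                if (!res)
                        return -EINVAL;
                cqdma->pc[i]->irq = res->start;

                err = devm_request_irq(&pdev->dev, cqdma->pc[i]->irq,
                                       mtk_cqdma_irq, 0,
                                       dev_name(&pdev->dev), cqdma);
                if (err)
                        return err;
        }

        return 0;
}
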
846 cqdma->vc = devm_kcalloc(&pdev->dev, cqdma->dma_requests,
847 sizeof(*cqdma->vc), GFP_KERNEL);
848 if (!cqdma->vc)
851 for (i = 0; i < cqdma->dma_requests; i++) {
852 vc = &cqdma->vc[i];
863 of_dma_xlate_by_chan_id, cqdma);
870 err = mtk_cqdma_hw_init(cqdma);
877 platform_set_drvdata(pdev, cqdma);
880 for (i = 0; i < cqdma->dma_channels; ++i)
881 tasklet_setup(&cqdma->pc[i]->tasklet, mtk_cqdma_tasklet_cb);
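
Lines 846-881 are the probe tail: allocate the virtual channels, register with the DMA engine core and the OF DMA translation layer, bring up the hardware, then set up one completion tasklet per physical channel. A hedged sketch, reusing probe's locals (dd, vc, err, i); the desc_free hook, the dma_async_device_register() call and the error unwinding are assumptions, the rest is from the quoted lines.

        cqdma->vc = devm_kcalloc(&pdev->dev, cqdma->dma_requests,
                                 sizeof(*cqdma->vc), GFP_KERNEL);
        if (!cqdma->vc)
                return -ENOMEM;

        for (i = 0; i < cqdma->dma_requests; i++) {
                vc = &cqdma->vc[i];
                vc->vc.desc_free = mtk_cqdma_vdesc_free;  /* assumed hook */
                vchan_init(&vc->vc, dd);
        }

        err = dma_async_device_register(dd);              /* assumed, not quoted */
        if (err)
                return err;

        err = of_dma_controller_register(pdev->dev.of_node,
                                         of_dma_xlate_by_chan_id, cqdma);
        if (err) {
                dma_async_device_unregister(dd);
                return err;
        }

        err = mtk_cqdma_hw_init(cqdma);
        if (err) {
                of_dma_controller_free(pdev->dev.of_node);
                dma_async_device_unregister(dd);
                return err;
        }

        platform_set_drvdata(pdev, cqdma);

        /* one completion tasklet per physical channel */
        for (i = 0; i < cqdma->dma_channels; ++i)
                tasklet_setup(&cqdma->pc[i]->tasklet, mtk_cqdma_tasklet_cb);

        return 0;
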
895 struct mtk_cqdma_device *cqdma = platform_get_drvdata(pdev);
901 for (i = 0; i < cqdma->dma_requests; i++) {
902 vc = &cqdma->vc[i];
909 for (i = 0; i < cqdma->dma_channels; i++) {
910 spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
911 mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_EN,
913 spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);
916 synchronize_irq(cqdma->pc[i]->irq);
918 tasklet_kill(&cqdma->pc[i]->tasklet);
922 mtk_cqdma_hw_deinit(cqdma);
924 dma_async_device_unregister(&cqdma->ddev);
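
Lines 895-924 come from the remove path and show the teardown ordering: mask the per-PC interrupt enable bit under the lock, wait for any in-flight handler with synchronize_irq(), kill the tasklet, then reset the hardware and unregister from the DMA engine core. A hedged reconstruction; the callback signature, the virtual-channel loop body, the INT_EN bit name and the of_dma_controller_free() counterpart are assumptions.

static int mtk_cqdma_remove(struct platform_device *pdev)
{
        struct mtk_cqdma_device *cqdma = platform_get_drvdata(pdev);
        struct mtk_cqdma_vchan *vc;
        unsigned long flags;
        int i;

        /* stop the virtual channels first (loop body assumed) */
        for (i = 0; i < cqdma->dma_requests; i++) {
                vc = &cqdma->vc[i];
                tasklet_kill(&vc->vc.task);     /* assumed virt_dma_chan member */
        }

        for (i = 0; i < cqdma->dma_channels; i++) {
                /* mask the interrupt so no new handler can fire */
                spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
                mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_EN,
                            MTK_CQDMA_INT_EN_BIT);      /* assumed bit name */
                spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);

                /* wait for a handler already running on another CPU */
                synchronize_irq(cqdma->pc[i]->irq);

                /* now it is safe to kill the completion tasklet */
                tasklet_kill(&cqdma->pc[i]->tasklet);
        }

        /* reset the hardware, then drop out of the DMA engine core */
        mtk_cqdma_hw_deinit(cqdma);
        dma_async_device_unregister(&cqdma->ddev);
        of_dma_controller_free(pdev->dev.of_node);      /* assumed counterpart */

        return 0;
}
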