Lines matching defs:cmdq — definitions and uses of the cmdq symbol in the MediaTek CMDQ (GCE) mailbox driver; each entry below is prefixed with its line number in the source file.
17 #include <linux/mailbox/mtk-cmdq-mailbox.h>
67 struct cmdq *cmdq;
74 struct cmdq {
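The dereferences throughout the matches below imply the layout of struct cmdq (line 74). A minimal sketch of the fields those lines touch; exact types, field order, the pdata struct name and the clocks[] bound are assumptions, not shown in the matches:

struct cmdq {
	struct mbox_controller	mbox;		/* container_of() target at line 107 */
	void __iomem		*base;		/* GCE register window, line 557 */
	int			irq;		/* line 561 */
	u32			irq_mask;	/* GENMASK(thread_nr - 1, 0), line 571 */
	const struct gce_plat	*pdata;		/* per-SoC data: shift, thread_nr, gce_num, control_by_sw, sw_ddr_en */
	struct cmdq_thread	*thread;	/* per-thread state array, lines 614-623 */
	struct clk_bulk_data	clocks[CMDQ_GCE_NUM_MAX];	/* assumed bound; filled at lines 580-595 */
	bool			suspended;	/* lines 320, 346, 374 */
};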
93 static void cmdq_sw_ddr_enable(struct cmdq *cmdq, bool enable)
95 WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
98 writel(GCE_DDR_EN | GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
100 writel(GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
102 clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
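Reassembled, the matches at lines 93–102 give nearly all of cmdq_sw_ddr_enable(): a clock-gated pair of writes to GCE_GCTL_VALUE that either asserts or drops GCE_DDR_EN while keeping software control. Only the if/else glue below is inferred:

static void cmdq_sw_ddr_enable(struct cmdq *cmdq, bool enable)
{
	WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));

	if (enable)
		writel(GCE_DDR_EN | GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
	else
		writel(GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);

	clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
}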
107 struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);
109 return cmdq->pdata->shift;
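Lines 107 and 109 belong to the small accessor that reports the physical-address shift to mailbox clients; a sketch assuming the exported name cmdq_get_shift_pa() and a u8 return, neither of which appears in the matches:

u8 cmdq_get_shift_pa(struct mbox_chan *chan)
{
	struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);

	return cmdq->pdata->shift;
}
EXPORT_SYMBOL(cmdq_get_shift_pa);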
113 static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
125 dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
126 (u32)(thread->base - cmdq->base));
138 static void cmdq_init(struct cmdq *cmdq)
143 WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
144 if (cmdq->pdata->control_by_sw)
146 if (cmdq->pdata->sw_ddr_en)
150 writel(gctl_regval, cmdq->base + GCE_GCTL_VALUE);
152 writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
154 writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
155 clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
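The matches at lines 138–155 cover cmdq_init(): enable the bulk clocks, program GCE_GCTL_VALUE from the per-SoC control_by_sw/sw_ddr_en flags, set the active slot cycles, walk the sync tokens, then drop the clocks. A sketch in which the conditional bodies, the gctl_regval guard and the loop bound (CMDQ_MAX_EVENT) are assumptions:

static void cmdq_init(struct cmdq *cmdq)
{
	int i;
	u32 gctl_regval = 0;

	WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));

	if (cmdq->pdata->control_by_sw)
		gctl_regval = GCE_CTRL_BY_SW;
	if (cmdq->pdata->sw_ddr_en)
		gctl_regval |= GCE_DDR_EN | GCE_CTRL_BY_SW;

	if (gctl_regval)
		writel(gctl_regval, cmdq->base + GCE_GCTL_VALUE);

	writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
	for (i = 0; i <= CMDQ_MAX_EVENT; i++)
		writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);

	clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
}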
158 static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
166 dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
167 (u32)(thread->base - cmdq->base));
174 static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
176 cmdq_thread_reset(cmdq, thread);
189 struct device *dev = task->cmdq->mbox.dev;
200 (task->pa_base >> task->cmdq->pdata->shift);
227 struct cmdq *cmdq = task->cmdq;
229 dev_err(cmdq->mbox.dev, "task 0x%p error\n", task);
230 WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
234 writel(next_task->pa_base >> cmdq->pdata->shift,
239 static void cmdq_thread_irq_handler(struct cmdq *cmdq,
265 curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->pdata->shift;
287 cmdq_thread_disable(cmdq, thread);
288 clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
294 struct cmdq *cmdq = dev;
298 irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask;
299 if (!(irq_status ^ cmdq->irq_mask))
302 for_each_clear_bit(bit, &irq_status, cmdq->pdata->thread_nr) {
303 struct cmdq_thread *thread = &cmdq->thread[bit];
306 cmdq_thread_irq_handler(cmdq, thread);
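Lines 294–306 span the top-level ISR. The per-thread status bits appear to be active-low (pending threads read as cleared bits, hence for_each_clear_bit), so the handler masks with irq_mask, returns IRQ_NONE when every owned bit is still set, and otherwise dispatches each cleared bit to its thread. A sketch; any per-thread locking around the call is not visible in the matches and is left out:

static irqreturn_t cmdq_irq_handler(int irq, void *dev)
{
	struct cmdq *cmdq = dev;
	unsigned long irq_status;
	int bit;

	irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask;
	if (!(irq_status ^ cmdq->irq_mask))
		return IRQ_NONE;	/* no owned thread has a pending (cleared) bit */

	for_each_clear_bit(bit, &irq_status, cmdq->pdata->thread_nr) {
		struct cmdq_thread *thread = &cmdq->thread[bit];

		cmdq_thread_irq_handler(cmdq, thread);
	}

	return IRQ_HANDLED;
}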
315 struct cmdq *cmdq = dev_get_drvdata(dev);
320 cmdq->suspended = true;
322 for (i = 0; i < cmdq->pdata->thread_nr; i++) {
323 thread = &cmdq->thread[i];
333 if (cmdq->pdata->sw_ddr_en)
334 cmdq_sw_ddr_enable(cmdq, false);
336 clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks);
343 struct cmdq *cmdq = dev_get_drvdata(dev);
345 WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks));
346 cmdq->suspended = false;
348 if (cmdq->pdata->sw_ddr_en)
349 cmdq_sw_ddr_enable(cmdq, true);
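The suspend/resume pair at lines 315–349 brackets clk_bulk_prepare()/clk_bulk_unprepare() and re-arms software DDR control where the platform needs it. The resume callback (named cmdq_resume() here by assumption) assembles to roughly:

static int cmdq_resume(struct device *dev)
{
	struct cmdq *cmdq = dev_get_drvdata(dev);

	WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks));
	cmdq->suspended = false;

	if (cmdq->pdata->sw_ddr_en)
		cmdq_sw_ddr_enable(cmdq, true);

	return 0;
}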
356 struct cmdq *cmdq = platform_get_drvdata(pdev);
358 if (cmdq->pdata->sw_ddr_en)
359 cmdq_sw_ddr_enable(cmdq, false);
361 clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks);
369 struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
374 WARN_ON(cmdq->suspended);
380 task->cmdq = cmdq;
387 WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
395 WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);
397 writel(task->pa_base >> cmdq->pdata->shift,
399 writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
406 WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
408 cmdq->pdata->shift;
410 cmdq->pdata->shift;
415 writel(task->pa_base >> cmdq->pdata->shift,
421 writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
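One convention runs through the send path (lines 397–421) and the IRQ path (line 265): GCE thread registers hold DMA addresses right-shifted by pdata->shift, so buffer addresses are shifted down on every writel() and shifted back up when CMDQ_THR_CURR_ADDR is read. A pair of hypothetical helpers, not part of the driver, that states the convention:

static u32 cmdq_pa_to_reg(struct cmdq *cmdq, dma_addr_t pa)
{
	return (u32)(pa >> cmdq->pdata->shift);
}

static dma_addr_t cmdq_reg_to_pa(struct cmdq *cmdq, u32 reg)
{
	return (dma_addr_t)reg << cmdq->pdata->shift;
}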
438 struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
446 WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
449 cmdq_thread_irq_handler(cmdq, thread);
459 cmdq_thread_disable(cmdq, thread);
460 clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
476 struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
485 WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
499 cmdq_thread_disable(cmdq, thread);
500 clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
511 dev_err(cmdq->mbox.dev, "Fail to wait GCE thread 0x%x done\n",
512 (u32)(thread->base - cmdq->base));
545 struct cmdq *cmdq;
553 cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
554 if (!cmdq)
557 cmdq->base = devm_platform_ioremap_resource(pdev, 0);
558 if (IS_ERR(cmdq->base))
559 return PTR_ERR(cmdq->base);
561 cmdq->irq = platform_get_irq(pdev, 0);
562 if (cmdq->irq < 0)
563 return cmdq->irq;
565 cmdq->pdata = device_get_match_data(dev);
566 if (!cmdq->pdata) {
571 cmdq->irq_mask = GENMASK(cmdq->pdata->thread_nr - 1, 0);
573 dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
574 dev, cmdq->base, cmdq->irq);
576 if (cmdq->pdata->gce_num > 1) {
579 if (alias_id >= 0 && alias_id < cmdq->pdata->gce_num) {
580 cmdq->clocks[alias_id].id = clk_names[alias_id];
581 cmdq->clocks[alias_id].clk = of_clk_get(node, 0);
582 if (IS_ERR(cmdq->clocks[alias_id].clk)) {
585 PTR_ERR(cmdq->clocks[alias_id].clk),
592 cmdq->clocks[alias_id].id = clk_name;
593 cmdq->clocks[alias_id].clk = devm_clk_get(&pdev->dev, clk_name);
594 if (IS_ERR(cmdq->clocks[alias_id].clk)) {
595 return dev_err_probe(dev, PTR_ERR(cmdq->clocks[alias_id].clk),
600 cmdq->mbox.dev = dev;
601 cmdq->mbox.chans = devm_kcalloc(dev, cmdq->pdata->thread_nr,
602 sizeof(*cmdq->mbox.chans), GFP_KERNEL);
603 if (!cmdq->mbox.chans)
606 cmdq->mbox.num_chans = cmdq->pdata->thread_nr;
607 cmdq->mbox.ops = &cmdq_mbox_chan_ops;
608 cmdq->mbox.of_xlate = cmdq_xlate;
611 cmdq->mbox.txdone_irq = false;
612 cmdq->mbox.txdone_poll = false;
614 cmdq->thread = devm_kcalloc(dev, cmdq->pdata->thread_nr,
615 sizeof(*cmdq->thread), GFP_KERNEL);
616 if (!cmdq->thread)
619 for (i = 0; i < cmdq->pdata->thread_nr; i++) {
620 cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
622 INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
623 cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
626 err = devm_mbox_controller_register(dev, &cmdq->mbox);
632 platform_set_drvdata(pdev, cmdq);
634 WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks));
636 cmdq_init(cmdq);
638 err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
639 "mtk_cmdq", cmdq);