Lines matching defs:cmdq (drivers/mailbox/mtk-cmdq-mailbox.c, MediaTek GCE mailbox controller driver)
17 #include <linux/mailbox/mtk-cmdq-mailbox.h>
62 struct cmdq *cmdq;
69 struct cmdq {
88 struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);
90 return cmdq->shift_pa;
94 static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
106 dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
107 (u32)(thread->base - cmdq->base));
119 static void cmdq_init(struct cmdq *cmdq)
123 WARN_ON(clk_enable(cmdq->clock) < 0);
124 writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
126 writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
127 clk_disable(cmdq->clock);
130 static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
138 dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
139 (u32)(thread->base - cmdq->base));
146 static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
148 cmdq_thread_reset(cmdq, thread);
161 struct device *dev = task->cmdq->mbox.dev;
172 (task->pa_base >> task->cmdq->shift_pa);
201 struct cmdq *cmdq = task->cmdq;
203 dev_err(cmdq->mbox.dev, "task 0x%p error\n", task);
204 WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
208 writel(next_task->pa_base >> cmdq->shift_pa,
213 static void cmdq_thread_irq_handler(struct cmdq *cmdq,
239 curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->shift_pa;
261 cmdq_thread_disable(cmdq, thread);
262 clk_disable(cmdq->clock);
268 struct cmdq *cmdq = dev;
272 irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask;
273 if (!(irq_status ^ cmdq->irq_mask))
276 for_each_clear_bit(bit, &irq_status, cmdq->thread_nr) {
277 struct cmdq_thread *thread = &cmdq->thread[bit];
280 cmdq_thread_irq_handler(cmdq, thread);
289 struct cmdq *cmdq = dev_get_drvdata(dev);
294 cmdq->suspended = true;
296 for (i = 0; i < cmdq->thread_nr; i++) {
297 thread = &cmdq->thread[i];
307 clk_unprepare(cmdq->clock);
314 struct cmdq *cmdq = dev_get_drvdata(dev);
316 WARN_ON(clk_prepare(cmdq->clock) < 0);
317 cmdq->suspended = false;
323 struct cmdq *cmdq = platform_get_drvdata(pdev);
325 clk_unprepare(cmdq->clock);
334 struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
339 WARN_ON(cmdq->suspended);
345 task->cmdq = cmdq;
352 WARN_ON(clk_enable(cmdq->clock) < 0);
359 WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);
361 writel(task->pa_base >> cmdq->shift_pa,
363 writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->shift_pa,
370 WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
372 cmdq->shift_pa;
374 cmdq->shift_pa;
379 writel(task->pa_base >> cmdq->shift_pa,
385 writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->shift_pa,
402 struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
410 WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
413 cmdq_thread_irq_handler(cmdq, thread);
423 cmdq_thread_disable(cmdq, thread);
424 clk_disable(cmdq->clock);
440 struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
449 WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
466 cmdq_thread_disable(cmdq, thread);
467 clk_disable(cmdq->clock);
478 dev_err(cmdq->mbox.dev, "Fail to wait GCE thread 0x%x done\n",
479 (u32)(thread->base - cmdq->base));
513 struct cmdq *cmdq;
517 cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
518 if (!cmdq)
522 cmdq->base = devm_ioremap_resource(dev, res);
523 if (IS_ERR(cmdq->base)) {
525 return PTR_ERR(cmdq->base);
528 cmdq->irq = platform_get_irq(pdev, 0);
529 if (cmdq->irq < 0)
530 return cmdq->irq;
538 cmdq->thread_nr = plat_data->thread_nr;
539 cmdq->shift_pa = plat_data->shift;
540 cmdq->irq_mask = GENMASK(cmdq->thread_nr - 1, 0);
541 err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
542 "mtk_cmdq", cmdq);
548 dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
549 dev, cmdq->base, cmdq->irq);
551 cmdq->clock = devm_clk_get(dev, "gce");
552 if (IS_ERR(cmdq->clock)) {
554 return PTR_ERR(cmdq->clock);
557 cmdq->mbox.dev = dev;
558 cmdq->mbox.chans = devm_kcalloc(dev, cmdq->thread_nr,
559 sizeof(*cmdq->mbox.chans), GFP_KERNEL);
560 if (!cmdq->mbox.chans)
563 cmdq->mbox.num_chans = cmdq->thread_nr;
564 cmdq->mbox.ops = &cmdq_mbox_chan_ops;
565 cmdq->mbox.of_xlate = cmdq_xlate;
568 cmdq->mbox.txdone_irq = false;
569 cmdq->mbox.txdone_poll = false;
571 cmdq->thread = devm_kcalloc(dev, cmdq->thread_nr,
572 sizeof(*cmdq->thread), GFP_KERNEL);
573 if (!cmdq->thread)
576 for (i = 0; i < cmdq->thread_nr; i++) {
577 cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
579 INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
580 cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
583 err = devm_mbox_controller_register(dev, &cmdq->mbox);
589 platform_set_drvdata(pdev, cmdq);
590 WARN_ON(clk_prepare(cmdq->clock) < 0);
592 cmdq_init(cmdq);
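
The listing covers the controller side of the GCE mailbox driver: the probe path (lines 513-592) registers one mailbox channel per hardware thread, the send_data path (lines 334-385) queues a struct cmdq_pkt onto a thread by programming its start/end address registers (shifted by shift_pa), and the thread IRQ handler completes finished packets. Because the controller registers with txdone_irq and txdone_poll both false (lines 568-569), TX completion is acknowledged by the client. The sketch below shows that client side using only the generic mailbox client API; it assumes the packet has already been built and DMA-mapped (normally done with the mtk-cmdq helper), and demo_flush_pkt() is a hypothetical name for illustration, not part of the driver.

/*
 * Sketch of a mailbox client driving one GCE thread. The struct cmdq_pkt
 * passed in is assumed to be already built and DMA-mapped (normally via
 * the mtk-cmdq helper); only the generic mailbox client calls below are
 * taken from <linux/mailbox_client.h>.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>

static int demo_flush_pkt(struct device *dev, struct cmdq_pkt *pkt)
{
	struct mbox_client cl = {
		.dev = dev,
		.tx_block = false,	/* completion is reported per packet, not per TX */
		.knows_txdone = true,	/* controller is TXDONE_BY_ACK */
	};
	struct mbox_chan *chan;
	int ret;

	/* The index into the client's "mboxes" phandle list selects the GCE thread. */
	chan = mbox_request_channel(&cl, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* cmdq_mbox_send_data() receives the struct cmdq_pkt as the message. */
	ret = mbox_send_message(chan, pkt);
	if (ret >= 0)
		/* txdone_irq/txdone_poll are both false, so the client acks the TX. */
		mbox_client_txdone(chan, 0);

	/* A real client keeps the channel (and cl) alive until the packet completes. */
	mbox_free_channel(chan);
	return ret < 0 ? ret : 0;
}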