Lines Matching defs:thread

65 struct cmdq_thread *thread;
75 struct cmdq_thread *thread;
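
The two declarations above (lines 65 and 75) are the back-pointer from a queued task to its hardware thread and the controller's per-thread array; the excerpts appear to come from the MediaTek CMDQ/GCE mailbox driver. A minimal sketch of the per-thread bookkeeping, inferring the field set only from the members used elsewhere in this listing (not copied from the driver's real definition):

#include <linux/list.h>
#include <linux/mailbox_controller.h>
#include <linux/types.h>

/*
 * Illustrative sketch only: the fields are inferred from the accesses
 * shown in this listing (base, task_busy_list, chan, priority).
 */
struct cmdq_thread_sketch {
	struct mbox_chan	*chan;		/* bound to mbox->chans[ind] at xlate time */
	void __iomem		*base;		/* this thread's slice of the GCE register window */
	struct list_head	task_busy_list;	/* tasks currently queued on the HW thread */
	u32			priority;	/* written to the thread priority register at start */
};
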
94 static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
98 writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);
101 if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
104 if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
106 dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
107 (u32)(thread->base - cmdq->base));
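
Lines 94-107 are cmdq_thread_suspend(): request a suspend, skip the wait when the thread is not even enabled, then poll the status register in atomic context. A self-contained sketch of that handshake; the register offsets and bit values below are placeholders, not the driver's real definitions:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>

/* Placeholder offsets/bits for illustration only. */
#define THR_SUSPEND_TASK	0x00
#define THR_CURR_STATUS		0x04
#define THR_ENABLE_TASK		0x08
#define THR_SUSPEND		BIT(0)
#define THR_STATUS_SUSPENDED	BIT(1)
#define THR_ENABLED		BIT(0)

static int thread_suspend_sketch(void __iomem *thr_base)
{
	u32 status;

	/* Request suspension of the hardware thread. */
	writel(THR_SUSPEND, thr_base + THR_SUSPEND_TASK);

	/* A thread that is not enabled has nothing to suspend. */
	if (!(readl(thr_base + THR_ENABLE_TASK) & THR_ENABLED))
		return 0;

	/* Busy-poll (callers may hold a spinlock) until the HW reports suspended. */
	if (readl_poll_timeout_atomic(thr_base + THR_CURR_STATUS, status,
				      status & THR_STATUS_SUSPENDED, 0, 10))
		return -EFAULT;

	return 0;
}
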
114 static void cmdq_thread_resume(struct cmdq_thread *thread)
116 writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);
130 static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
134 writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET);
135 if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET,
138 dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
139 (u32)(thread->base - cmdq->base));
146 static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
148 cmdq_thread_reset(cmdq, thread);
149 writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK);
152 /* notify GCE to re-fetch commands by setting GCE thread PC */
153 static void cmdq_thread_invalidate_fetched_data(struct cmdq_thread *thread)
155 writel(readl(thread->base + CMDQ_THR_CURR_ADDR),
156 thread->base + CMDQ_THR_CURR_ADDR);
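
Lines 152-156 show the prefetch-invalidate trick: the current PC is read and written straight back, which forces the GCE to drop already-fetched commands and refetch from the same address, so commands appended just behind the PC are picked up. A hypothetical sketch of the same idea (the PC register offset is a placeholder):

#include <linux/io.h>

#define THR_CURR_ADDR	0x20	/* placeholder: thread PC register offset */

/*
 * Rewriting the PC register with its own value makes the engine discard
 * already-prefetched commands and refetch from that address.
 */
static void thread_invalidate_fetched_sketch(void __iomem *thr_base)
{
	writel(readl(thr_base + THR_CURR_ADDR), thr_base + THR_CURR_ADDR);
}
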
162 struct cmdq_thread *thread = task->thread;
164 &thread->task_busy_list, typeof(*task), list_entry);
176 cmdq_thread_invalidate_fetched_data(thread);
179 static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
181 return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
199 struct cmdq_thread *thread = task->thread;
204 WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
205 next_task = list_first_entry_or_null(&thread->task_busy_list,
209 thread->base + CMDQ_THR_CURR_ADDR);
210 cmdq_thread_resume(thread);

214 struct cmdq_thread *thread)
220 irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
221 writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS);
226 * reset / disable this GCE thread, so we need to check the enable
227 * bit of this GCE thread.
229 if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
239 curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->shift_pa;
241 list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
260 if (list_empty(&thread->task_busy_list)) {
261 cmdq_thread_disable(cmdq, thread);
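
The IRQ-handler excerpt (lines 214-261) acknowledges the per-thread IRQ bits, returns early if the thread has already been reset/disabled by the timeout path, then reads the PC (scaled by shift_pa) and walks task_busy_list to retire every task the PC has moved past, disabling the thread once the list empties. A sketch of that retire walk, with illustrative task fields and a stand-in completion callback:

#include <linux/list.h>
#include <linux/types.h>

struct task_sketch {
	struct list_head	list_entry;
	dma_addr_t		pa_base;	/* start of this task's command buffer */
	size_t			cmd_size;	/* buffer size in bytes */
};

/*
 * Retire every queued task whose buffer the thread PC has already moved
 * past; the task the PC currently points into is still executing, so the
 * walk stops there. "complete" stands in for the driver's completion
 * callback towards the mailbox client.
 */
static void retire_done_tasks_sketch(struct list_head *busy_list,
				     dma_addr_t curr_pa,
				     void (*complete)(struct task_sketch *task))
{
	struct task_sketch *task, *tmp;

	list_for_each_entry_safe(task, tmp, busy_list, list_entry) {
		if (curr_pa >= task->pa_base &&
		    curr_pa < task->pa_base + task->cmd_size)
			break;			/* PC is inside this task: still running */

		list_del(&task->list_entry);
		complete(task);
	}
}
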
277 struct cmdq_thread *thread = &cmdq->thread[bit];
279 spin_lock_irqsave(&thread->chan->lock, flags);
280 cmdq_thread_irq_handler(cmdq, thread);
281 spin_unlock_irqrestore(&thread->chan->lock, flags);
290 struct cmdq_thread *thread;
297 thread = &cmdq->thread[i];
298 if (!list_empty(&thread->task_busy_list)) {
333 struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
348 task->thread = thread;
351 if (list_empty(&thread->task_busy_list)) {
354 * The thread reset will clear thread-related registers to 0,
357 * thread and make it running.
359 WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);
362 thread->base + CMDQ_THR_CURR_ADDR);
364 thread->base + CMDQ_THR_END_ADDR);
366 writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
367 writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
368 writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
370 WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
371 curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) <<
373 end_pa = readl(thread->base + CMDQ_THR_END_ADDR) <<
380 thread->base + CMDQ_THR_CURR_ADDR);
383 smp_mb(); /* modify jump before enable thread */
386 thread->base + CMDQ_THR_END_ADDR);
387 cmdq_thread_resume(thread);
389 list_move_tail(&task->list_entry, &thread->task_busy_list);
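
cmdq_mbox_send_data() (lines 333-389) has two paths: on an idle thread it resets the hardware and programs the PC, end address, priority and IRQ/enable bits from scratch; on a busy thread it suspends, compares the PC and end address against the new buffer, either moves the PC directly to the new task or relies on the patched end jump, then resumes. A sketch of the simpler idle-thread start, with placeholder offsets/bits and an assumed shift_pa parameter:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

/* Placeholder register offsets and bits for illustration. */
#define THR_CURR_ADDR	0x20
#define THR_END_ADDR	0x24
#define THR_PRIORITY	0x40
#define THR_IRQ_ENABLE	0x10
#define THR_ENABLE_TASK	0x04
#define THR_IRQ_EN	GENMASK(1, 0)
#define THR_ENABLED	BIT(0)

/*
 * Program an idle hardware thread to execute one freshly flushed command
 * buffer. shift_pa models the newer GCE parts where the PC/END registers
 * hold the DMA address right-shifted (hence the "<< cmdq->shift_pa" reads
 * in the excerpt above).
 */
static void thread_start_sketch(void __iomem *thr_base, dma_addr_t pa_base,
				size_t cmd_size, u32 priority, u8 shift_pa)
{
	writel((u32)(pa_base >> shift_pa), thr_base + THR_CURR_ADDR);
	writel((u32)((pa_base + cmd_size) >> shift_pa), thr_base + THR_END_ADDR);
	writel(priority, thr_base + THR_PRIORITY);
	writel(THR_IRQ_EN, thr_base + THR_IRQ_ENABLE);		/* take completion IRQs */
	writel(THR_ENABLED, thr_base + THR_ENABLE_TASK);	/* start fetching */
}
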
401 struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
406 spin_lock_irqsave(&thread->chan->lock, flags);
407 if (list_empty(&thread->task_busy_list))
410 WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
413 cmdq_thread_irq_handler(cmdq, thread);
414 if (list_empty(&thread->task_busy_list))
417 list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
423 cmdq_thread_disable(cmdq, thread);
427 * An empty thread->task_busy_list means the thread is already disabled. The
428 * cmdq_mbox_send_data() always resets the thread, which clears the disable and
432 spin_unlock_irqrestore(&thread->chan->lock, flags);
437 struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
445 spin_lock_irqsave(&thread->chan->lock, flags);
446 if (list_empty(&thread->task_busy_list))
449 WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
450 if (!cmdq_thread_is_in_wfe(thread))
453 list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
465 cmdq_thread_resume(thread);
466 cmdq_thread_disable(cmdq, thread);
470 spin_unlock_irqrestore(&thread->chan->lock, flags);
474 cmdq_thread_resume(thread);
475 spin_unlock_irqrestore(&thread->chan->lock, flags);
476 if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
478 dev_err(cmdq->mbox.dev, "Fail to wait GCE thread 0x%x done\n",
479 (u32)(thread->base - cmdq->base));
497 struct cmdq_thread *thread;
502 thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
503 thread->priority = sp->args[1];
504 thread->chan = &mbox->chans[ind];
571 cmdq->thread = devm_kcalloc(dev, cmdq->thread_nr,
572 sizeof(*cmdq->thread), GFP_KERNEL);
573 if (!cmdq->thread)
577 cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
579 INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
580 cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
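
The probe excerpt (lines 571-580) allocates the thread array with devm_kcalloc, points each entry at its slice of the GCE register window, initialises its busy list and stores the thread as the channel's con_priv so the mailbox ops can recover it later. A reduced sketch of that wiring; the base offset and stride are placeholders, and con_priv[] stands in for cmdq->mbox.chans[i].con_priv:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

#define THR_BASE	0x100	/* placeholder: offset of the first thread's registers */
#define THR_SIZE	0x80	/* placeholder: stride between per-thread windows */

struct thread_slot_sketch {
	void __iomem		*base;
	struct list_head	task_busy_list;
};

/* Allocate per-thread state and wire each slot to its register window. */
static int threads_init_sketch(struct device *dev, void __iomem *gce_base,
			       unsigned int thread_nr,
			       struct thread_slot_sketch **out, void **con_priv)
{
	struct thread_slot_sketch *threads;
	unsigned int i;

	threads = devm_kcalloc(dev, thread_nr, sizeof(*threads), GFP_KERNEL);
	if (!threads)
		return -ENOMEM;

	for (i = 0; i < thread_nr; i++) {
		threads[i].base = gce_base + THR_BASE + THR_SIZE * i;
		INIT_LIST_HEAD(&threads[i].task_busy_list);
		con_priv[i] = &threads[i];
	}

	*out = threads;
	return 0;
}
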