Lines Matching refs:ipc

23 struct avs_ipc *ipc = adev->ipc;
27 if (ipc->in_d0ix == enable)
34 atomic_inc(&ipc->d0ix_disable_depth);
36 ipc->in_d0ix = false;
40 ipc->in_d0ix = enable;
46 if (atomic_read(&adev->ipc->d0ix_disable_depth))
49 mod_delayed_work(system_power_efficient_wq, &adev->ipc->d0ix_work,
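
The matches at source lines 23-49 come from the D0ix (DSP power-gating) entry path: a setter that tracks the current state in ipc->in_d0ix, and a scheduler that defers the transition onto the power-efficient workqueue unless D0ix is currently being held off. A minimal sketch of that shape follows; the hardware hop (avs_dsp_op), the error-path details, the scheduler's name and the delay constant are assumptions, only the ipc-referencing lines are taken from the listing:

    static int avs_dsp_set_d0ix(struct avs_dev *adev, bool enable)
    {
            struct avs_ipc *ipc = adev->ipc;
            int ret;

            /* Nothing to do if the DSP is already in the requested state. */
            if (ipc->in_d0ix == enable)
                    return 0;

            ret = avs_dsp_op(adev, set_d0ix, enable);       /* hardware call assumed */
            if (ret) {
                    /* On failure, hold off further D0ix attempts and clear the flag. */
                    atomic_inc(&ipc->d0ix_disable_depth);
                    ipc->in_d0ix = false;
                    return ret;
            }

            ipc->in_d0ix = enable;
            return 0;
    }

    /* Name and delay constant assumed. */
    void avs_dsp_schedule_d0ix(struct avs_dev *adev)
    {
            /* Someone is holding D0ix off; do not arm the delayed entry. */
            if (atomic_read(&adev->ipc->d0ix_disable_depth))
                    return;

            mod_delayed_work(system_power_efficient_wq, &adev->ipc->d0ix_work,
                             msecs_to_jiffies(AVS_D0IX_DELAY_MS));
    }

mod_delayed_work() restarts the countdown on every call, so D0ix is only entered once the link has stayed idle for the whole delay.
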
55 struct avs_ipc *ipc = container_of(work, struct avs_ipc, d0ix_work.work);
57 avs_dsp_set_d0ix(to_avs_dev(ipc->dev), true);
62 struct avs_ipc *ipc = adev->ipc;
64 if (!atomic_read(&ipc->d0ix_disable_depth)) {
65 cancel_delayed_work_sync(&ipc->d0ix_work);
74 struct avs_ipc *ipc = adev->ipc;
77 if (atomic_inc_return(&ipc->d0ix_disable_depth) == 1) {
78 cancel_delayed_work_sync(&ipc->d0ix_work);
87 struct avs_ipc *ipc = adev->ipc;
89 if (atomic_dec_and_test(&ipc->d0ix_disable_depth))
90 queue_delayed_work(system_power_efficient_wq, &ipc->d0ix_work,
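
Lines 55-90 are the rest of the D0ix machinery: the delayed work simply enters D0ix, and a depth counter lets multiple callers veto power gating. The first caller to raise the depth cancels any pending entry and wakes the DSP; the last one to drop it re-arms the delayed work. A sketch of that reference-counting pattern (the wake/disable/enable helper names are assumptions; avs_dsp_d0ix_work is named by the INIT_DELAYED_WORK match at line 613):

    static void avs_dsp_d0ix_work(struct work_struct *work)
    {
            struct avs_ipc *ipc = container_of(work, struct avs_ipc, d0ix_work.work);

            avs_dsp_set_d0ix(to_avs_dev(ipc->dev), true);
    }

    /* Wake path (name assumed): leave D0ix only if nobody has vetoed it already. */
    static int avs_dsp_wake_d0i0(struct avs_dev *adev)
    {
            struct avs_ipc *ipc = adev->ipc;

            if (!atomic_read(&ipc->d0ix_disable_depth)) {
                    cancel_delayed_work_sync(&ipc->d0ix_work);
                    return avs_dsp_set_d0ix(adev, false);
            }

            return 0;
    }

    int avs_dsp_disable_d0ix(struct avs_dev *adev)          /* name assumed */
    {
            struct avs_ipc *ipc = adev->ipc;

            /* Only the first disable cancels pending work and exits D0ix. */
            if (atomic_inc_return(&ipc->d0ix_disable_depth) == 1) {
                    cancel_delayed_work_sync(&ipc->d0ix_work);
                    return avs_dsp_set_d0ix(adev, false);
            }

            return 0;
    }

    int avs_dsp_enable_d0ix(struct avs_dev *adev)           /* name assumed */
    {
            struct avs_ipc *ipc = adev->ipc;

            /* Only the last enable re-arms the delayed D0ix entry. */
            if (atomic_dec_and_test(&ipc->d0ix_disable_depth))
                    queue_delayed_work(system_power_efficient_wq, &ipc->d0ix_work,
                                       msecs_to_jiffies(AVS_D0IX_DELAY_MS));
            return 0;
    }

A depth counter rather than a plain flag lets independent callers stack their vetoes without coordinating with one another.
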
148 atomic_set(&adev->ipc->recovering, 0);
153 struct avs_ipc *ipc = container_of(work, struct avs_ipc, recovery_work);
155 avs_dsp_recovery(to_avs_dev(ipc->dev));
160 struct avs_ipc *ipc = adev->ipc;
163 ipc->ready = false;
165 if (!atomic_add_unless(&ipc->recovering, 1, 1)) {
172 cancel_delayed_work_sync(&ipc->d0ix_work);
173 ipc->in_d0ix = false;
180 schedule_work(&ipc->recovery_work);
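
The recovery matches (148-180) form a single-flight pattern: atomic_add_unless() lets only the first exception schedule the recovery work, the counter is cleared once recovery finishes, and any pending D0ix entry is cancelled before the DSP is rebooted. A sketch; the exception handler's name and everything that does not reference ipc (logging, coredump, the actual reboot) are assumptions or elided:

    static void avs_dsp_recovery(struct avs_dev *adev)
    {
            /* ... suspend streams, reload firmware, reboot the DSP (elided) ... */

            /* Allow the next exception to trigger recovery again. */
            atomic_set(&adev->ipc->recovering, 0);
    }

    static void avs_dsp_recovery_work(struct work_struct *work)
    {
            struct avs_ipc *ipc = container_of(work, struct avs_ipc, recovery_work);

            avs_dsp_recovery(to_avs_dev(ipc->dev));
    }

    /* Name assumed; called when the firmware reports an exception or an IPC times out. */
    void avs_dsp_exception_caught(struct avs_dev *adev)
    {
            struct avs_ipc *ipc = adev->ipc;

            /* Stop accepting requests until the DSP reports ready again. */
            ipc->ready = false;

            if (!atomic_add_unless(&ipc->recovering, 1, 1)) {
                    /* Recovery already in flight; nothing more to do. */
                    return;
            }

            /* Make sure no delayed D0ix entry fires while the DSP is being rebooted. */
            cancel_delayed_work_sync(&ipc->d0ix_work);
            ipc->in_d0ix = false;

            schedule_work(&ipc->recovery_work);
    }
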
185 struct avs_ipc *ipc = adev->ipc;
192 ipc->rx.header = header;
198 ipc->rx.size = min_t(u32, AVS_MAILBOX_SIZE,
201 memcpy_fromio(ipc->rx.data, avs_uplink_addr(adev), ipc->rx.size);
202 trace_avs_msg_payload(ipc->rx.data, ipc->rx.size);
218 if (!adev->ipc->ready && msg.notify_msg_type != AVS_NOTIFY_FW_READY) {
264 adev->ipc->ready = true;
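
The block at 185-264 is the receive side. The reply header is latched into ipc->rx, the payload is copied out of the uplink (DSP-to-host) mailbox with its size clamped to AVS_MAILBOX_SIZE, and notifications other than FW_READY are dropped until the firmware has announced itself, at which point ipc->ready flips to true (lines 218 and 264). A sketch of the copy-out; the function name, the resize condition and the two payload-size helpers are hypothetical:

    static void avs_dsp_receive_rx(struct avs_dev *adev, u64 header)
    {
            struct avs_ipc *ipc = adev->ipc;

            ipc->rx.header = header;

            /*
             * ipc->rx.size was primed with the expected reply size; replies with
             * a variable-sized payload re-clamp it to the mailbox window. Both
             * the condition and the size decoded from the header are assumptions.
             */
            if (avs_reply_has_variable_size(header))
                    ipc->rx.size = min_t(u32, AVS_MAILBOX_SIZE,
                                         avs_reply_payload_size(header));

            memcpy_fromio(ipc->rx.data, avs_uplink_addr(adev), ipc->rx.size);
            trace_avs_msg_payload(ipc->rx.data, ipc->rx.size);
    }
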
285 struct avs_ipc *ipc = adev->ipc;
293 spin_lock_irq(&ipc->rx_lock);
295 ipc->rx_completed = true;
296 spin_unlock_irq(&ipc->rx_lock);
301 complete(&ipc->busy_completion);
307 struct avs_ipc *ipc = adev->ipc;
327 complete(&ipc->done_completion);
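
Lines 285-327 are the interrupt side of the handshake. A solicited reply is stored and flagged under the irq-disabling rx_lock variant, and busy_completion wakes the waiting sender; the interrupt handler separately completes done_completion when the DONE bit is raised (line 327), which is all the ROM path waits for. A sketch; the function and predicate names are assumptions:

    void avs_dsp_process_response(struct avs_dev *adev, u64 header)
    {
            struct avs_ipc *ipc = adev->ipc;

            if (avs_msg_is_reply(header)) {
                    /* Store the reply and mark it consumed before waking the sender. */
                    spin_lock_irq(&ipc->rx_lock);
                    avs_dsp_receive_rx(adev, header);
                    ipc->rx_completed = true;
                    spin_unlock_irq(&ipc->rx_lock);
            } else {
                    avs_dsp_process_notification(adev, header);
            }

            /* Either way, wake whoever blocks in avs_ipc_wait_busy_completion(). */
            complete(&ipc->busy_completion);
    }
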
379 static bool avs_ipc_is_busy(struct avs_ipc *ipc)
381 struct avs_dev *adev = to_avs_dev(ipc->dev);
388 static int avs_ipc_wait_busy_completion(struct avs_ipc *ipc, int timeout)
394 ret = wait_for_completion_timeout(&ipc->busy_completion, msecs_to_jiffies(timeout));
397 if (!ipc->ready)
401 if (!avs_ipc_is_busy(ipc))
407 wait_for_completion_killable(&ipc->busy_completion);
411 spin_lock(&ipc->rx_lock);
412 if (!ipc->rx_completed) {
416 reinit_completion(&ipc->busy_completion);
417 spin_unlock(&ipc->rx_lock);
421 spin_unlock(&ipc->rx_lock);
425 spin_unlock(&ipc->rx_lock);
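
avs_ipc_wait_busy_completion() (379-425) is the sender's wait loop. A timed-out wait is only treated as a real timeout when the firmware has nothing pending; if a message did arrive but is still being processed, the wait continues killably. An early wakeup caused by an unrelated notification is detected by checking rx_completed under rx_lock, in which case busy_completion is re-armed and the wait restarts. A sketch; the retry bound is assumed and avs_ipc_is_busy() (379-381) is used as an external helper:

    static int avs_ipc_wait_busy_completion(struct avs_ipc *ipc, int timeout)
    {
            u32 repeats_left = 128;         /* retry bound assumed */
            int ret;

    again:
            ret = wait_for_completion_timeout(&ipc->busy_completion,
                                              msecs_to_jiffies(timeout));

            /* The DSP may have died (or been blocked) while we slept. */
            if (!ipc->ready)
                    return -EPERM;

            if (!ret) {
                    /* Nothing pending from the firmware at all: a genuine timeout. */
                    if (!avs_ipc_is_busy(ipc))
                            return -ETIMEDOUT;
                    /* A message is pending but not yet processed; wait for the handler. */
                    wait_for_completion_killable(&ipc->busy_completion);
            }

            /* A notification may have completed busy_completion before our reply. */
            spin_lock(&ipc->rx_lock);
            if (!ipc->rx_completed) {
                    if (repeats_left) {
                            repeats_left--;
                            reinit_completion(&ipc->busy_completion);
                            spin_unlock(&ipc->rx_lock);
                            goto again;
                    }
                    spin_unlock(&ipc->rx_lock);
                    return -ETIMEDOUT;
            }
            spin_unlock(&ipc->rx_lock);

            return 0;
    }
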
429 static void avs_ipc_msg_init(struct avs_ipc *ipc, struct avs_ipc_msg *reply)
431 lockdep_assert_held(&ipc->rx_lock);
433 ipc->rx.header = 0;
434 ipc->rx.size = reply ? reply->size : 0;
435 ipc->rx_completed = false;
437 reinit_completion(&ipc->done_completion);
438 reinit_completion(&ipc->busy_completion);
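
Lines 429-438 reassemble almost verbatim into avs_ipc_msg_init(): with rx_lock held it resets the rx slot, primes the expected reply size, and re-arms both completions so stale wakeups from a previous exchange cannot leak into the next one:

    static void avs_ipc_msg_init(struct avs_ipc *ipc, struct avs_ipc_msg *reply)
    {
            lockdep_assert_held(&ipc->rx_lock);

            ipc->rx.header = 0;
            ipc->rx.size = reply ? reply->size : 0;
            ipc->rx_completed = false;

            reinit_completion(&ipc->done_completion);
            reinit_completion(&ipc->busy_completion);
    }
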
460 struct avs_ipc *ipc = adev->ipc;
463 if (!ipc->ready)
466 mutex_lock(&ipc->msg_mutex);
468 spin_lock(&ipc->rx_lock);
469 avs_ipc_msg_init(ipc, reply);
471 spin_unlock(&ipc->rx_lock);
473 ret = avs_ipc_wait_busy_completion(ipc, timeout);
484 ret = ipc->rx.rsp.status;
486 reply->header = ipc->rx.header;
487 reply->size = ipc->rx.size;
488 if (reply->data && ipc->rx.size)
489 memcpy(reply->data, ipc->rx.data, reply->size);
493 mutex_unlock(&ipc->msg_mutex);
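
The sender at 460-493 serializes exchanges with msg_mutex, primes the rx slot and pushes the request in one rx_lock section, then waits via avs_ipc_wait_busy_completion(). On success the firmware status becomes the return value and the payload is copied into the caller's reply buffer. A sketch; the function name, the downlink-send helper and the timeout error handling are assumptions:

    static int avs_dsp_do_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
                                   struct avs_ipc_msg *reply, int timeout)
    {
            struct avs_ipc *ipc = adev->ipc;
            int ret;

            if (!ipc->ready)
                    return -EPERM;

            mutex_lock(&ipc->msg_mutex);

            /* Arm the rx slot and kick the request atomically w.r.t. the rx path. */
            spin_lock(&ipc->rx_lock);
            avs_ipc_msg_init(ipc, reply);
            avs_dsp_send_tx(adev, request);         /* downlink-send helper assumed */
            spin_unlock(&ipc->rx_lock);

            ret = avs_ipc_wait_busy_completion(ipc, timeout);
            if (ret)
                    goto exit;

            /* Firmware status is the result; copy the payload out if one was asked for. */
            ret = ipc->rx.rsp.status;
            if (reply) {
                    reply->header = ipc->rx.header;
                    reply->size = ipc->rx.size;
                    if (reply->data && ipc->rx.size)
                            memcpy(reply->data, ipc->rx.data, reply->size);
            }

    exit:
            mutex_unlock(&ipc->msg_mutex);
            return ret;
    }
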
533 return avs_dsp_send_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms);
545 return avs_dsp_send_pm_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms,
551 struct avs_ipc *ipc = adev->ipc;
554 mutex_lock(&ipc->msg_mutex);
556 spin_lock(&ipc->rx_lock);
557 avs_ipc_msg_init(ipc, NULL);
563 spin_unlock(&ipc->rx_lock);
568 ret = wait_for_completion_timeout(&ipc->done_completion, msecs_to_jiffies(timeout));
572 mutex_unlock(&ipc->msg_mutex);
584 return avs_dsp_send_rom_msg_timeout(adev, request, adev->ipc->default_timeout_ms);
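
The wrappers at 533, 545 and 584 simply forward adev->ipc->default_timeout_ms to their _timeout variants. The ROM path (551-572) differs from the regular sender in what it waits for: early (ROM) firmware only toggles the DONE interrupt and never produces a reply payload, so the exchange blocks on done_completion instead of busy_completion. A sketch, with the function name, the send helper and the retry/error details assumed:

    static int avs_dsp_do_send_rom_msg(struct avs_dev *adev,
                                       struct avs_ipc_msg *request, int timeout)
    {
            struct avs_ipc *ipc = adev->ipc;
            int ret;

            mutex_lock(&ipc->msg_mutex);

            spin_lock(&ipc->rx_lock);
            avs_ipc_msg_init(ipc, NULL);            /* no reply payload expected */
            avs_dsp_send_tx(adev, request);         /* downlink-send helper assumed */
            spin_unlock(&ipc->rx_lock);

            /* ROM firmware only raises DONE; there is no BUSY/reply to wait for. */
            ret = wait_for_completion_timeout(&ipc->done_completion,
                                              msecs_to_jiffies(timeout));
            ret = ret ? 0 : -ETIMEDOUT;

            mutex_unlock(&ipc->msg_mutex);
            return ret;
    }
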
603 int avs_ipc_init(struct avs_ipc *ipc, struct device *dev)
605 ipc->rx.data = devm_kzalloc(dev, AVS_MAILBOX_SIZE, GFP_KERNEL);
606 if (!ipc->rx.data)
609 ipc->dev = dev;
610 ipc->ready = false;
611 ipc->default_timeout_ms = AVS_IPC_TIMEOUT_MS;
612 INIT_WORK(&ipc->recovery_work, avs_dsp_recovery_work);
613 INIT_DELAYED_WORK(&ipc->d0ix_work, avs_dsp_d0ix_work);
614 init_completion(&ipc->done_completion);
615 init_completion(&ipc->busy_completion);
616 spin_lock_init(&ipc->rx_lock);
617 mutex_init(&ipc->msg_mutex);
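
Lines 603-617 cover essentially all of avs_ipc_init(): allocate the mailbox-sized rx buffer with device-managed memory, then initialize the work items, completions and locks used above. Assembled, with only the two obvious return statements filled in:

    int avs_ipc_init(struct avs_ipc *ipc, struct device *dev)
    {
            ipc->rx.data = devm_kzalloc(dev, AVS_MAILBOX_SIZE, GFP_KERNEL);
            if (!ipc->rx.data)
                    return -ENOMEM;

            ipc->dev = dev;
            ipc->ready = false;
            ipc->default_timeout_ms = AVS_IPC_TIMEOUT_MS;
            INIT_WORK(&ipc->recovery_work, avs_dsp_recovery_work);
            INIT_DELAYED_WORK(&ipc->d0ix_work, avs_dsp_d0ix_work);
            init_completion(&ipc->done_completion);
            init_completion(&ipc->busy_completion);
            spin_lock_init(&ipc->rx_lock);
            mutex_init(&ipc->msg_mutex);

            return 0;
    }
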
622 void avs_ipc_block(struct avs_ipc *ipc)
624 ipc->ready = false;
625 cancel_work_sync(&ipc->recovery_work);
626 cancel_delayed_work_sync(&ipc->d0ix_work);
627 ipc->in_d0ix = false;
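
Finally, avs_ipc_block() (622-627) is fully covered by the matches: it marks the link not-ready so new senders bail out early, then flushes both work items so neither recovery nor a deferred D0ix entry can run against a DSP that is being taken down:

    void avs_ipc_block(struct avs_ipc *ipc)
    {
            ipc->ready = false;
            cancel_work_sync(&ipc->recovery_work);
            cancel_delayed_work_sync(&ipc->d0ix_work);
            ipc->in_d0ix = false;
    }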