Lines matching defs:mhi_cntrl in the MHI power-management code (each match is prefixed with its line number in the source file)
110 enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
113 unsigned long cur_state = mhi_cntrl->pm_state;
125 mhi_cntrl->pm_state = state;
126 return mhi_cntrl->pm_state;
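The callers listed below always invoke mhi_tryset_pm_state() with pm_lock held for writing and compare the returned state against the one they requested. A minimal sketch of that idiom, condensed from the mhi_pm_m0_transition() matches at lines 251-254 further down (example_set_m0 and the M0 target are illustrative):

static int example_set_m0(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M0;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* The transition table may reject the move; every caller checks. */
	return (cur_state == MHI_PM_M0) ? 0 : -EIO;
}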
129 void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
132 mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
135 mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
142 static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
146 static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
148 mhi_cntrl->wake_get(mhi_cntrl, false);
149 mhi_cntrl->wake_put(mhi_cntrl, true);
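These two helpers are the candidate wake_toggle callbacks. The assignment at line 939 further down is truncated at the ternary; based on the helper names it plausibly completes as follows (the completion is an assumption, not a listed line):

/* If the device honors doorbell writes in M2 there is nothing to
 * toggle; otherwise a get/put pair nudges the device out of M2.
 */
mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
	mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;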
153 int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
155 void __iomem *base = mhi_cntrl->regs;
158 struct device *dev = &mhi_cntrl->mhi_dev->dev;
163 wait_event_timeout(mhi_cntrl->state_event,
164 MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
165 mhi_read_reg_field(mhi_cntrl, base, MHICTRL,
168 mhi_read_reg_field(mhi_cntrl, base, MHISTATUS,
172 msecs_to_jiffies(mhi_cntrl->timeout_ms));
175 if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
187 write_lock_irq(&mhi_cntrl->pm_lock);
188 cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
189 mhi_cntrl->dev_state = MHI_STATE_READY;
190 write_unlock_irq(&mhi_cntrl->pm_lock);
199 read_lock_bh(&mhi_cntrl->pm_lock);
200 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
206 ret = mhi_init_mmio(mhi_cntrl);
213 mhi_event = mhi_cntrl->mhi_event;
214 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
233 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
234 read_unlock_bh(&mhi_cntrl->pm_lock);
239 read_unlock_bh(&mhi_cntrl->pm_lock);
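The wait at lines 163-172 blocks on state_event until the device either enters a fatal PM state or clears MHICTRL.RESET and sets MHISTATUS.READY. A condensed sketch of that wait-or-fail idiom; mhi_reset_done() is a hypothetical stand-in for the two mhi_read_reg_field() checks:

long ret = wait_event_timeout(mhi_cntrl->state_event,
			      MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
			      mhi_reset_done(mhi_cntrl),	/* hypothetical */
			      msecs_to_jiffies(mhi_cntrl->timeout_ms));

if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state))
	return -EIO;		/* link died while waiting */
if (!ret)
	return -ETIMEDOUT;	/* device never signalled ready */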
244 int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
248 struct device *dev = &mhi_cntrl->mhi_dev->dev;
251 write_lock_irq(&mhi_cntrl->pm_lock);
252 mhi_cntrl->dev_state = MHI_STATE_M0;
253 cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
254 write_unlock_irq(&mhi_cntrl->pm_lock);
259 mhi_cntrl->M0++;
262 read_lock_bh(&mhi_cntrl->pm_lock);
263 mhi_cntrl->wake_get(mhi_cntrl, true);
266 if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
267 struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
269 &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
271 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
283 mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
288 mhi_chan = mhi_cntrl->mhi_chan;
289 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
303 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
307 mhi_cntrl->wake_put(mhi_cntrl, false);
308 read_unlock_bh(&mhi_cntrl->pm_lock);
309 wake_up_all(&mhi_cntrl->state_event);
319 void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
322 struct device *dev = &mhi_cntrl->mhi_dev->dev;
324 write_lock_irq(&mhi_cntrl->pm_lock);
325 state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
327 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
328 mhi_cntrl->dev_state = MHI_STATE_M2;
330 write_unlock_irq(&mhi_cntrl->pm_lock);
332 mhi_cntrl->M2++;
333 wake_up_all(&mhi_cntrl->state_event);
336 if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
337 atomic_read(&mhi_cntrl->dev_wake))) {
340 atomic_read(&mhi_cntrl->pending_pkts),
341 atomic_read(&mhi_cntrl->dev_wake));
342 read_lock_bh(&mhi_cntrl->pm_lock);
343 mhi_cntrl->wake_get(mhi_cntrl, true);
344 mhi_cntrl->wake_put(mhi_cntrl, true);
345 read_unlock_bh(&mhi_cntrl->pm_lock);
347 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
350 write_unlock_irq(&mhi_cntrl->pm_lock);
355 int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
358 struct device *dev = &mhi_cntrl->mhi_dev->dev;
360 write_lock_irq(&mhi_cntrl->pm_lock);
361 mhi_cntrl->dev_state = MHI_STATE_M3;
362 state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
363 write_unlock_irq(&mhi_cntrl->pm_lock);
369 mhi_cntrl->M3++;
370 wake_up_all(&mhi_cntrl->state_event);
376 static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
379 struct device *dev = &mhi_cntrl->mhi_dev->dev;
380 enum mhi_ee_type current_ee = mhi_cntrl->ee;
385 write_lock_irq(&mhi_cntrl->pm_lock);
386 if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
387 mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
388 write_unlock_irq(&mhi_cntrl->pm_lock);
390 if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee))
393 wake_up_all(&mhi_cntrl->state_event);
395 device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
397 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
400 ret = __mhi_device_get_sync(mhi_cntrl);
404 read_lock_bh(&mhi_cntrl->pm_lock);
406 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
412 mhi_event = mhi_cntrl->mhi_event;
413 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
425 if (MHI_DB_ACCESS_VALID(mhi_cntrl))
430 read_unlock_bh(&mhi_cntrl->pm_lock);
436 mhi_create_devices(mhi_cntrl);
438 read_lock_bh(&mhi_cntrl->pm_lock);
441 mhi_cntrl->wake_put(mhi_cntrl, false);
442 read_unlock_bh(&mhi_cntrl->pm_lock);
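Line 395 above (with the `&current_ee` encoding fix applied) walks the controller's child devices; the actual callback sits on the unlisted continuation line. For reference, device_for_each_child() expects a callback of this shape (example_child_cb is illustrative; returning non-zero stops the walk):

static int example_child_cb(struct device *dev, void *data)
{
	enum mhi_ee_type *ee = data;	/* the &current_ee passed above */

	dev_dbg(dev, "visited during transition from EE %d\n", *ee);
	return 0;
}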
448 static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
456 struct device *dev = &mhi_cntrl->mhi_dev->dev;
460 to_mhi_pm_state_str(mhi_cntrl->pm_state),
465 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);
467 mutex_lock(&mhi_cntrl->pm_mutex);
468 write_lock_irq(&mhi_cntrl->pm_lock);
469 prev_state = mhi_cntrl->pm_state;
470 cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
472 mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
473 mhi_cntrl->dev_state = MHI_STATE_RESET;
475 write_unlock_irq(&mhi_cntrl->pm_lock);
478 wake_up_all(&mhi_cntrl->state_event);
484 mutex_unlock(&mhi_cntrl->pm_mutex);
491 unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
494 if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM)
498 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
501 ret = wait_event_timeout(mhi_cntrl->state_event,
502 mhi_read_reg_field(mhi_cntrl,
503 mhi_cntrl->regs,
511 mutex_unlock(&mhi_cntrl->pm_mutex);
519 mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
525 mhi_event = mhi_cntrl->mhi_event;
526 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
533 mutex_unlock(&mhi_cntrl->pm_mutex);
535 wake_up_all(&mhi_cntrl->state_event);
538 device_for_each_child(mhi_cntrl->cntrl_dev, NULL, mhi_destroy_device);
540 mutex_lock(&mhi_cntrl->pm_mutex);
542 WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
543 WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));
547 mhi_cmd = mhi_cntrl->mhi_cmd;
548 cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
558 mhi_event = mhi_cntrl->mhi_event;
559 er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
560 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
575 mhi_ready_state_transition(mhi_cntrl);
578 write_lock_irq(&mhi_cntrl->pm_lock);
579 cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
580 write_unlock_irq(&mhi_cntrl->pm_lock);
588 to_mhi_pm_state_str(mhi_cntrl->pm_state),
589 TO_MHI_STATE_STR(mhi_cntrl->dev_state));
591 mutex_unlock(&mhi_cntrl->pm_mutex);
595 int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
605 spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
606 list_add_tail(&item->node, &mhi_cntrl->transition_list);
607 spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);
609 schedule_work(&mhi_cntrl->st_worker);
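mhi_queue_state_transition() is a standard deferred-work producer: allocate an item, append it to transition_list under transition_lock, then schedule st_worker. The allocation step falls between the listed lines; a sketch of it, assuming a small struct carrying a list node plus the requested state:

struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);

if (!item)
	return -ENOMEM;

/* GFP_ATOMIC assumed, since mhi_pm_sys_err_handler() may call
 * this from atomic context.
 */
item->state = state;	/* the requested dev_st_transition */
/* ...then lines 605-609: add to transition_list and kick st_worker */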
615 void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
617 struct device *dev = &mhi_cntrl->mhi_dev->dev;
620 if (mhi_cntrl->rddm_image) {
625 mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
633 struct mhi_controller *mhi_cntrl = container_of(work,
636 struct device *dev = &mhi_cntrl->mhi_dev->dev;
638 spin_lock_irq(&mhi_cntrl->transition_lock);
639 list_splice_tail_init(&mhi_cntrl->transition_list, &head);
640 spin_unlock_irq(&mhi_cntrl->transition_lock);
649 write_lock_irq(&mhi_cntrl->pm_lock);
650 if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
651 mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
652 write_unlock_irq(&mhi_cntrl->pm_lock);
653 if (MHI_IN_PBL(mhi_cntrl->ee))
654 mhi_fw_load_handler(mhi_cntrl);
657 write_lock_irq(&mhi_cntrl->pm_lock);
658 mhi_cntrl->ee = MHI_EE_SBL;
659 write_unlock_irq(&mhi_cntrl->pm_lock);
665 mhi_create_devices(mhi_cntrl);
668 mhi_pm_mission_mode_transition(mhi_cntrl);
671 mhi_ready_state_transition(mhi_cntrl);
675 (mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
679 (mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
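The worker drains the list by splicing it onto a local head under the lock and dispatching each item outside it; lines 675 and 679 are the SYS_ERR and SHUTDOWN arms, both continuations of calls into mhi_pm_disable_transition(). A skeleton of the drain-and-dispatch loop consistent with the listed fragments:

LIST_HEAD(head);
struct state_transition *itr, *tmp;

spin_lock_irq(&mhi_cntrl->transition_lock);
list_splice_tail_init(&mhi_cntrl->transition_list, &head);
spin_unlock_irq(&mhi_cntrl->transition_lock);

list_for_each_entry_safe(itr, tmp, &head, node) {
	list_del(&itr->node);
	switch (itr->state) {
	/* PBL/SBL/mission-mode/ready arms, then SYS_ERR and
	 * SHUTDOWN handing off to mhi_pm_disable_transition()
	 */
	}
	kfree(itr);
}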
688 int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
691 struct device *dev = &mhi_cntrl->mhi_dev->dev;
695 if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
698 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
702 if (atomic_read(&mhi_cntrl->dev_wake) ||
703 atomic_read(&mhi_cntrl->pending_pkts))
707 read_lock_bh(&mhi_cntrl->pm_lock);
708 mhi_cntrl->wake_get(mhi_cntrl, false);
709 read_unlock_bh(&mhi_cntrl->pm_lock);
711 ret = wait_event_timeout(mhi_cntrl->state_event,
712 mhi_cntrl->dev_state == MHI_STATE_M0 ||
713 mhi_cntrl->dev_state == MHI_STATE_M1 ||
714 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
715 msecs_to_jiffies(mhi_cntrl->timeout_ms));
717 read_lock_bh(&mhi_cntrl->pm_lock);
718 mhi_cntrl->wake_put(mhi_cntrl, false);
719 read_unlock_bh(&mhi_cntrl->pm_lock);
721 if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
727 write_lock_irq(&mhi_cntrl->pm_lock);
729 if (atomic_read(&mhi_cntrl->dev_wake) ||
730 atomic_read(&mhi_cntrl->pending_pkts)) {
731 write_unlock_irq(&mhi_cntrl->pm_lock);
736 new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
738 write_unlock_irq(&mhi_cntrl->pm_lock);
742 to_mhi_pm_state_str(mhi_cntrl->pm_state));
747 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
748 write_unlock_irq(&mhi_cntrl->pm_lock);
751 ret = wait_event_timeout(mhi_cntrl->state_event,
752 mhi_cntrl->dev_state == MHI_STATE_M3 ||
753 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
754 msecs_to_jiffies(mhi_cntrl->timeout_ms));
756 if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
759 TO_MHI_STATE_STR(mhi_cntrl->dev_state),
760 to_mhi_pm_state_str(mhi_cntrl->pm_state));
765 list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
776 int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
779 struct device *dev = &mhi_cntrl->mhi_dev->dev;
784 to_mhi_pm_state_str(mhi_cntrl->pm_state),
785 TO_MHI_STATE_STR(mhi_cntrl->dev_state));
787 if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
790 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
794 list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
801 write_lock_irq(&mhi_cntrl->pm_lock);
802 cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
804 write_unlock_irq(&mhi_cntrl->pm_lock);
808 to_mhi_pm_state_str(mhi_cntrl->pm_state));
813 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
814 write_unlock_irq(&mhi_cntrl->pm_lock);
816 ret = wait_event_timeout(mhi_cntrl->state_event,
817 mhi_cntrl->dev_state == MHI_STATE_M0 ||
818 mhi_cntrl->dev_state == MHI_STATE_M2 ||
819 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
820 msecs_to_jiffies(mhi_cntrl->timeout_ms));
822 if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
825 TO_MHI_STATE_STR(mhi_cntrl->dev_state),
826 to_mhi_pm_state_str(mhi_cntrl->pm_state));
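mhi_pm_suspend() and mhi_pm_resume() are designed to be driven from the controller glue driver's PM callbacks: suspend bails out while wake votes or packets are pending (the checks at lines 702-703), resume walks M3 back to M0. A hedged usage sketch; the my_* names are illustrative, not part of the MHI API:

#include <linux/mhi.h>
#include <linux/pm.h>

static int my_glue_suspend(struct device *dev)
{
	struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);

	/* A busy return here means "try again later", not a hard failure */
	return mhi_pm_suspend(mhi_cntrl);
}

static int my_glue_resume(struct device *dev)
{
	struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);

	return mhi_pm_resume(mhi_cntrl);
}

static SIMPLE_DEV_PM_OPS(my_glue_pm_ops, my_glue_suspend, my_glue_resume);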
834 int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
839 read_lock_bh(&mhi_cntrl->pm_lock);
840 mhi_cntrl->wake_get(mhi_cntrl, true);
841 if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
842 mhi_trigger_resume(mhi_cntrl);
843 read_unlock_bh(&mhi_cntrl->pm_lock);
845 ret = wait_event_timeout(mhi_cntrl->state_event,
846 mhi_cntrl->pm_state == MHI_PM_M0 ||
847 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
848 msecs_to_jiffies(mhi_cntrl->timeout_ms));
850 if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
851 read_lock_bh(&mhi_cntrl->pm_lock);
852 mhi_cntrl->wake_put(mhi_cntrl, false);
853 read_unlock_bh(&mhi_cntrl->pm_lock);
861 static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
870 spin_lock_irqsave(&mhi_cntrl->wlock, flags);
871 atomic_inc(&mhi_cntrl->dev_wake);
872 if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
873 !mhi_cntrl->wake_set) {
874 mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
875 mhi_cntrl->wake_set = true;
877 spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
883 if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
886 spin_lock_irqsave(&mhi_cntrl->wlock, flags);
887 if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
888 MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
889 !mhi_cntrl->wake_set) {
890 mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
891 mhi_cntrl->wake_set = true;
893 spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
898 static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
907 if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
910 spin_lock_irqsave(&mhi_cntrl->wlock, flags);
911 if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
912 MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
913 mhi_cntrl->wake_set) {
914 mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
915 mhi_cntrl->wake_set = false;
917 spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
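Both wake helpers pair a lock-free fast path with a locked slow path that only touches wake_db on the first-get/last-put edge: atomic_add_unless(&dev_wake, 1, 0) bumps the count only if it is already non-zero (doorbell already rung), and the mirror-image decrement skips the lock unless the count would hit zero. The get side, condensed from the listed fragments with the pm_state validity checks omitted:

/* Fast path: we are not the first voter, pure refcount bump */
if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
	return;

spin_lock_irqsave(&mhi_cntrl->wlock, flags);
if (atomic_inc_return(&mhi_cntrl->dev_wake) == 1 && !mhi_cntrl->wake_set) {
	mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);	/* first voter rings */
	mhi_cntrl->wake_set = true;
}
spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);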
920 int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
925 struct device *dev = &mhi_cntrl->mhi_dev->dev;
931 if (mhi_cntrl->nr_irqs < 1)
935 if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
936 !mhi_cntrl->wake_toggle) {
937 mhi_cntrl->wake_get = mhi_assert_dev_wake;
938 mhi_cntrl->wake_put = mhi_deassert_dev_wake;
939 mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
943 mutex_lock(&mhi_cntrl->pm_mutex);
944 mhi_cntrl->pm_state = MHI_PM_DISABLE;
946 if (!mhi_cntrl->pre_init) {
948 ret = mhi_init_dev_ctxt(mhi_cntrl);
953 ret = mhi_init_irq_setup(mhi_cntrl);
958 write_lock_irq(&mhi_cntrl->pm_lock);
959 ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val);
961 write_unlock_irq(&mhi_cntrl->pm_lock);
965 mhi_cntrl->bhi = mhi_cntrl->regs + val;
968 if (mhi_cntrl->fbc_download) {
969 ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val);
971 write_unlock_irq(&mhi_cntrl->pm_lock);
976 mhi_cntrl->bhie = mhi_cntrl->regs + val;
979 mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
980 mhi_cntrl->pm_state = MHI_PM_POR;
981 mhi_cntrl->ee = MHI_EE_MAX;
982 current_ee = mhi_get_exec_env(mhi_cntrl);
983 write_unlock_irq(&mhi_cntrl->pm_lock);
992 state = mhi_get_mhi_state(mhi_cntrl);
994 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
995 ret = wait_event_timeout(mhi_cntrl->state_event,
996 MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
997 mhi_read_reg_field(mhi_cntrl,
998 mhi_cntrl->regs,
1004 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1015 mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
1022 mhi_queue_state_transition(mhi_cntrl, next_state);
1024 mutex_unlock(&mhi_cntrl->pm_mutex);
1031 mhi_deinit_free_irq(mhi_cntrl);
1034 if (!mhi_cntrl->pre_init)
1035 mhi_deinit_dev_ctxt(mhi_cntrl);
1038 mutex_unlock(&mhi_cntrl->pm_mutex);
1044 void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
1047 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1051 mutex_lock(&mhi_cntrl->pm_mutex);
1052 write_lock_irq(&mhi_cntrl->pm_lock);
1053 cur_state = mhi_tryset_pm_state(mhi_cntrl,
1055 write_unlock_irq(&mhi_cntrl->pm_lock);
1056 mutex_unlock(&mhi_cntrl->pm_mutex);
1060 to_mhi_pm_state_str(mhi_cntrl->pm_state));
1063 mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);
1066 flush_work(&mhi_cntrl->st_worker);
1068 mhi_deinit_free_irq(mhi_cntrl);
1070 if (!mhi_cntrl->pre_init) {
1072 if (mhi_cntrl->fbc_image) {
1073 mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
1074 mhi_cntrl->fbc_image = NULL;
1076 mhi_deinit_dev_ctxt(mhi_cntrl);
1081 int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
1083 int ret = mhi_async_power_up(mhi_cntrl);
1088 wait_event_timeout(mhi_cntrl->state_event,
1089 MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
1090 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
1091 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1093 ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
1095 mhi_power_down(mhi_cntrl, false);
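Lines 1083-1095 show mhi_sync_power_up() layering a mission-mode wait over mhi_async_power_up() and powering back down on timeout. For a controller driver that wants a fully booted device before probe returns, usage plausibly looks like this (my_probe_power/my_remove_power are illustrative):

static int my_probe_power(struct mhi_controller *mhi_cntrl)
{
	int ret = mhi_sync_power_up(mhi_cntrl);

	if (ret)	/* e.g. -ETIMEDOUT: never reached mission mode */
		return ret;

	return 0;
}

static void my_remove_power(struct mhi_controller *mhi_cntrl)
{
	mhi_power_down(mhi_cntrl, true);	/* graceful, per line 1044 */
}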
1101 int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
1103 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1107 if (mhi_cntrl->ee == MHI_EE_RDDM)
1111 mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
1114 ret = wait_event_timeout(mhi_cntrl->state_event,
1115 mhi_cntrl->ee == MHI_EE_RDDM,
1116 msecs_to_jiffies(mhi_cntrl->timeout_ms));
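mhi_force_rddm_mode() raises SYS_ERR and waits (lines 1114-1116) for the device to enter the RDDM execution environment so a RAM dump can be collected. A hedged error-path sketch:

/* In a controller driver's crash handler (illustrative): */
if (mhi_force_rddm_mode(mhi_cntrl))
	dev_err(&mhi_cntrl->mhi_dev->dev,
		"Device did not enter RDDM within timeout\n");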
1125 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1128 read_lock_bh(&mhi_cntrl->pm_lock);
1129 if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
1130 mhi_trigger_resume(mhi_cntrl);
1132 mhi_cntrl->wake_get(mhi_cntrl, true);
1133 read_unlock_bh(&mhi_cntrl->pm_lock);
1139 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1142 ret = __mhi_device_get_sync(mhi_cntrl);
1152 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1155 read_lock_bh(&mhi_cntrl->pm_lock);
1156 if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
1157 mhi_trigger_resume(mhi_cntrl);
1159 mhi_cntrl->wake_put(mhi_cntrl, false);
1160 read_unlock_bh(&mhi_cntrl->pm_lock);
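The three wrappers above (taking a struct mhi_device, per the mhi_dev->mhi_cntrl dereferences at lines 1125, 1139 and 1152) are the client-side voting API: get/get_sync hold the device out of low-power states, put drops the vote. A hedged client sketch; my_client_xfer is illustrative:

#include <linux/mhi.h>

static int my_client_xfer(struct mhi_device *mhi_dev)
{
	int ret = mhi_device_get_sync(mhi_dev);	/* waits for M0 */

	if (ret)
		return ret;

	/* ... queue transfers while the wake vote is held ... */

	mhi_device_put(mhi_dev);	/* allow M2/M3 again */
	return 0;
}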