Lines Matching defs:mhi_cntrl
18 int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
21 return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
24 int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
31 ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
40 void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
43 mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
46 void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
52 ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
58 mhi_write_reg(mhi_cntrl, base, offset, tmp);
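The register helpers above (lines 18-58) funnel every access through the controller-supplied read_reg/write_reg hooks (lines 21 and 43), with the *_field variants layering a mask/shift read-modify-write on top. Below is a minimal sketch of what a memory-mapped controller could plug into those hooks, assuming plain readl()/writel() MMIO; the example_* names are illustrative and not part of the MHI core.

#include <linux/io.h>
#include <linux/mhi.h>

/* Illustrative MMIO-backed accessors for the mhi_cntrl->read_reg /
 * mhi_cntrl->write_reg hooks called at lines 21 and 43. Names are
 * hypothetical; only readl()/writel() are standard kernel API.
 */
static int example_mhi_read_reg(struct mhi_controller *mhi_cntrl,
                                void __iomem *addr, u32 *out)
{
        *out = readl(addr);
        return 0;
}

static void example_mhi_write_reg(struct mhi_controller *mhi_cntrl,
                                  void __iomem *addr, u32 val)
{
        writel(val, addr);
}

/* The *_reg_field helpers wrap these with a read-modify-write:
 * tmp &= ~mask; tmp |= (val << shift) & mask; then write tmp back.
 */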
61 void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
64 mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
65 mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
68 void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
75 mhi_write_db(mhi_cntrl, db_addr, db_val);
80 void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
86 mhi_write_db(mhi_cntrl, db_addr, db_val);
93 mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
97 void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
104 mhi_write_db(mhi_cntrl, ring->db_addr, db);
107 void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
115 mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
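Lines 61-115 are the doorbell plumbing: a 64-bit doorbell value (the device-visible write-pointer address) is written as two 32-bit halves, upper word at offset 4 and lower word at offset 0. The sketch below shows one plausible way such a value is derived for a ring, assuming the device sees the ring at iommu_base; the parameter names are stand-ins for fields of the driver's struct mhi_ring, not its actual layout.

/* Sketch only: derive the device-visible write pointer for a ring whose
 * host virtual base is 'base' and whose device-visible base is
 * 'iommu_base', then split it across the two 32-bit doorbell registers
 * in the same order mhi_write_db() uses (upper half first, at offset 4).
 */
static void example_ring_db(struct mhi_controller *mhi_cntrl,
                            void __iomem *db_addr, void *base,
                            dma_addr_t iommu_base, void *wp)
{
        u64 db_val = iommu_base + (wp - base);

        mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
        mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
}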
119 enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
122 int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
127 enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
130 int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
136 int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
139 buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
142 if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
148 int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
151 void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len,
165 void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
168 dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
172 void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
178 mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr,
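Lines 136-178 implement the two buffer-mapping strategies: a direct streaming DMA mapping, and a coherent bounce buffer that device-bound data is copied into. The sketch below condenses both paths onto the standard DMA-mapping API; struct example_buf is a simplified stand-in for struct mhi_buf_info, not the driver's definition, and the GFP flag is an assumption.

#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Simplified stand-in for struct mhi_buf_info. */
struct example_buf {
        void *v_addr;                   /* caller's buffer */
        void *bb_addr;                  /* bounce buffer, if used */
        dma_addr_t p_addr;              /* device-visible address */
        size_t len;
        enum dma_data_direction dir;
};

/* Direct mapping, roughly what mhi_map_single_no_bb() does. */
static int example_map_no_bb(struct device *dev, struct example_buf *buf)
{
        buf->p_addr = dma_map_single(dev, buf->v_addr, buf->len, buf->dir);
        if (dma_mapping_error(dev, buf->p_addr))
                return -ENOMEM;
        return 0;
}

/* Bounce-buffer mapping, roughly what mhi_map_single_use_bb() does:
 * allocate a coherent buffer and copy device-bound data into it.
 */
static int example_map_use_bb(struct device *dev, struct example_buf *buf)
{
        buf->bb_addr = dma_alloc_coherent(dev, buf->len, &buf->p_addr,
                                          GFP_ATOMIC);
        if (!buf->bb_addr)
                return -ENOMEM;

        if (buf->dir == DMA_TO_DEVICE)
                memcpy(buf->bb_addr, buf->v_addr, buf->len);

        return 0;
}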
182 static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
203 static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
213 static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
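Lines 182-213 are generic ring bookkeeping: counting free elements and advancing the read/write pointers with wrap-around. The sketch below shows the underlying circular-buffer arithmetic on a simplified ring, assuming one slot is kept unused so that wp == rp unambiguously means "empty"; the struct is a stand-in, not struct mhi_ring.

#include <linux/types.h>

/* Simplified stand-in for struct mhi_ring. */
struct example_ring {
        void *base;             /* start of ring memory */
        void *rp;               /* read pointer */
        void *wp;               /* write pointer */
        size_t len;             /* total size in bytes */
        size_t el_size;         /* size of one element */
};

/* Free elements, keeping one slot in reserve
 * (cf. get_nr_avail_ring_elements() at line 182).
 */
static int example_nr_avail(struct example_ring *ring)
{
        if (ring->wp < ring->rp)
                return (ring->rp - ring->wp) / ring->el_size - 1;

        return (ring->rp - ring->base) / ring->el_size +
               (ring->base + ring->len - ring->wp) / ring->el_size - 1;
}

/* Advance a pointer by one element, wrapping at the end of the ring;
 * the add/del helpers above do this for wp and rp respectively.
 */
static void *example_ring_advance(struct example_ring *ring, void *ptr)
{
        ptr += ring->el_size;
        if (ptr >= ring->base + ring->len)
                ptr = ring->base;
        return ptr;
}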
232 struct mhi_controller *mhi_cntrl;
239 mhi_cntrl = mhi_dev->mhi_cntrl;
277 dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
302 void mhi_create_devices(struct mhi_controller *mhi_cntrl)
306 struct device *dev = &mhi_cntrl->mhi_dev->dev;
309 mhi_chan = mhi_cntrl->mhi_chan;
310 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
312 !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
314 mhi_dev = mhi_alloc_device(mhi_cntrl);
339 if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
358 dev_name(mhi_cntrl->cntrl_dev),
374 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
376 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
382 dev_err(&mhi_cntrl->mhi_dev->dev,
409 struct mhi_controller *mhi_cntrl = priv;
410 struct device *dev = &mhi_cntrl->mhi_dev->dev;
415 write_lock_irq(&mhi_cntrl->pm_lock);
416 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
417 write_unlock_irq(&mhi_cntrl->pm_lock);
421 state = mhi_get_mhi_state(mhi_cntrl);
422 ee = mhi_cntrl->ee;
423 mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
425 TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee),
430 pm_state = mhi_tryset_pm_state(mhi_cntrl,
433 write_unlock_irq(&mhi_cntrl->pm_lock);
436 if (mhi_cntrl->rddm_image) {
437 if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) {
438 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
439 wake_up_all(&mhi_cntrl->state_event);
445 wake_up_all(&mhi_cntrl->state_event);
449 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
451 mhi_pm_sys_err_handler(mhi_cntrl);
461 struct mhi_controller *mhi_cntrl = dev;
464 wake_up_all(&mhi_cntrl->state_event);
469 static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
494 static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
499 struct device *dev = &mhi_cntrl->mhi_dev->dev;
537 dev_err(&mhi_cntrl->mhi_dev->dev,
561 mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
568 mhi_del_ring_element(mhi_cntrl, buf_ring);
569 mhi_del_ring_element(mhi_cntrl, tre_ring);
578 atomic_dec(&mhi_cntrl->pending_pkts);
607 read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
609 MHI_DB_ACCESS_VALID(mhi_cntrl)) {
610 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
612 read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
630 static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
685 mhi_del_ring_element(mhi_cntrl, tre_ring);
694 static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
698 struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
705 dev_err(&mhi_cntrl->mhi_dev->dev,
714 if (chan < mhi_cntrl->max_chan &&
715 mhi_cntrl->mhi_chan[chan].configured) {
716 mhi_chan = &mhi_cntrl->mhi_chan[chan];
722 dev_err(&mhi_cntrl->mhi_dev->dev,
726 mhi_del_ring_element(mhi_cntrl, mhi_ring);
729 int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
736 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
738 struct device *dev = &mhi_cntrl->mhi_dev->dev;
748 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
752 dev_err(&mhi_cntrl->mhi_dev->dev,
768 link_info = &mhi_cntrl->mhi_link_info;
769 write_lock_irq(&mhi_cntrl->pm_lock);
774 write_unlock_irq(&mhi_cntrl->pm_lock);
776 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
790 mhi_pm_m0_transition(mhi_cntrl);
793 mhi_pm_m1_transition(mhi_cntrl);
796 mhi_pm_m3_transition(mhi_cntrl);
803 if (mhi_cntrl->ee == MHI_EE_RDDM ||
804 mhi_cntrl->rddm_image)
808 write_lock_irq(&mhi_cntrl->pm_lock);
809 new_state = mhi_tryset_pm_state(mhi_cntrl,
811 write_unlock_irq(&mhi_cntrl->pm_lock);
813 mhi_pm_sys_err_handler(mhi_cntrl);
824 mhi_process_cmd_completion(mhi_cntrl, local_rp);
842 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
843 write_lock_irq(&mhi_cntrl->pm_lock);
844 mhi_cntrl->ee = event;
845 write_unlock_irq(&mhi_cntrl->pm_lock);
846 wake_up_all(&mhi_cntrl->state_event);
853 mhi_queue_state_transition(mhi_cntrl, st);
860 WARN_ON(chan >= mhi_cntrl->max_chan);
866 if (chan < mhi_cntrl->max_chan) {
867 mhi_chan = &mhi_cntrl->mhi_chan[chan];
870 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
879 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
884 dev_err(&mhi_cntrl->mhi_dev->dev,
893 read_lock_bh(&mhi_cntrl->pm_lock);
894 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
896 read_unlock_bh(&mhi_cntrl->pm_lock);
901 int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
908 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
914 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
918 dev_err(&mhi_cntrl->mhi_dev->dev,
931 WARN_ON(chan >= mhi_cntrl->max_chan);
937 if (chan < mhi_cntrl->max_chan &&
938 mhi_cntrl->mhi_chan[chan].configured) {
939 mhi_chan = &mhi_cntrl->mhi_chan[chan];
942 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
945 parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
950 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
955 dev_err(&mhi_cntrl->mhi_dev->dev,
963 read_lock_bh(&mhi_cntrl->pm_lock);
964 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
966 read_unlock_bh(&mhi_cntrl->pm_lock);
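Both event-ring processors (lines 729-966) share the same consume loop: walk the local read pointer toward the device's read pointer, dispatch each element by type, recycle it by advancing rp and wp, then re-ring the event ring doorbell under pm_lock. The sketch below is a generic, self-contained version of that loop with placeholder types and helpers; unlike the driver, it does not re-read the device's rp from the event ring context on every iteration.

#include <linux/types.h>

/* Placeholder event ring; not the driver's struct mhi_ring. */
struct example_ev_ring {
        u32 *base;
        u32 *rp;
        u32 *wp;
        unsigned int nr_el;
};

/* Advance one of the ring pointers, wrapping at the end. */
static void example_recycle(struct example_ev_ring *ring, u32 **ptr)
{
        if (++(*ptr) >= ring->base + ring->nr_el)
                *ptr = ring->base;
}

/* Consume events until caught up with the device or the quota is hit;
 * 'handle' stands in for the per-type dispatch done by the processors
 * above, and the caller is expected to ring the event doorbell after.
 */
static int example_process_events(struct example_ev_ring *ring,
                                  u32 *dev_rp, u32 quota,
                                  void (*handle)(u32 ev))
{
        u32 count = 0;

        while (ring->rp != dev_rp && count < quota) {
                handle(*ring->rp);
                example_recycle(ring, &ring->rp);
                example_recycle(ring, &ring->wp);
                count++;
        }

        return count;
}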
974 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
978 mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
985 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
986 struct device *dev = &mhi_cntrl->mhi_dev->dev;
996 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
1002 mhi_trigger_resume(mhi_cntrl);
1008 ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
1015 write_lock_irq(&mhi_cntrl->pm_lock);
1016 state = mhi_get_mhi_state(mhi_cntrl);
1019 pm_state = mhi_tryset_pm_state(mhi_cntrl,
1022 write_unlock_irq(&mhi_cntrl->pm_lock);
1024 mhi_pm_sys_err_handler(mhi_cntrl);
1028 static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
1042 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1053 if (mhi_is_ring_full(mhi_cntrl, tre_ring))
1056 read_lock_bh(&mhi_cntrl->pm_lock);
1057 if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
1058 read_unlock_bh(&mhi_cntrl->pm_lock);
1063 if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
1064 mhi_trigger_resume(mhi_cntrl);
1067 mhi_cntrl->wake_toggle(mhi_cntrl);
1073 ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
1075 read_unlock_bh(&mhi_cntrl->pm_lock);
1080 atomic_inc(&mhi_cntrl->pending_pkts);
1082 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
1084 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1088 read_unlock_bh(&mhi_cntrl->pm_lock);
1097 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1100 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1109 if (mhi_is_ring_full(mhi_cntrl, tre_ring))
1112 read_lock_bh(&mhi_cntrl->pm_lock);
1113 if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
1115 to_mhi_pm_state_str(mhi_cntrl->pm_state));
1116 read_unlock_bh(&mhi_cntrl->pm_lock);
1122 if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
1123 mhi_trigger_resume(mhi_cntrl);
1126 mhi_cntrl->wake_toggle(mhi_cntrl);
1133 ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
1135 read_unlock_bh(&mhi_cntrl->pm_lock);
1140 atomic_inc(&mhi_cntrl->pending_pkts);
1142 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
1144 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1148 read_unlock_bh(&mhi_cntrl->pm_lock);
1154 int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
1179 ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
1195 mhi_add_ring_element(mhi_cntrl, tre_ring);
1196 mhi_add_ring_element(mhi_cntrl, buf_ring);
1204 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1217 if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
1221 if (mhi_is_ring_full(mhi_cntrl, tre_ring))
1228 ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
1232 read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
1235 if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
1236 mhi_trigger_resume(mhi_cntrl);
1239 mhi_cntrl->wake_toggle(mhi_cntrl);
1242 atomic_inc(&mhi_cntrl->pending_pkts);
1244 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
1248 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1252 read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
1258 int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
1263 struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
1265 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1272 if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
1296 mhi_add_ring_element(mhi_cntrl, ring);
1297 read_lock_bh(&mhi_cntrl->pm_lock);
1298 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1299 mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
1300 read_unlock_bh(&mhi_cntrl->pm_lock);
1306 static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
1310 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1327 read_lock_bh(&mhi_cntrl->pm_lock);
1328 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
1329 read_unlock_bh(&mhi_cntrl->pm_lock);
1333 mhi_cntrl->wake_toggle(mhi_cntrl);
1334 read_unlock_bh(&mhi_cntrl->pm_lock);
1336 mhi_cntrl->runtime_get(mhi_cntrl);
1337 mhi_cntrl->runtime_put(mhi_cntrl);
1338 ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN);
1344 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1351 mhi_reset_chan(mhi_cntrl, mhi_chan);
1352 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
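Channel reset here (lines 1306-1352) and channel start in mhi_prepare_channel() below follow the same issue-and-wait pattern: post MHI_CMD_RESET_CHAN or MHI_CMD_START_CHAN via mhi_send_cmd(), then block on the channel's completion until the command-completion event (mhi_process_cmd_completion() above) signals it or timeout_ms expires. Below is a generic sketch of that pattern using only the standard completion API; the example_* names are placeholders, not driver API.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Stub standing in for mhi_send_cmd() plus the command doorbell write. */
static int example_issue_cmd(void)
{
        return 0;
}

static int example_cmd_and_wait(struct completion *done,
                                unsigned int timeout_ms)
{
        unsigned long left;
        int ret;

        reinit_completion(done);

        ret = example_issue_cmd();
        if (ret)
                return ret;

        /* The command-completion event handler calls complete() on 'done'. */
        left = wait_for_completion_timeout(done,
                                           msecs_to_jiffies(timeout_ms));
        return left ? 0 : -ETIMEDOUT;
}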
1358 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
1362 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1366 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
1369 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask,
1386 ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
1392 read_lock_bh(&mhi_cntrl->pm_lock);
1393 if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
1394 read_unlock_bh(&mhi_cntrl->pm_lock);
1399 mhi_cntrl->wake_toggle(mhi_cntrl);
1400 read_unlock_bh(&mhi_cntrl->pm_lock);
1401 mhi_cntrl->runtime_get(mhi_cntrl);
1402 mhi_cntrl->runtime_put(mhi_cntrl);
1404 ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
1409 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1421 int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
1423 size_t len = mhi_cntrl->buffer_len;
1438 ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
1445 read_lock_bh(&mhi_cntrl->pm_lock);
1446 if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
1448 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1451 read_unlock_bh(&mhi_cntrl->pm_lock);
1463 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1472 __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1477 static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
1485 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1498 dev_err(&mhi_cntrl->mhi_dev->dev,
1520 static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
1535 atomic_dec(&mhi_cntrl->pending_pkts);
1538 mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
1540 mhi_del_ring_element(mhi_cntrl, buf_ring);
1541 mhi_del_ring_element(mhi_cntrl, tre_ring);
1552 void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
1562 read_lock_bh(&mhi_cntrl->pm_lock);
1563 mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1564 er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
1566 mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
1568 mhi_reset_data_chan(mhi_cntrl, mhi_chan);
1570 read_unlock_bh(&mhi_cntrl->pm_lock);
1577 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1585 ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
1598 __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1607 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1616 __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1623 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1625 struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1629 ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
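The tail of the listing (lines 1577-1629) is the client-facing surface: mhi_prepare_for_transfer(), mhi_unprepare_from_transfer() and mhi_poll() resolve mhi_cntrl from the mhi_device and drive the prepare/unprepare/event paths above. The sketch below shows how a client driver might use that API, assuming mhi_prepare_for_transfer() takes only the mhi_device as in this version of the driver; the probe function, buffer size and error handling are illustrative only.

#include <linux/dma-direction.h>
#include <linux/mhi.h>
#include <linux/slab.h>

/* Illustrative client probe: start both channels, then queue one
 * outbound buffer. The transfer result is reported later through the
 * client's xfer callbacks (parse_xfer_event() path above), so the
 * buffer must stay allocated until that callback runs.
 */
static int example_client_probe(struct mhi_device *mhi_dev,
                                const struct mhi_device_id *id)
{
        size_t len = 4096;
        void *buf;
        int ret;

        ret = mhi_prepare_for_transfer(mhi_dev); /* mhi_prepare_channel() per direction */
        if (ret)
                return ret;

        buf = kmalloc(len, GFP_KERNEL);
        if (!buf) {
                mhi_unprepare_from_transfer(mhi_dev);
                return -ENOMEM;
        }

        ret = mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, buf, len, MHI_EOT);
        if (ret) {
                kfree(buf);
                mhi_unprepare_from_transfer(mhi_dev);
        }

        return ret;
}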