Lines Matching defs:mhi_cntrl (every use of the mhi_cntrl controller handle; the line numbers and fragments below appear to come from the MHI host core, drivers/bus/mhi/host/main.c)
19 int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
22 return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
25 int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
32 ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
41 int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
46 u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
49 ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out);
62 void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
65 mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
68 int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
75 ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
81 mhi_write_reg(mhi_cntrl, base, offset, tmp);
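Every accessor above funnels through the controller-supplied read_reg/write_reg
callbacks, so the core itself never touches MMIO. A minimal callback pair,
modelled on what a memory-mapped (e.g. PCI) controller driver typically
supplies; the mhi_pci_* names here are illustrative:

    static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
                                void __iomem *addr, u32 *out)
    {
            *out = readl(addr);
            return 0;
    }

    static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl,
                                  void __iomem *addr, u32 val)
    {
            writel(val, addr);
    }

    /* installed before mhi_register_controller() */
    mhi_cntrl->read_reg = mhi_pci_read_reg;
    mhi_cntrl->write_reg = mhi_pci_write_reg;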
86 void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
89 mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
90 mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
93 void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
100 mhi_write_db(mhi_cntrl, db_addr, db_val);
105 void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
111 mhi_write_db(mhi_cntrl, db_addr, db_val);
118 mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
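Whether process_db actually rings the hardware depends on the ring's burst
mode: mhi_db_brstmode() skips the write while db_mode is clear, whereas
mhi_db_brstmode_disable() always writes. A sketch of the handler selection,
assuming the init-time logic keys off db_cfg.brstmode:

    mhi_chan->db_cfg.process_db =
            (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE) ?
            mhi_db_brstmode : mhi_db_brstmode_disable;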
122 void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
129 mhi_write_db(mhi_cntrl, ring->db_addr, db);
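The doorbell value is the device-visible (IOMMU) address of the ring's write
pointer. A sketch of the derivation plus the write; the ordering rationale in
the comment is the usual split-register convention and is an interpretation,
not spelled out in the source:

    u64 db = ring->iommu_base + (ring->wp - ring->base);

    /* mhi_write_db() stores offset 4 (upper half) before offset 0
     * (lower half), presumably because the device latches the doorbell
     * on the low-dword write; writing low last avoids a torn value. */
    mhi_write_db(mhi_cntrl, ring->db_addr, db);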
132 void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
147 mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
151 enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
154 int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
160 enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
163 int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
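mhi_get_exec_env() and mhi_get_mhi_state() are the sanctioned ways to snapshot
the device side; on a failed register read they return MHI_EE_MAX and
MHI_STATE_MAX respectively. A minimal usage sketch, reusing the pretty-printers
that appear elsewhere in this file:

    enum mhi_ee_type ee = mhi_get_exec_env(mhi_cntrl);
    enum mhi_state state = mhi_get_mhi_state(mhi_cntrl);

    dev_dbg(&mhi_cntrl->mhi_dev->dev, "EE: %s, state: %s\n",
            TO_MHI_EXEC_STR(ee), mhi_state_str(state));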
169 void mhi_soc_reset(struct mhi_controller *mhi_cntrl)
171 if (mhi_cntrl->reset) {
172 mhi_cntrl->reset(mhi_cntrl);
177 mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET,
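mhi_soc_reset() prefers a controller-specific reset() hook and only falls back
to writing MHI_SOC_RESET_REQ into MHI_SOC_RESET_REQ_OFFSET. A controller that
needs an out-of-band reset can install its own callback; my_soc_reset below is
hypothetical:

    static void my_soc_reset(struct mhi_controller *mhi_cntrl)
    {
            /* device-specific (e.g. side-band register) reset here */
    }

    /* optional; leaving ->reset NULL selects the register-write path */
    mhi_cntrl->reset = my_soc_reset;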
182 int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
185 buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
188 if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
194 int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
197 void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
211 void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
214 dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
218 void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
224 dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
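Which mapping pair the core uses is fixed at registration time by the
controller's bounce-buffer setting: the use_bb variants copy through a
dma_alloc_coherent() bounce buffer for devices that cannot DMA to arbitrary
client memory, while the no_bb variants map the client buffer directly. A
sketch of the selection, assuming it mirrors the core's registration path:

    if (mhi_cntrl->bounce_buf) {
            mhi_cntrl->map_single = mhi_map_single_use_bb;
            mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
    } else {
            mhi_cntrl->map_single = mhi_map_single_no_bb;
            mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
    }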
228 static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
249 static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
259 static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
279 struct mhi_controller *mhi_cntrl;
286 mhi_cntrl = mhi_dev->mhi_cntrl;
324 dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
337 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
342 return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
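get_nr_avail_ring_elements() backs the exported mhi_get_free_descriptors(),
which client drivers can use for flow control before queueing. A client-side
sketch; buf and len stand in for the caller's payload:

    if (mhi_get_free_descriptors(mhi_dev, DMA_TO_DEVICE) > 0)
            ret = mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, buf, len,
                                MHI_EOT);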
361 void mhi_create_devices(struct mhi_controller *mhi_cntrl)
365 struct device *dev = &mhi_cntrl->mhi_dev->dev;
368 mhi_chan = mhi_cntrl->mhi_chan;
369 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
371 !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
373 mhi_dev = mhi_alloc_device(mhi_cntrl);
398 if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
417 dev_name(&mhi_cntrl->mhi_dev->dev),
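mhi_create_devices() registers one mhi_device per configured channel pair, and
client drivers bind to them by channel name through the regular driver model.
A skeletal client registration; every my_* symbol is hypothetical, and
"LOOPBACK" is just a well-known channel name:

    static const struct mhi_device_id my_id_table[] = {
            { .chan = "LOOPBACK" },
            {}
    };

    static struct mhi_driver my_mhi_driver = {
            .id_table = my_id_table,
            .probe = my_probe,
            .remove = my_remove,
            .ul_xfer_cb = my_ul_cb,
            .dl_xfer_cb = my_dl_cb,
            .driver = {
                    .name = "my_mhi_client",
            },
    };
    module_mhi_driver(my_mhi_driver);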
433 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
444 if (!mhi_cntrl->mhi_ctxt) {
445 dev_dbg(&mhi_cntrl->mhi_dev->dev,
450 er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
454 dev_err(&mhi_cntrl->mhi_dev->dev,
481 struct mhi_controller *mhi_cntrl = priv;
482 struct device *dev = &mhi_cntrl->mhi_dev->dev;
487 write_lock_irq(&mhi_cntrl->pm_lock);
488 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
489 write_unlock_irq(&mhi_cntrl->pm_lock);
493 state = mhi_get_mhi_state(mhi_cntrl);
494 ee = mhi_get_exec_env(mhi_cntrl);
496 TO_MHI_EXEC_STR(mhi_cntrl->ee),
497 mhi_state_str(mhi_cntrl->dev_state),
502 pm_state = mhi_tryset_pm_state(mhi_cntrl,
505 write_unlock_irq(&mhi_cntrl->pm_lock);
513 if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
514 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
515 mhi_cntrl->ee = ee;
516 wake_up_all(&mhi_cntrl->state_event);
522 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
523 mhi_cntrl->ee = ee;
524 wake_up_all(&mhi_cntrl->state_event);
525 mhi_pm_sys_err_handler(mhi_cntrl);
528 wake_up_all(&mhi_cntrl->state_event);
529 mhi_pm_sys_err_handler(mhi_cntrl);
540 struct mhi_controller *mhi_cntrl = dev;
543 wake_up_all(&mhi_cntrl->state_event);
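mhi_intvec_handler() is the hard-IRQ half and only wakes state_event waiters;
mhi_intvec_threaded_handler() above does the actual state/EE inspection. A
sketch of how the two are wired up, assuming the usual threaded-IRQ setup in
the core (the exact flags and the "bhi" name may differ by kernel version):

    ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
                               mhi_intvec_threaded_handler,
                               IRQF_SHARED | IRQF_NO_SUSPEND,
                               "bhi", mhi_cntrl);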
548 static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
568 static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
573 struct device *dev = &mhi_cntrl->mhi_dev->dev;
611 dev_err(&mhi_cntrl->mhi_dev->dev,
635 mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
642 mhi_del_ring_element(mhi_cntrl, buf_ring);
643 mhi_del_ring_element(mhi_cntrl, tre_ring);
652 atomic_dec(&mhi_cntrl->pending_pkts);
654 mhi_cntrl->runtime_put(mhi_cntrl);
684 read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags);
686 MHI_DB_ACCESS_VALID(mhi_cntrl)) {
687 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
689 read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
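On the completion path parse_xfer_event() unmaps the buffer, retires the TRE
and buffer ring elements, and hands the client an mhi_result through its
xfer_cb. A sketch of the receiving end in a client driver; my_dl_cb and
my_consume are hypothetical, the mhi_result fields are the real ones:

    static void my_dl_cb(struct mhi_device *mhi_dev,
                         struct mhi_result *result)
    {
            if (result->transaction_status)
                    return;         /* e.g. channel torn down mid-flight */

            /* buf_addr/bytes_xferd describe the completed DL buffer */
            my_consume(result->buf_addr, result->bytes_xferd);
    }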
707 static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
762 mhi_del_ring_element(mhi_cntrl, tre_ring);
771 static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
775 struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
782 dev_err(&mhi_cntrl->mhi_dev->dev,
791 if (chan < mhi_cntrl->max_chan &&
792 mhi_cntrl->mhi_chan[chan].configured) {
793 mhi_chan = &mhi_cntrl->mhi_chan[chan];
799 dev_err(&mhi_cntrl->mhi_dev->dev,
803 mhi_del_ring_element(mhi_cntrl, mhi_ring);
806 int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
813 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
815 struct device *dev = &mhi_cntrl->mhi_dev->dev;
825 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
829 dev_err(&mhi_cntrl->mhi_dev->dev,
845 link_info = &mhi_cntrl->mhi_link_info;
846 write_lock_irq(&mhi_cntrl->pm_lock);
851 write_unlock_irq(&mhi_cntrl->pm_lock);
853 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
867 mhi_pm_m0_transition(mhi_cntrl);
870 mhi_pm_m1_transition(mhi_cntrl);
873 mhi_pm_m3_transition(mhi_cntrl);
880 write_lock_irq(&mhi_cntrl->pm_lock);
881 pm_state = mhi_tryset_pm_state(mhi_cntrl,
883 write_unlock_irq(&mhi_cntrl->pm_lock);
885 mhi_pm_sys_err_handler(mhi_cntrl);
896 mhi_process_cmd_completion(mhi_cntrl, local_rp);
917 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
918 write_lock_irq(&mhi_cntrl->pm_lock);
919 mhi_cntrl->ee = event;
920 write_unlock_irq(&mhi_cntrl->pm_lock);
921 wake_up_all(&mhi_cntrl->state_event);
928 mhi_queue_state_transition(mhi_cntrl, st);
935 WARN_ON(chan >= mhi_cntrl->max_chan);
941 if (chan < mhi_cntrl->max_chan) {
942 mhi_chan = &mhi_cntrl->mhi_chan[chan];
945 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
953 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
958 dev_err(&mhi_cntrl->mhi_dev->dev,
967 read_lock_bh(&mhi_cntrl->pm_lock);
970 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count)
972 read_unlock_bh(&mhi_cntrl->pm_lock);
977 int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
984 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
990 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
994 dev_err(&mhi_cntrl->mhi_dev->dev,
1007 WARN_ON(chan >= mhi_cntrl->max_chan);
1013 if (chan < mhi_cntrl->max_chan &&
1014 mhi_cntrl->mhi_chan[chan].configured) {
1015 mhi_chan = &mhi_cntrl->mhi_chan[chan];
1018 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
1021 parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
1026 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
1031 dev_err(&mhi_cntrl->mhi_dev->dev,
1039 read_lock_bh(&mhi_cntrl->pm_lock);
1042 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count)
1044 read_unlock_bh(&mhi_cntrl->pm_lock);
1052 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
1056 mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
1063 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
1064 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1074 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
1080 mhi_trigger_resume(mhi_cntrl);
1086 ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
1093 write_lock_irq(&mhi_cntrl->pm_lock);
1094 state = mhi_get_mhi_state(mhi_cntrl);
1097 pm_state = mhi_tryset_pm_state(mhi_cntrl,
1100 write_unlock_irq(&mhi_cntrl->pm_lock);
1102 mhi_pm_sys_err_handler(mhi_cntrl);
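Both tasklets are bound to their event ring when the controller registers,
with the control ring getting the heavier mhi_ctrl_ev_task(). A sketch of the
dispatch, assuming it follows the core's registration path:

    if (mhi_event->data_type == MHI_ER_CTRL)
            tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
                         (ulong)mhi_event);
    else
            tasklet_init(&mhi_event->task, mhi_ev_task,
                         (ulong)mhi_event);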
1106 static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
1120 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1127 if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
1130 ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
1134 ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
1138 read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
1144 mhi_cntrl->runtime_get(mhi_cntrl);
1147 mhi_cntrl->wake_toggle(mhi_cntrl);
1150 atomic_inc(&mhi_cntrl->pending_pkts);
1152 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1153 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1156 mhi_cntrl->runtime_put(mhi_cntrl);
1158 read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
1200 int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
1228 ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
1246 mhi_add_ring_element(mhi_cntrl, tre_ring);
1247 mhi_add_ring_element(mhi_cntrl, buf_ring);
1269 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1274 return mhi_is_ring_full(mhi_cntrl, tre_ring);
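mhi_queue_is_full() gives clients a cheap occupancy check; a common pattern is
replenishing inbound buffers until the ring fills. A sketch using the skb
variant of the queue API (len is the client's chosen buffer size):

    while (!mhi_queue_is_full(mhi_dev, DMA_FROM_DEVICE)) {
            struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);

            if (!skb)
                    break;
            if (mhi_queue_skb(mhi_dev, DMA_FROM_DEVICE, skb, len,
                              MHI_EOT)) {
                    kfree_skb(skb);
                    break;
            }
    }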
1278 int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
1283 struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
1285 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1292 if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
1321 mhi_add_ring_element(mhi_cntrl, ring);
1322 read_lock_bh(&mhi_cntrl->pm_lock);
1323 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1324 mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
1325 read_unlock_bh(&mhi_cntrl->pm_lock);
1331 static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
1376 ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
1379 mhi_cntrl->runtime_get(mhi_cntrl);
1382 ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd);
1390 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1412 mhi_cntrl->runtime_put(mhi_cntrl);
1413 mhi_device_put(mhi_cntrl->mhi_dev);
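mhi_update_channel_state() is synchronous: it posts the command via
mhi_send_cmd() and then sleeps on the channel's completion for up to
timeout_ms, checking the completion code written back by
mhi_process_cmd_completion(). A sketch of that wait, reconstructed from the
fragments above:

    ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd);
    if (ret)
            goto exit_channel_update;

    ret = wait_for_completion_timeout(&mhi_chan->completion,
                              msecs_to_jiffies(mhi_cntrl->timeout_ms));
    if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
            ret = -EIO;
            goto exit_channel_update;
    }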
1418 static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
1426 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
1428 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
1433 ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
1445 mhi_reset_chan(mhi_cntrl, mhi_chan);
1446 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1453 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
1459 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
1461 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
1469 ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
1474 ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
1484 int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
1486 size_t len = mhi_cntrl->buffer_len;
1502 ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
1509 read_lock_bh(&mhi_cntrl->pm_lock);
1510 if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
1512 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1515 read_unlock_bh(&mhi_cntrl->pm_lock);
1524 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1533 mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1538 static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
1546 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1559 dev_err(&mhi_cntrl->mhi_dev->dev,
1581 static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
1596 atomic_dec(&mhi_cntrl->pending_pkts);
1598 mhi_cntrl->runtime_put(mhi_cntrl);
1602 mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
1604 mhi_del_ring_element(mhi_cntrl, buf_ring);
1605 mhi_del_ring_element(mhi_cntrl, tre_ring);
1616 void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
1626 read_lock_bh(&mhi_cntrl->pm_lock);
1627 mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1628 er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
1630 mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
1632 mhi_reset_data_chan(mhi_cntrl, mhi_chan);
1634 read_unlock_bh(&mhi_cntrl->pm_lock);
1640 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1648 ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
1661 mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1681 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1690 mhi_unprepare_channel(mhi_cntrl, mhi_chan);
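From a client driver's perspective the channel lifecycle above reduces to a
bracket around the I/O phase, via the exported wrappers over
mhi_prepare_channel()/mhi_unprepare_channel(). A closing sketch; my_probe and
my_remove are the hypothetical hooks from the driver skeleton earlier:

    static int my_probe(struct mhi_device *mhi_dev,
                        const struct mhi_device_id *id)
    {
            int ret;

            /* moves both UL and DL channels to the running state */
            ret = mhi_prepare_for_transfer(mhi_dev);
            if (ret)
                    return ret;

            /* ... mhi_queue_buf()/mhi_queue_skb() transfers ... */
            return 0;
    }

    static void my_remove(struct mhi_device *mhi_dev)
    {
            /* resets the channels and frees their ring contexts */
            mhi_unprepare_from_transfer(mhi_dev);
    }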