Lines Matching defs:mhi_cntrl

84 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
87 mhi_cntrl->serial_number);
96 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
99 for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
102 mhi_cntrl->oem_pk_hash[i]);
116 static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
121 ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size,
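mhi_alloc_aligned_ring() (lines 116-121 above) over-allocates the coherent buffer so that a naturally aligned ring of size len is guaranteed to fit, then rounds the DMA handle up to the alignment. A minimal stand-alone sketch of that rounding, assuming len is a power of two; ring_align_up() is my name, not the driver's:

	#include <assert.h>
	#include <stdint.h>

	/* Round addr up to the next multiple of len (len must be a power of two). */
	static uint64_t ring_align_up(uint64_t addr, uint64_t len)
	{
		return (addr + (len - 1)) & ~(len - 1);
	}

	int main(void)
	{
		/* Allocating len + (len - 1) bytes always leaves room for an
		 * aligned window of len bytes, whatever the start address is.
		 */
		assert(ring_align_up(0x1001, 0x1000) == 0x2000);
		assert(ring_align_up(0x2000, 0x1000) == 0x2000);
		return 0;
	}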
132 void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
135 struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
137 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
141 free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
144 free_irq(mhi_cntrl->irq[0], mhi_cntrl);
147 int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
149 struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
150 struct device *dev = &mhi_cntrl->mhi_dev->dev;
154 ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
157 "bhi", mhi_cntrl);
161 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
165 if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
172 ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
178 mhi_cntrl->irq[mhi_event->irq], i);
190 free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
192 free_irq(mhi_cntrl->irq[0], mhi_cntrl);
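mhi_init_irq_setup() (lines 147-192) requests irq[0] as the threaded BHI/control vector and one plain IRQ per event ring, rejecting any event ring whose irq index reaches nr_irqs (line 165). A sketch of how a PCI controller driver might populate that table beforehand; mhi_pdev_setup_irqs() is a hypothetical name, while pci_alloc_irq_vectors() and pci_irq_vector() are standard PCI core helpers:

	static int mhi_pdev_setup_irqs(struct pci_dev *pdev,
				       struct mhi_controller *mhi_cntrl,
				       unsigned int nr_vectors)
	{
		int nvec, i;

		/* One vector for BHI/control plus one per event ring. */
		nvec = pci_alloc_irq_vectors(pdev, 1, nr_vectors, PCI_IRQ_MSI);
		if (nvec < 0)
			return nvec;

		mhi_cntrl->irq = devm_kcalloc(&pdev->dev, nvec,
					      sizeof(*mhi_cntrl->irq),
					      GFP_KERNEL);
		if (!mhi_cntrl->irq)
			return -ENOMEM;

		for (i = 0; i < nvec; i++)
			mhi_cntrl->irq[i] = pci_irq_vector(pdev, i);

		mhi_cntrl->nr_irqs = nvec;

		return 0;
	}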
197 void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
200 struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
205 mhi_cmd = mhi_cntrl->mhi_cmd;
208 mhi_free_coherent(mhi_cntrl, ring->alloc_size,
214 mhi_free_coherent(mhi_cntrl,
218 mhi_event = mhi_cntrl->mhi_event;
219 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
224 mhi_free_coherent(mhi_cntrl, ring->alloc_size,
230 mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
231 mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
234 mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
235 mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
239 mhi_cntrl->mhi_ctxt = NULL;
242 int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
254 atomic_set(&mhi_cntrl->dev_wake, 0);
255 atomic_set(&mhi_cntrl->pending_pkts, 0);
262 mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl,
264 mhi_cntrl->max_chan,
270 mhi_chan = mhi_cntrl->mhi_chan;
272 for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
294 mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl,
296 mhi_cntrl->total_ev_rings,
303 mhi_event = mhi_cntrl->mhi_event;
304 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
324 ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
341 mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl,
349 mhi_cmd = mhi_cntrl->mhi_cmd;
357 ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
368 mhi_cntrl->mhi_ctxt = mhi_ctxt;
376 mhi_free_coherent(mhi_cntrl, ring->alloc_size,
379 mhi_free_coherent(mhi_cntrl,
382 i = mhi_cntrl->total_ev_rings;
383 mhi_event = mhi_cntrl->mhi_event + i;
392 mhi_free_coherent(mhi_cntrl, ring->alloc_size,
395 mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
396 mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
400 mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
401 mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
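The error path of mhi_init_dev_ctxt() (lines 376-401) is the usual reverse-order unwind: each allocation made on the way in gets its own label, and a failure falls through every label below it. A generic sketch of that shape, with hypothetical alloc_*/free_* helpers standing in for the channel-context, event-ring and command-ring allocations:

	int setup_contexts(void)
	{
		int ret;

		ret = alloc_chan_ctxt();
		if (ret)
			return ret;

		ret = alloc_event_rings();
		if (ret)
			goto err_free_chan;

		ret = alloc_cmd_rings();
		if (ret)
			goto err_free_events;

		return 0;

	err_free_events:
		free_event_rings();
	err_free_chan:
		free_chan_ctxt();
		return ret;
	}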
410 int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
416 void __iomem *base = mhi_cntrl->regs;
417 struct device *dev = &mhi_cntrl->mhi_dev->dev;
426 upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
430 lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
434 upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
438 lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
442 upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
446 lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
450 mhi_cntrl->total_ev_rings,
454 mhi_cntrl->hw_ev_rings,
458 upper_32_bits(mhi_cntrl->iova_start),
462 lower_32_bits(mhi_cntrl->iova_start),
466 upper_32_bits(mhi_cntrl->iova_start),
470 lower_32_bits(mhi_cntrl->iova_start),
474 upper_32_bits(mhi_cntrl->iova_stop),
478 lower_32_bits(mhi_cntrl->iova_stop),
482 upper_32_bits(mhi_cntrl->iova_stop),
486 lower_32_bits(mhi_cntrl->iova_stop),
494 ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK,
501 if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) {
503 val, mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB));
508 mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
509 mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
510 mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
511 mhi_cntrl->wake_set = false;
514 mhi_chan = mhi_cntrl->mhi_chan;
515 for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
519 ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK,
526 if (val >= mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)) {
528 val, mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings));
533 mhi_event = mhi_cntrl->mhi_event;
534 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
542 mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;
546 mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset,
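mhi_init_mmio() (lines 410-546) programs each 64-bit context base address as an upper/lower pair of 32-bit writes (CCABAP, ECABAP, CRCBAP) before finishing with a table-driven loop of mhi_write_reg_field() calls (line 546). A condensed sketch of the pair-write idiom, using the mhi_write_reg() helper internal to this file; hi_off and lo_off stand in for the real register offsets:

	static void program_ctxt_base(struct mhi_controller *mhi_cntrl,
				      void __iomem *base,
				      u32 hi_off, u32 lo_off, u64 addr)
	{
		/* The device latches the 64-bit base from two 32-bit registers. */
		mhi_write_reg(mhi_cntrl, base, hi_off, upper_32_bits(addr));
		mhi_write_reg(mhi_cntrl, base, lo_off, lower_32_bits(addr));
	}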
553 void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
563 chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
565 mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
585 int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
598 chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
599 ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
608 mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
633 static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
638 struct device *dev = mhi_cntrl->cntrl_dev;
642 mhi_cntrl->total_ev_rings = num;
643 mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
645 if (!mhi_cntrl->mhi_event)
649 mhi_event = mhi_cntrl->mhi_event;
661 if (mhi_event->chan >= mhi_cntrl->max_chan) {
668 &mhi_cntrl->mhi_chan[mhi_event->chan];
699 mhi_cntrl->hw_ev_rings++;
701 mhi_cntrl->sw_ev_rings++;
712 kfree(mhi_cntrl->mhi_event);
716 static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
720 struct device *dev = mhi_cntrl->cntrl_dev;
724 mhi_cntrl->max_chan = config->max_channels;
731 mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
732 sizeof(*mhi_cntrl->mhi_chan));
733 if (!mhi_cntrl->mhi_chan)
736 INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);
745 if (chan >= mhi_cntrl->max_chan) {
750 mhi_chan = &mhi_cntrl->mhi_chan[chan];
823 list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
829 vfree(mhi_cntrl->mhi_chan);
834 static int parse_config(struct mhi_controller *mhi_cntrl,
840 ret = parse_ch_cfg(mhi_cntrl, config);
845 ret = parse_ev_cfg(mhi_cntrl, config);
849 mhi_cntrl->timeout_ms = config->timeout_ms;
850 if (!mhi_cntrl->timeout_ms)
851 mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;
853 mhi_cntrl->bounce_buf = config->use_bounce_buf;
854 mhi_cntrl->buffer_len = config->buf_len;
855 if (!mhi_cntrl->buffer_len)
856 mhi_cntrl->buffer_len = MHI_MAX_MTU;
859 mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
861 mhi_cntrl->db_access &= ~MHI_PM_M2;
866 vfree(mhi_cntrl->mhi_chan);
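parse_config() (lines 834-866) copies the caller's struct mhi_controller_config into the controller, substituting MHI_TIMEOUT_MS and MHI_MAX_MTU when timeout_ms or buf_len are left at zero (lines 850-856). A trimmed example of the config a controller driver hands to mhi_register_controller(); the channel name, numbers and ring sizes here are illustrative, not taken from any real device:

	static const struct mhi_channel_config sketch_channels[] = {
		{
			.num = 20,
			.name = "IPCR",
			.num_elements = 32,
			.event_ring = 0,
			.dir = DMA_TO_DEVICE,
			.ee_mask = BIT(MHI_EE_AMSS),
			.doorbell = MHI_DB_BRST_DISABLE,
		},
	};

	static struct mhi_event_config sketch_events[] = {
		{
			.num_elements = 32,
			.irq = 1,
			.mode = MHI_DB_BRST_DISABLE,
			.data_type = MHI_ER_CTRL,
		},
	};

	static const struct mhi_controller_config sketch_config = {
		.max_channels = 128,
		.timeout_ms = 2000,	/* 0 would fall back to MHI_TIMEOUT_MS */
		.num_channels = ARRAY_SIZE(sketch_channels),
		.ch_cfg = sketch_channels,
		.num_events = ARRAY_SIZE(sketch_events),
		.event_cfg = sketch_events,
	};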
871 int mhi_register_controller(struct mhi_controller *mhi_cntrl,
881 if (!mhi_cntrl)
884 if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
885 !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
886 !mhi_cntrl->write_reg)
889 ret = parse_config(mhi_cntrl, config);
893 mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
894 sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
895 if (!mhi_cntrl->mhi_cmd) {
900 INIT_LIST_HEAD(&mhi_cntrl->transition_list);
901 mutex_init(&mhi_cntrl->pm_mutex);
902 rwlock_init(&mhi_cntrl->pm_lock);
903 spin_lock_init(&mhi_cntrl->transition_lock);
904 spin_lock_init(&mhi_cntrl->wlock);
905 INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
906 init_waitqueue_head(&mhi_cntrl->state_event);
908 mhi_cmd = mhi_cntrl->mhi_cmd;
912 mhi_event = mhi_cntrl->mhi_event;
913 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
918 mhi_event->mhi_cntrl = mhi_cntrl;
928 mhi_chan = mhi_cntrl->mhi_chan;
929 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
935 mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
939 if (mhi_cntrl->bounce_buf) {
940 mhi_cntrl->map_single = mhi_map_single_use_bb;
941 mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
943 mhi_cntrl->map_single = mhi_map_single_no_bb;
944 mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
948 ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
953 mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >>
955 mhi_cntrl->device_number = (soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >>
957 mhi_cntrl->major_version = (soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >>
959 mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >>
963 mhi_dev = mhi_alloc_device(mhi_cntrl);
965 dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
971 mhi_dev->mhi_cntrl = mhi_cntrl;
972 dev_set_name(&mhi_dev->dev, "%s", dev_name(mhi_cntrl->cntrl_dev));
973 mhi_dev->name = dev_name(mhi_cntrl->cntrl_dev);
982 mhi_cntrl->mhi_dev = mhi_dev;
984 mhi_create_debugfs(mhi_cntrl);
992 kfree(mhi_cntrl->mhi_cmd);
995 vfree(mhi_cntrl->mhi_chan);
996 kfree(mhi_cntrl->mhi_event);
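mhi_register_controller() (lines 871-996) bails out unless runtime_get, runtime_put, status_cb, read_reg and write_reg are all supplied (lines 884-886), then allocates the command rings and the mhi_dev. A minimal registration sequence as a controller driver's probe might write it; pdev, mmio_base, mmio_len and the my_* callbacks are placeholders, and sketch_config is the example above:

	struct mhi_controller *mhi_cntrl;
	int ret;

	mhi_cntrl = mhi_alloc_controller();
	if (!mhi_cntrl)
		return -ENOMEM;

	mhi_cntrl->cntrl_dev = &pdev->dev;
	mhi_cntrl->regs = mmio_base;	/* ioremapped BAR */
	mhi_cntrl->reg_len = mmio_len;	/* validated at lines 501 and 526 */

	/* Mandatory callbacks, checked at lines 884-886. */
	mhi_cntrl->runtime_get = my_runtime_get;
	mhi_cntrl->runtime_put = my_runtime_put;
	mhi_cntrl->status_cb = my_status_cb;
	mhi_cntrl->read_reg = my_read_reg;
	mhi_cntrl->write_reg = my_write_reg;

	ret = mhi_register_controller(mhi_cntrl, &sketch_config);
	if (ret)
		mhi_free_controller(mhi_cntrl);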
1002 void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
1004 struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
1005 struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
1008 mhi_destroy_debugfs(mhi_cntrl);
1010 kfree(mhi_cntrl->mhi_cmd);
1011 kfree(mhi_cntrl->mhi_event);
1014 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
1020 vfree(mhi_cntrl->mhi_chan);
1029 struct mhi_controller *mhi_cntrl;
1031 mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL);
1033 return mhi_cntrl;
1037 void mhi_free_controller(struct mhi_controller *mhi_cntrl)
1039 kfree(mhi_cntrl);
1043 int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
1045 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1049 mutex_lock(&mhi_cntrl->pm_mutex);
1051 ret = mhi_init_dev_ctxt(mhi_cntrl);
1058 if (mhi_cntrl->rddm_size) {
1059 mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
1060 mhi_cntrl->rddm_size);
1066 ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
1073 mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
1074 memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
1078 if (mhi_cntrl->rddm_image)
1079 mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
1082 mhi_cntrl->pre_init = true;
1084 mutex_unlock(&mhi_cntrl->pm_mutex);
1089 if (mhi_cntrl->rddm_image) {
1090 mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
1091 mhi_cntrl->rddm_image = NULL;
1095 mutex_unlock(&mhi_cntrl->pm_mutex);
1101 void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
1103 if (mhi_cntrl->fbc_image) {
1104 mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
1105 mhi_cntrl->fbc_image = NULL;
1108 if (mhi_cntrl->rddm_image) {
1109 mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
1110 mhi_cntrl->rddm_image = NULL;
1113 mhi_deinit_dev_ctxt(mhi_cntrl);
1114 mhi_cntrl->pre_init = false;
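mhi_prepare_for_power_up() (lines 1043-1095) builds the device context, maps the BHIe registers and, when rddm_size is set, allocates the RDDM table; mhi_unprepare_after_power_down() (lines 1101-1114) releases the FBC and RDDM images and tears the context back down. The expected pairing from a controller driver, assuming the standard mhi_sync_power_up()/mhi_power_down() calls from the MHI API in between:

	ret = mhi_prepare_for_power_up(mhi_cntrl);
	if (ret)
		return ret;

	ret = mhi_sync_power_up(mhi_cntrl);
	if (ret) {
		mhi_unprepare_after_power_down(mhi_cntrl);
		return ret;
	}

	/* ... device in service ... */

	mhi_power_down(mhi_cntrl, true);	/* graceful shutdown */
	mhi_unprepare_after_power_down(mhi_cntrl);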
1137 struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
1150 dev->parent = mhi_cntrl->cntrl_dev;
1151 mhi_dev->mhi_cntrl = mhi_cntrl;
1160 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1189 ret = mhi_prepare_channel(mhi_cntrl, ul_chan);
1208 mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];
1227 mhi_prepare_channel(mhi_cntrl, dl_chan);
1245 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1279 mhi_reset_chan(mhi_cntrl, mhi_chan);
1298 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
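mhi_prepare_for_transfer() (lines 1160-1227) readies a client device's UL and DL channels, and mhi_unprepare_from_transfer() (lines 1245-1298) resets and deinits them. The natural home for the pair is an MHI client driver's probe/remove; a skeleton with hypothetical my_* names, matched to the "IPCR" channel used in the config example above:

	static void my_xfer_cb(struct mhi_device *mhi_dev,
			       struct mhi_result *result)
	{
		/* Completion of a queued UL or DL transfer lands here. */
	}

	static int my_probe(struct mhi_device *mhi_dev,
			    const struct mhi_device_id *id)
	{
		return mhi_prepare_for_transfer(mhi_dev);
	}

	static void my_remove(struct mhi_device *mhi_dev)
	{
		mhi_unprepare_from_transfer(mhi_dev);
	}

	static const struct mhi_device_id my_id_table[] = {
		{ .chan = "IPCR" },
		{}
	};

	static struct mhi_driver my_driver = {
		.id_table = my_id_table,
		.probe = my_probe,
		.remove = my_remove,
		.ul_xfer_cb = my_xfer_cb,
		.dl_xfer_cb = my_xfer_cb,
		.driver = {
			.name = "my_mhi_client",
		},
	};
	module_mhi_driver(my_driver);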