Lines matching defs: mhi_cntrl
87 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
90 mhi_cntrl->serial_number);
99 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
102 for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
104 i, mhi_cntrl->oem_pk_hash[i]);
116 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
118 mhi_soc_reset(mhi_cntrl);
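
The three fragments above are from the controller's sysfs callbacks: serial_number_show(), oem_pk_hash_show() (which loops over the hash words), and the soc_reset store hook that simply calls mhi_soc_reset(). A minimal sketch of the show pattern, assuming the core's to_mhi_device() helper; the exact print helper varies by kernel version:

static ssize_t serial_number_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	/* serial_number is captured from BHI during firmware load */
	return sysfs_emit(buf, "Serial Number: %u\n",
			  mhi_cntrl->serial_number);
}
static DEVICE_ATTR_RO(serial_number);
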
132 static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
137 ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
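
mhi_alloc_aligned_ring() needs ring memory aligned to the ring's own length, which dma_alloc_coherent() alone does not guarantee. The over-allocate-and-round-up pattern it uses, sketched here under the assumption that len is a power of two:

ring->alloc_size = len + (len - 1);
ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				       &ring->dma_handle, GFP_KERNEL);
if (!ring->pre_aligned)
	return -ENOMEM;

/* Round the bus address up to the next len boundary and move the CPU
 * pointer by the same delta so both views refer to the same memory. */
ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);
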
148 void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
151 struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
153 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
157 free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
160 free_irq(mhi_cntrl->irq[0], mhi_cntrl);
163 int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
165 struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
166 struct device *dev = &mhi_cntrl->mhi_dev->dev;
171 if (mhi_cntrl->irq_flags)
172 irq_flags = mhi_cntrl->irq_flags;
175 ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
178 "bhi", mhi_cntrl);
186 disable_irq(mhi_cntrl->irq[0]);
188 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
192 if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
199 ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
205 mhi_cntrl->irq[mhi_event->irq], i);
209 disable_irq(mhi_cntrl->irq[mhi_event->irq]);
219 free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
221 free_irq(mhi_cntrl->irq[0], mhi_cntrl);
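
mhi_init_irq_setup() requests a threaded handler on irq[0] for BHI and controller events (honoring a driver-supplied irq_flags override), then one plain handler per event ring after checking the ring's irq index against nr_irqs. Each IRQ is disabled right after the request so nothing fires before power-up, and the error path frees in reverse. The per-ring loop, roughly; the handler name, flags, and label are assumptions from the MHI core and may differ by kernel version (offloaded rings are skipped because clients manage them):

for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
	if (mhi_event->offload_ev)
		continue;

	if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
		dev_err(dev, "irq %d not available for event ring\n",
			mhi_event->irq);
		ret = -EINVAL;
		goto error_request;
	}

	ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
			  mhi_irq_handler, irq_flags, "mhi", mhi_event);
	if (ret)
		goto error_request;

	/* Re-enabled later from mhi_async_power_up() */
	disable_irq(mhi_cntrl->irq[mhi_event->irq]);
}
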
226 void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
229 struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
234 mhi_cmd = mhi_cntrl->mhi_cmd;
237 dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
243 dma_free_coherent(mhi_cntrl->cntrl_dev,
247 mhi_event = mhi_cntrl->mhi_event;
248 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
253 dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
259 dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
260 mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
263 dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
264 mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
268 mhi_cntrl->mhi_ctxt = NULL;
271 int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
283 atomic_set(&mhi_cntrl->dev_wake, 0);
284 atomic_set(&mhi_cntrl->pending_pkts, 0);
291 mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
293 mhi_cntrl->max_chan,
299 mhi_chan = mhi_cntrl->mhi_chan;
301 for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
323 mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
325 mhi_cntrl->total_ev_rings,
332 mhi_event = mhi_cntrl->mhi_event;
333 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
353 ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
370 mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
378 mhi_cmd = mhi_cntrl->mhi_cmd;
386 ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
397 mhi_cntrl->mhi_ctxt = mhi_ctxt;
405 dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
408 dma_free_coherent(mhi_cntrl->cntrl_dev,
411 i = mhi_cntrl->total_ev_rings;
412 mhi_event = mhi_cntrl->mhi_event + i;
421 dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
424 dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
425 mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
429 dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
430 mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
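
mhi_init_dev_ctxt() builds everything the device dereferences through the context base pointers: a chan_ctxt entry for every possible channel, an er_ctxt plus an aligned ring per event ring, and the command ring contexts; the unwind labels starting at line 405 free those allocations in reverse, exactly mirroring mhi_deinit_dev_ctxt() above. The channel-context table, for instance, is a single coherent DMA allocation sized by max_chan (sketch; the label name is an assumption):

mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
					 sizeof(*mhi_ctxt->chan_ctxt) *
					 mhi_cntrl->max_chan,
					 &mhi_ctxt->chan_ctxt_addr,
					 GFP_KERNEL);
if (!mhi_ctxt->chan_ctxt)
	goto error_alloc_chan_ctxt;
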
439 int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
445 void __iomem *base = mhi_cntrl->regs;
446 struct device *dev = &mhi_cntrl->mhi_dev->dev;
453 upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
457 lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
461 upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
465 lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
469 upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
473 lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
477 upper_32_bits(mhi_cntrl->iova_start),
481 lower_32_bits(mhi_cntrl->iova_start),
485 upper_32_bits(mhi_cntrl->iova_start),
489 lower_32_bits(mhi_cntrl->iova_start),
493 upper_32_bits(mhi_cntrl->iova_stop),
497 lower_32_bits(mhi_cntrl->iova_stop),
501 upper_32_bits(mhi_cntrl->iova_stop),
505 lower_32_bits(mhi_cntrl->iova_stop),
513 ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, &val);
519 if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) {
521 val, mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB));
526 mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
527 mhi_cntrl->wake_set = false;
530 mhi_chan = mhi_cntrl->mhi_chan;
531 for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
535 ret = mhi_read_reg(mhi_cntrl, base, ERDBOFF, &val);
541 if (val >= mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)) {
543 val, mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings));
548 mhi_event = mhi_cntrl->mhi_event;
549 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
557 mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;
561 mhi_write_reg(mhi_cntrl, base, reg_info[i].offset,
564 ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NER_MASK,
565 mhi_cntrl->total_ev_rings);
571 ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NHWER_MASK,
572 mhi_cntrl->hw_ev_rings);
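
mhi_init_mmio() walks a reg_info[] table whose entries carry the upper_32_bits()/lower_32_bits() halves seen above, programming the context base addresses and the iova_start/iova_stop DMA window into the device. It then reads CHDBOFF and ERDBOFF to locate the doorbell arrays, rejecting offsets that would run past reg_len (each doorbell is 8 bytes wide), and finally writes the ring counts into MHICFG. Written without the table, one 64-bit programming step plus the channel doorbell fixup looks roughly like this (register names as used by the MHI core):

/* Channel context array base address, written as two 32-bit halves */
mhi_write_reg(mhi_cntrl, base, CCABAP_HIGHER,
	      upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr));
mhi_write_reg(mhi_cntrl, base, CCABAP_LOWER,
	      lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr));

/* Channel doorbells sit 8 bytes apart starting at the CHDBOFF offset */
ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, &val);
if (ret)
	return -EIO;

mhi_chan = mhi_cntrl->mhi_chan;
for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
	mhi_chan->tre_ring.db_addr = base + val;
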
581 void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
591 chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
596 dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
616 int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
629 chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
630 ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
639 dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
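
mhi_init_chan_ctxt() sizes the channel's transfer (TRE) ring from its element count, allocates it with mhi_alloc_aligned_ring(), allocates a parallel buffer-tracking ring, and points the hardware channel context at the ring's IOMMU base; on buffer-ring failure the TRE ring is freed again, as the fragment at line 639 shows. The sizing step, assuming the core's ring element type (struct mhi_ring_element in recent kernels; older trees use struct mhi_tre):

tre_ring = &mhi_chan->tre_ring;
tre_ring->el_size = sizeof(struct mhi_ring_element);
tre_ring->len = tre_ring->el_size * tre_ring->elements;
chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
if (ret)
	return -ENOMEM;
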
664 static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
669 struct device *dev = mhi_cntrl->cntrl_dev;
673 mhi_cntrl->total_ev_rings = num;
674 mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
676 if (!mhi_cntrl->mhi_event)
680 mhi_event = mhi_cntrl->mhi_event;
692 if (mhi_event->chan >= mhi_cntrl->max_chan) {
699 &mhi_cntrl->mhi_chan[mhi_event->chan];
730 mhi_cntrl->hw_ev_rings++;
732 mhi_cntrl->sw_ev_rings++;
743 kfree(mhi_cntrl->mhi_event);
747 static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
751 struct device *dev = mhi_cntrl->cntrl_dev;
755 mhi_cntrl->max_chan = config->max_channels;
762 mhi_cntrl->mhi_chan = vcalloc(mhi_cntrl->max_chan,
763 sizeof(*mhi_cntrl->mhi_chan));
764 if (!mhi_cntrl->mhi_chan)
767 INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);
776 if (chan >= mhi_cntrl->max_chan) {
781 mhi_chan = &mhi_cntrl->mhi_chan[chan];
854 list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
860 vfree(mhi_cntrl->mhi_chan);
865 static int parse_config(struct mhi_controller *mhi_cntrl,
871 ret = parse_ch_cfg(mhi_cntrl, config);
876 ret = parse_ev_cfg(mhi_cntrl, config);
880 mhi_cntrl->timeout_ms = config->timeout_ms;
881 if (!mhi_cntrl->timeout_ms)
882 mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;
884 mhi_cntrl->bounce_buf = config->use_bounce_buf;
885 mhi_cntrl->buffer_len = config->buf_len;
886 if (!mhi_cntrl->buffer_len)
887 mhi_cntrl->buffer_len = MHI_MAX_MTU;
890 mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
892 mhi_cntrl->db_access &= ~MHI_PM_M2;
897 vfree(mhi_cntrl->mhi_chan);
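
parse_config() pulls a caller-supplied struct mhi_controller_config apart: parse_ch_cfg() vcalloc()s the channel table (vcalloc, since max_channels may be large) and parse_ev_cfg() kcalloc()s the event rings, both validating indices before copying; then the scalars get defaults (MHI_TIMEOUT_MS when timeout_ms is 0, MHI_MAX_MTU when buf_len is 0) and M2 doorbell access is dropped when the config disallows it. A hypothetical configuration a controller driver might pass in, with one IPC channel and one control event ring; all values are illustrative, not copied from an in-tree driver:

static const struct mhi_channel_config example_channels[] = {
	{
		.num = 20,			/* IPCR, device-bound */
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 0,
		.dir = DMA_TO_DEVICE,
		.ee_mask = BIT(MHI_EE_AMSS),
		.doorbell = MHI_DB_BRST_DISABLE,
	},
};

static struct mhi_event_config example_events[] = {
	{
		.num_elements = 32,
		.irq_moderation_ms = 0,
		.irq = 1,			/* index into mhi_cntrl->irq[] */
		.mode = MHI_DB_BRST_DISABLE,
		.data_type = MHI_ER_CTRL,
		.priority = 1,
	},
};

static const struct mhi_controller_config example_config = {
	.max_channels = 128,
	.timeout_ms = 2000,	/* 0 would fall back to MHI_TIMEOUT_MS */
	.buf_len = 0,		/* 0 => core uses MHI_MAX_MTU */
	.num_channels = ARRAY_SIZE(example_channels),
	.ch_cfg = example_channels,
	.num_events = ARRAY_SIZE(example_events),
	.event_cfg = example_events,
	.use_bounce_buf = false,
};
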
902 int mhi_register_controller(struct mhi_controller *mhi_cntrl,
912 if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
913 !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
914 !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
915 !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs ||
916 !mhi_cntrl->irq || !mhi_cntrl->reg_len)
919 ret = parse_config(mhi_cntrl, config);
923 mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
924 sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
925 if (!mhi_cntrl->mhi_cmd) {
930 INIT_LIST_HEAD(&mhi_cntrl->transition_list);
931 mutex_init(&mhi_cntrl->pm_mutex);
932 rwlock_init(&mhi_cntrl->pm_lock);
933 spin_lock_init(&mhi_cntrl->transition_lock);
934 spin_lock_init(&mhi_cntrl->wlock);
935 INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
936 init_waitqueue_head(&mhi_cntrl->state_event);
938 mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
939 if (!mhi_cntrl->hiprio_wq) {
940 dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
945 mhi_cmd = mhi_cntrl->mhi_cmd;
949 mhi_event = mhi_cntrl->mhi_event;
950 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
955 mhi_event->mhi_cntrl = mhi_cntrl;
965 mhi_chan = mhi_cntrl->mhi_chan;
966 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
972 mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
976 if (mhi_cntrl->bounce_buf) {
977 mhi_cntrl->map_single = mhi_map_single_use_bb;
978 mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
980 mhi_cntrl->map_single = mhi_map_single_no_bb;
981 mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
985 ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
990 mhi_cntrl->family_number = FIELD_GET(SOC_HW_VERSION_FAM_NUM_BMSK, soc_info);
991 mhi_cntrl->device_number = FIELD_GET(SOC_HW_VERSION_DEV_NUM_BMSK, soc_info);
992 mhi_cntrl->major_version = FIELD_GET(SOC_HW_VERSION_MAJOR_VER_BMSK, soc_info);
993 mhi_cntrl->minor_version = FIELD_GET(SOC_HW_VERSION_MINOR_VER_BMSK, soc_info);
995 mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
996 if (mhi_cntrl->index < 0) {
997 ret = mhi_cntrl->index;
1001 ret = mhi_init_irq_setup(mhi_cntrl);
1006 mhi_dev = mhi_alloc_device(mhi_cntrl);
1008 dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
1014 mhi_dev->mhi_cntrl = mhi_cntrl;
1015 dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index);
1025 mhi_cntrl->mhi_dev = mhi_dev;
1027 mhi_create_debugfs(mhi_cntrl);
1034 mhi_deinit_free_irq(mhi_cntrl);
1036 ida_free(&mhi_controller_ida, mhi_cntrl->index);
1038 destroy_workqueue(mhi_cntrl->hiprio_wq);
1040 kfree(mhi_cntrl->mhi_cmd);
1042 kfree(mhi_cntrl->mhi_event);
1043 vfree(mhi_cntrl->mhi_chan);
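
mhi_register_controller() (lines 912-916) refuses a controller that lacks any of: a parent cntrl_dev, mapped regs with reg_len, the runtime_get/runtime_put PM hooks, a status_cb, the read_reg/write_reg accessors, or at least one entry in irq[]. After parsing the config it allocates the command rings, initializes the locks, work items and high-priority workqueue, reads the SoC hardware version fields, takes an ID from mhi_controller_ida, sets up IRQs, and registers the "mhi%d" parent device; each failure label unwinds one of those steps. A hypothetical probe-side call, reusing example_config from above (pdev, the mmio_*/irq_* locals and the my_* callbacks are placeholders a real driver supplies):

struct mhi_controller *mhi_cntrl;
int ret;

mhi_cntrl = mhi_alloc_controller();
if (!mhi_cntrl)
	return -ENOMEM;

mhi_cntrl->cntrl_dev = &pdev->dev;	/* e.g. the PCI function */
mhi_cntrl->regs = mmio_base;		/* ioremap()ed MHI register space */
mhi_cntrl->reg_len = mmio_len;
mhi_cntrl->irq = irq_table;		/* at least nr_irqs entries */
mhi_cntrl->nr_irqs = nr_vectors;
mhi_cntrl->runtime_get = my_runtime_get;
mhi_cntrl->runtime_put = my_runtime_put;
mhi_cntrl->status_cb = my_status_cb;
mhi_cntrl->read_reg = my_read_reg;
mhi_cntrl->write_reg = my_write_reg;

ret = mhi_register_controller(mhi_cntrl, &example_config);
if (ret) {
	mhi_free_controller(mhi_cntrl);
	return ret;
}

Teardown mirrors this: mhi_unregister_controller(), then mhi_free_controller().
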
1049 void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
1051 struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
1052 struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
1055 mhi_deinit_free_irq(mhi_cntrl);
1056 mhi_destroy_debugfs(mhi_cntrl);
1058 destroy_workqueue(mhi_cntrl->hiprio_wq);
1059 kfree(mhi_cntrl->mhi_cmd);
1060 kfree(mhi_cntrl->mhi_event);
1063 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
1069 vfree(mhi_cntrl->mhi_chan);
1074 ida_free(&mhi_controller_ida, mhi_cntrl->index);
1080 struct mhi_controller *mhi_cntrl;
1082 mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL);
1084 return mhi_cntrl;
1088 void mhi_free_controller(struct mhi_controller *mhi_cntrl)
1090 kfree(mhi_cntrl);
1094 int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
1096 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1100 mutex_lock(&mhi_cntrl->pm_mutex);
1102 ret = mhi_init_dev_ctxt(mhi_cntrl);
1106 ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_off);
1112 if (bhi_off >= mhi_cntrl->reg_len) {
1114 bhi_off, mhi_cntrl->reg_len);
1118 mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off;
1120 if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) {
1121 ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
1128 if (bhie_off >= mhi_cntrl->reg_len) {
1131 bhie_off, mhi_cntrl->reg_len);
1135 mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
1138 if (mhi_cntrl->rddm_size) {
1143 memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
1149 mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
1150 mhi_cntrl->rddm_size);
1151 if (mhi_cntrl->rddm_image) {
1152 ret = mhi_rddm_prepare(mhi_cntrl,
1153 mhi_cntrl->rddm_image);
1155 mhi_free_bhie_table(mhi_cntrl,
1156 mhi_cntrl->rddm_image);
1162 mutex_unlock(&mhi_cntrl->pm_mutex);
1167 mhi_deinit_dev_ctxt(mhi_cntrl);
1170 mutex_unlock(&mhi_cntrl->pm_mutex);
1176 void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
1178 if (mhi_cntrl->fbc_image) {
1179 mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
1180 mhi_cntrl->fbc_image = NULL;
1183 if (mhi_cntrl->rddm_image) {
1184 mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
1185 mhi_cntrl->rddm_image = NULL;
1188 mhi_cntrl->bhi = NULL;
1189 mhi_cntrl->bhie = NULL;
1191 mhi_deinit_dev_ctxt(mhi_cntrl);
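
mhi_prepare_for_power_up() allocates the device context, then maps BHI via the BHIOFF register offset (bounds-checked against reg_len) and, when firmware download or RDDM is in use, maps BHIe via BHIEOFF likewise; with rddm_size set it also clears the BHIe RX vector registers and pre-arms an RDDM table so a crash dump stays collectable. mhi_unprepare_after_power_down() frees the images and contexts and clears the cached bhi/bhie pointers. The lifecycle around these two calls, using the exported MHI API (error handling trimmed):

ret = mhi_prepare_for_power_up(mhi_cntrl);	/* contexts + BHI/BHIe setup */
if (ret)
	return ret;

ret = mhi_sync_power_up(mhi_cntrl);		/* boot the device, wait for it */
if (ret) {
	mhi_unprepare_after_power_down(mhi_cntrl);
	return ret;
}

/* ... controller in service ... */

mhi_power_down(mhi_cntrl, true);		/* graceful shutdown */
mhi_unprepare_after_power_down(mhi_cntrl);
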
1214 struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
1228 if (mhi_cntrl->mhi_dev) {
1230 dev->parent = &mhi_cntrl->mhi_dev->dev;
1233 dev->parent = mhi_cntrl->cntrl_dev;
1236 mhi_dev->mhi_cntrl = mhi_cntrl;
1245 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1288 mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];
1322 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1356 mhi_reset_chan(mhi_cntrl, mhi_chan);
1375 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
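
The last fragments are from the MHI bus's probe and remove paths: mhi_driver_probe() wires a client driver's callbacks to its channels (including the DL channel's event ring, line 1288), and mhi_driver_remove() disables each channel, resets it (line 1356) and frees its context (line 1375). A hypothetical client that would be bound through this path; the struct mhi_driver fields and callback signatures are the real API, everything named my_* is a placeholder:

static void my_client_xfer_cb(struct mhi_device *mhi_dev,
			      struct mhi_result *result)
{
	/* Completion of a queued UL/DL buffer */
}

static int my_client_probe(struct mhi_device *mhi_dev,
			   const struct mhi_device_id *id)
{
	/* Bring up the UL/DL channels and allocate their transfer rings */
	return mhi_prepare_for_transfer(mhi_dev);
}

static void my_client_remove(struct mhi_device *mhi_dev)
{
	/* Reset the channels and free their rings */
	mhi_unprepare_from_transfer(mhi_dev);
}

static const struct mhi_device_id my_client_match[] = {
	{ .chan = "IPCR" },
	{},
};

static struct mhi_driver my_client_driver = {
	.probe = my_client_probe,
	.remove = my_client_remove,
	.ul_xfer_cb = my_client_xfer_cb,
	.dl_xfer_cb = my_client_xfer_cb,
	.id_table = my_client_match,
	.driver = {
		.name = "my_mhi_client",
	},
};
module_mhi_driver(my_client_driver);
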