Lines Matching defs:mhi_cntrl

25 static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
28 static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx,
31 struct device *dev = &mhi_cntrl->mhi_dev->dev;
36 mutex_lock(&mhi_cntrl->event_lock);
37 ring = &mhi_cntrl->mhi_event[ring_idx].ring;
38 ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx];
40 ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx);
54 mutex_unlock(&mhi_cntrl->event_lock);
61 mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);
66 mutex_unlock(&mhi_cntrl->event_lock);
71 static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
77 event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
85 ret = mhi_ep_send_event(mhi_cntrl, ring->er_index, event, MHI_TRE_DATA_GET_BEI(tre));
86 kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
91 int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
96 event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
103 ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
104 kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
109 int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env)
114 event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
121 ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
122 kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
127 static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code)
129 struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
133 event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
141 ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
142 kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);
149 struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
150 struct device *dev = &mhi_cntrl->mhi_dev->dev;
160 if ((ch_id >= mhi_cntrl->max_chan) || !mhi_cntrl->mhi_chan[ch_id].name) {
165 mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
166 ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring;
175 ret = mhi_ep_ring_start(mhi_cntrl, ch_ring,
176 (union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]);
179 ret = mhi_ep_send_cmd_comp_event(mhi_cntrl,
190 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
193 mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
195 ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
214 ret = mhi_ep_create_device(mhi_cntrl, ch_id);
217 mhi_ep_handle_syserr(mhi_cntrl);
223 mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id);
235 mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id);
246 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
249 mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
251 ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
269 mhi_ep_ring_reset(mhi_cntrl, ch_ring);
280 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
283 mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
285 ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
312 struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
313 struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
319 static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
324 struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
325 struct device *dev = &mhi_cntrl->mhi_dev->dev;
364 ret = mhi_cntrl->read_from_host(mhi_cntrl, &buf_info);
392 ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
408 ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
432 struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
438 mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
455 result.buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
460 ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
463 kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
475 kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
484 struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
497 ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
526 ret = mhi_cntrl->write_to_host(mhi_cntrl, &buf_info);
544 ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code);
564 static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
567 struct device *dev = &mhi_cntrl->mhi_dev->dev;
571 mhi_ep_mmio_update_ner(mhi_cntrl);
574 mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings);
576 ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
577 ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
581 mhi_ep_mmio_get_chc_base(mhi_cntrl);
584 ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa,
585 &mhi_cntrl->ch_ctx_cache_phys,
586 (void __iomem **) &mhi_cntrl->ch_ctx_cache,
594 mhi_ep_mmio_get_erc_base(mhi_cntrl);
597 ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa,
598 &mhi_cntrl->ev_ctx_cache_phys,
599 (void __iomem **) &mhi_cntrl->ev_ctx_cache,
607 mhi_ep_mmio_get_crc_base(mhi_cntrl);
610 ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa,
611 &mhi_cntrl->cmd_ctx_cache_phys,
612 (void __iomem **) &mhi_cntrl->cmd_ctx_cache,
620 ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring,
621 (union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache);
630 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
631 (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);
634 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
635 (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);
638 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
639 (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
644 static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
648 ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
649 ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
652 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
653 (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);
655 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
656 (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);
658 mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
659 (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
662 static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl)
669 mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl);
670 mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl);
673 static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl)
675 struct device *dev = &mhi_cntrl->mhi_dev->dev;
684 mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
687 mhi_ep_mmio_clear_reset(mhi_cntrl);
698 ret = mhi_ep_cache_host_cfg(mhi_cntrl);
704 mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
707 mhi_ep_enable_int(mhi_cntrl);
714 struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work);
715 struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
716 struct device *dev = &mhi_cntrl->mhi_dev->dev;
748 struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
749 struct device *dev = &mhi_cntrl->mhi_dev->dev;
758 spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
759 list_splice_tail_init(&mhi_cntrl->ch_db_list, &head);
760 spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);
767 chan = &mhi_cntrl->mhi_chan[ring->ch_id];
785 kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
792 kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
804 kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
809 kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
815 struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work);
816 struct device *dev = &mhi_cntrl->mhi_dev->dev;
822 spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
823 list_splice_tail_init(&mhi_cntrl->st_transition_list, &head);
824 spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);
833 ret = mhi_ep_set_m0_state(mhi_cntrl);
838 ret = mhi_ep_set_m3_state(mhi_cntrl);
850 static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int,
864 ring = &mhi_cntrl->mhi_chan[ch_id].ring;
865 item = kmem_cache_zalloc(mhi_cntrl->ring_item_cache, GFP_ATOMIC);
875 spin_lock(&mhi_cntrl->list_lock);
876 list_splice_tail_init(&head, &mhi_cntrl->ch_db_list);
877 spin_unlock(&mhi_cntrl->list_lock);
879 queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work);
888 static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
893 if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl))
900 ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask;
902 mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx);
903 mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i),
904 mhi_cntrl->chdb[i].status);
909 static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl,
919 spin_lock(&mhi_cntrl->list_lock);
920 list_add_tail(&item->node, &mhi_cntrl->st_transition_list);
921 spin_unlock(&mhi_cntrl->list_lock);
923 queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work);
933 struct mhi_ep_cntrl *mhi_cntrl = data;
934 struct device *dev = &mhi_cntrl->mhi_dev->dev;
940 int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS);
941 mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value);
946 mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
949 disable_irq_nosync(mhi_cntrl->irq);
950 schedule_work(&mhi_cntrl->reset_work);
954 mhi_ep_process_ctrl_interrupt(mhi_cntrl, state);
960 queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work);
964 mhi_ep_check_channel_interrupt(mhi_cntrl);
969 static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
977 for (i = 0; i < mhi_cntrl->max_chan; i++) {
978 mhi_chan = &mhi_cntrl->mhi_chan[i];
994 flush_workqueue(mhi_cntrl->wq);
997 device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device);
1000 for (i = 0; i < mhi_cntrl->max_chan; i++) {
1001 mhi_chan = &mhi_cntrl->mhi_chan[i];
1005 ch_ring = &mhi_cntrl->mhi_chan[i].ring;
1007 mhi_ep_ring_reset(mhi_cntrl, ch_ring);
1012 for (i = 0; i < mhi_cntrl->event_rings; i++) {
1013 ev_ring = &mhi_cntrl->mhi_event[i].ring;
1017 mutex_lock(&mhi_cntrl->event_lock);
1018 mhi_ep_ring_reset(mhi_cntrl, ev_ring);
1019 mutex_unlock(&mhi_cntrl->event_lock);
1023 mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring);
1025 mhi_ep_free_host_cfg(mhi_cntrl);
1026 mhi_ep_mmio_mask_interrupts(mhi_cntrl);
1028 mhi_cntrl->enabled = false;
1033 struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
1036 mhi_ep_power_down(mhi_cntrl);
1038 mutex_lock(&mhi_cntrl->state_lock);
1041 mhi_ep_mmio_reset(mhi_cntrl);
1042 cur_state = mhi_cntrl->mhi_state;
1050 mhi_ep_power_up(mhi_cntrl);
1052 mutex_unlock(&mhi_cntrl->state_lock);
1060 void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl)
1062 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1065 ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
1070 ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR);
1075 int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
1077 struct device *dev = &mhi_cntrl->mhi_dev->dev;
1084 mhi_ep_mmio_mask_interrupts(mhi_cntrl);
1085 mhi_ep_mmio_init(mhi_cntrl);
1087 mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)),
1089 if (!mhi_cntrl->mhi_event)
1093 mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0);
1094 for (i = 0; i < mhi_cntrl->max_chan; i++)
1095 mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i);
1096 for (i = 0; i < mhi_cntrl->event_rings; i++)
1097 mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i);
1099 mhi_cntrl->mhi_state = MHI_STATE_RESET;
1102 mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
1105 ret = mhi_ep_set_ready_state(mhi_cntrl);
1111 ret = mhi_ep_enable(mhi_cntrl);
1117 enable_irq(mhi_cntrl->irq);
1118 mhi_cntrl->enabled = true;
1123 kfree(mhi_cntrl->mhi_event);
1129 void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
1131 if (mhi_cntrl->enabled) {
1132 mhi_ep_abort_transfer(mhi_cntrl);
1133 kfree(mhi_cntrl->mhi_event);
1134 disable_irq(mhi_cntrl->irq);
1139 void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
1145 for (i = 0; i < mhi_cntrl->max_chan; i++) {
1146 mhi_chan = &mhi_cntrl->mhi_chan[i];
1153 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
1164 mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
1169 void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)
1175 for (i = 0; i < mhi_cntrl->max_chan; i++) {
1176 mhi_chan = &mhi_cntrl->mhi_chan[i];
1183 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
1194 mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
1204 mhi_dev->mhi_cntrl->mhi_dev = NULL;
1220 static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl,
1238 dev->parent = mhi_cntrl->cntrl_dev;
1241 dev->parent = &mhi_cntrl->mhi_dev->dev;
1243 mhi_dev->mhi_cntrl = mhi_cntrl;
1255 static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
1257 struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
1258 struct device *dev = mhi_cntrl->cntrl_dev;
1269 mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_XFER);
1287 dev_name(&mhi_cntrl->mhi_dev->dev),
1304 struct mhi_ep_cntrl *mhi_cntrl;
1311 mhi_cntrl = mhi_dev->mhi_cntrl;
1326 dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n",
1336 static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl,
1340 struct device *dev = mhi_cntrl->cntrl_dev;
1344 mhi_cntrl->max_chan = config->max_channels;
1350 mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan),
1352 if (!mhi_cntrl->mhi_chan)
1361 if (chan >= mhi_cntrl->max_chan) {
1363 chan, mhi_cntrl->max_chan);
1374 mhi_chan = &mhi_cntrl->mhi_chan[chan];
1384 kfree(mhi_cntrl->mhi_chan);
1393 int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
1399 if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq)
1402 ret = mhi_ep_chan_init(mhi_cntrl, config);
1406 mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
1407 if (!mhi_cntrl->mhi_cmd) {
1412 mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el",
1415 if (!mhi_cntrl->ev_ring_el_cache) {
1420 mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0,
1422 if (!mhi_cntrl->tre_buf_cache) {
1427 mhi_cntrl->ring_item_cache = kmem_cache_create("mhi_ep_ring_item",
1430 if (!mhi_cntrl->ring_item_cache) {
1434 INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
1435 INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
1436 INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
1437 INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker);
1439 mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
1440 if (!mhi_cntrl->wq) {
1445 INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
1446 INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
1447 spin_lock_init(&mhi_cntrl->list_lock);
1448 mutex_init(&mhi_cntrl->state_lock);
1449 mutex_init(&mhi_cntrl->event_lock);
1452 mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version);
1453 mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
1460 mhi_cntrl->index = ret;
1462 irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN);
1463 ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH,
1464 "doorbell_irq", mhi_cntrl);
1466 dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n");
1471 mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER);
1473 dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n");
1478 ret = dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index);
1483 mhi_cntrl->mhi_dev = mhi_dev;
1496 free_irq(mhi_cntrl->irq, mhi_cntrl);
1498 ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
1500 destroy_workqueue(mhi_cntrl->wq);
1502 kmem_cache_destroy(mhi_cntrl->ring_item_cache);
1504 kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
1506 kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
1508 kfree(mhi_cntrl->mhi_cmd);
1510 kfree(mhi_cntrl->mhi_chan);
1520 void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
1522 struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev;
1524 destroy_workqueue(mhi_cntrl->wq);
1526 free_irq(mhi_cntrl->irq, mhi_cntrl);
1528 kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
1529 kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
1530 kmem_cache_destroy(mhi_cntrl->ring_item_cache);
1531 kfree(mhi_cntrl->mhi_cmd);
1532 kfree(mhi_cntrl->mhi_chan);
1537 ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
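
The lines above index every reference to mhi_cntrl in the MHI endpoint core: a controller driver supplies a populated struct mhi_ep_cntrl (cntrl_dev, mmio and irq, plus the raise_irq, alloc_map/unmap_free and read_from_host/write_to_host callbacks the core invokes throughout), registers it with mhi_ep_register_controller() and a struct mhi_ep_cntrl_config, and later brings the stack up and down with mhi_ep_power_up()/mhi_ep_power_down(). The sketch below shows that flow end to end. It is a minimal, hypothetical example: the my_* helper names and the channel table contents are invented for illustration, and the struct mhi_ep_buf_info argument type plus the exact mhi_ep_cntrl_config/mhi_ep_channel_config field layouts are assumptions based on include/linux/mhi_ep.h of a matching kernel rather than on the listing itself (the listing only confirms mhi_version, max_channels and the callback call sites).

	/*
	 * Hypothetical glue code -- a sketch only, not taken from the file
	 * indexed above. It shows how a controller driver might populate
	 * struct mhi_ep_cntrl before handing it to the MHI endpoint core.
	 */
	#include <linux/dma-direction.h>
	#include <linux/errno.h>
	#include <linux/kernel.h>
	#include <linux/mhi_ep.h>
	#include <linux/module.h>

	/*
	 * Stub callbacks: a real driver maps host memory (outbound windows)
	 * and raises MSIs here. The signatures follow the call sites visible
	 * in the listing (raise_irq, alloc_map/unmap_free, read_from_host,
	 * write_to_host).
	 */
	static void my_raise_irq(struct mhi_ep_cntrl *mhi_cntrl, u32 vector)
	{
		/* Real driver: forward @vector to the host as an MSI. */
	}

	static int my_alloc_map(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
				phys_addr_t *phys, void __iomem **virt, size_t size)
	{
		return -ENOMEM;	/* Real driver: allocate and map an outbound window. */
	}

	static void my_unmap_free(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
				  phys_addr_t phys, void __iomem *virt, size_t size)
	{
		/* Real driver: tear down the mapping created in my_alloc_map(). */
	}

	static int my_read_from_host(struct mhi_ep_cntrl *mhi_cntrl,
				     struct mhi_ep_buf_info *buf_info)
	{
		return -EIO;	/* Real driver: copy/DMA the host buffer to the device. */
	}

	static int my_write_to_host(struct mhi_ep_cntrl *mhi_cntrl,
				    struct mhi_ep_buf_info *buf_info)
	{
		return -EIO;	/* Real driver: copy/DMA the device buffer to the host. */
	}

	/* Channel table and config: names, numbers and version are placeholders. */
	static const struct mhi_ep_channel_config my_mhi_channels[] = {
		{ .name = "LOOPBACK", .num = 0, .num_elements = 64, .dir = DMA_TO_DEVICE },
		{ .name = "LOOPBACK", .num = 1, .num_elements = 64, .dir = DMA_FROM_DEVICE },
	};

	static const struct mhi_ep_cntrl_config my_mhi_config = {
		.mhi_version  = 0x02030000,		/* placeholder; written to EP_MHIVER */
		.max_channels = 128,			/* becomes mhi_cntrl->max_chan */
		.num_channels = ARRAY_SIZE(my_mhi_channels),
		.ch_cfg       = my_mhi_channels,
	};

	static int my_mhi_ep_setup(struct mhi_ep_cntrl *mhi_cntrl, struct device *dev,
				   void __iomem *mmio, int irq)
	{
		int ret;

		/* The core rejects registration if any of these three are missing. */
		mhi_cntrl->cntrl_dev = dev;
		mhi_cntrl->mmio = mmio;
		mhi_cntrl->irq = irq;

		mhi_cntrl->raise_irq = my_raise_irq;
		mhi_cntrl->alloc_map = my_alloc_map;
		mhi_cntrl->unmap_free = my_unmap_free;
		mhi_cntrl->read_from_host = my_read_from_host;
		mhi_cntrl->write_to_host = my_write_to_host;

		ret = mhi_ep_register_controller(mhi_cntrl, &my_mhi_config);
		if (ret)
			return ret;

		/* Usually deferred until the link to the host is up. */
		ret = mhi_ep_power_up(mhi_cntrl);
		if (ret)
			mhi_ep_unregister_controller(mhi_cntrl);

		return ret;
	}

A real controller driver (a PCI endpoint function, for instance) would defer mhi_ep_power_up() until the host side of the link is ready, and would implement alloc_map()/read_from_host()/write_to_host() on top of its outbound address-translation and DMA machinery instead of the stubs above.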