// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include "internal.h"

const char * const mhi_ee_str[MHI_EE_MAX] = {
	[MHI_EE_PBL] = "PBL",
	[MHI_EE_SBL] = "SBL",
	[MHI_EE_AMSS] = "AMSS",
	[MHI_EE_RDDM] = "RDDM",
	[MHI_EE_WFW] = "WFW",
	[MHI_EE_PTHRU] = "PASS THRU",
	[MHI_EE_EDL] = "EDL",
	[MHI_EE_DISABLE_TRANSITION] = "DISABLE",
	[MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
};

const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
	[DEV_ST_TRANSITION_PBL] = "PBL",
	[DEV_ST_TRANSITION_READY] = "READY",
	[DEV_ST_TRANSITION_SBL] = "SBL",
	[DEV_ST_TRANSITION_MISSION_MODE] = "MISSION_MODE",
	[DEV_ST_TRANSITION_SYS_ERR] = "SYS_ERR",
	[DEV_ST_TRANSITION_DISABLE] = "DISABLE",
};

const char * const mhi_state_str[MHI_STATE_MAX] = {
	[MHI_STATE_RESET] = "RESET",
	[MHI_STATE_READY] = "READY",
	[MHI_STATE_M0] = "M0",
	[MHI_STATE_M1] = "M1",
	[MHI_STATE_M2] = "M2",
	[MHI_STATE_M3] = "M3",
	[MHI_STATE_M3_FAST] = "M3_FAST",
	[MHI_STATE_BHI] = "BHI",
	[MHI_STATE_SYS_ERR] = "SYS_ERR",
};

static const char * const mhi_pm_state_str[] = {
	[MHI_PM_STATE_DISABLE] = "DISABLE",
	[MHI_PM_STATE_POR] = "POR",
	[MHI_PM_STATE_M0] = "M0",
	[MHI_PM_STATE_M2] = "M2",
	[MHI_PM_STATE_M3_ENTER] = "M?->M3",
	[MHI_PM_STATE_M3] = "M3",
	[MHI_PM_STATE_M3_EXIT] = "M3->M0",
	[MHI_PM_STATE_FW_DL_ERR] = "FW DL Error",
	[MHI_PM_STATE_SYS_ERR_DETECT] = "SYS_ERR Detect",
	[MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS_ERR Process",
	[MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
	[MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect",
};

const char *to_mhi_pm_state_str(enum mhi_pm_state state)
{
	int index = find_last_bit((unsigned long *)&state, 32);

	if (index >= ARRAY_SIZE(mhi_pm_state_str))
		return "Invalid State";

	return mhi_pm_state_str[index];
}
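/*
 * Note on the lookup above: the pm code tracks the PM state in bitmask form
 * (one bit per state), while mhi_pm_state_str[] is indexed by bit position.
 * find_last_bit() maps the highest set bit back to its table index; anything
 * outside the table is reported as "Invalid State".
 */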

static ssize_t serial_number_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	return snprintf(buf, PAGE_SIZE, "Serial Number: %u\n",
			mhi_cntrl->serial_number);
}
static DEVICE_ATTR_RO(serial_number);

static ssize_t oem_pk_hash_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int i, cnt = 0;

	for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
				"OEMPKHASH[%d]: 0x%x\n", i,
				mhi_cntrl->oem_pk_hash[i]);

	return cnt;
}
static DEVICE_ATTR_RO(oem_pk_hash);

static struct attribute *mhi_dev_attrs[] = {
	&dev_attr_serial_number.attr,
	&dev_attr_oem_pk_hash.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mhi_dev);
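/*
 * ATTRIBUTE_GROUPS(mhi_dev) generates mhi_dev_groups, which is hooked up as
 * .dev_groups on mhi_bus_type at the bottom of this file, so every device on
 * the MHI bus exposes read-only serial_number and oem_pk_hash files in sysfs.
 */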

/* MHI protocol requires the transfer ring to be aligned with ring length */
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
				  struct mhi_ring *ring,
				  u64 len)
{
	ring->alloc_size = len + (len - 1);
	ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size,
					       &ring->dma_handle, GFP_KERNEL);
	if (!ring->pre_aligned)
		return -ENOMEM;

	ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
	ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);

	return 0;
}
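/*
 * A worked example for the helper above: over-allocating len + (len - 1)
 * bytes guarantees that a len-byte region aligned to len fits inside the
 * allocation, and the mask arithmetic rounds dma_handle up to the next
 * multiple of len (assuming len is a power of two).  With len = 0x1000 and
 * dma_handle = 0x8010, iommu_base becomes 0x9000 and base points 0xff0
 * bytes into pre_aligned.
 */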

void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}

	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}

int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, ret;

	/* Setup BHI_INTVEC IRQ */
	ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
				   mhi_intvec_threaded_handler,
				   IRQF_SHARED | IRQF_NO_SUSPEND,
				   "bhi", mhi_cntrl);
	if (ret)
		return ret;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
			dev_err(dev, "irq %d not available for event ring\n",
				mhi_event->irq);
			ret = -EINVAL;
			goto error_request;
		}

		ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
				  mhi_irq_handler,
				  IRQF_SHARED | IRQF_NO_SUSPEND,
				  "mhi", mhi_event);
		if (ret) {
			dev_err(dev, "Error requesting irq:%d for ev:%d\n",
				mhi_cntrl->irq[mhi_event->irq], i);
			goto error_request;
		}
	}

	return 0;

error_request:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}
	free_irq(mhi_cntrl->irq[0], mhi_cntrl);

	return ret;
}
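/*
 * In the two functions above, irq[0] is always the BHI interrupt, and each
 * event ring stores an index (mhi_event->irq) into the controller's irq[]
 * table rather than a Linux IRQ number.  The error path walks the rings that
 * were already set up in reverse order so that only successfully requested
 * vectors are freed.
 */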

void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event *mhi_event;
	struct mhi_ring *ring;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
		ring = &mhi_cmd->ring;
		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	mhi_free_coherent(mhi_cntrl,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		ring = &mhi_event->ring;
		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

	kfree(mhi_ctxt);
	mhi_cntrl->mhi_ctxt = NULL;
}

int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	struct mhi_ctxt *mhi_ctxt;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	struct mhi_cmd *mhi_cmd;
	u32 tmp;
	int ret = -ENOMEM, i;

	atomic_set(&mhi_cntrl->dev_wake, 0);
	atomic_set(&mhi_cntrl->pending_pkts, 0);

	mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
	if (!mhi_ctxt)
		return -ENOMEM;

	/* Setup channel ctxt */
	mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl,
						 sizeof(*mhi_ctxt->chan_ctxt) *
						 mhi_cntrl->max_chan,
						 &mhi_ctxt->chan_ctxt_addr,
						 GFP_KERNEL);
	if (!mhi_ctxt->chan_ctxt)
		goto error_alloc_chan_ctxt;

	mhi_chan = mhi_cntrl->mhi_chan;
	chan_ctxt = mhi_ctxt->chan_ctxt;
	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
		/* Skip if it is an offload channel */
		if (mhi_chan->offload_ch)
			continue;

		tmp = chan_ctxt->chcfg;
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
		tmp &= ~CHAN_CTX_BRSTMODE_MASK;
		tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT);
		tmp &= ~CHAN_CTX_POLLCFG_MASK;
		tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT);
		chan_ctxt->chcfg = tmp;

		chan_ctxt->chtype = mhi_chan->type;
		chan_ctxt->erindex = mhi_chan->er_index;

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
		mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
	}

	/* Setup event context */
	mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl,
					       sizeof(*mhi_ctxt->er_ctxt) *
					       mhi_cntrl->total_ev_rings,
					       &mhi_ctxt->er_ctxt_addr,
					       GFP_KERNEL);
	if (!mhi_ctxt->er_ctxt)
		goto error_alloc_er_ctxt;

	er_ctxt = mhi_ctxt->er_ctxt;
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if it is an offload event */
		if (mhi_event->offload_ev)
			continue;

		tmp = er_ctxt->intmod;
		tmp &= ~EV_CTX_INTMODC_MASK;
		tmp &= ~EV_CTX_INTMODT_MASK;
		tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT);
		er_ctxt->intmod = tmp;

		er_ctxt->ertype = MHI_ER_TYPE_VALID;
		er_ctxt->msivec = mhi_event->irq;
		mhi_event->db_cfg.db_mode = true;

		ring->el_size = sizeof(struct mhi_tre);
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_er;

		/*
		 * If the read pointer equals the write pointer, then the
		 * ring is empty
		 */
		ring->rp = ring->wp = ring->base;
		er_ctxt->rbase = ring->iommu_base;
		er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
		er_ctxt->rlen = ring->len;
		ring->ctxt_wp = &er_ctxt->wp;
	}

	/* Setup cmd context */
	ret = -ENOMEM;
	mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl,
						sizeof(*mhi_ctxt->cmd_ctxt) *
						NR_OF_CMD_RINGS,
						&mhi_ctxt->cmd_ctxt_addr,
						GFP_KERNEL);
	if (!mhi_ctxt->cmd_ctxt)
		goto error_alloc_er;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->el_size = sizeof(struct mhi_tre);
		ring->elements = CMD_EL_PER_RING;
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_cmd;

		ring->rp = ring->wp = ring->base;
		cmd_ctxt->rbase = ring->iommu_base;
		cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
		cmd_ctxt->rlen = ring->len;
		ring->ctxt_wp = &cmd_ctxt->wp;
	}

	mhi_cntrl->mhi_ctxt = mhi_ctxt;

	return 0;

error_alloc_cmd:
	for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	mhi_free_coherent(mhi_cntrl,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
	i = mhi_cntrl->total_ev_rings;
	mhi_event = mhi_cntrl->mhi_event + i;

error_alloc_er:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev)
			continue;

		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

error_alloc_er_ctxt:
	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

error_alloc_chan_ctxt:
	kfree(mhi_ctxt);

	return ret;
}
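/*
 * The chan_ctxt/er_ctxt/cmd_ctxt arrays built above live in DMA-coherent
 * memory because the device reads them directly; their bus addresses are
 * programmed into the CCABAP/ECABAP/CRCBAP registers by mhi_init_mmio()
 * below.  The host-side ring state (base/rp/wp) stays in the mhi_ring
 * structures, while the context entries carry the IOVA view shared with
 * the device.
 */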

int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
	u32 val;
	int i, ret;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	void __iomem *base = mhi_cntrl->regs;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct {
		u32 offset;
		u32 mask;
		u32 shift;
		u32 val;
	} reg_info[] = {
		{
			CCABAP_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			CCABAP_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			ECABAP_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			ECABAP_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			CRCBAP_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			CRCBAP_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT,
			mhi_cntrl->total_ev_rings,
		},
		{
			MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT,
			mhi_cntrl->hw_ev_rings,
		},
		{
			MHICTRLBASE_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLBASE_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLLIMIT_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHICTRLLIMIT_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{ 0, 0, 0 }
	};

	dev_dbg(dev, "Initializing MHI registers\n");

	/* Read channel db offset */
	ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK,
				 CHDBOFF_CHDBOFF_SHIFT, &val);
	if (ret) {
		dev_err(dev, "Unable to read CHDBOFF register\n");
		return -EIO;
	}

	if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) {
		dev_err(dev, "CHDB offset: 0x%x is out of range: 0x%zx\n",
			val, mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB));
		return -ERANGE;
	}

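	/*
	 * Each doorbell register is 8 bytes wide (lower/upper 32 bits), so
	 * channel n's doorbell lives at CHDBOFF + 8 * n.  MHI_DEV_WAKE_DB is
	 * the channel number reserved for the device wake doorbell, which is
	 * set up here before the per-channel doorbell addresses are assigned.
	 */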
	/* Setup wake db */
	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
	mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
	mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
	mhi_cntrl->wake_set = false;

	/* Setup channel db address for each channel in tre_ring */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
		mhi_chan->tre_ring.db_addr = base + val;

	/* Read event ring db offset */
	ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK,
				 ERDBOFF_ERDBOFF_SHIFT, &val);
	if (ret) {
		dev_err(dev, "Unable to read ERDBOFF register\n");
		return -EIO;
	}

	if (val >= mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)) {
		dev_err(dev, "ERDB offset: 0x%x is out of range: 0x%zx\n",
			val, mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings));
		return -ERANGE;
	}

	/* Setup event db address for each ev_ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		mhi_event->ring.db_addr = base + val;
	}

	/* Setup DB register for primary CMD rings */
	mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;

	/* Write to MMIO registers */
	for (i = 0; reg_info[i].offset; i++)
		mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset,
				    reg_info[i].mask, reg_info[i].shift,
				    reg_info[i].val);

	return 0;
}

void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
			  struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];

	mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
			  tre_ring->pre_aligned, tre_ring->dma_handle);
	vfree(buf_ring->base);

	buf_ring->base = tre_ring->base = NULL;
	tre_ring->ctxt_wp = NULL;
	chan_ctxt->rbase = 0;
	chan_ctxt->rlen = 0;
	chan_ctxt->rp = 0;
	chan_ctxt->wp = 0;

	tmp = chan_ctxt->chcfg;
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
	chan_ctxt->chcfg = tmp;

	/* Update to all cores */
	smp_wmb();
}

int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
		       struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;
	int ret;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	tre_ring->el_size = sizeof(struct mhi_tre);
	tre_ring->len = tre_ring->el_size * tre_ring->elements;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
	ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
	if (ret)
		return -ENOMEM;

	buf_ring->el_size = sizeof(struct mhi_buf_info);
	buf_ring->len = buf_ring->el_size * buf_ring->elements;
	buf_ring->base = vzalloc(buf_ring->len);

	if (!buf_ring->base) {
		mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
				  tre_ring->pre_aligned, tre_ring->dma_handle);
		return -ENOMEM;
	}

	tmp = chan_ctxt->chcfg;
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= (MHI_CH_STATE_ENABLED << CHAN_CTX_CHSTATE_SHIFT);
	chan_ctxt->chcfg = tmp;

	chan_ctxt->rbase = tre_ring->iommu_base;
	chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
	chan_ctxt->rlen = tre_ring->len;
	tre_ring->ctxt_wp = &chan_ctxt->wp;

	tre_ring->rp = tre_ring->wp = tre_ring->base;
	buf_ring->rp = buf_ring->wp = buf_ring->base;
	mhi_chan->db_cfg.db_mode = 1;

	/* Update to all cores */
	smp_wmb();

	return 0;
}
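/*
 * In the helper above, tre_ring is the hardware-visible transfer ring
 * (DMA-coherent and length aligned), while buf_ring is a host-only shadow
 * of struct mhi_buf_info entries allocated with vzalloc() since the device
 * never touches it.  buf_ring may hold more elements than the transfer ring
 * (see the local_elements handling in parse_ch_cfg()).
 */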

static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	const struct mhi_event_config *event_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i, num;

	num = config->num_events;
	mhi_cntrl->total_ev_rings = num;
	mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
				       GFP_KERNEL);
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	/* Populate event ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < num; i++) {
		event_cfg = &config->event_cfg[i];

		mhi_event->er_index = i;
		mhi_event->ring.elements = event_cfg->num_elements;
		mhi_event->intmod = event_cfg->irq_moderation_ms;
		mhi_event->irq = event_cfg->irq;

		if (event_cfg->channel != U32_MAX) {
			/* This event ring has a dedicated channel */
			mhi_event->chan = event_cfg->channel;
			if (mhi_event->chan >= mhi_cntrl->max_chan) {
				dev_err(dev,
					"Event Ring channel not available\n");
				goto error_ev_cfg;
			}

			mhi_event->mhi_chan =
				&mhi_cntrl->mhi_chan[mhi_event->chan];
		}

		/* Priority is fixed to 1 for now */
		mhi_event->priority = 1;

		mhi_event->db_cfg.brstmode = event_cfg->mode;
		if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
			goto error_ev_cfg;

		if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_event->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_event->data_type = event_cfg->data_type;

		switch (mhi_event->data_type) {
		case MHI_ER_DATA:
			mhi_event->process_event = mhi_process_data_event_ring;
			break;
		case MHI_ER_CTRL:
			mhi_event->process_event = mhi_process_ctrl_ev_ring;
			break;
		default:
			dev_err(dev, "Event Ring type not supported\n");
			goto error_ev_cfg;
		}

		mhi_event->hw_ring = event_cfg->hardware_event;
		if (mhi_event->hw_ring)
			mhi_cntrl->hw_ev_rings++;
		else
			mhi_cntrl->sw_ev_rings++;

		mhi_event->cl_manage = event_cfg->client_managed;
		mhi_event->offload_ev = event_cfg->offload_channel;
		mhi_event++;
	}

	return 0;

error_ev_cfg:

	kfree(mhi_cntrl->mhi_event);
	return -EINVAL;
}

static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	const struct mhi_channel_config *ch_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i;
	u32 chan;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * The allocation of MHI channels can exceed 32KB in some scenarios,
	 * so vzalloc is used here to avoid any possible memory allocation
	 * failures
	 */
	mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
				      sizeof(*mhi_cntrl->mhi_chan));
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);

	/* Populate channel configurations */
	for (i = 0; i < config->num_channels; i++) {
		struct mhi_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel %d not available\n", chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;

		mhi_chan->tre_ring.elements = ch_cfg->num_elements;
		if (!mhi_chan->tre_ring.elements)
			goto error_chan_cfg;

		/*
		 * For some channels, the local ring length should be bigger
		 * than the transfer ring length due to internal logical
		 * channels in the device, so that the host can queue more
		 * buffers than the transfer ring length allows. For example,
		 * RSC channels should have a larger local channel length than
		 * the transfer ring length.
		 */
		mhi_chan->buf_ring.elements = ch_cfg->local_elements;
		if (!mhi_chan->buf_ring.elements)
			mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
		mhi_chan->er_index = ch_cfg->event_ring;
		mhi_chan->dir = ch_cfg->dir;

		/*
		 * For most channels, chtype is identical to the channel
		 * direction. So, if it is not defined, then assign the
		 * channel direction to chtype
		 */
		mhi_chan->type = ch_cfg->type;
		if (!mhi_chan->type)
			mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;

		mhi_chan->ee_mask = ch_cfg->ee_mask;
		mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
		mhi_chan->lpm_notify = ch_cfg->lpm_notify;
		mhi_chan->offload_ch = ch_cfg->offload_channel;
		mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
		mhi_chan->pre_alloc = ch_cfg->auto_queue;
		mhi_chan->auto_start = ch_cfg->auto_start;

		/*
		 * If MHI host allocates buffers, then the channel direction
		 * should be DMA_FROM_DEVICE
		 */
		if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		/*
		 * Bi-directional and directionless channels must be offload
		 * channels
		 */
		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
		     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		if (!mhi_chan->offload_ch) {
			mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
			if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
				dev_err(dev, "Invalid Door bell mode\n");
				goto error_chan_cfg;
			}
		}

		if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_chan->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_chan->configured = true;

		if (mhi_chan->lpm_notify)
			list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
	}

	return 0;

error_chan_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return -EINVAL;
}

static int parse_config(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	int ret;

	/* Parse MHI channel configuration */
	ret = parse_ch_cfg(mhi_cntrl, config);
	if (ret)
		return ret;

	/* Parse MHI event configuration */
	ret = parse_ev_cfg(mhi_cntrl, config);
	if (ret)
		goto error_ev_cfg;

	mhi_cntrl->timeout_ms = config->timeout_ms;
	if (!mhi_cntrl->timeout_ms)
		mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;

	mhi_cntrl->bounce_buf = config->use_bounce_buf;
	mhi_cntrl->buffer_len = config->buf_len;
	if (!mhi_cntrl->buffer_len)
		mhi_cntrl->buffer_len = MHI_MAX_MTU;

	/* By default, host is allowed to ring DB in both M0 and M2 states */
	mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
	if (config->m2_no_db)
		mhi_cntrl->db_access &= ~MHI_PM_M2;

	return 0;

error_ev_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}
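/*
 * Controller drivers feed the parsers above a static struct
 * mhi_controller_config (see include/linux/mhi.h).  A minimal, purely
 * illustrative sketch (values made up, not taken from this file):
 *
 *	static const struct mhi_channel_config ch_cfg[] = {
 *		{ .num = 20, .name = "IPCR", .num_elements = 64,
 *		  .event_ring = 1, .dir = DMA_TO_DEVICE,
 *		  .ee_mask = BIT(MHI_EE_AMSS),
 *		  .doorbell = MHI_DB_BRST_DISABLE },
 *	};
 *	static const struct mhi_event_config event_cfg[] = {
 *		{ .num_elements = 32, .irq_moderation_ms = 0, .irq = 1,
 *		  .channel = U32_MAX, .mode = MHI_DB_BRST_DISABLE,
 *		  .data_type = MHI_ER_CTRL, .hardware_event = false },
 *	};
 *	static const struct mhi_controller_config cfg = {
 *		.max_channels = 128, .timeout_ms = 2000,
 *		.num_channels = ARRAY_SIZE(ch_cfg), .ch_cfg = ch_cfg,
 *		.num_events = ARRAY_SIZE(event_cfg), .event_cfg = event_cfg,
 *	};
 */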

int mhi_register_controller(struct mhi_controller *mhi_cntrl,
			    const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	struct mhi_chan *mhi_chan;
	struct mhi_cmd *mhi_cmd;
	struct mhi_device *mhi_dev;
	u32 soc_info;
	int ret, i;

	if (!mhi_cntrl)
		return -EINVAL;

	if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
	    !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
	    !mhi_cntrl->write_reg)
		return -EINVAL;

	ret = parse_config(mhi_cntrl, config);
	if (ret)
		return -EINVAL;

	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
				     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
	if (!mhi_cntrl->mhi_cmd) {
		ret = -ENOMEM;
		goto error_alloc_cmd;
	}

	INIT_LIST_HEAD(&mhi_cntrl->transition_list);
	mutex_init(&mhi_cntrl->pm_mutex);
	rwlock_init(&mhi_cntrl->pm_lock);
	spin_lock_init(&mhi_cntrl->transition_lock);
	spin_lock_init(&mhi_cntrl->wlock);
	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
	init_waitqueue_head(&mhi_cntrl->state_event);

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
		spin_lock_init(&mhi_cmd->lock);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		/* Skip for offload events */
		if (mhi_event->offload_ev)
			continue;

		mhi_event->mhi_cntrl = mhi_cntrl;
		spin_lock_init(&mhi_event->lock);
		if (mhi_event->data_type == MHI_ER_CTRL)
			tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
				     (ulong)mhi_event);
		else
			tasklet_init(&mhi_event->task, mhi_ev_task,
				     (ulong)mhi_event);
	}

	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		mutex_init(&mhi_chan->mutex);
		init_completion(&mhi_chan->completion);
		rwlock_init(&mhi_chan->lock);

		/* used in setting bei field of TRE */
		mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
		mhi_chan->intmod = mhi_event->intmod;
	}

	if (mhi_cntrl->bounce_buf) {
		mhi_cntrl->map_single = mhi_map_single_use_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
	} else {
		mhi_cntrl->map_single = mhi_map_single_no_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
	}

	/* Read the MHI device info */
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
			   SOC_HW_VERSION_OFFS, &soc_info);
	if (ret)
		goto error_alloc_dev;

	mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >>
					SOC_HW_VERSION_FAM_NUM_SHFT;
	mhi_cntrl->device_number = (soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >>
					SOC_HW_VERSION_DEV_NUM_SHFT;
	mhi_cntrl->major_version = (soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >>
					SOC_HW_VERSION_MAJOR_VER_SHFT;
	mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >>
					SOC_HW_VERSION_MINOR_VER_SHFT;

	/* Register controller with MHI bus */
	mhi_dev = mhi_alloc_device(mhi_cntrl);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
		ret = PTR_ERR(mhi_dev);
		goto error_alloc_dev;
	}

	mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
	mhi_dev->mhi_cntrl = mhi_cntrl;
	dev_set_name(&mhi_dev->dev, "%s", dev_name(mhi_cntrl->cntrl_dev));
	mhi_dev->name = dev_name(mhi_cntrl->cntrl_dev);

	/* Init wakeup source */
	device_init_wakeup(&mhi_dev->dev, true);

	ret = device_add(&mhi_dev->dev);
	if (ret)
		goto error_add_dev;

	mhi_cntrl->mhi_dev = mhi_dev;

	mhi_create_debugfs(mhi_cntrl);

	return 0;

error_add_dev:
	put_device(&mhi_dev->dev);

error_alloc_dev:
	kfree(mhi_cntrl->mhi_cmd);

error_alloc_cmd:
	vfree(mhi_cntrl->mhi_chan);
	kfree(mhi_cntrl->mhi_event);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_register_controller);
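/*
 * Typical controller-driver flow (a sketch; the power-up helpers live in
 * pm.c/boot.c, not in this file): mhi_alloc_controller(), fill in the regs,
 * the irq[] table and the mandatory callbacks checked above, then
 * mhi_register_controller(), mhi_prepare_for_power_up() and finally
 * mhi_sync_power_up() or mhi_async_power_up().
 */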

void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
	struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
	unsigned int i;

	mhi_destroy_debugfs(mhi_cntrl);

	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_event);

	/* Drop the references to MHI devices created for channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->mhi_dev)
			continue;

		put_device(&mhi_chan->mhi_dev->dev);
	}
	vfree(mhi_cntrl->mhi_chan);

	device_del(&mhi_dev->dev);
	put_device(&mhi_dev->dev);
}
EXPORT_SYMBOL_GPL(mhi_unregister_controller);

struct mhi_controller *mhi_alloc_controller(void)
{
	struct mhi_controller *mhi_cntrl;

	mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL);

	return mhi_cntrl;
}
EXPORT_SYMBOL_GPL(mhi_alloc_controller);

void mhi_free_controller(struct mhi_controller *mhi_cntrl)
{
	kfree(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_free_controller);

int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 bhie_off;
	int ret;

	mutex_lock(&mhi_cntrl->pm_mutex);

	ret = mhi_init_dev_ctxt(mhi_cntrl);
	if (ret)
		goto error_dev_ctxt;

	/*
	 * Allocate the RDDM table if specified; this table is for debugging
	 * purposes
	 */
	if (mhi_cntrl->rddm_size) {
		mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
				     mhi_cntrl->rddm_size);

		/*
		 * This controller supports RDDM, so we need to manually clear
		 * BHIE RX registers since POR values are undefined.
		 */
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
				   &bhie_off);
		if (ret) {
			dev_err(dev, "Error getting BHIE offset\n");
			goto bhie_error;
		}

		mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
		memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
			  0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
			  4);

		if (mhi_cntrl->rddm_image)
			mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
	}

	mhi_cntrl->pre_init = true;

	mutex_unlock(&mhi_cntrl->pm_mutex);

	return 0;

bhie_error:
	if (mhi_cntrl->rddm_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
		mhi_cntrl->rddm_image = NULL;
	}

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);

void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
{
	if (mhi_cntrl->fbc_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
		mhi_cntrl->fbc_image = NULL;
	}

	if (mhi_cntrl->rddm_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
		mhi_cntrl->rddm_image = NULL;
	}

	mhi_deinit_dev_ctxt(mhi_cntrl);
	mhi_cntrl->pre_init = false;
}
EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);

static void mhi_release_device(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	/*
	 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created if the mhi_dev
	 * associated with it is NULL. This scenario will happen during the
	 * controller suspend and resume.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;

	kfree(mhi_dev);
}

struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev;
	struct device *dev;

	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_bus_type;
	dev->release = mhi_release_device;
	dev->parent = mhi_cntrl->cntrl_dev;
	mhi_dev->mhi_cntrl = mhi_cntrl;
	mhi_dev->dev_wake = 0;

	return mhi_dev;
}

static int mhi_driver_probe(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device_driver *drv = dev->driver;
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	struct mhi_event *mhi_event;
	struct mhi_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_chan *dl_chan = mhi_dev->dl_chan;
	int ret;

	/* Bring device out of LPM */
	ret = mhi_device_get_sync(mhi_dev);
	if (ret)
		return ret;

	ret = -EINVAL;

	if (ul_chan) {
		/*
		 * If channel supports LPM notifications then status_cb should
		 * be provided
		 */
		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
			goto exit_probe;

		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
		if (ul_chan->auto_start) {
			ret = mhi_prepare_channel(mhi_cntrl, ul_chan);
			if (ret)
				goto exit_probe;
		}
	}

	ret = -EINVAL;
	if (dl_chan) {
		/*
		 * If channel supports LPM notifications then status_cb should
		 * be provided
		 */
		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
			goto exit_probe;

		mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];

		/*
		 * If the channel event ring is managed by client, then
		 * status_cb must be provided so that the framework can
		 * notify pending data
		 */
		if (mhi_event->cl_manage && !mhi_drv->status_cb)
			goto exit_probe;

		dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
	}

	/* Call the user provided probe function */
	ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
	if (ret)
		goto exit_probe;

	if (dl_chan && dl_chan->auto_start)
		mhi_prepare_channel(mhi_cntrl, dl_chan);

	mhi_device_put(mhi_dev);

	return ret;

exit_probe:
	mhi_unprepare_from_transfer(mhi_dev);

	mhi_device_put(mhi_dev);

	return ret;
}

static int mhi_driver_remove(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	enum mhi_ch_state ch_state[] = {
		MHI_CH_STATE_DISABLED,
		MHI_CH_STATE_DISABLED
	};
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/* Reset both channels */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		/* Wake all threads waiting for completion */
		write_lock_irq(&mhi_chan->lock);
		mhi_chan->ccs = MHI_EV_CC_INVALID;
		complete_all(&mhi_chan->completion);
		write_unlock_irq(&mhi_chan->lock);

		/* Set the channel state to disabled */
		mutex_lock(&mhi_chan->mutex);
		write_lock_irq(&mhi_chan->lock);
		ch_state[dir] = mhi_chan->ch_state;
		mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
		write_unlock_irq(&mhi_chan->lock);

		/* Reset the non-offload channel */
		if (!mhi_chan->offload_ch)
			mhi_reset_chan(mhi_cntrl, mhi_chan);

		mutex_unlock(&mhi_chan->mutex);
	}

	mhi_drv->remove(mhi_dev);

	/* De-init channel if it was enabled */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->mutex);

		if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
		     ch_state[dir] == MHI_CH_STATE_STOP) &&
		    !mhi_chan->offload_ch)
			mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;

		mutex_unlock(&mhi_chan->mutex);
	}

	while (mhi_dev->dev_wake)
		mhi_device_put(mhi_dev);

	return 0;
}

int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
{
	struct device_driver *driver = &mhi_drv->driver;

	if (!mhi_drv->probe || !mhi_drv->remove)
		return -EINVAL;

	driver->bus = &mhi_bus_type;
	driver->owner = owner;
	driver->probe = mhi_driver_probe;
	driver->remove = mhi_driver_remove;

	return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_driver_register);
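/*
 * Client drivers normally go through the mhi_driver_register() wrapper from
 * include/linux/mhi.h, which passes THIS_MODULE as the owner for them.
 */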

void mhi_driver_unregister(struct mhi_driver *mhi_drv)
{
	driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_driver_unregister);

static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
					mhi_dev->name);
}

static int mhi_match(struct device *dev, struct device_driver *drv)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	const struct mhi_device_id *id;

	/*
	 * If the device is a controller type then there is no client driver
	 * associated with it
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}

	return 0;
};
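/*
 * Matching is purely by channel name: mhi_dev->name is compared against the
 * driver's id_table entries, and mhi_uevent() above advertises the same name
 * through MHI_DEVICE_MODALIAS_FMT so that client modules can be auto-loaded.
 */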

struct bus_type mhi_bus_type = {
	.name = "mhi",
	.dev_name = "mhi",
	.match = mhi_match,
	.uevent = mhi_uevent,
	.dev_groups = mhi_dev_groups,
};

static int __init mhi_init(void)
{
	mhi_debugfs_init();
	return bus_register(&mhi_bus_type);
}

static void __exit mhi_exit(void)
{
	mhi_debugfs_exit();
	bus_unregister(&mhi_bus_type);
}

postcore_initcall(mhi_init);
module_exit(mhi_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Host Interface");
