// SPDX-License-Identifier: GPL-2.0
/*
 * PCI EPF driver for MHI Endpoint devices
 *
 * Copyright (C) 2023 Linaro Ltd.
 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 */

#include <linux/dmaengine.h>
#include <linux/mhi_ep.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

#define MHI_VERSION_1_0 0x01000000

#define to_epf_mhi(cntrl) container_of(cntrl, struct pci_epf_mhi, cntrl)

/* Platform specific flags */
#define MHI_EPF_USE_DMA BIT(0)

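/**
 * struct pci_epf_mhi_ep_info - Hardware specific MHI EP information
 * @config: MHI controller configuration (channels, MHI version)
 * @epf_header: PCI configuration space header for the function
 * @bar_num: BAR used to expose the MHI MMIO region to the host
 * @epf_flags: Flags passed to pci_epc_set_bar() for @bar_num
 * @msi_count: Number of MSI vectors advertised to the host
 * @mru: Maximum Receive Unit used by the MHI stack
 * @flags: Platform specific flags (e.g. MHI_EPF_USE_DMA)
 */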
struct pci_epf_mhi_ep_info {
	const struct mhi_ep_cntrl_config *config;
	struct pci_epf_header *epf_header;
	enum pci_barno bar_num;
	u32 epf_flags;
	u32 msi_count;
	u32 mru;
	u32 flags;
};

#define MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, direction)	\
	{							\
		.num = ch_num,					\
		.name = ch_name,				\
		.dir = direction,				\
	}

#define MHI_EP_CHANNEL_CONFIG_UL(ch_num, ch_name)		\
	MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, DMA_TO_DEVICE)

#define MHI_EP_CHANNEL_CONFIG_DL(ch_num, ch_name)		\
	MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, DMA_FROM_DEVICE)

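/*
 * Static channel table advertised to MHI 1.0 hosts. IDs, names and
 * directions mirror the host side channel definitions: even numbered (UL)
 * channels carry data from host to device, odd numbered (DL) channels from
 * device to host.
 */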
static const struct mhi_ep_channel_config mhi_v1_channels[] = {
	MHI_EP_CHANNEL_CONFIG_UL(0, "LOOPBACK"),
	MHI_EP_CHANNEL_CONFIG_DL(1, "LOOPBACK"),
	MHI_EP_CHANNEL_CONFIG_UL(2, "SAHARA"),
	MHI_EP_CHANNEL_CONFIG_DL(3, "SAHARA"),
	MHI_EP_CHANNEL_CONFIG_UL(4, "DIAG"),
	MHI_EP_CHANNEL_CONFIG_DL(5, "DIAG"),
	MHI_EP_CHANNEL_CONFIG_UL(6, "SSR"),
	MHI_EP_CHANNEL_CONFIG_DL(7, "SSR"),
	MHI_EP_CHANNEL_CONFIG_UL(8, "QDSS"),
	MHI_EP_CHANNEL_CONFIG_DL(9, "QDSS"),
	MHI_EP_CHANNEL_CONFIG_UL(10, "EFS"),
	MHI_EP_CHANNEL_CONFIG_DL(11, "EFS"),
	MHI_EP_CHANNEL_CONFIG_UL(12, "MBIM"),
	MHI_EP_CHANNEL_CONFIG_DL(13, "MBIM"),
	MHI_EP_CHANNEL_CONFIG_UL(14, "QMI"),
	MHI_EP_CHANNEL_CONFIG_DL(15, "QMI"),
	MHI_EP_CHANNEL_CONFIG_UL(16, "QMI"),
	MHI_EP_CHANNEL_CONFIG_DL(17, "QMI"),
	MHI_EP_CHANNEL_CONFIG_UL(18, "IP-CTRL-1"),
	MHI_EP_CHANNEL_CONFIG_DL(19, "IP-CTRL-1"),
	MHI_EP_CHANNEL_CONFIG_UL(20, "IPCR"),
	MHI_EP_CHANNEL_CONFIG_DL(21, "IPCR"),
	MHI_EP_CHANNEL_CONFIG_UL(32, "DUN"),
	MHI_EP_CHANNEL_CONFIG_DL(33, "DUN"),
	MHI_EP_CHANNEL_CONFIG_UL(46, "IP_SW0"),
	MHI_EP_CHANNEL_CONFIG_DL(47, "IP_SW0"),
};

static const struct mhi_ep_cntrl_config mhi_v1_config = {
	.max_channels = 128,
	.num_channels = ARRAY_SIZE(mhi_v1_channels),
	.ch_cfg = mhi_v1_channels,
	.mhi_version = MHI_VERSION_1_0,
};

static struct pci_epf_header sdx55_header = {
	.vendorid = PCI_VENDOR_ID_QCOM,
	.deviceid = 0x0306,
	.baseclass_code = PCI_BASE_CLASS_COMMUNICATION,
	.subclass_code = PCI_CLASS_COMMUNICATION_MODEM & 0xff,
	.interrupt_pin = PCI_INTERRUPT_INTA,
};

static const struct pci_epf_mhi_ep_info sdx55_info = {
	.config = &mhi_v1_config,
	.epf_header = &sdx55_header,
	.bar_num = BAR_0,
	.epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
	.msi_count = 32,
	.mru = 0x8000,
};

static struct pci_epf_header sm8450_header = {
	.vendorid = PCI_VENDOR_ID_QCOM,
	.deviceid = 0x0306,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin = PCI_INTERRUPT_INTA,
};

static const struct pci_epf_mhi_ep_info sm8450_info = {
	.config = &mhi_v1_config,
	.epf_header = &sm8450_header,
	.bar_num = BAR_0,
	.epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
	.msi_count = 32,
	.mru = 0x8000,
	.flags = MHI_EPF_USE_DMA,
};

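/**
 * struct pci_epf_mhi - Private data for an MHI endpoint function
 * @epc_features: Features of the parent endpoint controller
 * @info: Hardware specific information for the probed function
 * @mhi_cntrl: MHI endpoint controller instance
 * @epf: PCI endpoint function backing this device
 * @lock: Mutex serializing host address map/unmap and DMA transfers
 * @mmio: Kernel mapping of the MHI MMIO region
 * @mmio_phys: Physical address of the MHI MMIO region
 * @dma_chan_tx: DMA channel for device to host transfers
 * @dma_chan_rx: DMA channel for host to device transfers
 * @mmio_size: Size of the MHI MMIO region
 * @irq: Doorbell interrupt
 */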
struct pci_epf_mhi {
	const struct pci_epc_features *epc_features;
	const struct pci_epf_mhi_ep_info *info;
	struct mhi_ep_cntrl mhi_cntrl;
	struct pci_epf *epf;
	struct mutex lock;
	void __iomem *mmio;
	resource_size_t mmio_phys;
	struct dma_chan *dma_chan_tx;
	struct dma_chan *dma_chan_rx;
	u32 mmio_size;
	int irq;
};

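/*
 * Outbound address translation requires the mapped PCI address to respect
 * the EPC's alignment constraint (epc_features->align, a power of two).
 * The helpers below therefore map the aligned-down host address and carry
 * the remainder as an offset into the window.
 */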
static size_t get_align_offset(struct pci_epf_mhi *epf_mhi, u64 addr)
{
	return addr & (epf_mhi->epc_features->align - 1);
}

static int __pci_epf_mhi_alloc_map(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
				   phys_addr_t *paddr, void __iomem **vaddr,
				   size_t offset, size_t size)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	struct pci_epf *epf = epf_mhi->epf;
	struct pci_epc *epc = epf->epc;
	int ret;

	*vaddr = pci_epc_mem_alloc_addr(epc, paddr, size + offset);
	if (!*vaddr)
		return -ENOMEM;

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, *paddr,
			       pci_addr - offset, size + offset);
	if (ret) {
		pci_epc_mem_free_addr(epc, *paddr, *vaddr, size + offset);
		return ret;
	}

	*paddr = *paddr + offset;
	*vaddr = *vaddr + offset;

	return 0;
}

static int pci_epf_mhi_alloc_map(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
				 phys_addr_t *paddr, void __iomem **vaddr,
				 size_t size)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	size_t offset = get_align_offset(epf_mhi, pci_addr);

	return __pci_epf_mhi_alloc_map(mhi_cntrl, pci_addr, paddr, vaddr,
				       offset, size);
}


static void __pci_epf_mhi_unmap_free(struct mhi_ep_cntrl *mhi_cntrl,
				     u64 pci_addr, phys_addr_t paddr,
				     void __iomem *vaddr, size_t offset,
				     size_t size)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	struct pci_epf *epf = epf_mhi->epf;
	struct pci_epc *epc = epf->epc;

	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, paddr - offset);
	pci_epc_mem_free_addr(epc, paddr - offset, vaddr - offset,
			      size + offset);
}

static void pci_epf_mhi_unmap_free(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
				   phys_addr_t paddr, void __iomem *vaddr,
				   size_t size)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	size_t offset = get_align_offset(epf_mhi, pci_addr);

	__pci_epf_mhi_unmap_free(mhi_cntrl, pci_addr, paddr, vaddr, offset,
				 size);
}

static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl, u32 vector)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	struct pci_epf *epf = epf_mhi->epf;
	struct pci_epc *epc = epf->epc;

	/*
	 * MHI supplies 0 based MSI vectors but the API expects the vector
	 * number to start from 1, so we need to increment the vector by 1.
	 */
	pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_EPC_IRQ_MSI,
			  vector + 1);
}

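/*
 * iATU based transfer path: the host buffer is mapped through an outbound
 * window and copied with the CPU. epf_mhi->lock serializes the
 * map/copy/unmap sequence against concurrent callers.
 */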
static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl,
				 struct mhi_ep_buf_info *buf_info)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	size_t offset = get_align_offset(epf_mhi, buf_info->host_addr);
	void __iomem *tre_buf;
	phys_addr_t tre_phys;
	int ret;

	mutex_lock(&epf_mhi->lock);

	ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys,
				      &tre_buf, offset, buf_info->size);
	if (ret) {
		mutex_unlock(&epf_mhi->lock);
		return ret;
	}

	memcpy_fromio(buf_info->dev_addr, tre_buf, buf_info->size);

	__pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys,
				 tre_buf, offset, buf_info->size);

	mutex_unlock(&epf_mhi->lock);

	return 0;
}

static int pci_epf_mhi_iatu_write(struct mhi_ep_cntrl *mhi_cntrl,
				  struct mhi_ep_buf_info *buf_info)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	size_t offset = get_align_offset(epf_mhi, buf_info->host_addr);
	void __iomem *tre_buf;
	phys_addr_t tre_phys;
	int ret;

	mutex_lock(&epf_mhi->lock);

	ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys,
				      &tre_buf, offset, buf_info->size);
	if (ret) {
		mutex_unlock(&epf_mhi->lock);
		return ret;
	}

	memcpy_toio(tre_buf, buf_info->dev_addr, buf_info->size);

	__pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys,
				 tre_buf, offset, buf_info->size);

	mutex_unlock(&epf_mhi->lock);

	return 0;
}

static void pci_epf_mhi_dma_callback(void *param)
{
	complete(param);
}

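/*
 * eDMA based transfer path: buffers of SZ_4K and above are moved using the
 * DMA channels requested from the endpoint controller. Smaller buffers fall
 * back to the iATU path, where the DMA setup overhead would likely outweigh
 * any gain.
 */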
static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl,
				 struct mhi_ep_buf_info *buf_info)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
	struct dma_chan *chan = epf_mhi->dma_chan_rx;
	struct device *dev = &epf_mhi->epf->dev;
	DECLARE_COMPLETION_ONSTACK(complete);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config config = {};
	dma_cookie_t cookie;
	dma_addr_t dst_addr;
	int ret;

	if (buf_info->size < SZ_4K)
		return pci_epf_mhi_iatu_read(mhi_cntrl, buf_info);

	mutex_lock(&epf_mhi->lock);

	config.direction = DMA_DEV_TO_MEM;
	config.src_addr = buf_info->host_addr;

	ret = dmaengine_slave_config(chan, &config);
	if (ret) {
		dev_err(dev, "Failed to configure DMA channel\n");
		goto err_unlock;
	}

	dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
				  DMA_FROM_DEVICE);
	ret = dma_mapping_error(dma_dev, dst_addr);
	if (ret) {
		dev_err(dev, "Failed to map remote memory\n");
		goto err_unlock;
	}

	desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
					   DMA_DEV_TO_MEM,
					   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "Failed to prepare DMA\n");
		ret = -EIO;
		goto err_unmap;
	}

	desc->callback = pci_epf_mhi_dma_callback;
	desc->callback_param = &complete;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA submit\n");
		goto err_unmap;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(dev, "DMA transfer timeout\n");
		dmaengine_terminate_sync(chan);
		ret = -ETIMEDOUT;
	} else {
		/* wait_for_completion_timeout() returns remaining jiffies */
		ret = 0;
	}

err_unmap:
	dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
err_unlock:
	mutex_unlock(&epf_mhi->lock);

	return ret;
}

static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl,
				  struct mhi_ep_buf_info *buf_info)
{
	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
	struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
	struct dma_chan *chan = epf_mhi->dma_chan_tx;
	struct device *dev = &epf_mhi->epf->dev;
	DECLARE_COMPLETION_ONSTACK(complete);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config config = {};
	dma_cookie_t cookie;
	dma_addr_t src_addr;
	int ret;

	if (buf_info->size < SZ_4K)
		return pci_epf_mhi_iatu_write(mhi_cntrl, buf_info);

	mutex_lock(&epf_mhi->lock);

	config.direction = DMA_MEM_TO_DEV;
	config.dst_addr = buf_info->host_addr;

	ret = dmaengine_slave_config(chan, &config);
	if (ret) {
		dev_err(dev, "Failed to configure DMA channel\n");
		goto err_unlock;
	}

	src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
				  DMA_TO_DEVICE);
	ret = dma_mapping_error(dma_dev, src_addr);
	if (ret) {
		dev_err(dev, "Failed to map remote memory\n");
		goto err_unlock;
	}

	desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
					   DMA_MEM_TO_DEV,
					   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "Failed to prepare DMA\n");
		ret = -EIO;
		goto err_unmap;
	}

	desc->callback = pci_epf_mhi_dma_callback;
	desc->callback_param = &complete;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA submit\n");
		goto err_unmap;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(dev, "DMA transfer timeout\n");
		dmaengine_terminate_sync(chan);
		ret = -ETIMEDOUT;
	} else {
		/* wait_for_completion_timeout() returns remaining jiffies */
		ret = 0;
	}

err_unmap:
	dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
err_unlock:
	mutex_unlock(&epf_mhi->lock);

	return ret;
}

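/*
 * Filter used with dma_request_channel() to pick channels that belong to
 * the endpoint controller's DMA device and support the required transfer
 * direction.
 */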
struct epf_dma_filter {
	struct device *dev;
	u32 dma_mask;
};

static bool pci_epf_mhi_filter(struct dma_chan *chan, void *node)
{
	struct epf_dma_filter *filter = node;
	struct dma_slave_caps caps;

	memset(&caps, 0, sizeof(caps));
	dma_get_slave_caps(chan, &caps);

	return chan->device->dev == filter->dev &&
	       (filter->dma_mask & caps.directions);
}

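/*
 * Request dedicated TX (device to host) and RX (host to device) slave
 * channels from the DMA engine of the endpoint controller (assumed here to
 * expose one, e.g. the DWC eDMA on parts setting MHI_EPF_USE_DMA).
 */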
static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi)
{
	struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
	struct device *dev = &epf_mhi->epf->dev;
	struct epf_dma_filter filter;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	filter.dev = dma_dev;
	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
	epf_mhi->dma_chan_tx = dma_request_channel(mask, pci_epf_mhi_filter,
						   &filter);
	if (IS_ERR_OR_NULL(epf_mhi->dma_chan_tx)) {
		dev_err(dev, "Failed to request tx channel\n");
		return -ENODEV;
	}

	filter.dma_mask = BIT(DMA_DEV_TO_MEM);
	epf_mhi->dma_chan_rx = dma_request_channel(mask, pci_epf_mhi_filter,
						   &filter);
	if (IS_ERR_OR_NULL(epf_mhi->dma_chan_rx)) {
		dev_err(dev, "Failed to request rx channel\n");
		dma_release_channel(epf_mhi->dma_chan_tx);
		epf_mhi->dma_chan_tx = NULL;
		return -ENODEV;
	}

	return 0;
}

static void pci_epf_mhi_dma_deinit(struct pci_epf_mhi *epf_mhi)
{
	dma_release_channel(epf_mhi->dma_chan_tx);
	dma_release_channel(epf_mhi->dma_chan_rx);
	epf_mhi->dma_chan_tx = NULL;
	epf_mhi->dma_chan_rx = NULL;
}

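/*
 * Core init: executed once the endpoint controller core is ready. Expose
 * the MMIO region through the chosen BAR, advertise the MSI vectors, write
 * the configuration space header and cache the EPC features used for
 * address alignment.
 */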
static int pci_epf_mhi_core_init(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
	struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	int ret;

	epf_bar->phys_addr = epf_mhi->mmio_phys;
	epf_bar->size = epf_mhi->mmio_size;
	epf_bar->barno = info->bar_num;
	epf_bar->flags = info->epf_flags;
	ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
	if (ret) {
		dev_err(dev, "Failed to set BAR: %d\n", ret);
		return ret;
	}

	ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
			      order_base_2(info->msi_count));
	if (ret) {
		dev_err(dev, "Failed to set MSI configuration: %d\n", ret);
		return ret;
	}

	ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no,
				   epf->header);
	if (ret) {
		dev_err(dev, "Failed to set Configuration header: %d\n", ret);
		return ret;
	}

	epf_mhi->epc_features = pci_epc_get_features(epc, epf->func_no,
						     epf->vfunc_no);
	if (!epf_mhi->epc_features)
		return -ENODATA;

	return 0;
}

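/*
 * Link up: once the PCIe link is trained, set up the transfer backends
 * (eDMA if MHI_EPF_USE_DMA is set, iATU otherwise) and register the MHI
 * endpoint controller so the host side can start MHI enumeration.
 */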
static int pci_epf_mhi_link_up(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
	struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	int ret;

	if (info->flags & MHI_EPF_USE_DMA) {
		ret = pci_epf_mhi_dma_init(epf_mhi);
		if (ret) {
			dev_err(dev, "Failed to initialize DMA: %d\n", ret);
			return ret;
		}
	}

	mhi_cntrl->mmio = epf_mhi->mmio;
	mhi_cntrl->irq = epf_mhi->irq;
	mhi_cntrl->mru = info->mru;

	/* Assign the struct dev of PCI EP as MHI controller device */
	mhi_cntrl->cntrl_dev = epc->dev.parent;
	mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
	mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
	mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
	if (info->flags & MHI_EPF_USE_DMA) {
		mhi_cntrl->read_from_host = pci_epf_mhi_edma_read;
		mhi_cntrl->write_to_host = pci_epf_mhi_edma_write;
	} else {
		mhi_cntrl->read_from_host = pci_epf_mhi_iatu_read;
		mhi_cntrl->write_to_host = pci_epf_mhi_iatu_write;
	}

	/* Register the MHI EP controller */
	ret = mhi_ep_register_controller(mhi_cntrl, info->config);
	if (ret) {
		dev_err(dev, "Failed to register MHI EP controller: %d\n", ret);
		if (info->flags & MHI_EPF_USE_DMA)
			pci_epf_mhi_dma_deinit(epf_mhi);
		return ret;
	}

	return 0;
}

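/*
 * Link down: power down the MHI stack and release the transfer backends.
 * The check for mhi_cntrl->mhi_dev skips the teardown if the controller
 * was never registered.
 */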
static int pci_epf_mhi_link_down(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
	struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;

	if (mhi_cntrl->mhi_dev) {
		mhi_ep_power_down(mhi_cntrl);
		if (info->flags & MHI_EPF_USE_DMA)
			pci_epf_mhi_dma_deinit(epf_mhi);
		mhi_ep_unregister_controller(mhi_cntrl);
	}

	return 0;
}

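/*
 * BME (Bus Master Enable): the host has enabled bus mastering, so the
 * device may now initiate upstream transactions. Power up the MHI stack if
 * it is registered but not yet running.
 */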
static int pci_epf_mhi_bme(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
	struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
	struct device *dev = &epf->dev;
	int ret;

	/*
	 * Power up the MHI EP stack if link is up and stack is in power down
	 * state.
	 */
	if (!mhi_cntrl->enabled && mhi_cntrl->mhi_dev) {
		ret = mhi_ep_power_up(mhi_cntrl);
		if (ret) {
			dev_err(dev, "Failed to power up MHI EP: %d\n", ret);
			if (info->flags & MHI_EPF_USE_DMA)
				pci_epf_mhi_dma_deinit(epf_mhi);
			mhi_ep_unregister_controller(mhi_cntrl);
		}
	}

	return 0;
}

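/*
 * Bind: the function got attached to an endpoint controller. The MHI MMIO
 * region and the doorbell interrupt come from the EPC's platform device
 * ("mmio" resource and "doorbell" IRQ).
 */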
static int pci_epf_mhi_bind(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;
	struct platform_device *pdev = to_platform_device(epc->dev.parent);
	struct resource *res;
	int ret;

	/* Get MMIO base address from Endpoint controller */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio");
	if (!res) {
		dev_err(&epf->dev, "Failed to get \"mmio\" resource\n");
		return -ENODEV;
	}

	epf_mhi->mmio_phys = res->start;
	epf_mhi->mmio_size = resource_size(res);

	epf_mhi->mmio = ioremap(epf_mhi->mmio_phys, epf_mhi->mmio_size);
	if (!epf_mhi->mmio)
		return -ENOMEM;

	ret = platform_get_irq_byname(pdev, "doorbell");
	if (ret < 0) {
		iounmap(epf_mhi->mmio);
		return ret;
	}

	epf_mhi->irq = ret;

	return 0;
}

static void pci_epf_mhi_unbind(struct pci_epf *epf)
{
	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
	struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
	struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
	struct pci_epc *epc = epf->epc;

	/*
	 * Forcefully power down the MHI EP stack. The only way to bring the
	 * MHI EP stack back to a working state after a subsequent bind is for
	 * the host to re-enable bus mastering (BME).
	 */
	if (mhi_cntrl->mhi_dev) {
		mhi_ep_power_down(mhi_cntrl);
		if (info->flags & MHI_EPF_USE_DMA)
			pci_epf_mhi_dma_deinit(epf_mhi);
		mhi_ep_unregister_controller(mhi_cntrl);
	}

	iounmap(epf_mhi->mmio);
	pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
}

static const struct pci_epc_event_ops pci_epf_mhi_event_ops = {
	.core_init = pci_epf_mhi_core_init,
	.link_up = pci_epf_mhi_link_up,
	.link_down = pci_epf_mhi_link_down,
	.bme = pci_epf_mhi_bme,
};

static int pci_epf_mhi_probe(struct pci_epf *epf,
			     const struct pci_epf_device_id *id)
{
	struct pci_epf_mhi_ep_info *info =
			(struct pci_epf_mhi_ep_info *)id->driver_data;
	struct pci_epf_mhi *epf_mhi;
	struct device *dev = &epf->dev;

	epf_mhi = devm_kzalloc(dev, sizeof(*epf_mhi), GFP_KERNEL);
	if (!epf_mhi)
		return -ENOMEM;

	epf->header = info->epf_header;
	epf_mhi->info = info;
	epf_mhi->epf = epf;

	epf->event_ops = &pci_epf_mhi_event_ops;

	mutex_init(&epf_mhi->lock);

	epf_set_drvdata(epf, epf_mhi);

	return 0;
}

static const struct pci_epf_device_id pci_epf_mhi_ids[] = {
	{ .name = "sdx55", .driver_data = (kernel_ulong_t)&sdx55_info },
	{ .name = "sm8450", .driver_data = (kernel_ulong_t)&sm8450_info },
	{},
};
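
/*
 * Hypothetical configfs usage sketch (assuming the PCI endpoint configfs
 * interface is enabled and an EPC named "<epc>" is registered):
 *
 *   mkdir /sys/kernel/config/pci_ep/functions/pci_epf_mhi/sdx55
 *   ln -s /sys/kernel/config/pci_ep/functions/pci_epf_mhi/sdx55 \
 *         /sys/kernel/config/pci_ep/controllers/<epc>/
 *
 * The function directory name is expected to match an entry in
 * pci_epf_mhi_ids[] so that probe() receives the matching driver_data.
 */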

static struct pci_epf_ops pci_epf_mhi_ops = {
	.unbind	= pci_epf_mhi_unbind,
	.bind	= pci_epf_mhi_bind,
};

static struct pci_epf_driver pci_epf_mhi_driver = {
	.driver.name	= "pci_epf_mhi",
	.probe		= pci_epf_mhi_probe,
	.id_table	= pci_epf_mhi_ids,
	.ops		= &pci_epf_mhi_ops,
	.owner		= THIS_MODULE,
};

static int __init pci_epf_mhi_init(void)
{
	return pci_epf_register_driver(&pci_epf_mhi_driver);
}
module_init(pci_epf_mhi_init);

static void __exit pci_epf_mhi_exit(void)
{
	pci_epf_unregister_driver(&pci_epf_mhi_driver);
}
module_exit(pci_epf_mhi_exit);

MODULE_DESCRIPTION("PCI EPF driver for MHI Endpoint devices");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
MODULE_LICENSE("GPL");