// SPDX-License-Identifier: GPL-2.0
/*
 * Test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

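/*
 * Typical usage (see Documentation/PCI/endpoint/pci-test-howto.rst for
 * the full walk-through): on the endpoint side the function device is
 * created and started through configfs, e.g.
 *
 *	mkdir /sys/kernel/config/pci_ep/functions/pci_epf_test/func1
 *	ln -s /sys/kernel/config/pci_ep/functions/pci_epf_test/func1 \
 *		/sys/kernel/config/pci_ep/controllers/<epc>/
 *	echo 1 > /sys/kernel/config/pci_ep/controllers/<epc>/start
 *
 * where <epc> is the platform-specific endpoint controller name. The
 * host side then exercises this function with the pcitest utility
 * (tools/pci/) through the pci_endpoint_test misc driver.
 */
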
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci_ids.h>
#include <linux/random.h>

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci_regs.h>

#define IRQ_TYPE_LEGACY			0
#define IRQ_TYPE_MSI			1
#define IRQ_TYPE_MSIX			2

#define COMMAND_RAISE_LEGACY_IRQ	BIT(0)
#define COMMAND_RAISE_MSI_IRQ		BIT(1)
#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
#define COMMAND_READ			BIT(3)
#define COMMAND_WRITE			BIT(4)
#define COMMAND_COPY			BIT(5)

#define STATUS_READ_SUCCESS		BIT(0)
#define STATUS_READ_FAIL		BIT(1)
#define STATUS_WRITE_SUCCESS		BIT(2)
#define STATUS_WRITE_FAIL		BIT(3)
#define STATUS_COPY_SUCCESS		BIT(4)
#define STATUS_COPY_FAIL		BIT(5)
#define STATUS_IRQ_RAISED		BIT(6)
#define STATUS_SRC_ADDR_INVALID		BIT(7)
#define STATUS_DST_ADDR_INVALID		BIT(8)

#define FLAG_USE_DMA			BIT(0)

#define TIMER_RESOLUTION		1

static struct workqueue_struct *kpcitest_workqueue;

struct pci_epf_test {
	void			*reg[PCI_STD_NUM_BARS];
	struct pci_epf		*epf;
	enum pci_barno		test_reg_bar;
	size_t			msix_table_offset;
	struct delayed_work	cmd_handler;
	struct dma_chan		*dma_chan_tx;
	struct dma_chan		*dma_chan_rx;
	struct dma_chan		*transfer_chan;
	dma_cookie_t		transfer_cookie;
	enum dma_status		transfer_status;
	struct completion	transfer_complete;
	bool			dma_supported;
	bool			dma_private;
	const struct pci_epc_features *epc_features;
};

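/*
 * Register layout exposed to the host through the test register BAR.
 * The offsets must stay in sync with what the host-side driver
 * (drivers/misc/pci_endpoint_test.c) expects, hence the __packed.
 */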
struct pci_epf_test_reg {
	u32	magic;
	u32	command;
	u32	status;
	u64	src_addr;
	u64	dst_addr;
	u32	size;
	u32	checksum;
	u32	irq_type;
	u32	irq_number;
	u32	flags;
} __packed;

static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};

/* Default BAR sizes for BAR0..BAR5; pci_epf_configure_bar() may override them */
static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };

static void pci_epf_test_dma_callback(void *param)
{
	struct pci_epf_test *epf_test = param;
	struct dma_tx_state state;

	epf_test->transfer_status =
		dmaengine_tx_status(epf_test->transfer_chan,
				    epf_test->transfer_cookie, &state);
	if (epf_test->transfer_status == DMA_COMPLETE ||
	    epf_test->transfer_status == DMA_ERROR)
		complete(&epf_test->transfer_complete);
}

/**
 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
 *				  data between PCIe EP and remote PCIe RC
 * @epf_test: the EPF test device that performs the data transfer operation
 * @dma_dst: The destination address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @dma_src: The source address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @len: The size of the data transfer
 * @dma_remote: remote RC physical address
 * @dir: DMA transfer direction
 *
 * Function that uses dmaengine API to transfer data between PCIe EP and remote
 * PCIe RC. The source and destination address can be a physical address given
 * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
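 *
 * For example, pci_epf_test_read() pulls a host buffer into local memory
 * with:
 *
 *	pci_epf_test_data_transfer(epf_test, dst_phys_addr, phys_addr,
 *				   reg->size, reg->src_addr, DMA_DEV_TO_MEM);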
 *
 * Returns '0' on success and a negative value on failure.
 */
static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
				      dma_addr_t dma_dst, dma_addr_t dma_src,
				      size_t len, dma_addr_t dma_remote,
				      enum dma_transfer_direction dir)
{
	struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ?
				 epf_test->dma_chan_tx : epf_test->dma_chan_rx;
	dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct pci_epf *epf = epf_test->epf;
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config sconf = {};
	struct device *dev = &epf->dev;
	int ret;

	if (IS_ERR_OR_NULL(chan)) {
		dev_err(dev, "Invalid DMA memcpy channel\n");
		return -EINVAL;
	}

	if (epf_test->dma_private) {
		sconf.direction = dir;
		if (dir == DMA_MEM_TO_DEV)
			sconf.dst_addr = dma_remote;
		else
			sconf.src_addr = dma_remote;

		if (dmaengine_slave_config(chan, &sconf)) {
			dev_err(dev, "DMA slave config fail\n");
			return -EIO;
		}
		tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
						 flags);
	} else {
		tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
					       flags);
	}

	if (!tx) {
		dev_err(dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	reinit_completion(&epf_test->transfer_complete);
	epf_test->transfer_chan = chan;
	tx->callback = pci_epf_test_dma_callback;
	tx->callback_param = epf_test;
	epf_test->transfer_cookie = dmaengine_submit(tx);

	ret = dma_submit_error(epf_test->transfer_cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA tx_submit %d\n", ret);
		goto terminate;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
	if (ret < 0) {
		dev_err(dev, "DMA wait_for_completion interrupted\n");
		goto terminate;
	}

	if (epf_test->transfer_status == DMA_ERROR) {
		dev_err(dev, "DMA transfer failed\n");
		ret = -EIO;
	}

terminate:
	dmaengine_terminate_sync(chan);

	return ret;
}

struct epf_dma_filter {
	struct device *dev;
	u32 dma_mask;
};

static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
{
	struct epf_dma_filter *filter = node;
	struct dma_slave_caps caps;

	memset(&caps, 0, sizeof(caps));
	dma_get_slave_caps(chan, &caps);

	return chan->device->dev == filter->dev
		&& (filter->dma_mask & caps.directions);
}

/**
 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Request private DMA rx/tx channels from the EPC's DMA controller where
 * available, falling back to a generic memcpy channel otherwise.
 *
 * Returns '0' on success and a negative value on failure.
 */
static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct epf_dma_filter filter;
	struct dma_chan *dma_chan;
	dma_cap_mask_t mask;
	int ret;

	filter.dev = epf->epc->dev.parent;
	filter.dma_mask = BIT(DMA_DEV_TO_MEM);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
		goto fail_back_tx;
	}

	epf_test->dma_chan_rx = dma_chan;

	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);

	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
		goto fail_back_rx;
	}

	epf_test->dma_chan_tx = dma_chan;
	epf_test->dma_private = true;

	init_completion(&epf_test->transfer_complete);

	return 0;

fail_back_rx:
	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_rx = NULL;

fail_back_tx:
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get DMA channel\n");
		return ret;
	}
	init_completion(&epf_test->transfer_complete);

	epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;

	return 0;
}

/**
 * pci_epf_test_clean_dma_chan() - Function to clean up EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Helper to clean up EPF test DMA channel.
 */
static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
{
	if (!epf_test->dma_supported)
		return;

	dma_release_channel(epf_test->dma_chan_tx);
	if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
		epf_test->dma_chan_tx = NULL;
		epf_test->dma_chan_rx = NULL;
		return;
	}

	epf_test->dma_chan_tx = NULL;

	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_rx = NULL;
}

static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
				    const char *op, u64 size,
				    struct timespec64 *start,
				    struct timespec64 *end, bool dma)
{
	struct timespec64 ts = timespec64_sub(*end, *start);
	u64 rate = 0, ns;

	/* rate in KB/s = (size bytes * NSEC_PER_SEC) / (ns * 1000) */
	ns = timespec64_to_ns(&ts);
	if (ns)
		rate = div64_u64(size * NSEC_PER_SEC, ns * 1000);

	dev_info(&epf_test->epf->dev,
		 "%s => Size: %llu B, DMA: %s, Time: %llu.%09u s, Rate: %llu KB/s\n",
		 op, size, dma ? "YES" : "NO",
		 (u64)ts.tv_sec, (u32)ts.tv_nsec, rate);
}

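/*
 * COMMAND_COPY handler: map both host buffers through the endpoint's
 * outbound window and copy reg->size bytes from reg->src_addr to
 * reg->dst_addr, either with the memcpy DMA channel or via a CPU
 * bounce buffer.
 */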
static void pci_epf_test_copy(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret;
	void __iomem *src_addr;
	void __iomem *dst_addr;
	phys_addr_t src_phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;

	src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr,
			       reg->src_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_src_addr;
	}

	dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err_src_map_addr;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr,
			       reg->dst_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_dst_addr;
	}

	ktime_get_ts64(&start);
	if (reg->flags & FLAG_USE_DMA) {
		if (epf_test->dma_private) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_map_addr;
		}

		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 src_phys_addr, reg->size, 0,
						 DMA_MEM_TO_MEM);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
	} else {
		void *buf;

		buf = kzalloc(reg->size, GFP_KERNEL);
		if (!buf) {
			ret = -ENOMEM;
			goto err_map_addr;
		}

		memcpy_fromio(buf, src_addr, reg->size);
		memcpy_toio(dst_addr, buf, reg->size);
		kfree(buf);
	}
	ktime_get_ts64(&end);
	pci_epf_test_print_rate(epf_test, "COPY", reg->size, &start, &end,
				reg->flags & FLAG_USE_DMA);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr);

err_dst_addr:
	pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);

err_src_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr);

err_src_addr:
	pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);

err:
	if (!ret)
		reg->status |= STATUS_COPY_SUCCESS;
	else
		reg->status |= STATUS_COPY_FAIL;
}

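/*
 * COMMAND_READ handler: read reg->size bytes from the host buffer at
 * reg->src_addr into a local buffer, via DMA or memcpy_fromio(), and
 * verify them against the CRC32 the host stored in reg->checksum.
 */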
static void pci_epf_test_read(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret;
	void __iomem *src_addr;
	void *buf;
	u32 crc32;
	phys_addr_t phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	struct device *dma_dev = epf->epc->dev.parent;

	src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
			       reg->src_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	if (reg->flags & FLAG_USE_DMA) {
		dst_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dst_phys_addr)) {
			dev_err(dev, "Failed to map destination buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);
		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 phys_addr, reg->size,
						 reg->src_addr, DMA_DEV_TO_MEM);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, dst_phys_addr, reg->size,
				 DMA_FROM_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_fromio(buf, src_addr, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate(epf_test, "READ", reg->size, &start, &end,
				reg->flags & FLAG_USE_DMA);

	crc32 = crc32_le(~0, buf, reg->size);
	if (crc32 != reg->checksum)
		ret = -EIO;

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);

err:
	if (!ret)
		reg->status |= STATUS_READ_SUCCESS;
	else
		reg->status |= STATUS_READ_FAIL;
}

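/*
 * COMMAND_WRITE handler: fill a local buffer with random bytes, publish
 * its CRC32 in reg->checksum, and write reg->size bytes to the host
 * buffer at reg->dst_addr, via DMA or memcpy_toio().
 */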
static void pci_epf_test_write(struct pci_epf_test *epf_test,
			       struct pci_epf_test_reg *reg)
{
	int ret;
	void __iomem *dst_addr;
	void *buf;
	phys_addr_t phys_addr;
	phys_addr_t src_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	struct device *dma_dev = epf->epc->dev.parent;

	dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
			       reg->dst_addr, reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	get_random_bytes(buf, reg->size);
	reg->checksum = crc32_le(~0, buf, reg->size);

	if (reg->flags & FLAG_USE_DMA) {
		src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, src_phys_addr)) {
			dev_err(dev, "Failed to map source buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);

		ret = pci_epf_test_data_transfer(epf_test, phys_addr,
						 src_phys_addr, reg->size,
						 reg->dst_addr,
						 DMA_MEM_TO_DEV);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, src_phys_addr, reg->size,
				 DMA_TO_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_toio(dst_addr, buf, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate(epf_test, "WRITE", reg->size, &start, &end,
				reg->flags & FLAG_USE_DMA);

	/*
	 * Wait 1ms in order for the write to complete. Without this delay,
	 * an L3 error is observed in the host system.
	 */
	usleep_range(1000, 2000);

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);

err:
	if (!ret)
		reg->status |= STATUS_WRITE_SUCCESS;
	else
		reg->status |= STATUS_WRITE_FAIL;
}

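/*
 * Signal command completion to the host: set STATUS_IRQ_RAISED and raise
 * the interrupt type and vector the host requested through reg->irq_type
 * and reg->irq_number (legacy INTx, MSI or MSI-X).
 */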
static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
				   struct pci_epf_test_reg *reg)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	u32 status = reg->status | STATUS_IRQ_RAISED;
	int count;

	/*
	 * Set the status before raising the IRQ to ensure that the host sees
	 * the updated value when it gets the IRQ.
	 */
	WRITE_ONCE(reg->status, status);

	switch (reg->irq_type) {
	case IRQ_TYPE_LEGACY:
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_EPC_IRQ_LEGACY, 0);
		break;
	case IRQ_TYPE_MSI:
		count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
		if (reg->irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSI IRQ number %d / %d\n",
				reg->irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_EPC_IRQ_MSI, reg->irq_number);
		break;
	case IRQ_TYPE_MSIX:
		count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
		if (reg->irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSIX IRQ number %d / %d\n",
				reg->irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_EPC_IRQ_MSIX, reg->irq_number);
		break;
	default:
		dev_err(dev, "Failed to raise IRQ, unknown type\n");
		break;
	}
}

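/*
 * Deferred work item that polls the command register roughly once per
 * millisecond: it clears command and status, dispatches the requested
 * operation, and re-queues itself. pci_epf_test_link_up() (or bind, when
 * no link-up notifier exists) schedules the first run.
 */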
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	command = READ_ONCE(reg->command);
	if (!command)
		goto reset_handler;

	WRITE_ONCE(reg->command, 0);
	WRITE_ONCE(reg->status, 0);

	if ((READ_ONCE(reg->flags) & FLAG_USE_DMA) &&
	    !epf_test->dma_supported) {
		dev_err(dev, "Cannot transfer data using DMA\n");
		goto reset_handler;
	}

	if (reg->irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Failed to detect IRQ type\n");
		goto reset_handler;
	}

	switch (command) {
	case COMMAND_RAISE_LEGACY_IRQ:
	case COMMAND_RAISE_MSI_IRQ:
	case COMMAND_RAISE_MSIX_IRQ:
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_WRITE:
		pci_epf_test_write(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_READ:
		pci_epf_test_read(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_COPY:
		pci_epf_test_copy(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	default:
		dev_err(dev, "Invalid command 0x%x\n", command);
		break;
	}

reset_handler:
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}

static void pci_epf_test_unbind(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;
	struct pci_epf_bar *epf_bar;
	int bar;

	cancel_delayed_work(&epf_test->cmd_handler);
	pci_epf_test_clean_dma_chan(epf_test);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		epf_bar = &epf->bar[bar];

		if (epf_test->reg[bar]) {
			pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
					  epf_bar);
			pci_epf_free_space(epf, epf_test->reg[bar], bar,
					   PRIMARY_INTERFACE);
		}
	}
}

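/*
 * Program every allocated BAR into the endpoint controller. A 64-bit BAR
 * consumes two BAR slots, which is why the loop advances by "add".
 */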
static int pci_epf_test_set_bar(struct pci_epf *epf)
{
	int bar, add;
	int ret;
	struct pci_epf_bar *epf_bar;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	const struct pci_epc_features *epc_features;

	epc_features = epf_test->epc_features;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
		epf_bar = &epf->bar[bar];
		/*
		 * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
		 * if the specific implementation requires a 64-bit BAR,
		 * even if we only requested a 32-bit BAR.
		 */
		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;

		if (!!(epc_features->reserved_bar & (1 << bar)))
			continue;

		ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
				      epf_bar);
		if (ret) {
			pci_epf_free_space(epf, epf_test->reg[bar], bar,
					   PRIMARY_INTERFACE);
			dev_err(dev, "Failed to set BAR%d\n", bar);
			if (bar == test_reg_bar)
				return ret;
		}
	}

	return 0;
}

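/*
 * Write the configuration space header and set up BARs, MSI and MSI-X.
 * Called directly from bind when the controller needs no core_init
 * notification, or later through the core_init event callback.
 */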
static int pci_epf_test_core_init(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epf_header *header = epf->header;
	const struct pci_epc_features *epc_features;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	bool msix_capable = false;
	bool msi_capable = true;
	int ret;

	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (epc_features) {
		msix_capable = epc_features->msix_capable;
		msi_capable = epc_features->msi_capable;
	}

	if (epf->vfunc_no <= 1) {
		ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
		if (ret) {
			dev_err(dev, "Configuration header write failed\n");
			return ret;
		}
	}

	ret = pci_epf_test_set_bar(epf);
	if (ret)
		return ret;

	if (msi_capable) {
		ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
				      epf->msi_interrupts);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	if (msix_capable) {
		ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
				       epf->msix_interrupts,
				       epf_test->test_reg_bar,
				       epf_test->msix_table_offset);
		if (ret) {
			dev_err(dev, "MSI-X configuration failed\n");
			return ret;
		}
	}

	return 0;
}

static int pci_epf_test_link_up(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));

	return 0;
}

static const struct pci_epc_event_ops pci_epf_test_event_ops = {
	.core_init = pci_epf_test_core_init,
	.link_up = pci_epf_test_link_up,
};

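/*
 * Allocate backing memory for each usable BAR. The test register BAR
 * additionally carries the MSI-X table and PBA when the controller is
 * MSI-X capable, so it is sized to hold all three.
 */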
static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct device *dev = &epf->dev;
	struct pci_epf_bar *epf_bar;
	size_t msix_table_size = 0;
	size_t test_reg_bar_size;
	size_t pba_size = 0;
	bool msix_capable;
	void *base;
	int bar, add;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	const struct pci_epc_features *epc_features;
	size_t test_reg_size;

	epc_features = epf_test->epc_features;

	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);

	msix_capable = epc_features->msix_capable;
	if (msix_capable) {
		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
		epf_test->msix_table_offset = test_reg_bar_size;
		/* Align to QWORD or 8 Bytes */
		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
	}
	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;

	if (epc_features->bar_fixed_size[test_reg_bar]) {
		if (test_reg_size > bar_size[test_reg_bar])
			return -ENOMEM;
		test_reg_size = bar_size[test_reg_bar];
	}

	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
				   epc_features->align, PRIMARY_INTERFACE);
	if (!base) {
		dev_err(dev, "Failed to allocate register space\n");
		return -ENOMEM;
	}
	epf_test->reg[test_reg_bar] = base;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
		epf_bar = &epf->bar[bar];
		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;

		if (bar == test_reg_bar)
			continue;

		if (!!(epc_features->reserved_bar & (1 << bar)))
			continue;

		base = pci_epf_alloc_space(epf, bar_size[bar], bar,
					   epc_features->align,
					   PRIMARY_INTERFACE);
		if (!base)
			dev_err(dev, "Failed to allocate space for BAR%d\n",
				bar);
		epf_test->reg[bar] = base;
	}

	return 0;
}

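/*
 * Apply controller-fixed BAR properties from epc_features: force the
 * 64-bit flag where required and adopt fixed BAR sizes before any BAR
 * memory is allocated.
 */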
static void pci_epf_configure_bar(struct pci_epf *epf,
				  const struct pci_epc_features *epc_features)
{
	struct pci_epf_bar *epf_bar;
	bool bar_fixed_64bit;
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		epf_bar = &epf->bar[i];
		bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
		if (bar_fixed_64bit)
			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
		if (epc_features->bar_fixed_size[i])
			bar_size[i] = epc_features->bar_fixed_size[i];
	}
}

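/*
 * Bind callback: query the controller's features, pick the first free
 * BAR for the test registers, allocate BAR memory, run core_init and DMA
 * setup where appropriate, and kick the command poller if the controller
 * cannot signal link-up by itself.
 */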
static int pci_epf_test_bind(struct pci_epf *epf)
{
	int ret;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	const struct pci_epc_features *epc_features;
	enum pci_barno test_reg_bar = BAR_0;
	struct pci_epc *epc = epf->epc;
	bool linkup_notifier = false;
	bool core_init_notifier = false;

	if (WARN_ON_ONCE(!epc))
		return -EINVAL;

	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (!epc_features) {
		dev_err(&epf->dev, "epc_features not implemented\n");
		return -EOPNOTSUPP;
	}

	linkup_notifier = epc_features->linkup_notifier;
	core_init_notifier = epc_features->core_init_notifier;
	test_reg_bar = pci_epc_get_first_free_bar(epc_features);
	if (test_reg_bar < 0)
		return -EINVAL;
	pci_epf_configure_bar(epf, epc_features);

	epf_test->test_reg_bar = test_reg_bar;
	epf_test->epc_features = epc_features;

	ret = pci_epf_test_alloc_space(epf);
	if (ret)
		return ret;

	if (!core_init_notifier) {
		ret = pci_epf_test_core_init(epf);
		if (ret)
			return ret;
	}

	epf_test->dma_supported = true;

	ret = pci_epf_test_init_dma_chan(epf_test);
	if (ret)
		epf_test->dma_supported = false;

	if (!linkup_notifier && !core_init_notifier)
		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);

	return 0;
}

static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},
};

static int pci_epf_test_probe(struct pci_epf *epf,
			      const struct pci_epf_device_id *id)
{
	struct pci_epf_test *epf_test;
	struct device *dev = &epf->dev;

	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
	if (!epf_test)
		return -ENOMEM;

	epf->header = &test_header;
	epf_test->epf = epf;

	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);

	epf->event_ops = &pci_epf_test_event_ops;

	epf_set_drvdata(epf, epf_test);
	return 0;
}

static struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
};

static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};

static int __init pci_epf_test_init(void)
{
	int ret;

	kpcitest_workqueue = alloc_workqueue("kpcitest",
					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kpcitest_workqueue) {
		pr_err("Failed to allocate the kpcitest work queue\n");
		return -ENOMEM;
	}

	ret = pci_epf_register_driver(&test_driver);
	if (ret) {
		destroy_workqueue(kpcitest_workqueue);
		pr_err("Failed to register pci epf test driver --> %d\n", ret);
		return ret;
	}

	return 0;
}
module_init(pci_epf_test_init);

static void __exit pci_epf_test_exit(void)
{
	if (kpcitest_workqueue)
		destroy_workqueue(kpcitest_workqueue);
	pci_epf_unregister_driver(&test_driver);
}
module_exit(pci_epf_test_exit);

MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");