// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Passthru DMA device driver
 * -- Based on the CCP driver
 *
 * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
 *
 * Author: Sanjay R Mehta <sanju.mehta@amd.com>
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/pci_ids.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

#include "ptdma.h"

struct pt_msix {
	int msix_count;
	struct msix_entry msix_entry;
};

/*
 * pt_alloc_struct - allocate and initialize the pt_device struct
 *
 * @dev: device struct of the PTDMA
 */
static struct pt_device *pt_alloc_struct(struct device *dev)
{
	struct pt_device *pt;

	pt = devm_kzalloc(dev, sizeof(*pt), GFP_KERNEL);

	if (!pt)
		return NULL;
	pt->dev = dev;

	INIT_LIST_HEAD(&pt->cmd);

	return pt;
}

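/*
 * pt_get_msix_irqs - request a single MSI-X vector for the device
 *
 * @pt: the pt_device
 *
 * The PTDMA engine needs only one interrupt, so exactly one MSI-X
 * entry is requested and its vector is cached in pt->pt_irq.
 */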
static int pt_get_msix_irqs(struct pt_device *pt)
{
	struct pt_msix *pt_msix = pt->pt_msix;
	struct device *dev = pt->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	pt_msix->msix_entry.entry = 0;

	ret = pci_enable_msix_range(pdev, &pt_msix->msix_entry, 1, 1);
	if (ret < 0)
		return ret;

	pt_msix->msix_count = ret;

	pt->pt_irq = pt_msix->msix_entry.vector;

	return 0;
}

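/*
 * pt_get_msi_irq - enable a single MSI interrupt and cache its vector
 *
 * @pt: the pt_device
 */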
static int pt_get_msi_irq(struct pt_device *pt)
{
	struct device *dev = pt->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = pci_enable_msi(pdev);
	if (ret)
		return ret;

	pt->pt_irq = pdev->irq;

	return 0;
}

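/*
 * pt_get_irqs - set up the device interrupt, preferring MSI-X and
 * falling back to MSI if MSI-X cannot be enabled
 *
 * @pt: the pt_device
 */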
static int pt_get_irqs(struct pt_device *pt)
{
	struct device *dev = pt->dev;
	int ret;

	ret = pt_get_msix_irqs(pt);
	if (!ret)
		return 0;

	/* Couldn't get MSI-X vectors, try MSI */
	dev_err(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
	ret = pt_get_msi_irq(pt);
	if (!ret)
		return 0;

	/* Couldn't get MSI interrupt */
	dev_err(dev, "could not enable MSI (%d)\n", ret);

	return ret;
}

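/*
 * pt_free_irqs - disable whichever interrupt mode was enabled in
 * pt_get_irqs() and clear the cached vector
 *
 * @pt: the pt_device
 */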
static void pt_free_irqs(struct pt_device *pt)
{
	struct pt_msix *pt_msix = pt->pt_msix;
	struct device *dev = pt->dev;
	struct pci_dev *pdev = to_pci_dev(dev);

	if (pt_msix->msix_count)
		pci_disable_msix(pdev);
	else if (pt->pt_irq)
		pci_disable_msi(pdev);

	pt->pt_irq = 0;
}

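/*
 * pt_pci_probe - enable the PCI device, map the register BAR named by
 * the device data, set up the interrupt and DMA mask (48-bit with a
 * 32-bit fallback), then initialize the PTDMA core
 */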
static int pt_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct pt_device *pt;
	struct pt_msix *pt_msix;
	struct device *dev = &pdev->dev;
	void __iomem * const *iomap_table;
	int bar_mask;
	int ret = -ENOMEM;

	pt = pt_alloc_struct(dev);
	if (!pt)
		goto e_err;

	pt_msix = devm_kzalloc(dev, sizeof(*pt_msix), GFP_KERNEL);
	if (!pt_msix)
		goto e_err;

	pt->pt_msix = pt_msix;
	pt->dev_vdata = (struct pt_dev_vdata *)id->driver_data;
	if (!pt->dev_vdata) {
		ret = -ENODEV;
		dev_err(dev, "missing driver data\n");
		goto e_err;
	}

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "pcim_enable_device failed (%d)\n", ret);
		goto e_err;
	}

	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
	ret = pcim_iomap_regions(pdev, bar_mask, "ptdma");
	if (ret) {
		dev_err(dev, "pcim_iomap_regions failed (%d)\n", ret);
		goto e_err;
	}

	iomap_table = pcim_iomap_table(pdev);
	if (!iomap_table) {
		dev_err(dev, "pcim_iomap_table failed\n");
		ret = -ENOMEM;
		goto e_err;
	}

	pt->io_regs = iomap_table[pt->dev_vdata->bar];
	if (!pt->io_regs) {
		dev_err(dev, "ioremap failed\n");
		ret = -ENOMEM;
		goto e_err;
	}

	ret = pt_get_irqs(pt);
	if (ret)
		goto e_err;

	pci_set_master(pdev);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
				ret);
			goto e_err;
		}
	}

	dev_set_drvdata(dev, pt);

	if (pt->dev_vdata)
		ret = pt_core_init(pt);

	if (ret)
		goto e_err;

	return 0;

e_err:
	dev_err(dev, "initialization failed ret = %d\n", ret);

	return ret;
}

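/*
 * pt_pci_remove - tear down the PTDMA core and release the device
 * interrupt; the devm/pcim managed allocations and MMIO mappings are
 * released automatically when the driver detaches
 */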
static void pt_pci_remove(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct pt_device *pt = dev_get_drvdata(dev);

	if (!pt)
		return;

	if (pt->dev_vdata)
		pt_core_destroy(pt);

	pt_free_irqs(pt);
}

static const struct pt_dev_vdata dev_vdata[] = {
	{
		.bar = 2,
	},
};

static const struct pci_device_id pt_pci_table[] = {
	{ PCI_VDEVICE(AMD, 0x1498), (kernel_ulong_t)&dev_vdata[0] },
	/* Last entry must be zero */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, pt_pci_table);

static struct pci_driver pt_pci_driver = {
	.name = "ptdma",
	.id_table = pt_pci_table,
	.probe = pt_pci_probe,
	.remove = pt_pci_remove,
};

module_pci_driver(pt_pci_driver);

MODULE_AUTHOR("Sanjay R Mehta <sanju.mehta@amd.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AMD PassThru DMA driver");