// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"
#include "../include/hw_ip/pci/pci_general.h"

#include <linux/pci.h>

#define HL_PLDM_PCI_ELBI_TIMEOUT_MSEC	(HL_PCI_ELBI_TIMEOUT_MSEC * 10)

#define IATU_REGION_CTRL_REGION_EN_MASK		BIT(31)
#define IATU_REGION_CTRL_MATCH_MODE_MASK	BIT(30)
#define IATU_REGION_CTRL_NUM_MATCH_EN_MASK	BIT(19)
#define IATU_REGION_CTRL_BAR_NUM_MASK		GENMASK(10, 8)
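
/*
 * Note: the IATU_REGION_CTRL_* masks above are assumed to describe fields of
 * the DesignWare PCIe iATU region control register programmed below (region
 * enable, BAR/address match mode, match enable and BAR number). The bit
 * positions are taken from how this file uses them, not from a datasheet.
 */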

/**
 * hl_pci_bars_map() - Map PCI BARs.
 * @hdev: Pointer to hl_device structure.
 * @name: Array of BAR names.
 * @is_wc: Array of flags indicating, per BAR, whether a write-combined
 *         mapping is needed.
 *
 * Request PCI regions and map them to kernel virtual addresses.
 *
 * Return: 0 on success, non-zero for failure.
 */
int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
			bool is_wc[3])
{
	struct pci_dev *pdev = hdev->pdev;
	int rc, i, bar;

	rc = pci_request_regions(pdev, HL_NAME);
	if (rc) {
		dev_err(hdev->dev, "Cannot obtain PCI resources\n");
		return rc;
	}

	for (i = 0 ; i < 3 ; i++) {
		bar = i * 2; /* 64-bit BARs */
		hdev->pcie_bar[bar] = is_wc[i] ?
				pci_ioremap_wc_bar(pdev, bar) :
				pci_ioremap_bar(pdev, bar);
		if (!hdev->pcie_bar[bar]) {
			dev_err(hdev->dev, "pci_ioremap%s_bar failed for %s\n",
					is_wc[i] ? "_wc" : "", name[i]);
			rc = -ENODEV;
			goto err;
		}
	}

	return 0;

err:
	for (i = 2 ; i >= 0 ; i--) {
		bar = i * 2; /* 64-bit BARs */
		if (hdev->pcie_bar[bar])
			iounmap(hdev->pcie_bar[bar]);
	}

	pci_release_regions(pdev);

	return rc;
}
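
/*
 * Illustrative (hypothetical) caller: an ASIC-specific pci_bars_map callback
 * would typically look like the sketch below. The BAR names and write-combine
 * flags are made up for illustration; each ASIC defines its own.
 *
 *	static int my_asic_pci_bars_map(struct hl_device *hdev)
 *	{
 *		static const char * const name[] = {"CFG", "MSIX", "DDR"};
 *		bool is_wc[3] = {false, false, true};
 *
 *		return hl_pci_bars_map(hdev, name, is_wc);
 *	}
 */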

/**
 * hl_pci_bars_unmap() - Unmap PCI BARs.
 * @hdev: Pointer to hl_device structure.
 *
 * Release all PCI BARs and unmap their virtual addresses.
 */
static void hl_pci_bars_unmap(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int i, bar;

	for (i = 2 ; i >= 0 ; i--) {
		bar = i * 2; /* 64-bit BARs */
		iounmap(hdev->pcie_bar[bar]);
	}

	pci_release_regions(pdev);
}

/**
 * hl_pci_elbi_write() - Write through the ELBI interface.
 * @hdev: Pointer to hl_device structure.
 * @addr: Address to write to
 * @data: Data to write
 *
 * Return: 0 on success, negative value for failure.
 */
static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
{
	struct pci_dev *pdev = hdev->pdev;
	ktime_t timeout;
	u64 msec;
	u32 val;

	if (hdev->pldm)
		msec = HL_PLDM_PCI_ELBI_TIMEOUT_MSEC;
	else
		msec = HL_PCI_ELBI_TIMEOUT_MSEC;

	/* Clear previous status */
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);

	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL,
				PCI_CONFIG_ELBI_CTRL_WRITE);

	timeout = ktime_add_ms(ktime_get(), msec);
	for (;;) {
		pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
		if (val & PCI_CONFIG_ELBI_STS_MASK)
			break;
		if (ktime_compare(ktime_get(), timeout) > 0) {
			pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
						&val);
			break;
		}

		usleep_range(300, 500);
	}

	if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
		return 0;

	if (val & PCI_CONFIG_ELBI_STS_ERR)
		return -EIO;

	if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
		dev_err(hdev->dev, "ELBI write didn't finish in time\n");
		return -EIO;
	}

	dev_err(hdev->dev, "ELBI write has undefined bits in status\n");
	return -EIO;
}

/**
 * hl_pci_iatu_write() - iATU write routine.
 * @hdev: Pointer to hl_device structure.
 * @addr: Address to write to
 * @data: Data to write
 *
 * Return: 0 on success, negative value for failure.
 */
int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u32 dbi_offset;
	int rc;

	dbi_offset = addr & 0xFFF;

	/* Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
	 * in case the firmware security is enabled
	 */
	hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);

	rc = hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
				data);

	if (rc)
		return -EIO;

	return 0;
}
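
/*
 * Reading guide (an inference from the code above, not from a spec): the ELBI
 * write to pcie_aux_dbi_reg_addr appears to steer the DBI window so that the
 * iATU register file becomes visible at pcie_dbi_base_address, after which
 * the requested register is written at (addr & 0xFFF) within that window.
 * The callers below restore the window to its default location once they are
 * done programming a region.
 */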

/**
 * hl_pci_reset_link_through_bridge() - Reset PCI link.
 * @hdev: Pointer to hl_device structure.
 */
static void hl_pci_reset_link_through_bridge(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct pci_dev *parent_port;
	u16 val;

	parent_port = pdev->bus->self;
	pci_read_config_word(parent_port, PCI_BRIDGE_CONTROL, &val);
	val |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
	ssleep(1);

	val &= ~(PCI_BRIDGE_CTL_BUS_RESET);
	pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
	ssleep(3);
}
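
/*
 * Note: the sequence above is, in effect, a secondary bus reset driven by
 * hand through the parent bridge's control register (assert
 * PCI_BRIDGE_CTL_BUS_RESET, wait, deassert, wait for the link to retrain).
 * Recent kernels expose a comparable operation via
 * pci_bridge_secondary_bus_reset(); whether that helper is suitable here is
 * an assumption, not something this driver relies on.
 */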

/**
 * hl_pci_set_inbound_region() - Configure inbound region.
 * @hdev: Pointer to hl_device structure.
 * @region: Inbound region number.
 * @pci_region: Inbound region parameters.
 *
 * Configure the iATU inbound region.
 *
 * Return: 0 on success, negative value for failure.
 */
int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
		struct hl_inbound_pci_region *pci_region)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 bar_phys_base, region_base, region_end_address;
	u32 offset, ctrl_reg_val;
	int rc = 0;

	/* Region offset: 0x200 bytes per region, inbound block at +0x100 */
	offset = (0x200 * region) + 0x100;

	if (pci_region->mode == PCI_ADDRESS_MATCH_MODE) {
		bar_phys_base = hdev->pcie_bar_phys[pci_region->bar];
		region_base = bar_phys_base + pci_region->offset_in_bar;
		region_end_address = region_base + pci_region->size - 1;

		rc |= hl_pci_iatu_write(hdev, offset + 0x8,
				lower_32_bits(region_base));
		rc |= hl_pci_iatu_write(hdev, offset + 0xC,
				upper_32_bits(region_base));
		rc |= hl_pci_iatu_write(hdev, offset + 0x10,
				lower_32_bits(region_end_address));
	}

	/* Point to the specified address */
	rc |= hl_pci_iatu_write(hdev, offset + 0x14,
			lower_32_bits(pci_region->addr));
	rc |= hl_pci_iatu_write(hdev, offset + 0x18,
			upper_32_bits(pci_region->addr));
	rc |= hl_pci_iatu_write(hdev, offset + 0x0, 0);

	/* Enable + bar/address match + match enable + bar number */
	ctrl_reg_val = FIELD_PREP(IATU_REGION_CTRL_REGION_EN_MASK, 1);
	ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_MATCH_MODE_MASK,
			pci_region->mode);
	ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_NUM_MATCH_EN_MASK, 1);

	if (pci_region->mode == PCI_BAR_MATCH_MODE)
		ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_BAR_NUM_MASK,
				pci_region->bar);

	rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val);

	/* Return the DBI window to the default location
	 * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
	 * in case the firmware security is enabled
	 */
	hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);

	if (rc)
		dev_err(hdev->dev, "failed to map bar %u to 0x%08llx\n",
				pci_region->bar, pci_region->addr);

	return rc;
}
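
/*
 * Illustrative (hypothetical) usage from ASIC-specific init_iatu code. The
 * region number, BAR and target address below are made up; real values
 * depend on the ASIC's memory map.
 *
 *	struct hl_inbound_pci_region pci_region;
 *
 *	pci_region.mode = PCI_BAR_MATCH_MODE;
 *	pci_region.bar = 4;			// e.g. the DDR BAR
 *	pci_region.addr = DRAM_PHYS_BASE;	// device-side target address
 *	rc = hl_pci_set_inbound_region(hdev, 1, &pci_region);
 */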

/**
 * hl_pci_set_outbound_region() - Configure outbound region 0.
 * @hdev: Pointer to hl_device structure.
 * @pci_region: Outbound region parameters.
 *
 * Configure the iATU outbound region 0.
 *
 * Return: 0 on success, negative value for failure.
 */
int hl_pci_set_outbound_region(struct hl_device *hdev,
		struct hl_outbound_pci_region *pci_region)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 outbound_region_end_address;
	int rc = 0;

	/* Outbound Region 0 */
	outbound_region_end_address =
			pci_region->addr + pci_region->size - 1;
	rc |= hl_pci_iatu_write(hdev, 0x008,
				lower_32_bits(pci_region->addr));
	rc |= hl_pci_iatu_write(hdev, 0x00C,
				upper_32_bits(pci_region->addr));
	rc |= hl_pci_iatu_write(hdev, 0x010,
				lower_32_bits(outbound_region_end_address));
	rc |= hl_pci_iatu_write(hdev, 0x014, 0);

	if ((hdev->power9_64bit_dma_enable) && (hdev->dma_mask == 64))
		rc |= hl_pci_iatu_write(hdev, 0x018, 0x08000000);
	else
		rc |= hl_pci_iatu_write(hdev, 0x018, 0);

	rc |= hl_pci_iatu_write(hdev, 0x020,
				upper_32_bits(outbound_region_end_address));
	/* Increase region size */
	rc |= hl_pci_iatu_write(hdev, 0x000, 0x00002000);
	/* Enable */
	rc |= hl_pci_iatu_write(hdev, 0x004, 0x80000000);

	/* Return the DBI window to the default location
	 * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
	 * in case the firmware security is enabled
	 */
	hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);

	return rc;
}
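
/*
 * Reading guide for the register offsets above (assumptions based on the
 * usual DesignWare iATU outbound layout, not on a Habana spec): 0x008/0x00C
 * are the lower/upper base address, 0x010/0x020 the lower/upper limit,
 * 0x014/0x018 the lower/upper target address, and 0x000/0x004 the two region
 * control registers. The 0x08000000 upper target value in the POWER9 64-bit
 * DMA case presumably selects the host's high DMA window by setting a high
 * target-address bit.
 */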

/**
 * hl_pci_set_dma_mask() - Set DMA masks for the device.
 * @hdev: Pointer to hl_device structure.
 *
 * This function sets the DMA masks (regular and consistent) to the value
 * stored in hdev->dma_mask and fails if either mask cannot be set.
 *
 * Return: 0 on success, non-zero for failure.
 */
static int hl_pci_set_dma_mask(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int rc;

	/* set DMA mask */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(hdev->dma_mask));
	if (rc) {
		dev_err(hdev->dev,
			"Failed to set pci dma mask to %d bits, error %d\n",
			hdev->dma_mask, rc);
		return rc;
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(hdev->dma_mask));
	if (rc) {
		dev_err(hdev->dev,
			"Failed to set pci consistent dma mask to %d bits, error %d\n",
			hdev->dma_mask, rc);
		return rc;
	}

	return 0;
}
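
/*
 * Side note (not a change to this driver): on kernels where the legacy
 * pci_set_dma_mask()/pci_set_consistent_dma_mask() wrappers are deprecated,
 * the pair of calls above is equivalent to a single
 *
 *	rc = dma_set_mask_and_coherent(&pdev->dev,
 *					DMA_BIT_MASK(hdev->dma_mask));
 *
 * This is offered only as a reading aid for hl_pci_set_dma_mask().
 */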

/**
 * hl_pci_init() - PCI initialization code.
 * @hdev: Pointer to hl_device structure.
 * @cpu_boot_status_reg: status register of the device's CPU
 * @boot_err0_reg: boot error register of the device's CPU
 * @preboot_ver_timeout: how long to wait before bailing out on reading
 *                       the preboot version
 *
 * Set DMA masks, initialize the PCI controller and map the PCI BARs.
 *
 * Return: 0 on success, non-zero for failure.
 */
int hl_pci_init(struct hl_device *hdev, u32 cpu_boot_status_reg,
		u32 boot_err0_reg, u32 preboot_ver_timeout)
{
	struct pci_dev *pdev = hdev->pdev;
	int rc;

	if (hdev->reset_pcilink)
		hl_pci_reset_link_through_bridge(hdev);

	rc = pci_enable_device_mem(pdev);
	if (rc) {
		dev_err(hdev->dev, "can't enable PCI device\n");
		return rc;
	}

	pci_set_master(pdev);

	rc = hdev->asic_funcs->pci_bars_map(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to initialize PCI BARs\n");
		goto disable_device;
	}

	rc = hdev->asic_funcs->init_iatu(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to initialize iATU\n");
		goto unmap_pci_bars;
	}

	rc = hl_pci_set_dma_mask(hdev);
	if (rc)
		goto unmap_pci_bars;

	/* Before continuing in the initialization, we need to read the preboot
	 * version to determine whether we run with a security-enabled firmware.
	 * The check will be done in each ASIC's specific code.
	 */
	rc = hl_fw_read_preboot_ver(hdev, cpu_boot_status_reg, boot_err0_reg,
					preboot_ver_timeout);
	if (rc)
		goto unmap_pci_bars;

	return 0;

unmap_pci_bars:
	hl_pci_bars_unmap(hdev);
disable_device:
	pci_clear_master(pdev);
	pci_disable_device(pdev);

	return rc;
}

/**
 * hl_pci_fini() - PCI finalization code.
 * @hdev: Pointer to hl_device structure.
 *
 * Unmap PCI BARs and disable PCI device.
 */
void hl_pci_fini(struct hl_device *hdev)
{
	hl_pci_bars_unmap(hdev);

	pci_clear_master(hdev->pdev);
	pci_disable_device(hdev->pdev);
}