xref: /kernel/linux/linux-6.6/drivers/pci/pcie/dpc.c (revision 62306a36)
// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Express Downstream Port Containment services driver
 * Author: Keith Busch <keith.busch@intel.com>
 *
 * Copyright (C) 2016 Intel Corp.
 */

#define dev_fmt(fmt) "DPC: " fmt

#include <linux/aer.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/pci.h>

#include "portdrv.h"
#include "../pci.h"

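/* Indexed by RP PIO error status bit position; reserved bits are NULL */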
static const char * const rp_pio_error_string[] = {
        "Configuration Request received UR Completion",  /* Bit Position 0  */
        "Configuration Request received CA Completion",  /* Bit Position 1  */
        "Configuration Request Completion Timeout",     /* Bit Position 2  */
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        "I/O Request received UR Completion",            /* Bit Position 8  */
        "I/O Request received CA Completion",            /* Bit Position 9  */
        "I/O Request Completion Timeout",                /* Bit Position 10 */
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        "Memory Request received UR Completion",         /* Bit Position 16 */
        "Memory Request received CA Completion",         /* Bit Position 17 */
        "Memory Request Completion Timeout",             /* Bit Position 18 */
};

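/*
 * Save and restore the DPC Control register as part of
 * pci_save_state()/pci_restore_state(), so that an enabled DPC
 * configuration survives suspend/resume and resets.  The save buffer is
 * allocated in dpc_probe() via pci_add_ext_cap_save_buffer().
 */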
void pci_save_dpc_state(struct pci_dev *dev)
{
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        if (!pci_is_pcie(dev))
                return;

        save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC);
        if (!save_state)
                return;

        cap = (u16 *)&save_state->cap.data[0];
        pci_read_config_word(dev, dev->dpc_cap + PCI_EXP_DPC_CTL, cap);
}

void pci_restore_dpc_state(struct pci_dev *dev)
{
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        if (!pci_is_pcie(dev))
                return;

        save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC);
        if (!save_state)
                return;

        cap = (u16 *)&save_state->cap.data[0];
        pci_write_config_word(dev, dev->dpc_cap + PCI_EXP_DPC_CTL, *cap);
}

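/*
 * dpc_reset_link() wakes this up when containment ends;
 * pci_dpc_recovered() waits on it so the hotplug driver can ignore
 * DPC-induced Link Down/Up events.
 */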
static DECLARE_WAIT_QUEUE_HEAD(dpc_completed_waitqueue);

#ifdef CONFIG_HOTPLUG_PCI_PCIE
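/*
 * DPC is complete once DPC Trigger Status is clear (or the port is gone,
 * i.e. config reads return all ones) and dpc_reset_link() is no longer
 * running, as indicated by PCI_DPC_RECOVERING.
 */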
static bool dpc_completed(struct pci_dev *pdev)
{
        u16 status;

        pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_STATUS, &status);
        if ((!PCI_POSSIBLE_ERROR(status)) && (status & PCI_EXP_DPC_STATUS_TRIGGER))
                return false;

        if (test_bit(PCI_DPC_RECOVERING, &pdev->priv_flags))
                return false;

        return true;
}

/**
 * pci_dpc_recovered - whether DPC triggered and has recovered successfully
 * @pdev: PCI device
 *
 * Return true if DPC was triggered for @pdev and has recovered successfully.
 * Wait for recovery if it hasn't completed yet.  Called from the PCIe hotplug
 * driver to recognize and ignore Link Down/Up events caused by DPC.
 */
bool pci_dpc_recovered(struct pci_dev *pdev)
{
        struct pci_host_bridge *host;

        if (!pdev->dpc_cap)
                return false;

        /*
         * Synchronization between hotplug and DPC is not supported
         * if DPC is owned by firmware and EDR is not enabled.
         */
        host = pci_find_host_bridge(pdev->bus);
        if (!host->native_dpc && !IS_ENABLED(CONFIG_PCIE_EDR))
                return false;

        /*
         * Need a timeout in case DPC never completes due to failure of
         * dpc_wait_rp_inactive().  The spec doesn't mandate a time limit,
         * but reports indicate that DPC completes within 4 seconds.
         */
        wait_event_timeout(dpc_completed_waitqueue, dpc_completed(pdev),
                           msecs_to_jiffies(4000));

        return test_and_clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
}
#endif /* CONFIG_HOTPLUG_PCI_PCIE */

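/*
 * Poll DPC RP Busy in the DPC Status register every 10 ms until the Root
 * Port deasserts it, giving up after about a second.
 */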
static int dpc_wait_rp_inactive(struct pci_dev *pdev)
{
        unsigned long timeout = jiffies + HZ;
        u16 cap = pdev->dpc_cap, status;

        pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
        while (status & PCI_EXP_DPC_RP_BUSY &&
                                        !time_after(jiffies, timeout)) {
                msleep(10);
                pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
        }
        if (status & PCI_EXP_DPC_RP_BUSY) {
                pci_warn(pdev, "root port still busy\n");
                return -EBUSY;
        }
        return 0;
}

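/*
 * dpc_reset_link() is passed to pcie_do_recovery() as the reset callback
 * (by dpc_handler() below and by the EDR code).  DPC has already taken the
 * Link down in hardware; wait for that to finish, clear DPC Trigger Status
 * so the Port may exit DPC, then wait for the secondary bus to come back.
 */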
pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
{
        pci_ers_result_t ret;
        u16 cap;

        set_bit(PCI_DPC_RECOVERING, &pdev->priv_flags);

        /*
         * DPC disables the Link automatically in hardware, so it has
         * already been reset by the time we get here.
         */
        cap = pdev->dpc_cap;

        /*
         * Wait until the Link is inactive, then clear DPC Trigger Status
         * to allow the Port to leave DPC.
         */
        if (!pcie_wait_for_link(pdev, false))
                pci_info(pdev, "Data Link Layer Link Active not cleared in 1000 msec\n");

        if (pdev->dpc_rp_extensions && dpc_wait_rp_inactive(pdev)) {
                clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
                ret = PCI_ERS_RESULT_DISCONNECT;
                goto out;
        }

        pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
                              PCI_EXP_DPC_STATUS_TRIGGER);

        if (pci_bridge_wait_for_secondary_bus(pdev, "DPC")) {
                clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
                ret = PCI_ERS_RESULT_DISCONNECT;
        } else {
                set_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
                ret = PCI_ERS_RESULT_RECOVERED;
        }
out:
        clear_bit(PCI_DPC_RECOVERING, &pdev->priv_flags);
        wake_up_all(&dpc_completed_waitqueue);
        return ret;
}

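/*
 * Dump the RP PIO registers provided by Root Port Extensions for DPC:
 * status/mask/severity/syserror/exception, the decoded per-bit error
 * strings, the logged TLP header, and (when the log is large enough) the
 * ImpSpec Log and TLP Prefix Log entries.
 */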
static void dpc_process_rp_pio_error(struct pci_dev *pdev)
{
        u16 cap = pdev->dpc_cap, dpc_status, first_error;
        u32 status, mask, sev, syserr, exc, dw0, dw1, dw2, dw3, log, prefix;
        int i;

        pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, &status);
        pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_MASK, &mask);
        pci_err(pdev, "rp_pio_status: %#010x, rp_pio_mask: %#010x\n",
                status, mask);

        pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SEVERITY, &sev);
        pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SYSERROR, &syserr);
        pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_EXCEPTION, &exc);
        pci_err(pdev, "RP PIO severity=%#010x, syserror=%#010x, exception=%#010x\n",
                sev, syserr, exc);

        /* Get First Error Pointer */
        pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &dpc_status);
        first_error = (dpc_status & 0x1f00) >> 8;

        for (i = 0; i < ARRAY_SIZE(rp_pio_error_string); i++) {
                if ((status & ~mask) & (1 << i))
                        pci_err(pdev, "[%2d] %s%s\n", i, rp_pio_error_string[i],
                                first_error == i ? " (First)" : "");
        }

        if (pdev->dpc_rp_log_size < 4)
                goto clear_status;
        pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG,
                              &dw0);
        pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 4,
                              &dw1);
        pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 8,
                              &dw2);
        pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 12,
                              &dw3);
        pci_err(pdev, "TLP Header: %#010x %#010x %#010x %#010x\n",
                dw0, dw1, dw2, dw3);

        if (pdev->dpc_rp_log_size < 5)
                goto clear_status;
        pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, &log);
        pci_err(pdev, "RP PIO ImpSpec Log %#010x\n", log);

        for (i = 0; i < pdev->dpc_rp_log_size - 5; i++) {
                pci_read_config_dword(pdev,
                        cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG + i * 4, &prefix);
                pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
        }
 clear_status:
        pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status);
}

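/*
 * Classify the error that triggered DPC as AER_FATAL or AER_NONFATAL from
 * the AER Uncorrectable Error Status, Mask and Severity registers.
 * Returns 1 if an unmasked uncorrectable error is pending, 0 otherwise.
 */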
static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev,
                                          struct aer_err_info *info)
{
        int pos = dev->aer_cap;
        u32 status, mask, sev;

        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
        status &= ~mask;
        if (!status)
                return 0;

        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
        status &= sev;
        if (status)
                info->severity = AER_FATAL;
        else
                info->severity = AER_NONFATAL;

        return 1;
}

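/*
 * Decode the DPC Trigger Reason (and its extension) from the DPC Status
 * register and log the details: RP PIO registers for an RP PIO trigger,
 * AER registers for an unmasked uncorrectable error.
 */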
void dpc_process_error(struct pci_dev *pdev)
{
        u16 cap = pdev->dpc_cap, status, source, reason, ext_reason;
        struct aer_err_info info;

        pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
        pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source);

        pci_info(pdev, "containment event, status:%#06x source:%#06x\n",
                 status, source);

        reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN) >> 1;
        ext_reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT) >> 5;
        pci_warn(pdev, "%s detected\n",
                 (reason == 0) ? "unmasked uncorrectable error" :
                 (reason == 1) ? "ERR_NONFATAL" :
                 (reason == 2) ? "ERR_FATAL" :
                 (ext_reason == 0) ? "RP PIO error" :
                 (ext_reason == 1) ? "software trigger" :
                                     "reserved error");

        /* show RP PIO error detail information */
        if (pdev->dpc_rp_extensions && reason == 3 && ext_reason == 0)
                dpc_process_rp_pio_error(pdev);
        else if (reason == 0 &&
                 dpc_get_aer_uncorrect_severity(pdev, &info) &&
                 aer_get_device_error_info(pdev, &info)) {
                aer_print_error(pdev, &info);
                pci_aer_clear_nonfatal_status(pdev);
                pci_aer_clear_fatal_status(pdev);
        }
}

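/* Threaded half of the DPC interrupt: log the error, then run recovery */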
static irqreturn_t dpc_handler(int irq, void *context)
{
        struct pci_dev *pdev = context;

        dpc_process_error(pdev);

        /* We configure DPC so it only triggers on ERR_FATAL */
        pcie_do_recovery(pdev, pci_channel_io_frozen, dpc_reset_link);

        return IRQ_HANDLED;
}

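/*
 * Hard IRQ half: the interrupt may be shared, so bail out unless DPC
 * Interrupt Status is set.  Acknowledge it and wake the handler thread
 * only if DPC actually triggered.
 */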
static irqreturn_t dpc_irq(int irq, void *context)
{
        struct pci_dev *pdev = context;
        u16 cap = pdev->dpc_cap, status;

        pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);

        if (!(status & PCI_EXP_DPC_STATUS_INTERRUPT) || PCI_POSSIBLE_ERROR(status))
                return IRQ_NONE;

        pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
                              PCI_EXP_DPC_STATUS_INTERRUPT);
        if (status & PCI_EXP_DPC_STATUS_TRIGGER)
                return IRQ_WAKE_THREAD;
        return IRQ_HANDLED;
}

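/*
 * Called during enumeration to cache the DPC capability offset, note
 * whether Root Port Extensions for DPC are implemented, and validate the
 * RP PIO log size unless a quirk already set it.
 */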
void pci_dpc_init(struct pci_dev *pdev)
{
        u16 cap;

        pdev->dpc_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DPC);
        if (!pdev->dpc_cap)
                return;

        pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CAP, &cap);
        if (!(cap & PCI_EXP_DPC_CAP_RP_EXT))
                return;

        pdev->dpc_rp_extensions = true;

        /* Quirks may set dpc_rp_log_size if device or firmware is buggy */
        if (!pdev->dpc_rp_log_size) {
                pdev->dpc_rp_log_size =
                        (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
                if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
                        pci_err(pdev, "RP PIO log size %u is invalid\n",
                                pdev->dpc_rp_log_size);
                        pdev->dpc_rp_log_size = 0;
                }
        }
}

#define FLAG(x, y) (((x) & (y)) ? '+' : '-')
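/*
 * Bind the DPC port service: request the shared, threaded DPC interrupt,
 * enable DPC triggering on ERR_FATAL with interrupt generation, and
 * allocate the save buffer used by pci_save_dpc_state().
 */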
static int dpc_probe(struct pcie_device *dev)
{
        struct pci_dev *pdev = dev->port;
        struct device *device = &dev->device;
        int status;
        u16 ctl, cap;

        if (!pcie_aer_is_native(pdev) && !pcie_ports_dpc_native)
                return -ENOTSUPP;

        status = devm_request_threaded_irq(device, dev->irq, dpc_irq,
                                           dpc_handler, IRQF_SHARED,
                                           "pcie-dpc", pdev);
        if (status) {
                pci_warn(pdev, "request IRQ%d failed: %d\n", dev->irq,
                         status);
                return status;
        }

        pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CAP, &cap);
        pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl);

        ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
        pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
        pci_info(pdev, "enabled with IRQ %d\n", dev->irq);

        pci_info(pdev, "error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
                 cap & PCI_EXP_DPC_IRQ, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT),
                 FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP),
                 FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), pdev->dpc_rp_log_size,
                 FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE));

        pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_DPC, sizeof(u16));
        return status;
}

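/* Disable DPC triggering and its interrupt when the port service unbinds */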
static void dpc_remove(struct pcie_device *dev)
{
        struct pci_dev *pdev = dev->port;
        u16 ctl;

        pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl);
        ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN);
        pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
}

static struct pcie_port_service_driver dpcdriver = {
        .name           = "dpc",
        .port_type      = PCIE_ANY_PORT,
        .service        = PCIE_PORT_SERVICE_DPC,
        .probe          = dpc_probe,
        .remove         = dpc_remove,
};

int __init pcie_dpc_init(void)
{
        return pcie_port_service_register(&dpcdriver);
}