// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"
#include "trace.h"

/**
 * DOC: cxl core pci
 *
 * Compute Express Link protocols are layered on top of PCIe. CXL core provides
 * a set of helpers for CXL interactions which occur via PCIe.
 */

static unsigned short media_ready_timeout = 60;
module_param(media_ready_timeout, ushort, 0644);
MODULE_PARM_DESC(media_ready_timeout, "seconds to wait for media ready");
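
/*
 * Illustrative note (an editorial addition, not upstream commentary): the
 * 0644 mode makes this tunable writable at runtime, e.g.:
 *
 *	echo 120 > /sys/module/cxl_core/parameters/media_ready_timeout
 *
 * or settable at boot via "cxl_core.media_ready_timeout=120" on the kernel
 * command line ("cxl_core" assumes the default module name for this file).
 */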

struct cxl_walk_context {
	struct pci_bus *bus;
	struct cxl_port *port;
	int type;
	int error;
	int count;
};

static int match_add_dports(struct pci_dev *pdev, void *data)
{
	struct cxl_walk_context *ctx = data;
	struct cxl_port *port = ctx->port;
	int type = pci_pcie_type(pdev);
	struct cxl_register_map map;
	struct cxl_dport *dport;
	u32 lnkcap, port_num;
	int rc;

	if (pdev->bus != ctx->bus)
		return 0;
	if (!pci_is_pcie(pdev))
		return 0;
	if (type != ctx->type)
		return 0;
	if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP,
				  &lnkcap))
		return 0;

	rc = cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	if (rc)
		dev_dbg(&port->dev, "failed to find component registers\n");

	port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
	dport = devm_cxl_add_dport(port, &pdev->dev, port_num, map.resource);
	if (IS_ERR(dport)) {
		ctx->error = PTR_ERR(dport);
		return PTR_ERR(dport);
	}
	ctx->count++;

	return 0;
}

/**
 * devm_cxl_port_enumerate_dports - enumerate downstream ports of the upstream port
 * @port: cxl_port whose ->uport_dev is the upstream of dports to be enumerated
 *
 * Returns a positive number of dports enumerated or a negative error
 * code.
 */
int devm_cxl_port_enumerate_dports(struct cxl_port *port)
{
	struct pci_bus *bus = cxl_port_to_pci_bus(port);
	struct cxl_walk_context ctx;
	int type;

	if (!bus)
		return -ENXIO;

	if (pci_is_root_bus(bus))
		type = PCI_EXP_TYPE_ROOT_PORT;
	else
		type = PCI_EXP_TYPE_DOWNSTREAM;

	ctx = (struct cxl_walk_context) {
		.port = port,
		.bus = bus,
		.type = type,
	};
	pci_walk_bus(bus, match_add_dports, &ctx);

	if (ctx.count == 0)
		return -ENODEV;
	if (ctx.error)
		return ctx.error;
	return ctx.count;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, CXL);
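
/*
 * Example (a minimal sketch, not part of the upstream file): a port driver
 * probe path can consume the helper above like so, where example_probe() is
 * a hypothetical caller:
 *
 *	static int example_probe(struct cxl_port *port)
 *	{
 *		int nr_dports = devm_cxl_port_enumerate_dports(port);
 *
 *		if (nr_dports < 0)
 *			return nr_dports;
 *		dev_dbg(&port->dev, "enumerated %d dports\n", nr_dports);
 *		return 0;
 *	}
 */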

static int cxl_dvsec_mem_range_valid(struct cxl_dev_state *cxlds, int id)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	bool valid = false;
	int rc, i;
	u32 temp;

	if (id > CXL_DVSEC_RANGE_MAX)
		return -EINVAL;

	/* Check MEM INFO VALID bit first, give up after 1s */
	i = 1;
	do {
		rc = pci_read_config_dword(pdev,
					   d + CXL_DVSEC_RANGE_SIZE_LOW(id),
					   &temp);
		if (rc)
			return rc;

		valid = FIELD_GET(CXL_DVSEC_MEM_INFO_VALID, temp);
		if (valid)
			break;
		msleep(1000);
	} while (i--);

	if (!valid) {
		dev_err(&pdev->dev,
			"Timeout awaiting memory range %d valid after 1s.\n",
			id);
		return -ETIMEDOUT;
	}

	return 0;
}

static int cxl_dvsec_mem_range_active(struct cxl_dev_state *cxlds, int id)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	bool active = false;
	int rc, i;
	u32 temp;

	if (id > CXL_DVSEC_RANGE_MAX)
		return -EINVAL;

	/* Check MEM ACTIVE bit, up to 60s timeout by default */
	for (i = media_ready_timeout; i; i--) {
		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(id), &temp);
		if (rc)
			return rc;

		active = FIELD_GET(CXL_DVSEC_MEM_ACTIVE, temp);
		if (active)
			break;
		msleep(1000);
	}

	if (!active) {
		dev_err(&pdev->dev,
			"timeout awaiting memory active after %d seconds\n",
			media_ready_timeout);
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * Wait up to @media_ready_timeout for the device to report memory
 * active.
 */
int cxl_await_media_ready(struct cxl_dev_state *cxlds)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	int rc, i, hdm_count;
	u64 md_status;
	u16 cap;

	rc = pci_read_config_word(pdev,
				  d + CXL_DVSEC_CAP_OFFSET, &cap);
	if (rc)
		return rc;

	hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap);
	for (i = 0; i < hdm_count; i++) {
		rc = cxl_dvsec_mem_range_valid(cxlds, i);
		if (rc)
			return rc;
	}

	for (i = 0; i < hdm_count; i++) {
		rc = cxl_dvsec_mem_range_active(cxlds, i);
		if (rc)
			return rc;
	}

	md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
	if (!CXLMDEV_READY(md_status))
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, CXL);
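
/*
 * Example (sketch, modeled on how a CXL PCI endpoint driver typically
 * consumes this helper; the surrounding probe function is hypothetical):
 *
 *	rc = cxl_await_media_ready(cxlds);
 *	if (rc == 0)
 *		cxlds->media_ready = true;
 *	else
 *		dev_warn(&pdev->dev, "Media not active (%d)\n", rc);
 */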

static int wait_for_valid(struct pci_dev *pdev, int d)
{
	u32 val;
	int rc;

	/*
	 * Memory_Info_Valid: When set, indicates that the CXL Range 1 Size high
	 * and Size Low registers are valid. Must be set within 1 second of
	 * deassertion of reset to CXL device. Likely it is already set by the
	 * time this runs, but otherwise give a 1.5 second timeout in case of
	 * clock skew.
	 */
	rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
	if (rc)
		return rc;

	if (val & CXL_DVSEC_MEM_INFO_VALID)
		return 0;

	msleep(1500);

	rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
	if (rc)
		return rc;

	if (val & CXL_DVSEC_MEM_INFO_VALID)
		return 0;

	return -ETIMEDOUT;
}

static int cxl_set_mem_enable(struct cxl_dev_state *cxlds, u16 val)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	u16 ctrl;
	int rc;

	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
	if (rc < 0)
		return rc;

	if ((ctrl & CXL_DVSEC_MEM_ENABLE) == val)
		return 1;
	ctrl &= ~CXL_DVSEC_MEM_ENABLE;
	ctrl |= val;

	rc = pci_write_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, ctrl);
	if (rc < 0)
		return rc;

	return 0;
}

static void clear_mem_enable(void *cxlds)
{
	cxl_set_mem_enable(cxlds, 0);
}
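
/*
 * Arm Mem_Enable and register a devm action to clear it on teardown. Note
 * that cxl_set_mem_enable() returns 1 when the bit already matched the
 * requested state, in which case some other agent owns the setting and no
 * undo action is registered.
 */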
static int devm_cxl_enable_mem(struct device *host, struct cxl_dev_state *cxlds)
{
	int rc;

	rc = cxl_set_mem_enable(cxlds, CXL_DVSEC_MEM_ENABLE);
	if (rc < 0)
		return rc;
	if (rc > 0)
		return 0;
	return devm_add_action_or_reset(host, clear_mem_enable, cxlds);
}

/* require dvsec ranges to be covered by a locked platform window */
static int dvsec_range_allowed(struct device *dev, void *arg)
{
	struct range *dev_range = arg;
	struct cxl_decoder *cxld;

	if (!is_root_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);

	if (!(cxld->flags & CXL_DECODER_F_RAM))
		return 0;

	return range_contains(&cxld->hpa_range, dev_range);
}

static void disable_hdm(void *_cxlhdm)
{
	u32 global_ctrl;
	struct cxl_hdm *cxlhdm = _cxlhdm;
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;

	global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
	writel(global_ctrl & ~CXL_HDM_DECODER_ENABLE,
	       hdm + CXL_HDM_DECODER_CTRL_OFFSET);
}

static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	u32 global_ctrl;

	global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
	writel(global_ctrl | CXL_HDM_DECODER_ENABLE,
	       hdm + CXL_HDM_DECODER_CTRL_OFFSET);

	return devm_add_action_or_reset(host, disable_hdm, cxlhdm);
}

int cxl_dvsec_rr_decode(struct device *dev, int d,
			struct cxl_endpoint_dvsec_info *info)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int hdm_count, rc, i, ranges = 0;
	u16 cap, ctrl;

	if (!d) {
		dev_dbg(dev, "No DVSEC Capability\n");
		return -ENXIO;
	}

	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CAP_OFFSET, &cap);
	if (rc)
		return rc;

	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
	if (rc)
		return rc;

	if (!(cap & CXL_DVSEC_MEM_CAPABLE)) {
		dev_dbg(dev, "Not MEM Capable\n");
		return -ENXIO;
	}

	/*
	 * The spec does not allow MEM.capable to be set with zero legacy
	 * HDM decoders (and values above 2 are undefined as of CXL 2.0).
	 * Since this driver is for a spec-defined class code which must be
	 * CXL.mem capable, there is no point in continuing to enable
	 * CXL.mem with an out-of-spec decoder count.
	 */
	hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap);
	if (!hdm_count || hdm_count > 2)
		return -EINVAL;

	rc = wait_for_valid(pdev, d);
	if (rc) {
		dev_dbg(dev, "Failure awaiting MEM_INFO_VALID (%d)\n", rc);
		return rc;
	}

	/*
	 * The current DVSEC values are moot if the memory capability is
	 * disabled, and they will remain moot after the HDM Decoder
	 * capability is enabled.
	 */
	info->mem_enabled = FIELD_GET(CXL_DVSEC_MEM_ENABLE, ctrl);
	if (!info->mem_enabled)
		return 0;

	for (i = 0; i < hdm_count; i++) {
		u64 base, size;
		u32 temp;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_SIZE_HIGH(i), &temp);
		if (rc)
			return rc;

		size = (u64)temp << 32;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(i), &temp);
		if (rc)
			return rc;

		size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK;
		if (!size) {
			info->dvsec_range[i] = (struct range) {
				.start = 0,
				.end = CXL_RESOURCE_NONE,
			};
			continue;
		}

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_BASE_HIGH(i), &temp);
		if (rc)
			return rc;

		base = (u64)temp << 32;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_BASE_LOW(i), &temp);
		if (rc)
			return rc;

		base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK;

		info->dvsec_range[i] = (struct range) {
			.start = base,
			.end = base + size - 1
		};

		ranges++;
	}

	info->ranges = ranges;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dvsec_rr_decode, CXL);
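
/*
 * Worked example (illustrative, values invented): the loop above rebuilds
 * each 64-bit size/base by placing the HIGH register in bits 63:32 and
 * masking the LOW register, so for a 256MB range based at 0x100000000:
 *
 *	SIZE_HIGH = 0x0, SIZE_LOW = 0x10000003 -> size = 0x10000000
 *	BASE_HIGH = 0x1, BASE_LOW = 0x00000000 -> base = 0x100000000
 *
 * (the low bits of SIZE_LOW here carry the Memory_Info_Valid/Mem_Active
 * flags and are masked off by CXL_DVSEC_MEM_SIZE_LOW_MASK).
 */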

/**
 * cxl_hdm_decode_init() - Setup HDM decoding for the endpoint
 * @cxlds: Device state
 * @cxlhdm: Mapped HDM decoder Capability
 * @info: Cached DVSEC range registers info
 *
 * Try to enable the endpoint's HDM Decoder Capability
 */
int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
			struct cxl_endpoint_dvsec_info *info)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	struct cxl_port *port = cxlhdm->port;
	struct device *dev = cxlds->dev;
	struct cxl_port *root;
	int i, rc, allowed;
	u32 global_ctrl = 0;

	if (hdm)
		global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);

	/*
	 * If the HDM Decoder Capability is already enabled then assume
	 * that some other agent like platform firmware set it up.
	 */
	if (global_ctrl & CXL_HDM_DECODER_ENABLE || (!hdm && info->mem_enabled))
		return devm_cxl_enable_mem(&port->dev, cxlds);
	else if (!hdm)
		return -ENODEV;

	root = to_cxl_port(port->dev.parent);
	while (!is_cxl_root(root) && is_cxl_port(root->dev.parent))
		root = to_cxl_port(root->dev.parent);
	if (!is_cxl_root(root)) {
		dev_err(dev, "Failed to acquire root port for HDM enable\n");
		return -ENODEV;
	}

	for (i = 0, allowed = 0; info->mem_enabled && i < info->ranges; i++) {
		struct device *cxld_dev;

		cxld_dev = device_find_child(&root->dev, &info->dvsec_range[i],
					     dvsec_range_allowed);
		if (!cxld_dev) {
			dev_dbg(dev, "DVSEC Range%d denied by platform\n", i);
			continue;
		}
		dev_dbg(dev, "DVSEC Range%d allowed by platform\n", i);
		put_device(cxld_dev);
		allowed++;
	}

	if (!allowed && info->mem_enabled) {
		dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n");
		return -ENXIO;
	}

	/*
	 * Per CXL 2.0 Sections 8.1.3.8.3 and 8.1.3.8.4 (DVSEC CXL Range 1
	 * Base [High,Low]), the device ignores the range register values
	 * when HDM operation is enabled, but the spec also recommends
	 * matching DVSEC Range 1,2 to HDM Decoder Range 0,1. So non-zero
	 * info->ranges are expected even though Linux neither requires nor
	 * maintains that match. If at least one DVSEC range is enabled and
	 * allowed, skip HDM Decoder Capability Enable.
	 */
	if (info->mem_enabled)
		return 0;

	rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
	if (rc)
		return rc;

	return devm_cxl_enable_mem(&port->dev, cxlds);
}
EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL);
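
/*
 * Decision summary for cxl_hdm_decode_init() (a descriptive note derived
 * from the code above, not upstream commentary):
 * - HDM decoders already globally enabled, or no mapped HDM capability but
 *   DVSEC Mem_Enable set: trust the existing setup, just arm Mem_Enable.
 * - No mapped HDM capability and Mem_Enable clear: nothing to drive, -ENODEV.
 * - Mem_Enable set and at least one DVSEC range backed by a platform
 *   decoder: stay in DVSEC range operation and return 0.
 * - Otherwise: enable the HDM Decoder Capability, then Mem_Enable.
 */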

#define CXL_DOE_TABLE_ACCESS_REQ_CODE		0x000000ff
#define   CXL_DOE_TABLE_ACCESS_REQ_CODE_READ	0
#define CXL_DOE_TABLE_ACCESS_TABLE_TYPE		0x0000ff00
#define   CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA	0
#define CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE	0xffff0000
#define CXL_DOE_TABLE_ACCESS_LAST_ENTRY		0xffff
#define CXL_DOE_PROTOCOL_TABLE_ACCESS 2

#define CDAT_DOE_REQ(entry_handle) cpu_to_le32				\
	(FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE,			\
		    CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) |		\
	 FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE,			\
		    CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA) |		\
	 FIELD_PREP(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, (entry_handle)))
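
/*
 * For example (illustrative): CDAT_DOE_REQ(0) packs request code 0 (read),
 * table type 0 (CDAT) and entry handle 0 into one little-endian dword,
 * i.e. cpu_to_le32(0x00000000); entry handle 3 yields cpu_to_le32(0x00030000).
 */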

static int cxl_cdat_get_length(struct device *dev,
			       struct pci_doe_mb *cdat_doe,
			       size_t *length)
{
	__le32 request = CDAT_DOE_REQ(0);
	__le32 response[2];
	int rc;

	rc = pci_doe(cdat_doe, PCI_DVSEC_VENDOR_ID_CXL,
		     CXL_DOE_PROTOCOL_TABLE_ACCESS,
		     &request, sizeof(request),
		     &response, sizeof(response));
	if (rc < 0) {
		dev_err(dev, "DOE failed: %d\n", rc);
		return rc;
	}
	if (rc < sizeof(response))
		return -EIO;

	*length = le32_to_cpu(response[1]);
	dev_dbg(dev, "CDAT length %zu\n", *length);

	return 0;
}

static int cxl_cdat_read_table(struct device *dev,
			       struct pci_doe_mb *cdat_doe,
			       void *cdat_table, size_t *cdat_length)
{
	size_t length = *cdat_length + sizeof(__le32);
	__le32 *data = cdat_table;
	int entry_handle = 0;
	__le32 saved_dw = 0;

	do {
		__le32 request = CDAT_DOE_REQ(entry_handle);
		struct cdat_entry_header *entry;
		size_t entry_dw;
		int rc;

		rc = pci_doe(cdat_doe, PCI_DVSEC_VENDOR_ID_CXL,
			     CXL_DOE_PROTOCOL_TABLE_ACCESS,
			     &request, sizeof(request),
			     data, length);
		if (rc < 0) {
			dev_err(dev, "DOE failed: %d\n", rc);
			return rc;
		}

		/* 1 DW Table Access Response Header + CDAT entry */
		entry = (struct cdat_entry_header *)(data + 1);
		if ((entry_handle == 0 &&
		     rc != sizeof(__le32) + sizeof(struct cdat_header)) ||
		    (entry_handle > 0 &&
		     (rc < sizeof(__le32) + sizeof(*entry) ||
		      rc != sizeof(__le32) + le16_to_cpu(entry->length))))
			return -EIO;

		/* Get the CXL table access header entry handle */
		entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
					 le32_to_cpu(data[0]));
		entry_dw = rc / sizeof(__le32);
		/* Skip Header */
		entry_dw -= 1;
		/*
		 * Table Access Response Header overwrote the last DW of
		 * previous entry, so restore that DW
		 */
		*data = saved_dw;
		length -= entry_dw * sizeof(__le32);
		data += entry_dw;
		saved_dw = *data;
	} while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);

	/* Length in CDAT header may exceed concatenation of CDAT entries */
	*cdat_length -= length - sizeof(__le32);

	return 0;
}

/**
 * read_cdat_data - Read the CDAT data on this port
 * @port: Port to read data from
 *
 * This call will sleep waiting for responses from the DOE mailbox.
 */
void read_cdat_data(struct cxl_port *port)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
	struct device *host = cxlmd->dev.parent;
	struct device *dev = &port->dev;
	struct pci_doe_mb *cdat_doe;
	size_t cdat_length;
	void *cdat_table;
	int rc;

	if (!dev_is_pci(host))
		return;
	cdat_doe = pci_find_doe_mailbox(to_pci_dev(host),
					PCI_DVSEC_VENDOR_ID_CXL,
					CXL_DOE_PROTOCOL_TABLE_ACCESS);
	if (!cdat_doe) {
		dev_dbg(dev, "No CDAT mailbox\n");
		return;
	}

	port->cdat_available = true;

	if (cxl_cdat_get_length(dev, cdat_doe, &cdat_length)) {
		dev_dbg(dev, "No CDAT length\n");
		return;
	}

	cdat_table = devm_kzalloc(dev, cdat_length + sizeof(__le32),
				  GFP_KERNEL);
	if (!cdat_table)
		return;

	rc = cxl_cdat_read_table(dev, cdat_doe, cdat_table, &cdat_length);
	if (rc) {
		/* Don't leave table data allocated on error */
		devm_kfree(dev, cdat_table);
		dev_err(dev, "CDAT data read error\n");
		return;
	}

	port->cdat.table = cdat_table + sizeof(__le32);
	port->cdat.length = cdat_length;
}
EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
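
/*
 * Example (sketch): a port driver may invoke this once at probe time and
 * treat failure as non-fatal, since consumers check port->cdat.table for
 * NULL before use:
 *
 *	read_cdat_data(port);
 */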

void cxl_cor_error_detected(struct pci_dev *pdev)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	void __iomem *addr;
	u32 status;

	if (!cxlds->regs.ras)
		return;

	addr = cxlds->regs.ras + CXL_RAS_CORRECTABLE_STATUS_OFFSET;
	status = readl(addr);
	if (status & CXL_RAS_CORRECTABLE_STATUS_MASK) {
		writel(status & CXL_RAS_CORRECTABLE_STATUS_MASK, addr);
		trace_cxl_aer_correctable_error(cxlds->cxlmd, status);
	}
}
EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);

/* CXL spec rev 3.0, Section 8.2.4.16.1 */
static void header_log_copy(struct cxl_dev_state *cxlds, u32 *log)
{
	void __iomem *addr;
	u32 *log_addr;
	int i, log_u32_size = CXL_HEADERLOG_SIZE / sizeof(u32);

	addr = cxlds->regs.ras + CXL_RAS_HEADER_LOG_OFFSET;
	log_addr = log;

	for (i = 0; i < log_u32_size; i++) {
		*log_addr = readl(addr);
		log_addr++;
		addr += sizeof(u32);
	}
}

/*
 * Log the state of the RAS status registers and prepare them to log the
 * next error status. Returns true if a reset is needed.
 */
static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
{
	u32 hl[CXL_HEADERLOG_SIZE_U32];
	void __iomem *addr;
	u32 status;
	u32 fe;

	if (!cxlds->regs.ras)
		return false;

	addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
	status = readl(addr);
	if (!(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK))
		return false;

	/* If multiple errors, log header points to first error from ctrl reg */
	if (hweight32(status) > 1) {
		void __iomem *rcc_addr =
			cxlds->regs.ras + CXL_RAS_CAP_CONTROL_OFFSET;

		fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
				   readl(rcc_addr)));
	} else {
		fe = status;
	}

	header_log_copy(cxlds, hl);
	trace_cxl_aer_uncorrectable_error(cxlds->cxlmd, status, fe, hl);
	writel(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK, addr);

	return true;
}

pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
				    pci_channel_state_t state)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;
	bool ue;

	/*
	 * A frozen channel indicates an impending reset which is fatal to
	 * CXL.mem operation, and will likely crash the system. On the off
	 * chance the situation is recoverable dump the status of the RAS
	 * capability registers and bounce the active state of the memdev.
	 */
	ue = cxl_report_and_clear(cxlds);

	switch (state) {
	case pci_channel_io_normal:
		if (ue) {
			device_release_driver(dev);
			return PCI_ERS_RESULT_NEED_RESET;
		}
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		dev_warn(&pdev->dev,
			 "%s: frozen state error detected, disable CXL.mem\n",
			 dev_name(dev));
		device_release_driver(dev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_warn(&pdev->dev,
			 "failure state error detected, request disconnect\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
EXPORT_SYMBOL_NS_GPL(cxl_error_detected, CXL);
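
/*
 * Example (sketch): a CXL PCI driver can wire the handlers above into its
 * struct pci_driver via pci_error_handlers, e.g.:
 *
 *	static const struct pci_error_handlers example_error_handlers = {
 *		.error_detected		= cxl_error_detected,
 *		.cor_error_detected	= cxl_cor_error_detected,
 *	};
 *
 * .cor_error_detected only logs and clears correctable RAS status, so it
 * returns no recovery result.
 */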