// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2021 Intel Corporation. All rights reserved.

#include <linux/platform_device.h>
#include <linux/genalloc.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <cxlmem.h>

#include "../watermark.h"
#include "mock.h"

static int interleave_arithmetic;

#define NR_CXL_HOST_BRIDGES 2
#define NR_CXL_SINGLE_HOST 1
#define NR_CXL_RCH 1
#define NR_CXL_ROOT_PORTS 2
#define NR_CXL_SWITCH_PORTS 2
#define NR_CXL_PORT_DECODERS 8
#define NR_BRIDGES (NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + NR_CXL_RCH)

static struct platform_device *cxl_acpi;
static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
#define NR_MULTI_ROOT (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS)
static struct platform_device *cxl_root_port[NR_MULTI_ROOT];
static struct platform_device *cxl_switch_uport[NR_MULTI_ROOT];
#define NR_MEM_MULTI \
	(NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS)
static struct platform_device *cxl_switch_dport[NR_MEM_MULTI];

static struct platform_device *cxl_hb_single[NR_CXL_SINGLE_HOST];
static struct platform_device *cxl_root_single[NR_CXL_SINGLE_HOST];
static struct platform_device *cxl_swu_single[NR_CXL_SINGLE_HOST];
#define NR_MEM_SINGLE (NR_CXL_SINGLE_HOST * NR_CXL_SWITCH_PORTS)
static struct platform_device *cxl_swd_single[NR_MEM_SINGLE];

struct platform_device *cxl_mem[NR_MEM_MULTI];
struct platform_device *cxl_mem_single[NR_MEM_SINGLE];

static struct platform_device *cxl_rch[NR_CXL_RCH];
static struct platform_device *cxl_rcd[NR_CXL_RCH];

static inline bool is_multi_bridge(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
		if (&cxl_host_bridge[i]->dev == dev)
			return true;
	return false;
}

static inline bool is_single_bridge(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
		if (&cxl_hb_single[i]->dev == dev)
			return true;
	return false;
}

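/*
 * Mock ACPI companion objects. Each mock host bridge's ACPI handle simply
 * points back at its own acpi_device so that find_host_bridge() can map a
 * handle to a bridge index.
 */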
static struct acpi_device acpi0017_mock;
static struct acpi_device host_bridge[NR_BRIDGES] = {
	[0] = {
		.handle = &host_bridge[0],
	},
	[1] = {
		.handle = &host_bridge[1],
	},
	[2] = {
		.handle = &host_bridge[2],
	},
	[3] = {
		.handle = &host_bridge[3],
	},
};

static bool is_mock_dev(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++)
		if (dev == &cxl_mem[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++)
		if (dev == &cxl_mem_single[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++)
		if (dev == &cxl_rcd[i]->dev)
			return true;
	if (dev == &cxl_acpi->dev)
		return true;
	return false;
}

static bool is_mock_adev(struct acpi_device *adev)
{
	int i;

	if (adev == &acpi0017_mock)
		return true;

	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
		if (adev == &host_bridge[i])
			return true;

	return false;
}

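/*
 * Static mock CEDT: one CHBS per emulated host bridge plus nine CFMWS
 * windows. cfmws0-5 use modulo interleave arithmetic; cfmws6-8 use XOR
 * arithmetic backed by the cxims0 entry. Base addresses are assigned at
 * init time by populate_cedt().
 */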
static struct {
	struct acpi_table_cedt cedt;
	struct acpi_cedt_chbs chbs[NR_BRIDGES];
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws0;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws1;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws2;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws3;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws4;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws5;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws6;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws7;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[4];
	} cfmws8;
	struct {
		struct acpi_cedt_cxims cxims;
		u64 xormap_list[2];
	} cxims0;
} __packed mock_cedt = {
	.cedt = {
		.header = {
			.signature = "CEDT",
			.length = sizeof(mock_cedt),
			.revision = 1,
		},
	},
	.chbs[0] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 0,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[1] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 1,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[2] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 2,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[3] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 3,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL11,
	},
	.cfmws0 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws0),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = 0,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 0 },
	},
	.cfmws1 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws1),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = 1,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	.cfmws2 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws2),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 2,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 0 },
	},
	.cfmws3 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws3),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 3,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	.cfmws4 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws4),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 4,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 2 },
	},
	.cfmws5 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws5),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = 5,
			.window_size = SZ_256M,
		},
		.target = { 3 },
	},
	/* .cfmws6,7,8 use ACPI_CEDT_CFMWS_ARITHMETIC_XOR */
	.cfmws6 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws6),
			},
			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 0,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, },
	},
	.cfmws7 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws7),
			},
			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
			.interleave_ways = 1,
			.granularity = 0,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 1,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	.cfmws8 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws8),
			},
			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
			.interleave_ways = 2,
			.granularity = 0,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = 0,
			.window_size = SZ_256M * 16UL,
		},
		.target = { 0, 1, 0, 1, },
	},
	.cxims0 = {
		.cxims = {
			.header = {
				.type = ACPI_CEDT_TYPE_CXIMS,
				.length = sizeof(mock_cedt.cxims0),
			},
			.hbig = 0,
			.nr_xormaps = 2,
		},
		.xormap_list = { 0x404100, 0x808200, },
	},
};

struct acpi_cedt_cfmws *mock_cfmws[] = {
	[0] = &mock_cedt.cfmws0.cfmws,
	[1] = &mock_cedt.cfmws1.cfmws,
	[2] = &mock_cedt.cfmws2.cfmws,
	[3] = &mock_cedt.cfmws3.cfmws,
	[4] = &mock_cedt.cfmws4.cfmws,
	[5] = &mock_cedt.cfmws5.cfmws,
	/* Modulo Math above, XOR Math below */
	[6] = &mock_cedt.cfmws6.cfmws,
	[7] = &mock_cedt.cfmws7.cfmws,
	[8] = &mock_cedt.cfmws8.cfmws,
};

static int cfmws_start;
static int cfmws_end;
#define CFMWS_MOD_ARRAY_START 0
#define CFMWS_MOD_ARRAY_END   5
#define CFMWS_XOR_ARRAY_START 6
#define CFMWS_XOR_ARRAY_END   8

struct acpi_cedt_cxims *mock_cxims[1] = {
	[0] = &mock_cedt.cxims0.cxims,
};

struct cxl_mock_res {
	struct list_head list;
	struct range range;
};

static LIST_HEAD(mock_res);
static DEFINE_MUTEX(mock_res_lock);
static struct gen_pool *cxl_mock_pool;

static void depopulate_all_mock_resources(void)
{
	struct cxl_mock_res *res, *_res;

	mutex_lock(&mock_res_lock);
	list_for_each_entry_safe(res, _res, &mock_res, list) {
		gen_pool_free(cxl_mock_pool, res->range.start,
			      range_len(&res->range));
		list_del(&res->list);
		kfree(res);
	}
	mutex_unlock(&mock_res_lock);
}

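/*
 * Carve an aligned range out of the fake physical address pool
 * (cxl_mock_pool) and track it so it can be released later by
 * depopulate_all_mock_resources().
 */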
static struct cxl_mock_res *alloc_mock_res(resource_size_t size, int align)
{
	struct cxl_mock_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
	struct genpool_data_align data = {
		.align = align,
	};
	unsigned long phys;

	if (!res)
		return NULL;

	INIT_LIST_HEAD(&res->list);
	phys = gen_pool_alloc_algo(cxl_mock_pool, size,
				   gen_pool_first_fit_align, &data);
	if (!phys) {
		kfree(res);
		return NULL;
	}

	res->range = (struct range) {
		.start = phys,
		.end = phys + size - 1,
	};
	mutex_lock(&mock_res_lock);
	list_add(&res->list, &mock_res);
	mutex_unlock(&mock_res_lock);

	return res;
}

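/*
 * Assign fake physical addresses to the CHBS register blocks and to the
 * CFMWS windows that will be reported to the cxl_acpi driver.
 */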
static int populate_cedt(void)
{
	struct cxl_mock_res *res;
	int i;

	for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
		struct acpi_cedt_chbs *chbs = &mock_cedt.chbs[i];
		resource_size_t size;

		if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20)
			size = ACPI_CEDT_CHBS_LENGTH_CXL20;
		else
			size = ACPI_CEDT_CHBS_LENGTH_CXL11;

		res = alloc_mock_res(size, size);
		if (!res)
			return -ENOMEM;
		chbs->base = res->range.start;
		chbs->length = size;
	}

	for (i = cfmws_start; i <= cfmws_end; i++) {
		struct acpi_cedt_cfmws *window = mock_cfmws[i];

		res = alloc_mock_res(window->window_size, SZ_256M);
		if (!res)
			return -ENOMEM;
		window->base_hpa = res->range.start;
	}

	return 0;
}

static bool is_mock_port(struct device *dev);

/*
 * WARNING: this hack assumes that 'struct cxl_cfmws_context' and
 * 'struct cxl_chbs_context' share the property that their first
 * struct member is the cxl_test device being probed by the cxl_acpi
 * driver.
 */
struct cxl_cedt_context {
	struct device *dev;
};

static int mock_acpi_table_parse_cedt(enum acpi_cedt_type id,
				      acpi_tbl_entry_handler_arg handler_arg,
				      void *arg)
{
	struct cxl_cedt_context *ctx = arg;
	struct device *dev = ctx->dev;
	union acpi_subtable_headers *h;
	unsigned long end;
	int i;

	if (!is_mock_port(dev) && !is_mock_dev(dev))
		return acpi_table_parse_cedt(id, handler_arg, arg);

	if (id == ACPI_CEDT_TYPE_CHBS)
		for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
			h = (union acpi_subtable_headers *)&mock_cedt.chbs[i];
			end = (unsigned long)&mock_cedt.chbs[i + 1];
			handler_arg(h, arg, end);
		}

	if (id == ACPI_CEDT_TYPE_CFMWS)
		for (i = cfmws_start; i <= cfmws_end; i++) {
			h = (union acpi_subtable_headers *) mock_cfmws[i];
			end = (unsigned long) h + mock_cfmws[i]->header.length;
			handler_arg(h, arg, end);
		}

	if (id == ACPI_CEDT_TYPE_CXIMS)
		for (i = 0; i < ARRAY_SIZE(mock_cxims); i++) {
			h = (union acpi_subtable_headers *)mock_cxims[i];
			end = (unsigned long)h + mock_cxims[i]->header.length;
			handler_arg(h, arg, end);
		}

	return 0;
}

static bool is_mock_bridge(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
		if (dev == &cxl_host_bridge[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
		if (dev == &cxl_hb_single[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_rch); i++)
		if (dev == &cxl_rch[i]->dev)
			return true;

	return false;
}

static bool is_mock_port(struct device *dev)
{
	int i;

	if (is_mock_bridge(dev))
		return true;

	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++)
		if (dev == &cxl_root_port[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++)
		if (dev == &cxl_switch_uport[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++)
		if (dev == &cxl_switch_dport[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++)
		if (dev == &cxl_root_single[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++)
		if (dev == &cxl_swu_single[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++)
		if (dev == &cxl_swd_single[i]->dev)
			return true;

	if (is_cxl_memdev(dev))
		return is_mock_dev(dev->parent);

	return false;
}

static int host_bridge_index(struct acpi_device *adev)
{
	return adev - host_bridge;
}

static struct acpi_device *find_host_bridge(acpi_handle handle)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
		if (handle == host_bridge[i].handle)
			return &host_bridge[i];
	return NULL;
}

static acpi_status
mock_acpi_evaluate_integer(acpi_handle handle, acpi_string pathname,
			   struct acpi_object_list *arguments,
			   unsigned long long *data)
{
	struct acpi_device *adev = find_host_bridge(handle);

	if (!adev || strcmp(pathname, METHOD_NAME__UID) != 0)
		return acpi_evaluate_integer(handle, pathname, arguments, data);

	*data = host_bridge_index(adev);
	return AE_OK;
}

static struct pci_bus mock_pci_bus[NR_BRIDGES];
static struct acpi_pci_root mock_pci_root[ARRAY_SIZE(mock_pci_bus)] = {
	[0] = {
		.bus = &mock_pci_bus[0],
	},
	[1] = {
		.bus = &mock_pci_bus[1],
	},
	[2] = {
		.bus = &mock_pci_bus[2],
	},
	[3] = {
		.bus = &mock_pci_bus[3],
	},
};

static bool is_mock_bus(struct pci_bus *bus)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mock_pci_bus); i++)
		if (bus == &mock_pci_bus[i])
			return true;
	return false;
}

static struct acpi_pci_root *mock_acpi_pci_find_root(acpi_handle handle)
{
	struct acpi_device *adev = find_host_bridge(handle);

	if (!adev)
		return acpi_pci_find_root(handle);
	return &mock_pci_root[host_bridge_index(adev)];
}

static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port,
					  struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_hdm *cxlhdm = devm_kzalloc(&port->dev, sizeof(*cxlhdm), GFP_KERNEL);

	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);

	cxlhdm->port = port;
	return cxlhdm;
}

static int mock_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	dev_err(&port->dev, "unexpected passthrough decoder for cxl_test\n");
	return -EOPNOTSUPP;
}

struct target_map_ctx {
	int *target_map;
	int index;
	int target_count;
};

static int map_targets(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct target_map_ctx *ctx = data;

	ctx->target_map[ctx->index++] = pdev->id;

	if (ctx->index > ctx->target_count) {
		dev_WARN_ONCE(dev, 1, "too many targets found?\n");
		return -ENXIO;
	}

	return 0;
}

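/*
 * Mimic hardware commit/reset ordering rules: decoders must be committed in
 * ascending id order and reset in descending order.
 */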
static int mock_decoder_commit(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	int id = cxld->id;

	if (cxld->flags & CXL_DECODER_F_ENABLE)
		return 0;

	dev_dbg(&port->dev, "%s commit\n", dev_name(&cxld->dev));
	if (cxl_num_decoders_committed(port) != id) {
		dev_dbg(&port->dev,
			"%s: out of order commit, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id,
			cxl_num_decoders_committed(port));
		return -EBUSY;
	}

	port->commit_end++;
	cxld->flags |= CXL_DECODER_F_ENABLE;

	return 0;
}

static int mock_decoder_reset(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	int id = cxld->id;

	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
		return 0;

	dev_dbg(&port->dev, "%s reset\n", dev_name(&cxld->dev));
	if (port->commit_end != id) {
		dev_dbg(&port->dev,
			"%s: out of order reset, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end);
		return -EBUSY;
	}

	port->commit_end--;
	cxld->flags &= ~CXL_DECODER_F_ENABLE;

	return 0;
}

static void default_mock_decoder(struct cxl_decoder *cxld)
{
	cxld->hpa_range = (struct range){
		.start = 0,
		.end = -1,
	};

	cxld->interleave_ways = 1;
	cxld->interleave_granularity = 256;
	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->commit = mock_decoder_commit;
	cxld->reset = mock_decoder_reset;
}

static int first_decoder(struct device *dev, void *data)
{
	struct cxl_decoder *cxld;

	if (!is_switch_decoder(dev))
		return 0;
	cxld = to_cxl_decoder(dev);
	if (cxld->id == 0)
		return 1;
	return 0;
}

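/*
 * Emulate an HDM decoder configuration that looks as if it had been
 * pre-programmed by platform firmware (CXL_DECODER_STATE_AUTO) for a small
 * set of devices; every other decoder gets a disabled default setup via
 * default_mock_decoder().
 */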
static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
{
	struct acpi_cedt_cfmws *window = mock_cfmws[0];
	struct platform_device *pdev = NULL;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_port *port, *iter;
	const int size = SZ_512M;
	struct cxl_memdev *cxlmd;
	struct cxl_dport *dport;
	struct device *dev;
	bool hb0 = false;
	u64 base;
	int i;

	if (is_endpoint_decoder(&cxld->dev)) {
		cxled = to_cxl_endpoint_decoder(&cxld->dev);
		cxlmd = cxled_to_memdev(cxled);
		WARN_ON(!dev_is_platform(cxlmd->dev.parent));
		pdev = to_platform_device(cxlmd->dev.parent);

		/* check if the endpoint is attached to host-bridge0 */
		port = cxled_to_port(cxled);
		do {
			if (port->uport_dev == &cxl_host_bridge[0]->dev) {
				hb0 = true;
				break;
			}
			if (is_cxl_port(port->dev.parent))
				port = to_cxl_port(port->dev.parent);
			else
				port = NULL;
		} while (port);
		port = cxled_to_port(cxled);
	}

	/*
	 * The first decoder on each of the first 2 devices on the first
	 * switch attached to host-bridge0 mocks a fake / static RAM
	 * region. All other decoders are default disabled. Given the
	 * round robin assignment those devices are named cxl_mem.0 and
	 * cxl_mem.4.
	 *
	 * See 'cxl list -BMPu -m cxl_mem.0,cxl_mem.4'
	 */
	if (!hb0 || pdev->id % 4 || pdev->id > 4 || cxld->id > 0) {
		default_mock_decoder(cxld);
		return;
	}

	base = window->base_hpa;
	cxld->hpa_range = (struct range) {
		.start = base,
		.end = base + size - 1,
	};

	cxld->interleave_ways = 2;
	eig_to_granularity(window->granularity, &cxld->interleave_granularity);
	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->flags = CXL_DECODER_F_ENABLE;
	cxled->state = CXL_DECODER_STATE_AUTO;
	port->commit_end = cxld->id;
	devm_cxl_dpa_reserve(cxled, 0, size / cxld->interleave_ways, 0);
	cxld->commit = mock_decoder_commit;
	cxld->reset = mock_decoder_reset;

	/*
	 * Now that the endpoint decoder is set up, walk up the hierarchy
	 * and set up the switch and root port decoders targeting @cxlmd.
	 */
	iter = port;
	for (i = 0; i < 2; i++) {
		dport = iter->parent_dport;
		iter = dport->port;
		dev = device_find_child(&iter->dev, NULL, first_decoder);
		/*
		 * Ancestor ports are guaranteed to be enumerated before
		 * @port, and all ports have at least one decoder.
		 */
		if (WARN_ON(!dev))
			continue;
		cxlsd = to_cxl_switch_decoder(dev);
		if (i == 0) {
			/* put cxl_mem.4 second in the decode order */
			if (pdev->id == 4)
				cxlsd->target[1] = dport;
			else
				cxlsd->target[0] = dport;
		} else
			cxlsd->target[0] = dport;
		cxld = &cxlsd->cxld;
		cxld->target_type = CXL_DECODER_HOSTONLYMEM;
		cxld->flags = CXL_DECODER_F_ENABLE;
		iter->commit_end = 0;
		/*
		 * Switch targets 2 endpoints, while host bridge targets
		 * one root port
		 */
		if (i == 0)
			cxld->interleave_ways = 2;
		else
			cxld->interleave_ways = 1;
		cxld->interleave_granularity = 4096;
		cxld->hpa_range = (struct range) {
			.start = base,
			.end = base + size - 1,
		};
		put_device(dev);
	}
}

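/*
 * Stand-in for devm_cxl_enumerate_decoders(): allocate NR_CXL_PORT_DECODERS
 * switch or endpoint decoders for @cxlhdm's port and register them with the
 * CXL core.
 */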
static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
				       struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_port *port = cxlhdm->port;
	struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
	int target_count, i;

	if (is_cxl_endpoint(port))
		target_count = 0;
	else if (is_cxl_root(parent_port))
		target_count = NR_CXL_ROOT_PORTS;
	else
		target_count = NR_CXL_SWITCH_PORTS;

	for (i = 0; i < NR_CXL_PORT_DECODERS; i++) {
		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
		struct target_map_ctx ctx = {
			.target_map = target_map,
			.target_count = target_count,
		};
		struct cxl_decoder *cxld;
		int rc;

		if (target_count) {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		} else {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);

			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		}

		mock_init_hdm_decoder(cxld);

		if (target_count) {
			rc = device_for_each_child(port->uport_dev, &ctx,
						   map_targets);
			if (rc) {
				put_device(&cxld->dev);
				return rc;
			}
		}

		rc = cxl_decoder_add_locked(cxld, target_map);
		if (rc) {
			put_device(&cxld->dev);
			dev_err(&port->dev, "Failed to add decoder\n");
			return rc;
		}

		rc = cxl_decoder_autoremove(&port->dev, cxld);
		if (rc)
			return rc;
		dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
	}

	return 0;
}

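/*
 * Stand-in for devm_cxl_port_enumerate_dports(): register the platform
 * devices acting as downstream ports of @port, selected by the port's depth
 * and by which style of host bridge it descends from.
 */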
static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
{
	struct platform_device **array;
	int i, array_size;

	if (port->depth == 1) {
		if (is_multi_bridge(port->uport_dev)) {
			array_size = ARRAY_SIZE(cxl_root_port);
			array = cxl_root_port;
		} else if (is_single_bridge(port->uport_dev)) {
			array_size = ARRAY_SIZE(cxl_root_single);
			array = cxl_root_single;
		} else {
			dev_dbg(&port->dev, "%s: unknown bridge type\n",
				dev_name(port->uport_dev));
			return -ENXIO;
		}
	} else if (port->depth == 2) {
		struct cxl_port *parent = to_cxl_port(port->dev.parent);

		if (is_multi_bridge(parent->uport_dev)) {
			array_size = ARRAY_SIZE(cxl_switch_dport);
			array = cxl_switch_dport;
		} else if (is_single_bridge(parent->uport_dev)) {
			array_size = ARRAY_SIZE(cxl_swd_single);
			array = cxl_swd_single;
		} else {
			dev_dbg(&port->dev, "%s: unknown bridge type\n",
				dev_name(port->uport_dev));
			return -ENXIO;
		}
	} else {
		dev_WARN_ONCE(&port->dev, 1, "unexpected depth %d\n",
			      port->depth);
		return -ENXIO;
	}

	for (i = 0; i < array_size; i++) {
		struct platform_device *pdev = array[i];
		struct cxl_dport *dport;

		if (pdev->dev.parent != port->uport_dev) {
			dev_dbg(&port->dev, "%s: mismatch parent %s\n",
				dev_name(port->uport_dev),
				dev_name(pdev->dev.parent));
			continue;
		}

		dport = devm_cxl_add_dport(port, &pdev->dev, pdev->id,
					   CXL_RESOURCE_NONE);

		if (IS_ERR(dport))
			return PTR_ERR(dport);
	}

	return 0;
}

static struct cxl_mock_ops cxl_mock_ops = {
	.is_mock_adev = is_mock_adev,
	.is_mock_bridge = is_mock_bridge,
	.is_mock_bus = is_mock_bus,
	.is_mock_port = is_mock_port,
	.is_mock_dev = is_mock_dev,
	.acpi_table_parse_cedt = mock_acpi_table_parse_cedt,
	.acpi_evaluate_integer = mock_acpi_evaluate_integer,
	.acpi_pci_find_root = mock_acpi_pci_find_root,
	.devm_cxl_port_enumerate_dports = mock_cxl_port_enumerate_dports,
	.devm_cxl_setup_hdm = mock_cxl_setup_hdm,
	.devm_cxl_add_passthrough_decoder = mock_cxl_add_passthrough_decoder,
	.devm_cxl_enumerate_decoders = mock_cxl_enumerate_decoders,
	.list = LIST_HEAD_INIT(cxl_mock_ops.list),
};

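/* Associate a mock acpi_device with @dev as its firmware node. */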
static void mock_companion(struct acpi_device *adev, struct device *dev)
{
	device_initialize(&adev->dev);
	fwnode_init(&adev->fwnode, NULL);
	dev->fwnode = &adev->fwnode;
	adev->fwnode.dev = dev;
}

#ifndef SZ_64G
#define SZ_64G (SZ_32G * 2)
#endif

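/*
 * Instantiate the restricted CXL host topology: one CXL 1.1 host bridge
 * with a single restricted CXL device (RCD) attached.
 */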
static __init int cxl_rch_init(void)
{
	int rc, i;

	for (i = 0; i < ARRAY_SIZE(cxl_rch); i++) {
		int idx = NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + i;
		struct acpi_device *adev = &host_bridge[idx];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_host_bridge", idx);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_bridge;
		}

		mock_companion(adev, &pdev->dev);
		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_bridge;
		}

		cxl_rch[i] = pdev;
		mock_pci_bus[idx].bridge = &pdev->dev;
		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
				       "firmware_node");
		if (rc)
			goto err_bridge;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++) {
		int idx = NR_MEM_MULTI + NR_MEM_SINGLE + i;
		struct platform_device *rch = cxl_rch[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_rcd", idx);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_mem;
		}
		pdev->dev.parent = &rch->dev;
		set_dev_node(&pdev->dev, i % 2);

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_mem;
		}
		cxl_rcd[i] = pdev;
	}

	return 0;

err_mem:
	for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
		platform_device_unregister(cxl_rcd[i]);
err_bridge:
	for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_rch[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "firmware_node");
		platform_device_unregister(cxl_rch[i]);
	}

	return rc;
}

static void cxl_rch_exit(void)
{
	int i;

	for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
		platform_device_unregister(cxl_rcd[i]);
	for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_rch[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "firmware_node");
		platform_device_unregister(cxl_rch[i]);
	}
}

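/*
 * Instantiate the single-port host bridge topology: one host bridge with a
 * single root port, a single switch, and NR_CXL_SWITCH_PORTS downstream
 * ports each carrying one memory device.
 */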
static __init int cxl_single_init(void)
{
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++) {
		struct acpi_device *adev =
			&host_bridge[NR_CXL_HOST_BRIDGES + i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_host_bridge",
					     NR_CXL_HOST_BRIDGES + i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_bridge;
		}

		mock_companion(adev, &pdev->dev);
		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_bridge;
		}

		cxl_hb_single[i] = pdev;
		mock_pci_bus[i + NR_CXL_HOST_BRIDGES].bridge = &pdev->dev;
		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
				       "physical_node");
		if (rc)
			goto err_bridge;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++) {
		struct platform_device *bridge =
			cxl_hb_single[i % ARRAY_SIZE(cxl_hb_single)];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_root_port",
					     NR_MULTI_ROOT + i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_port;
		}
		pdev->dev.parent = &bridge->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_port;
		}
		cxl_root_single[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++) {
		struct platform_device *root_port = cxl_root_single[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_switch_uport",
					     NR_MULTI_ROOT + i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_uport;
		}
		pdev->dev.parent = &root_port->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_uport;
		}
		cxl_swu_single[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++) {
		struct platform_device *uport =
			cxl_swu_single[i % ARRAY_SIZE(cxl_swu_single)];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_switch_dport",
					     i + NR_MEM_MULTI);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_dport;
		}
		pdev->dev.parent = &uport->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_dport;
		}
		cxl_swd_single[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) {
		struct platform_device *dport = cxl_swd_single[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_mem", NR_MEM_MULTI + i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_mem;
		}
		pdev->dev.parent = &dport->dev;
		set_dev_node(&pdev->dev, i % 2);

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_mem;
		}
		cxl_mem_single[i] = pdev;
	}

	return 0;

err_mem:
	for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem_single[i]);
err_dport:
	for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swd_single[i]);
err_uport:
	for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swu_single[i]);
err_port:
	for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_single[i]);
err_bridge:
	for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_hb_single[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_hb_single[i]);
	}

	return rc;
}

static void cxl_single_exit(void)
{
	int i;

	for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem_single[i]);
	for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swd_single[i]);
	for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swu_single[i]);
	for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_single[i]);
	for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_hb_single[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_hb_single[i]);
	}
}

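/*
 * Build the full mock topology: register the mock ops, reserve a fake 64GB
 * physical address window at the top of iomem_resource, populate the CEDT,
 * and instantiate the multi-port, single-port and RCH sub-topologies before
 * finally adding the cxl_acpi root device.
 */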
static __init int cxl_test_init(void)
{
	int rc, i;

	cxl_acpi_test();
	cxl_core_test();
	cxl_mem_test();
	cxl_pmem_test();
	cxl_port_test();

	register_cxl_mock_ops(&cxl_mock_ops);

	cxl_mock_pool = gen_pool_create(ilog2(SZ_2M), NUMA_NO_NODE);
	if (!cxl_mock_pool) {
		rc = -ENOMEM;
		goto err_gen_pool_create;
	}

	rc = gen_pool_add(cxl_mock_pool, iomem_resource.end + 1 - SZ_64G,
			  SZ_64G, NUMA_NO_NODE);
	if (rc)
		goto err_gen_pool_add;

	if (interleave_arithmetic == 1) {
		cfmws_start = CFMWS_XOR_ARRAY_START;
		cfmws_end = CFMWS_XOR_ARRAY_END;
	} else {
		cfmws_start = CFMWS_MOD_ARRAY_START;
		cfmws_end = CFMWS_MOD_ARRAY_END;
	}

	rc = populate_cedt();
	if (rc)
		goto err_populate;

	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++) {
		struct acpi_device *adev = &host_bridge[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_host_bridge", i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_bridge;
		}

		mock_companion(adev, &pdev->dev);
		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_bridge;
		}

		cxl_host_bridge[i] = pdev;
		mock_pci_bus[i].bridge = &pdev->dev;
		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
				       "physical_node");
		if (rc)
			goto err_bridge;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++) {
		struct platform_device *bridge =
			cxl_host_bridge[i % ARRAY_SIZE(cxl_host_bridge)];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_root_port", i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_port;
		}
		pdev->dev.parent = &bridge->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_port;
		}
		cxl_root_port[i] = pdev;
	}

	BUILD_BUG_ON(ARRAY_SIZE(cxl_switch_uport) != ARRAY_SIZE(cxl_root_port));
	for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++) {
		struct platform_device *root_port = cxl_root_port[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_switch_uport", i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_uport;
		}
		pdev->dev.parent = &root_port->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_uport;
		}
		cxl_switch_uport[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++) {
		struct platform_device *uport =
			cxl_switch_uport[i % ARRAY_SIZE(cxl_switch_uport)];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_switch_dport", i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_dport;
		}
		pdev->dev.parent = &uport->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_dport;
		}
		cxl_switch_dport[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
		struct platform_device *dport = cxl_switch_dport[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_mem", i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_mem;
		}
		pdev->dev.parent = &dport->dev;
		set_dev_node(&pdev->dev, i % 2);

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_mem;
		}
		cxl_mem[i] = pdev;
	}

	rc = cxl_single_init();
	if (rc)
		goto err_mem;

	rc = cxl_rch_init();
	if (rc)
		goto err_single;

	cxl_acpi = platform_device_alloc("cxl_acpi", 0);
	if (!cxl_acpi) {
		rc = -ENOMEM;
		goto err_rch;
	}

	mock_companion(&acpi0017_mock, &cxl_acpi->dev);
	acpi0017_mock.dev.bus = &platform_bus_type;

	rc = platform_device_add(cxl_acpi);
	if (rc)
		goto err_add;

	return 0;

err_add:
	platform_device_put(cxl_acpi);
err_rch:
	cxl_rch_exit();
err_single:
	cxl_single_exit();
err_mem:
	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem[i]);
err_dport:
	for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_dport[i]);
err_uport:
	for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_uport[i]);
err_port:
	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_port[i]);
err_bridge:
	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_host_bridge[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_host_bridge[i]);
	}
err_populate:
	depopulate_all_mock_resources();
err_gen_pool_add:
	gen_pool_destroy(cxl_mock_pool);
err_gen_pool_create:
	unregister_cxl_mock_ops(&cxl_mock_ops);
	return rc;
}

static __exit void cxl_test_exit(void)
{
	int i;

	platform_device_unregister(cxl_acpi);
	cxl_rch_exit();
	cxl_single_exit();
	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem[i]);
	for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_dport[i]);
	for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_uport[i]);
	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_port[i]);
	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_host_bridge[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_host_bridge[i]);
	}
	depopulate_all_mock_resources();
	gen_pool_destroy(cxl_mock_pool);
	unregister_cxl_mock_ops(&cxl_mock_ops);
}

module_param(interleave_arithmetic, int, 0444);
MODULE_PARM_DESC(interleave_arithmetic, "Modulo:0, XOR:1");
module_init(cxl_test_init);
module_exit(cxl_test_exit);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(ACPI);
MODULE_IMPORT_NS(CXL);