1// SPDX-License-Identifier: GPL-2.0-only
2// Copyright(c) 2021 Intel Corporation. All rights reserved.
3
4#include <linux/platform_device.h>
5#include <linux/mod_devicetable.h>
6#include <linux/module.h>
7#include <linux/delay.h>
8#include <linux/sizes.h>
9#include <linux/bits.h>
10#include <asm/unaligned.h>
11#include <crypto/sha2.h>
12#include <cxlmem.h>
13
14#include "trace.h"
15
16#define LSA_SIZE SZ_128K
17#define FW_SIZE SZ_64M
18#define FW_SLOTS 3
19#define DEV_SIZE SZ_2G
20#define EFFECT(x) (1U << (x))
21
22#define MOCK_INJECT_DEV_MAX 8
23#define MOCK_INJECT_TEST_MAX 128
24
25static unsigned int poison_inject_dev_max = MOCK_INJECT_DEV_MAX;
26
27enum cxl_command_effects {
28	CONF_CHANGE_COLD_RESET = 0,
29	CONF_CHANGE_IMMEDIATE,
30	DATA_CHANGE_IMMEDIATE,
31	POLICY_CHANGE_IMMEDIATE,
32	LOG_CHANGE_IMMEDIATE,
33	SECURITY_CHANGE_IMMEDIATE,
34	BACKGROUND_OP,
35	SECONDARY_MBOX_SUPPORTED,
36};
37
38#define CXL_CMD_EFFECT_NONE cpu_to_le16(0)
39
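/*
 * Command Effects Log (CEL) entries advertised by the mock device: each
 * entry pairs a supported mailbox opcode with its command effects bits.
 */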
40static struct cxl_cel_entry mock_cel[] = {
41	{
42		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
43		.effect = CXL_CMD_EFFECT_NONE,
44	},
45	{
46		.opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY),
47		.effect = CXL_CMD_EFFECT_NONE,
48	},
49	{
50		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA),
51		.effect = CXL_CMD_EFFECT_NONE,
52	},
53	{
54		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO),
55		.effect = CXL_CMD_EFFECT_NONE,
56	},
57	{
58		.opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
59		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE) |
60				      EFFECT(DATA_CHANGE_IMMEDIATE)),
61	},
62	{
63		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO),
64		.effect = CXL_CMD_EFFECT_NONE,
65	},
66	{
67		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON),
68		.effect = CXL_CMD_EFFECT_NONE,
69	},
70	{
71		.opcode = cpu_to_le16(CXL_MBOX_OP_INJECT_POISON),
72		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
73	},
74	{
75		.opcode = cpu_to_le16(CXL_MBOX_OP_CLEAR_POISON),
76		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
77	},
78	{
79		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_FW_INFO),
80		.effect = CXL_CMD_EFFECT_NONE,
81	},
82	{
83		.opcode = cpu_to_le16(CXL_MBOX_OP_TRANSFER_FW),
84		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
85				      EFFECT(BACKGROUND_OP)),
86	},
87	{
88		.opcode = cpu_to_le16(CXL_MBOX_OP_ACTIVATE_FW),
89		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
90				      EFFECT(CONF_CHANGE_IMMEDIATE)),
91	},
92};
93
94/* See CXL 2.0 Table 181 Get Health Info Output Payload */
95struct cxl_mbox_health_info {
96	u8 health_status;
97	u8 media_status;
98	u8 ext_status;
99	u8 life_used;
100	__le16 temperature;
101	__le32 dirty_shutdowns;
102	__le32 volatile_errors;
103	__le32 pmem_errors;
104} __packed;
105
106static struct {
107	struct cxl_mbox_get_supported_logs gsl;
108	struct cxl_gsl_entry entry;
109} mock_gsl_payload = {
110	.gsl = {
111		.entries = cpu_to_le16(1),
112	},
113	.entry = {
114		.uuid = DEFINE_CXL_CEL_UUID,
115		.size = cpu_to_le32(sizeof(mock_cel)),
116	},
117};
118
119#define PASS_TRY_LIMIT 3
120
121#define CXL_TEST_EVENT_CNT_MAX 15
122
123/* Number of event records to return at a time in the simulation */
124#define CXL_TEST_EVENT_CNT 3
125
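/*
 * A mock event log: a fixed-size array of canned event records plus read
 * (cur_idx) and clear (clear_idx) cursors and overflow accounting.
 */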
126struct mock_event_log {
127	u16 clear_idx;
128	u16 cur_idx;
129	u16 nr_events;
130	u16 nr_overflow;
131	u16 overflow_reset;
132	struct cxl_event_record_raw *events[CXL_TEST_EVENT_CNT_MAX];
133};
134
135struct mock_event_store {
136	struct cxl_memdev_state *mds;
137	struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
138	u32 ev_status;
139};
140
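/*
 * Per-device mock state: label storage area (LSA) and firmware buffers,
 * security/passphrase state, the mock event store, and the last set timestamp.
 */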
141struct cxl_mockmem_data {
142	void *lsa;
143	void *fw;
144	int fw_slot;
145	int fw_staged;
146	size_t fw_size;
147	u32 security_state;
148	u8 user_pass[NVDIMM_PASSPHRASE_LEN];
149	u8 master_pass[NVDIMM_PASSPHRASE_LEN];
150	int user_limit;
151	int master_limit;
152	struct mock_event_store mes;
153	u8 event_buf[SZ_4K];
154	u64 timestamp;
155};
156
157static struct mock_event_log *event_find_log(struct device *dev, int log_type)
158{
159	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
160
161	if (log_type >= CXL_EVENT_TYPE_MAX)
162		return NULL;
163	return &mdata->mes.mock_logs[log_type];
164}
165
166static struct cxl_event_record_raw *event_get_current(struct mock_event_log *log)
167{
168	return log->events[log->cur_idx];
169}
170
171static void event_reset_log(struct mock_event_log *log)
172{
173	log->cur_idx = 0;
174	log->clear_idx = 0;
175	log->nr_overflow = log->overflow_reset;
176}
177
178/* A handle can never be 0; use 1-based indexing for handles */
179static u16 event_get_clear_handle(struct mock_event_log *log)
180{
181	return log->clear_idx + 1;
182}
183
184/* A handle can never be 0; use 1-based indexing for handles */
185static __le16 event_get_cur_event_handle(struct mock_event_log *log)
186{
187	u16 cur_handle = log->cur_idx + 1;
188
189	return cpu_to_le16(cur_handle);
190}
191
192static bool event_log_empty(struct mock_event_log *log)
193{
194	return log->cur_idx == log->nr_events;
195}
196
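/* Append an event record to a mock log, counting an overflow if it is full */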
197static void mes_add_event(struct mock_event_store *mes,
198			  enum cxl_event_log_type log_type,
199			  struct cxl_event_record_raw *event)
200{
201	struct mock_event_log *log;
202
203	if (WARN_ON(log_type >= CXL_EVENT_TYPE_MAX))
204		return;
205
206	log = &mes->mock_logs[log_type];
207
208	if ((log->nr_events + 1) > CXL_TEST_EVENT_CNT_MAX) {
209		log->nr_overflow++;
210		log->overflow_reset = log->nr_overflow;
211		return;
212	}
213
214	log->events[log->nr_events] = event;
215	log->nr_events++;
216}
217
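/*
 * Mock of the Get Event Records command: copy up to CXL_TEST_EVENT_CNT
 * records from the requested log, assign 1-based handles, and set the
 * MORE_RECORDS and OVERFLOW flags as appropriate.
 */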
218static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
219{
220	struct cxl_get_event_payload *pl;
221	struct mock_event_log *log;
223	u8 log_type;
224	int i;
225
226	if (cmd->size_in != sizeof(log_type))
227		return -EINVAL;
228
229	if (cmd->size_out < struct_size(pl, records, CXL_TEST_EVENT_CNT))
230		return -EINVAL;
231
232	log_type = *((u8 *)cmd->payload_in);
233	if (log_type >= CXL_EVENT_TYPE_MAX)
234		return -EINVAL;
235
236	memset(cmd->payload_out, 0, cmd->size_out);
237
238	log = event_find_log(dev, log_type);
239	if (!log || event_log_empty(log))
240		return 0;
241
242	pl = cmd->payload_out;
243
244	for (i = 0; i < CXL_TEST_EVENT_CNT && !event_log_empty(log); i++) {
245		memcpy(&pl->records[i], event_get_current(log),
246		       sizeof(pl->records[i]));
247		pl->records[i].hdr.handle = event_get_cur_event_handle(log);
248		log->cur_idx++;
249	}
250
251	pl->record_count = cpu_to_le16(i);
252	if (!event_log_empty(log))
253		pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS;
254
255	if (log->nr_overflow) {
256		u64 ns;
257
258		pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW;
259		pl->overflow_err_count = cpu_to_le16(log->nr_overflow);
260		ns = ktime_get_real_ns();
261		ns -= 5000000000; /* 5s ago */
262		pl->first_overflow_timestamp = cpu_to_le64(ns);
263		ns = ktime_get_real_ns();
264		ns -= 1000000000; /* 1s ago */
265		pl->last_overflow_timestamp = cpu_to_le64(ns);
266	}
267
268	return 0;
269}
270
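/*
 * Mock of the Clear Event Records command: require that handles are cleared
 * in the order they were returned before advancing the clear index.
 */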
271static int mock_clear_event(struct device *dev, struct cxl_mbox_cmd *cmd)
272{
273	struct cxl_mbox_clear_event_payload *pl = cmd->payload_in;
274	struct mock_event_log *log;
275	u8 log_type = pl->event_log;
276	u16 handle;
277	int nr;
278
279	if (log_type >= CXL_EVENT_TYPE_MAX)
280		return -EINVAL;
281
282	log = event_find_log(dev, log_type);
283	if (!log)
284		return 0; /* No mock data in this log */
285
286	/*
287	 * Clearing more records than have been returned is technically not
288	 * invalid per the specification (the host could 'guess' handles and
289	 * clear them in order), but it is not good host behavior, so test for it.
290	 */
291	if (log->clear_idx + pl->nr_recs > log->cur_idx) {
292		dev_err(dev,
293			"Attempting to clear more events than returned!\n");
294		return -EINVAL;
295	}
296
297	/* Check handle order prior to clearing events */
298	for (nr = 0, handle = event_get_clear_handle(log);
299	     nr < pl->nr_recs;
300	     nr++, handle++) {
301		if (handle != le16_to_cpu(pl->handles[nr])) {
302			dev_err(dev, "Clearing events out of order\n");
303			return -EINVAL;
304		}
305	}
306
307	if (log->nr_overflow)
308		log->nr_overflow = 0;
309
310	/* Clear events */
311	log->clear_idx += pl->nr_recs;
312	return 0;
313}
314
315static void cxl_mock_event_trigger(struct device *dev)
316{
317	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
318	struct mock_event_store *mes = &mdata->mes;
319	int i;
320
321	for (i = CXL_EVENT_TYPE_INFO; i < CXL_EVENT_TYPE_MAX; i++) {
322		struct mock_event_log *log;
323
324		log = event_find_log(dev, i);
325		if (log)
326			event_reset_log(log);
327	}
328
329	cxl_mem_get_event_records(mes->mds, mes->ev_status);
330}
331
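/* Canned event records used to populate the mock event logs */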
332static struct cxl_event_record_raw maint_needed = {
333	.hdr = {
334		.id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB,
335				0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
336		.length = sizeof(struct cxl_event_record_raw),
337		.flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED,
338		/* .handle = Set dynamically */
339		.related_handle = cpu_to_le16(0xa5b6),
340	},
341	.data = { 0xDE, 0xAD, 0xBE, 0xEF },
342};
343
344static struct cxl_event_record_raw hardware_replace = {
345	.hdr = {
346		.id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E,
347				0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
348		.length = sizeof(struct cxl_event_record_raw),
349		.flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE,
350		/* .handle = Set dynamically */
351		.related_handle = cpu_to_le16(0xb6a5),
352	},
353	.data = { 0xDE, 0xAD, 0xBE, 0xEF },
354};
355
356static struct cxl_event_gen_media gen_media = {
357	.hdr = {
358		.id = UUID_INIT(0xfbcd0a77, 0xc260, 0x417f,
359				0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6),
360		.length = sizeof(struct cxl_event_gen_media),
361		.flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT,
362		/* .handle = Set dynamically */
363		.related_handle = cpu_to_le16(0),
364	},
365	.phys_addr = cpu_to_le64(0x2000),
366	.descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT,
367	.type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR,
368	.transaction_type = CXL_GMER_TRANS_HOST_WRITE,
369	/* .validity_flags = <set below> */
370	.channel = 1,
371	.rank = 30
372};
373
374static struct cxl_event_dram dram = {
375	.hdr = {
376		.id = UUID_INIT(0x601dcbb3, 0x9c06, 0x4eab,
377				0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24),
378		.length = sizeof(struct cxl_event_dram),
379		.flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED,
380		/* .handle = Set dynamically */
381		.related_handle = cpu_to_le16(0),
382	},
383	.phys_addr = cpu_to_le64(0x8000),
384	.descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT,
385	.type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR,
386	.transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB,
387	/* .validity_flags = <set below> */
388	.channel = 1,
389	.bank_group = 5,
390	.bank = 2,
391	.column = {0xDE, 0xAD},
392};
393
394static struct cxl_event_mem_module mem_module = {
395	.hdr = {
396		.id = UUID_INIT(0xfe927475, 0xdd59, 0x4339,
397				0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74),
398		.length = sizeof(struct cxl_event_mem_module),
399		/* .handle = Set dynamically */
400		.related_handle = cpu_to_le16(0),
401	},
402	.event_type = CXL_MMER_TEMP_CHANGE,
403	.info = {
404		.health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED,
405		.media_status = CXL_DHI_MS_ALL_DATA_LOST,
406		.add_status = (CXL_DHI_AS_CRITICAL << 2) |
407			      (CXL_DHI_AS_WARNING << 4) |
408			      (CXL_DHI_AS_WARNING << 5),
409		.device_temp = { 0xDE, 0xAD},
410		.dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef },
411		.cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
412		.cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
413	}
414};
415
416static int mock_set_timestamp(struct cxl_dev_state *cxlds,
417			      struct cxl_mbox_cmd *cmd)
418{
419	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
420	struct cxl_mbox_set_timestamp_in *ts = cmd->payload_in;
421
422	if (cmd->size_in != sizeof(*ts))
423		return -EINVAL;
424
425	if (cmd->size_out != 0)
426		return -EINVAL;
427
428	mdata->timestamp = le64_to_cpu(ts->timestamp);
429	return 0;
430}
431
432static void cxl_mock_add_event_logs(struct mock_event_store *mes)
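/*
 * Populate the INFO, FAIL, and FATAL mock logs with canned records; the
 * FAIL log is intentionally overfilled to exercise overflow reporting.
 */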
433{
434	put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK,
435			   &gen_media.validity_flags);
436
437	put_unaligned_le16(CXL_DER_VALID_CHANNEL | CXL_DER_VALID_BANK_GROUP |
438			   CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN,
439			   &dram.validity_flags);
440
441	mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed);
442	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
443		      (struct cxl_event_record_raw *)&gen_media);
444	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
445		      (struct cxl_event_record_raw *)&mem_module);
446	mes->ev_status |= CXLDEV_EVENT_STATUS_INFO;
447
448	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &maint_needed);
449	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
450	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
451		      (struct cxl_event_record_raw *)&dram);
452	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
453		      (struct cxl_event_record_raw *)&gen_media);
454	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
455		      (struct cxl_event_record_raw *)&mem_module);
456	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
457	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
458		      (struct cxl_event_record_raw *)&dram);
459	/* Overflow this log */
460	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
461	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
462	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
463	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
464	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
465	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
466	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
467	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
468	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
469	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
470	mes->ev_status |= CXLDEV_EVENT_STATUS_FAIL;
471
472	mes_add_event(mes, CXL_EVENT_TYPE_FATAL, &hardware_replace);
473	mes_add_event(mes, CXL_EVENT_TYPE_FATAL,
474		      (struct cxl_event_record_raw *)&dram);
475	mes->ev_status |= CXLDEV_EVENT_STATUS_FATAL;
476}
477
478static int mock_gsl(struct cxl_mbox_cmd *cmd)
479{
480	if (cmd->size_out < sizeof(mock_gsl_payload))
481		return -EINVAL;
482
483	memcpy(cmd->payload_out, &mock_gsl_payload, sizeof(mock_gsl_payload));
484	cmd->size_out = sizeof(mock_gsl_payload);
485
486	return 0;
487}
488
489static int mock_get_log(struct cxl_memdev_state *mds, struct cxl_mbox_cmd *cmd)
490{
491	struct cxl_mbox_get_log *gl = cmd->payload_in;
492	u32 offset = le32_to_cpu(gl->offset);
493	u32 length = le32_to_cpu(gl->length);
494	uuid_t uuid = DEFINE_CXL_CEL_UUID;
495	void *data = &mock_cel;
496
497	if (cmd->size_in < sizeof(*gl))
498		return -EINVAL;
499	if (length > mds->payload_size)
500		return -EINVAL;
501	if (offset + length > sizeof(mock_cel))
502		return -EINVAL;
503	if (!uuid_equal(&gl->uuid, &uuid))
504		return -EINVAL;
505	if (length > cmd->size_out)
506		return -EINVAL;
507
508	memcpy(cmd->payload_out, data + offset, length);
509
510	return 0;
511}
512
513static int mock_rcd_id(struct cxl_mbox_cmd *cmd)
514{
515	struct cxl_mbox_identify id = {
516		.fw_revision = { "mock fw v1 " },
517		.total_capacity =
518			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
519		.volatile_capacity =
520			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
521	};
522
523	if (cmd->size_out < sizeof(id))
524		return -EINVAL;
525
526	memcpy(cmd->payload_out, &id, sizeof(id));
527
528	return 0;
529}
530
531static int mock_id(struct cxl_mbox_cmd *cmd)
532{
533	struct cxl_mbox_identify id = {
534		.fw_revision = { "mock fw v1 " },
535		.lsa_size = cpu_to_le32(LSA_SIZE),
536		.partition_align =
537			cpu_to_le64(SZ_256M / CXL_CAPACITY_MULTIPLIER),
538		.total_capacity =
539			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
540		.inject_poison_limit = cpu_to_le16(MOCK_INJECT_TEST_MAX),
541	};
542
543	put_unaligned_le24(CXL_POISON_LIST_MAX, id.poison_list_max_mer);
544
545	if (cmd->size_out < sizeof(id))
546		return -EINVAL;
547
548	memcpy(cmd->payload_out, &id, sizeof(id));
549
550	return 0;
551}
552
553static int mock_partition_info(struct cxl_mbox_cmd *cmd)
554{
555	struct cxl_mbox_get_partition_info pi = {
556		.active_volatile_cap =
557			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
558		.active_persistent_cap =
559			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
560	};
561
562	if (cmd->size_out < sizeof(pi))
563		return -EINVAL;
564
565	memcpy(cmd->payload_out, &pi, sizeof(pi));
566
567	return 0;
568}
569
570static int mock_sanitize(struct cxl_mockmem_data *mdata,
571			 struct cxl_mbox_cmd *cmd)
572{
573	if (cmd->size_in != 0)
574		return -EINVAL;
575
576	if (cmd->size_out != 0)
577		return -EINVAL;
578
579	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
580		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
581		return -ENXIO;
582	}
583	if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
584		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
585		return -ENXIO;
586	}
587
588	return 0; /* assume completion in less than 2 seconds, no background operation */
589}
590
591static int mock_secure_erase(struct cxl_mockmem_data *mdata,
592			     struct cxl_mbox_cmd *cmd)
593{
594	if (cmd->size_in != 0)
595		return -EINVAL;
596
597	if (cmd->size_out != 0)
598		return -EINVAL;
599
600	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
601		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
602		return -ENXIO;
603	}
604
605	if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
606		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
607		return -ENXIO;
608	}
609
610	return 0;
611}
612
613static int mock_get_security_state(struct cxl_mockmem_data *mdata,
614				   struct cxl_mbox_cmd *cmd)
615{
616	if (cmd->size_in)
617		return -EINVAL;
618
619	if (cmd->size_out != sizeof(u32))
620		return -EINVAL;
621
622	memcpy(cmd->payload_out, &mdata->security_state, sizeof(u32));
623
624	return 0;
625}
626
627static void master_plimit_check(struct cxl_mockmem_data *mdata)
628{
629	if (mdata->master_limit == PASS_TRY_LIMIT)
630		return;
631	mdata->master_limit++;
632	if (mdata->master_limit == PASS_TRY_LIMIT)
633		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
634}
635
636static void user_plimit_check(struct cxl_mockmem_data *mdata)
637{
638	if (mdata->user_limit == PASS_TRY_LIMIT)
639		return;
640	mdata->user_limit++;
641	if (mdata->user_limit == PASS_TRY_LIMIT)
642		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
643}
644
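/*
 * Mock of Set Passphrase: honor the frozen state and retry limits, verify
 * the current passphrase, and record the new one.
 */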
645static int mock_set_passphrase(struct cxl_mockmem_data *mdata,
646			       struct cxl_mbox_cmd *cmd)
647{
648	struct cxl_set_pass *set_pass;
649
650	if (cmd->size_in != sizeof(*set_pass))
651		return -EINVAL;
652
653	if (cmd->size_out != 0)
654		return -EINVAL;
655
656	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
657		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
658		return -ENXIO;
659	}
660
661	set_pass = cmd->payload_in;
662	switch (set_pass->type) {
663	case CXL_PMEM_SEC_PASS_MASTER:
664		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
665			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
666			return -ENXIO;
667		}
668		/*
669		 * CXL spec rev3.0 8.2.9.8.6.2: the master passphrase shall only be set in
670		 * the security disabled state when the user passphrase is not set.
671		 */
672		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
673			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
674			return -ENXIO;
675		}
676		if (memcmp(mdata->master_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
677			master_plimit_check(mdata);
678			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
679			return -ENXIO;
680		}
681		memcpy(mdata->master_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
682		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
683		return 0;
684
685	case CXL_PMEM_SEC_PASS_USER:
686		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
687			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
688			return -ENXIO;
689		}
690		if (memcmp(mdata->user_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
691			user_plimit_check(mdata);
692			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
693			return -ENXIO;
694		}
695		memcpy(mdata->user_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
696		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PASS_SET;
697		return 0;
698
699	default:
700		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
701	}
702	return -EINVAL;
703}
704
705static int mock_disable_passphrase(struct cxl_mockmem_data *mdata,
706				   struct cxl_mbox_cmd *cmd)
707{
708	struct cxl_disable_pass *dis_pass;
709
710	if (cmd->size_in != sizeof(*dis_pass))
711		return -EINVAL;
712
713	if (cmd->size_out != 0)
714		return -EINVAL;
715
716	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
717		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
718		return -ENXIO;
719	}
720
721	dis_pass = cmd->payload_in;
722	switch (dis_pass->type) {
723	case CXL_PMEM_SEC_PASS_MASTER:
724		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
725			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
726			return -ENXIO;
727		}
728
729		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)) {
730			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
731			return -ENXIO;
732		}
733
734		if (memcmp(dis_pass->pass, mdata->master_pass, NVDIMM_PASSPHRASE_LEN)) {
735			master_plimit_check(mdata);
736			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
737			return -ENXIO;
738		}
739
740		mdata->master_limit = 0;
741		memset(mdata->master_pass, 0, NVDIMM_PASSPHRASE_LEN);
742		mdata->security_state &= ~CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
743		return 0;
744
745	case CXL_PMEM_SEC_PASS_USER:
746		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
747			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
748			return -ENXIO;
749		}
750
751		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
752			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
753			return -ENXIO;
754		}
755
756		if (memcmp(dis_pass->pass, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
757			user_plimit_check(mdata);
758			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
759			return -ENXIO;
760		}
761
762		mdata->user_limit = 0;
763		memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
764		mdata->security_state &= ~(CXL_PMEM_SEC_STATE_USER_PASS_SET |
765					   CXL_PMEM_SEC_STATE_LOCKED);
766		return 0;
767
768	default:
769		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
770		return -EINVAL;
771	}
772
773	return 0;
774}
775
776static int mock_freeze_security(struct cxl_mockmem_data *mdata,
777				struct cxl_mbox_cmd *cmd)
778{
779	if (cmd->size_in != 0)
780		return -EINVAL;
781
782	if (cmd->size_out != 0)
783		return -EINVAL;
784
785	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN)
786		return 0;
787
788	mdata->security_state |= CXL_PMEM_SEC_STATE_FROZEN;
789	return 0;
790}
791
792static int mock_unlock_security(struct cxl_mockmem_data *mdata,
793				struct cxl_mbox_cmd *cmd)
794{
795	if (cmd->size_in != NVDIMM_PASSPHRASE_LEN)
796		return -EINVAL;
797
798	if (cmd->size_out != 0)
799		return -EINVAL;
800
801	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
802		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
803		return -ENXIO;
804	}
805
806	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
807		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
808		return -ENXIO;
809	}
810
811	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
812		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
813		return -ENXIO;
814	}
815
816	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)) {
817		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
818		return -ENXIO;
819	}
820
821	if (memcmp(cmd->payload_in, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
822		if (++mdata->user_limit == PASS_TRY_LIMIT)
823			mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
824		cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
825		return -ENXIO;
826	}
827
828	mdata->user_limit = 0;
829	mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
830	return 0;
831}
832
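/*
 * Mock of Passphrase Secure Erase: verify the supplied passphrase (when one
 * is set) and clear the associated security state; the data erase itself is
 * not modeled.
 */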
833static int mock_passphrase_secure_erase(struct cxl_mockmem_data *mdata,
834					struct cxl_mbox_cmd *cmd)
835{
836	struct cxl_pass_erase *erase;
837
838	if (cmd->size_in != sizeof(*erase))
839		return -EINVAL;
840
841	if (cmd->size_out != 0)
842		return -EINVAL;
843
844	erase = cmd->payload_in;
845	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
846		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
847		return -ENXIO;
848	}
849
850	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT &&
851	    erase->type == CXL_PMEM_SEC_PASS_USER) {
852		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
853		return -ENXIO;
854	}
855
856	if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT &&
857	    erase->type == CXL_PMEM_SEC_PASS_MASTER) {
858		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
859		return -ENXIO;
860	}
861
862	switch (erase->type) {
863	case CXL_PMEM_SEC_PASS_MASTER:
864		/*
865		 * The spec does not clearly define the behavior of the scenario
866		 * where a master passphrase is passed in while the master
867		 * passphrase is not set and the user passphrase is not set. This
868		 * code assumes it behaves the same as a CXL secure erase command
869		 * without passphrase (0x4401).
870		 */
871		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET) {
872			if (memcmp(mdata->master_pass, erase->pass,
873				   NVDIMM_PASSPHRASE_LEN)) {
874				master_plimit_check(mdata);
875				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
876				return -ENXIO;
877			}
878			mdata->master_limit = 0;
879			mdata->user_limit = 0;
880			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
881			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
882			mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
883		} else {
884			/*
885			 * CXL rev3 8.2.9.8.6.3 Disable Passphrase
886			 * When master passphrase is disabled, the device shall
887			 * return Invalid Input for the Passphrase Secure Erase
888			 * command with master passphrase.
889			 */
890			return -EINVAL;
891		}
892		/* A real device would scramble its encryption keys here to effectively erase data; the mock does not */
893		break;
894	case CXL_PMEM_SEC_PASS_USER:
895		/*
896		 * The spec does not clearly define the behavior of the scenario
897		 * where a user passphrase is passed in while the user
898		 * passphrase is not set. This code assumes it behaves the same
899		 * as a CXL secure erase command without passphrase (0x4401).
901		 */
902		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
903			if (memcmp(mdata->user_pass, erase->pass,
904				   NVDIMM_PASSPHRASE_LEN)) {
905				user_plimit_check(mdata);
906				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
907				return -ENXIO;
908			}
909			mdata->user_limit = 0;
910			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
911			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
912		}
913
914		/*
915		 * CXL rev3 Table 8-118
916		 * If the user passphrase is not set or not supported by the
917		 * device, the current passphrase value is ignored. Assume the
918		 * operation proceeds as a secure erase without passphrase,
919		 * since the spec is not explicit.
920		 */
921
922		/* A real device would scramble its encryption keys here to effectively erase data; the mock does not */
923		break;
924	default:
925		return -EINVAL;
926	}
927
928	return 0;
929}
930
931static int mock_get_lsa(struct cxl_mockmem_data *mdata,
932			struct cxl_mbox_cmd *cmd)
933{
934	struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in;
935	void *lsa = mdata->lsa;
936	u32 offset, length;
937
938	if (sizeof(*get_lsa) > cmd->size_in)
939		return -EINVAL;
940	offset = le32_to_cpu(get_lsa->offset);
941	length = le32_to_cpu(get_lsa->length);
942	if (offset + length > LSA_SIZE)
943		return -EINVAL;
944	if (length > cmd->size_out)
945		return -EINVAL;
946
947	memcpy(cmd->payload_out, lsa + offset, length);
948	return 0;
949}
950
951static int mock_set_lsa(struct cxl_mockmem_data *mdata,
952			struct cxl_mbox_cmd *cmd)
953{
954	struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in;
955	void *lsa = mdata->lsa;
956	u32 offset, length;
957
958	if (sizeof(*set_lsa) > cmd->size_in)
959		return -EINVAL;
960	offset = le32_to_cpu(set_lsa->offset);
961	length = cmd->size_in - sizeof(*set_lsa);
962	if (offset + length > LSA_SIZE)
963		return -EINVAL;
964
965	memcpy(lsa + offset, &set_lsa->data[0], length);
966	return 0;
967}
968
969static int mock_health_info(struct cxl_mbox_cmd *cmd)
970{
971	struct cxl_mbox_health_info health_info = {
972		/* set flags for maint needed, perf degraded, hw replacement */
973		.health_status = 0x7,
974		/* set media status to "All Data Lost" */
975		.media_status = 0x3,
976		/*
977		 * set ext_status flags for:
978		 *  ext_life_used: normal,
979		 *  ext_temperature: critical,
980		 *  ext_corrected_volatile: warning,
981		 *  ext_corrected_persistent: normal,
982		 */
983		.ext_status = 0x18,
984		.life_used = 15,
985		.temperature = cpu_to_le16(25),
986		.dirty_shutdowns = cpu_to_le32(10),
987		.volatile_errors = cpu_to_le32(20),
988		.pmem_errors = cpu_to_le32(30),
989	};
990
991	if (cmd->size_out < sizeof(health_info))
992		return -EINVAL;
993
994	memcpy(cmd->payload_out, &health_info, sizeof(health_info));
995	return 0;
996}
997
998static struct mock_poison {
999	struct cxl_dev_state *cxlds;
1000	u64 dpa;
1001} mock_poison_list[MOCK_INJECT_TEST_MAX];
1002
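/*
 * Build a Get Poison List payload from previously injected entries for this
 * device that fall within the requested DPA range; the caller frees the
 * returned buffer.
 */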
1003static struct cxl_mbox_poison_out *
1004cxl_get_injected_po(struct cxl_dev_state *cxlds, u64 offset, u64 length)
1005{
1006	struct cxl_mbox_poison_out *po;
1007	int nr_records = 0;
1008	u64 dpa;
1009
1010	po = kzalloc(struct_size(po, record, poison_inject_dev_max), GFP_KERNEL);
1011	if (!po)
1012		return NULL;
1013
1014	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1015		if (mock_poison_list[i].cxlds != cxlds)
1016			continue;
1017		if (mock_poison_list[i].dpa < offset ||
1018		    mock_poison_list[i].dpa > offset + length - 1)
1019			continue;
1020
1021		dpa = mock_poison_list[i].dpa + CXL_POISON_SOURCE_INJECTED;
1022		po->record[nr_records].address = cpu_to_le64(dpa);
1023		po->record[nr_records].length = cpu_to_le32(1);
1024		nr_records++;
1025		if (nr_records == poison_inject_dev_max)
1026			break;
1027	}
1028
1029	/* Always return count, even when zero */
1030	po->count = cpu_to_le16(nr_records);
1031
1032	return po;
1033}
1034
1035static int mock_get_poison(struct cxl_dev_state *cxlds,
1036			   struct cxl_mbox_cmd *cmd)
1037{
1038	struct cxl_mbox_poison_in *pi = cmd->payload_in;
1039	struct cxl_mbox_poison_out *po;
1040	u64 offset = le64_to_cpu(pi->offset);
1041	u64 length = le64_to_cpu(pi->length);
1042	int nr_records;
1043
1044	po = cxl_get_injected_po(cxlds, offset, length);
1045	if (!po)
1046		return -ENOMEM;
1047	nr_records = le16_to_cpu(po->count);
1048	memcpy(cmd->payload_out, po, struct_size(po, record, nr_records));
1049	cmd->size_out = struct_size(po, record, nr_records);
1050	kfree(po);
1051
1052	return 0;
1053}
1054
1055static bool mock_poison_dev_max_injected(struct cxl_dev_state *cxlds)
1056{
1057	int count = 0;
1058
1059	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1060		if (mock_poison_list[i].cxlds == cxlds)
1061			count++;
1062	}
1063	return (count >= poison_inject_dev_max);
1064}
1065
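/*
 * Record an injected poison address, enforcing both the per-device limit and
 * the overall mock test limit.
 */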
1066static bool mock_poison_add(struct cxl_dev_state *cxlds, u64 dpa)
1067{
1068	if (mock_poison_dev_max_injected(cxlds)) {
1069		dev_dbg(cxlds->dev,
1070			"Device poison injection limit has been reached: %d\n",
1071			MOCK_INJECT_DEV_MAX);
1072		return false;
1073	}
1074
1075	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1076		if (!mock_poison_list[i].cxlds) {
1077			mock_poison_list[i].cxlds = cxlds;
1078			mock_poison_list[i].dpa = dpa;
1079			return true;
1080		}
1081	}
1082	dev_dbg(cxlds->dev,
1083		"Mock test poison injection limit has been reached: %d\n",
1084		MOCK_INJECT_TEST_MAX);
1085
1086	return false;
1087}
1088
1089static bool mock_poison_found(struct cxl_dev_state *cxlds, u64 dpa)
1090{
1091	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1092		if (mock_poison_list[i].cxlds == cxlds &&
1093		    mock_poison_list[i].dpa == dpa)
1094			return true;
1095	}
1096	return false;
1097}
1098
1099static int mock_inject_poison(struct cxl_dev_state *cxlds,
1100			      struct cxl_mbox_cmd *cmd)
1101{
1102	struct cxl_mbox_inject_poison *pi = cmd->payload_in;
1103	u64 dpa = le64_to_cpu(pi->address);
1104
1105	if (mock_poison_found(cxlds, dpa)) {
1106		/* Not an error to inject poison if already poisoned */
1107		dev_dbg(cxlds->dev, "DPA: 0x%llx already poisoned\n", dpa);
1108		return 0;
1109	}
1110	if (!mock_poison_add(cxlds, dpa))
1111		return -ENXIO;
1112
1113	return 0;
1114}
1115
1116static bool mock_poison_del(struct cxl_dev_state *cxlds, u64 dpa)
1117{
1118	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1119		if (mock_poison_list[i].cxlds == cxlds &&
1120		    mock_poison_list[i].dpa == dpa) {
1121			mock_poison_list[i].cxlds = NULL;
1122			return true;
1123		}
1124	}
1125	return false;
1126}
1127
1128static int mock_clear_poison(struct cxl_dev_state *cxlds,
1129			     struct cxl_mbox_cmd *cmd)
1130{
1131	struct cxl_mbox_clear_poison *pi = cmd->payload_in;
1132	u64 dpa = le64_to_cpu(pi->address);
1133
1134	/*
1135	 * A real CXL device will write pi->write_data to the address
1136	 * being cleared. In this mock, just delete this address from
1137	 * the mock poison list.
1138	 */
1139	if (!mock_poison_del(cxlds, dpa))
1140		dev_dbg(cxlds->dev, "DPA: 0x%llx not in poison list\n", dpa);
1141
1142	return 0;
1143}
1144
1145static bool mock_poison_list_empty(void)
1146{
1147	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1148		if (mock_poison_list[i].cxlds)
1149			return false;
1150	}
1151	return true;
1152}
1153
1154static ssize_t poison_inject_max_show(struct device_driver *drv, char *buf)
1155{
1156	return sysfs_emit(buf, "%u\n", poison_inject_dev_max);
1157}
1158
1159static ssize_t poison_inject_max_store(struct device_driver *drv,
1160				       const char *buf, size_t len)
1161{
1162	unsigned int val;
1163
1164	if (kstrtouint(buf, 0, &val) < 0)
1165		return -EINVAL;
1166
1167	if (!mock_poison_list_empty())
1168		return -EBUSY;
1169
1170	if (val <= MOCK_INJECT_TEST_MAX)
1171		poison_inject_dev_max = val;
1172	else
1173		return -EINVAL;
1174
1175	return len;
1176}
1177
1178static DRIVER_ATTR_RW(poison_inject_max);
1179
1180static struct attribute *cxl_mock_mem_core_attrs[] = {
1181	&driver_attr_poison_inject_max.attr,
1182	NULL
1183};
1184ATTRIBUTE_GROUPS(cxl_mock_mem_core);
1185
1186static int mock_fw_info(struct cxl_mockmem_data *mdata,
1187			struct cxl_mbox_cmd *cmd)
1188{
1189	struct cxl_mbox_get_fw_info fw_info = {
1190		.num_slots = FW_SLOTS,
1191		.slot_info = (mdata->fw_slot & 0x7) |
1192			     ((mdata->fw_staged & 0x7) << 3),
1193		.activation_cap = 0,
1194	};
1195
1196	strcpy(fw_info.slot_1_revision, "cxl_test_fw_001");
1197	strcpy(fw_info.slot_2_revision, "cxl_test_fw_002");
1198	strcpy(fw_info.slot_3_revision, "cxl_test_fw_003");
1199	strcpy(fw_info.slot_4_revision, "");
1200
1201	if (cmd->size_out < sizeof(fw_info))
1202		return -EINVAL;
1203
1204	memcpy(cmd->payload_out, &fw_info, sizeof(fw_info));
1205	return 0;
1206}
1207
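/*
 * Mock of Transfer FW: copy the transfer payload into the mock firmware
 * buffer at the requested offset (in CXL_FW_TRANSFER_ALIGNMENT units) and
 * record the resulting image size on FULL/END actions.
 */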
1208static int mock_transfer_fw(struct cxl_mockmem_data *mdata,
1209			    struct cxl_mbox_cmd *cmd)
1210{
1211	struct cxl_mbox_transfer_fw *transfer = cmd->payload_in;
1212	void *fw = mdata->fw;
1213	size_t offset, length;
1214
1215	offset = le32_to_cpu(transfer->offset) * CXL_FW_TRANSFER_ALIGNMENT;
1216	length = cmd->size_in - sizeof(*transfer);
1217	if (offset + length > FW_SIZE)
1218		return -EINVAL;
1219
1220	switch (transfer->action) {
1221	case CXL_FW_TRANSFER_ACTION_FULL:
1222		if (offset != 0)
1223			return -EINVAL;
1224		fallthrough;
1225	case CXL_FW_TRANSFER_ACTION_END:
1226		if (transfer->slot == 0 || transfer->slot > FW_SLOTS)
1227			return -EINVAL;
1228		mdata->fw_size = offset + length;
1229		break;
1230	case CXL_FW_TRANSFER_ACTION_INITIATE:
1231	case CXL_FW_TRANSFER_ACTION_CONTINUE:
1232		break;
1233	case CXL_FW_TRANSFER_ACTION_ABORT:
1234		return 0;
1235	default:
1236		return -EINVAL;
1237	}
1238
1239	memcpy(fw + offset, transfer->data, length);
1240	return 0;
1241}
1242
1243static int mock_activate_fw(struct cxl_mockmem_data *mdata,
1244			    struct cxl_mbox_cmd *cmd)
1245{
1246	struct cxl_mbox_activate_fw *activate = cmd->payload_in;
1247
1248	if (activate->slot == 0 || activate->slot > FW_SLOTS)
1249		return -EINVAL;
1250
1251	switch (activate->action) {
1252	case CXL_FW_ACTIVATE_ONLINE:
1253		mdata->fw_slot = activate->slot;
1254		mdata->fw_staged = 0;
1255		return 0;
1256	case CXL_FW_ACTIVATE_OFFLINE:
1257		mdata->fw_staged = activate->slot;
1258		return 0;
1259	}
1260
1261	return -EINVAL;
1262}
1263
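/* Mailbox dispatch for the mock device; unhandled opcodes return -EIO */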
1264static int cxl_mock_mbox_send(struct cxl_memdev_state *mds,
1265			      struct cxl_mbox_cmd *cmd)
1266{
1267	struct cxl_dev_state *cxlds = &mds->cxlds;
1268	struct device *dev = cxlds->dev;
1269	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1270	int rc = -EIO;
1271
1272	switch (cmd->opcode) {
1273	case CXL_MBOX_OP_SET_TIMESTAMP:
1274		rc = mock_set_timestamp(cxlds, cmd);
1275		break;
1276	case CXL_MBOX_OP_GET_SUPPORTED_LOGS:
1277		rc = mock_gsl(cmd);
1278		break;
1279	case CXL_MBOX_OP_GET_LOG:
1280		rc = mock_get_log(mds, cmd);
1281		break;
1282	case CXL_MBOX_OP_IDENTIFY:
1283		if (cxlds->rcd)
1284			rc = mock_rcd_id(cmd);
1285		else
1286			rc = mock_id(cmd);
1287		break;
1288	case CXL_MBOX_OP_GET_LSA:
1289		rc = mock_get_lsa(mdata, cmd);
1290		break;
1291	case CXL_MBOX_OP_GET_PARTITION_INFO:
1292		rc = mock_partition_info(cmd);
1293		break;
1294	case CXL_MBOX_OP_GET_EVENT_RECORD:
1295		rc = mock_get_event(dev, cmd);
1296		break;
1297	case CXL_MBOX_OP_CLEAR_EVENT_RECORD:
1298		rc = mock_clear_event(dev, cmd);
1299		break;
1300	case CXL_MBOX_OP_SET_LSA:
1301		rc = mock_set_lsa(mdata, cmd);
1302		break;
1303	case CXL_MBOX_OP_GET_HEALTH_INFO:
1304		rc = mock_health_info(cmd);
1305		break;
1306	case CXL_MBOX_OP_SANITIZE:
1307		rc = mock_sanitize(mdata, cmd);
1308		break;
1309	case CXL_MBOX_OP_SECURE_ERASE:
1310		rc = mock_secure_erase(mdata, cmd);
1311		break;
1312	case CXL_MBOX_OP_GET_SECURITY_STATE:
1313		rc = mock_get_security_state(mdata, cmd);
1314		break;
1315	case CXL_MBOX_OP_SET_PASSPHRASE:
1316		rc = mock_set_passphrase(mdata, cmd);
1317		break;
1318	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
1319		rc = mock_disable_passphrase(mdata, cmd);
1320		break;
1321	case CXL_MBOX_OP_FREEZE_SECURITY:
1322		rc = mock_freeze_security(mdata, cmd);
1323		break;
1324	case CXL_MBOX_OP_UNLOCK:
1325		rc = mock_unlock_security(mdata, cmd);
1326		break;
1327	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
1328		rc = mock_passphrase_secure_erase(mdata, cmd);
1329		break;
1330	case CXL_MBOX_OP_GET_POISON:
1331		rc = mock_get_poison(cxlds, cmd);
1332		break;
1333	case CXL_MBOX_OP_INJECT_POISON:
1334		rc = mock_inject_poison(cxlds, cmd);
1335		break;
1336	case CXL_MBOX_OP_CLEAR_POISON:
1337		rc = mock_clear_poison(cxlds, cmd);
1338		break;
1339	case CXL_MBOX_OP_GET_FW_INFO:
1340		rc = mock_fw_info(mdata, cmd);
1341		break;
1342	case CXL_MBOX_OP_TRANSFER_FW:
1343		rc = mock_transfer_fw(mdata, cmd);
1344		break;
1345	case CXL_MBOX_OP_ACTIVATE_FW:
1346		rc = mock_activate_fw(mdata, cmd);
1347		break;
1348	default:
1349		break;
1350	}
1351
1352	dev_dbg(dev, "opcode: %#x sz_in: %zd sz_out: %zd rc: %d\n", cmd->opcode,
1353		cmd->size_in, cmd->size_out, rc);
1354
1355	return rc;
1356}
1357
1358static void label_area_release(void *lsa)
1359{
1360	vfree(lsa);
1361}
1362
1363static void fw_buf_release(void *buf)
1364{
1365	vfree(buf);
1366}
1367
1368static bool is_rcd(struct platform_device *pdev)
1369{
1370	const struct platform_device_id *id = platform_get_device_id(pdev);
1371
1372	return !!id->driver_data;
1373}
1374
1375static ssize_t event_trigger_store(struct device *dev,
1376				   struct device_attribute *attr,
1377				   const char *buf, size_t count)
1378{
1379	cxl_mock_event_trigger(dev);
1380	return count;
1381}
1382static DEVICE_ATTR_WO(event_trigger);
1383
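/*
 * Create the mock memdev: allocate LSA and firmware buffers, wire up the
 * mock mailbox handler, and run the standard cxl_memdev enumeration and
 * bring-up sequence.
 */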
1384static int cxl_mock_mem_probe(struct platform_device *pdev)
1385{
1386	struct device *dev = &pdev->dev;
1387	struct cxl_memdev *cxlmd;
1388	struct cxl_memdev_state *mds;
1389	struct cxl_dev_state *cxlds;
1390	struct cxl_mockmem_data *mdata;
1391	int rc;
1392
1393	mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL);
1394	if (!mdata)
1395		return -ENOMEM;
1396	dev_set_drvdata(dev, mdata);
1397
1398	mdata->lsa = vmalloc(LSA_SIZE);
1399	if (!mdata->lsa)
1400		return -ENOMEM;
1401	mdata->fw = vmalloc(FW_SIZE);
1402	if (!mdata->fw)
1403		return -ENOMEM;
1404	mdata->fw_slot = 2;
1405
1406	rc = devm_add_action_or_reset(dev, label_area_release, mdata->lsa);
1407	if (rc)
1408		return rc;
1409
1410	rc = devm_add_action_or_reset(dev, fw_buf_release, mdata->fw);
1411	if (rc)
1412		return rc;
1413
1414	mds = cxl_memdev_state_create(dev);
1415	if (IS_ERR(mds))
1416		return PTR_ERR(mds);
1417
1418	mds->mbox_send = cxl_mock_mbox_send;
1419	mds->payload_size = SZ_4K;
1420	mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
1421
1422	cxlds = &mds->cxlds;
1423	cxlds->serial = pdev->id;
1424	if (is_rcd(pdev)) {
1425		cxlds->rcd = true;
1426		cxlds->component_reg_phys = CXL_RESOURCE_NONE;
1427	}
1428
1429	rc = cxl_enumerate_cmds(mds);
1430	if (rc)
1431		return rc;
1432
1433	rc = cxl_poison_state_init(mds);
1434	if (rc)
1435		return rc;
1436
1437	rc = cxl_set_timestamp(mds);
1438	if (rc)
1439		return rc;
1440
1441	cxlds->media_ready = true;
1442	rc = cxl_dev_state_identify(mds);
1443	if (rc)
1444		return rc;
1445
1446	rc = cxl_mem_create_range_info(mds);
1447	if (rc)
1448		return rc;
1449
1450	mdata->mes.mds = mds;
1451	cxl_mock_add_event_logs(&mdata->mes);
1452
1453	cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
1454	if (IS_ERR(cxlmd))
1455		return PTR_ERR(cxlmd);
1456
1457	rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
1458	if (rc)
1459		return rc;
1460
1461	cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);
1462
1463	return 0;
1464}
1465
1466static ssize_t security_lock_show(struct device *dev,
1467				  struct device_attribute *attr, char *buf)
1468{
1469	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1470
1471	return sysfs_emit(buf, "%u\n",
1472			  !!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED));
1473}
1474
1475static ssize_t security_lock_store(struct device *dev, struct device_attribute *attr,
1476				   const char *buf, size_t count)
1477{
1478	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1479	u32 mask = CXL_PMEM_SEC_STATE_FROZEN | CXL_PMEM_SEC_STATE_USER_PLIMIT |
1480		   CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
1481	int val;
1482
1483	if (kstrtoint(buf, 0, &val) < 0)
1484		return -EINVAL;
1485
1486	if (val == 1) {
1487		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
1488			return -ENXIO;
1489		mdata->security_state |= CXL_PMEM_SEC_STATE_LOCKED;
1490		mdata->security_state &= ~mask;
1491	} else {
1492		return -EINVAL;
1493	}
1494	return count;
1495}
1496
1497static DEVICE_ATTR_RW(security_lock);
1498
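/*
 * Expose a SHA-256 digest of the transferred firmware image so tests can
 * verify uploads.
 */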
1499static ssize_t fw_buf_checksum_show(struct device *dev,
1500				    struct device_attribute *attr, char *buf)
1501{
1502	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1503	u8 hash[SHA256_DIGEST_SIZE];
1504	unsigned char *hstr, *hptr;
1505	struct sha256_state sctx;
1506	ssize_t written = 0;
1507	int i;
1508
1509	sha256_init(&sctx);
1510	sha256_update(&sctx, mdata->fw, mdata->fw_size);
1511	sha256_final(&sctx, hash);
1512
1513	hstr = kzalloc((SHA256_DIGEST_SIZE * 2) + 1, GFP_KERNEL);
1514	if (!hstr)
1515		return -ENOMEM;
1516
1517	hptr = hstr;
1518	for (i = 0; i < SHA256_DIGEST_SIZE; i++)
1519		hptr += sprintf(hptr, "%02x", hash[i]);
1520
1521	written = sysfs_emit(buf, "%s\n", hstr);
1522
1523	kfree(hstr);
1524	return written;
1525}
1526
1527static DEVICE_ATTR_RO(fw_buf_checksum);
1528
1529static struct attribute *cxl_mock_mem_attrs[] = {
1530	&dev_attr_security_lock.attr,
1531	&dev_attr_event_trigger.attr,
1532	&dev_attr_fw_buf_checksum.attr,
1533	NULL
1534};
1535ATTRIBUTE_GROUPS(cxl_mock_mem);
1536
1537static const struct platform_device_id cxl_mock_mem_ids[] = {
1538	{ .name = "cxl_mem" },
1539	{ .name = "cxl_rcd", .driver_data = 1 },
1540	{ },
1541};
1542MODULE_DEVICE_TABLE(platform, cxl_mock_mem_ids);
1543
1544static struct platform_driver cxl_mock_mem_driver = {
1545	.probe = cxl_mock_mem_probe,
1546	.id_table = cxl_mock_mem_ids,
1547	.driver = {
1548		.name = KBUILD_MODNAME,
1549		.dev_groups = cxl_mock_mem_groups,
1550		.groups = cxl_mock_mem_core_groups,
1551	},
1552};
1553
1554module_platform_driver(cxl_mock_mem_driver);
1555MODULE_LICENSE("GPL v2");
1556MODULE_IMPORT_NS(CXL);
1557