1// SPDX-License-Identifier: GPL-2.0
2//
3// regmap KUnit tests
4//
5// Copyright 2023 Arm Ltd
6
#include <kunit/test.h>
#include <linux/string.h>

#include "internal.h"
9
10#define BLOCK_TEST_SIZE 12
11
12static void get_changed_bytes(void *orig, void *new, size_t size)
13{
14	char *o = orig;
15	char *n = new;
16	int i;
17
18	get_random_bytes(new, size);
19
20	/*
21	 * This could be nicer and more efficient but we shouldn't
22	 * super care.
23	 */
24	for (i = 0; i < size; i++)
25		while (n[i] == o[i])
26			get_random_bytes(&n[i], 1);
27}
28
/*
 * Base config shared by most tests: registers 0..BLOCK_TEST_SIZE,
 * stride 1, values the width of a native unsigned int.  Tests copy
 * this and tweak the copy (cache type, defaults, access hooks).
 */
static const struct regmap_config test_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,
	.reg_stride = 1,
	.val_bits = sizeof(unsigned int) * 8,
};
34
/* Pairs a cache type with a human readable name for parameterised tests */
struct regcache_types {
	enum regcache_type type;
	const char *name;	/* printed as the KUnit parameter description */
};
39
40static void case_to_desc(const struct regcache_types *t, char *desc)
41{
42	strcpy(desc, t->name);
43}
44
/* Every cache type, including running with no cache at all */
static const struct regcache_types regcache_types_list[] = {
	{ REGCACHE_NONE, "none" },
	{ REGCACHE_FLAT, "flat" },
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, case_to_desc);
53
/* Cache types that actually store values (REGCACHE_NONE excluded) */
static const struct regcache_types real_cache_types_list[] = {
	{ REGCACHE_FLAT, "flat" },
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, case_to_desc);
61
/* Cache types that can represent sparse register maps (flat cannot) */
static const struct regcache_types sparse_cache_types_list[] = {
	{ REGCACHE_RBTREE, "rbtree" },
	{ REGCACHE_MAPLE, "maple" },
};

KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, case_to_desc);
68
69static struct regmap *gen_regmap(struct regmap_config *config,
70				 struct regmap_ram_data **data)
71{
72	unsigned int *buf;
73	struct regmap *ret;
74	size_t size = (config->max_register + 1) * sizeof(unsigned int);
75	int i;
76	struct reg_default *defaults;
77
78	config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
79					config->cache_type == REGCACHE_MAPLE;
80
81	buf = kmalloc(size, GFP_KERNEL);
82	if (!buf)
83		return ERR_PTR(-ENOMEM);
84
85	get_random_bytes(buf, size);
86
87	*data = kzalloc(sizeof(**data), GFP_KERNEL);
88	if (!(*data))
89		return ERR_PTR(-ENOMEM);
90	(*data)->vals = buf;
91
92	if (config->num_reg_defaults) {
93		defaults = kcalloc(config->num_reg_defaults,
94				   sizeof(struct reg_default),
95				   GFP_KERNEL);
96		if (!defaults)
97			return ERR_PTR(-ENOMEM);
98		config->reg_defaults = defaults;
99
100		for (i = 0; i < config->num_reg_defaults; i++) {
101			defaults[i].reg = i * config->reg_stride;
102			defaults[i].def = buf[i * config->reg_stride];
103		}
104	}
105
106	ret = regmap_init_ram(config, *data);
107	if (IS_ERR(ret)) {
108		kfree(buf);
109		kfree(*data);
110	}
111
112	return ret;
113}
114
/* Access hook reporting register 5, and only register 5, as inaccessible */
static bool reg_5_false(struct device *context, unsigned int reg)
{
	if (reg == 5)
		return false;

	return true;
}
119
120static void basic_read_write(struct kunit *test)
121{
122	struct regcache_types *t = (struct regcache_types *)test->param_value;
123	struct regmap *map;
124	struct regmap_config config;
125	struct regmap_ram_data *data;
126	unsigned int val, rval;
127
128	config = test_regmap_config;
129	config.cache_type = t->type;
130
131	map = gen_regmap(&config, &data);
132	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
133	if (IS_ERR(map))
134		return;
135
136	get_random_bytes(&val, sizeof(val));
137
138	/* If we write a value to a register we can read it back */
139	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
140	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
141	KUNIT_EXPECT_EQ(test, val, rval);
142
143	/* If using a cache the cache satisfied the read */
144	KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[0]);
145
146	regmap_exit(map);
147}
148
/* Bulk writes read back correctly via single reads, cache permitting */
static void bulk_write(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/*
	 * Data written via the bulk API can be read back with single
	 * reads.
	 */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));

	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);

	regmap_exit(map);
}
185
/* Single writes read back correctly via the bulk API, cache permitting */
static void bulk_read(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Data written as single writes can be read via the bulk API */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache satisfied the read */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);

	regmap_exit(map);
}
218
/*
 * Writes to a read-only register (5, via reg_5_false) must fail and
 * must not reach the device; all other registers write normally.
 * Defaults are provided so a cache has content from the start.
 */
static void write_readonly(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Clear the write tracking so only our writes below are counted */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;

	/* Change the value of all registers, readonly should fail */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);

	regmap_exit(map);
}
253
/*
 * Reads of a write-only register (5, via reg_5_false) must not hit the
 * device.  The flat cache is the exception for the API return code: it
 * caches everything, so the read itself still succeeds there.
 */
static void read_writeonly(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.readable_reg = reg_5_false;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map))
	if (IS_ERR(map))
		return;

	/* Clear the read tracking so only our reads below are counted */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;

	/*
	 * Try to read all the registers, the writeonly one should
	 * fail if we aren't using the flat cache.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		if (t->type != REGCACHE_FLAT) {
			KUNIT_EXPECT_EQ(test, i != 5,
					regmap_read(map, i, &val) == 0);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		}
	}

	/* Did we trigger a hardware access? */
	KUNIT_EXPECT_FALSE(test, data->read[5]);

	regmap_exit(map);
}
293
294static void reg_defaults(struct kunit *test)
295{
296	struct regcache_types *t = (struct regcache_types *)test->param_value;
297	struct regmap *map;
298	struct regmap_config config;
299	struct regmap_ram_data *data;
300	unsigned int rval[BLOCK_TEST_SIZE];
301	int i;
302
303	config = test_regmap_config;
304	config.cache_type = t->type;
305	config.num_reg_defaults = BLOCK_TEST_SIZE;
306
307	map = gen_regmap(&config, &data);
308	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
309	if (IS_ERR(map))
310		return;
311
312	/* Read back the expected default data */
313	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
314						  BLOCK_TEST_SIZE));
315	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
316
317	/* The data should have been read from cache if there was one */
318	for (i = 0; i < BLOCK_TEST_SIZE; i++)
319		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
320}
321
322static void reg_defaults_read_dev(struct kunit *test)
323{
324	struct regcache_types *t = (struct regcache_types *)test->param_value;
325	struct regmap *map;
326	struct regmap_config config;
327	struct regmap_ram_data *data;
328	unsigned int rval[BLOCK_TEST_SIZE];
329	int i;
330
331	config = test_regmap_config;
332	config.cache_type = t->type;
333	config.num_reg_defaults_raw = BLOCK_TEST_SIZE;
334
335	map = gen_regmap(&config, &data);
336	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
337	if (IS_ERR(map))
338		return;
339
340	/* We should have read the cache defaults back from the map */
341	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
342		KUNIT_EXPECT_EQ(test, t->type != REGCACHE_NONE, data->read[i]);
343		data->read[i] = false;
344	}
345
346	/* Read back the expected default data */
347	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
348						  BLOCK_TEST_SIZE));
349	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
350
351	/* The data should have been read from cache if there was one */
352	for (i = 0; i < BLOCK_TEST_SIZE; i++)
353		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
354}
355
/*
 * regmap_register_patch() writes exactly the patched registers to the
 * device, leaving everything else untouched.
 */
static void register_patch(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Only the patched registers are written */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_TRUE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_FALSE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}

	regmap_exit(map);
}
407
/*
 * With reg_stride = 2 only even registers are valid: accesses to odd
 * registers must fail without touching the device, even registers
 * behave normally and honour the cache.
 */
static void stride(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.reg_stride = 2;
	config.num_reg_defaults = BLOCK_TEST_SIZE / 2;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Only even registers can be accessed, try both read and write */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		data->read[i] = false;
		data->written[i] = false;

		if (i % 2) {
			KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_FALSE(test, data->read[i]);
			KUNIT_EXPECT_FALSE(test, data->written[i]);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_EQ(test, data->vals[i], rval);
			KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE,
					data->read[i]);

			KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_TRUE(test, data->written[i]);
		}
	}

	regmap_exit(map);
}
450
/*
 * An indirectly accessed range: virtual registers 20..40 are reached
 * through a window of 10 registers starting at register 4, with the
 * page selected via register 1.
 */
static struct regmap_range_cfg test_range = {
	.selector_reg = 1,
	.selector_mask = 0xff,

	.window_start = 4,
	.window_len = 10,

	.range_min = 20,
	.range_max = 40,
};
461
462static bool test_range_volatile(struct device *dev, unsigned int reg)
463{
464	if (reg >= test_range.window_start &&
465	    reg <= test_range.selector_reg + test_range.window_len)
466		return true;
467
468	if (reg >= test_range.range_min && reg <= test_range.range_max)
469		return true;
470
471	return false;
472}
473
/*
 * Accesses to the virtual range must be translated into a selector
 * write plus a window access, and must never touch the virtual
 * register numbers on the device directly.
 */
static void basic_ranges(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.volatile_reg = test_range_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Clear access tracking for the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		data->read[i] = false;
		data->written[i] = false;
	}

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* A write one page up must reprogram the selector too */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}

	regmap_exit(map);
}
544
545/* Try to stress dynamic creation of cache data structures */
546static void stress_insert(struct kunit *test)
547{
548	struct regcache_types *t = (struct regcache_types *)test->param_value;
549	struct regmap *map;
550	struct regmap_config config;
551	struct regmap_ram_data *data;
552	unsigned int rval, *vals;
553	size_t buf_sz;
554	int i;
555
556	config = test_regmap_config;
557	config.cache_type = t->type;
558	config.max_register = 300;
559
560	map = gen_regmap(&config, &data);
561	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
562	if (IS_ERR(map))
563		return;
564
565	vals = kunit_kcalloc(test, sizeof(unsigned long), config.max_register,
566			     GFP_KERNEL);
567	KUNIT_ASSERT_FALSE(test, vals == NULL);
568	buf_sz = sizeof(unsigned long) * config.max_register;
569
570	get_random_bytes(vals, buf_sz);
571
572	/* Write data into the map/cache in ever decreasing strides */
573	for (i = 0; i < config.max_register; i += 100)
574		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
575	for (i = 0; i < config.max_register; i += 50)
576		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
577	for (i = 0; i < config.max_register; i += 25)
578		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
579	for (i = 0; i < config.max_register; i += 10)
580		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
581	for (i = 0; i < config.max_register; i += 5)
582		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
583	for (i = 0; i < config.max_register; i += 3)
584		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
585	for (i = 0; i < config.max_register; i += 2)
586		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
587	for (i = 0; i < config.max_register; i++)
588		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
589
590	/* Do reads from the cache (if there is one) match? */
591	for (i = 0; i < config.max_register; i ++) {
592		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
593		KUNIT_EXPECT_EQ(test, rval, vals[i]);
594		KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
595	}
596
597	regmap_exit(map);
598}
599
600static void cache_bypass(struct kunit *test)
601{
602	struct regcache_types *t = (struct regcache_types *)test->param_value;
603	struct regmap *map;
604	struct regmap_config config;
605	struct regmap_ram_data *data;
606	unsigned int val, rval;
607
608	config = test_regmap_config;
609	config.cache_type = t->type;
610
611	map = gen_regmap(&config, &data);
612	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
613	if (IS_ERR(map))
614		return;
615
616	get_random_bytes(&val, sizeof(val));
617
618	/* Ensure the cache has a value in it */
619	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
620
621	/* Bypass then write a different value */
622	regcache_cache_bypass(map, true);
623	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val + 1));
624
625	/* Read the bypassed value */
626	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
627	KUNIT_EXPECT_EQ(test, val + 1, rval);
628	KUNIT_EXPECT_EQ(test, data->vals[0], rval);
629
630	/* Disable bypass, the cache should still return the original value */
631	regcache_cache_bypass(map, false);
632	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
633	KUNIT_EXPECT_EQ(test, val, rval);
634
635	regmap_exit(map);
636}
637
/*
 * regcache_sync() after regcache_mark_dirty() rewrites every cached
 * value, restoring trashed device contents.
 */
static void cache_sync(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;

	/* Trash the data on the device itself then resync */
	regcache_mark_dirty(map);
	memset(data->vals, 0, sizeof(val));
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, data->vals, val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, true, data->written[i]);

	regmap_exit(map);
}
675
/*
 * When the cache was initialised from defaults a sync only writes out
 * registers that were changed away from their default.
 */
static void cache_sync_defaults(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Change the value of one register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 2, val));

	/* Resync */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just sync the one register we touched? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i == 2, data->written[i]);

	regmap_exit(map);
}
711
/*
 * A sync must skip read-only registers: values staged in cache-only
 * mode are written out for all registers except the unwritable one.
 */
static void cache_sync_readonly(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read all registers to fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));

	/* Change the value of all registers, readonly should fail */
	get_random_bytes(&val, sizeof(val));
	regcache_cache_only(map, true);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);
	regcache_cache_only(map, false);

	/* Resync */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);

	regmap_exit(map);
}
752
/*
 * A registered patch is reapplied to the device during sync but is
 * never stored in the cache: readback still returns the defaults.
 */
static void cache_sync_patch(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE], val;
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Sync the cache */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The patch should be on the device but not in the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		KUNIT_EXPECT_EQ(test, val, rval[i]);

		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_EQ(test, true, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_EQ(test, false, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}

	regmap_exit(map);
}
813
/*
 * regcache_drop_region() evicts only the given range: a subsequent
 * bulk read hits the device for the dropped registers alone.
 */
static void cache_drop(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		data->read[i] = false;
	}
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* Drop some registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 3, 5));

	/* Reread and check only the dropped registers hit the device. */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, data->read[i], i >= 3 && i <= 5);
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	regmap_exit(map);
}
855
/*
 * regcache_reg_cached() reports registers as cached only once they
 * have been populated (here by reading them), and checking it must
 * not itself trigger device reads.
 */
static void cache_present(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.cache_type = t->type;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;

	/* No defaults so no registers cached. */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, i));

	/* We didn't trigger any reads */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, data->read[i]);

	/* Fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));

	/* Now everything should be cached */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, i));

	regmap_exit(map);
}
894
/* Parameter tuple for the raw I/O tests: cache type plus value endianness */
struct raw_test_types {
	const char *name;	/* printed as the KUnit parameter description */

	enum regcache_type cache_type;
	enum regmap_endian val_endian;
};
901
902static void raw_to_desc(const struct raw_test_types *t, char *desc)
903{
904	strcpy(desc, t->name);
905}
906
/* All cache type / value endianness combinations, including no cache */
static const struct raw_test_types raw_types_list[] = {
	{ "none-little",   REGCACHE_NONE,   REGMAP_ENDIAN_LITTLE },
	{ "none-big",      REGCACHE_NONE,   REGMAP_ENDIAN_BIG },
	{ "flat-little",   REGCACHE_FLAT,   REGMAP_ENDIAN_LITTLE },
	{ "flat-big",      REGCACHE_FLAT,   REGMAP_ENDIAN_BIG },
	{ "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
	{ "rbtree-big",    REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
	{ "maple-little",  REGCACHE_MAPLE,  REGMAP_ENDIAN_LITTLE },
	{ "maple-big",     REGCACHE_MAPLE,  REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, raw_to_desc);
919
/* As raw_types_list but restricted to configurations with a real cache */
static const struct raw_test_types raw_cache_types_list[] = {
	{ "flat-little",   REGCACHE_FLAT,   REGMAP_ENDIAN_LITTLE },
	{ "flat-big",      REGCACHE_FLAT,   REGMAP_ENDIAN_BIG },
	{ "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
	{ "rbtree-big",    REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
	{ "maple-little",  REGCACHE_MAPLE,  REGMAP_ENDIAN_LITTLE },
	{ "maple-big",     REGCACHE_MAPLE,  REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, raw_to_desc);
930
/*
 * Base config for the raw tests: 16-bit registers and values with
 * little-endian register formatting; the value endianness and cache
 * type are filled in per-case by gen_raw_regmap().
 */
static const struct regmap_config raw_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,

	.reg_format_endian = REGMAP_ENDIAN_LITTLE,
	.reg_bits = 16,
	.val_bits = 16,
};
938
939static struct regmap *gen_raw_regmap(struct regmap_config *config,
940				     struct raw_test_types *test_type,
941				     struct regmap_ram_data **data)
942{
943	u16 *buf;
944	struct regmap *ret;
945	size_t size = (config->max_register + 1) * config->reg_bits / 8;
946	int i;
947	struct reg_default *defaults;
948
949	config->cache_type = test_type->cache_type;
950	config->val_format_endian = test_type->val_endian;
951	config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
952					config->cache_type == REGCACHE_MAPLE;
953
954	buf = kmalloc(size, GFP_KERNEL);
955	if (!buf)
956		return ERR_PTR(-ENOMEM);
957
958	get_random_bytes(buf, size);
959
960	*data = kzalloc(sizeof(**data), GFP_KERNEL);
961	if (!(*data))
962		return ERR_PTR(-ENOMEM);
963	(*data)->vals = (void *)buf;
964
965	config->num_reg_defaults = config->max_register + 1;
966	defaults = kcalloc(config->num_reg_defaults,
967			   sizeof(struct reg_default),
968			   GFP_KERNEL);
969	if (!defaults)
970		return ERR_PTR(-ENOMEM);
971	config->reg_defaults = defaults;
972
973	for (i = 0; i < config->num_reg_defaults; i++) {
974		defaults[i].reg = i;
975		switch (test_type->val_endian) {
976		case REGMAP_ENDIAN_LITTLE:
977			defaults[i].def = le16_to_cpu(buf[i]);
978			break;
979		case REGMAP_ENDIAN_BIG:
980			defaults[i].def = be16_to_cpu(buf[i]);
981			break;
982		default:
983			return ERR_PTR(-EINVAL);
984		}
985	}
986
987	/*
988	 * We use the defaults in the tests but they don't make sense
989	 * to the core if there's no cache.
990	 */
991	if (config->cache_type == REGCACHE_NONE)
992		config->num_reg_defaults = 0;
993
994	ret = regmap_init_raw_ram(config, *data);
995	if (IS_ERR(ret)) {
996		kfree(buf);
997		kfree(*data);
998	}
999
1000	return ret;
1001}
1002
1003static void raw_read_defaults_single(struct kunit *test)
1004{
1005	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1006	struct regmap *map;
1007	struct regmap_config config;
1008	struct regmap_ram_data *data;
1009	unsigned int rval;
1010	int i;
1011
1012	config = raw_regmap_config;
1013
1014	map = gen_raw_regmap(&config, t, &data);
1015	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1016	if (IS_ERR(map))
1017		return;
1018
1019	/* Check that we can read the defaults via the API */
1020	for (i = 0; i < config.max_register + 1; i++) {
1021		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
1022		KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
1023	}
1024
1025	regmap_exit(map);
1026}
1027
1028static void raw_read_defaults(struct kunit *test)
1029{
1030	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1031	struct regmap *map;
1032	struct regmap_config config;
1033	struct regmap_ram_data *data;
1034	u16 *rval;
1035	u16 def;
1036	size_t val_len;
1037	int i;
1038
1039	config = raw_regmap_config;
1040
1041	map = gen_raw_regmap(&config, t, &data);
1042	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1043	if (IS_ERR(map))
1044		return;
1045
1046	val_len = sizeof(*rval) * (config.max_register + 1);
1047	rval = kmalloc(val_len, GFP_KERNEL);
1048	KUNIT_ASSERT_TRUE(test, rval != NULL);
1049	if (!rval)
1050		return;
1051
1052	/* Check that we can read the defaults via the API */
1053	KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
1054	for (i = 0; i < config.max_register + 1; i++) {
1055		def = config.reg_defaults[i].def;
1056		if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
1057			KUNIT_EXPECT_EQ(test, def, be16_to_cpu(rval[i]));
1058		} else {
1059			KUNIT_EXPECT_EQ(test, def, le16_to_cpu(rval[i]));
1060		}
1061	}
1062
1063	kfree(rval);
1064	regmap_exit(map);
1065}
1066
1067static void raw_write_read_single(struct kunit *test)
1068{
1069	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1070	struct regmap *map;
1071	struct regmap_config config;
1072	struct regmap_ram_data *data;
1073	u16 val;
1074	unsigned int rval;
1075
1076	config = raw_regmap_config;
1077
1078	map = gen_raw_regmap(&config, t, &data);
1079	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1080	if (IS_ERR(map))
1081		return;
1082
1083	get_random_bytes(&val, sizeof(val));
1084
1085	/* If we write a value to a register we can read it back */
1086	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
1087	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
1088	KUNIT_EXPECT_EQ(test, val, rval);
1089
1090	regmap_exit(map);
1091}
1092
1093static void raw_write(struct kunit *test)
1094{
1095	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1096	struct regmap *map;
1097	struct regmap_config config;
1098	struct regmap_ram_data *data;
1099	u16 *hw_buf;
1100	u16 val[2];
1101	unsigned int rval;
1102	int i;
1103
1104	config = raw_regmap_config;
1105
1106	map = gen_raw_regmap(&config, t, &data);
1107	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1108	if (IS_ERR(map))
1109		return;
1110
1111	hw_buf = (u16 *)data->vals;
1112
1113	get_random_bytes(&val, sizeof(val));
1114
1115	/* Do a raw write */
1116	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
1117
1118	/* We should read back the new values, and defaults for the rest */
1119	for (i = 0; i < config.max_register + 1; i++) {
1120		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
1121
1122		switch (i) {
1123		case 2:
1124		case 3:
1125			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
1126				KUNIT_EXPECT_EQ(test, rval,
1127						be16_to_cpu(val[i % 2]));
1128			} else {
1129				KUNIT_EXPECT_EQ(test, rval,
1130						le16_to_cpu(val[i % 2]));
1131			}
1132			break;
1133		default:
1134			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
1135			break;
1136		}
1137	}
1138
1139	/* The values should appear in the "hardware" */
1140	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
1141
1142	regmap_exit(map);
1143}
1144
/*
 * Writes made while the cache is in cache-only mode must not reach the
 * "hardware" until regcache_sync() pushes them out.  Exercises both a
 * raw write (device endian data) and a regular write (CPU endian value)
 * so the sync path has to format each correctly.
 */
static void raw_sync(struct kunit *test)
{
	struct raw_test_types *t = (struct raw_test_types *)test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val[3];
	u16 *hw_buf;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(&config, t, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	/* Ensure every byte we write differs from what the "hardware" holds */
	get_changed_bytes(&hw_buf[2], &val[0], sizeof(val));

	/* Do a regular write and a raw write in cache only mode */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val,
						  sizeof(u16) * 2));
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2]));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
			/* Raw-written data is in device endian, convert */
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu(val[i - 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu(val[i - 2]));
			}
			break;
		case 4:
			/* regmap_write() took a CPU endian value, no swap */
			KUNIT_EXPECT_EQ(test, rval, val[i - 2]);
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/*
	 * The value written via _write() was translated by the core,
	 * translate the original copy for comparison purposes.
	 */
	if (config.val_format_endian == REGMAP_ENDIAN_BIG)
		val[2] = cpu_to_be16(val[2]);
	else
		val[2] = cpu_to_le16(val[2]);

	/* The values should not appear in the "hardware" */
	KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));

	/* Clear the write tracking so only the sync's writes are recorded */
	for (i = 0; i < config.max_register + 1; i++)
		data->written[i] = false;

	/* Do the sync */
	regcache_cache_only(map, false);
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The values should now appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));

	regmap_exit(map);
}
1222
static struct kunit_case regmap_test_cases[] = {
	/* Core regmap behaviour, run against every cache type */
	KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
	KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
	KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stride, regcache_types_gen_params),
	KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
	/* Cache behaviour, only meaningful with a real cache */
	KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_defaults, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
	/* Operations only supported by sparse caches */
	KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),

	/* Raw (bus formatted) I/O paths */
	KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
	{}
};
1250
/* Register all of the above cases as the "regmap" KUnit suite */
static struct kunit_suite regmap_test_suite = {
	.name = "regmap",
	.test_cases = regmap_test_cases,
};
kunit_test_suite(regmap_test_suite);
1256
1257MODULE_LICENSE("GPL v2");
1258