/* xref: /kernel/linux/linux-5.10/drivers/md/dm-flakey.c (revision 8c2ecf20) */
/*
 * Copyright (C) 2003 Sistina Software (UK) Limited.
 * Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>

#define DM_MSG_PREFIX "flakey"

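/*
 * True only when every bit set in the configured corrupt_bio_flags mask is
 * also set in the bio's bi_opf.
 */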
#define all_corrupt_bio_flags_match(bio, fc)	\
	(((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)

/*
 * Flakey: Used for testing only, simulates intermittent,
 * catastrophic device failure.
 */
struct flakey_c {
	struct dm_dev *dev;
	unsigned long start_time;
	sector_t start;
	unsigned up_interval;
	unsigned down_interval;
	unsigned long flags;
	unsigned corrupt_bio_byte;
	unsigned corrupt_bio_rw;
	unsigned corrupt_bio_value;
	unsigned corrupt_bio_flags;
};

enum feature_flag_bits {
	DROP_WRITES,
	ERROR_WRITES
};

struct per_bio_data {
	bool bio_submitted;
};

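/*
 * Parse the optional feature arguments:
 *   drop_writes, error_writes,
 *   corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
 * drop_writes and error_writes are mutually exclusive, and neither may be
 * combined with corrupt_bio_byte acting on writes.
 */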
static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
			  struct dm_target *ti)
{
	int r;
	unsigned argc;
	const char *arg_name;

	static const struct dm_arg _args[] = {
		{0, 6, "Invalid number of feature args"},
		{1, UINT_MAX, "Invalid corrupt bio byte"},
		{0, 255, "Invalid corrupt value to write into bio byte (0-255)"},
		{0, UINT_MAX, "Invalid corrupt bio flags mask"},
	};

	/* No feature arguments supplied. */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return r;

	while (argc) {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!arg_name) {
			ti->error = "Insufficient feature arguments";
			return -EINVAL;
		}

		/*
		 * drop_writes
		 */
		if (!strcasecmp(arg_name, "drop_writes")) {
			if (test_and_set_bit(DROP_WRITES, &fc->flags)) {
				ti->error = "Feature drop_writes duplicated";
				return -EINVAL;
			} else if (test_bit(ERROR_WRITES, &fc->flags)) {
				ti->error = "Feature drop_writes conflicts with feature error_writes";
				return -EINVAL;
			}

			continue;
		}

		/*
		 * error_writes
		 */
		if (!strcasecmp(arg_name, "error_writes")) {
			if (test_and_set_bit(ERROR_WRITES, &fc->flags)) {
				ti->error = "Feature error_writes duplicated";
				return -EINVAL;

			} else if (test_bit(DROP_WRITES, &fc->flags)) {
				ti->error = "Feature error_writes conflicts with feature drop_writes";
				return -EINVAL;
			}

			continue;
		}

		/*
		 * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
		 */
		if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
			if (!argc) {
				ti->error = "Feature corrupt_bio_byte requires parameters";
				return -EINVAL;
			}

			r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
			if (r)
				return r;
			argc--;

			/*
			 * Direction r or w?
			 */
			arg_name = dm_shift_arg(as);
			if (arg_name && !strcasecmp(arg_name, "w"))
				fc->corrupt_bio_rw = WRITE;
			else if (arg_name && !strcasecmp(arg_name, "r"))
				fc->corrupt_bio_rw = READ;
			else {
				ti->error = "Invalid corrupt bio direction (r or w)";
				return -EINVAL;
			}
			argc--;

			/*
			 * Value of byte (0-255) to write in place of correct one.
			 */
			r = dm_read_arg(_args + 2, as, &fc->corrupt_bio_value, &ti->error);
			if (r)
				return r;
			argc--;

			/*
			 * Only corrupt bios with these flags set.
			 */
			r = dm_read_arg(_args + 3, as, &fc->corrupt_bio_flags, &ti->error);
			if (r)
				return r;
			argc--;

			continue;
		}

		ti->error = "Unrecognised flakey feature requested";
		return -EINVAL;
	}

	if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
		ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
		return -EINVAL;

	} else if (test_bit(ERROR_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
		ti->error = "error_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct a flakey mapping:
 * <dev_path> <offset> <up interval> <down interval> [<#feature args> [<arg>]*]
 *
 *   Feature args:
 *     [drop_writes]
 *     [error_writes]
 *     [corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>]
 *
 *   Nth_byte starts from 1 for the first byte.
 *   Direction is r for READ or w for WRITE.
 *   bio_flags is ignored if 0.
 */
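/*
 * Example table line (illustrative only; the device path and length below
 * are hypothetical):
 *
 *   0 409600 flakey /dev/vdb 0 30 5
 *
 * passes all I/O through for 30 seconds, then errors all I/O for 5 seconds,
 * repeating for the lifetime of the mapping.
 */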
static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	static const struct dm_arg _args[] = {
		{0, UINT_MAX, "Invalid up interval"},
		{0, UINT_MAX, "Invalid down interval"},
	};

	int r;
	struct flakey_c *fc;
	unsigned long long tmpll;
	struct dm_arg_set as;
	const char *devname;
	char dummy;

	as.argc = argc;
	as.argv = argv;

	if (argc < 4) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
	if (!fc) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}
	fc->start_time = jiffies;

	devname = dm_shift_arg(&as);

	r = -EINVAL;
	if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	fc->start = tmpll;

	r = dm_read_arg(_args, &as, &fc->up_interval, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args, &as, &fc->down_interval, &ti->error);
	if (r)
		goto bad;

	if (!(fc->up_interval + fc->down_interval)) {
		ti->error = "Total (up + down) interval is zero";
		r = -EINVAL;
		goto bad;
	}

	if (fc->up_interval + fc->down_interval < fc->up_interval) {
		ti->error = "Interval overflow";
		r = -EINVAL;
		goto bad;
	}

	r = parse_features(&as, fc, ti);
	if (r)
		goto bad;

	r = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev);
	if (r) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->per_io_data_size = sizeof(struct per_bio_data);
	ti->private = fc;
	return 0;

bad:
	kfree(fc);
	return r;
}

static void flakey_dtr(struct dm_target *ti)
{
	struct flakey_c *fc = ti->private;

	dm_put_device(ti, fc->dev);
	kfree(fc);
}

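/*
 * Translate a sector offset within the target into the corresponding sector
 * on the underlying device.
 */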
static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct flakey_c *fc = ti->private;

	return fc->start + dm_target_offset(ti, bi_sector);
}

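/*
 * Redirect the bio to the underlying device, remapping its start sector for
 * bios that carry data or are zone management requests.
 */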
static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct flakey_c *fc = ti->private;

	bio_set_dev(bio, fc->dev->bdev);
	if (bio_sectors(bio) || op_is_zone_mgmt(bio_op(bio)))
		bio->bi_iter.bi_sector =
			flakey_map_sector(ti, bio->bi_iter.bi_sector);
}

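/*
 * Overwrite the configured Nth byte of the bio's payload with
 * corrupt_bio_value.  Bios without data, or whose target byte lands on the
 * shared zero page, are left untouched.
 */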
static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
{
	unsigned int corrupt_bio_byte = fc->corrupt_bio_byte - 1;

	struct bvec_iter iter;
	struct bio_vec bvec;

	if (!bio_has_data(bio))
		return;

	/*
	 * Overwrite the Nth byte of the bio's data, on whichever page
	 * it falls.
	 */
	bio_for_each_segment(bvec, bio, iter) {
		if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
			char *segment;
			struct page *page = bio_iter_page(bio, iter);
			if (unlikely(page == ZERO_PAGE(0)))
				break;
			segment = (page_address(page) + bio_iter_offset(bio, iter));
			segment[corrupt_bio_byte] = fc->corrupt_bio_value;
			DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
				"(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n",
				bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
				(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
				(unsigned long long)bio->bi_iter.bi_sector, bio->bi_iter.bi_size);
			break;
		}
		corrupt_bio_byte -= bio_iter_len(bio, iter);
	}
}

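/*
 * Decide the fate of each bio.  During the up interval everything is simply
 * remapped to the underlying device; during the down interval bios are
 * dropped, errored, corrupted or passed through according to the configured
 * features.
 */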
static int flakey_map(struct dm_target *ti, struct bio *bio)
{
	struct flakey_c *fc = ti->private;
	unsigned elapsed;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
	pb->bio_submitted = false;

	if (op_is_zone_mgmt(bio_op(bio)))
		goto map_bio;

	/* Are we alive? */
	elapsed = (jiffies - fc->start_time) / HZ;
	if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
		/*
		 * Flag this bio as submitted while down.
		 */
		pb->bio_submitted = true;

		/*
		 * Error reads if none of corrupt_bio_byte, drop_writes or
		 * error_writes is set.  Otherwise, flakey_end_io() will
		 * decide if the reads should be modified.
		 */
		if (bio_data_dir(bio) == READ) {
			if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags) &&
			    !test_bit(ERROR_WRITES, &fc->flags))
				return DM_MAPIO_KILL;
			goto map_bio;
		}

		/*
		 * Drop or error writes?
		 */
		if (test_bit(DROP_WRITES, &fc->flags)) {
			bio_endio(bio);
			return DM_MAPIO_SUBMITTED;
		} else if (test_bit(ERROR_WRITES, &fc->flags)) {
			bio_io_error(bio);
			return DM_MAPIO_SUBMITTED;
		}

		/*
		 * Corrupt matching writes.
		 */
		if (fc->corrupt_bio_byte) {
			if (fc->corrupt_bio_rw == WRITE) {
				if (all_corrupt_bio_flags_match(bio, fc))
					corrupt_bio_data(bio, fc);
			}
			goto map_bio;
		}

		/*
		 * By default, error all I/O.
		 */
		return DM_MAPIO_KILL;
	}

map_bio:
	flakey_map_bio(ti, bio);

	return DM_MAPIO_REMAPPED;
}

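/*
 * Completion handler: successful READs that were submitted while the device
 * was down are corrupted here, once the data actually exists in the buffer,
 * or errored, depending on the configured features.
 */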
static int flakey_end_io(struct dm_target *ti, struct bio *bio,
			 blk_status_t *error)
{
	struct flakey_c *fc = ti->private;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

	if (op_is_zone_mgmt(bio_op(bio)))
		return DM_ENDIO_DONE;

	if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
		if (fc->corrupt_bio_byte) {
			if ((fc->corrupt_bio_rw == READ) &&
			    all_corrupt_bio_flags_match(bio, fc)) {
				/*
				 * Corrupt successful matching READs while in down state.
				 */
				corrupt_bio_data(bio, fc);
			}
		} else if (!test_bit(DROP_WRITES, &fc->flags) &&
			   !test_bit(ERROR_WRITES, &fc->flags)) {
			/*
			 * Error the READ during the down interval if neither
			 * drop_writes nor error_writes was configured.
			 */
			*error = BLK_STS_IOERR;
		}
	}

	return DM_ENDIO_DONE;
}

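/*
 * Report target status.  STATUSTYPE_INFO is empty; STATUSTYPE_TABLE re-emits
 * the constructor arguments, counting one feature arg for drop_writes or
 * error_writes and five for corrupt_bio_byte plus its parameters.
 */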
static void flakey_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct flakey_c *fc = ti->private;
	unsigned drop_writes, error_writes;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %llu %u %u ", fc->dev->name,
		       (unsigned long long)fc->start, fc->up_interval,
		       fc->down_interval);

		drop_writes = test_bit(DROP_WRITES, &fc->flags);
		error_writes = test_bit(ERROR_WRITES, &fc->flags);
		DMEMIT("%u ", drop_writes + error_writes + (fc->corrupt_bio_byte > 0) * 5);

		if (drop_writes)
			DMEMIT("drop_writes ");
		else if (error_writes)
			DMEMIT("error_writes ");

		if (fc->corrupt_bio_byte)
			DMEMIT("corrupt_bio_byte %u %c %u %u ",
			       fc->corrupt_bio_byte,
			       (fc->corrupt_bio_rw == WRITE) ? 'w' : 'r',
			       fc->corrupt_bio_value, fc->corrupt_bio_flags);

		break;
	}
}

static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct flakey_c *fc = ti->private;

	*bdev = fc->dev->bdev;

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (fc->start ||
	    ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
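/*
 * For zoned devices, forward zone reports to the underlying device, with the
 * query sector remapped by the target offset.
 */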
static int flakey_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct flakey_c *fc = ti->private;
	sector_t sector = flakey_map_sector(ti, args->next_sector);

	args->start = fc->start;
	return blkdev_report_zones(fc->dev->bdev, sector, nr_zones,
				   dm_report_zones_cb, args);
}
#endif

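/* Report the single underlying device to the device-mapper core. */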
static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
{
	struct flakey_c *fc = ti->private;

	return fn(ti, fc->dev, fc->start, ti->len, data);
}

static struct target_type flakey_target = {
	.name   = "flakey",
	.version = {1, 5, 0},
#ifdef CONFIG_BLK_DEV_ZONED
	.features = DM_TARGET_ZONED_HM,
	.report_zones = flakey_report_zones,
#endif
	.module = THIS_MODULE,
	.ctr    = flakey_ctr,
	.dtr    = flakey_dtr,
	.map    = flakey_map,
	.end_io = flakey_end_io,
	.status = flakey_status,
	.prepare_ioctl = flakey_prepare_ioctl,
	.iterate_devices = flakey_iterate_devices,
};

static int __init dm_flakey_init(void)
{
	int r = dm_register_target(&flakey_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_flakey_exit(void)
{
	dm_unregister_target(&flakey_target);
}

/* Module hooks */
module_init(dm_flakey_init);
module_exit(dm_flakey_exit);

MODULE_DESCRIPTION(DM_NAME " flakey target");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
