xref: /kernel/linux/linux-6.6/drivers/md/md-faulty.c (revision 62306a36)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * faulty.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2004 Neil Brown
 *
 * faulty-device-simulator personality for md
 */


/*
 * The "faulty" personality causes some requests to fail.
 *
 * Possible failure modes are:
 *   reads fail "randomly" but succeed on retry
 *   writes fail "randomly" but succeed on retry
 *   reads for some address fail and then persist until a write
 *   reads for some address fail and then persist irrespective of write
 *   writes for some address fail and persist
 *   all writes fail
 *
 * Multiple modes can be active at the same time, but only
 * one can be set at array creation.  Others can be added later.
 * A mode can be one-shot or recurrent, with the recurrence being
 * once in every N requests.
 * The bottom 5 bits of the "layout" indicate the mode.  The
 * remainder indicate a period, or 0 for one-shot.
 *
 * There is an implementation limit on the number of concurrently
 * persisting-faulty blocks.  When a new fault is requested that would
 * exceed the limit, it is ignored.
 * All current faults can be cleared using a layout of "0".
 *
 * Requests are always sent to the device.  If they are to fail,
 * we clone the bio and insert a new b_end_io into the chain.
 */

#define	WriteTransient	0
#define	ReadTransient	1
#define	WritePersistent	2
#define	ReadPersistent	3
#define	WriteAll	4 /* doesn't go to device */
#define	ReadFixable	5
#define	Modes	6

#define	ClearErrors	31
#define	ClearFaults	30

#define AllPersist	100 /* internal use only */
#define	NoPersist	101

#define	ModeMask	0x1f
#define	ModeShift	5

#define MaxFault	50

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include <linux/seq_file.h>

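/*
 * Illustrative sketch, not part of the original driver (the helper name
 * is hypothetical): how a "layout" value is built from the constants
 * above.  For instance, ReadTransient once in roughly every 10 requests
 * would be (10 << ModeShift) | ReadTransient, while a period of 0 in
 * the upper bits makes the mode one-shot.
 */
static inline int faulty_example_layout(int mode, int period)
{
	/* mode occupies the low ModeShift bits, the period the rest */
	return (period << ModeShift) | (mode & ModeMask);
}
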
64static void faulty_fail(struct bio *bio)
65{
66	struct bio *b = bio->bi_private;
67
68	b->bi_iter.bi_size = bio->bi_iter.bi_size;
69	b->bi_iter.bi_sector = bio->bi_iter.bi_sector;
70
71	bio_put(bio);
72
73	bio_io_error(b);
74}
75
76struct faulty_conf {
77	int period[Modes];
78	atomic_t counters[Modes];
79	sector_t faults[MaxFault];
80	int	modes[MaxFault];
81	int nfaults;
82	struct md_rdev *rdev;
83};
84
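/*
 * check_mode() decides whether the current request should be failed for
 * @mode: the per-mode counter is decremented on each call and the
 * request fails when it reaches zero.  For periodic modes
 * (period[mode] != 0) the counter is then reloaded, so roughly one
 * request in every "period" fails; a zero period with a non-positive
 * counter means the mode is inactive (or its one-shot has been used).
 */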
static int check_mode(struct faulty_conf *conf, int mode)
{
	if (conf->period[mode] == 0 &&
	    atomic_read(&conf->counters[mode]) <= 0)
		return 0; /* no failure, no decrement */

	if (atomic_dec_and_test(&conf->counters[mode])) {
		if (conf->period[mode])
			atomic_set(&conf->counters[mode], conf->period[mode]);
		return 1;
	}
	return 0;
}

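/*
 * check_sector() looks up the range @start..@end in the table of sticky
 * faults and reports whether the request should fail.  The switch below
 * folds the stored mode and the request direction into a single value
 * (READ is 0, WRITE is 1): e.g. a ReadFixable entry hit by a write takes
 * the ReadFixable*2+WRITE case, which downgrades the entry to NoPersist
 * so later reads of that sector succeed again.
 */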
static int check_sector(struct faulty_conf *conf, sector_t start, sector_t end, int dir)
{
	/* If we find a ReadFixable sector, we fix it ... */
	int i;
	for (i=0; i<conf->nfaults; i++)
		if (conf->faults[i] >= start &&
		    conf->faults[i] < end) {
			/* found it ... */
			switch (conf->modes[i] * 2 + dir) {
			case WritePersistent*2+WRITE: return 1;
			case ReadPersistent*2+READ: return 1;
			case ReadFixable*2+READ: return 1;
			case ReadFixable*2+WRITE:
				conf->modes[i] = NoPersist;
				return 0;
			case AllPersist*2+READ:
			case AllPersist*2+WRITE: return 1;
			default:
				return 0;
			}
		}
	return 0;
}

static void add_sector(struct faulty_conf *conf, sector_t start, int mode)
{
	int i;
	int n = conf->nfaults;
	for (i=0; i<conf->nfaults; i++)
		if (conf->faults[i] == start) {
			switch(mode) {
			case NoPersist: conf->modes[i] = mode; return;
			case WritePersistent:
				if (conf->modes[i] == ReadPersistent ||
				    conf->modes[i] == ReadFixable)
					conf->modes[i] = AllPersist;
				else
					conf->modes[i] = WritePersistent;
				return;
			case ReadPersistent:
				if (conf->modes[i] == WritePersistent)
					conf->modes[i] = AllPersist;
				else
					conf->modes[i] = ReadPersistent;
				return;
			case ReadFixable:
				if (conf->modes[i] == WritePersistent ||
				    conf->modes[i] == ReadPersistent)
					conf->modes[i] = AllPersist;
				else
					conf->modes[i] = ReadFixable;
				return;
			}
		} else if (conf->modes[i] == NoPersist)
			n = i;

	if (n >= MaxFault)
		return;
	conf->faults[n] = start;
	conf->modes[n] = mode;
	if (conf->nfaults == n)
		conf->nfaults = n+1;
}

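/*
 * Failure injection happens in faulty_make_request() below.  Apart from
 * the WriteAll special case, the request is still sent to the underlying
 * device: when it is to be failed, a clone is submitted instead and the
 * original bio is kept in ->bi_private.  When the clone completes,
 * faulty_fail() copies the iterator fields back, drops the clone and
 * completes the original bio with bio_io_error(), so the caller sees an
 * I/O error even though the transfer was issued.
 */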
static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
{
	struct faulty_conf *conf = mddev->private;
	int failit = 0;

	if (bio_data_dir(bio) == WRITE) {
		/* write request */
		if (atomic_read(&conf->counters[WriteAll])) {
			/* special case - don't decrement, don't submit_bio_noacct,
			 * just fail immediately
			 */
			bio_io_error(bio);
			return true;
		}

		if (check_sector(conf, bio->bi_iter.bi_sector,
				 bio_end_sector(bio), WRITE))
			failit = 1;
		if (check_mode(conf, WritePersistent)) {
			add_sector(conf, bio->bi_iter.bi_sector,
				   WritePersistent);
			failit = 1;
		}
		if (check_mode(conf, WriteTransient))
			failit = 1;
	} else {
		/* read request */
		if (check_sector(conf, bio->bi_iter.bi_sector,
				 bio_end_sector(bio), READ))
			failit = 1;
		if (check_mode(conf, ReadTransient))
			failit = 1;
		if (check_mode(conf, ReadPersistent)) {
			add_sector(conf, bio->bi_iter.bi_sector,
				   ReadPersistent);
			failit = 1;
		}
		if (check_mode(conf, ReadFixable)) {
			add_sector(conf, bio->bi_iter.bi_sector,
				   ReadFixable);
			failit = 1;
		}
	}

	md_account_bio(mddev, &bio);
	if (failit) {
		struct bio *b = bio_alloc_clone(conf->rdev->bdev, bio, GFP_NOIO,
						&mddev->bio_set);

		b->bi_private = bio;
		b->bi_end_io = faulty_fail;
		bio = b;
	} else
		bio_set_dev(bio, conf->rdev->bdev);

	submit_bio_noacct(bio);
	return true;
}

static void faulty_status(struct seq_file *seq, struct mddev *mddev)
{
	struct faulty_conf *conf = mddev->private;
	int n;

	if ((n=atomic_read(&conf->counters[WriteTransient])) != 0)
		seq_printf(seq, " WriteTransient=%d(%d)",
			   n, conf->period[WriteTransient]);

	if ((n=atomic_read(&conf->counters[ReadTransient])) != 0)
		seq_printf(seq, " ReadTransient=%d(%d)",
			   n, conf->period[ReadTransient]);

	if ((n=atomic_read(&conf->counters[WritePersistent])) != 0)
		seq_printf(seq, " WritePersistent=%d(%d)",
			   n, conf->period[WritePersistent]);

	if ((n=atomic_read(&conf->counters[ReadPersistent])) != 0)
		seq_printf(seq, " ReadPersistent=%d(%d)",
			   n, conf->period[ReadPersistent]);

	if ((n=atomic_read(&conf->counters[ReadFixable])) != 0)
		seq_printf(seq, " ReadFixable=%d(%d)",
			   n, conf->period[ReadFixable]);

	if ((n=atomic_read(&conf->counters[WriteAll])) != 0)
		seq_printf(seq, " WriteAll");

	seq_printf(seq, " nfaults=%d", conf->nfaults);
}

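/*
 * faulty_reshape() applies a new "layout" written by userspace (for
 * example via mdadm --grow --layout=...).  Worked example: a layout of
 * 321 decodes as mode = 321 & ModeMask = 1 (ReadTransient) and
 * count = 321 >> ModeShift = 10, i.e. fail roughly one read in every
 * ten.  A ClearFaults mode empties the sticky-fault table; ClearErrors
 * resets all periods and counters.
 */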
static int faulty_reshape(struct mddev *mddev)
{
	int mode = mddev->new_layout & ModeMask;
	int count = mddev->new_layout >> ModeShift;
	struct faulty_conf *conf = mddev->private;

	if (mddev->new_layout < 0)
		return 0;

	/* new layout */
	if (mode == ClearFaults)
		conf->nfaults = 0;
	else if (mode == ClearErrors) {
		int i;
		for (i=0 ; i < Modes ; i++) {
			conf->period[i] = 0;
			atomic_set(&conf->counters[i], 0);
		}
	} else if (mode < Modes) {
		conf->period[mode] = count;
		if (!count) count++;
		atomic_set(&conf->counters[mode], count);
	} else
		return -EINVAL;
	mddev->new_layout = -1;
	mddev->layout = -1; /* makes sure further changes come through */
	return 0;
}

static sector_t faulty_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	WARN_ONCE(raid_disks,
		  "%s does not support generic reshape\n", __func__);

	if (sectors == 0)
		return mddev->dev_sectors;

	return sectors;
}

static int faulty_run(struct mddev *mddev)
{
	struct md_rdev *rdev;
	int i;
	struct faulty_conf *conf;

	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	conf = kmalloc(sizeof(*conf), GFP_KERNEL);
	if (!conf)
		return -ENOMEM;

	for (i=0; i<Modes; i++) {
		atomic_set(&conf->counters[i], 0);
		conf->period[i] = 0;
	}
	conf->nfaults = 0;

	rdev_for_each(rdev, mddev) {
		conf->rdev = rdev;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);
	}

	md_set_array_sectors(mddev, faulty_size(mddev, 0, 0));
	mddev->private = conf;

	faulty_reshape(mddev);

	return 0;
}

static void faulty_free(struct mddev *mddev, void *priv)
{
	struct faulty_conf *conf = priv;

	kfree(conf);
}

static struct md_personality faulty_personality =
{
	.name		= "faulty",
	.level		= LEVEL_FAULTY,
	.owner		= THIS_MODULE,
	.make_request	= faulty_make_request,
	.run		= faulty_run,
	.free		= faulty_free,
	.status		= faulty_status,
	.check_reshape	= faulty_reshape,
	.size		= faulty_size,
};

static int __init raid_init(void)
{
	return register_md_personality(&faulty_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&faulty_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Fault injection personality for MD (deprecated)");
MODULE_ALIAS("md-personality-10"); /* faulty */
MODULE_ALIAS("md-faulty");
MODULE_ALIAS("md-level--5");