xref: /kernel/linux/linux-5.10/drivers/md/raid0.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

static int default_layout = 0;
module_param(default_layout, int, 0644);
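/*
 * default_layout (above) chooses the chunk layout for multi-zone arrays
 * whose metadata does not record one.  Judging from the error message in
 * create_strip_zones() below, 1 selects RAID0_ORIG_LAYOUT and 2 selects
 * RAID0_ALT_MULTIZONE_LAYOUT; single-zone arrays behave identically
 * under either layout.
 */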

#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_FAILFAST_SUPPORTED) |\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))
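
/*
 * UNSUPPORTED_MDDEV_FLAGS above covers features (journal, PPL, failfast)
 * that raid0 does not implement; the takeover paths below clear them via
 * mddev_clear_unsupported_flags() so a converted array does not keep
 * advertising features the new personality cannot honour.
 */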

/*
 * inform the user of the raid configuration
 */
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
		 mdname(mddev),
		 conf->nr_strip_zones, conf->nr_strip_zones == 1 ? "" : "s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		char line[200];
		int len = 0;

		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			len += scnprintf(line+len, 200-len, "%s%s", k?"/":"",
					bdevname(conf->devlist[j*raid_disks
							       + k]->bdev, b));
		pr_debug("md: zone%d=[%s]\n", j, line);

		zone_size  = conf->strip_zone[j].zone_end - zone_start;
		pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
}

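/*
 * How zones form, by way of a worked example: three members of 100, 200
 * and 200 chunks give two zones.  Zone 0 stripes across all three devices
 * for the first 100 chunks of each (300 chunks of array space); zone 1
 * stripes across only the two larger devices for their remaining 100
 * chunks (200 more).  nr_strip_zones is thus the number of distinct
 * member sizes, as counted below.
 */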
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	unsigned blksize = 512;

	*private_conf = ERR_PTR(-ENOMEM);
	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		blksize = max(blksize, queue_logical_block_size(
				      rdev1->bdev->bd_disk->queue));

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s:   comparing %s(%llu)"
				 " with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev, b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev, b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s:   END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s:   EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s:   NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s:   ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);

	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % blksize) {
		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
			mdname(mddev),
			mddev->chunk_sectors << 9, blksize);
		err = -EINVAL;
		goto abort;
	}

	err = -ENOMEM;
	conf->strip_zone = kcalloc(conf->nr_strip_zones,
				   sizeof(struct strip_zone),
				   GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
					    conf->nr_strip_zones,
					    mddev->raid_disks),
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
				mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
			mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++) {
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s:  (%llu) is smallest!\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}

	if (conf->nr_strip_zones == 1 || conf->strip_zone[1].nb_dev == 1) {
		conf->layout = RAID0_ORIG_LAYOUT;
	} else if (mddev->layout == RAID0_ORIG_LAYOUT ||
		   mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = mddev->layout;
	} else if (default_layout == RAID0_ORIG_LAYOUT ||
		   default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = default_layout;
	} else {
		pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
		       mdname(mddev));
		pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
		err = -EOPNOTSUPP;
		goto abort;
	}

	if (conf->layout == RAID0_ORIG_LAYOUT) {
		for (i = 1; i < conf->nr_strip_zones; i++) {
			sector_t first_sector = conf->strip_zone[i-1].zone_end;

			sector_div(first_sector, mddev->chunk_sectors);
			zone = conf->strip_zone + i;
			/* disk_shift is first disk index used in the zone */
			zone->disk_shift = sector_div(first_sector,
						      zone->nb_dev);
		}
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}

/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

/*
 * Remap the bio to the target device.  We separate two flows: a
 * power-of-2 flow and a general flow, for the sake of performance.
 */
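/*
 * Worked example of the power-of-2 path, for the first zone (where the
 * array sector equals the zone offset): with chunk_sects = 128 and
 * nb_dev = 4, sector 1000 gives sect_in_chunk = 1000 & 127 = 104 and
 * array chunk 1000 >> 7 = 7; chunk = 1000 / (4 * 128) = 1 is the chunk
 * on the member, and 7 % 4 = 3 picks the member, so the bio lands at
 * device sector 1 * 128 + 104 = 232 (plus dev_start, added by the caller).
 */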
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk  = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device */
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 * Position the bio over the real device:
	 * real sector = chunk in device + start of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors-1));

	return array_sectors;
}

static void raid0_free(struct mddev *mddev, void *priv);

static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;
	if (mddev->queue) {
		struct md_rdev *rdev;
		bool discard_supported = false;

		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);

		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		blk_queue_io_opt(mddev->queue,
				 (mddev->chunk_sectors << 9) * mddev->raid_disks);

		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
				discard_supported = true;
		}
		if (!discard_supported)
			blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
		else
			blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
	}

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
		 mdname(mddev),
		 (unsigned long long)mddev->array_sectors);

	dump_zones(mddev);

	ret = md_integrity_register(mddev);

	return ret;
}

static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
}

/*
 * Convert disk_index to the disk order in which it is read/written.
 *  For example, if we have 4 disks, they are numbered 0, 1, 2, 3.  If we
 *  write the disks starting at disk 3, then the read/write order is
 *  disk 3, then 0, then 1, and then disk 2, and we want map_disk_shift()
 *  to map the disks as follows: 0, 1, 2, 3 => 1, 2, 3, 0.  So disk 0
 *  maps to 1, 1 to 2, 2 to 3, and 3 to 0.  That way we can compare disks
 *  in that 'output' space to understand the read/write disk ordering.
 */
static int map_disk_shift(int disk_index, int num_disks, int disk_shift)
{
	return ((disk_index + num_disks - disk_shift) % num_disks);
}

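/*
 * A discard may span many stripes, so instead of splitting chunk by chunk
 * this path computes, for each member disk, the one contiguous range
 * [dev_start, dev_end) the request covers there and issues a single
 * discard per disk.  For example, with 64KiB chunks on 2 disks, a discard
 * of zone offsets [0, 384KiB) covers stripes 0-2 and each disk gets one
 * 192KiB discard instead of three 64KiB ones.
 */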
static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	sector_t start = bio->bi_iter.bi_sector;
	sector_t end;
	unsigned int stripe_size;
	sector_t first_stripe_index, last_stripe_index;
	sector_t start_disk_offset;
	unsigned int start_disk_index;
	sector_t end_disk_offset;
	unsigned int end_disk_index;
	unsigned int disk;
	sector_t orig_start, orig_end;

	orig_start = start;
	zone = find_zone(conf, &start);

	if (bio_end_sector(bio) > zone->zone_end) {
		struct bio *split = bio_split(bio,
			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
			&mddev->bio_set);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
		end = zone->zone_end;
	} else
		end = bio_end_sector(bio);

	orig_end = end;
	if (zone != conf->strip_zone)
		end = end - zone[-1].zone_end;

	/* Now start and end are offsets within the zone */
	stripe_size = zone->nb_dev * mddev->chunk_sectors;

	first_stripe_index = start;
	sector_div(first_stripe_index, stripe_size);
	last_stripe_index = end;
	sector_div(last_stripe_index, stripe_size);

	/* In the first zone the original and alternate layouts are the same */
	if ((conf->layout == RAID0_ORIG_LAYOUT) && (zone != conf->strip_zone)) {
		sector_div(orig_start, mddev->chunk_sectors);
		start_disk_index = sector_div(orig_start, zone->nb_dev);
		start_disk_index = map_disk_shift(start_disk_index,
						  zone->nb_dev,
						  zone->disk_shift);
		sector_div(orig_end, mddev->chunk_sectors);
		end_disk_index = sector_div(orig_end, zone->nb_dev);
		end_disk_index = map_disk_shift(end_disk_index,
						zone->nb_dev, zone->disk_shift);
	} else {
		start_disk_index = (int)(start - first_stripe_index * stripe_size) /
			mddev->chunk_sectors;
		end_disk_index = (int)(end - last_stripe_index * stripe_size) /
			mddev->chunk_sectors;
	}
	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		first_stripe_index * mddev->chunk_sectors;
	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		last_stripe_index * mddev->chunk_sectors;

	for (disk = 0; disk < zone->nb_dev; disk++) {
		sector_t dev_start, dev_end;
		struct bio *discard_bio = NULL;
		struct md_rdev *rdev;
		int compare_disk;

		compare_disk = map_disk_shift(disk, zone->nb_dev,
					      zone->disk_shift);

		if (compare_disk < start_disk_index)
			dev_start = (first_stripe_index + 1) *
				mddev->chunk_sectors;
		else if (compare_disk > start_disk_index)
			dev_start = first_stripe_index * mddev->chunk_sectors;
		else
			dev_start = start_disk_offset;

		if (compare_disk < end_disk_index)
			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
		else if (compare_disk > end_disk_index)
			dev_end = last_stripe_index * mddev->chunk_sectors;
		else
			dev_end = end_disk_offset;

		if (dev_end <= dev_start)
			continue;

		rdev = conf->devlist[(zone - conf->strip_zone) *
			conf->strip_zone[0].nb_dev + disk];
		if (__blkdev_issue_discard(rdev->bdev,
			dev_start + zone->dev_start + rdev->data_offset,
			dev_end - dev_start, GFP_NOIO, 0, &discard_bio) ||
		    !discard_bio)
			continue;
		bio_chain(discard_bio, bio);
		bio_clone_blkg_association(discard_bio, bio);
		if (mddev->gendisk)
			trace_block_bio_remap(bdev_get_queue(rdev->bdev),
				discard_bio, disk_devt(mddev->gendisk),
				bio->bi_iter.bi_sector);
		submit_bio_noacct(discard_bio);
	}
	bio_endio(bio);
}

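/*
 * Bios are split so that none crosses a chunk boundary: only the sectors
 * left in the current chunk are mapped here, and the remainder is
 * resubmitted via submit_bio_noacct() to be split again.  For example,
 * with 128-sector chunks, a 100-sector bio starting at sector 100 has
 * 28 sectors left in its chunk, so it is split 28 + 72 and each part
 * maps to a single member device.
 */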
static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	sector_t bio_sector;
	sector_t sector;
	sector_t orig_sector;
	unsigned chunk_sects;
	unsigned sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;

	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
		raid0_handle_discard(mddev, bio);
		return true;
	}

	bio_sector = bio->bi_iter.bi_sector;
	sector = bio_sector;
	chunk_sects = mddev->chunk_sectors;

	sectors = chunk_sects -
		(likely(is_power_of_2(chunk_sects))
		 ? (sector & (chunk_sects-1))
		 : sector_div(sector, chunk_sects));

	/* Restore due to sector_div */
	sector = bio_sector;

	if (sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
					      &mddev->bio_set);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
	}

	orig_sector = sector;
	zone = find_zone(mddev->private, &sector);
	switch (conf->layout) {
	case RAID0_ORIG_LAYOUT:
		tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
		break;
	case RAID0_ALT_MULTIZONE_LAYOUT:
		tmp_dev = map_sector(mddev, zone, sector, &sector);
		break;
	default:
		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
		bio_io_error(bio);
		return true;
	}

	if (unlikely(is_mddev_broken(tmp_dev, "raid0"))) {
		bio_io_error(bio);
		return true;
	}

	bio_set_dev(bio, tmp_dev->bdev);
	bio->bi_iter.bi_sector = sector + zone->dev_start +
		tmp_dev->data_offset;

	if (mddev->gendisk)
		trace_block_bio_remap(bio->bi_disk->queue, bio,
				disk_devt(mddev->gendisk), bio_sector);
	mddev_check_writesame(mddev, bio);
	mddev_check_write_zeroes(mddev, bio);
	submit_bio_noacct(bio);
	return true;
}

static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
			mdname(mddev),
			mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
				mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);

	return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - the number of disks must be even
	 *  - all mirrors must already be degraded
	 */
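	/*
	 * 0x102 below is the raid10 layout word for near_copies = 2 (low
	 * byte) and far_copies = 1 (second byte), i.e. the plain n2 layout
	 * described above.
	 */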
	if (mddev->layout != ((1 << 8) + 2)) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
			mdname(mddev),
			mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		pr_warn("md/raid0:%s: All mirrors must already be degraded!\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = - mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

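/*
 * Worked example of the chunk-size search below: an array of 500000
 * sectors is not a multiple of the 128- or 64-sector candidates but is a
 * multiple of 32, so chunksect ends up 32 sectors (16KiB), which still
 * passes the PAGE_SIZE check on 4KiB-page systems.
 */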
static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must already be faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		pr_err("md/raid0:%s: (N - 1) mirror drives must already be faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;

	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active,
	 *  raid5 - provided it has the raid4 layout and one disk is faulty,
	 *  raid10 - assuming we have all necessary active disks,
	 *  raid1 - with (N - 1) mirror drives faulty.
	 */

	if (mddev->bitmap) {
		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
			mdname(mddev));
		return ERR_PTR(-EBUSY);
	}
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		pr_warn("md/raid0:%s: raid0 can only take over raid5 with layout %d\n",
			mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	pr_warn("Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}

static struct md_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
};

static int __init raid0_init(void)
{
	return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
	unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");