// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * raid10.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2000-2004 Neil Brown
 *
 * RAID-10 support for md.
 *
 * Based on code in raid1.c.  See raid1.c for further copyright information.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/md_p.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid10.h"
#include "raid0.h"
#include "md-bitmap.h"

/*
 * RAID10 provides a combination of RAID0 and RAID1 functionality.
 * The layout of data is defined by
 *    chunk_size
 *    raid_disks
 *    near_copies (stored in low byte of layout)
 *    far_copies (stored in second byte of layout)
 *    far_offset (stored in bit 16 of layout)
 *    use_far_sets (stored in bit 17 of layout)
 *    use_far_sets_bugfixed (stored in bit 18 of layout)
 *
 * The data to be stored is divided into chunks using chunksize.  Each device
 * is divided into far_copies sections.  In each section, chunks are laid out
 * in a style similar to raid0, but near_copies copies of each chunk are stored
 * (each on a different drive).  The starting device for each section is offset
 * near_copies from the starting device of the previous section.  Thus there
 * are (near_copies * far_copies) copies of each chunk, and each is on a
 * different drive.  near_copies and far_copies must be at least one, and
 * their product is at most raid_disks.
 *
 * If far_offset is true, then the far_copies are handled a bit differently.
 * The copies are still in different stripes, but instead of being very far
 * apart on disk, they are adjacent stripes.
 *
 * The far and offset algorithms are handled slightly differently if
 * 'use_far_sets' is true.  In this case, the array's devices are grouped into
 * sets that are (near_copies * far_copies) in size.  The far copied stripes
 * are still shifted by 'near_copies' devices, but this shifting stays confined
 * to the set rather than the entire array.  This is done to improve the number
 * of device combinations that can fail without causing the array to fail.
 * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
 * on a device):
 *    A B C D    A B C D E
 *      ...         ...
 *    D A B C    E A B C D
 * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
 *    [A B] [C D]    [A B] [C D E]
 *    |...| |...|    |...| | ... |
 *    [B A] [D C]    [B A] [E C D]
 */

static void allow_barrier(struct r10conf *conf);
static void lower_barrier(struct r10conf *conf);
static int _enough(struct r10conf *conf, int previous, int ignore);
static int enough(struct r10conf *conf, int ignore);
static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
				int *skipped);
static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
static void end_reshape_write(struct bio *bio);
static void end_reshape(struct r10conf *conf);

#define raid10_log(md, fmt, args...)				\
	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)

#include "raid1-10.c"

/*
 * for resync bio, r10bio pointer can be retrieved from the per-bio
 * 'struct resync_pages'.
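 * (r10buf_pool_alloc() below stores the owning r10bio in
 * resync_pages->raid_bio and hangs the resync_pages off bio->bi_private;
 * get_resync_r10bio() just reads it back.)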
85 */ 86static inline struct r10bio *get_resync_r10bio(struct bio *bio) 87{ 88 return get_resync_pages(bio)->raid_bio; 89} 90 91static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) 92{ 93 struct r10conf *conf = data; 94 int size = offsetof(struct r10bio, devs[conf->copies]); 95 96 /* allocate a r10bio with room for raid_disks entries in the 97 * bios array */ 98 return kzalloc(size, gfp_flags); 99} 100 101#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9) 102/* amount of memory to reserve for resync requests */ 103#define RESYNC_WINDOW (1024*1024) 104/* maximum number of concurrent requests, memory permitting */ 105#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE) 106#define CLUSTER_RESYNC_WINDOW (32 * RESYNC_WINDOW) 107#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9) 108 109/* 110 * When performing a resync, we need to read and compare, so 111 * we need as many pages are there are copies. 112 * When performing a recovery, we need 2 bios, one for read, 113 * one for write (we recover only one drive per r10buf) 114 * 115 */ 116static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) 117{ 118 struct r10conf *conf = data; 119 struct r10bio *r10_bio; 120 struct bio *bio; 121 int j; 122 int nalloc, nalloc_rp; 123 struct resync_pages *rps; 124 125 r10_bio = r10bio_pool_alloc(gfp_flags, conf); 126 if (!r10_bio) 127 return NULL; 128 129 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || 130 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) 131 nalloc = conf->copies; /* resync */ 132 else 133 nalloc = 2; /* recovery */ 134 135 /* allocate once for all bios */ 136 if (!conf->have_replacement) 137 nalloc_rp = nalloc; 138 else 139 nalloc_rp = nalloc * 2; 140 rps = kmalloc_array(nalloc_rp, sizeof(struct resync_pages), gfp_flags); 141 if (!rps) 142 goto out_free_r10bio; 143 144 /* 145 * Allocate bios. 146 */ 147 for (j = nalloc ; j-- ; ) { 148 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); 149 if (!bio) 150 goto out_free_bio; 151 r10_bio->devs[j].bio = bio; 152 if (!conf->have_replacement) 153 continue; 154 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); 155 if (!bio) 156 goto out_free_bio; 157 r10_bio->devs[j].repl_bio = bio; 158 } 159 /* 160 * Allocate RESYNC_PAGES data pages and attach them 161 * where needed. 
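 * For recovery, only the first bio gets a freshly allocated page set;
 * the remaining bios share those pages (see the memcpy from rps[0]
 * below), so the data that was read can be written out from the same
 * pages.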
162 */ 163 for (j = 0; j < nalloc; j++) { 164 struct bio *rbio = r10_bio->devs[j].repl_bio; 165 struct resync_pages *rp, *rp_repl; 166 167 rp = &rps[j]; 168 if (rbio) 169 rp_repl = &rps[nalloc + j]; 170 171 bio = r10_bio->devs[j].bio; 172 173 if (!j || test_bit(MD_RECOVERY_SYNC, 174 &conf->mddev->recovery)) { 175 if (resync_alloc_pages(rp, gfp_flags)) 176 goto out_free_pages; 177 } else { 178 memcpy(rp, &rps[0], sizeof(*rp)); 179 resync_get_all_pages(rp); 180 } 181 182 rp->raid_bio = r10_bio; 183 bio->bi_private = rp; 184 if (rbio) { 185 memcpy(rp_repl, rp, sizeof(*rp)); 186 rbio->bi_private = rp_repl; 187 } 188 } 189 190 return r10_bio; 191 192out_free_pages: 193 while (--j >= 0) 194 resync_free_pages(&rps[j]); 195 196 j = 0; 197out_free_bio: 198 for ( ; j < nalloc; j++) { 199 if (r10_bio->devs[j].bio) 200 bio_put(r10_bio->devs[j].bio); 201 if (r10_bio->devs[j].repl_bio) 202 bio_put(r10_bio->devs[j].repl_bio); 203 } 204 kfree(rps); 205out_free_r10bio: 206 rbio_pool_free(r10_bio, conf); 207 return NULL; 208} 209 210static void r10buf_pool_free(void *__r10_bio, void *data) 211{ 212 struct r10conf *conf = data; 213 struct r10bio *r10bio = __r10_bio; 214 int j; 215 struct resync_pages *rp = NULL; 216 217 for (j = conf->copies; j--; ) { 218 struct bio *bio = r10bio->devs[j].bio; 219 220 if (bio) { 221 rp = get_resync_pages(bio); 222 resync_free_pages(rp); 223 bio_put(bio); 224 } 225 226 bio = r10bio->devs[j].repl_bio; 227 if (bio) 228 bio_put(bio); 229 } 230 231 /* resync pages array stored in the 1st bio's .bi_private */ 232 kfree(rp); 233 234 rbio_pool_free(r10bio, conf); 235} 236 237static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio) 238{ 239 int i; 240 241 for (i = 0; i < conf->copies; i++) { 242 struct bio **bio = & r10_bio->devs[i].bio; 243 if (!BIO_SPECIAL(*bio)) 244 bio_put(*bio); 245 *bio = NULL; 246 bio = &r10_bio->devs[i].repl_bio; 247 if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio)) 248 bio_put(*bio); 249 *bio = NULL; 250 } 251} 252 253static void free_r10bio(struct r10bio *r10_bio) 254{ 255 struct r10conf *conf = r10_bio->mddev->private; 256 257 put_all_bios(conf, r10_bio); 258 mempool_free(r10_bio, &conf->r10bio_pool); 259} 260 261static void put_buf(struct r10bio *r10_bio) 262{ 263 struct r10conf *conf = r10_bio->mddev->private; 264 265 mempool_free(r10_bio, &conf->r10buf_pool); 266 267 lower_barrier(conf); 268} 269 270static void reschedule_retry(struct r10bio *r10_bio) 271{ 272 unsigned long flags; 273 struct mddev *mddev = r10_bio->mddev; 274 struct r10conf *conf = mddev->private; 275 276 spin_lock_irqsave(&conf->device_lock, flags); 277 list_add(&r10_bio->retry_list, &conf->retry_list); 278 conf->nr_queued ++; 279 spin_unlock_irqrestore(&conf->device_lock, flags); 280 281 /* wake up frozen array... */ 282 wake_up(&conf->wait_barrier); 283 284 md_wakeup_thread(mddev->thread); 285} 286 287/* 288 * raid_end_bio_io() is called when we have finished servicing a mirrored 289 * operation and are ready to return a success/failure code to the buffer 290 * cache layer. 291 */ 292static void raid_end_bio_io(struct r10bio *r10_bio) 293{ 294 struct bio *bio = r10_bio->master_bio; 295 struct r10conf *conf = r10_bio->mddev->private; 296 297 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) 298 bio->bi_status = BLK_STS_IOERR; 299 300 bio_endio(bio); 301 /* 302 * Wake up any possible resync thread that waits for the device 303 * to go idle. 
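 * (allow_barrier() drops conf->nr_pending, which raise_barrier() waits
 * on.)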
304 */ 305 allow_barrier(conf); 306 307 free_r10bio(r10_bio); 308} 309 310/* 311 * Update disk head position estimator based on IRQ completion info. 312 */ 313static inline void update_head_pos(int slot, struct r10bio *r10_bio) 314{ 315 struct r10conf *conf = r10_bio->mddev->private; 316 317 conf->mirrors[r10_bio->devs[slot].devnum].head_position = 318 r10_bio->devs[slot].addr + (r10_bio->sectors); 319} 320 321/* 322 * Find the disk number which triggered given bio 323 */ 324static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio, 325 struct bio *bio, int *slotp, int *replp) 326{ 327 int slot; 328 int repl = 0; 329 330 for (slot = 0; slot < conf->copies; slot++) { 331 if (r10_bio->devs[slot].bio == bio) 332 break; 333 if (r10_bio->devs[slot].repl_bio == bio) { 334 repl = 1; 335 break; 336 } 337 } 338 339 BUG_ON(slot == conf->copies); 340 update_head_pos(slot, r10_bio); 341 342 if (slotp) 343 *slotp = slot; 344 if (replp) 345 *replp = repl; 346 return r10_bio->devs[slot].devnum; 347} 348 349static void raid10_end_read_request(struct bio *bio) 350{ 351 int uptodate = !bio->bi_status; 352 struct r10bio *r10_bio = bio->bi_private; 353 int slot; 354 struct md_rdev *rdev; 355 struct r10conf *conf = r10_bio->mddev->private; 356 357 slot = r10_bio->read_slot; 358 rdev = r10_bio->devs[slot].rdev; 359 /* 360 * this branch is our 'one mirror IO has finished' event handler: 361 */ 362 update_head_pos(slot, r10_bio); 363 364 if (uptodate) { 365 /* 366 * Set R10BIO_Uptodate in our master bio, so that 367 * we will return a good error code to the higher 368 * levels even if IO on some other mirrored buffer fails. 369 * 370 * The 'master' represents the composite IO operation to 371 * user-side. So if something waits for IO, then it will 372 * wait for the 'master' bio. 373 */ 374 set_bit(R10BIO_Uptodate, &r10_bio->state); 375 } else { 376 /* If all other devices that store this block have 377 * failed, we want to return the error upwards rather 378 * than fail the last device. 
Here we redefine 379 * "uptodate" to mean "Don't want to retry" 380 */ 381 if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state), 382 rdev->raid_disk)) 383 uptodate = 1; 384 } 385 if (uptodate) { 386 raid_end_bio_io(r10_bio); 387 rdev_dec_pending(rdev, conf->mddev); 388 } else { 389 /* 390 * oops, read error - keep the refcount on the rdev 391 */ 392 char b[BDEVNAME_SIZE]; 393 pr_err_ratelimited("md/raid10:%s: %s: rescheduling sector %llu\n", 394 mdname(conf->mddev), 395 bdevname(rdev->bdev, b), 396 (unsigned long long)r10_bio->sector); 397 set_bit(R10BIO_ReadError, &r10_bio->state); 398 reschedule_retry(r10_bio); 399 } 400} 401 402static void close_write(struct r10bio *r10_bio) 403{ 404 /* clear the bitmap if all writes complete successfully */ 405 md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, 406 r10_bio->sectors, 407 !test_bit(R10BIO_Degraded, &r10_bio->state), 408 0); 409 md_write_end(r10_bio->mddev); 410} 411 412static void one_write_done(struct r10bio *r10_bio) 413{ 414 if (atomic_dec_and_test(&r10_bio->remaining)) { 415 if (test_bit(R10BIO_WriteError, &r10_bio->state)) 416 reschedule_retry(r10_bio); 417 else { 418 close_write(r10_bio); 419 if (test_bit(R10BIO_MadeGood, &r10_bio->state)) 420 reschedule_retry(r10_bio); 421 else 422 raid_end_bio_io(r10_bio); 423 } 424 } 425} 426 427static void raid10_end_write_request(struct bio *bio) 428{ 429 struct r10bio *r10_bio = bio->bi_private; 430 int dev; 431 int dec_rdev = 1; 432 struct r10conf *conf = r10_bio->mddev->private; 433 int slot, repl; 434 struct md_rdev *rdev = NULL; 435 struct bio *to_put = NULL; 436 bool discard_error; 437 438 discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD; 439 440 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); 441 442 if (repl) 443 rdev = conf->mirrors[dev].replacement; 444 if (!rdev) { 445 smp_rmb(); 446 repl = 0; 447 rdev = conf->mirrors[dev].rdev; 448 } 449 /* 450 * this branch is our 'one mirror IO has finished' event handler: 451 */ 452 if (bio->bi_status && !discard_error) { 453 if (repl) 454 /* Never record new bad blocks to replacement, 455 * just fail it. 456 */ 457 md_error(rdev->mddev, rdev); 458 else { 459 set_bit(WriteErrorSeen, &rdev->flags); 460 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 461 set_bit(MD_RECOVERY_NEEDED, 462 &rdev->mddev->recovery); 463 464 dec_rdev = 0; 465 if (test_bit(FailFast, &rdev->flags) && 466 (bio->bi_opf & MD_FAILFAST)) { 467 md_error(rdev->mddev, rdev); 468 } 469 470 /* 471 * When the device is faulty, it is not necessary to 472 * handle write error. 473 */ 474 if (!test_bit(Faulty, &rdev->flags)) 475 set_bit(R10BIO_WriteError, &r10_bio->state); 476 else { 477 /* Fail the request */ 478 set_bit(R10BIO_Degraded, &r10_bio->state); 479 r10_bio->devs[slot].bio = NULL; 480 to_put = bio; 481 dec_rdev = 1; 482 } 483 } 484 } else { 485 /* 486 * Set R10BIO_Uptodate in our master bio, so that 487 * we will return a good error code for to the higher 488 * levels even if IO on some other mirrored buffer fails. 489 * 490 * The 'master' represents the composite IO operation to 491 * user-side. So if something waits for IO, then it will 492 * wait for the 'master' bio. 493 */ 494 sector_t first_bad; 495 int bad_sectors; 496 497 /* 498 * Do not set R10BIO_Uptodate if the current device is 499 * rebuilding or Faulty. 
This is because we cannot use
		 * such a device for properly reading the data back (we could
		 * potentially use it, if the current write had fallen
		 * before rdev->recovery_offset, but for simplicity we don't
		 * check this here).
		 */
		if (test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			set_bit(R10BIO_Uptodate, &r10_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(rdev,
				r10_bio->devs[slot].addr,
				r10_bio->sectors,
				&first_bad, &bad_sectors) && !discard_error) {
			bio_put(bio);
			if (repl)
				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
			else
				r10_bio->devs[slot].bio = IO_MADE_GOOD;
			dec_rdev = 0;
			set_bit(R10BIO_MadeGood, &r10_bio->state);
		}
	}

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	one_write_done(r10_bio);
	if (dec_rdev)
		rdev_dec_pending(rdev, conf->mddev);
	if (to_put)
		bio_put(to_put);
}

/*
 * RAID10 layout manager
 * As well as the chunksize and raid_disks count, there are two
 * parameters: near_copies and far_copies.
 * near_copies * far_copies must be <= raid_disks.
 * Normally one of these will be 1.
 * If both are 1, we get raid0.
 * If near_copies == raid_disks, we get raid1.
 *
 * Chunks are laid out in raid0 style with near_copies copies of the
 * first chunk, followed by near_copies copies of the next chunk and
 * so on.
 * If far_copies > 1, then after 1/far_copies of the array has been assigned
 * as described above, we start again with a device offset of near_copies.
 * So we effectively have another copy of the whole array further down all
 * the drives, but with blocks on different drives.
 * With this layout, a block is never stored twice on the same device.
 *
 * raid10_find_phys finds the sector offset of a given virtual sector
 * on each device that it is on.
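 *
 * For example, with raid_disks=4, near_copies=2 and far_copies=1,
 * virtual chunk 0 is stored on devices 0 and 1, chunk 1 on devices 2
 * and 3, chunk 2 on devices 0 and 1 at the next chunk offset, and so on.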
556 * 557 * raid10_find_virt does the reverse mapping, from a device and a 558 * sector offset to a virtual address 559 */ 560 561static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio) 562{ 563 int n,f; 564 sector_t sector; 565 sector_t chunk; 566 sector_t stripe; 567 int dev; 568 int slot = 0; 569 int last_far_set_start, last_far_set_size; 570 571 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1; 572 last_far_set_start *= geo->far_set_size; 573 574 last_far_set_size = geo->far_set_size; 575 last_far_set_size += (geo->raid_disks % geo->far_set_size); 576 577 /* now calculate first sector/dev */ 578 chunk = r10bio->sector >> geo->chunk_shift; 579 sector = r10bio->sector & geo->chunk_mask; 580 581 chunk *= geo->near_copies; 582 stripe = chunk; 583 dev = sector_div(stripe, geo->raid_disks); 584 if (geo->far_offset) 585 stripe *= geo->far_copies; 586 587 sector += stripe << geo->chunk_shift; 588 589 /* and calculate all the others */ 590 for (n = 0; n < geo->near_copies; n++) { 591 int d = dev; 592 int set; 593 sector_t s = sector; 594 r10bio->devs[slot].devnum = d; 595 r10bio->devs[slot].addr = s; 596 slot++; 597 598 for (f = 1; f < geo->far_copies; f++) { 599 set = d / geo->far_set_size; 600 d += geo->near_copies; 601 602 if ((geo->raid_disks % geo->far_set_size) && 603 (d > last_far_set_start)) { 604 d -= last_far_set_start; 605 d %= last_far_set_size; 606 d += last_far_set_start; 607 } else { 608 d %= geo->far_set_size; 609 d += geo->far_set_size * set; 610 } 611 s += geo->stride; 612 r10bio->devs[slot].devnum = d; 613 r10bio->devs[slot].addr = s; 614 slot++; 615 } 616 dev++; 617 if (dev >= geo->raid_disks) { 618 dev = 0; 619 sector += (geo->chunk_mask + 1); 620 } 621 } 622} 623 624static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio) 625{ 626 struct geom *geo = &conf->geo; 627 628 if (conf->reshape_progress != MaxSector && 629 ((r10bio->sector >= conf->reshape_progress) != 630 conf->mddev->reshape_backwards)) { 631 set_bit(R10BIO_Previous, &r10bio->state); 632 geo = &conf->prev; 633 } else 634 clear_bit(R10BIO_Previous, &r10bio->state); 635 636 __raid10_find_phys(geo, r10bio); 637} 638 639static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev) 640{ 641 sector_t offset, chunk, vchunk; 642 /* Never use conf->prev as this is only called during resync 643 * or recovery, so reshape isn't happening 644 */ 645 struct geom *geo = &conf->geo; 646 int far_set_start = (dev / geo->far_set_size) * geo->far_set_size; 647 int far_set_size = geo->far_set_size; 648 int last_far_set_start; 649 650 if (geo->raid_disks % geo->far_set_size) { 651 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1; 652 last_far_set_start *= geo->far_set_size; 653 654 if (dev >= last_far_set_start) { 655 far_set_size = geo->far_set_size; 656 far_set_size += (geo->raid_disks % geo->far_set_size); 657 far_set_start = last_far_set_start; 658 } 659 } 660 661 offset = sector & geo->chunk_mask; 662 if (geo->far_offset) { 663 int fc; 664 chunk = sector >> geo->chunk_shift; 665 fc = sector_div(chunk, geo->far_copies); 666 dev -= fc * geo->near_copies; 667 if (dev < far_set_start) 668 dev += far_set_size; 669 } else { 670 while (sector >= geo->stride) { 671 sector -= geo->stride; 672 if (dev < (geo->near_copies + far_set_start)) 673 dev += far_set_size - geo->near_copies; 674 else 675 dev -= geo->near_copies; 676 } 677 chunk = sector >> geo->chunk_shift; 678 } 679 vchunk = chunk * geo->raid_disks + dev; 680 sector_div(vchunk, geo->near_copies); 681 
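	/* vchunk is the virtual chunk number; convert it back to a virtual
	 * sector and add the offset within the chunk.
	 */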
return (vchunk << geo->chunk_shift) + offset; 682} 683 684/* 685 * This routine returns the disk from which the requested read should 686 * be done. There is a per-array 'next expected sequential IO' sector 687 * number - if this matches on the next IO then we use the last disk. 688 * There is also a per-disk 'last know head position' sector that is 689 * maintained from IRQ contexts, both the normal and the resync IO 690 * completion handlers update this position correctly. If there is no 691 * perfect sequential match then we pick the disk whose head is closest. 692 * 693 * If there are 2 mirrors in the same 2 devices, performance degrades 694 * because position is mirror, not device based. 695 * 696 * The rdev for the device selected will have nr_pending incremented. 697 */ 698 699/* 700 * FIXME: possibly should rethink readbalancing and do it differently 701 * depending on near_copies / far_copies geometry. 702 */ 703static struct md_rdev *read_balance(struct r10conf *conf, 704 struct r10bio *r10_bio, 705 int *max_sectors) 706{ 707 const sector_t this_sector = r10_bio->sector; 708 int disk, slot; 709 int sectors = r10_bio->sectors; 710 int best_good_sectors; 711 sector_t new_distance, best_dist; 712 struct md_rdev *best_dist_rdev, *best_pending_rdev, *rdev = NULL; 713 int do_balance; 714 int best_dist_slot, best_pending_slot; 715 bool has_nonrot_disk = false; 716 unsigned int min_pending; 717 struct geom *geo = &conf->geo; 718 719 raid10_find_phys(conf, r10_bio); 720 rcu_read_lock(); 721 best_dist_slot = -1; 722 min_pending = UINT_MAX; 723 best_dist_rdev = NULL; 724 best_pending_rdev = NULL; 725 best_dist = MaxSector; 726 best_good_sectors = 0; 727 do_balance = 1; 728 clear_bit(R10BIO_FailFast, &r10_bio->state); 729 /* 730 * Check if we can balance. We can balance on the whole 731 * device if no resync is going on (recovery is ok), or below 732 * the resync window. We take the first readable disk when 733 * above the resync window. 734 */ 735 if ((conf->mddev->recovery_cp < MaxSector 736 && (this_sector + sectors >= conf->next_resync)) || 737 (mddev_is_clustered(conf->mddev) && 738 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, 739 this_sector + sectors))) 740 do_balance = 0; 741 742 for (slot = 0; slot < conf->copies ; slot++) { 743 sector_t first_bad; 744 int bad_sectors; 745 sector_t dev_sector; 746 unsigned int pending; 747 bool nonrot; 748 749 if (r10_bio->devs[slot].bio == IO_BLOCKED) 750 continue; 751 disk = r10_bio->devs[slot].devnum; 752 rdev = rcu_dereference(conf->mirrors[disk].replacement); 753 if (rdev == NULL || test_bit(Faulty, &rdev->flags) || 754 r10_bio->devs[slot].addr + sectors > 755 rdev->recovery_offset) { 756 /* 757 * Read replacement first to prevent reading both rdev 758 * and replacement as NULL during replacement replace 759 * rdev. 760 */ 761 smp_mb(); 762 rdev = rcu_dereference(conf->mirrors[disk].rdev); 763 } 764 if (rdev == NULL || 765 test_bit(Faulty, &rdev->flags)) 766 continue; 767 if (!test_bit(In_sync, &rdev->flags) && 768 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) 769 continue; 770 771 dev_sector = r10_bio->devs[slot].addr; 772 if (is_badblock(rdev, dev_sector, sectors, 773 &first_bad, &bad_sectors)) { 774 if (best_dist < MaxSector) 775 /* Already have a better slot */ 776 continue; 777 if (first_bad <= dev_sector) { 778 /* Cannot read here. If this is the 779 * 'primary' device, then we must not read 780 * beyond 'bad_sectors' from another device. 
781 */ 782 bad_sectors -= (dev_sector - first_bad); 783 if (!do_balance && sectors > bad_sectors) 784 sectors = bad_sectors; 785 if (best_good_sectors > sectors) 786 best_good_sectors = sectors; 787 } else { 788 sector_t good_sectors = 789 first_bad - dev_sector; 790 if (good_sectors > best_good_sectors) { 791 best_good_sectors = good_sectors; 792 best_dist_slot = slot; 793 best_dist_rdev = rdev; 794 } 795 if (!do_balance) 796 /* Must read from here */ 797 break; 798 } 799 continue; 800 } else 801 best_good_sectors = sectors; 802 803 if (!do_balance) 804 break; 805 806 nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev)); 807 has_nonrot_disk |= nonrot; 808 pending = atomic_read(&rdev->nr_pending); 809 if (min_pending > pending && nonrot) { 810 min_pending = pending; 811 best_pending_slot = slot; 812 best_pending_rdev = rdev; 813 } 814 815 if (best_dist_slot >= 0) 816 /* At least 2 disks to choose from so failfast is OK */ 817 set_bit(R10BIO_FailFast, &r10_bio->state); 818 /* This optimisation is debatable, and completely destroys 819 * sequential read speed for 'far copies' arrays. So only 820 * keep it for 'near' arrays, and review those later. 821 */ 822 if (geo->near_copies > 1 && !pending) 823 new_distance = 0; 824 825 /* for far > 1 always use the lowest address */ 826 else if (geo->far_copies > 1) 827 new_distance = r10_bio->devs[slot].addr; 828 else 829 new_distance = abs(r10_bio->devs[slot].addr - 830 conf->mirrors[disk].head_position); 831 832 if (new_distance < best_dist) { 833 best_dist = new_distance; 834 best_dist_slot = slot; 835 best_dist_rdev = rdev; 836 } 837 } 838 if (slot >= conf->copies) { 839 if (has_nonrot_disk) { 840 slot = best_pending_slot; 841 rdev = best_pending_rdev; 842 } else { 843 slot = best_dist_slot; 844 rdev = best_dist_rdev; 845 } 846 } 847 848 if (slot >= 0) { 849 atomic_inc(&rdev->nr_pending); 850 r10_bio->read_slot = slot; 851 } else 852 rdev = NULL; 853 rcu_read_unlock(); 854 *max_sectors = best_good_sectors; 855 856 return rdev; 857} 858 859static void flush_pending_writes(struct r10conf *conf) 860{ 861 /* Any writes that have been queued but are awaiting 862 * bitmap updates get flushed here. 863 */ 864 spin_lock_irq(&conf->device_lock); 865 866 if (conf->pending_bio_list.head) { 867 struct blk_plug plug; 868 struct bio *bio; 869 870 bio = bio_list_get(&conf->pending_bio_list); 871 conf->pending_count = 0; 872 spin_unlock_irq(&conf->device_lock); 873 874 /* 875 * As this is called in a wait_event() loop (see freeze_array), 876 * current->state might be TASK_UNINTERRUPTIBLE which will 877 * cause a warning when we prepare to wait again. As it is 878 * rare that this path is taken, it is perfectly safe to force 879 * us to go around the wait_event() loop again, so the warning 880 * is a false-positive. 
Silence the warning by resetting
		 * thread state.
		 */
		__set_current_state(TASK_RUNNING);

		blk_start_plug(&plug);
		/* flush any pending bitmap writes to disk
		 * before proceeding w/ I/O */
		md_bitmap_unplug(conf->mddev->bitmap);
		wake_up(&conf->wait_barrier);

		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;
			struct md_rdev *rdev = (void*)bio->bi_disk;
			bio->bi_next = NULL;
			bio_set_dev(bio, rdev->bdev);
			if (test_bit(Faulty, &rdev->flags)) {
				bio_io_error(bio);
			} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
					    !blk_queue_discard(bio->bi_disk->queue)))
				/* Just ignore it */
				bio_endio(bio);
			else
				submit_bio_noacct(bio);
			bio = next;
			cond_resched();
		}
		blk_finish_plug(&plug);
	} else
		spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO,
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 * is no background IO happening.  It must arrange to call
 * allow_barrier when it has finished its IO.
 * Background IO calls must call raise_barrier.  Once that returns
 * there is no normal IO happening.  It must arrange to call
 * lower_barrier when the particular background IO completes.
 */

static void raise_barrier(struct r10conf *conf, int force)
{
	BUG_ON(force && !conf->barrier);
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting (unless 'force') */
	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
			    conf->resync_lock);

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !atomic_read(&conf->nr_pending) && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock);

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(struct r10conf *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void wait_barrier(struct r10conf *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		struct bio_list *bio_list = current->bio_list;
		conf->nr_waiting++;
		/* Wait for the barrier to drop.
		 * However if there are already pending
		 * requests (preventing the barrier from
		 * rising completely), and the
		 * pre-process bio queue isn't empty,
		 * then don't wait, as we need to empty
		 * that queue to get the nr_pending
		 * count down.
977 */ 978 raid10_log(conf->mddev, "wait barrier"); 979 wait_event_lock_irq(conf->wait_barrier, 980 !conf->barrier || 981 (atomic_read(&conf->nr_pending) && 982 bio_list && 983 (!bio_list_empty(&bio_list[0]) || 984 !bio_list_empty(&bio_list[1]))) || 985 /* move on if recovery thread is 986 * blocked by us 987 */ 988 (conf->mddev->thread->tsk == current && 989 test_bit(MD_RECOVERY_RUNNING, 990 &conf->mddev->recovery) && 991 conf->nr_queued > 0), 992 conf->resync_lock); 993 conf->nr_waiting--; 994 if (!conf->nr_waiting) 995 wake_up(&conf->wait_barrier); 996 } 997 atomic_inc(&conf->nr_pending); 998 spin_unlock_irq(&conf->resync_lock); 999} 1000 1001static void allow_barrier(struct r10conf *conf) 1002{ 1003 if ((atomic_dec_and_test(&conf->nr_pending)) || 1004 (conf->array_freeze_pending)) 1005 wake_up(&conf->wait_barrier); 1006} 1007 1008static void freeze_array(struct r10conf *conf, int extra) 1009{ 1010 /* stop syncio and normal IO and wait for everything to 1011 * go quiet. 1012 * We increment barrier and nr_waiting, and then 1013 * wait until nr_pending match nr_queued+extra 1014 * This is called in the context of one normal IO request 1015 * that has failed. Thus any sync request that might be pending 1016 * will be blocked by nr_pending, and we need to wait for 1017 * pending IO requests to complete or be queued for re-try. 1018 * Thus the number queued (nr_queued) plus this request (extra) 1019 * must match the number of pending IOs (nr_pending) before 1020 * we continue. 1021 */ 1022 spin_lock_irq(&conf->resync_lock); 1023 conf->array_freeze_pending++; 1024 conf->barrier++; 1025 conf->nr_waiting++; 1026 wait_event_lock_irq_cmd(conf->wait_barrier, 1027 atomic_read(&conf->nr_pending) == conf->nr_queued+extra, 1028 conf->resync_lock, 1029 flush_pending_writes(conf)); 1030 1031 conf->array_freeze_pending--; 1032 spin_unlock_irq(&conf->resync_lock); 1033} 1034 1035static void unfreeze_array(struct r10conf *conf) 1036{ 1037 /* reverse the effect of the freeze */ 1038 spin_lock_irq(&conf->resync_lock); 1039 conf->barrier--; 1040 conf->nr_waiting--; 1041 wake_up(&conf->wait_barrier); 1042 spin_unlock_irq(&conf->resync_lock); 1043} 1044 1045static sector_t choose_data_offset(struct r10bio *r10_bio, 1046 struct md_rdev *rdev) 1047{ 1048 if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) || 1049 test_bit(R10BIO_Previous, &r10_bio->state)) 1050 return rdev->data_offset; 1051 else 1052 return rdev->new_data_offset; 1053} 1054 1055struct raid10_plug_cb { 1056 struct blk_plug_cb cb; 1057 struct bio_list pending; 1058 int pending_cnt; 1059}; 1060 1061static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) 1062{ 1063 struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb, 1064 cb); 1065 struct mddev *mddev = plug->cb.data; 1066 struct r10conf *conf = mddev->private; 1067 struct bio *bio; 1068 1069 if (from_schedule || current->bio_list) { 1070 spin_lock_irq(&conf->device_lock); 1071 bio_list_merge(&conf->pending_bio_list, &plug->pending); 1072 conf->pending_count += plug->pending_cnt; 1073 spin_unlock_irq(&conf->device_lock); 1074 wake_up(&conf->wait_barrier); 1075 md_wakeup_thread(mddev->thread); 1076 kfree(plug); 1077 return; 1078 } 1079 1080 /* we aren't scheduling, so we can do the write-out directly. 
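	 * The bitmap is flushed once for the whole batch, then each queued
	 * write is submitted to its destination rdev.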
*/ 1081 bio = bio_list_get(&plug->pending); 1082 md_bitmap_unplug(mddev->bitmap); 1083 wake_up(&conf->wait_barrier); 1084 1085 while (bio) { /* submit pending writes */ 1086 struct bio *next = bio->bi_next; 1087 struct md_rdev *rdev = (void*)bio->bi_disk; 1088 bio->bi_next = NULL; 1089 bio_set_dev(bio, rdev->bdev); 1090 if (test_bit(Faulty, &rdev->flags)) { 1091 bio_io_error(bio); 1092 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 1093 !blk_queue_discard(bio->bi_disk->queue))) 1094 /* Just ignore it */ 1095 bio_endio(bio); 1096 else 1097 submit_bio_noacct(bio); 1098 bio = next; 1099 cond_resched(); 1100 } 1101 kfree(plug); 1102} 1103 1104/* 1105 * 1. Register the new request and wait if the reconstruction thread has put 1106 * up a bar for new requests. Continue immediately if no resync is active 1107 * currently. 1108 * 2. If IO spans the reshape position. Need to wait for reshape to pass. 1109 */ 1110static void regular_request_wait(struct mddev *mddev, struct r10conf *conf, 1111 struct bio *bio, sector_t sectors) 1112{ 1113 wait_barrier(conf); 1114 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 1115 bio->bi_iter.bi_sector < conf->reshape_progress && 1116 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { 1117 raid10_log(conf->mddev, "wait reshape"); 1118 allow_barrier(conf); 1119 wait_event(conf->wait_barrier, 1120 conf->reshape_progress <= bio->bi_iter.bi_sector || 1121 conf->reshape_progress >= bio->bi_iter.bi_sector + 1122 sectors); 1123 wait_barrier(conf); 1124 } 1125} 1126 1127static void raid10_read_request(struct mddev *mddev, struct bio *bio, 1128 struct r10bio *r10_bio) 1129{ 1130 struct r10conf *conf = mddev->private; 1131 struct bio *read_bio; 1132 const int op = bio_op(bio); 1133 const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); 1134 int max_sectors; 1135 struct md_rdev *rdev; 1136 char b[BDEVNAME_SIZE]; 1137 int slot = r10_bio->read_slot; 1138 struct md_rdev *err_rdev = NULL; 1139 gfp_t gfp = GFP_NOIO; 1140 1141 if (slot >= 0 && r10_bio->devs[slot].rdev) { 1142 /* 1143 * This is an error retry, but we cannot 1144 * safely dereference the rdev in the r10_bio, 1145 * we must use the one in conf. 1146 * If it has already been disconnected (unlikely) 1147 * we lose the device name in error messages. 1148 */ 1149 int disk; 1150 /* 1151 * As we are blocking raid10, it is a little safer to 1152 * use __GFP_HIGH. 
1153 */ 1154 gfp = GFP_NOIO | __GFP_HIGH; 1155 1156 rcu_read_lock(); 1157 disk = r10_bio->devs[slot].devnum; 1158 err_rdev = rcu_dereference(conf->mirrors[disk].rdev); 1159 if (err_rdev) 1160 bdevname(err_rdev->bdev, b); 1161 else { 1162 strcpy(b, "???"); 1163 /* This never gets dereferenced */ 1164 err_rdev = r10_bio->devs[slot].rdev; 1165 } 1166 rcu_read_unlock(); 1167 } 1168 1169 regular_request_wait(mddev, conf, bio, r10_bio->sectors); 1170 rdev = read_balance(conf, r10_bio, &max_sectors); 1171 if (!rdev) { 1172 if (err_rdev) { 1173 pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n", 1174 mdname(mddev), b, 1175 (unsigned long long)r10_bio->sector); 1176 } 1177 raid_end_bio_io(r10_bio); 1178 return; 1179 } 1180 if (err_rdev) 1181 pr_err_ratelimited("md/raid10:%s: %s: redirecting sector %llu to another mirror\n", 1182 mdname(mddev), 1183 bdevname(rdev->bdev, b), 1184 (unsigned long long)r10_bio->sector); 1185 if (max_sectors < bio_sectors(bio)) { 1186 struct bio *split = bio_split(bio, max_sectors, 1187 gfp, &conf->bio_split); 1188 bio_chain(split, bio); 1189 allow_barrier(conf); 1190 submit_bio_noacct(bio); 1191 wait_barrier(conf); 1192 bio = split; 1193 r10_bio->master_bio = bio; 1194 r10_bio->sectors = max_sectors; 1195 } 1196 slot = r10_bio->read_slot; 1197 1198 read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set); 1199 1200 r10_bio->devs[slot].bio = read_bio; 1201 r10_bio->devs[slot].rdev = rdev; 1202 1203 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + 1204 choose_data_offset(r10_bio, rdev); 1205 bio_set_dev(read_bio, rdev->bdev); 1206 read_bio->bi_end_io = raid10_end_read_request; 1207 bio_set_op_attrs(read_bio, op, do_sync); 1208 if (test_bit(FailFast, &rdev->flags) && 1209 test_bit(R10BIO_FailFast, &r10_bio->state)) 1210 read_bio->bi_opf |= MD_FAILFAST; 1211 read_bio->bi_private = r10_bio; 1212 1213 if (mddev->gendisk) 1214 trace_block_bio_remap(read_bio->bi_disk->queue, 1215 read_bio, disk_devt(mddev->gendisk), 1216 r10_bio->sector); 1217 submit_bio_noacct(read_bio); 1218 return; 1219} 1220 1221static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, 1222 struct bio *bio, bool replacement, 1223 int n_copy) 1224{ 1225 const int op = bio_op(bio); 1226 const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); 1227 const unsigned long do_fua = (bio->bi_opf & REQ_FUA); 1228 unsigned long flags; 1229 struct blk_plug_cb *cb; 1230 struct raid10_plug_cb *plug = NULL; 1231 struct r10conf *conf = mddev->private; 1232 struct md_rdev *rdev; 1233 int devnum = r10_bio->devs[n_copy].devnum; 1234 struct bio *mbio; 1235 1236 if (replacement) { 1237 rdev = conf->mirrors[devnum].replacement; 1238 if (rdev == NULL) { 1239 /* Replacement just got moved to main 'rdev' */ 1240 smp_mb(); 1241 rdev = conf->mirrors[devnum].rdev; 1242 } 1243 } else 1244 rdev = conf->mirrors[devnum].rdev; 1245 1246 mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); 1247 if (replacement) 1248 r10_bio->devs[n_copy].repl_bio = mbio; 1249 else 1250 r10_bio->devs[n_copy].bio = mbio; 1251 1252 mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr + 1253 choose_data_offset(r10_bio, rdev)); 1254 bio_set_dev(mbio, rdev->bdev); 1255 mbio->bi_end_io = raid10_end_write_request; 1256 bio_set_op_attrs(mbio, op, do_sync | do_fua); 1257 if (!replacement && test_bit(FailFast, 1258 &conf->mirrors[devnum].rdev->flags) 1259 && enough(conf, devnum)) 1260 mbio->bi_opf |= MD_FAILFAST; 1261 mbio->bi_private = r10_bio; 1262 1263 if (conf->mddev->gendisk) 1264 
trace_block_bio_remap(mbio->bi_disk->queue, 1265 mbio, disk_devt(conf->mddev->gendisk), 1266 r10_bio->sector); 1267 /* flush_pending_writes() needs access to the rdev so...*/ 1268 mbio->bi_disk = (void *)rdev; 1269 1270 atomic_inc(&r10_bio->remaining); 1271 1272 cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug)); 1273 if (cb) 1274 plug = container_of(cb, struct raid10_plug_cb, cb); 1275 else 1276 plug = NULL; 1277 if (plug) { 1278 bio_list_add(&plug->pending, mbio); 1279 plug->pending_cnt++; 1280 } else { 1281 spin_lock_irqsave(&conf->device_lock, flags); 1282 bio_list_add(&conf->pending_bio_list, mbio); 1283 conf->pending_count++; 1284 spin_unlock_irqrestore(&conf->device_lock, flags); 1285 md_wakeup_thread(mddev->thread); 1286 } 1287} 1288 1289static void raid10_write_request(struct mddev *mddev, struct bio *bio, 1290 struct r10bio *r10_bio) 1291{ 1292 struct r10conf *conf = mddev->private; 1293 int i; 1294 struct md_rdev *blocked_rdev; 1295 sector_t sectors; 1296 int max_sectors; 1297 1298 if ((mddev_is_clustered(mddev) && 1299 md_cluster_ops->area_resyncing(mddev, WRITE, 1300 bio->bi_iter.bi_sector, 1301 bio_end_sector(bio)))) { 1302 DEFINE_WAIT(w); 1303 for (;;) { 1304 prepare_to_wait(&conf->wait_barrier, 1305 &w, TASK_IDLE); 1306 if (!md_cluster_ops->area_resyncing(mddev, WRITE, 1307 bio->bi_iter.bi_sector, bio_end_sector(bio))) 1308 break; 1309 schedule(); 1310 } 1311 finish_wait(&conf->wait_barrier, &w); 1312 } 1313 1314 sectors = r10_bio->sectors; 1315 regular_request_wait(mddev, conf, bio, sectors); 1316 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 1317 (mddev->reshape_backwards 1318 ? (bio->bi_iter.bi_sector < conf->reshape_safe && 1319 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) 1320 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && 1321 bio->bi_iter.bi_sector < conf->reshape_progress))) { 1322 /* Need to update reshape_position in metadata */ 1323 mddev->reshape_position = conf->reshape_progress; 1324 set_mask_bits(&mddev->sb_flags, 0, 1325 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); 1326 md_wakeup_thread(mddev->thread); 1327 raid10_log(conf->mddev, "wait reshape metadata"); 1328 wait_event(mddev->sb_wait, 1329 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 1330 1331 conf->reshape_safe = mddev->reshape_position; 1332 } 1333 1334 if (conf->pending_count >= max_queued_requests) { 1335 md_wakeup_thread(mddev->thread); 1336 raid10_log(mddev, "wait queued"); 1337 wait_event(conf->wait_barrier, 1338 conf->pending_count < max_queued_requests); 1339 } 1340 /* first select target devices under rcu_lock and 1341 * inc refcount on their rdev. Record them by setting 1342 * bios[x] to bio 1343 * If there are known/acknowledged bad blocks on any device 1344 * on which we have seen a write error, we want to avoid 1345 * writing to those blocks. This potentially requires several 1346 * writes to write around the bad blocks. Each set of writes 1347 * gets its own r10_bio with a set of bios attached. 1348 */ 1349 1350 r10_bio->read_slot = -1; /* make sure repl_bio gets freed */ 1351 raid10_find_phys(conf, r10_bio); 1352retry_write: 1353 blocked_rdev = NULL; 1354 rcu_read_lock(); 1355 max_sectors = r10_bio->sectors; 1356 1357 for (i = 0; i < conf->copies; i++) { 1358 int d = r10_bio->devs[i].devnum; 1359 struct md_rdev *rdev, *rrdev; 1360 1361 rrdev = rcu_dereference(conf->mirrors[d].replacement); 1362 /* 1363 * Read replacement first to prevent reading both rdev and 1364 * replacement as NULL during replacement replace rdev. 
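		 * (the barrier below orders the read of 'replacement' above
		 * before the read of 'rdev'.)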
1365 */ 1366 smp_mb(); 1367 rdev = rcu_dereference(conf->mirrors[d].rdev); 1368 if (rdev == rrdev) 1369 rrdev = NULL; 1370 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 1371 atomic_inc(&rdev->nr_pending); 1372 blocked_rdev = rdev; 1373 break; 1374 } 1375 if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) { 1376 atomic_inc(&rrdev->nr_pending); 1377 blocked_rdev = rrdev; 1378 break; 1379 } 1380 if (rdev && (test_bit(Faulty, &rdev->flags))) 1381 rdev = NULL; 1382 if (rrdev && (test_bit(Faulty, &rrdev->flags))) 1383 rrdev = NULL; 1384 1385 r10_bio->devs[i].bio = NULL; 1386 r10_bio->devs[i].repl_bio = NULL; 1387 1388 if (!rdev && !rrdev) { 1389 set_bit(R10BIO_Degraded, &r10_bio->state); 1390 continue; 1391 } 1392 if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) { 1393 sector_t first_bad; 1394 sector_t dev_sector = r10_bio->devs[i].addr; 1395 int bad_sectors; 1396 int is_bad; 1397 1398 is_bad = is_badblock(rdev, dev_sector, max_sectors, 1399 &first_bad, &bad_sectors); 1400 if (is_bad < 0) { 1401 /* Mustn't write here until the bad block 1402 * is acknowledged 1403 */ 1404 atomic_inc(&rdev->nr_pending); 1405 set_bit(BlockedBadBlocks, &rdev->flags); 1406 blocked_rdev = rdev; 1407 break; 1408 } 1409 if (is_bad && first_bad <= dev_sector) { 1410 /* Cannot write here at all */ 1411 bad_sectors -= (dev_sector - first_bad); 1412 if (bad_sectors < max_sectors) 1413 /* Mustn't write more than bad_sectors 1414 * to other devices yet 1415 */ 1416 max_sectors = bad_sectors; 1417 /* We don't set R10BIO_Degraded as that 1418 * only applies if the disk is missing, 1419 * so it might be re-added, and we want to 1420 * know to recover this chunk. 1421 * In this case the device is here, and the 1422 * fact that this chunk is not in-sync is 1423 * recorded in the bad block log. 
1424 */ 1425 continue; 1426 } 1427 if (is_bad) { 1428 int good_sectors = first_bad - dev_sector; 1429 if (good_sectors < max_sectors) 1430 max_sectors = good_sectors; 1431 } 1432 } 1433 if (rdev) { 1434 r10_bio->devs[i].bio = bio; 1435 atomic_inc(&rdev->nr_pending); 1436 } 1437 if (rrdev) { 1438 r10_bio->devs[i].repl_bio = bio; 1439 atomic_inc(&rrdev->nr_pending); 1440 } 1441 } 1442 rcu_read_unlock(); 1443 1444 if (unlikely(blocked_rdev)) { 1445 /* Have to wait for this device to get unblocked, then retry */ 1446 int j; 1447 int d; 1448 1449 for (j = 0; j < i; j++) { 1450 if (r10_bio->devs[j].bio) { 1451 d = r10_bio->devs[j].devnum; 1452 rdev_dec_pending(conf->mirrors[d].rdev, mddev); 1453 } 1454 if (r10_bio->devs[j].repl_bio) { 1455 struct md_rdev *rdev; 1456 d = r10_bio->devs[j].devnum; 1457 rdev = conf->mirrors[d].replacement; 1458 if (!rdev) { 1459 /* Race with remove_disk */ 1460 smp_mb(); 1461 rdev = conf->mirrors[d].rdev; 1462 } 1463 rdev_dec_pending(rdev, mddev); 1464 } 1465 } 1466 allow_barrier(conf); 1467 raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk); 1468 md_wait_for_blocked_rdev(blocked_rdev, mddev); 1469 wait_barrier(conf); 1470 goto retry_write; 1471 } 1472 1473 if (max_sectors < r10_bio->sectors) 1474 r10_bio->sectors = max_sectors; 1475 1476 if (r10_bio->sectors < bio_sectors(bio)) { 1477 struct bio *split = bio_split(bio, r10_bio->sectors, 1478 GFP_NOIO, &conf->bio_split); 1479 bio_chain(split, bio); 1480 allow_barrier(conf); 1481 submit_bio_noacct(bio); 1482 wait_barrier(conf); 1483 bio = split; 1484 r10_bio->master_bio = bio; 1485 } 1486 1487 atomic_set(&r10_bio->remaining, 1); 1488 md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); 1489 1490 for (i = 0; i < conf->copies; i++) { 1491 if (r10_bio->devs[i].bio) 1492 raid10_write_one_disk(mddev, r10_bio, bio, false, i); 1493 if (r10_bio->devs[i].repl_bio) 1494 raid10_write_one_disk(mddev, r10_bio, bio, true, i); 1495 } 1496 one_write_done(r10_bio); 1497} 1498 1499static void __make_request(struct mddev *mddev, struct bio *bio, int sectors) 1500{ 1501 struct r10conf *conf = mddev->private; 1502 struct r10bio *r10_bio; 1503 1504 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO); 1505 1506 r10_bio->master_bio = bio; 1507 r10_bio->sectors = sectors; 1508 1509 r10_bio->mddev = mddev; 1510 r10_bio->sector = bio->bi_iter.bi_sector; 1511 r10_bio->state = 0; 1512 r10_bio->read_slot = -1; 1513 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies); 1514 1515 if (bio_data_dir(bio) == READ) 1516 raid10_read_request(mddev, bio, r10_bio); 1517 else 1518 raid10_write_request(mddev, bio, r10_bio); 1519} 1520 1521static bool raid10_make_request(struct mddev *mddev, struct bio *bio) 1522{ 1523 struct r10conf *conf = mddev->private; 1524 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask); 1525 int chunk_sects = chunk_mask + 1; 1526 int sectors = bio_sectors(bio); 1527 1528 if (unlikely(bio->bi_opf & REQ_PREFLUSH) 1529 && md_flush_request(mddev, bio)) 1530 return true; 1531 1532 if (!md_write_start(mddev, bio)) 1533 return false; 1534 1535 /* 1536 * If this request crosses a chunk boundary, we need to split 1537 * it. 
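	 * (No split is needed when near_copies == raid_disks in both the
	 * current and previous geometry, since then every device holds a
	 * full copy.)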
1538 */ 1539 if (unlikely((bio->bi_iter.bi_sector & chunk_mask) + 1540 sectors > chunk_sects 1541 && (conf->geo.near_copies < conf->geo.raid_disks 1542 || conf->prev.near_copies < 1543 conf->prev.raid_disks))) 1544 sectors = chunk_sects - 1545 (bio->bi_iter.bi_sector & 1546 (chunk_sects - 1)); 1547 __make_request(mddev, bio, sectors); 1548 1549 /* In case raid10d snuck in to freeze_array */ 1550 wake_up(&conf->wait_barrier); 1551 return true; 1552} 1553 1554static void raid10_status(struct seq_file *seq, struct mddev *mddev) 1555{ 1556 struct r10conf *conf = mddev->private; 1557 int i; 1558 1559 if (conf->geo.near_copies < conf->geo.raid_disks) 1560 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2); 1561 if (conf->geo.near_copies > 1) 1562 seq_printf(seq, " %d near-copies", conf->geo.near_copies); 1563 if (conf->geo.far_copies > 1) { 1564 if (conf->geo.far_offset) 1565 seq_printf(seq, " %d offset-copies", conf->geo.far_copies); 1566 else 1567 seq_printf(seq, " %d far-copies", conf->geo.far_copies); 1568 if (conf->geo.far_set_size != conf->geo.raid_disks) 1569 seq_printf(seq, " %d devices per set", conf->geo.far_set_size); 1570 } 1571 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks, 1572 conf->geo.raid_disks - mddev->degraded); 1573 rcu_read_lock(); 1574 for (i = 0; i < conf->geo.raid_disks; i++) { 1575 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); 1576 seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); 1577 } 1578 rcu_read_unlock(); 1579 seq_printf(seq, "]"); 1580} 1581 1582/* check if there are enough drives for 1583 * every block to appear on atleast one. 1584 * Don't consider the device numbered 'ignore' 1585 * as we might be about to remove it. 1586 */ 1587static int _enough(struct r10conf *conf, int previous, int ignore) 1588{ 1589 int first = 0; 1590 int has_enough = 0; 1591 int disks, ncopies; 1592 if (previous) { 1593 disks = conf->prev.raid_disks; 1594 ncopies = conf->prev.near_copies; 1595 } else { 1596 disks = conf->geo.raid_disks; 1597 ncopies = conf->geo.near_copies; 1598 } 1599 1600 rcu_read_lock(); 1601 do { 1602 int n = conf->copies; 1603 int cnt = 0; 1604 int this = first; 1605 while (n--) { 1606 struct md_rdev *rdev; 1607 if (this != ignore && 1608 (rdev = rcu_dereference(conf->mirrors[this].rdev)) && 1609 test_bit(In_sync, &rdev->flags)) 1610 cnt++; 1611 this = (this+1) % disks; 1612 } 1613 if (cnt == 0) 1614 goto out; 1615 first = (first + ncopies) % disks; 1616 } while (first != 0); 1617 has_enough = 1; 1618out: 1619 rcu_read_unlock(); 1620 return has_enough; 1621} 1622 1623static int enough(struct r10conf *conf, int ignore) 1624{ 1625 /* when calling 'enough', both 'prev' and 'geo' must 1626 * be stable. 1627 * This is ensured if ->reconfig_mutex or ->device_lock 1628 * is held. 1629 */ 1630 return _enough(conf, 0, ignore) && 1631 _enough(conf, 1, ignore); 1632} 1633 1634static void raid10_error(struct mddev *mddev, struct md_rdev *rdev) 1635{ 1636 char b[BDEVNAME_SIZE]; 1637 struct r10conf *conf = mddev->private; 1638 unsigned long flags; 1639 1640 /* 1641 * If it is not operational, then we have already marked it as dead 1642 * else if it is the last working disks with "fail_last_dev == false", 1643 * ignore the error, let the next level up know. 1644 * else mark the drive as failed 1645 */ 1646 spin_lock_irqsave(&conf->device_lock, flags); 1647 if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev 1648 && !enough(conf, rdev->raid_disk)) { 1649 /* 1650 * Don't fail the drive, just return an IO error. 
1651 */ 1652 spin_unlock_irqrestore(&conf->device_lock, flags); 1653 return; 1654 } 1655 if (test_and_clear_bit(In_sync, &rdev->flags)) 1656 mddev->degraded++; 1657 /* 1658 * If recovery is running, make sure it aborts. 1659 */ 1660 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 1661 set_bit(Blocked, &rdev->flags); 1662 set_bit(Faulty, &rdev->flags); 1663 set_mask_bits(&mddev->sb_flags, 0, 1664 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); 1665 spin_unlock_irqrestore(&conf->device_lock, flags); 1666 pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n" 1667 "md/raid10:%s: Operation continuing on %d devices.\n", 1668 mdname(mddev), bdevname(rdev->bdev, b), 1669 mdname(mddev), conf->geo.raid_disks - mddev->degraded); 1670} 1671 1672static void print_conf(struct r10conf *conf) 1673{ 1674 int i; 1675 struct md_rdev *rdev; 1676 1677 pr_debug("RAID10 conf printout:\n"); 1678 if (!conf) { 1679 pr_debug("(!conf)\n"); 1680 return; 1681 } 1682 pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded, 1683 conf->geo.raid_disks); 1684 1685 /* This is only called with ->reconfix_mutex held, so 1686 * rcu protection of rdev is not needed */ 1687 for (i = 0; i < conf->geo.raid_disks; i++) { 1688 char b[BDEVNAME_SIZE]; 1689 rdev = conf->mirrors[i].rdev; 1690 if (rdev) 1691 pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n", 1692 i, !test_bit(In_sync, &rdev->flags), 1693 !test_bit(Faulty, &rdev->flags), 1694 bdevname(rdev->bdev,b)); 1695 } 1696} 1697 1698static void close_sync(struct r10conf *conf) 1699{ 1700 wait_barrier(conf); 1701 allow_barrier(conf); 1702 1703 mempool_exit(&conf->r10buf_pool); 1704} 1705 1706static int raid10_spare_active(struct mddev *mddev) 1707{ 1708 int i; 1709 struct r10conf *conf = mddev->private; 1710 struct raid10_info *tmp; 1711 int count = 0; 1712 unsigned long flags; 1713 1714 /* 1715 * Find all non-in_sync disks within the RAID10 configuration 1716 * and mark them in_sync 1717 */ 1718 for (i = 0; i < conf->geo.raid_disks; i++) { 1719 tmp = conf->mirrors + i; 1720 if (tmp->replacement 1721 && tmp->replacement->recovery_offset == MaxSector 1722 && !test_bit(Faulty, &tmp->replacement->flags) 1723 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) { 1724 /* Replacement has just become active */ 1725 if (!tmp->rdev 1726 || !test_and_clear_bit(In_sync, &tmp->rdev->flags)) 1727 count++; 1728 if (tmp->rdev) { 1729 /* Replaced device not technically faulty, 1730 * but we need to be sure it gets removed 1731 * and never re-added. 
1732 */ 1733 set_bit(Faulty, &tmp->rdev->flags); 1734 sysfs_notify_dirent_safe( 1735 tmp->rdev->sysfs_state); 1736 } 1737 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state); 1738 } else if (tmp->rdev 1739 && tmp->rdev->recovery_offset == MaxSector 1740 && !test_bit(Faulty, &tmp->rdev->flags) 1741 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 1742 count++; 1743 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state); 1744 } 1745 } 1746 spin_lock_irqsave(&conf->device_lock, flags); 1747 mddev->degraded -= count; 1748 spin_unlock_irqrestore(&conf->device_lock, flags); 1749 1750 print_conf(conf); 1751 return count; 1752} 1753 1754static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) 1755{ 1756 struct r10conf *conf = mddev->private; 1757 int err = -EEXIST; 1758 int mirror; 1759 int first = 0; 1760 int last = conf->geo.raid_disks - 1; 1761 1762 if (mddev->recovery_cp < MaxSector) 1763 /* only hot-add to in-sync arrays, as recovery is 1764 * very different from resync 1765 */ 1766 return -EBUSY; 1767 if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1)) 1768 return -EINVAL; 1769 1770 if (md_integrity_add_rdev(rdev, mddev)) 1771 return -ENXIO; 1772 1773 if (rdev->raid_disk >= 0) 1774 first = last = rdev->raid_disk; 1775 1776 if (rdev->saved_raid_disk >= first && 1777 rdev->saved_raid_disk < conf->geo.raid_disks && 1778 conf->mirrors[rdev->saved_raid_disk].rdev == NULL) 1779 mirror = rdev->saved_raid_disk; 1780 else 1781 mirror = first; 1782 for ( ; mirror <= last ; mirror++) { 1783 struct raid10_info *p = &conf->mirrors[mirror]; 1784 if (p->recovery_disabled == mddev->recovery_disabled) 1785 continue; 1786 if (p->rdev) { 1787 if (!test_bit(WantReplacement, &p->rdev->flags) || 1788 p->replacement != NULL) 1789 continue; 1790 clear_bit(In_sync, &rdev->flags); 1791 set_bit(Replacement, &rdev->flags); 1792 rdev->raid_disk = mirror; 1793 err = 0; 1794 if (mddev->gendisk) 1795 disk_stack_limits(mddev->gendisk, rdev->bdev, 1796 rdev->data_offset << 9); 1797 conf->fullsync = 1; 1798 rcu_assign_pointer(p->replacement, rdev); 1799 break; 1800 } 1801 1802 if (mddev->gendisk) 1803 disk_stack_limits(mddev->gendisk, rdev->bdev, 1804 rdev->data_offset << 9); 1805 1806 p->head_position = 0; 1807 p->recovery_disabled = mddev->recovery_disabled - 1; 1808 rdev->raid_disk = mirror; 1809 err = 0; 1810 if (rdev->saved_raid_disk != mirror) 1811 conf->fullsync = 1; 1812 rcu_assign_pointer(p->rdev, rdev); 1813 break; 1814 } 1815 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) 1816 blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); 1817 1818 print_conf(conf); 1819 return err; 1820} 1821 1822static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev) 1823{ 1824 struct r10conf *conf = mddev->private; 1825 int err = 0; 1826 int number = rdev->raid_disk; 1827 struct md_rdev **rdevp; 1828 struct raid10_info *p; 1829 1830 print_conf(conf); 1831 if (unlikely(number >= mddev->raid_disks)) 1832 return 0; 1833 p = conf->mirrors + number; 1834 if (rdev == p->rdev) 1835 rdevp = &p->rdev; 1836 else if (rdev == p->replacement) 1837 rdevp = &p->replacement; 1838 else 1839 return 0; 1840 1841 if (test_bit(In_sync, &rdev->flags) || 1842 atomic_read(&rdev->nr_pending)) { 1843 err = -EBUSY; 1844 goto abort; 1845 } 1846 /* Only remove non-faulty devices if recovery 1847 * is not possible. 
1848 */ 1849 if (!test_bit(Faulty, &rdev->flags) && 1850 mddev->recovery_disabled != p->recovery_disabled && 1851 (!p->replacement || p->replacement == rdev) && 1852 number < conf->geo.raid_disks && 1853 enough(conf, -1)) { 1854 err = -EBUSY; 1855 goto abort; 1856 } 1857 *rdevp = NULL; 1858 if (!test_bit(RemoveSynchronized, &rdev->flags)) { 1859 synchronize_rcu(); 1860 if (atomic_read(&rdev->nr_pending)) { 1861 /* lost the race, try later */ 1862 err = -EBUSY; 1863 *rdevp = rdev; 1864 goto abort; 1865 } 1866 } 1867 if (p->replacement) { 1868 /* We must have just cleared 'rdev' */ 1869 p->rdev = p->replacement; 1870 clear_bit(Replacement, &p->replacement->flags); 1871 smp_mb(); /* Make sure other CPUs may see both as identical 1872 * but will never see neither -- if they are careful. 1873 */ 1874 p->replacement = NULL; 1875 } 1876 1877 clear_bit(WantReplacement, &rdev->flags); 1878 err = md_integrity_register(mddev); 1879 1880abort: 1881 1882 print_conf(conf); 1883 return err; 1884} 1885 1886static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d) 1887{ 1888 struct r10conf *conf = r10_bio->mddev->private; 1889 1890 if (!bio->bi_status) 1891 set_bit(R10BIO_Uptodate, &r10_bio->state); 1892 else 1893 /* The write handler will notice the lack of 1894 * R10BIO_Uptodate and record any errors etc 1895 */ 1896 atomic_add(r10_bio->sectors, 1897 &conf->mirrors[d].rdev->corrected_errors); 1898 1899 /* for reconstruct, we always reschedule after a read. 1900 * for resync, only after all reads 1901 */ 1902 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev); 1903 if (test_bit(R10BIO_IsRecover, &r10_bio->state) || 1904 atomic_dec_and_test(&r10_bio->remaining)) { 1905 /* we have read all the blocks, 1906 * do the comparison in process context in raid10d 1907 */ 1908 reschedule_retry(r10_bio); 1909 } 1910} 1911 1912static void end_sync_read(struct bio *bio) 1913{ 1914 struct r10bio *r10_bio = get_resync_r10bio(bio); 1915 struct r10conf *conf = r10_bio->mddev->private; 1916 int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL); 1917 1918 __end_sync_read(r10_bio, bio, d); 1919} 1920 1921static void end_reshape_read(struct bio *bio) 1922{ 1923 /* reshape read bio isn't allocated from r10buf_pool */ 1924 struct r10bio *r10_bio = bio->bi_private; 1925 1926 __end_sync_read(r10_bio, bio, r10_bio->read_slot); 1927} 1928 1929static void end_sync_request(struct r10bio *r10_bio) 1930{ 1931 struct mddev *mddev = r10_bio->mddev; 1932 1933 while (atomic_dec_and_test(&r10_bio->remaining)) { 1934 if (r10_bio->master_bio == NULL) { 1935 /* the primary of several recovery bios */ 1936 sector_t s = r10_bio->sectors; 1937 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || 1938 test_bit(R10BIO_WriteError, &r10_bio->state)) 1939 reschedule_retry(r10_bio); 1940 else 1941 put_buf(r10_bio); 1942 md_done_sync(mddev, s, 1); 1943 break; 1944 } else { 1945 struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio; 1946 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || 1947 test_bit(R10BIO_WriteError, &r10_bio->state)) 1948 reschedule_retry(r10_bio); 1949 else 1950 put_buf(r10_bio); 1951 r10_bio = r10_bio2; 1952 } 1953 } 1954} 1955 1956static void end_sync_write(struct bio *bio) 1957{ 1958 struct r10bio *r10_bio = get_resync_r10bio(bio); 1959 struct mddev *mddev = r10_bio->mddev; 1960 struct r10conf *conf = mddev->private; 1961 int d; 1962 sector_t first_bad; 1963 int bad_sectors; 1964 int slot; 1965 int repl; 1966 struct md_rdev *rdev = NULL; 1967 1968 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); 1969 
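	/* The completed bio may belong to either the main rdev or its
	 * replacement; pick the right one before checking the result.
	 */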
	if (repl)
		rdev = conf->mirrors[d].replacement;
	else
		rdev = conf->mirrors[d].rdev;

	if (bio->bi_status) {
		if (repl)
			md_error(mddev, rdev);
		else {
			set_bit(WriteErrorSeen, &rdev->flags);
			if (!test_and_set_bit(WantReplacement, &rdev->flags))
				set_bit(MD_RECOVERY_NEEDED,
					&rdev->mddev->recovery);
			set_bit(R10BIO_WriteError, &r10_bio->state);
		}
	} else if (is_badblock(rdev,
			       r10_bio->devs[slot].addr,
			       r10_bio->sectors,
			       &first_bad, &bad_sectors))
		set_bit(R10BIO_MadeGood, &r10_bio->state);

	rdev_dec_pending(rdev, mddev);

	end_sync_request(r10_bio);
}

/*
 * Note: sync and recovery are handled very differently for raid10.
 * This code is for resync.
 * For resync, we read through virtual addresses and read all blocks.
 * If there is any error, we schedule a write.  The lowest numbered
 * drive is authoritative.
 * However, requests arrive for physical addresses, so we need to map.
 * For every physical address there are raid_disks/copies virtual addresses,
 * which is always at least one, but is not necessarily an integer.
 * This means that a physical address can span multiple chunks, so we may
 * have to submit multiple io requests for a single sync request.
 */
/*
 * We check if all blocks are in-sync and only write to blocks that
 * aren't in sync
 */
static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
{
	struct r10conf *conf = mddev->private;
	int i, first;
	struct bio *tbio, *fbio;
	int vcnt;
	struct page **tpages, **fpages;

	atomic_set(&r10_bio->remaining, 1);

	/* find the first device with a block */
	for (i=0; i<conf->copies; i++)
		if (!r10_bio->devs[i].bio->bi_status)
			break;

	if (i == conf->copies)
		goto done;

	first = i;
	fbio = r10_bio->devs[i].bio;
	fbio->bi_iter.bi_size = r10_bio->sectors << 9;
	fbio->bi_iter.bi_idx = 0;
	fpages = get_resync_pages(fbio)->pages;

	vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
	/* now find blocks with errors */
	for (i=0 ; i < conf->copies ; i++) {
		int j, d;
		struct md_rdev *rdev;
		struct resync_pages *rp;

		tbio = r10_bio->devs[i].bio;

		if (tbio->bi_end_io != end_sync_read)
			continue;
		if (i == first)
			continue;

		tpages = get_resync_pages(tbio)->pages;
		d = r10_bio->devs[i].devnum;
		rdev = conf->mirrors[d].rdev;
		if (!r10_bio->devs[i].bio->bi_status) {
			/* We know that the bi_io_vec layout is the same for
			 * both 'first' and 'i', so we just compare them.
			 * All vec entries are PAGE_SIZE;
			 */
			int sectors = r10_bio->sectors;
			for (j = 0; j < vcnt; j++) {
				int len = PAGE_SIZE;
				if (sectors < (len / 512))
					len = sectors * 512;
				if (memcmp(page_address(fpages[j]),
					   page_address(tpages[j]),
					   len))
					break;
				sectors -= len/512;
			}
			if (j == vcnt)
				continue;
			atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
			if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				/* Don't fix anything. */
				continue;
		} else if (test_bit(FailFast, &rdev->flags)) {
			/* Just give up on this device */
			md_error(rdev->mddev, rdev);
			continue;
		}
		/* Ok, we need to write this bio, either to correct an
		 * inconsistency or to correct an unreadable block.
2081 * First we need to fixup bv_offset, bv_len and 2082 * bi_vecs, as the read request might have corrupted these 2083 */ 2084 rp = get_resync_pages(tbio); 2085 bio_reset(tbio); 2086 2087 md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size); 2088 2089 rp->raid_bio = r10_bio; 2090 tbio->bi_private = rp; 2091 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; 2092 tbio->bi_end_io = end_sync_write; 2093 bio_set_op_attrs(tbio, REQ_OP_WRITE, 0); 2094 2095 bio_copy_data(tbio, fbio); 2096 2097 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 2098 atomic_inc(&r10_bio->remaining); 2099 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); 2100 2101 if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) 2102 tbio->bi_opf |= MD_FAILFAST; 2103 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; 2104 bio_set_dev(tbio, conf->mirrors[d].rdev->bdev); 2105 submit_bio_noacct(tbio); 2106 } 2107 2108 /* Now write out to any replacement devices 2109 * that are active 2110 */ 2111 for (i = 0; i < conf->copies; i++) { 2112 int d; 2113 2114 tbio = r10_bio->devs[i].repl_bio; 2115 if (!tbio || !tbio->bi_end_io) 2116 continue; 2117 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write 2118 && r10_bio->devs[i].bio != fbio) 2119 bio_copy_data(tbio, fbio); 2120 d = r10_bio->devs[i].devnum; 2121 atomic_inc(&r10_bio->remaining); 2122 md_sync_acct(conf->mirrors[d].replacement->bdev, 2123 bio_sectors(tbio)); 2124 submit_bio_noacct(tbio); 2125 } 2126 2127done: 2128 if (atomic_dec_and_test(&r10_bio->remaining)) { 2129 md_done_sync(mddev, r10_bio->sectors, 1); 2130 put_buf(r10_bio); 2131 } 2132} 2133 2134/* 2135 * Now for the recovery code. 2136 * Recovery happens across physical sectors. 2137 * We recover all non-is_sync drives by finding the virtual address of 2138 * each, and then choose a working drive that also has that virt address. 2139 * There is a separate r10_bio for each non-in_sync drive. 2140 * Only the first two slots are in use. The first for reading, 2141 * The second for writing. 2142 * 2143 */ 2144static void fix_recovery_read_error(struct r10bio *r10_bio) 2145{ 2146 /* We got a read error during recovery. 2147 * We repeat the read in smaller page-sized sections. 2148 * If a read succeeds, write it to the new device or record 2149 * a bad block if we cannot. 2150 * If a read fails, record a bad block on both old and 2151 * new devices. 
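	 * The loop below retries the range one page (PAGE_SIZE >> 9 sectors)
	 * at a time, using sync_page_io() for the read and for the re-write.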
2152 */ 2153 struct mddev *mddev = r10_bio->mddev; 2154 struct r10conf *conf = mddev->private; 2155 struct bio *bio = r10_bio->devs[0].bio; 2156 sector_t sect = 0; 2157 int sectors = r10_bio->sectors; 2158 int idx = 0; 2159 int dr = r10_bio->devs[0].devnum; 2160 int dw = r10_bio->devs[1].devnum; 2161 struct page **pages = get_resync_pages(bio)->pages; 2162 2163 while (sectors) { 2164 int s = sectors; 2165 struct md_rdev *rdev; 2166 sector_t addr; 2167 int ok; 2168 2169 if (s > (PAGE_SIZE>>9)) 2170 s = PAGE_SIZE >> 9; 2171 2172 rdev = conf->mirrors[dr].rdev; 2173 addr = r10_bio->devs[0].addr + sect, 2174 ok = sync_page_io(rdev, 2175 addr, 2176 s << 9, 2177 pages[idx], 2178 REQ_OP_READ, 0, false); 2179 if (ok) { 2180 rdev = conf->mirrors[dw].rdev; 2181 addr = r10_bio->devs[1].addr + sect; 2182 ok = sync_page_io(rdev, 2183 addr, 2184 s << 9, 2185 pages[idx], 2186 REQ_OP_WRITE, 0, false); 2187 if (!ok) { 2188 set_bit(WriteErrorSeen, &rdev->flags); 2189 if (!test_and_set_bit(WantReplacement, 2190 &rdev->flags)) 2191 set_bit(MD_RECOVERY_NEEDED, 2192 &rdev->mddev->recovery); 2193 } 2194 } 2195 if (!ok) { 2196 /* We don't worry if we cannot set a bad block - 2197 * it really is bad so there is no loss in not 2198 * recording it yet 2199 */ 2200 rdev_set_badblocks(rdev, addr, s, 0); 2201 2202 if (rdev != conf->mirrors[dw].rdev) { 2203 /* need bad block on destination too */ 2204 struct md_rdev *rdev2 = conf->mirrors[dw].rdev; 2205 addr = r10_bio->devs[1].addr + sect; 2206 ok = rdev_set_badblocks(rdev2, addr, s, 0); 2207 if (!ok) { 2208 /* just abort the recovery */ 2209 pr_notice("md/raid10:%s: recovery aborted due to read error\n", 2210 mdname(mddev)); 2211 2212 conf->mirrors[dw].recovery_disabled 2213 = mddev->recovery_disabled; 2214 set_bit(MD_RECOVERY_INTR, 2215 &mddev->recovery); 2216 break; 2217 } 2218 } 2219 } 2220 2221 sectors -= s; 2222 sect += s; 2223 idx++; 2224 } 2225} 2226 2227static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) 2228{ 2229 struct r10conf *conf = mddev->private; 2230 int d; 2231 struct bio *wbio = r10_bio->devs[1].bio; 2232 struct bio *wbio2 = r10_bio->devs[1].repl_bio; 2233 2234 /* Need to test wbio2->bi_end_io before we call 2235 * submit_bio_noacct as if the former is NULL, 2236 * the latter is free to free wbio2. 2237 */ 2238 if (wbio2 && !wbio2->bi_end_io) 2239 wbio2 = NULL; 2240 2241 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) { 2242 fix_recovery_read_error(r10_bio); 2243 if (wbio->bi_end_io) 2244 end_sync_request(r10_bio); 2245 if (wbio2) 2246 end_sync_request(r10_bio); 2247 return; 2248 } 2249 2250 /* 2251 * share the pages with the first bio 2252 * and submit the write request 2253 */ 2254 d = r10_bio->devs[1].devnum; 2255 if (wbio->bi_end_io) { 2256 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 2257 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio)); 2258 submit_bio_noacct(wbio); 2259 } 2260 if (wbio2) { 2261 atomic_inc(&conf->mirrors[d].replacement->nr_pending); 2262 md_sync_acct(conf->mirrors[d].replacement->bdev, 2263 bio_sectors(wbio2)); 2264 submit_bio_noacct(wbio2); 2265 } 2266} 2267 2268/* 2269 * Used by fix_read_error() to decay the per rdev read_errors. 2270 * We halve the read error count for every hour that has elapsed 2271 * since the last recorded read error. 
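 * For example, a read_errors count of 40 decays to 40 >> 3 == 5 when three
 * hours have elapsed since the previous recorded error.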
 *
 */
static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
{
	long cur_time_mon;
	unsigned long hours_since_last;
	unsigned int read_errors = atomic_read(&rdev->read_errors);

	cur_time_mon = ktime_get_seconds();

	if (rdev->last_read_error == 0) {
		/* first time we've seen a read error */
		rdev->last_read_error = cur_time_mon;
		return;
	}

	hours_since_last = (long)(cur_time_mon -
			    rdev->last_read_error) / 3600;

	rdev->last_read_error = cur_time_mon;

	/*
	 * if hours_since_last is > the number of bits in read_errors
	 * just set read errors to 0. We do this to avoid
	 * overflowing the shift of read_errors by hours_since_last.
	 */
	if (hours_since_last >= 8 * sizeof(read_errors))
		atomic_set(&rdev->read_errors, 0);
	else
		atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
}

static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
			    int sectors, struct page *page, int rw)
{
	sector_t first_bad;
	int bad_sectors;

	if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
	    && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
		return -1;
	if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
		/* success */
		return 1;
	if (rw == WRITE) {
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement, &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED,
				&rdev->mddev->recovery);
	}
	/* need to record an error - either for the block or the device */
	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
		md_error(rdev->mddev, rdev);
	return 0;
}

/*
 * This is a kernel thread which:
 *
 * 1. Retries failed read operations on working mirrors.
 * 2. Updates the raid superblock when problems are encountered.
 * 3. Performs writes following reads for array synchronising.
 */

static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
{
	int sect = 0; /* Offset from r10_bio->sector */
	int sectors = r10_bio->sectors;
	struct md_rdev *rdev;
	int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
	int d = r10_bio->devs[r10_bio->read_slot].devnum;

	/* still own a reference to this rdev, so it cannot
	 * have been cleared recently.
2346 */ 2347 rdev = conf->mirrors[d].rdev; 2348 2349 if (test_bit(Faulty, &rdev->flags)) 2350 /* drive has already been failed, just ignore any 2351 more fix_read_error() attempts */ 2352 return; 2353 2354 check_decay_read_errors(mddev, rdev); 2355 atomic_inc(&rdev->read_errors); 2356 if (atomic_read(&rdev->read_errors) > max_read_errors) { 2357 char b[BDEVNAME_SIZE]; 2358 bdevname(rdev->bdev, b); 2359 2360 pr_notice("md/raid10:%s: %s: Raid device exceeded read_error threshold [cur %d:max %d]\n", 2361 mdname(mddev), b, 2362 atomic_read(&rdev->read_errors), max_read_errors); 2363 pr_notice("md/raid10:%s: %s: Failing raid device\n", 2364 mdname(mddev), b); 2365 md_error(mddev, rdev); 2366 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED; 2367 return; 2368 } 2369 2370 while(sectors) { 2371 int s = sectors; 2372 int sl = r10_bio->read_slot; 2373 int success = 0; 2374 int start; 2375 2376 if (s > (PAGE_SIZE>>9)) 2377 s = PAGE_SIZE >> 9; 2378 2379 rcu_read_lock(); 2380 do { 2381 sector_t first_bad; 2382 int bad_sectors; 2383 2384 d = r10_bio->devs[sl].devnum; 2385 rdev = rcu_dereference(conf->mirrors[d].rdev); 2386 if (rdev && 2387 test_bit(In_sync, &rdev->flags) && 2388 !test_bit(Faulty, &rdev->flags) && 2389 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s, 2390 &first_bad, &bad_sectors) == 0) { 2391 atomic_inc(&rdev->nr_pending); 2392 rcu_read_unlock(); 2393 success = sync_page_io(rdev, 2394 r10_bio->devs[sl].addr + 2395 sect, 2396 s<<9, 2397 conf->tmppage, 2398 REQ_OP_READ, 0, false); 2399 rdev_dec_pending(rdev, mddev); 2400 rcu_read_lock(); 2401 if (success) 2402 break; 2403 } 2404 sl++; 2405 if (sl == conf->copies) 2406 sl = 0; 2407 } while (!success && sl != r10_bio->read_slot); 2408 rcu_read_unlock(); 2409 2410 if (!success) { 2411 /* Cannot read from anywhere, just mark the block 2412 * as bad on the first device to discourage future 2413 * reads. 
2414 */ 2415 int dn = r10_bio->devs[r10_bio->read_slot].devnum; 2416 rdev = conf->mirrors[dn].rdev; 2417 2418 if (!rdev_set_badblocks( 2419 rdev, 2420 r10_bio->devs[r10_bio->read_slot].addr 2421 + sect, 2422 s, 0)) { 2423 md_error(mddev, rdev); 2424 r10_bio->devs[r10_bio->read_slot].bio 2425 = IO_BLOCKED; 2426 } 2427 break; 2428 } 2429 2430 start = sl; 2431 /* write it back and re-read */ 2432 rcu_read_lock(); 2433 while (sl != r10_bio->read_slot) { 2434 char b[BDEVNAME_SIZE]; 2435 2436 if (sl==0) 2437 sl = conf->copies; 2438 sl--; 2439 d = r10_bio->devs[sl].devnum; 2440 rdev = rcu_dereference(conf->mirrors[d].rdev); 2441 if (!rdev || 2442 test_bit(Faulty, &rdev->flags) || 2443 !test_bit(In_sync, &rdev->flags)) 2444 continue; 2445 2446 atomic_inc(&rdev->nr_pending); 2447 rcu_read_unlock(); 2448 if (r10_sync_page_io(rdev, 2449 r10_bio->devs[sl].addr + 2450 sect, 2451 s, conf->tmppage, WRITE) 2452 == 0) { 2453 /* Well, this device is dead */ 2454 pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %s)\n", 2455 mdname(mddev), s, 2456 (unsigned long long)( 2457 sect + 2458 choose_data_offset(r10_bio, 2459 rdev)), 2460 bdevname(rdev->bdev, b)); 2461 pr_notice("md/raid10:%s: %s: failing drive\n", 2462 mdname(mddev), 2463 bdevname(rdev->bdev, b)); 2464 } 2465 rdev_dec_pending(rdev, mddev); 2466 rcu_read_lock(); 2467 } 2468 sl = start; 2469 while (sl != r10_bio->read_slot) { 2470 char b[BDEVNAME_SIZE]; 2471 2472 if (sl==0) 2473 sl = conf->copies; 2474 sl--; 2475 d = r10_bio->devs[sl].devnum; 2476 rdev = rcu_dereference(conf->mirrors[d].rdev); 2477 if (!rdev || 2478 test_bit(Faulty, &rdev->flags) || 2479 !test_bit(In_sync, &rdev->flags)) 2480 continue; 2481 2482 atomic_inc(&rdev->nr_pending); 2483 rcu_read_unlock(); 2484 switch (r10_sync_page_io(rdev, 2485 r10_bio->devs[sl].addr + 2486 sect, 2487 s, conf->tmppage, 2488 READ)) { 2489 case 0: 2490 /* Well, this device is dead */ 2491 pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %s)\n", 2492 mdname(mddev), s, 2493 (unsigned long long)( 2494 sect + 2495 choose_data_offset(r10_bio, rdev)), 2496 bdevname(rdev->bdev, b)); 2497 pr_notice("md/raid10:%s: %s: failing drive\n", 2498 mdname(mddev), 2499 bdevname(rdev->bdev, b)); 2500 break; 2501 case 1: 2502 pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %s)\n", 2503 mdname(mddev), s, 2504 (unsigned long long)( 2505 sect + 2506 choose_data_offset(r10_bio, rdev)), 2507 bdevname(rdev->bdev, b)); 2508 atomic_add(s, &rdev->corrected_errors); 2509 } 2510 2511 rdev_dec_pending(rdev, mddev); 2512 rcu_read_lock(); 2513 } 2514 rcu_read_unlock(); 2515 2516 sectors -= s; 2517 sect += s; 2518 } 2519} 2520 2521static int narrow_write_error(struct r10bio *r10_bio, int i) 2522{ 2523 struct bio *bio = r10_bio->master_bio; 2524 struct mddev *mddev = r10_bio->mddev; 2525 struct r10conf *conf = mddev->private; 2526 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; 2527 /* bio has the data to be written to slot 'i' where 2528 * we just recently had a write error. 2529 * We repeatedly clone the bio and trim down to one block, 2530 * then try the write. Where the write fails we record 2531 * a bad block. 2532 * It is conceivable that the bio doesn't exactly align with 2533 * blocks. We must handle this. 2534 * 2535 * We currently own a reference to the rdev. 
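	 * For example, with badblocks.shift == 3 (4KiB granularity) each
	 * clone below is trimmed to an 8-sector piece aligned on an 8-sector
	 * boundary, so the first piece may be shorter than block_sectors.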
2536 */ 2537 2538 int block_sectors; 2539 sector_t sector; 2540 int sectors; 2541 int sect_to_write = r10_bio->sectors; 2542 int ok = 1; 2543 2544 if (rdev->badblocks.shift < 0) 2545 return 0; 2546 2547 block_sectors = roundup(1 << rdev->badblocks.shift, 2548 bdev_logical_block_size(rdev->bdev) >> 9); 2549 sector = r10_bio->sector; 2550 sectors = ((r10_bio->sector + block_sectors) 2551 & ~(sector_t)(block_sectors - 1)) 2552 - sector; 2553 2554 while (sect_to_write) { 2555 struct bio *wbio; 2556 sector_t wsector; 2557 if (sectors > sect_to_write) 2558 sectors = sect_to_write; 2559 /* Write at 'sector' for 'sectors' */ 2560 wbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); 2561 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); 2562 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector); 2563 wbio->bi_iter.bi_sector = wsector + 2564 choose_data_offset(r10_bio, rdev); 2565 bio_set_dev(wbio, rdev->bdev); 2566 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); 2567 2568 if (submit_bio_wait(wbio) < 0) 2569 /* Failure! */ 2570 ok = rdev_set_badblocks(rdev, wsector, 2571 sectors, 0) 2572 && ok; 2573 2574 bio_put(wbio); 2575 sect_to_write -= sectors; 2576 sector += sectors; 2577 sectors = block_sectors; 2578 } 2579 return ok; 2580} 2581 2582static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) 2583{ 2584 int slot = r10_bio->read_slot; 2585 struct bio *bio; 2586 struct r10conf *conf = mddev->private; 2587 struct md_rdev *rdev = r10_bio->devs[slot].rdev; 2588 2589 /* we got a read error. Maybe the drive is bad. Maybe just 2590 * the block and we can fix it. 2591 * We freeze all other IO, and try reading the block from 2592 * other devices. When we find one, we re-write 2593 * and check it that fixes the read error. 2594 * This is all done synchronously while the array is 2595 * frozen. 2596 */ 2597 bio = r10_bio->devs[slot].bio; 2598 bio_put(bio); 2599 r10_bio->devs[slot].bio = NULL; 2600 2601 if (mddev->ro) 2602 r10_bio->devs[slot].bio = IO_BLOCKED; 2603 else if (!test_bit(FailFast, &rdev->flags)) { 2604 freeze_array(conf, 1); 2605 fix_read_error(conf, mddev, r10_bio); 2606 unfreeze_array(conf); 2607 } else 2608 md_error(mddev, rdev); 2609 2610 rdev_dec_pending(rdev, mddev); 2611 allow_barrier(conf); 2612 r10_bio->state = 0; 2613 raid10_read_request(mddev, r10_bio->master_bio, r10_bio); 2614} 2615 2616static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) 2617{ 2618 /* Some sort of write request has finished and it 2619 * succeeded in writing where we thought there was a 2620 * bad block. So forget the bad block. 2621 * Or possibly if failed and we need to record 2622 * a bad block. 
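	 * Resync/recovery writes (R10BIO_IsSync/IsRecover) are handled in the
	 * first branch below and release their r10buf; for normal writes any
	 * failed range is narrowed via narrow_write_error() before the
	 * original bio is completed.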
2623 */ 2624 int m; 2625 struct md_rdev *rdev; 2626 2627 if (test_bit(R10BIO_IsSync, &r10_bio->state) || 2628 test_bit(R10BIO_IsRecover, &r10_bio->state)) { 2629 for (m = 0; m < conf->copies; m++) { 2630 int dev = r10_bio->devs[m].devnum; 2631 rdev = conf->mirrors[dev].rdev; 2632 if (r10_bio->devs[m].bio == NULL || 2633 r10_bio->devs[m].bio->bi_end_io == NULL) 2634 continue; 2635 if (!r10_bio->devs[m].bio->bi_status) { 2636 rdev_clear_badblocks( 2637 rdev, 2638 r10_bio->devs[m].addr, 2639 r10_bio->sectors, 0); 2640 } else { 2641 if (!rdev_set_badblocks( 2642 rdev, 2643 r10_bio->devs[m].addr, 2644 r10_bio->sectors, 0)) 2645 md_error(conf->mddev, rdev); 2646 } 2647 rdev = conf->mirrors[dev].replacement; 2648 if (r10_bio->devs[m].repl_bio == NULL || 2649 r10_bio->devs[m].repl_bio->bi_end_io == NULL) 2650 continue; 2651 2652 if (!r10_bio->devs[m].repl_bio->bi_status) { 2653 rdev_clear_badblocks( 2654 rdev, 2655 r10_bio->devs[m].addr, 2656 r10_bio->sectors, 0); 2657 } else { 2658 if (!rdev_set_badblocks( 2659 rdev, 2660 r10_bio->devs[m].addr, 2661 r10_bio->sectors, 0)) 2662 md_error(conf->mddev, rdev); 2663 } 2664 } 2665 put_buf(r10_bio); 2666 } else { 2667 bool fail = false; 2668 for (m = 0; m < conf->copies; m++) { 2669 int dev = r10_bio->devs[m].devnum; 2670 struct bio *bio = r10_bio->devs[m].bio; 2671 rdev = conf->mirrors[dev].rdev; 2672 if (bio == IO_MADE_GOOD) { 2673 rdev_clear_badblocks( 2674 rdev, 2675 r10_bio->devs[m].addr, 2676 r10_bio->sectors, 0); 2677 rdev_dec_pending(rdev, conf->mddev); 2678 } else if (bio != NULL && bio->bi_status) { 2679 fail = true; 2680 if (!narrow_write_error(r10_bio, m)) { 2681 md_error(conf->mddev, rdev); 2682 set_bit(R10BIO_Degraded, 2683 &r10_bio->state); 2684 } 2685 rdev_dec_pending(rdev, conf->mddev); 2686 } 2687 bio = r10_bio->devs[m].repl_bio; 2688 rdev = conf->mirrors[dev].replacement; 2689 if (rdev && bio == IO_MADE_GOOD) { 2690 rdev_clear_badblocks( 2691 rdev, 2692 r10_bio->devs[m].addr, 2693 r10_bio->sectors, 0); 2694 rdev_dec_pending(rdev, conf->mddev); 2695 } 2696 } 2697 if (fail) { 2698 spin_lock_irq(&conf->device_lock); 2699 list_add(&r10_bio->retry_list, &conf->bio_end_io_list); 2700 conf->nr_queued++; 2701 spin_unlock_irq(&conf->device_lock); 2702 /* 2703 * In case freeze_array() is waiting for condition 2704 * nr_pending == nr_queued + extra to be true. 
			 */
			wake_up(&conf->wait_barrier);
			md_wakeup_thread(conf->mddev->thread);
		} else {
			if (test_bit(R10BIO_WriteError,
				     &r10_bio->state))
				close_write(r10_bio);
			raid_end_bio_io(r10_bio);
		}
	}
}

static void raid10d(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct r10bio *r10_bio;
	unsigned long flags;
	struct r10conf *conf = mddev->private;
	struct list_head *head = &conf->retry_list;
	struct blk_plug plug;

	md_check_recovery(mddev);

	if (!list_empty_careful(&conf->bio_end_io_list) &&
	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
		LIST_HEAD(tmp);
		spin_lock_irqsave(&conf->device_lock, flags);
		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
			while (!list_empty(&conf->bio_end_io_list)) {
				list_move(conf->bio_end_io_list.prev, &tmp);
				conf->nr_queued--;
			}
		}
		spin_unlock_irqrestore(&conf->device_lock, flags);
		while (!list_empty(&tmp)) {
			r10_bio = list_first_entry(&tmp, struct r10bio,
						   retry_list);
			list_del(&r10_bio->retry_list);
			if (mddev->degraded)
				set_bit(R10BIO_Degraded, &r10_bio->state);

			if (test_bit(R10BIO_WriteError,
				     &r10_bio->state))
				close_write(r10_bio);
			raid_end_bio_io(r10_bio);
		}
	}

	blk_start_plug(&plug);
	for (;;) {

		flush_pending_writes(conf);

		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&conf->device_lock, flags);
			break;
		}
		r10_bio = list_entry(head->prev, struct r10bio, retry_list);
		list_del(head->prev);
		conf->nr_queued--;
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r10_bio->mddev;
		conf = mddev->private;
		if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
		    test_bit(R10BIO_WriteError, &r10_bio->state))
			handle_write_completed(conf, r10_bio);
		else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
			reshape_request_write(mddev, r10_bio);
		else if (test_bit(R10BIO_IsSync, &r10_bio->state))
			sync_request_write(mddev, r10_bio);
		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
			recovery_request_write(mddev, r10_bio);
		else if (test_bit(R10BIO_ReadError, &r10_bio->state))
			handle_read_error(mddev, r10_bio);
		else
			WARN_ON_ONCE(1);

		cond_resched();
		if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
			md_check_recovery(mddev);
	}
	blk_finish_plug(&plug);
}

static int init_resync(struct r10conf *conf)
{
	int ret, buffs, i;

	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
	BUG_ON(mempool_initialized(&conf->r10buf_pool));
	conf->have_replacement = 0;
	for (i = 0; i < conf->geo.raid_disks; i++)
		if (conf->mirrors[i].replacement)
			conf->have_replacement = 1;
	ret = mempool_init(&conf->r10buf_pool, buffs,
			   r10buf_pool_alloc, r10buf_pool_free, conf);
	if (ret)
		return ret;
	conf->next_resync = 0;
	return 0;
}

static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
{
	struct r10bio *r10bio = mempool_alloc(&conf->r10buf_pool, GFP_NOIO);
	struct resync_pages *rp;
	struct bio *bio;
	int nalloc;
	int i;

	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
		nalloc = conf->copies; /* resync */
	else
		nalloc = 2; /* recovery */

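	/* Reset each preallocated bio for reuse, preserving the resync_pages
	 * pointer stashed in ->bi_private across bio_reset().
	 */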
	for (i = 0; i < nalloc; i++) {
		bio = r10bio->devs[i].bio;
		rp = bio->bi_private;
		bio_reset(bio);
		bio->bi_private = rp;
		bio = r10bio->devs[i].repl_bio;
		if (bio) {
			rp = bio->bi_private;
			bio_reset(bio);
			bio->bi_private = rp;
		}
	}
	return r10bio;
}

/*
 * Set cluster_sync_high since we need other nodes to add the
 * range [cluster_sync_low, cluster_sync_high] to suspend list.
 */
static void raid10_set_cluster_sync_high(struct r10conf *conf)
{
	sector_t window_size;
	int extra_chunk, chunks;

	/*
	 * First, here we define a "stripe" as a unit that spans all member
	 * devices once, so the number of chunks per stripe is
	 * raid_disks / near_copies. Otherwise, if near_copies is close to
	 * raid_disks, the resync window would grow linearly with raid_disks,
	 * which means we would suspend a really large IO window when it is
	 * not necessary. If raid_disks is not divisible by near_copies, an
	 * extra chunk is needed to ensure the whole "stripe" is covered.
	 */

	chunks = conf->geo.raid_disks / conf->geo.near_copies;
	if (conf->geo.raid_disks % conf->geo.near_copies == 0)
		extra_chunk = 0;
	else
		extra_chunk = 1;
	window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;

	/*
	 * At least use a 32M window to align with raid1's resync window
	 */
	window_size = (CLUSTER_RESYNC_WINDOW_SECTORS > window_size) ?
			CLUSTER_RESYNC_WINDOW_SECTORS : window_size;

	conf->cluster_sync_high = conf->cluster_sync_low + window_size;
}

/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O request - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 *
 * Resync and recovery are handled very differently.
 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
 *
 * For resync, we iterate over virtual addresses, read all copies,
 * and update if there are differences. If only one copy is live,
 * skip it.
 * For recovery, we iterate over physical addresses, read a good
 * value for each non-in_sync drive, and over-write.
 *
 * So, for recovery we may have several outstanding complex requests for a
 * given address, one for each out-of-sync device. We model this by allocating
 * a number of r10_bio structures, one for each out-of-sync device.
 * As we set up these structures, we collect all bio's together into a list
 * which we then process collectively to add pages, and then process again
 * to pass to submit_bio_noacct.
 *
 * The r10_bio structures are linked using a borrowed master_bio pointer.
 * This link is counted in ->remaining. When the r10_bio that points to NULL
 * has its remaining count decremented to 0, the whole complex operation
 * is complete.
2904 * 2905 */ 2906 2907static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, 2908 int *skipped) 2909{ 2910 struct r10conf *conf = mddev->private; 2911 struct r10bio *r10_bio; 2912 struct bio *biolist = NULL, *bio; 2913 sector_t max_sector, nr_sectors; 2914 int i; 2915 int max_sync; 2916 sector_t sync_blocks; 2917 sector_t sectors_skipped = 0; 2918 int chunks_skipped = 0; 2919 sector_t chunk_mask = conf->geo.chunk_mask; 2920 int page_idx = 0; 2921 2922 /* 2923 * Allow skipping a full rebuild for incremental assembly 2924 * of a clean array, like RAID1 does. 2925 */ 2926 if (mddev->bitmap == NULL && 2927 mddev->recovery_cp == MaxSector && 2928 mddev->reshape_position == MaxSector && 2929 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && 2930 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 2931 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 2932 conf->fullsync == 0) { 2933 *skipped = 1; 2934 return mddev->dev_sectors - sector_nr; 2935 } 2936 2937 if (!mempool_initialized(&conf->r10buf_pool)) 2938 if (init_resync(conf)) 2939 return 0; 2940 2941 skipped: 2942 max_sector = mddev->dev_sectors; 2943 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 2944 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 2945 max_sector = mddev->resync_max_sectors; 2946 if (sector_nr >= max_sector) { 2947 conf->cluster_sync_low = 0; 2948 conf->cluster_sync_high = 0; 2949 2950 /* If we aborted, we need to abort the 2951 * sync on the 'current' bitmap chucks (there can 2952 * be several when recovering multiple devices). 2953 * as we may have started syncing it but not finished. 2954 * We can find the current address in 2955 * mddev->curr_resync, but for recovery, 2956 * we need to convert that to several 2957 * virtual addresses. 2958 */ 2959 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 2960 end_reshape(conf); 2961 close_sync(conf); 2962 return 0; 2963 } 2964 2965 if (mddev->curr_resync < max_sector) { /* aborted */ 2966 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 2967 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 2968 &sync_blocks, 1); 2969 else for (i = 0; i < conf->geo.raid_disks; i++) { 2970 sector_t sect = 2971 raid10_find_virt(conf, mddev->curr_resync, i); 2972 md_bitmap_end_sync(mddev->bitmap, sect, 2973 &sync_blocks, 1); 2974 } 2975 } else { 2976 /* completed sync */ 2977 if ((!mddev->bitmap || conf->fullsync) 2978 && conf->have_replacement 2979 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 2980 /* Completed a full sync so the replacements 2981 * are now fully recovered. 2982 */ 2983 rcu_read_lock(); 2984 for (i = 0; i < conf->geo.raid_disks; i++) { 2985 struct md_rdev *rdev = 2986 rcu_dereference(conf->mirrors[i].replacement); 2987 if (rdev) 2988 rdev->recovery_offset = MaxSector; 2989 } 2990 rcu_read_unlock(); 2991 } 2992 conf->fullsync = 0; 2993 } 2994 md_bitmap_close_sync(mddev->bitmap); 2995 close_sync(conf); 2996 *skipped = 1; 2997 return sectors_skipped; 2998 } 2999 3000 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 3001 return reshape_request(mddev, sector_nr, skipped); 3002 3003 if (chunks_skipped >= conf->geo.raid_disks) { 3004 /* if there has been nothing to do on any drive, 3005 * then there is nothing to do at all.. 
3006 */ 3007 *skipped = 1; 3008 return (max_sector - sector_nr) + sectors_skipped; 3009 } 3010 3011 if (max_sector > mddev->resync_max) 3012 max_sector = mddev->resync_max; /* Don't do IO beyond here */ 3013 3014 /* make sure whole request will fit in a chunk - if chunks 3015 * are meaningful 3016 */ 3017 if (conf->geo.near_copies < conf->geo.raid_disks && 3018 max_sector > (sector_nr | chunk_mask)) 3019 max_sector = (sector_nr | chunk_mask) + 1; 3020 3021 /* 3022 * If there is non-resync activity waiting for a turn, then let it 3023 * though before starting on this new sync request. 3024 */ 3025 if (conf->nr_waiting) 3026 schedule_timeout_uninterruptible(1); 3027 3028 /* Again, very different code for resync and recovery. 3029 * Both must result in an r10bio with a list of bios that 3030 * have bi_end_io, bi_sector, bi_disk set, 3031 * and bi_private set to the r10bio. 3032 * For recovery, we may actually create several r10bios 3033 * with 2 bios in each, that correspond to the bios in the main one. 3034 * In this case, the subordinate r10bios link back through a 3035 * borrowed master_bio pointer, and the counter in the master 3036 * includes a ref from each subordinate. 3037 */ 3038 /* First, we decide what to do and set ->bi_end_io 3039 * To end_sync_read if we want to read, and 3040 * end_sync_write if we will want to write. 3041 */ 3042 3043 max_sync = RESYNC_PAGES << (PAGE_SHIFT-9); 3044 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 3045 /* recovery... the complicated one */ 3046 int j; 3047 r10_bio = NULL; 3048 3049 for (i = 0 ; i < conf->geo.raid_disks; i++) { 3050 int still_degraded; 3051 struct r10bio *rb2; 3052 sector_t sect; 3053 int must_sync; 3054 int any_working; 3055 int need_recover = 0; 3056 struct raid10_info *mirror = &conf->mirrors[i]; 3057 struct md_rdev *mrdev, *mreplace; 3058 3059 rcu_read_lock(); 3060 mrdev = rcu_dereference(mirror->rdev); 3061 mreplace = rcu_dereference(mirror->replacement); 3062 3063 if (mrdev != NULL && 3064 !test_bit(Faulty, &mrdev->flags) && 3065 !test_bit(In_sync, &mrdev->flags)) 3066 need_recover = 1; 3067 if (mreplace && test_bit(Faulty, &mreplace->flags)) 3068 mreplace = NULL; 3069 3070 if (!need_recover && !mreplace) { 3071 rcu_read_unlock(); 3072 continue; 3073 } 3074 3075 still_degraded = 0; 3076 /* want to reconstruct this device */ 3077 rb2 = r10_bio; 3078 sect = raid10_find_virt(conf, sector_nr, i); 3079 if (sect >= mddev->resync_max_sectors) { 3080 /* last stripe is not complete - don't 3081 * try to recover this sector. 
3082 */ 3083 rcu_read_unlock(); 3084 continue; 3085 } 3086 /* Unless we are doing a full sync, or a replacement 3087 * we only need to recover the block if it is set in 3088 * the bitmap 3089 */ 3090 must_sync = md_bitmap_start_sync(mddev->bitmap, sect, 3091 &sync_blocks, 1); 3092 if (sync_blocks < max_sync) 3093 max_sync = sync_blocks; 3094 if (!must_sync && 3095 mreplace == NULL && 3096 !conf->fullsync) { 3097 /* yep, skip the sync_blocks here, but don't assume 3098 * that there will never be anything to do here 3099 */ 3100 chunks_skipped = -1; 3101 rcu_read_unlock(); 3102 continue; 3103 } 3104 atomic_inc(&mrdev->nr_pending); 3105 if (mreplace) 3106 atomic_inc(&mreplace->nr_pending); 3107 rcu_read_unlock(); 3108 3109 r10_bio = raid10_alloc_init_r10buf(conf); 3110 r10_bio->state = 0; 3111 raise_barrier(conf, rb2 != NULL); 3112 atomic_set(&r10_bio->remaining, 0); 3113 3114 r10_bio->master_bio = (struct bio*)rb2; 3115 if (rb2) 3116 atomic_inc(&rb2->remaining); 3117 r10_bio->mddev = mddev; 3118 set_bit(R10BIO_IsRecover, &r10_bio->state); 3119 r10_bio->sector = sect; 3120 3121 raid10_find_phys(conf, r10_bio); 3122 3123 /* Need to check if the array will still be 3124 * degraded 3125 */ 3126 rcu_read_lock(); 3127 for (j = 0; j < conf->geo.raid_disks; j++) { 3128 struct md_rdev *rdev = rcu_dereference( 3129 conf->mirrors[j].rdev); 3130 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) { 3131 still_degraded = 1; 3132 break; 3133 } 3134 } 3135 3136 must_sync = md_bitmap_start_sync(mddev->bitmap, sect, 3137 &sync_blocks, still_degraded); 3138 3139 any_working = 0; 3140 for (j=0; j<conf->copies;j++) { 3141 int k; 3142 int d = r10_bio->devs[j].devnum; 3143 sector_t from_addr, to_addr; 3144 struct md_rdev *rdev = 3145 rcu_dereference(conf->mirrors[d].rdev); 3146 sector_t sector, first_bad; 3147 int bad_sectors; 3148 if (!rdev || 3149 !test_bit(In_sync, &rdev->flags)) 3150 continue; 3151 /* This is where we read from */ 3152 any_working = 1; 3153 sector = r10_bio->devs[j].addr; 3154 3155 if (is_badblock(rdev, sector, max_sync, 3156 &first_bad, &bad_sectors)) { 3157 if (first_bad > sector) 3158 max_sync = first_bad - sector; 3159 else { 3160 bad_sectors -= (sector 3161 - first_bad); 3162 if (max_sync > bad_sectors) 3163 max_sync = bad_sectors; 3164 continue; 3165 } 3166 } 3167 bio = r10_bio->devs[0].bio; 3168 bio->bi_next = biolist; 3169 biolist = bio; 3170 bio->bi_end_io = end_sync_read; 3171 bio_set_op_attrs(bio, REQ_OP_READ, 0); 3172 if (test_bit(FailFast, &rdev->flags)) 3173 bio->bi_opf |= MD_FAILFAST; 3174 from_addr = r10_bio->devs[j].addr; 3175 bio->bi_iter.bi_sector = from_addr + 3176 rdev->data_offset; 3177 bio_set_dev(bio, rdev->bdev); 3178 atomic_inc(&rdev->nr_pending); 3179 /* and we write to 'i' (if not in_sync) */ 3180 3181 for (k=0; k<conf->copies; k++) 3182 if (r10_bio->devs[k].devnum == i) 3183 break; 3184 BUG_ON(k == conf->copies); 3185 to_addr = r10_bio->devs[k].addr; 3186 r10_bio->devs[0].devnum = d; 3187 r10_bio->devs[0].addr = from_addr; 3188 r10_bio->devs[1].devnum = i; 3189 r10_bio->devs[1].addr = to_addr; 3190 3191 if (need_recover) { 3192 bio = r10_bio->devs[1].bio; 3193 bio->bi_next = biolist; 3194 biolist = bio; 3195 bio->bi_end_io = end_sync_write; 3196 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 3197 bio->bi_iter.bi_sector = to_addr 3198 + mrdev->data_offset; 3199 bio_set_dev(bio, mrdev->bdev); 3200 atomic_inc(&r10_bio->remaining); 3201 } else 3202 r10_bio->devs[1].bio->bi_end_io = NULL; 3203 3204 /* and maybe write to replacement */ 3205 bio = r10_bio->devs[1].repl_bio; 
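				/* Clear bi_end_io first: recovery_request_write()
				 * treats a NULL bi_end_io as "no replacement
				 * write needed".
				 */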
3206 if (bio) 3207 bio->bi_end_io = NULL; 3208 /* Note: if replace is not NULL, then bio 3209 * cannot be NULL as r10buf_pool_alloc will 3210 * have allocated it. 3211 */ 3212 if (!mreplace) 3213 break; 3214 bio->bi_next = biolist; 3215 biolist = bio; 3216 bio->bi_end_io = end_sync_write; 3217 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 3218 bio->bi_iter.bi_sector = to_addr + 3219 mreplace->data_offset; 3220 bio_set_dev(bio, mreplace->bdev); 3221 atomic_inc(&r10_bio->remaining); 3222 break; 3223 } 3224 rcu_read_unlock(); 3225 if (j == conf->copies) { 3226 /* Cannot recover, so abort the recovery or 3227 * record a bad block */ 3228 if (any_working) { 3229 /* problem is that there are bad blocks 3230 * on other device(s) 3231 */ 3232 int k; 3233 for (k = 0; k < conf->copies; k++) 3234 if (r10_bio->devs[k].devnum == i) 3235 break; 3236 if (!test_bit(In_sync, 3237 &mrdev->flags) 3238 && !rdev_set_badblocks( 3239 mrdev, 3240 r10_bio->devs[k].addr, 3241 max_sync, 0)) 3242 any_working = 0; 3243 if (mreplace && 3244 !rdev_set_badblocks( 3245 mreplace, 3246 r10_bio->devs[k].addr, 3247 max_sync, 0)) 3248 any_working = 0; 3249 } 3250 if (!any_working) { 3251 if (!test_and_set_bit(MD_RECOVERY_INTR, 3252 &mddev->recovery)) 3253 pr_warn("md/raid10:%s: insufficient working devices for recovery.\n", 3254 mdname(mddev)); 3255 mirror->recovery_disabled 3256 = mddev->recovery_disabled; 3257 } 3258 put_buf(r10_bio); 3259 if (rb2) 3260 atomic_dec(&rb2->remaining); 3261 r10_bio = rb2; 3262 rdev_dec_pending(mrdev, mddev); 3263 if (mreplace) 3264 rdev_dec_pending(mreplace, mddev); 3265 break; 3266 } 3267 rdev_dec_pending(mrdev, mddev); 3268 if (mreplace) 3269 rdev_dec_pending(mreplace, mddev); 3270 if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) { 3271 /* Only want this if there is elsewhere to 3272 * read from. 'j' is currently the first 3273 * readable copy. 3274 */ 3275 int targets = 1; 3276 for (; j < conf->copies; j++) { 3277 int d = r10_bio->devs[j].devnum; 3278 if (conf->mirrors[d].rdev && 3279 test_bit(In_sync, 3280 &conf->mirrors[d].rdev->flags)) 3281 targets++; 3282 } 3283 if (targets == 1) 3284 r10_bio->devs[0].bio->bi_opf 3285 &= ~MD_FAILFAST; 3286 } 3287 } 3288 if (biolist == NULL) { 3289 while (r10_bio) { 3290 struct r10bio *rb2 = r10_bio; 3291 r10_bio = (struct r10bio*) rb2->master_bio; 3292 rb2->master_bio = NULL; 3293 put_buf(rb2); 3294 } 3295 goto giveup; 3296 } 3297 } else { 3298 /* resync. Schedule a read for every block at this virt offset */ 3299 int count = 0; 3300 3301 /* 3302 * Since curr_resync_completed could probably not update in 3303 * time, and we will set cluster_sync_low based on it. 3304 * Let's check against "sector_nr + 2 * RESYNC_SECTORS" for 3305 * safety reason, which ensures curr_resync_completed is 3306 * updated in bitmap_cond_end_sync. 
3307 */ 3308 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, 3309 mddev_is_clustered(mddev) && 3310 (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); 3311 3312 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, 3313 &sync_blocks, mddev->degraded) && 3314 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, 3315 &mddev->recovery)) { 3316 /* We can skip this block */ 3317 *skipped = 1; 3318 return sync_blocks + sectors_skipped; 3319 } 3320 if (sync_blocks < max_sync) 3321 max_sync = sync_blocks; 3322 r10_bio = raid10_alloc_init_r10buf(conf); 3323 r10_bio->state = 0; 3324 3325 r10_bio->mddev = mddev; 3326 atomic_set(&r10_bio->remaining, 0); 3327 raise_barrier(conf, 0); 3328 conf->next_resync = sector_nr; 3329 3330 r10_bio->master_bio = NULL; 3331 r10_bio->sector = sector_nr; 3332 set_bit(R10BIO_IsSync, &r10_bio->state); 3333 raid10_find_phys(conf, r10_bio); 3334 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1; 3335 3336 for (i = 0; i < conf->copies; i++) { 3337 int d = r10_bio->devs[i].devnum; 3338 sector_t first_bad, sector; 3339 int bad_sectors; 3340 struct md_rdev *rdev; 3341 3342 if (r10_bio->devs[i].repl_bio) 3343 r10_bio->devs[i].repl_bio->bi_end_io = NULL; 3344 3345 bio = r10_bio->devs[i].bio; 3346 bio->bi_status = BLK_STS_IOERR; 3347 rcu_read_lock(); 3348 rdev = rcu_dereference(conf->mirrors[d].rdev); 3349 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) { 3350 rcu_read_unlock(); 3351 continue; 3352 } 3353 sector = r10_bio->devs[i].addr; 3354 if (is_badblock(rdev, sector, max_sync, 3355 &first_bad, &bad_sectors)) { 3356 if (first_bad > sector) 3357 max_sync = first_bad - sector; 3358 else { 3359 bad_sectors -= (sector - first_bad); 3360 if (max_sync > bad_sectors) 3361 max_sync = bad_sectors; 3362 rcu_read_unlock(); 3363 continue; 3364 } 3365 } 3366 atomic_inc(&rdev->nr_pending); 3367 atomic_inc(&r10_bio->remaining); 3368 bio->bi_next = biolist; 3369 biolist = bio; 3370 bio->bi_end_io = end_sync_read; 3371 bio_set_op_attrs(bio, REQ_OP_READ, 0); 3372 if (test_bit(FailFast, &rdev->flags)) 3373 bio->bi_opf |= MD_FAILFAST; 3374 bio->bi_iter.bi_sector = sector + rdev->data_offset; 3375 bio_set_dev(bio, rdev->bdev); 3376 count++; 3377 3378 rdev = rcu_dereference(conf->mirrors[d].replacement); 3379 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) { 3380 rcu_read_unlock(); 3381 continue; 3382 } 3383 atomic_inc(&rdev->nr_pending); 3384 3385 /* Need to set up for writing to the replacement */ 3386 bio = r10_bio->devs[i].repl_bio; 3387 bio->bi_status = BLK_STS_IOERR; 3388 3389 sector = r10_bio->devs[i].addr; 3390 bio->bi_next = biolist; 3391 biolist = bio; 3392 bio->bi_end_io = end_sync_write; 3393 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 3394 if (test_bit(FailFast, &rdev->flags)) 3395 bio->bi_opf |= MD_FAILFAST; 3396 bio->bi_iter.bi_sector = sector + rdev->data_offset; 3397 bio_set_dev(bio, rdev->bdev); 3398 count++; 3399 rcu_read_unlock(); 3400 } 3401 3402 if (count < 2) { 3403 for (i=0; i<conf->copies; i++) { 3404 int d = r10_bio->devs[i].devnum; 3405 if (r10_bio->devs[i].bio->bi_end_io) 3406 rdev_dec_pending(conf->mirrors[d].rdev, 3407 mddev); 3408 if (r10_bio->devs[i].repl_bio && 3409 r10_bio->devs[i].repl_bio->bi_end_io) 3410 rdev_dec_pending( 3411 conf->mirrors[d].replacement, 3412 mddev); 3413 } 3414 put_buf(r10_bio); 3415 biolist = NULL; 3416 goto giveup; 3417 } 3418 } 3419 3420 nr_sectors = 0; 3421 if (sector_nr + max_sync < max_sector) 3422 max_sector = sector_nr + max_sync; 3423 do { 3424 struct page *page; 3425 int len = PAGE_SIZE; 3426 if (sector_nr + 
(len>>9) > max_sector) 3427 len = (max_sector - sector_nr) << 9; 3428 if (len == 0) 3429 break; 3430 for (bio= biolist ; bio ; bio=bio->bi_next) { 3431 struct resync_pages *rp = get_resync_pages(bio); 3432 page = resync_fetch_page(rp, page_idx); 3433 /* 3434 * won't fail because the vec table is big enough 3435 * to hold all these pages 3436 */ 3437 bio_add_page(bio, page, len, 0); 3438 } 3439 nr_sectors += len>>9; 3440 sector_nr += len>>9; 3441 } while (++page_idx < RESYNC_PAGES); 3442 r10_bio->sectors = nr_sectors; 3443 3444 if (mddev_is_clustered(mddev) && 3445 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 3446 /* It is resync not recovery */ 3447 if (conf->cluster_sync_high < sector_nr + nr_sectors) { 3448 conf->cluster_sync_low = mddev->curr_resync_completed; 3449 raid10_set_cluster_sync_high(conf); 3450 /* Send resync message */ 3451 md_cluster_ops->resync_info_update(mddev, 3452 conf->cluster_sync_low, 3453 conf->cluster_sync_high); 3454 } 3455 } else if (mddev_is_clustered(mddev)) { 3456 /* This is recovery not resync */ 3457 sector_t sect_va1, sect_va2; 3458 bool broadcast_msg = false; 3459 3460 for (i = 0; i < conf->geo.raid_disks; i++) { 3461 /* 3462 * sector_nr is a device address for recovery, so we 3463 * need translate it to array address before compare 3464 * with cluster_sync_high. 3465 */ 3466 sect_va1 = raid10_find_virt(conf, sector_nr, i); 3467 3468 if (conf->cluster_sync_high < sect_va1 + nr_sectors) { 3469 broadcast_msg = true; 3470 /* 3471 * curr_resync_completed is similar as 3472 * sector_nr, so make the translation too. 3473 */ 3474 sect_va2 = raid10_find_virt(conf, 3475 mddev->curr_resync_completed, i); 3476 3477 if (conf->cluster_sync_low == 0 || 3478 conf->cluster_sync_low > sect_va2) 3479 conf->cluster_sync_low = sect_va2; 3480 } 3481 } 3482 if (broadcast_msg) { 3483 raid10_set_cluster_sync_high(conf); 3484 md_cluster_ops->resync_info_update(mddev, 3485 conf->cluster_sync_low, 3486 conf->cluster_sync_high); 3487 } 3488 } 3489 3490 while (biolist) { 3491 bio = biolist; 3492 biolist = biolist->bi_next; 3493 3494 bio->bi_next = NULL; 3495 r10_bio = get_resync_r10bio(bio); 3496 r10_bio->sectors = nr_sectors; 3497 3498 if (bio->bi_end_io == end_sync_read) { 3499 md_sync_acct_bio(bio, nr_sectors); 3500 bio->bi_status = 0; 3501 submit_bio_noacct(bio); 3502 } 3503 } 3504 3505 if (sectors_skipped) 3506 /* pretend they weren't skipped, it makes 3507 * no important difference in this case 3508 */ 3509 md_done_sync(mddev, sectors_skipped, 1); 3510 3511 return sectors_skipped + nr_sectors; 3512 giveup: 3513 /* There is nowhere to write, so all non-sync 3514 * drives must be failed or in resync, all drives 3515 * have a bad block, so try the next chunk... 
3516 */ 3517 if (sector_nr + max_sync < max_sector) 3518 max_sector = sector_nr + max_sync; 3519 3520 sectors_skipped += (max_sector - sector_nr); 3521 chunks_skipped ++; 3522 sector_nr = max_sector; 3523 goto skipped; 3524} 3525 3526static sector_t 3527raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) 3528{ 3529 sector_t size; 3530 struct r10conf *conf = mddev->private; 3531 3532 if (!raid_disks) 3533 raid_disks = min(conf->geo.raid_disks, 3534 conf->prev.raid_disks); 3535 if (!sectors) 3536 sectors = conf->dev_sectors; 3537 3538 size = sectors >> conf->geo.chunk_shift; 3539 sector_div(size, conf->geo.far_copies); 3540 size = size * raid_disks; 3541 sector_div(size, conf->geo.near_copies); 3542 3543 return size << conf->geo.chunk_shift; 3544} 3545 3546static void calc_sectors(struct r10conf *conf, sector_t size) 3547{ 3548 /* Calculate the number of sectors-per-device that will 3549 * actually be used, and set conf->dev_sectors and 3550 * conf->stride 3551 */ 3552 3553 size = size >> conf->geo.chunk_shift; 3554 sector_div(size, conf->geo.far_copies); 3555 size = size * conf->geo.raid_disks; 3556 sector_div(size, conf->geo.near_copies); 3557 /* 'size' is now the number of chunks in the array */ 3558 /* calculate "used chunks per device" */ 3559 size = size * conf->copies; 3560 3561 /* We need to round up when dividing by raid_disks to 3562 * get the stride size. 3563 */ 3564 size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks); 3565 3566 conf->dev_sectors = size << conf->geo.chunk_shift; 3567 3568 if (conf->geo.far_offset) 3569 conf->geo.stride = 1 << conf->geo.chunk_shift; 3570 else { 3571 sector_div(size, conf->geo.far_copies); 3572 conf->geo.stride = size << conf->geo.chunk_shift; 3573 } 3574} 3575 3576enum geo_type {geo_new, geo_old, geo_start}; 3577static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new) 3578{ 3579 int nc, fc, fo; 3580 int layout, chunk, disks; 3581 switch (new) { 3582 case geo_old: 3583 layout = mddev->layout; 3584 chunk = mddev->chunk_sectors; 3585 disks = mddev->raid_disks - mddev->delta_disks; 3586 break; 3587 case geo_new: 3588 layout = mddev->new_layout; 3589 chunk = mddev->new_chunk_sectors; 3590 disks = mddev->raid_disks; 3591 break; 3592 default: /* avoid 'may be unused' warnings */ 3593 case geo_start: /* new when starting reshape - raid_disks not 3594 * updated yet. */ 3595 layout = mddev->new_layout; 3596 chunk = mddev->new_chunk_sectors; 3597 disks = mddev->raid_disks + mddev->delta_disks; 3598 break; 3599 } 3600 if (layout >> 19) 3601 return -1; 3602 if (chunk < (PAGE_SIZE >> 9) || 3603 !is_power_of_2(chunk)) 3604 return -2; 3605 nc = layout & 255; 3606 fc = (layout >> 8) & 255; 3607 fo = layout & (1<<16); 3608 geo->raid_disks = disks; 3609 geo->near_copies = nc; 3610 geo->far_copies = fc; 3611 geo->far_offset = fo; 3612 switch (layout >> 17) { 3613 case 0: /* original layout. simple but not always optimal */ 3614 geo->far_set_size = disks; 3615 break; 3616 case 1: /* "improved" layout which was buggy. 
Hopefully no-one is 3617 * actually using this, but leave code here just in case.*/ 3618 geo->far_set_size = disks/fc; 3619 WARN(geo->far_set_size < fc, 3620 "This RAID10 layout does not provide data safety - please backup and create new array\n"); 3621 break; 3622 case 2: /* "improved" layout fixed to match documentation */ 3623 geo->far_set_size = fc * nc; 3624 break; 3625 default: /* Not a valid layout */ 3626 return -1; 3627 } 3628 geo->chunk_mask = chunk - 1; 3629 geo->chunk_shift = ffz(~chunk); 3630 return nc*fc; 3631} 3632 3633static void raid10_free_conf(struct r10conf *conf) 3634{ 3635 if (!conf) 3636 return; 3637 3638 mempool_exit(&conf->r10bio_pool); 3639 kfree(conf->mirrors); 3640 kfree(conf->mirrors_old); 3641 kfree(conf->mirrors_new); 3642 safe_put_page(conf->tmppage); 3643 bioset_exit(&conf->bio_split); 3644 kfree(conf); 3645} 3646 3647static struct r10conf *setup_conf(struct mddev *mddev) 3648{ 3649 struct r10conf *conf = NULL; 3650 int err = -EINVAL; 3651 struct geom geo; 3652 int copies; 3653 3654 copies = setup_geo(&geo, mddev, geo_new); 3655 3656 if (copies == -2) { 3657 pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n", 3658 mdname(mddev), PAGE_SIZE); 3659 goto out; 3660 } 3661 3662 if (copies < 2 || copies > mddev->raid_disks) { 3663 pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%8x\n", 3664 mdname(mddev), mddev->new_layout); 3665 goto out; 3666 } 3667 3668 err = -ENOMEM; 3669 conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL); 3670 if (!conf) 3671 goto out; 3672 3673 /* FIXME calc properly */ 3674 conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks), 3675 sizeof(struct raid10_info), 3676 GFP_KERNEL); 3677 if (!conf->mirrors) 3678 goto out; 3679 3680 conf->tmppage = alloc_page(GFP_KERNEL); 3681 if (!conf->tmppage) 3682 goto out; 3683 3684 conf->geo = geo; 3685 conf->copies = copies; 3686 err = mempool_init(&conf->r10bio_pool, NR_RAID_BIOS, r10bio_pool_alloc, 3687 rbio_pool_free, conf); 3688 if (err) 3689 goto out; 3690 3691 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0); 3692 if (err) 3693 goto out; 3694 3695 calc_sectors(conf, mddev->dev_sectors); 3696 if (mddev->reshape_position == MaxSector) { 3697 conf->prev = conf->geo; 3698 conf->reshape_progress = MaxSector; 3699 } else { 3700 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) { 3701 err = -EINVAL; 3702 goto out; 3703 } 3704 conf->reshape_progress = mddev->reshape_position; 3705 if (conf->prev.far_offset) 3706 conf->prev.stride = 1 << conf->prev.chunk_shift; 3707 else 3708 /* far_copies must be 1 */ 3709 conf->prev.stride = conf->dev_sectors; 3710 } 3711 conf->reshape_safe = conf->reshape_progress; 3712 spin_lock_init(&conf->device_lock); 3713 INIT_LIST_HEAD(&conf->retry_list); 3714 INIT_LIST_HEAD(&conf->bio_end_io_list); 3715 3716 spin_lock_init(&conf->resync_lock); 3717 init_waitqueue_head(&conf->wait_barrier); 3718 atomic_set(&conf->nr_pending, 0); 3719 3720 err = -ENOMEM; 3721 conf->thread = md_register_thread(raid10d, mddev, "raid10"); 3722 if (!conf->thread) 3723 goto out; 3724 3725 conf->mddev = mddev; 3726 return conf; 3727 3728 out: 3729 raid10_free_conf(conf); 3730 return ERR_PTR(err); 3731} 3732 3733static void raid10_set_io_opt(struct r10conf *conf) 3734{ 3735 int raid_disks = conf->geo.raid_disks; 3736 3737 if (!(conf->geo.raid_disks % conf->geo.near_copies)) 3738 raid_disks /= conf->geo.near_copies; 3739 blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) * 3740 raid_disks); 3741} 3742 3743static 
int raid10_run(struct mddev *mddev) 3744{ 3745 struct r10conf *conf; 3746 int i, disk_idx; 3747 struct raid10_info *disk; 3748 struct md_rdev *rdev; 3749 sector_t size; 3750 sector_t min_offset_diff = 0; 3751 int first = 1; 3752 bool discard_supported = false; 3753 3754 if (mddev_init_writes_pending(mddev) < 0) 3755 return -ENOMEM; 3756 3757 if (mddev->private == NULL) { 3758 conf = setup_conf(mddev); 3759 if (IS_ERR(conf)) 3760 return PTR_ERR(conf); 3761 mddev->private = conf; 3762 } 3763 conf = mddev->private; 3764 if (!conf) 3765 goto out; 3766 3767 mddev->thread = conf->thread; 3768 conf->thread = NULL; 3769 3770 if (mddev_is_clustered(conf->mddev)) { 3771 int fc, fo; 3772 3773 fc = (mddev->layout >> 8) & 255; 3774 fo = mddev->layout & (1<<16); 3775 if (fc > 1 || fo > 0) { 3776 pr_err("only near layout is supported by clustered" 3777 " raid10\n"); 3778 goto out_free_conf; 3779 } 3780 } 3781 3782 if (mddev->queue) { 3783 blk_queue_max_discard_sectors(mddev->queue, 3784 mddev->chunk_sectors); 3785 blk_queue_max_write_same_sectors(mddev->queue, 0); 3786 blk_queue_max_write_zeroes_sectors(mddev->queue, 0); 3787 blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9); 3788 raid10_set_io_opt(conf); 3789 } 3790 3791 rdev_for_each(rdev, mddev) { 3792 long long diff; 3793 3794 disk_idx = rdev->raid_disk; 3795 if (disk_idx < 0) 3796 continue; 3797 if (disk_idx >= conf->geo.raid_disks && 3798 disk_idx >= conf->prev.raid_disks) 3799 continue; 3800 disk = conf->mirrors + disk_idx; 3801 3802 if (test_bit(Replacement, &rdev->flags)) { 3803 if (disk->replacement) 3804 goto out_free_conf; 3805 disk->replacement = rdev; 3806 } else { 3807 if (disk->rdev) 3808 goto out_free_conf; 3809 disk->rdev = rdev; 3810 } 3811 diff = (rdev->new_data_offset - rdev->data_offset); 3812 if (!mddev->reshape_backwards) 3813 diff = -diff; 3814 if (diff < 0) 3815 diff = 0; 3816 if (first || diff < min_offset_diff) 3817 min_offset_diff = diff; 3818 3819 if (mddev->gendisk) 3820 disk_stack_limits(mddev->gendisk, rdev->bdev, 3821 rdev->data_offset << 9); 3822 3823 disk->head_position = 0; 3824 3825 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) 3826 discard_supported = true; 3827 first = 0; 3828 } 3829 3830 if (mddev->queue) { 3831 if (discard_supported) 3832 blk_queue_flag_set(QUEUE_FLAG_DISCARD, 3833 mddev->queue); 3834 else 3835 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, 3836 mddev->queue); 3837 } 3838 /* need to check that every block has at least one working mirror */ 3839 if (!enough(conf, -1)) { 3840 pr_err("md/raid10:%s: not enough operational mirrors.\n", 3841 mdname(mddev)); 3842 goto out_free_conf; 3843 } 3844 3845 if (conf->reshape_progress != MaxSector) { 3846 /* must ensure that shape change is supported */ 3847 if (conf->geo.far_copies != 1 && 3848 conf->geo.far_offset == 0) 3849 goto out_free_conf; 3850 if (conf->prev.far_copies != 1 && 3851 conf->prev.far_offset == 0) 3852 goto out_free_conf; 3853 } 3854 3855 mddev->degraded = 0; 3856 for (i = 0; 3857 i < conf->geo.raid_disks 3858 || i < conf->prev.raid_disks; 3859 i++) { 3860 3861 disk = conf->mirrors + i; 3862 3863 if (!disk->rdev && disk->replacement) { 3864 /* The replacement is all we have - use it */ 3865 disk->rdev = disk->replacement; 3866 disk->replacement = NULL; 3867 clear_bit(Replacement, &disk->rdev->flags); 3868 } 3869 3870 if (!disk->rdev || 3871 !test_bit(In_sync, &disk->rdev->flags)) { 3872 disk->head_position = 0; 3873 mddev->degraded++; 3874 if (disk->rdev && 3875 disk->rdev->saved_raid_disk < 0) 3876 conf->fullsync = 1; 3877 } 3878 3879 
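		/* A replacement device that has not completed recovery also
		 * forces a full resync so its contents get rebuilt.
		 */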
		if (disk->replacement &&
		    !test_bit(In_sync, &disk->replacement->flags) &&
		    disk->replacement->saved_raid_disk < 0) {
			conf->fullsync = 1;
		}

		disk->recovery_disabled = mddev->recovery_disabled - 1;
	}

	if (mddev->recovery_cp != MaxSector)
		pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
			  mdname(mddev));
	pr_info("md/raid10:%s: active with %d out of %d devices\n",
		mdname(mddev), conf->geo.raid_disks - mddev->degraded,
		conf->geo.raid_disks);
	/*
	 * Ok, everything is just fine now
	 */
	mddev->dev_sectors = conf->dev_sectors;
	size = raid10_size(mddev, 0, 0);
	md_set_array_sectors(mddev, size);
	mddev->resync_max_sectors = size;
	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);

	if (md_integrity_register(mddev))
		goto out_free_conf;

	if (conf->reshape_progress != MaxSector) {
		unsigned long before_length, after_length;

		before_length = ((1 << conf->prev.chunk_shift) *
				 conf->prev.far_copies);
		after_length = ((1 << conf->geo.chunk_shift) *
				 conf->geo.far_copies);

		if (max(before_length, after_length) > min_offset_diff) {
			/* This cannot work */
			pr_warn("md/raid10: offset difference not enough to continue reshape\n");
			goto out_free_conf;
		}
		conf->offset_diff = min_offset_diff;

		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
							"reshape");
		if (!mddev->sync_thread)
			goto out_free_conf;
	}

	return 0;

out_free_conf:
	md_unregister_thread(&mddev->thread);
	raid10_free_conf(conf);
	mddev->private = NULL;
out:
	return -EIO;
}

static void raid10_free(struct mddev *mddev, void *priv)
{
	raid10_free_conf(priv);
}

static void raid10_quiesce(struct mddev *mddev, int quiesce)
{
	struct r10conf *conf = mddev->private;

	if (quiesce)
		raise_barrier(conf, 0);
	else
		lower_barrier(conf);
}

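/*
 * Example of the rounding rule described in the comment below
 * (hypothetical geometry): for a 'near' array with near_copies=2,
 * raid_disks=5, far_copies=1 and 1024-sector chunks, the multiplier
 * is lcm(5, 2) = 10, so the usable size is rounded to a multiple of
 * 10 * 1024 = 10240 sectors.  For an 'offset' array the multiplier is
 * simply far_copies chunks.
 */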
static int raid10_resize(struct mddev *mddev, sector_t sectors)
{
	/* Resize of 'far' arrays is not supported.
	 * For 'near' and 'offset' arrays we can set the
	 * number of sectors used to be an appropriate multiple
	 * of the chunk size.
	 * For 'offset', this is far_copies*chunksize.
	 * For 'near' the multiplier is the LCM of
	 * near_copies and raid_disks.
	 * So if far_copies > 1 && !far_offset, fail.
	 * Else find LCM(raid_disks, near_copies)*far_copies and
	 * multiply by chunk_size.  Then round to this number.
	 * This is mostly done by raid10_size()
	 */
	struct r10conf *conf = mddev->private;
	sector_t oldsize, size;

	if (mddev->reshape_position != MaxSector)
		return -EBUSY;

	if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
		return -EINVAL;

	oldsize = raid10_size(mddev, 0, 0);
	size = raid10_size(mddev, sectors, 0);
	if (mddev->external_size &&
	    mddev->array_sectors > size)
		return -EINVAL;
	if (mddev->bitmap) {
		int ret = md_bitmap_resize(mddev->bitmap, size, 0, 0);
		if (ret)
			return ret;
	}
	md_set_array_sectors(mddev, size);
	if (sectors > mddev->dev_sectors &&
	    mddev->recovery_cp > oldsize) {
		mddev->recovery_cp = oldsize;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	calc_sectors(conf, sectors);
	mddev->dev_sectors = conf->dev_sectors;
	mddev->resync_max_sectors = size;
	return 0;
}

static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
{
	struct md_rdev *rdev;
	struct r10conf *conf;

	if (mddev->degraded > 0) {
		pr_warn("md/raid10:%s: Error: degraded raid0!\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	sector_div(size, devs);

	/* Set new parameters */
	mddev->new_level = 10;
	/* new layout: far_copies = 1, near_copies = 2 */
	mddev->new_layout = (1<<8) + 2;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = mddev->raid_disks;
	mddev->raid_disks *= 2;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev->dev_sectors = size;

	conf = setup_conf(mddev);
	if (!IS_ERR(conf)) {
		rdev_for_each(rdev, mddev)
			if (rdev->raid_disk >= 0) {
				rdev->new_raid_disk = rdev->raid_disk * 2;
				rdev->sectors = size;
			}
		conf->barrier = 1;
	}

	return conf;
}

static void *raid10_takeover(struct mddev *mddev)
{
	struct r0conf *raid0_conf;

	/* raid10 can take over:
	 *  raid0 - providing it has only two drives
	 */
	if (mddev->level == 0) {
		/* for raid0 takeover only one zone is supported */
		raid0_conf = mddev->private;
		if (raid0_conf->nr_strip_zones > 1) {
			pr_warn("md/raid10:%s: cannot takeover raid 0 with more than one zone.\n",
				mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		return raid10_takeover_raid0(mddev,
					     raid0_conf->strip_zone->zone_end,
					     raid0_conf->strip_zone->nb_dev);
	}
	return ERR_PTR(-EINVAL);
}

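/*
 * For example (hypothetical geometries): a near-2 array may grow from
 * 4 to 6 devices or switch to offset-2, because the number of copies
 * stays at 2; but converting it to far-2, or reshaping an existing
 * far-2 array that does not use far_offset, is rejected below.
 */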
static int raid10_check_reshape(struct mddev *mddev)
{
	/* Called when there is a request to change
	 * - layout (to ->new_layout)
	 * - chunk size (to ->new_chunk_sectors)
	 * - raid_disks (by delta_disks)
	 * or when trying to restart a reshape that was ongoing.
	 *
	 * We need to validate the request and possibly allocate
	 * space if that might be an issue later.
	 *
	 * Currently we reject any reshape of a 'far' mode array,
	 * allow chunk size to change if new is generally acceptable,
	 * allow raid_disks to increase, and allow
	 * a switch between 'near' mode and 'offset' mode.
	 */
	struct r10conf *conf = mddev->private;
	struct geom geo;

	if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
		return -EINVAL;

	if (setup_geo(&geo, mddev, geo_start) != conf->copies)
		/* mustn't change number of copies */
		return -EINVAL;
	if (geo.far_copies > 1 && !geo.far_offset)
		/* Cannot switch to 'far' mode */
		return -EINVAL;

	if (mddev->array_sectors & geo.chunk_mask)
		/* not factor of array size */
		return -EINVAL;

	if (!enough(conf, -1))
		return -EINVAL;

	kfree(conf->mirrors_new);
	conf->mirrors_new = NULL;
	if (mddev->delta_disks > 0) {
		/* allocate new 'mirrors' list */
		conf->mirrors_new =
			kcalloc(mddev->raid_disks + mddev->delta_disks,
				sizeof(struct raid10_info),
				GFP_KERNEL);
		if (!conf->mirrors_new)
			return -ENOMEM;
	}
	return 0;
}

/*
 * Need to check if array has failed when deciding whether to:
 *  - start an array
 *  - remove non-faulty devices
 *  - add a spare
 *  - allow a reshape
 * This determination is simple when no reshape is happening.
 * However if there is a reshape, we need to carefully check
 * both the before and after sections.
 * This is because some failed devices may only affect one
 * of the two sections, and some non-in_sync devices may
 * be insync in the section most affected by failed devices.
 */
static int calc_degraded(struct r10conf *conf)
{
	int degraded, degraded2;
	int i;

	rcu_read_lock();
	degraded = 0;
	/* 'prev' section first */
	for (i = 0; i < conf->prev.raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded++;
		else if (!test_bit(In_sync, &rdev->flags))
			/* When we can reduce the number of devices in
			 * an array, this might not contribute to
			 * 'degraded'.  It does now.
			 */
			degraded++;
	}
	rcu_read_unlock();
	if (conf->geo.raid_disks == conf->prev.raid_disks)
		return degraded;
	rcu_read_lock();
	degraded2 = 0;
	for (i = 0; i < conf->geo.raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded2++;
		else if (!test_bit(In_sync, &rdev->flags)) {
			/* If reshape is increasing the number of devices,
			 * this section has already been recovered, so
			 * it doesn't contribute to degraded.
			 * else it does.
			 */
			if (conf->geo.raid_disks <= conf->prev.raid_disks)
				degraded2++;
		}
	}
	rcu_read_unlock();
	if (degraded2 > degraded)
		return degraded2;
	return degraded;
}

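/*
 * For example (hypothetical): growing a near-2 array from 4 to 6
 * devices (delta_disks = 2) needs at least two spare devices so that
 * the reshaped array starts out non-degraded; otherwise
 * raid10_start_reshape() below fails with -EINVAL.
 */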
static int raid10_start_reshape(struct mddev *mddev)
{
	/* A 'reshape' has been requested.  This commits
	 * the various 'new' fields and sets MD_RECOVERY_RESHAPE.
	 * This also checks if there are enough spares and adds them
	 * to the array.
	 * We currently require enough spares to make the final
	 * array non-degraded.  We also require that the difference
	 * between old and new data_offset - on each device - is
	 * enough that we never risk over-writing.
	 */

	unsigned long before_length, after_length;
	sector_t min_offset_diff = 0;
	int first = 1;
	struct geom new;
	struct r10conf *conf = mddev->private;
	struct md_rdev *rdev;
	int spares = 0;
	int ret;

	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;

	if (setup_geo(&new, mddev, geo_start) != conf->copies)
		return -EINVAL;

	before_length = ((1 << conf->prev.chunk_shift) *
			 conf->prev.far_copies);
	after_length = ((1 << conf->geo.chunk_shift) *
			 conf->geo.far_copies);

	rdev_for_each(rdev, mddev) {
		if (!test_bit(In_sync, &rdev->flags)
		    && !test_bit(Faulty, &rdev->flags))
			spares++;
		if (rdev->raid_disk >= 0) {
			long long diff = (rdev->new_data_offset
					  - rdev->data_offset);
			if (!mddev->reshape_backwards)
				diff = -diff;
			if (diff < 0)
				diff = 0;
			if (first || diff < min_offset_diff)
				min_offset_diff = diff;
			first = 0;
		}
	}

	if (max(before_length, after_length) > min_offset_diff)
		return -EINVAL;

	if (spares < mddev->delta_disks)
		return -EINVAL;

	conf->offset_diff = min_offset_diff;
	spin_lock_irq(&conf->device_lock);
	if (conf->mirrors_new) {
		memcpy(conf->mirrors_new, conf->mirrors,
		       sizeof(struct raid10_info)*conf->prev.raid_disks);
		smp_mb();
		kfree(conf->mirrors_old);
		conf->mirrors_old = conf->mirrors;
		conf->mirrors = conf->mirrors_new;
		conf->mirrors_new = NULL;
	}
	setup_geo(&conf->geo, mddev, geo_start);
	smp_mb();
	if (mddev->reshape_backwards) {
		sector_t size = raid10_size(mddev, 0, 0);
		if (size < mddev->array_sectors) {
			spin_unlock_irq(&conf->device_lock);
			pr_warn("md/raid10:%s: array size must be reduced before number of disks\n",
				mdname(mddev));
			return -EINVAL;
		}
		mddev->resync_max_sectors = size;
		conf->reshape_progress = size;
	} else
		conf->reshape_progress = 0;
	conf->reshape_safe = conf->reshape_progress;
	spin_unlock_irq(&conf->device_lock);

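	/*
	 * Growing the array may also require a larger bitmap.  For a
	 * clustered array the resize must be coordinated: if another
	 * node has already started the reshape (its superblock has
	 * MD_FEATURE_RESHAPE_ACTIVE set), this node's bitmap will be
	 * resized when the BITMAP_RESIZE message arrives instead.
	 */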
	if (mddev->delta_disks && mddev->bitmap) {
		struct mdp_superblock_1 *sb = NULL;
		sector_t oldsize, newsize;

		oldsize = raid10_size(mddev, 0, 0);
		newsize = raid10_size(mddev, 0, conf->geo.raid_disks);

		if (!mddev_is_clustered(mddev)) {
			ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
			if (ret)
				goto abort;
			else
				goto out;
		}

		rdev_for_each(rdev, mddev) {
			if (rdev->raid_disk > -1 &&
			    !test_bit(Faulty, &rdev->flags))
				sb = page_address(rdev->sb_page);
		}

		/*
		 * Some other node is already performing the reshape, so
		 * there is no need to call md_bitmap_resize again since it
		 * will be called when the BITMAP_RESIZE msg is received.
		 */
		if ((sb && (le32_to_cpu(sb->feature_map) &
			    MD_FEATURE_RESHAPE_ACTIVE)) || (oldsize == newsize))
			goto out;

		ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
		if (ret)
			goto abort;

		ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
		if (ret) {
			md_bitmap_resize(mddev->bitmap, oldsize, 0, 0);
			goto abort;
		}
	}
out:
	if (mddev->delta_disks > 0) {
		rdev_for_each(rdev, mddev)
			if (rdev->raid_disk < 0 &&
			    !test_bit(Faulty, &rdev->flags)) {
				if (raid10_add_disk(mddev, rdev) == 0) {
					if (rdev->raid_disk >=
					    conf->prev.raid_disks)
						set_bit(In_sync, &rdev->flags);
					else
						rdev->recovery_offset = 0;

					/* Failure here is OK */
					sysfs_link_rdev(mddev, rdev);
				}
			} else if (rdev->raid_disk >= conf->prev.raid_disks
				   && !test_bit(Faulty, &rdev->flags)) {
				/* This is a spare that was manually added */
				set_bit(In_sync, &rdev->flags);
			}
	}
	/* When a reshape changes the number of devices,
	 * ->degraded is measured against the larger of the
	 * pre and post numbers.
	 */
	spin_lock_irq(&conf->device_lock);
	mddev->degraded = calc_degraded(conf);
	spin_unlock_irq(&conf->device_lock);
	mddev->raid_disks = conf->geo.raid_disks;
	mddev->reshape_position = conf->reshape_progress;
	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);

	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);

	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
						"reshape");
	if (!mddev->sync_thread) {
		ret = -EAGAIN;
		goto abort;
	}
	conf->reshape_checkpoint = jiffies;
	md_wakeup_thread(mddev->sync_thread);
	md_new_event(mddev);
	return 0;

abort:
	mddev->recovery = 0;
	spin_lock_irq(&conf->device_lock);
	conf->geo = conf->prev;
	mddev->raid_disks = conf->geo.raid_disks;
	rdev_for_each(rdev, mddev)
		rdev->new_data_offset = rdev->data_offset;
	smp_wmb();
	conf->reshape_progress = MaxSector;
	conf->reshape_safe = MaxSector;
	mddev->reshape_position = MaxSector;
	spin_unlock_irq(&conf->device_lock);
	return ret;
}

/* Calculate the last device-address that could contain
 * any block from the chunk that includes the array-address 's'
 * and report the next address.
 * i.e. the address returned will be chunk-aligned and after
 * any data that is in the chunk containing 's'.
 */
static sector_t last_dev_address(sector_t s, struct geom *geo)
{
	s = (s | geo->chunk_mask) + 1;
	s >>= geo->chunk_shift;
	s *= geo->near_copies;
	s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
	s *= geo->far_copies;
	s <<= geo->chunk_shift;
	return s;
}

/* Calculate the first device-address that could contain
 * any block from the chunk that includes the array-address 's'.
 * This too will be the start of a chunk
 */
static sector_t first_dev_address(sector_t s, struct geom *geo)
{
	s >>= geo->chunk_shift;
	s *= geo->near_copies;
	sector_div(s, geo->raid_disks);
	s *= geo->far_copies;
	s <<= geo->chunk_shift;
	return s;
}

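/*
 * Worked example (hypothetical geometry): 64-sector chunks
 * (chunk_shift = 6, chunk_mask = 63), near_copies = 2, far_copies = 1,
 * raid_disks = 4.  Array sector 100 lives in array chunk 1, which is
 * stored on two of the four devices in their first device chunk
 * (device sectors 0-63).  last_dev_address(100, geo) returns 64 - the
 * chunk-aligned device address just past that data - while
 * first_dev_address(100, geo) returns 0 - the device chunk where it
 * starts.
 */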
static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
				int *skipped)
{
	/* We simply copy at most one chunk (smallest of old and new)
	 * at a time, possibly less if that exceeds RESYNC_PAGES,
	 * or we hit a bad block or something.
	 * This might mean we pause for normal IO in the middle of
	 * a chunk, but that is not a problem as mddev->reshape_position
	 * can record any location.
	 *
	 * If we will want to write to a location that isn't
	 * yet recorded as 'safe' (i.e. in metadata on disk) then
	 * we need to flush all reshape requests and update the metadata.
	 *
	 * When reshaping forwards (e.g. to more devices), we interpret
	 * 'safe' as the earliest block which might not have been copied
	 * down yet.  We divide this by the previous stripe size and
	 * multiply by the previous stripe length to get the lowest device
	 * offset that we cannot write to yet.
	 * We interpret 'sector_nr' as an address that we want to write to.
	 * From this we use last_dev_address() to find where we might
	 * write to, and first_dev_address() on the 'safe' position.
	 * If this 'next' write position is after the 'safe' position,
	 * we must update the metadata to increase the 'safe' position.
	 *
	 * When reshaping backwards, we round in the opposite direction
	 * and perform the reverse test: next write position must not be
	 * less than current safe position.
	 *
	 * In all this the minimum difference in data offsets
	 * (conf->offset_diff - always positive) allows a bit of slack,
	 * so next can be after 'safe', but not by more than offset_diff.
	 *
	 * We need to prepare all the bios here before we start any IO
	 * to ensure the size we choose is acceptable to all devices.
	 * That means one for each copy for write-out and an extra one for
	 * read-in.
	 * We store the read-in bio in ->master_bio and the others in
	 * ->devs[x].bio and ->devs[x].repl_bio.
	 */
	struct r10conf *conf = mddev->private;
	struct r10bio *r10_bio;
	sector_t next, safe, last;
	int max_sectors;
	int nr_sectors;
	int s;
	struct md_rdev *rdev;
	int need_flush = 0;
	struct bio *blist;
	struct bio *bio, *read_bio;
	int sectors_done = 0;
	struct page **pages;

	if (sector_nr == 0) {
		/* If restarting in the middle, skip the initial sectors */
		if (mddev->reshape_backwards &&
		    conf->reshape_progress < raid10_size(mddev, 0, 0)) {
			sector_nr = (raid10_size(mddev, 0, 0)
				     - conf->reshape_progress);
		} else if (!mddev->reshape_backwards &&
			   conf->reshape_progress > 0)
			sector_nr = conf->reshape_progress;
		if (sector_nr) {
			mddev->curr_resync_completed = sector_nr;
			sysfs_notify_dirent_safe(mddev->sysfs_completed);
			*skipped = 1;
			return sector_nr;
		}
	}

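	/*
	 * Example of the 'safe'/'next' test performed below (forwards
	 * reshape, hypothetical numbers): if the new layout may be
	 * written up to device address 4096 for this chunk ('next'),
	 * the old layout may still need to be read from device address
	 * 2048 onwards ('safe'), and offset_diff is 1024 sectors, then
	 * next > safe + offset_diff and the metadata must be flushed
	 * before any write is issued (need_flush).
	 */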
	/* We don't use sector_nr to track where we are up to
	 * as that doesn't work well for ->reshape_backwards.
	 * So just use ->reshape_progress.
	 */
	if (mddev->reshape_backwards) {
		/* 'next' is the earliest device address that we might
		 * write to for this chunk in the new layout
		 */
		next = first_dev_address(conf->reshape_progress - 1,
					 &conf->geo);

		/* 'safe' is the last device address that we might read from
		 * in the old layout after a restart
		 */
		safe = last_dev_address(conf->reshape_safe - 1,
					&conf->prev);

		if (next + conf->offset_diff < safe)
			need_flush = 1;

		last = conf->reshape_progress - 1;
		sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
					       & conf->prev.chunk_mask);
		if (sector_nr + RESYNC_SECTORS < last)
			sector_nr = last + 1 - RESYNC_SECTORS;
	} else {
		/* 'next' is after the last device address that we
		 * might write to for this chunk in the new layout
		 */
		next = last_dev_address(conf->reshape_progress, &conf->geo);

		/* 'safe' is the earliest device address that we might
		 * read from in the old layout after a restart
		 */
		safe = first_dev_address(conf->reshape_safe, &conf->prev);

		/* Need to update metadata if 'next' might be beyond 'safe'
		 * as that would possibly corrupt data
		 */
		if (next > safe + conf->offset_diff)
			need_flush = 1;

		sector_nr = conf->reshape_progress;
		last = sector_nr | (conf->geo.chunk_mask
				    & conf->prev.chunk_mask);

		if (sector_nr + RESYNC_SECTORS <= last)
			last = sector_nr + RESYNC_SECTORS - 1;
	}

	if (need_flush ||
	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
		/* Need to update reshape_position in metadata */
		wait_barrier(conf);
		mddev->reshape_position = conf->reshape_progress;
		if (mddev->reshape_backwards)
			mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
				- conf->reshape_progress;
		else
			mddev->curr_resync_completed = conf->reshape_progress;
		conf->reshape_checkpoint = jiffies;
		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
			   test_bit(MD_RECOVERY_INTR, &mddev->recovery));
		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
			allow_barrier(conf);
			return sectors_done;
		}
		conf->reshape_safe = mddev->reshape_position;
		allow_barrier(conf);
	}

	raise_barrier(conf, 0);
read_more:
	/* Now schedule reads for blocks from sector_nr to last */
	r10_bio = raid10_alloc_init_r10buf(conf);
	r10_bio->state = 0;
	raise_barrier(conf, 1);
	atomic_set(&r10_bio->remaining, 0);
	r10_bio->mddev = mddev;
	r10_bio->sector = sector_nr;
	set_bit(R10BIO_IsReshape, &r10_bio->state);
	r10_bio->sectors = last - sector_nr + 1;
	rdev = read_balance(conf, r10_bio, &max_sectors);
	BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));

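	/*
	 * The r10_bio was set up against the previous geometry
	 * (R10BIO_Previous), so read_balance() chooses a source device
	 * that holds this chunk in the old layout.
	 */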
	if (!rdev) {
		/* Cannot read from here, so need to record bad blocks
		 * on all the target devices.
		 */
		// FIXME
		mempool_free(r10_bio, &conf->r10buf_pool);
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		return sectors_done;
	}

	read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);

	bio_set_dev(read_bio, rdev->bdev);
	read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
				       + rdev->data_offset);
	read_bio->bi_private = r10_bio;
	read_bio->bi_end_io = end_reshape_read;
	bio_set_op_attrs(read_bio, REQ_OP_READ, 0);
	read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
	read_bio->bi_status = 0;
	read_bio->bi_vcnt = 0;
	read_bio->bi_iter.bi_size = 0;
	r10_bio->master_bio = read_bio;
	r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;

	/*
	 * Broadcast the RESYNC message to other nodes, so that no node
	 * writes to this region and conflicts are avoided.
	 */
	if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) {
		struct mdp_superblock_1 *sb = NULL;
		int sb_reshape_pos = 0;

		conf->cluster_sync_low = sector_nr;
		conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS;
		sb = page_address(rdev->sb_page);
		if (sb) {
			sb_reshape_pos = le64_to_cpu(sb->reshape_position);
			/*
			 * Set cluster_sync_low again if the next address for
			 * the array reshape is less than cluster_sync_low,
			 * since we can't update cluster_sync_low until the
			 * reshape has finished.
			 */
			if (sb_reshape_pos < conf->cluster_sync_low)
				conf->cluster_sync_low = sb_reshape_pos;
		}

		md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
						   conf->cluster_sync_high);
	}

	/* Now find the locations in the new layout */
	__raid10_find_phys(&conf->geo, r10_bio);

	blist = read_bio;
	read_bio->bi_next = NULL;

	rcu_read_lock();
	for (s = 0; s < conf->copies*2; s++) {
		struct bio *b;
		int d = r10_bio->devs[s/2].devnum;
		struct md_rdev *rdev2;
		if (s&1) {
			rdev2 = rcu_dereference(conf->mirrors[d].replacement);
			b = r10_bio->devs[s/2].repl_bio;
		} else {
			rdev2 = rcu_dereference(conf->mirrors[d].rdev);
			b = r10_bio->devs[s/2].bio;
		}
		if (!rdev2 || test_bit(Faulty, &rdev2->flags))
			continue;

		bio_set_dev(b, rdev2->bdev);
		b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
			rdev2->new_data_offset;
		b->bi_end_io = end_reshape_write;
		bio_set_op_attrs(b, REQ_OP_WRITE, 0);
		b->bi_next = blist;
		blist = b;
	}

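	/*
	 * At this point 'blist' links the read bio followed by one write
	 * bio for every working copy (and replacement) in the new layout;
	 * the same pages will be attached to all of them below.
	 */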
	/* Now add as many pages as possible to all of these bios. */

	nr_sectors = 0;
	pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
	for (s = 0; s < max_sectors; s += PAGE_SIZE >> 9) {
		struct page *page = pages[s / (PAGE_SIZE >> 9)];
		int len = (max_sectors - s) << 9;
		if (len > PAGE_SIZE)
			len = PAGE_SIZE;
		for (bio = blist; bio ; bio = bio->bi_next) {
			/*
			 * won't fail because the vec table is big enough
			 * to hold all these pages
			 */
			bio_add_page(bio, page, len, 0);
		}
		sector_nr += len >> 9;
		nr_sectors += len >> 9;
	}
	rcu_read_unlock();
	r10_bio->sectors = nr_sectors;

	/* Now submit the read */
	md_sync_acct_bio(read_bio, r10_bio->sectors);
	atomic_inc(&r10_bio->remaining);
	read_bio->bi_next = NULL;
	submit_bio_noacct(read_bio);
	sectors_done += nr_sectors;
	if (sector_nr <= last)
		goto read_more;

	lower_barrier(conf);

	/* Now that we have done the whole section we can
	 * update reshape_progress
	 */
	if (mddev->reshape_backwards)
		conf->reshape_progress -= sectors_done;
	else
		conf->reshape_progress += sectors_done;

	return sectors_done;
}

static void end_reshape_request(struct r10bio *r10_bio);
static int handle_reshape_read_error(struct mddev *mddev,
				     struct r10bio *r10_bio);

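/*
 * reshape_request() above only submits the read for a chunk.  Once that
 * read completes (end_reshape_read), the md thread picks the r10_bio up
 * again and calls reshape_request_write() to issue the corresponding
 * writes in the new layout.
 */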
static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
{
	/* Reshape read completed.  Hopefully we have a block
	 * to write out.
	 * If we got a read error then we do sync 1-page reads from
	 * elsewhere until we find the data - or give up.
	 */
	struct r10conf *conf = mddev->private;
	int s;

	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
		if (handle_reshape_read_error(mddev, r10_bio) < 0) {
			/* Reshape has been aborted */
			md_done_sync(mddev, r10_bio->sectors, 0);
			return;
		}

	/* We definitely have the data in the pages, schedule the
	 * writes.
	 */
	atomic_set(&r10_bio->remaining, 1);
	for (s = 0; s < conf->copies*2; s++) {
		struct bio *b;
		int d = r10_bio->devs[s/2].devnum;
		struct md_rdev *rdev;
		rcu_read_lock();
		if (s&1) {
			rdev = rcu_dereference(conf->mirrors[d].replacement);
			b = r10_bio->devs[s/2].repl_bio;
		} else {
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			b = r10_bio->devs[s/2].bio;
		}
		if (!rdev || test_bit(Faulty, &rdev->flags)) {
			rcu_read_unlock();
			continue;
		}
		atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();
		md_sync_acct_bio(b, r10_bio->sectors);
		atomic_inc(&r10_bio->remaining);
		b->bi_next = NULL;
		submit_bio_noacct(b);
	}
	end_reshape_request(r10_bio);
}

static void end_reshape(struct r10conf *conf)
{
	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
		return;

	spin_lock_irq(&conf->device_lock);
	conf->prev = conf->geo;
	md_finish_reshape(conf->mddev);
	smp_wmb();
	conf->reshape_progress = MaxSector;
	conf->reshape_safe = MaxSector;
	spin_unlock_irq(&conf->device_lock);

	if (conf->mddev->queue)
		raid10_set_io_opt(conf);
	conf->fullsync = 0;
}

static void raid10_update_reshape_pos(struct mddev *mddev)
{
	struct r10conf *conf = mddev->private;
	sector_t lo, hi;

	md_cluster_ops->resync_info_get(mddev, &lo, &hi);
	if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo))
	    || mddev->reshape_position == MaxSector)
		conf->reshape_progress = mddev->reshape_position;
	else
		WARN_ON_ONCE(1);
}

static int handle_reshape_read_error(struct mddev *mddev,
				     struct r10bio *r10_bio)
{
	/* Use sync reads to get the blocks from somewhere else */
	int sectors = r10_bio->sectors;
	struct r10conf *conf = mddev->private;
	struct r10bio *r10b;
	int slot = 0;
	int idx = 0;
	struct page **pages;

	r10b = kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO);
	if (!r10b) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		return -ENOMEM;
	}

	/* reshape IOs share pages from .devs[0].bio */
	pages = get_resync_pages(r10_bio->devs[0].bio)->pages;

	r10b->sector = r10_bio->sector;
	__raid10_find_phys(&conf->prev, r10b);

	while (sectors) {
		int s = sectors;
		int success = 0;
		int first_slot = slot;

		if (s > (PAGE_SIZE >> 9))
			s = PAGE_SIZE >> 9;

		rcu_read_lock();
		while (!success) {
			int d = r10b->devs[slot].devnum;
			struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
			sector_t addr;
			if (rdev == NULL ||
			    test_bit(Faulty, &rdev->flags) ||
			    !test_bit(In_sync, &rdev->flags))
				goto failed;

			addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			success = sync_page_io(rdev,
					       addr,
					       s << 9,
					       pages[idx],
					       REQ_OP_READ, 0, false);
			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
			if (success)
				break;
failed:
			slot++;
			if (slot >= conf->copies)
				slot = 0;
			if (slot == first_slot)
				break;
		}
		rcu_read_unlock();
		if (!success) {
			/* couldn't read this block, must give up */
			set_bit(MD_RECOVERY_INTR,
				&mddev->recovery);
			kfree(r10b);
			return -EIO;
		}
		sectors -= s;
		idx++;
	}
	kfree(r10b);
	return 0;
}

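/*
 * Write completion for the reshape writes issued by
 * reshape_request_write().  A failed write is passed to md_error();
 * recording a bad block instead is still a FIXME.
 */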
static void end_reshape_write(struct bio *bio)
{
	struct r10bio *r10_bio = get_resync_r10bio(bio);
	struct mddev *mddev = r10_bio->mddev;
	struct r10conf *conf = mddev->private;
	int d;
	int slot;
	int repl;
	struct md_rdev *rdev = NULL;

	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
	if (repl)
		rdev = conf->mirrors[d].replacement;
	if (!rdev) {
		smp_mb();
		rdev = conf->mirrors[d].rdev;
	}

	if (bio->bi_status) {
		/* FIXME should record badblock */
		md_error(mddev, rdev);
	}

	rdev_dec_pending(rdev, mddev);
	end_reshape_request(r10_bio);
}

static void end_reshape_request(struct r10bio *r10_bio)
{
	if (!atomic_dec_and_test(&r10_bio->remaining))
		return;
	md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
	bio_put(r10_bio->master_bio);
	put_buf(r10_bio);
}

static void raid10_finish_reshape(struct mddev *mddev)
{
	struct r10conf *conf = mddev->private;

	if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
		return;

	if (mddev->delta_disks > 0) {
		if (mddev->recovery_cp > mddev->resync_max_sectors) {
			mddev->recovery_cp = mddev->resync_max_sectors;
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		}
		mddev->resync_max_sectors = mddev->array_sectors;
	} else {
		int d;
		rcu_read_lock();
		for (d = conf->geo.raid_disks ;
		     d < conf->geo.raid_disks - mddev->delta_disks;
		     d++) {
			struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (rdev)
				clear_bit(In_sync, &rdev->flags);
			rdev = rcu_dereference(conf->mirrors[d].replacement);
			if (rdev)
				clear_bit(In_sync, &rdev->flags);
		}
		rcu_read_unlock();
	}
	mddev->layout = mddev->new_layout;
	mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
	mddev->reshape_position = MaxSector;
	mddev->delta_disks = 0;
	mddev->reshape_backwards = 0;
}

static struct md_personality raid10_personality =
{
	.name		= "raid10",
	.level		= 10,
	.owner		= THIS_MODULE,
	.make_request	= raid10_make_request,
	.run		= raid10_run,
	.free		= raid10_free,
	.status		= raid10_status,
	.error_handler	= raid10_error,
	.hot_add_disk	= raid10_add_disk,
	.hot_remove_disk= raid10_remove_disk,
	.spare_active	= raid10_spare_active,
	.sync_request	= raid10_sync_request,
	.quiesce	= raid10_quiesce,
	.size		= raid10_size,
	.resize		= raid10_resize,
	.takeover	= raid10_takeover,
	.check_reshape	= raid10_check_reshape,
	.start_reshape	= raid10_start_reshape,
	.finish_reshape	= raid10_finish_reshape,
	.update_reshape_pos = raid10_update_reshape_pos,
};

static int __init raid_init(void)
{
	return register_md_personality(&raid10_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&raid10_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
MODULE_ALIAS("md-personality-9"); /* RAID10 */
MODULE_ALIAS("md-raid10");
MODULE_ALIAS("md-level-10");

module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);