1// SPDX-License-Identifier: GPL-2.0-or-later 2/* 3 md.c : Multiple Devices driver for Linux 4 Copyright (C) 1998, 1999, 2000 Ingo Molnar 5 6 completely rewritten, based on the MD driver code from Marc Zyngier 7 8 Changes: 9 10 - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar 11 - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com> 12 - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net> 13 - kerneld support by Boris Tobotras <boris@xtalk.msk.su> 14 - kmod support by: Cyrus Durgin 15 - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com> 16 - Devfs support by Richard Gooch <rgooch@atnf.csiro.au> 17 18 - lots of fixes and improvements to the RAID1/RAID5 and generic 19 RAID code (such as request based resynchronization): 20 21 Neil Brown <neilb@cse.unsw.edu.au>. 22 23 - persistent bitmap code 24 Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc. 25 26 27 Errors, Warnings, etc. 28 Please use: 29 pr_crit() for error conditions that risk data loss 30 pr_err() for error conditions that are unexpected, like an IO error 31 or internal inconsistency 32 pr_warn() for error conditions that could have been predicated, like 33 adding a device to an array when it has incompatible metadata 34 pr_info() for every interesting, very rare events, like an array starting 35 or stopping, or resync starting or stopping 36 pr_debug() for everything else. 37 38*/ 39 40#include <linux/sched/mm.h> 41#include <linux/sched/signal.h> 42#include <linux/kthread.h> 43#include <linux/blkdev.h> 44#include <linux/badblocks.h> 45#include <linux/sysctl.h> 46#include <linux/seq_file.h> 47#include <linux/fs.h> 48#include <linux/poll.h> 49#include <linux/ctype.h> 50#include <linux/string.h> 51#include <linux/hdreg.h> 52#include <linux/proc_fs.h> 53#include <linux/random.h> 54#include <linux/module.h> 55#include <linux/reboot.h> 56#include <linux/file.h> 57#include <linux/compat.h> 58#include <linux/delay.h> 59#include <linux/raid/md_p.h> 60#include <linux/raid/md_u.h> 61#include <linux/raid/detect.h> 62#include <linux/slab.h> 63#include <linux/percpu-refcount.h> 64#include <linux/part_stat.h> 65 66#include <trace/events/block.h> 67#include "md.h" 68#include "md-bitmap.h" 69#include "md-cluster.h" 70 71/* pers_list is a list of registered personalities protected 72 * by pers_lock. 73 * pers_lock does extra service to protect accesses to 74 * mddev->thread when the mutex cannot be held. 75 */ 76static LIST_HEAD(pers_list); 77static DEFINE_SPINLOCK(pers_lock); 78 79static struct kobj_type md_ktype; 80 81struct md_cluster_operations *md_cluster_ops; 82EXPORT_SYMBOL(md_cluster_ops); 83static struct module *md_cluster_mod; 84 85static DECLARE_WAIT_QUEUE_HEAD(resync_wait); 86static struct workqueue_struct *md_wq; 87static struct workqueue_struct *md_misc_wq; 88static struct workqueue_struct *md_rdev_misc_wq; 89 90static int remove_and_add_spares(struct mddev *mddev, 91 struct md_rdev *this); 92static void mddev_detach(struct mddev *mddev); 93 94/* 95 * Default number of read corrections we'll attempt on an rdev 96 * before ejecting it from the array. We divide the read error 97 * count by 2 for every hour elapsed between read errors. 98 */ 99#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20 100/* Default safemode delay: 200 msec */ 101#define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1) 102/* 103 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit' 104 * is 1000 KB/sec, so the extra system load does not show up that much. 
105 * Increase it if you want to have more _guaranteed_ speed. Note that 106 * the RAID driver will use the maximum available bandwidth if the IO 107 * subsystem is idle. There is also an 'absolute maximum' reconstruction 108 * speed limit - in case reconstruction slows down your system despite 109 * idle IO detection. 110 * 111 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max. 112 * or /sys/block/mdX/md/sync_speed_{min,max} 113 */ 114 115static int sysctl_speed_limit_min = 1000; 116static int sysctl_speed_limit_max = 200000; 117static inline int speed_min(struct mddev *mddev) 118{ 119 return mddev->sync_speed_min ? 120 mddev->sync_speed_min : sysctl_speed_limit_min; 121} 122 123static inline int speed_max(struct mddev *mddev) 124{ 125 return mddev->sync_speed_max ? 126 mddev->sync_speed_max : sysctl_speed_limit_max; 127} 128 129static void rdev_uninit_serial(struct md_rdev *rdev) 130{ 131 if (!test_and_clear_bit(CollisionCheck, &rdev->flags)) 132 return; 133 134 kvfree(rdev->serial); 135 rdev->serial = NULL; 136} 137 138static void rdevs_uninit_serial(struct mddev *mddev) 139{ 140 struct md_rdev *rdev; 141 142 rdev_for_each(rdev, mddev) 143 rdev_uninit_serial(rdev); 144} 145 146static int rdev_init_serial(struct md_rdev *rdev) 147{ 148 /* serial_nums equals with BARRIER_BUCKETS_NR */ 149 int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t)))); 150 struct serial_in_rdev *serial = NULL; 151 152 if (test_bit(CollisionCheck, &rdev->flags)) 153 return 0; 154 155 serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums, 156 GFP_KERNEL); 157 if (!serial) 158 return -ENOMEM; 159 160 for (i = 0; i < serial_nums; i++) { 161 struct serial_in_rdev *serial_tmp = &serial[i]; 162 163 spin_lock_init(&serial_tmp->serial_lock); 164 serial_tmp->serial_rb = RB_ROOT_CACHED; 165 init_waitqueue_head(&serial_tmp->serial_io_wait); 166 } 167 168 rdev->serial = serial; 169 set_bit(CollisionCheck, &rdev->flags); 170 171 return 0; 172} 173 174static int rdevs_init_serial(struct mddev *mddev) 175{ 176 struct md_rdev *rdev; 177 int ret = 0; 178 179 rdev_for_each(rdev, mddev) { 180 ret = rdev_init_serial(rdev); 181 if (ret) 182 break; 183 } 184 185 /* Free all resources if pool is not existed */ 186 if (ret && !mddev->serial_info_pool) 187 rdevs_uninit_serial(mddev); 188 189 return ret; 190} 191 192/* 193 * rdev needs to enable serial stuffs if it meets the conditions: 194 * 1. it is multi-queue device flaged with writemostly. 195 * 2. the write-behind mode is enabled. 196 */ 197static int rdev_need_serial(struct md_rdev *rdev) 198{ 199 return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 && 200 rdev->bdev->bd_disk->queue->nr_hw_queues != 1 && 201 test_bit(WriteMostly, &rdev->flags)); 202} 203 204/* 205 * Init resource for rdev(s), then create serial_info_pool if: 206 * 1. rdev is the first device which return true from rdev_enable_serial. 207 * 2. rdev is NULL, means we want to enable serialization for all rdevs. 
208 */ 209void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev, 210 bool is_suspend) 211{ 212 int ret = 0; 213 214 if (rdev && !rdev_need_serial(rdev) && 215 !test_bit(CollisionCheck, &rdev->flags)) 216 return; 217 218 if (!is_suspend) 219 mddev_suspend(mddev); 220 221 if (!rdev) 222 ret = rdevs_init_serial(mddev); 223 else 224 ret = rdev_init_serial(rdev); 225 if (ret) 226 goto abort; 227 228 if (mddev->serial_info_pool == NULL) { 229 /* 230 * already in memalloc noio context by 231 * mddev_suspend() 232 */ 233 mddev->serial_info_pool = 234 mempool_create_kmalloc_pool(NR_SERIAL_INFOS, 235 sizeof(struct serial_info)); 236 if (!mddev->serial_info_pool) { 237 rdevs_uninit_serial(mddev); 238 pr_err("can't alloc memory pool for serialization\n"); 239 } 240 } 241 242abort: 243 if (!is_suspend) 244 mddev_resume(mddev); 245} 246 247/* 248 * Free resource from rdev(s), and destroy serial_info_pool under conditions: 249 * 1. rdev is the last device flaged with CollisionCheck. 250 * 2. when bitmap is destroyed while policy is not enabled. 251 * 3. for disable policy, the pool is destroyed only when no rdev needs it. 252 */ 253void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev, 254 bool is_suspend) 255{ 256 if (rdev && !test_bit(CollisionCheck, &rdev->flags)) 257 return; 258 259 if (mddev->serial_info_pool) { 260 struct md_rdev *temp; 261 int num = 0; /* used to track if other rdevs need the pool */ 262 263 if (!is_suspend) 264 mddev_suspend(mddev); 265 rdev_for_each(temp, mddev) { 266 if (!rdev) { 267 if (!mddev->serialize_policy || 268 !rdev_need_serial(temp)) 269 rdev_uninit_serial(temp); 270 else 271 num++; 272 } else if (temp != rdev && 273 test_bit(CollisionCheck, &temp->flags)) 274 num++; 275 } 276 277 if (rdev) 278 rdev_uninit_serial(rdev); 279 280 if (num) 281 pr_info("The mempool could be used by other devices\n"); 282 else { 283 mempool_destroy(mddev->serial_info_pool); 284 mddev->serial_info_pool = NULL; 285 } 286 if (!is_suspend) 287 mddev_resume(mddev); 288 } 289} 290 291static struct ctl_table_header *raid_table_header; 292 293static struct ctl_table raid_table[] = { 294 { 295 .procname = "speed_limit_min", 296 .data = &sysctl_speed_limit_min, 297 .maxlen = sizeof(int), 298 .mode = S_IRUGO|S_IWUSR, 299 .proc_handler = proc_dointvec, 300 }, 301 { 302 .procname = "speed_limit_max", 303 .data = &sysctl_speed_limit_max, 304 .maxlen = sizeof(int), 305 .mode = S_IRUGO|S_IWUSR, 306 .proc_handler = proc_dointvec, 307 }, 308 { } 309}; 310 311static struct ctl_table raid_dir_table[] = { 312 { 313 .procname = "raid", 314 .maxlen = 0, 315 .mode = S_IRUGO|S_IXUGO, 316 .child = raid_table, 317 }, 318 { } 319}; 320 321static struct ctl_table raid_root_table[] = { 322 { 323 .procname = "dev", 324 .maxlen = 0, 325 .mode = 0555, 326 .child = raid_dir_table, 327 }, 328 { } 329}; 330 331static int start_readonly; 332 333/* 334 * The original mechanism for creating an md device is to create 335 * a device node in /dev and to open it. This causes races with device-close. 336 * The preferred method is to write to the "new_array" module parameter. 337 * This can avoid races. 338 * Setting create_on_open to false disables the original mechanism 339 * so all the races disappear. 
340 */ 341static bool create_on_open = true; 342 343struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, 344 struct mddev *mddev) 345{ 346 if (!mddev || !bioset_initialized(&mddev->bio_set)) 347 return bio_alloc(gfp_mask, nr_iovecs); 348 349 return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set); 350} 351EXPORT_SYMBOL_GPL(bio_alloc_mddev); 352 353static struct bio *md_bio_alloc_sync(struct mddev *mddev) 354{ 355 if (!mddev || !bioset_initialized(&mddev->sync_set)) 356 return bio_alloc(GFP_NOIO, 1); 357 358 return bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set); 359} 360 361/* 362 * We have a system wide 'event count' that is incremented 363 * on any 'interesting' event, and readers of /proc/mdstat 364 * can use 'poll' or 'select' to find out when the event 365 * count increases. 366 * 367 * Events are: 368 * start array, stop array, error, add device, remove device, 369 * start build, activate spare 370 */ 371static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters); 372static atomic_t md_event_count; 373void md_new_event(struct mddev *mddev) 374{ 375 atomic_inc(&md_event_count); 376 wake_up(&md_event_waiters); 377} 378EXPORT_SYMBOL_GPL(md_new_event); 379 380/* 381 * Enables to iterate over all existing md arrays 382 * all_mddevs_lock protects this list. 383 */ 384static LIST_HEAD(all_mddevs); 385static DEFINE_SPINLOCK(all_mddevs_lock); 386 387/* 388 * iterates through all used mddevs in the system. 389 * We take care to grab the all_mddevs_lock whenever navigating 390 * the list, and to always hold a refcount when unlocked. 391 * Any code which breaks out of this loop while own 392 * a reference to the current mddev and must mddev_put it. 393 */ 394#define for_each_mddev(_mddev,_tmp) \ 395 \ 396 for (({ spin_lock(&all_mddevs_lock); \ 397 _tmp = all_mddevs.next; \ 398 _mddev = NULL;}); \ 399 ({ if (_tmp != &all_mddevs) \ 400 mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\ 401 spin_unlock(&all_mddevs_lock); \ 402 if (_mddev) mddev_put(_mddev); \ 403 _mddev = list_entry(_tmp, struct mddev, all_mddevs); \ 404 _tmp != &all_mddevs;}); \ 405 ({ spin_lock(&all_mddevs_lock); \ 406 _tmp = _tmp->next;}) \ 407 ) 408 409/* Rather than calling directly into the personality make_request function, 410 * IO requests come here first so that we can check if the device is 411 * being suspended pending a reconfiguration. 412 * We hold a refcount over the call to ->make_request. By the time that 413 * call has finished, the bio has been linked into some internal structure 414 * and so is visible to ->quiesce(), so we don't need the refcount any more. 
415 */ 416static bool is_suspended(struct mddev *mddev, struct bio *bio) 417{ 418 if (mddev->suspended) 419 return true; 420 if (bio_data_dir(bio) != WRITE) 421 return false; 422 if (mddev->suspend_lo >= mddev->suspend_hi) 423 return false; 424 if (bio->bi_iter.bi_sector >= mddev->suspend_hi) 425 return false; 426 if (bio_end_sector(bio) < mddev->suspend_lo) 427 return false; 428 return true; 429} 430 431void md_handle_request(struct mddev *mddev, struct bio *bio) 432{ 433check_suspended: 434 rcu_read_lock(); 435 if (is_suspended(mddev, bio)) { 436 DEFINE_WAIT(__wait); 437 for (;;) { 438 prepare_to_wait(&mddev->sb_wait, &__wait, 439 TASK_UNINTERRUPTIBLE); 440 if (!is_suspended(mddev, bio)) 441 break; 442 rcu_read_unlock(); 443 schedule(); 444 rcu_read_lock(); 445 } 446 finish_wait(&mddev->sb_wait, &__wait); 447 } 448 atomic_inc(&mddev->active_io); 449 rcu_read_unlock(); 450 451 if (!mddev->pers->make_request(mddev, bio)) { 452 atomic_dec(&mddev->active_io); 453 wake_up(&mddev->sb_wait); 454 goto check_suspended; 455 } 456 457 if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) 458 wake_up(&mddev->sb_wait); 459} 460EXPORT_SYMBOL(md_handle_request); 461 462static blk_qc_t md_submit_bio(struct bio *bio) 463{ 464 const int rw = bio_data_dir(bio); 465 struct mddev *mddev = bio->bi_disk->private_data; 466 467 if (mddev == NULL || mddev->pers == NULL) { 468 bio_io_error(bio); 469 return BLK_QC_T_NONE; 470 } 471 472 if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) { 473 bio_io_error(bio); 474 return BLK_QC_T_NONE; 475 } 476 477 blk_queue_split(&bio); 478 479 if (mddev->ro == 1 && unlikely(rw == WRITE)) { 480 if (bio_sectors(bio) != 0) 481 bio->bi_status = BLK_STS_IOERR; 482 bio_endio(bio); 483 return BLK_QC_T_NONE; 484 } 485 486 /* bio could be mergeable after passing to underlayer */ 487 bio->bi_opf &= ~REQ_NOMERGE; 488 489 md_handle_request(mddev, bio); 490 491 return BLK_QC_T_NONE; 492} 493 494/* mddev_suspend makes sure no new requests are submitted 495 * to the device, and that any requests that have been submitted 496 * are completely handled. 497 * Once mddev_detach() is called and completes, the module will be 498 * completely unused. 
499 */ 500void mddev_suspend(struct mddev *mddev) 501{ 502 WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk); 503 lockdep_assert_held(&mddev->reconfig_mutex); 504 if (mddev->suspended++) 505 return; 506 synchronize_rcu(); 507 wake_up(&mddev->sb_wait); 508 set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags); 509 smp_mb__after_atomic(); 510 wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); 511 mddev->pers->quiesce(mddev, 1); 512 clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags); 513 wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags)); 514 515 del_timer_sync(&mddev->safemode_timer); 516 /* restrict memory reclaim I/O during raid array is suspend */ 517 mddev->noio_flag = memalloc_noio_save(); 518} 519EXPORT_SYMBOL_GPL(mddev_suspend); 520 521void mddev_resume(struct mddev *mddev) 522{ 523 /* entred the memalloc scope from mddev_suspend() */ 524 memalloc_noio_restore(mddev->noio_flag); 525 lockdep_assert_held(&mddev->reconfig_mutex); 526 if (--mddev->suspended) 527 return; 528 wake_up(&mddev->sb_wait); 529 mddev->pers->quiesce(mddev, 0); 530 531 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 532 md_wakeup_thread(mddev->thread); 533 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 534} 535EXPORT_SYMBOL_GPL(mddev_resume); 536 537/* 538 * Generic flush handling for md 539 */ 540 541static void md_end_flush(struct bio *bio) 542{ 543 struct md_rdev *rdev = bio->bi_private; 544 struct mddev *mddev = rdev->mddev; 545 546 bio_put(bio); 547 548 rdev_dec_pending(rdev, mddev); 549 550 if (atomic_dec_and_test(&mddev->flush_pending)) { 551 /* The pre-request flush has finished */ 552 queue_work(md_wq, &mddev->flush_work); 553 } 554} 555 556static void md_submit_flush_data(struct work_struct *ws); 557 558static void submit_flushes(struct work_struct *ws) 559{ 560 struct mddev *mddev = container_of(ws, struct mddev, flush_work); 561 struct md_rdev *rdev; 562 563 mddev->start_flush = ktime_get_boottime(); 564 INIT_WORK(&mddev->flush_work, md_submit_flush_data); 565 atomic_set(&mddev->flush_pending, 1); 566 rcu_read_lock(); 567 rdev_for_each_rcu(rdev, mddev) 568 if (rdev->raid_disk >= 0 && 569 !test_bit(Faulty, &rdev->flags)) { 570 /* Take two references, one is dropped 571 * when request finishes, one after 572 * we reclaim rcu_read_lock 573 */ 574 struct bio *bi; 575 atomic_inc(&rdev->nr_pending); 576 atomic_inc(&rdev->nr_pending); 577 rcu_read_unlock(); 578 bi = bio_alloc_mddev(GFP_NOIO, 0, mddev); 579 bi->bi_end_io = md_end_flush; 580 bi->bi_private = rdev; 581 bio_set_dev(bi, rdev->bdev); 582 bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 583 atomic_inc(&mddev->flush_pending); 584 submit_bio(bi); 585 rcu_read_lock(); 586 rdev_dec_pending(rdev, mddev); 587 } 588 rcu_read_unlock(); 589 if (atomic_dec_and_test(&mddev->flush_pending)) 590 queue_work(md_wq, &mddev->flush_work); 591} 592 593static void md_submit_flush_data(struct work_struct *ws) 594{ 595 struct mddev *mddev = container_of(ws, struct mddev, flush_work); 596 struct bio *bio = mddev->flush_bio; 597 598 /* 599 * must reset flush_bio before calling into md_handle_request to avoid a 600 * deadlock, because other bios passed md_handle_request suspend check 601 * could wait for this and below md_handle_request could wait for those 602 * bios because of suspend check 603 */ 604 spin_lock_irq(&mddev->lock); 605 mddev->last_flush = mddev->start_flush; 606 mddev->flush_bio = NULL; 607 spin_unlock_irq(&mddev->lock); 608 wake_up(&mddev->sb_wait); 609 610 if (bio->bi_iter.bi_size == 0) { 611 /* an empty 
barrier - all done */ 612 bio_endio(bio); 613 } else { 614 bio->bi_opf &= ~REQ_PREFLUSH; 615 md_handle_request(mddev, bio); 616 } 617} 618 619/* 620 * Manages consolidation of flushes and submitting any flushes needed for 621 * a bio with REQ_PREFLUSH. Returns true if the bio is finished or is 622 * being finished in another context. Returns false if the flushing is 623 * complete but still needs the I/O portion of the bio to be processed. 624 */ 625bool md_flush_request(struct mddev *mddev, struct bio *bio) 626{ 627 ktime_t start = ktime_get_boottime(); 628 spin_lock_irq(&mddev->lock); 629 wait_event_lock_irq(mddev->sb_wait, 630 !mddev->flush_bio || 631 ktime_after(mddev->last_flush, start), 632 mddev->lock); 633 if (!ktime_after(mddev->last_flush, start)) { 634 WARN_ON(mddev->flush_bio); 635 mddev->flush_bio = bio; 636 bio = NULL; 637 } 638 spin_unlock_irq(&mddev->lock); 639 640 if (!bio) { 641 INIT_WORK(&mddev->flush_work, submit_flushes); 642 queue_work(md_wq, &mddev->flush_work); 643 } else { 644 /* flush was performed for some other bio while we waited. */ 645 if (bio->bi_iter.bi_size == 0) 646 /* an empty barrier - all done */ 647 bio_endio(bio); 648 else { 649 bio->bi_opf &= ~REQ_PREFLUSH; 650 return false; 651 } 652 } 653 return true; 654} 655EXPORT_SYMBOL(md_flush_request); 656 657static inline struct mddev *mddev_get(struct mddev *mddev) 658{ 659 atomic_inc(&mddev->active); 660 return mddev; 661} 662 663static void mddev_delayed_delete(struct work_struct *ws); 664 665static void mddev_put(struct mddev *mddev) 666{ 667 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) 668 return; 669 if (!mddev->raid_disks && list_empty(&mddev->disks) && 670 mddev->ctime == 0 && !mddev->hold_active) { 671 /* Array is not configured at all, and not held active, 672 * so destroy it */ 673 list_del_init(&mddev->all_mddevs); 674 675 /* 676 * Call queue_work inside the spinlock so that 677 * flush_workqueue() after mddev_find will succeed in waiting 678 * for the work to be done. 
679 */ 680 INIT_WORK(&mddev->del_work, mddev_delayed_delete); 681 queue_work(md_misc_wq, &mddev->del_work); 682 } 683 spin_unlock(&all_mddevs_lock); 684} 685 686static void md_safemode_timeout(struct timer_list *t); 687 688void mddev_init(struct mddev *mddev) 689{ 690 kobject_init(&mddev->kobj, &md_ktype); 691 mutex_init(&mddev->open_mutex); 692 mutex_init(&mddev->reconfig_mutex); 693 mutex_init(&mddev->bitmap_info.mutex); 694 INIT_LIST_HEAD(&mddev->disks); 695 INIT_LIST_HEAD(&mddev->all_mddevs); 696 timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0); 697 atomic_set(&mddev->active, 1); 698 atomic_set(&mddev->openers, 0); 699 atomic_set(&mddev->active_io, 0); 700 spin_lock_init(&mddev->lock); 701 atomic_set(&mddev->flush_pending, 0); 702 init_waitqueue_head(&mddev->sb_wait); 703 init_waitqueue_head(&mddev->recovery_wait); 704 mddev->reshape_position = MaxSector; 705 mddev->reshape_backwards = 0; 706 mddev->last_sync_action = "none"; 707 mddev->resync_min = 0; 708 mddev->resync_max = MaxSector; 709 mddev->level = LEVEL_NONE; 710} 711EXPORT_SYMBOL_GPL(mddev_init); 712 713static struct mddev *mddev_find_locked(dev_t unit) 714{ 715 struct mddev *mddev; 716 717 list_for_each_entry(mddev, &all_mddevs, all_mddevs) 718 if (mddev->unit == unit) 719 return mddev; 720 721 return NULL; 722} 723 724static struct mddev *mddev_find(dev_t unit) 725{ 726 struct mddev *mddev; 727 728 if (MAJOR(unit) != MD_MAJOR) 729 unit &= ~((1 << MdpMinorShift) - 1); 730 731 spin_lock(&all_mddevs_lock); 732 mddev = mddev_find_locked(unit); 733 if (mddev) 734 mddev_get(mddev); 735 spin_unlock(&all_mddevs_lock); 736 737 return mddev; 738} 739 740static struct mddev *mddev_find_or_alloc(dev_t unit) 741{ 742 struct mddev *mddev, *new = NULL; 743 744 if (unit && MAJOR(unit) != MD_MAJOR) 745 unit &= ~((1<<MdpMinorShift)-1); 746 747 retry: 748 spin_lock(&all_mddevs_lock); 749 750 if (unit) { 751 mddev = mddev_find_locked(unit); 752 if (mddev) { 753 mddev_get(mddev); 754 spin_unlock(&all_mddevs_lock); 755 kfree(new); 756 return mddev; 757 } 758 759 if (new) { 760 list_add(&new->all_mddevs, &all_mddevs); 761 spin_unlock(&all_mddevs_lock); 762 new->hold_active = UNTIL_IOCTL; 763 return new; 764 } 765 } else if (new) { 766 /* find an unused unit number */ 767 static int next_minor = 512; 768 int start = next_minor; 769 int is_free = 0; 770 int dev = 0; 771 while (!is_free) { 772 dev = MKDEV(MD_MAJOR, next_minor); 773 next_minor++; 774 if (next_minor > MINORMASK) 775 next_minor = 0; 776 if (next_minor == start) { 777 /* Oh dear, all in use. */ 778 spin_unlock(&all_mddevs_lock); 779 kfree(new); 780 return NULL; 781 } 782 783 is_free = !mddev_find_locked(dev); 784 } 785 new->unit = dev; 786 new->md_minor = MINOR(dev); 787 new->hold_active = UNTIL_STOP; 788 list_add(&new->all_mddevs, &all_mddevs); 789 spin_unlock(&all_mddevs_lock); 790 return new; 791 } 792 spin_unlock(&all_mddevs_lock); 793 794 new = kzalloc(sizeof(*new), GFP_KERNEL); 795 if (!new) 796 return NULL; 797 798 new->unit = unit; 799 if (MAJOR(unit) == MD_MAJOR) 800 new->md_minor = MINOR(unit); 801 else 802 new->md_minor = MINOR(unit) >> MdpMinorShift; 803 804 mddev_init(new); 805 806 goto retry; 807} 808 809static struct attribute_group md_redundancy_group; 810 811void mddev_unlock(struct mddev *mddev) 812{ 813 if (mddev->to_remove) { 814 /* These cannot be removed under reconfig_mutex as 815 * an access to the files will try to take reconfig_mutex 816 * while holding the file unremovable, which leads to 817 * a deadlock. 
818 * So hold set sysfs_active while the remove in happeing, 819 * and anything else which might set ->to_remove or my 820 * otherwise change the sysfs namespace will fail with 821 * -EBUSY if sysfs_active is still set. 822 * We set sysfs_active under reconfig_mutex and elsewhere 823 * test it under the same mutex to ensure its correct value 824 * is seen. 825 */ 826 struct attribute_group *to_remove = mddev->to_remove; 827 mddev->to_remove = NULL; 828 mddev->sysfs_active = 1; 829 mutex_unlock(&mddev->reconfig_mutex); 830 831 if (mddev->kobj.sd) { 832 if (to_remove != &md_redundancy_group) 833 sysfs_remove_group(&mddev->kobj, to_remove); 834 if (mddev->pers == NULL || 835 mddev->pers->sync_request == NULL) { 836 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); 837 if (mddev->sysfs_action) 838 sysfs_put(mddev->sysfs_action); 839 if (mddev->sysfs_completed) 840 sysfs_put(mddev->sysfs_completed); 841 if (mddev->sysfs_degraded) 842 sysfs_put(mddev->sysfs_degraded); 843 mddev->sysfs_action = NULL; 844 mddev->sysfs_completed = NULL; 845 mddev->sysfs_degraded = NULL; 846 } 847 } 848 mddev->sysfs_active = 0; 849 } else 850 mutex_unlock(&mddev->reconfig_mutex); 851 852 /* As we've dropped the mutex we need a spinlock to 853 * make sure the thread doesn't disappear 854 */ 855 spin_lock(&pers_lock); 856 md_wakeup_thread(mddev->thread); 857 wake_up(&mddev->sb_wait); 858 spin_unlock(&pers_lock); 859} 860EXPORT_SYMBOL_GPL(mddev_unlock); 861 862struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr) 863{ 864 struct md_rdev *rdev; 865 866 rdev_for_each_rcu(rdev, mddev) 867 if (rdev->desc_nr == nr) 868 return rdev; 869 870 return NULL; 871} 872EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu); 873 874static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev) 875{ 876 struct md_rdev *rdev; 877 878 rdev_for_each(rdev, mddev) 879 if (rdev->bdev->bd_dev == dev) 880 return rdev; 881 882 return NULL; 883} 884 885struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev) 886{ 887 struct md_rdev *rdev; 888 889 rdev_for_each_rcu(rdev, mddev) 890 if (rdev->bdev->bd_dev == dev) 891 return rdev; 892 893 return NULL; 894} 895EXPORT_SYMBOL_GPL(md_find_rdev_rcu); 896 897static struct md_personality *find_pers(int level, char *clevel) 898{ 899 struct md_personality *pers; 900 list_for_each_entry(pers, &pers_list, list) { 901 if (level != LEVEL_NONE && pers->level == level) 902 return pers; 903 if (strcmp(pers->name, clevel)==0) 904 return pers; 905 } 906 return NULL; 907} 908 909/* return the offset of the super block in 512byte sectors */ 910static inline sector_t calc_dev_sboffset(struct md_rdev *rdev) 911{ 912 sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512; 913 return MD_NEW_SIZE_SECTORS(num_sectors); 914} 915 916static int alloc_disk_sb(struct md_rdev *rdev) 917{ 918 rdev->sb_page = alloc_page(GFP_KERNEL); 919 if (!rdev->sb_page) 920 return -ENOMEM; 921 return 0; 922} 923 924void md_rdev_clear(struct md_rdev *rdev) 925{ 926 if (rdev->sb_page) { 927 put_page(rdev->sb_page); 928 rdev->sb_loaded = 0; 929 rdev->sb_page = NULL; 930 rdev->sb_start = 0; 931 rdev->sectors = 0; 932 } 933 if (rdev->bb_page) { 934 put_page(rdev->bb_page); 935 rdev->bb_page = NULL; 936 } 937 badblocks_exit(&rdev->badblocks); 938} 939EXPORT_SYMBOL_GPL(md_rdev_clear); 940 941static void super_written(struct bio *bio) 942{ 943 struct md_rdev *rdev = bio->bi_private; 944 struct mddev *mddev = rdev->mddev; 945 946 if (bio->bi_status) { 947 pr_err("md: %s gets error=%d\n", __func__, 948 
blk_status_to_errno(bio->bi_status)); 949 md_error(mddev, rdev); 950 if (!test_bit(Faulty, &rdev->flags) 951 && (bio->bi_opf & MD_FAILFAST)) { 952 set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags); 953 set_bit(LastDev, &rdev->flags); 954 } 955 } else 956 clear_bit(LastDev, &rdev->flags); 957 958 bio_put(bio); 959 960 rdev_dec_pending(rdev, mddev); 961 962 if (atomic_dec_and_test(&mddev->pending_writes)) 963 wake_up(&mddev->sb_wait); 964} 965 966void md_super_write(struct mddev *mddev, struct md_rdev *rdev, 967 sector_t sector, int size, struct page *page) 968{ 969 /* write first size bytes of page to sector of rdev 970 * Increment mddev->pending_writes before returning 971 * and decrement it on completion, waking up sb_wait 972 * if zero is reached. 973 * If an error occurred, call md_error 974 */ 975 struct bio *bio; 976 int ff = 0; 977 978 if (!page) 979 return; 980 981 if (test_bit(Faulty, &rdev->flags)) 982 return; 983 984 bio = md_bio_alloc_sync(mddev); 985 986 atomic_inc(&rdev->nr_pending); 987 988 bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev); 989 bio->bi_iter.bi_sector = sector; 990 bio_add_page(bio, page, size, 0); 991 bio->bi_private = rdev; 992 bio->bi_end_io = super_written; 993 994 if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) && 995 test_bit(FailFast, &rdev->flags) && 996 !test_bit(LastDev, &rdev->flags)) 997 ff = MD_FAILFAST; 998 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff; 999 1000 atomic_inc(&mddev->pending_writes); 1001 submit_bio(bio); 1002} 1003 1004int md_super_wait(struct mddev *mddev) 1005{ 1006 /* wait for all superblock writes that were scheduled to complete */ 1007 wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0); 1008 if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags)) 1009 return -EAGAIN; 1010 return 0; 1011} 1012 1013int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, 1014 struct page *page, int op, int op_flags, bool metadata_op) 1015{ 1016 struct bio *bio = md_bio_alloc_sync(rdev->mddev); 1017 int ret; 1018 1019 if (metadata_op && rdev->meta_bdev) 1020 bio_set_dev(bio, rdev->meta_bdev); 1021 else 1022 bio_set_dev(bio, rdev->bdev); 1023 bio_set_op_attrs(bio, op, op_flags); 1024 if (metadata_op) 1025 bio->bi_iter.bi_sector = sector + rdev->sb_start; 1026 else if (rdev->mddev->reshape_position != MaxSector && 1027 (rdev->mddev->reshape_backwards == 1028 (sector >= rdev->mddev->reshape_position))) 1029 bio->bi_iter.bi_sector = sector + rdev->new_data_offset; 1030 else 1031 bio->bi_iter.bi_sector = sector + rdev->data_offset; 1032 bio_add_page(bio, page, size, 0); 1033 1034 submit_bio_wait(bio); 1035 1036 ret = !bio->bi_status; 1037 bio_put(bio); 1038 return ret; 1039} 1040EXPORT_SYMBOL_GPL(sync_page_io); 1041 1042static int read_disk_sb(struct md_rdev *rdev, int size) 1043{ 1044 char b[BDEVNAME_SIZE]; 1045 1046 if (rdev->sb_loaded) 1047 return 0; 1048 1049 if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true)) 1050 goto fail; 1051 rdev->sb_loaded = 1; 1052 return 0; 1053 1054fail: 1055 pr_err("md: disabled device %s, could not read superblock.\n", 1056 bdevname(rdev->bdev,b)); 1057 return -EINVAL; 1058} 1059 1060static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2) 1061{ 1062 return sb1->set_uuid0 == sb2->set_uuid0 && 1063 sb1->set_uuid1 == sb2->set_uuid1 && 1064 sb1->set_uuid2 == sb2->set_uuid2 && 1065 sb1->set_uuid3 == sb2->set_uuid3; 1066} 1067 1068static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2) 1069{ 1070 int ret; 1071 mdp_super_t *tmp1, 
*tmp2; 1072 1073 tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL); 1074 tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL); 1075 1076 if (!tmp1 || !tmp2) { 1077 ret = 0; 1078 goto abort; 1079 } 1080 1081 *tmp1 = *sb1; 1082 *tmp2 = *sb2; 1083 1084 /* 1085 * nr_disks is not constant 1086 */ 1087 tmp1->nr_disks = 0; 1088 tmp2->nr_disks = 0; 1089 1090 ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0); 1091abort: 1092 kfree(tmp1); 1093 kfree(tmp2); 1094 return ret; 1095} 1096 1097static u32 md_csum_fold(u32 csum) 1098{ 1099 csum = (csum & 0xffff) + (csum >> 16); 1100 return (csum & 0xffff) + (csum >> 16); 1101} 1102 1103static unsigned int calc_sb_csum(mdp_super_t *sb) 1104{ 1105 u64 newcsum = 0; 1106 u32 *sb32 = (u32*)sb; 1107 int i; 1108 unsigned int disk_csum, csum; 1109 1110 disk_csum = sb->sb_csum; 1111 sb->sb_csum = 0; 1112 1113 for (i = 0; i < MD_SB_BYTES/4 ; i++) 1114 newcsum += sb32[i]; 1115 csum = (newcsum & 0xffffffff) + (newcsum>>32); 1116 1117#ifdef CONFIG_ALPHA 1118 /* This used to use csum_partial, which was wrong for several 1119 * reasons including that different results are returned on 1120 * different architectures. It isn't critical that we get exactly 1121 * the same return value as before (we always csum_fold before 1122 * testing, and that removes any differences). However as we 1123 * know that csum_partial always returned a 16bit value on 1124 * alphas, do a fold to maximise conformity to previous behaviour. 1125 */ 1126 sb->sb_csum = md_csum_fold(disk_csum); 1127#else 1128 sb->sb_csum = disk_csum; 1129#endif 1130 return csum; 1131} 1132 1133/* 1134 * Handle superblock details. 1135 * We want to be able to handle multiple superblock formats 1136 * so we have a common interface to them all, and an array of 1137 * different handlers. 1138 * We rely on user-space to write the initial superblock, and support 1139 * reading and updating of superblocks. 1140 * Interface methods are: 1141 * int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version) 1142 * loads and validates a superblock on dev. 1143 * if refdev != NULL, compare superblocks on both devices 1144 * Return: 1145 * 0 - dev has a superblock that is compatible with refdev 1146 * 1 - dev has a superblock that is compatible and newer than refdev 1147 * so dev should be used as the refdev in future 1148 * -EINVAL superblock incompatible or invalid 1149 * -othererror e.g. -EIO 1150 * 1151 * int validate_super(struct mddev *mddev, struct md_rdev *dev) 1152 * Verify that dev is acceptable into mddev. 1153 * The first time, mddev->raid_disks will be 0, and data from 1154 * dev should be merged in. Subsequent calls check that dev 1155 * is new enough. Return 0 or -EINVAL 1156 * 1157 * void sync_super(struct mddev *mddev, struct md_rdev *dev) 1158 * Update the superblock for rdev with data in mddev 1159 * This does not write to disc. 1160 * 1161 */ 1162 1163struct super_type { 1164 char *name; 1165 struct module *owner; 1166 int (*load_super)(struct md_rdev *rdev, 1167 struct md_rdev *refdev, 1168 int minor_version); 1169 int (*validate_super)(struct mddev *mddev, 1170 struct md_rdev *freshest, 1171 struct md_rdev *rdev); 1172 void (*sync_super)(struct mddev *mddev, 1173 struct md_rdev *rdev); 1174 unsigned long long (*rdev_size_change)(struct md_rdev *rdev, 1175 sector_t num_sectors); 1176 int (*allow_new_offset)(struct md_rdev *rdev, 1177 unsigned long long new_offset); 1178}; 1179 1180/* 1181 * Check that the given mddev has no bitmap. 
1182 * 1183 * This function is called from the run method of all personalities that do not 1184 * support bitmaps. It prints an error message and returns non-zero if mddev 1185 * has a bitmap. Otherwise, it returns 0. 1186 * 1187 */ 1188int md_check_no_bitmap(struct mddev *mddev) 1189{ 1190 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset) 1191 return 0; 1192 pr_warn("%s: bitmaps are not supported for %s\n", 1193 mdname(mddev), mddev->pers->name); 1194 return 1; 1195} 1196EXPORT_SYMBOL(md_check_no_bitmap); 1197 1198/* 1199 * load_super for 0.90.0 1200 */ 1201static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version) 1202{ 1203 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 1204 mdp_super_t *sb; 1205 int ret; 1206 bool spare_disk = true; 1207 1208 /* 1209 * Calculate the position of the superblock (512byte sectors), 1210 * it's at the end of the disk. 1211 * 1212 * It also happens to be a multiple of 4Kb. 1213 */ 1214 rdev->sb_start = calc_dev_sboffset(rdev); 1215 1216 ret = read_disk_sb(rdev, MD_SB_BYTES); 1217 if (ret) 1218 return ret; 1219 1220 ret = -EINVAL; 1221 1222 bdevname(rdev->bdev, b); 1223 sb = page_address(rdev->sb_page); 1224 1225 if (sb->md_magic != MD_SB_MAGIC) { 1226 pr_warn("md: invalid raid superblock magic on %s\n", b); 1227 goto abort; 1228 } 1229 1230 if (sb->major_version != 0 || 1231 sb->minor_version < 90 || 1232 sb->minor_version > 91) { 1233 pr_warn("Bad version number %d.%d on %s\n", 1234 sb->major_version, sb->minor_version, b); 1235 goto abort; 1236 } 1237 1238 if (sb->raid_disks <= 0) 1239 goto abort; 1240 1241 if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) { 1242 pr_warn("md: invalid superblock checksum on %s\n", b); 1243 goto abort; 1244 } 1245 1246 rdev->preferred_minor = sb->md_minor; 1247 rdev->data_offset = 0; 1248 rdev->new_data_offset = 0; 1249 rdev->sb_size = MD_SB_BYTES; 1250 rdev->badblocks.shift = -1; 1251 1252 if (sb->level == LEVEL_MULTIPATH) 1253 rdev->desc_nr = -1; 1254 else 1255 rdev->desc_nr = sb->this_disk.number; 1256 1257 /* not spare disk, or LEVEL_MULTIPATH */ 1258 if (sb->level == LEVEL_MULTIPATH || 1259 (rdev->desc_nr >= 0 && 1260 rdev->desc_nr < MD_SB_DISKS && 1261 sb->disks[rdev->desc_nr].state & 1262 ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))) 1263 spare_disk = false; 1264 1265 if (!refdev) { 1266 if (!spare_disk) 1267 ret = 1; 1268 else 1269 ret = 0; 1270 } else { 1271 __u64 ev1, ev2; 1272 mdp_super_t *refsb = page_address(refdev->sb_page); 1273 if (!md_uuid_equal(refsb, sb)) { 1274 pr_warn("md: %s has different UUID to %s\n", 1275 b, bdevname(refdev->bdev,b2)); 1276 goto abort; 1277 } 1278 if (!md_sb_equal(refsb, sb)) { 1279 pr_warn("md: %s has same UUID but different superblock to %s\n", 1280 b, bdevname(refdev->bdev, b2)); 1281 goto abort; 1282 } 1283 ev1 = md_event(sb); 1284 ev2 = md_event(refsb); 1285 1286 if (!spare_disk && ev1 > ev2) 1287 ret = 1; 1288 else 1289 ret = 0; 1290 } 1291 rdev->sectors = rdev->sb_start; 1292 /* Limit to 4TB as metadata cannot record more than that. 1293 * (not needed for Linear and RAID0 as metadata doesn't 1294 * record this size) 1295 */ 1296 if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1) 1297 rdev->sectors = (sector_t)(2ULL << 32) - 2; 1298 1299 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) 1300 /* "this cannot possibly happen" ... 
*/ 1301 ret = -EINVAL; 1302 1303 abort: 1304 return ret; 1305} 1306 1307/* 1308 * validate_super for 0.90.0 1309 * note: we are not using "freshest" for 0.9 superblock 1310 */ 1311static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev) 1312{ 1313 mdp_disk_t *desc; 1314 mdp_super_t *sb = page_address(rdev->sb_page); 1315 __u64 ev1 = md_event(sb); 1316 1317 rdev->raid_disk = -1; 1318 clear_bit(Faulty, &rdev->flags); 1319 clear_bit(In_sync, &rdev->flags); 1320 clear_bit(Bitmap_sync, &rdev->flags); 1321 clear_bit(WriteMostly, &rdev->flags); 1322 1323 if (mddev->raid_disks == 0) { 1324 mddev->major_version = 0; 1325 mddev->minor_version = sb->minor_version; 1326 mddev->patch_version = sb->patch_version; 1327 mddev->external = 0; 1328 mddev->chunk_sectors = sb->chunk_size >> 9; 1329 mddev->ctime = sb->ctime; 1330 mddev->utime = sb->utime; 1331 mddev->level = sb->level; 1332 mddev->clevel[0] = 0; 1333 mddev->layout = sb->layout; 1334 mddev->raid_disks = sb->raid_disks; 1335 mddev->dev_sectors = ((sector_t)sb->size) * 2; 1336 mddev->events = ev1; 1337 mddev->bitmap_info.offset = 0; 1338 mddev->bitmap_info.space = 0; 1339 /* bitmap can use 60 K after the 4K superblocks */ 1340 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 1341 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); 1342 mddev->reshape_backwards = 0; 1343 1344 if (mddev->minor_version >= 91) { 1345 mddev->reshape_position = sb->reshape_position; 1346 mddev->delta_disks = sb->delta_disks; 1347 mddev->new_level = sb->new_level; 1348 mddev->new_layout = sb->new_layout; 1349 mddev->new_chunk_sectors = sb->new_chunk >> 9; 1350 if (mddev->delta_disks < 0) 1351 mddev->reshape_backwards = 1; 1352 } else { 1353 mddev->reshape_position = MaxSector; 1354 mddev->delta_disks = 0; 1355 mddev->new_level = mddev->level; 1356 mddev->new_layout = mddev->layout; 1357 mddev->new_chunk_sectors = mddev->chunk_sectors; 1358 } 1359 if (mddev->level == 0) 1360 mddev->layout = -1; 1361 1362 if (sb->state & (1<<MD_SB_CLEAN)) 1363 mddev->recovery_cp = MaxSector; 1364 else { 1365 if (sb->events_hi == sb->cp_events_hi && 1366 sb->events_lo == sb->cp_events_lo) { 1367 mddev->recovery_cp = sb->recovery_cp; 1368 } else 1369 mddev->recovery_cp = 0; 1370 } 1371 1372 memcpy(mddev->uuid+0, &sb->set_uuid0, 4); 1373 memcpy(mddev->uuid+4, &sb->set_uuid1, 4); 1374 memcpy(mddev->uuid+8, &sb->set_uuid2, 4); 1375 memcpy(mddev->uuid+12,&sb->set_uuid3, 4); 1376 1377 mddev->max_disks = MD_SB_DISKS; 1378 1379 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && 1380 mddev->bitmap_info.file == NULL) { 1381 mddev->bitmap_info.offset = 1382 mddev->bitmap_info.default_offset; 1383 mddev->bitmap_info.space = 1384 mddev->bitmap_info.default_space; 1385 } 1386 1387 } else if (mddev->pers == NULL) { 1388 /* Insist on good event counter while assembling, except 1389 * for spares (which don't need an event count) */ 1390 ++ev1; 1391 if (sb->disks[rdev->desc_nr].state & ( 1392 (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))) 1393 if (ev1 < mddev->events) 1394 return -EINVAL; 1395 } else if (mddev->bitmap) { 1396 /* if adding to array with a bitmap, then we can accept an 1397 * older device ... but not too old. 
1398 */ 1399 if (ev1 < mddev->bitmap->events_cleared) 1400 return 0; 1401 if (ev1 < mddev->events) 1402 set_bit(Bitmap_sync, &rdev->flags); 1403 } else { 1404 if (ev1 < mddev->events) 1405 /* just a hot-add of a new device, leave raid_disk at -1 */ 1406 return 0; 1407 } 1408 1409 if (mddev->level != LEVEL_MULTIPATH) { 1410 desc = sb->disks + rdev->desc_nr; 1411 1412 if (desc->state & (1<<MD_DISK_FAULTY)) 1413 set_bit(Faulty, &rdev->flags); 1414 else if (desc->state & (1<<MD_DISK_SYNC) /* && 1415 desc->raid_disk < mddev->raid_disks */) { 1416 set_bit(In_sync, &rdev->flags); 1417 rdev->raid_disk = desc->raid_disk; 1418 rdev->saved_raid_disk = desc->raid_disk; 1419 } else if (desc->state & (1<<MD_DISK_ACTIVE)) { 1420 /* active but not in sync implies recovery up to 1421 * reshape position. We don't know exactly where 1422 * that is, so set to zero for now */ 1423 if (mddev->minor_version >= 91) { 1424 rdev->recovery_offset = 0; 1425 rdev->raid_disk = desc->raid_disk; 1426 } 1427 } 1428 if (desc->state & (1<<MD_DISK_WRITEMOSTLY)) 1429 set_bit(WriteMostly, &rdev->flags); 1430 if (desc->state & (1<<MD_DISK_FAILFAST)) 1431 set_bit(FailFast, &rdev->flags); 1432 } else /* MULTIPATH are always insync */ 1433 set_bit(In_sync, &rdev->flags); 1434 return 0; 1435} 1436 1437/* 1438 * sync_super for 0.90.0 1439 */ 1440static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev) 1441{ 1442 mdp_super_t *sb; 1443 struct md_rdev *rdev2; 1444 int next_spare = mddev->raid_disks; 1445 1446 /* make rdev->sb match mddev data.. 1447 * 1448 * 1/ zero out disks 1449 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare); 1450 * 3/ any empty disks < next_spare become removed 1451 * 1452 * disks[0] gets initialised to REMOVED because 1453 * we cannot be sure from other fields if it has 1454 * been initialised or not. 
1455 */ 1456 int i; 1457 int active=0, working=0,failed=0,spare=0,nr_disks=0; 1458 1459 rdev->sb_size = MD_SB_BYTES; 1460 1461 sb = page_address(rdev->sb_page); 1462 1463 memset(sb, 0, sizeof(*sb)); 1464 1465 sb->md_magic = MD_SB_MAGIC; 1466 sb->major_version = mddev->major_version; 1467 sb->patch_version = mddev->patch_version; 1468 sb->gvalid_words = 0; /* ignored */ 1469 memcpy(&sb->set_uuid0, mddev->uuid+0, 4); 1470 memcpy(&sb->set_uuid1, mddev->uuid+4, 4); 1471 memcpy(&sb->set_uuid2, mddev->uuid+8, 4); 1472 memcpy(&sb->set_uuid3, mddev->uuid+12,4); 1473 1474 sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); 1475 sb->level = mddev->level; 1476 sb->size = mddev->dev_sectors / 2; 1477 sb->raid_disks = mddev->raid_disks; 1478 sb->md_minor = mddev->md_minor; 1479 sb->not_persistent = 0; 1480 sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); 1481 sb->state = 0; 1482 sb->events_hi = (mddev->events>>32); 1483 sb->events_lo = (u32)mddev->events; 1484 1485 if (mddev->reshape_position == MaxSector) 1486 sb->minor_version = 90; 1487 else { 1488 sb->minor_version = 91; 1489 sb->reshape_position = mddev->reshape_position; 1490 sb->new_level = mddev->new_level; 1491 sb->delta_disks = mddev->delta_disks; 1492 sb->new_layout = mddev->new_layout; 1493 sb->new_chunk = mddev->new_chunk_sectors << 9; 1494 } 1495 mddev->minor_version = sb->minor_version; 1496 if (mddev->in_sync) 1497 { 1498 sb->recovery_cp = mddev->recovery_cp; 1499 sb->cp_events_hi = (mddev->events>>32); 1500 sb->cp_events_lo = (u32)mddev->events; 1501 if (mddev->recovery_cp == MaxSector) 1502 sb->state = (1<< MD_SB_CLEAN); 1503 } else 1504 sb->recovery_cp = 0; 1505 1506 sb->layout = mddev->layout; 1507 sb->chunk_size = mddev->chunk_sectors << 9; 1508 1509 if (mddev->bitmap && mddev->bitmap_info.file == NULL) 1510 sb->state |= (1<<MD_SB_BITMAP_PRESENT); 1511 1512 sb->disks[0].state = (1<<MD_DISK_REMOVED); 1513 rdev_for_each(rdev2, mddev) { 1514 mdp_disk_t *d; 1515 int desc_nr; 1516 int is_active = test_bit(In_sync, &rdev2->flags); 1517 1518 if (rdev2->raid_disk >= 0 && 1519 sb->minor_version >= 91) 1520 /* we have nowhere to store the recovery_offset, 1521 * but if it is not below the reshape_position, 1522 * we can piggy-back on that. 
1523 */ 1524 is_active = 1; 1525 if (rdev2->raid_disk < 0 || 1526 test_bit(Faulty, &rdev2->flags)) 1527 is_active = 0; 1528 if (is_active) 1529 desc_nr = rdev2->raid_disk; 1530 else 1531 desc_nr = next_spare++; 1532 rdev2->desc_nr = desc_nr; 1533 d = &sb->disks[rdev2->desc_nr]; 1534 nr_disks++; 1535 d->number = rdev2->desc_nr; 1536 d->major = MAJOR(rdev2->bdev->bd_dev); 1537 d->minor = MINOR(rdev2->bdev->bd_dev); 1538 if (is_active) 1539 d->raid_disk = rdev2->raid_disk; 1540 else 1541 d->raid_disk = rdev2->desc_nr; /* compatibility */ 1542 if (test_bit(Faulty, &rdev2->flags)) 1543 d->state = (1<<MD_DISK_FAULTY); 1544 else if (is_active) { 1545 d->state = (1<<MD_DISK_ACTIVE); 1546 if (test_bit(In_sync, &rdev2->flags)) 1547 d->state |= (1<<MD_DISK_SYNC); 1548 active++; 1549 working++; 1550 } else { 1551 d->state = 0; 1552 spare++; 1553 working++; 1554 } 1555 if (test_bit(WriteMostly, &rdev2->flags)) 1556 d->state |= (1<<MD_DISK_WRITEMOSTLY); 1557 if (test_bit(FailFast, &rdev2->flags)) 1558 d->state |= (1<<MD_DISK_FAILFAST); 1559 } 1560 /* now set the "removed" and "faulty" bits on any missing devices */ 1561 for (i=0 ; i < mddev->raid_disks ; i++) { 1562 mdp_disk_t *d = &sb->disks[i]; 1563 if (d->state == 0 && d->number == 0) { 1564 d->number = i; 1565 d->raid_disk = i; 1566 d->state = (1<<MD_DISK_REMOVED); 1567 d->state |= (1<<MD_DISK_FAULTY); 1568 failed++; 1569 } 1570 } 1571 sb->nr_disks = nr_disks; 1572 sb->active_disks = active; 1573 sb->working_disks = working; 1574 sb->failed_disks = failed; 1575 sb->spare_disks = spare; 1576 1577 sb->this_disk = sb->disks[rdev->desc_nr]; 1578 sb->sb_csum = calc_sb_csum(sb); 1579} 1580 1581/* 1582 * rdev_size_change for 0.90.0 1583 */ 1584static unsigned long long 1585super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) 1586{ 1587 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) 1588 return 0; /* component must fit device */ 1589 if (rdev->mddev->bitmap_info.offset) 1590 return 0; /* can't move bitmap */ 1591 rdev->sb_start = calc_dev_sboffset(rdev); 1592 if (!num_sectors || num_sectors > rdev->sb_start) 1593 num_sectors = rdev->sb_start; 1594 /* Limit to 4TB as metadata cannot record more than that. 1595 * 4TB == 2^32 KB, or 2*2^32 sectors. 
1596 */ 1597 if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) 1598 num_sectors = (sector_t)(2ULL << 32) - 2; 1599 do { 1600 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 1601 rdev->sb_page); 1602 } while (md_super_wait(rdev->mddev) < 0); 1603 return num_sectors; 1604} 1605 1606static int 1607super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset) 1608{ 1609 /* non-zero offset changes not possible with v0.90 */ 1610 return new_offset == 0; 1611} 1612 1613/* 1614 * version 1 superblock 1615 */ 1616 1617static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb) 1618{ 1619 __le32 disk_csum; 1620 u32 csum; 1621 unsigned long long newcsum; 1622 int size = 256 + le32_to_cpu(sb->max_dev)*2; 1623 __le32 *isuper = (__le32*)sb; 1624 1625 disk_csum = sb->sb_csum; 1626 sb->sb_csum = 0; 1627 newcsum = 0; 1628 for (; size >= 4; size -= 4) 1629 newcsum += le32_to_cpu(*isuper++); 1630 1631 if (size == 2) 1632 newcsum += le16_to_cpu(*(__le16*) isuper); 1633 1634 csum = (newcsum & 0xffffffff) + (newcsum >> 32); 1635 sb->sb_csum = disk_csum; 1636 return cpu_to_le32(csum); 1637} 1638 1639static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version) 1640{ 1641 struct mdp_superblock_1 *sb; 1642 int ret; 1643 sector_t sb_start; 1644 sector_t sectors; 1645 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 1646 int bmask; 1647 bool spare_disk = true; 1648 1649 /* 1650 * Calculate the position of the superblock in 512byte sectors. 1651 * It is always aligned to a 4K boundary and 1652 * depeding on minor_version, it can be: 1653 * 0: At least 8K, but less than 12K, from end of device 1654 * 1: At start of device 1655 * 2: 4K from start of device. 1656 */ 1657 switch(minor_version) { 1658 case 0: 1659 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9; 1660 sb_start -= 8*2; 1661 sb_start &= ~(sector_t)(4*2-1); 1662 break; 1663 case 1: 1664 sb_start = 0; 1665 break; 1666 case 2: 1667 sb_start = 8; 1668 break; 1669 default: 1670 return -EINVAL; 1671 } 1672 rdev->sb_start = sb_start; 1673 1674 /* superblock is rarely larger than 1K, but it can be larger, 1675 * and it is safe to read 4k, so we do that 1676 */ 1677 ret = read_disk_sb(rdev, 4096); 1678 if (ret) return ret; 1679 1680 sb = page_address(rdev->sb_page); 1681 1682 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) || 1683 sb->major_version != cpu_to_le32(1) || 1684 le32_to_cpu(sb->max_dev) > (4096-256)/2 || 1685 le64_to_cpu(sb->super_offset) != rdev->sb_start || 1686 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0) 1687 return -EINVAL; 1688 1689 if (calc_sb_1_csum(sb) != sb->sb_csum) { 1690 pr_warn("md: invalid superblock checksum on %s\n", 1691 bdevname(rdev->bdev,b)); 1692 return -EINVAL; 1693 } 1694 if (le64_to_cpu(sb->data_size) < 10) { 1695 pr_warn("md: data_size too small on %s\n", 1696 bdevname(rdev->bdev,b)); 1697 return -EINVAL; 1698 } 1699 if (sb->pad0 || 1700 sb->pad3[0] || 1701 memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1]))) 1702 /* Some padding is non-zero, might be a new feature */ 1703 return -EINVAL; 1704 1705 rdev->preferred_minor = 0xffff; 1706 rdev->data_offset = le64_to_cpu(sb->data_offset); 1707 rdev->new_data_offset = rdev->data_offset; 1708 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) && 1709 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET)) 1710 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset); 1711 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); 1712 1713 rdev->sb_size = 
le32_to_cpu(sb->max_dev) * 2 + 256; 1714 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; 1715 if (rdev->sb_size & bmask) 1716 rdev->sb_size = (rdev->sb_size | bmask) + 1; 1717 1718 if (minor_version 1719 && rdev->data_offset < sb_start + (rdev->sb_size/512)) 1720 return -EINVAL; 1721 if (minor_version 1722 && rdev->new_data_offset < sb_start + (rdev->sb_size/512)) 1723 return -EINVAL; 1724 1725 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH)) 1726 rdev->desc_nr = -1; 1727 else 1728 rdev->desc_nr = le32_to_cpu(sb->dev_number); 1729 1730 if (!rdev->bb_page) { 1731 rdev->bb_page = alloc_page(GFP_KERNEL); 1732 if (!rdev->bb_page) 1733 return -ENOMEM; 1734 } 1735 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) && 1736 rdev->badblocks.count == 0) { 1737 /* need to load the bad block list. 1738 * Currently we limit it to one page. 1739 */ 1740 s32 offset; 1741 sector_t bb_sector; 1742 __le64 *bbp; 1743 int i; 1744 int sectors = le16_to_cpu(sb->bblog_size); 1745 if (sectors > (PAGE_SIZE / 512)) 1746 return -EINVAL; 1747 offset = le32_to_cpu(sb->bblog_offset); 1748 if (offset == 0) 1749 return -EINVAL; 1750 bb_sector = (long long)offset; 1751 if (!sync_page_io(rdev, bb_sector, sectors << 9, 1752 rdev->bb_page, REQ_OP_READ, 0, true)) 1753 return -EIO; 1754 bbp = (__le64 *)page_address(rdev->bb_page); 1755 rdev->badblocks.shift = sb->bblog_shift; 1756 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) { 1757 u64 bb = le64_to_cpu(*bbp); 1758 int count = bb & (0x3ff); 1759 u64 sector = bb >> 10; 1760 sector <<= sb->bblog_shift; 1761 count <<= sb->bblog_shift; 1762 if (bb + 1 == 0) 1763 break; 1764 if (badblocks_set(&rdev->badblocks, sector, count, 1)) 1765 return -EINVAL; 1766 } 1767 } else if (sb->bblog_offset != 0) 1768 rdev->badblocks.shift = 0; 1769 1770 if ((le32_to_cpu(sb->feature_map) & 1771 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) { 1772 rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset); 1773 rdev->ppl.size = le16_to_cpu(sb->ppl.size); 1774 rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset; 1775 } 1776 1777 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) && 1778 sb->level != 0) 1779 return -EINVAL; 1780 1781 /* not spare disk, or LEVEL_MULTIPATH */ 1782 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) || 1783 (rdev->desc_nr >= 0 && 1784 rdev->desc_nr < le32_to_cpu(sb->max_dev) && 1785 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX || 1786 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))) 1787 spare_disk = false; 1788 1789 if (!refdev) { 1790 if (!spare_disk) 1791 ret = 1; 1792 else 1793 ret = 0; 1794 } else { 1795 __u64 ev1, ev2; 1796 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page); 1797 1798 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 || 1799 sb->level != refsb->level || 1800 sb->layout != refsb->layout || 1801 sb->chunksize != refsb->chunksize) { 1802 pr_warn("md: %s has strangely different superblock to %s\n", 1803 bdevname(rdev->bdev,b), 1804 bdevname(refdev->bdev,b2)); 1805 return -EINVAL; 1806 } 1807 ev1 = le64_to_cpu(sb->events); 1808 ev2 = le64_to_cpu(refsb->events); 1809 1810 if (!spare_disk && ev1 > ev2) 1811 ret = 1; 1812 else 1813 ret = 0; 1814 } 1815 if (minor_version) { 1816 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9); 1817 sectors -= rdev->data_offset; 1818 } else 1819 sectors = rdev->sb_start; 1820 if (sectors < le64_to_cpu(sb->data_size)) 1821 return -EINVAL; 1822 rdev->sectors = le64_to_cpu(sb->data_size); 1823 return ret; 1824} 1825 1826static int 
super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev) 1827{ 1828 struct mdp_superblock_1 *sb = page_address(rdev->sb_page); 1829 __u64 ev1 = le64_to_cpu(sb->events); 1830 1831 rdev->raid_disk = -1; 1832 clear_bit(Faulty, &rdev->flags); 1833 clear_bit(In_sync, &rdev->flags); 1834 clear_bit(Bitmap_sync, &rdev->flags); 1835 clear_bit(WriteMostly, &rdev->flags); 1836 1837 if (mddev->raid_disks == 0) { 1838 mddev->major_version = 1; 1839 mddev->patch_version = 0; 1840 mddev->external = 0; 1841 mddev->chunk_sectors = le32_to_cpu(sb->chunksize); 1842 mddev->ctime = le64_to_cpu(sb->ctime); 1843 mddev->utime = le64_to_cpu(sb->utime); 1844 mddev->level = le32_to_cpu(sb->level); 1845 mddev->clevel[0] = 0; 1846 mddev->layout = le32_to_cpu(sb->layout); 1847 mddev->raid_disks = le32_to_cpu(sb->raid_disks); 1848 mddev->dev_sectors = le64_to_cpu(sb->size); 1849 mddev->events = ev1; 1850 mddev->bitmap_info.offset = 0; 1851 mddev->bitmap_info.space = 0; 1852 /* Default location for bitmap is 1K after superblock 1853 * using 3K - total of 4K 1854 */ 1855 mddev->bitmap_info.default_offset = 1024 >> 9; 1856 mddev->bitmap_info.default_space = (4096-1024) >> 9; 1857 mddev->reshape_backwards = 0; 1858 1859 mddev->recovery_cp = le64_to_cpu(sb->resync_offset); 1860 memcpy(mddev->uuid, sb->set_uuid, 16); 1861 1862 mddev->max_disks = (4096-256)/2; 1863 1864 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) && 1865 mddev->bitmap_info.file == NULL) { 1866 mddev->bitmap_info.offset = 1867 (__s32)le32_to_cpu(sb->bitmap_offset); 1868 /* Metadata doesn't record how much space is available. 1869 * For 1.0, we assume we can use up to the superblock 1870 * if before, else to 4K beyond superblock. 1871 * For others, assume no change is possible. 
1872 */ 1873 if (mddev->minor_version > 0) 1874 mddev->bitmap_info.space = 0; 1875 else if (mddev->bitmap_info.offset > 0) 1876 mddev->bitmap_info.space = 1877 8 - mddev->bitmap_info.offset; 1878 else 1879 mddev->bitmap_info.space = 1880 -mddev->bitmap_info.offset; 1881 } 1882 1883 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { 1884 mddev->reshape_position = le64_to_cpu(sb->reshape_position); 1885 mddev->delta_disks = le32_to_cpu(sb->delta_disks); 1886 mddev->new_level = le32_to_cpu(sb->new_level); 1887 mddev->new_layout = le32_to_cpu(sb->new_layout); 1888 mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk); 1889 if (mddev->delta_disks < 0 || 1890 (mddev->delta_disks == 0 && 1891 (le32_to_cpu(sb->feature_map) 1892 & MD_FEATURE_RESHAPE_BACKWARDS))) 1893 mddev->reshape_backwards = 1; 1894 } else { 1895 mddev->reshape_position = MaxSector; 1896 mddev->delta_disks = 0; 1897 mddev->new_level = mddev->level; 1898 mddev->new_layout = mddev->layout; 1899 mddev->new_chunk_sectors = mddev->chunk_sectors; 1900 } 1901 1902 if (mddev->level == 0 && 1903 !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT)) 1904 mddev->layout = -1; 1905 1906 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL) 1907 set_bit(MD_HAS_JOURNAL, &mddev->flags); 1908 1909 if (le32_to_cpu(sb->feature_map) & 1910 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) { 1911 if (le32_to_cpu(sb->feature_map) & 1912 (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL)) 1913 return -EINVAL; 1914 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) && 1915 (le32_to_cpu(sb->feature_map) & 1916 MD_FEATURE_MULTIPLE_PPLS)) 1917 return -EINVAL; 1918 set_bit(MD_HAS_PPL, &mddev->flags); 1919 } 1920 } else if (mddev->pers == NULL) { 1921 /* Insist of good event counter while assembling, except for 1922 * spares (which don't need an event count). 1923 * Similar to mdadm, we allow event counter difference of 1 1924 * from the freshest device. 1925 */ 1926 if (rdev->desc_nr >= 0 && 1927 rdev->desc_nr < le32_to_cpu(sb->max_dev) && 1928 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX || 1929 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)) 1930 if (ev1 + 1 < mddev->events) 1931 return -EINVAL; 1932 } else if (mddev->bitmap) { 1933 /* If adding to array with a bitmap, then we can accept an 1934 * older device, but not too old. 1935 */ 1936 if (ev1 < mddev->bitmap->events_cleared) 1937 return 0; 1938 if (ev1 < mddev->events) 1939 set_bit(Bitmap_sync, &rdev->flags); 1940 } else { 1941 if (ev1 < mddev->events) 1942 /* just a hot-add of a new device, leave raid_disk at -1 */ 1943 return 0; 1944 } 1945 if (mddev->level != LEVEL_MULTIPATH) { 1946 int role; 1947 if (rdev->desc_nr < 0 || 1948 rdev->desc_nr >= le32_to_cpu(sb->max_dev)) { 1949 role = MD_DISK_ROLE_SPARE; 1950 rdev->desc_nr = -1; 1951 } else if (mddev->pers == NULL && freshest && ev1 < mddev->events) { 1952 /* 1953 * If we are assembling, and our event counter is smaller than the 1954 * highest event counter, we cannot trust our superblock about the role. 1955 * It could happen that our rdev was marked as Faulty, and all other 1956 * superblocks were updated with +1 event counter. 1957 * Then, before the next superblock update, which typically happens when 1958 * remove_and_add_spares() removes the device from the array, there was 1959 * a crash or reboot. 1960 * If we allow current rdev without consulting the freshest superblock, 1961 * we could cause data corruption. 
1962 * Note that in this case our event counter is smaller by 1 than the 1963 * highest, otherwise, this rdev would not be allowed into array; 1964 * both kernel and mdadm allow event counter difference of 1. 1965 */ 1966 struct mdp_superblock_1 *freshest_sb = page_address(freshest->sb_page); 1967 u32 freshest_max_dev = le32_to_cpu(freshest_sb->max_dev); 1968 1969 if (rdev->desc_nr >= freshest_max_dev) { 1970 /* this is unexpected, better not proceed */ 1971 pr_warn("md: %s: rdev[%pg]: desc_nr(%d) >= freshest(%pg)->sb->max_dev(%u)\n", 1972 mdname(mddev), rdev->bdev, rdev->desc_nr, 1973 freshest->bdev, freshest_max_dev); 1974 return -EUCLEAN; 1975 } 1976 1977 role = le16_to_cpu(freshest_sb->dev_roles[rdev->desc_nr]); 1978 pr_debug("md: %s: rdev[%pg]: role=%d(0x%x) according to freshest %pg\n", 1979 mdname(mddev), rdev->bdev, role, role, freshest->bdev); 1980 } else { 1981 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); 1982 } 1983 switch(role) { 1984 case MD_DISK_ROLE_SPARE: /* spare */ 1985 break; 1986 case MD_DISK_ROLE_FAULTY: /* faulty */ 1987 set_bit(Faulty, &rdev->flags); 1988 break; 1989 case MD_DISK_ROLE_JOURNAL: /* journal device */ 1990 if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) { 1991 /* journal device without journal feature */ 1992 pr_warn("md: journal device provided without journal feature, ignoring the device\n"); 1993 return -EINVAL; 1994 } 1995 set_bit(Journal, &rdev->flags); 1996 rdev->journal_tail = le64_to_cpu(sb->journal_tail); 1997 rdev->raid_disk = 0; 1998 break; 1999 default: 2000 rdev->saved_raid_disk = role; 2001 if ((le32_to_cpu(sb->feature_map) & 2002 MD_FEATURE_RECOVERY_OFFSET)) { 2003 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset); 2004 if (!(le32_to_cpu(sb->feature_map) & 2005 MD_FEATURE_RECOVERY_BITMAP)) 2006 rdev->saved_raid_disk = -1; 2007 } else { 2008 /* 2009 * If the array is FROZEN, then the device can't 2010 * be in_sync with rest of array. 2011 */ 2012 if (!test_bit(MD_RECOVERY_FROZEN, 2013 &mddev->recovery)) 2014 set_bit(In_sync, &rdev->flags); 2015 } 2016 rdev->raid_disk = role; 2017 break; 2018 } 2019 if (sb->devflags & WriteMostly1) 2020 set_bit(WriteMostly, &rdev->flags); 2021 if (sb->devflags & FailFast1) 2022 set_bit(FailFast, &rdev->flags); 2023 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT) 2024 set_bit(Replacement, &rdev->flags); 2025 } else /* MULTIPATH are always insync */ 2026 set_bit(In_sync, &rdev->flags); 2027 2028 return 0; 2029} 2030 2031static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) 2032{ 2033 struct mdp_superblock_1 *sb; 2034 struct md_rdev *rdev2; 2035 int max_dev, i; 2036 /* make rdev->sb match mddev and rdev data. 
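 * (This is the v1.x ->sync_super hook: md_update_sb() calls sync_sbs(),
 * which dispatches here via sync_super() for every rdev whose on-disk
 * superblock needs rewriting.)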
*/ 2037 2038 sb = page_address(rdev->sb_page); 2039 2040 sb->feature_map = 0; 2041 sb->pad0 = 0; 2042 sb->recovery_offset = cpu_to_le64(0); 2043 memset(sb->pad3, 0, sizeof(sb->pad3)); 2044 2045 sb->utime = cpu_to_le64((__u64)mddev->utime); 2046 sb->events = cpu_to_le64(mddev->events); 2047 if (mddev->in_sync) 2048 sb->resync_offset = cpu_to_le64(mddev->recovery_cp); 2049 else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags)) 2050 sb->resync_offset = cpu_to_le64(MaxSector); 2051 else 2052 sb->resync_offset = cpu_to_le64(0); 2053 2054 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors)); 2055 2056 sb->raid_disks = cpu_to_le32(mddev->raid_disks); 2057 sb->size = cpu_to_le64(mddev->dev_sectors); 2058 sb->chunksize = cpu_to_le32(mddev->chunk_sectors); 2059 sb->level = cpu_to_le32(mddev->level); 2060 sb->layout = cpu_to_le32(mddev->layout); 2061 if (test_bit(FailFast, &rdev->flags)) 2062 sb->devflags |= FailFast1; 2063 else 2064 sb->devflags &= ~FailFast1; 2065 2066 if (test_bit(WriteMostly, &rdev->flags)) 2067 sb->devflags |= WriteMostly1; 2068 else 2069 sb->devflags &= ~WriteMostly1; 2070 sb->data_offset = cpu_to_le64(rdev->data_offset); 2071 sb->data_size = cpu_to_le64(rdev->sectors); 2072 2073 if (mddev->bitmap && mddev->bitmap_info.file == NULL) { 2074 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); 2075 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); 2076 } 2077 2078 if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) && 2079 !test_bit(In_sync, &rdev->flags)) { 2080 sb->feature_map |= 2081 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET); 2082 sb->recovery_offset = 2083 cpu_to_le64(rdev->recovery_offset); 2084 if (rdev->saved_raid_disk >= 0 && mddev->bitmap) 2085 sb->feature_map |= 2086 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP); 2087 } 2088 /* Note: recovery_offset and journal_tail share space */ 2089 if (test_bit(Journal, &rdev->flags)) 2090 sb->journal_tail = cpu_to_le64(rdev->journal_tail); 2091 if (test_bit(Replacement, &rdev->flags)) 2092 sb->feature_map |= 2093 cpu_to_le32(MD_FEATURE_REPLACEMENT); 2094 2095 if (mddev->reshape_position != MaxSector) { 2096 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE); 2097 sb->reshape_position = cpu_to_le64(mddev->reshape_position); 2098 sb->new_layout = cpu_to_le32(mddev->new_layout); 2099 sb->delta_disks = cpu_to_le32(mddev->delta_disks); 2100 sb->new_level = cpu_to_le32(mddev->new_level); 2101 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors); 2102 if (mddev->delta_disks == 0 && 2103 mddev->reshape_backwards) 2104 sb->feature_map 2105 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS); 2106 if (rdev->new_data_offset != rdev->data_offset) { 2107 sb->feature_map 2108 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET); 2109 sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset 2110 - rdev->data_offset)); 2111 } 2112 } 2113 2114 if (mddev_is_clustered(mddev)) 2115 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED); 2116 2117 if (rdev->badblocks.count == 0) 2118 /* Nothing to do for bad blocks*/ ; 2119 else if (sb->bblog_offset == 0) 2120 /* Cannot record bad blocks on this device */ 2121 md_error(mddev, rdev); 2122 else { 2123 struct badblocks *bb = &rdev->badblocks; 2124 __le64 *bbp = (__le64 *)page_address(rdev->bb_page); 2125 u64 *p = bb->page; 2126 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS); 2127 if (bb->changed) { 2128 unsigned seq; 2129 2130retry: 2131 seq = read_seqbegin(&bb->lock); 2132 2133 memset(bbp, 0xff, PAGE_SIZE); 2134 2135 for (i = 0 ; i < bb->count ; i++) { 2136 u64 
internal_bb = p[i]; 2137 u64 store_bb = ((BB_OFFSET(internal_bb) << 10) 2138 | BB_LEN(internal_bb)); 2139 bbp[i] = cpu_to_le64(store_bb); 2140 } 2141 bb->changed = 0; 2142 if (read_seqretry(&bb->lock, seq)) 2143 goto retry; 2144 2145 bb->sector = (rdev->sb_start + 2146 (int)le32_to_cpu(sb->bblog_offset)); 2147 bb->size = le16_to_cpu(sb->bblog_size); 2148 } 2149 } 2150 2151 max_dev = 0; 2152 rdev_for_each(rdev2, mddev) 2153 if (rdev2->desc_nr+1 > max_dev) 2154 max_dev = rdev2->desc_nr+1; 2155 2156 if (max_dev > le32_to_cpu(sb->max_dev)) { 2157 int bmask; 2158 sb->max_dev = cpu_to_le32(max_dev); 2159 rdev->sb_size = max_dev * 2 + 256; 2160 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; 2161 if (rdev->sb_size & bmask) 2162 rdev->sb_size = (rdev->sb_size | bmask) + 1; 2163 } else 2164 max_dev = le32_to_cpu(sb->max_dev); 2165 2166 for (i=0; i<max_dev;i++) 2167 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE); 2168 2169 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) 2170 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL); 2171 2172 if (test_bit(MD_HAS_PPL, &mddev->flags)) { 2173 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags)) 2174 sb->feature_map |= 2175 cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS); 2176 else 2177 sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL); 2178 sb->ppl.offset = cpu_to_le16(rdev->ppl.offset); 2179 sb->ppl.size = cpu_to_le16(rdev->ppl.size); 2180 } 2181 2182 rdev_for_each(rdev2, mddev) { 2183 i = rdev2->desc_nr; 2184 if (test_bit(Faulty, &rdev2->flags)) 2185 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY); 2186 else if (test_bit(In_sync, &rdev2->flags)) 2187 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 2188 else if (test_bit(Journal, &rdev2->flags)) 2189 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL); 2190 else if (rdev2->raid_disk >= 0) 2191 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 2192 else 2193 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE); 2194 } 2195 2196 sb->sb_csum = calc_sb_1_csum(sb); 2197} 2198 2199static sector_t super_1_choose_bm_space(sector_t dev_size) 2200{ 2201 sector_t bm_space; 2202 2203 /* if the device is bigger than 8Gig, save 64k for bitmap 2204 * usage, if bigger than 200Gig, save 128k 2205 */ 2206 if (dev_size < 64*2) 2207 bm_space = 0; 2208 else if (dev_size - 64*2 >= 200*1024*1024*2) 2209 bm_space = 128*2; 2210 else if (dev_size - 4*2 > 8*1024*1024*2) 2211 bm_space = 64*2; 2212 else 2213 bm_space = 4*2; 2214 return bm_space; 2215} 2216 2217static unsigned long long 2218super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) 2219{ 2220 struct mdp_superblock_1 *sb; 2221 sector_t max_sectors; 2222 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) 2223 return 0; /* component must fit device */ 2224 if (rdev->data_offset != rdev->new_data_offset) 2225 return 0; /* too confusing */ 2226 if (rdev->sb_start < rdev->data_offset) { 2227 /* minor versions 1 and 2; superblock before data */ 2228 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9; 2229 max_sectors -= rdev->data_offset; 2230 if (!num_sectors || num_sectors > max_sectors) 2231 num_sectors = max_sectors; 2232 } else if (rdev->mddev->bitmap_info.offset) { 2233 /* minor version 0 with bitmap we can't move */ 2234 return 0; 2235 } else { 2236 /* minor version 0; superblock after data */ 2237 sector_t sb_start, bm_space; 2238 sector_t dev_size = i_size_read(rdev->bdev->bd_inode) >> 9; 2239 2240 /* 8K is for superblock */ 2241 sb_start = dev_size - 8*2; 2242 sb_start &= ~(sector_t)(4*2 - 1); 2243 2244 bm_space = 
super_1_choose_bm_space(dev_size); 2245 2246 /* Space that can be used to store date needs to decrease 2247 * superblock bitmap space and bad block space(4K) 2248 */ 2249 max_sectors = sb_start - bm_space - 4*2; 2250 2251 if (!num_sectors || num_sectors > max_sectors) 2252 num_sectors = max_sectors; 2253 rdev->sb_start = sb_start; 2254 } 2255 sb = page_address(rdev->sb_page); 2256 sb->data_size = cpu_to_le64(num_sectors); 2257 sb->super_offset = cpu_to_le64(rdev->sb_start); 2258 sb->sb_csum = calc_sb_1_csum(sb); 2259 do { 2260 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 2261 rdev->sb_page); 2262 } while (md_super_wait(rdev->mddev) < 0); 2263 return num_sectors; 2264 2265} 2266 2267static int 2268super_1_allow_new_offset(struct md_rdev *rdev, 2269 unsigned long long new_offset) 2270{ 2271 /* All necessary checks on new >= old have been done */ 2272 struct bitmap *bitmap; 2273 if (new_offset >= rdev->data_offset) 2274 return 1; 2275 2276 /* with 1.0 metadata, there is no metadata to tread on 2277 * so we can always move back */ 2278 if (rdev->mddev->minor_version == 0) 2279 return 1; 2280 2281 /* otherwise we must be sure not to step on 2282 * any metadata, so stay: 2283 * 36K beyond start of superblock 2284 * beyond end of badblocks 2285 * beyond write-intent bitmap 2286 */ 2287 if (rdev->sb_start + (32+4)*2 > new_offset) 2288 return 0; 2289 bitmap = rdev->mddev->bitmap; 2290 if (bitmap && !rdev->mddev->bitmap_info.file && 2291 rdev->sb_start + rdev->mddev->bitmap_info.offset + 2292 bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset) 2293 return 0; 2294 if (rdev->badblocks.sector + rdev->badblocks.size > new_offset) 2295 return 0; 2296 2297 return 1; 2298} 2299 2300static struct super_type super_types[] = { 2301 [0] = { 2302 .name = "0.90.0", 2303 .owner = THIS_MODULE, 2304 .load_super = super_90_load, 2305 .validate_super = super_90_validate, 2306 .sync_super = super_90_sync, 2307 .rdev_size_change = super_90_rdev_size_change, 2308 .allow_new_offset = super_90_allow_new_offset, 2309 }, 2310 [1] = { 2311 .name = "md-1", 2312 .owner = THIS_MODULE, 2313 .load_super = super_1_load, 2314 .validate_super = super_1_validate, 2315 .sync_super = super_1_sync, 2316 .rdev_size_change = super_1_rdev_size_change, 2317 .allow_new_offset = super_1_allow_new_offset, 2318 }, 2319}; 2320 2321static void sync_super(struct mddev *mddev, struct md_rdev *rdev) 2322{ 2323 if (mddev->sync_super) { 2324 mddev->sync_super(mddev, rdev); 2325 return; 2326 } 2327 2328 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types)); 2329 2330 super_types[mddev->major_version].sync_super(mddev, rdev); 2331} 2332 2333static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2) 2334{ 2335 struct md_rdev *rdev, *rdev2; 2336 2337 rcu_read_lock(); 2338 rdev_for_each_rcu(rdev, mddev1) { 2339 if (test_bit(Faulty, &rdev->flags) || 2340 test_bit(Journal, &rdev->flags) || 2341 rdev->raid_disk == -1) 2342 continue; 2343 rdev_for_each_rcu(rdev2, mddev2) { 2344 if (test_bit(Faulty, &rdev2->flags) || 2345 test_bit(Journal, &rdev2->flags) || 2346 rdev2->raid_disk == -1) 2347 continue; 2348 if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) { 2349 rcu_read_unlock(); 2350 return 1; 2351 } 2352 } 2353 } 2354 rcu_read_unlock(); 2355 return 0; 2356} 2357 2358static LIST_HEAD(pending_raid_disks); 2359 2360/* 2361 * Try to register data integrity profile for an mddev 2362 * 2363 * This is called when an array is started and after a disk has been kicked 2364 * from the array. 
It only succeeds if all working and active component devices 2365 * are integrity capable with matching profiles. 2366 */ 2367int md_integrity_register(struct mddev *mddev) 2368{ 2369 struct md_rdev *rdev, *reference = NULL; 2370 2371 if (list_empty(&mddev->disks)) 2372 return 0; /* nothing to do */ 2373 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk)) 2374 return 0; /* shouldn't register, or already is */ 2375 rdev_for_each(rdev, mddev) { 2376 /* skip spares and non-functional disks */ 2377 if (test_bit(Faulty, &rdev->flags)) 2378 continue; 2379 if (rdev->raid_disk < 0) 2380 continue; 2381 if (!reference) { 2382 /* Use the first rdev as the reference */ 2383 reference = rdev; 2384 continue; 2385 } 2386 /* does this rdev's profile match the reference profile? */ 2387 if (blk_integrity_compare(reference->bdev->bd_disk, 2388 rdev->bdev->bd_disk) < 0) 2389 return -EINVAL; 2390 } 2391 if (!reference || !bdev_get_integrity(reference->bdev)) 2392 return 0; 2393 /* 2394 * All component devices are integrity capable and have matching 2395 * profiles, register the common profile for the md device. 2396 */ 2397 blk_integrity_register(mddev->gendisk, 2398 bdev_get_integrity(reference->bdev)); 2399 2400 pr_debug("md: data integrity enabled on %s\n", mdname(mddev)); 2401 if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE)) { 2402 pr_err("md: failed to create integrity pool for %s\n", 2403 mdname(mddev)); 2404 return -EINVAL; 2405 } 2406 return 0; 2407} 2408EXPORT_SYMBOL(md_integrity_register); 2409 2410/* 2411 * Attempt to add an rdev, but only if it is consistent with the current 2412 * integrity profile 2413 */ 2414int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) 2415{ 2416 struct blk_integrity *bi_mddev; 2417 char name[BDEVNAME_SIZE]; 2418 2419 if (!mddev->gendisk) 2420 return 0; 2421 2422 bi_mddev = blk_get_integrity(mddev->gendisk); 2423 2424 if (!bi_mddev) /* nothing to do */ 2425 return 0; 2426 2427 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) { 2428 pr_err("%s: incompatible integrity profile for %s\n", 2429 mdname(mddev), bdevname(rdev->bdev, name)); 2430 return -ENXIO; 2431 } 2432 2433 return 0; 2434} 2435EXPORT_SYMBOL(md_integrity_add_rdev); 2436 2437static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) 2438{ 2439 char b[BDEVNAME_SIZE]; 2440 struct kobject *ko; 2441 int err; 2442 2443 /* prevent duplicates */ 2444 if (find_rdev(mddev, rdev->bdev->bd_dev)) 2445 return -EEXIST; 2446 2447 if ((bdev_read_only(rdev->bdev) || bdev_read_only(rdev->meta_bdev)) && 2448 mddev->pers) 2449 return -EROFS; 2450 2451 /* make sure rdev->sectors exceeds mddev->dev_sectors */ 2452 if (!test_bit(Journal, &rdev->flags) && 2453 rdev->sectors && 2454 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) { 2455 if (mddev->pers) { 2456 /* Cannot change size, so fail 2457 * If mddev->level <= 0, then we don't care 2458 * about aligning sizes (e.g. linear) 2459 */ 2460 if (mddev->level > 0) 2461 return -ENOSPC; 2462 } else 2463 mddev->dev_sectors = rdev->sectors; 2464 } 2465 2466 /* Verify rdev->desc_nr is unique. 
2467 * If it is -1, assign a free number, else 2468 * check number is not in use 2469 */ 2470 rcu_read_lock(); 2471 if (rdev->desc_nr < 0) { 2472 int choice = 0; 2473 if (mddev->pers) 2474 choice = mddev->raid_disks; 2475 while (md_find_rdev_nr_rcu(mddev, choice)) 2476 choice++; 2477 rdev->desc_nr = choice; 2478 } else { 2479 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) { 2480 rcu_read_unlock(); 2481 return -EBUSY; 2482 } 2483 } 2484 rcu_read_unlock(); 2485 if (!test_bit(Journal, &rdev->flags) && 2486 mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { 2487 pr_warn("md: %s: array is limited to %d devices\n", 2488 mdname(mddev), mddev->max_disks); 2489 return -EBUSY; 2490 } 2491 bdevname(rdev->bdev,b); 2492 strreplace(b, '/', '!'); 2493 2494 rdev->mddev = mddev; 2495 pr_debug("md: bind<%s>\n", b); 2496 2497 if (mddev->raid_disks) 2498 mddev_create_serial_pool(mddev, rdev, false); 2499 2500 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) 2501 goto fail; 2502 2503 ko = &part_to_dev(rdev->bdev->bd_part)->kobj; 2504 /* failure here is OK */ 2505 err = sysfs_create_link(&rdev->kobj, ko, "block"); 2506 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state"); 2507 rdev->sysfs_unack_badblocks = 2508 sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks"); 2509 rdev->sysfs_badblocks = 2510 sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks"); 2511 2512 list_add_rcu(&rdev->same_set, &mddev->disks); 2513 bd_link_disk_holder(rdev->bdev, mddev->gendisk); 2514 2515 /* May as well allow recovery to be retried once */ 2516 mddev->recovery_disabled++; 2517 2518 return 0; 2519 2520 fail: 2521 pr_warn("md: failed to register dev-%s for %s\n", 2522 b, mdname(mddev)); 2523 mddev_destroy_serial_pool(mddev, rdev, false); 2524 return err; 2525} 2526 2527static void rdev_delayed_delete(struct work_struct *ws) 2528{ 2529 struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work); 2530 kobject_del(&rdev->kobj); 2531 kobject_put(&rdev->kobj); 2532} 2533 2534static void unbind_rdev_from_array(struct md_rdev *rdev) 2535{ 2536 char b[BDEVNAME_SIZE]; 2537 2538 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); 2539 list_del_rcu(&rdev->same_set); 2540 pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b)); 2541 mddev_destroy_serial_pool(rdev->mddev, rdev, false); 2542 rdev->mddev = NULL; 2543 sysfs_remove_link(&rdev->kobj, "block"); 2544 sysfs_put(rdev->sysfs_state); 2545 sysfs_put(rdev->sysfs_unack_badblocks); 2546 sysfs_put(rdev->sysfs_badblocks); 2547 rdev->sysfs_state = NULL; 2548 rdev->sysfs_unack_badblocks = NULL; 2549 rdev->sysfs_badblocks = NULL; 2550 rdev->badblocks.count = 0; 2551 /* We need to delay this, otherwise we can deadlock when 2552 * writing to 'remove' to "dev/state". We also need 2553 * to delay it due to rcu usage. 2554 */ 2555 synchronize_rcu(); 2556 INIT_WORK(&rdev->del_work, rdev_delayed_delete); 2557 kobject_get(&rdev->kobj); 2558 queue_work(md_rdev_misc_wq, &rdev->del_work); 2559} 2560 2561/* 2562 * prevent the device from being mounted, repartitioned or 2563 * otherwise reused by a RAID array (or any other kernel 2564 * subsystem), by bd_claiming the device. 2565 */ 2566static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared) 2567{ 2568 int err = 0; 2569 struct block_device *bdev; 2570 2571 bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, 2572 shared ? 
(struct md_rdev *)lock_rdev : rdev); 2573 if (IS_ERR(bdev)) { 2574 pr_warn("md: could not open device unknown-block(%u,%u).\n", 2575 MAJOR(dev), MINOR(dev)); 2576 return PTR_ERR(bdev); 2577 } 2578 rdev->bdev = bdev; 2579 return err; 2580} 2581 2582static void unlock_rdev(struct md_rdev *rdev) 2583{ 2584 struct block_device *bdev = rdev->bdev; 2585 rdev->bdev = NULL; 2586 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 2587} 2588 2589void md_autodetect_dev(dev_t dev); 2590 2591static void export_rdev(struct md_rdev *rdev) 2592{ 2593 char b[BDEVNAME_SIZE]; 2594 2595 pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b)); 2596 md_rdev_clear(rdev); 2597#ifndef MODULE 2598 if (test_bit(AutoDetected, &rdev->flags)) 2599 md_autodetect_dev(rdev->bdev->bd_dev); 2600#endif 2601 unlock_rdev(rdev); 2602 kobject_put(&rdev->kobj); 2603} 2604 2605void md_kick_rdev_from_array(struct md_rdev *rdev) 2606{ 2607 unbind_rdev_from_array(rdev); 2608 export_rdev(rdev); 2609} 2610EXPORT_SYMBOL_GPL(md_kick_rdev_from_array); 2611 2612static void export_array(struct mddev *mddev) 2613{ 2614 struct md_rdev *rdev; 2615 2616 while (!list_empty(&mddev->disks)) { 2617 rdev = list_first_entry(&mddev->disks, struct md_rdev, 2618 same_set); 2619 md_kick_rdev_from_array(rdev); 2620 } 2621 mddev->raid_disks = 0; 2622 mddev->major_version = 0; 2623} 2624 2625static bool set_in_sync(struct mddev *mddev) 2626{ 2627 lockdep_assert_held(&mddev->lock); 2628 if (!mddev->in_sync) { 2629 mddev->sync_checkers++; 2630 spin_unlock(&mddev->lock); 2631 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending); 2632 spin_lock(&mddev->lock); 2633 if (!mddev->in_sync && 2634 percpu_ref_is_zero(&mddev->writes_pending)) { 2635 mddev->in_sync = 1; 2636 /* 2637 * Ensure ->in_sync is visible before we clear 2638 * ->sync_checkers. 2639 */ 2640 smp_mb(); 2641 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 2642 sysfs_notify_dirent_safe(mddev->sysfs_state); 2643 } 2644 if (--mddev->sync_checkers == 0) 2645 percpu_ref_switch_to_percpu(&mddev->writes_pending); 2646 } 2647 if (mddev->safemode == 1) 2648 mddev->safemode = 0; 2649 return mddev->in_sync; 2650} 2651 2652static void sync_sbs(struct mddev *mddev, int nospares) 2653{ 2654 /* Update each superblock (in-memory image), but 2655 * if we are allowed to, skip spares which already 2656 * have the right event counter, or have one earlier 2657 * (which would mean they aren't being marked as dirty 2658 * with the rest of the array) 2659 */ 2660 struct md_rdev *rdev; 2661 rdev_for_each(rdev, mddev) { 2662 if (rdev->sb_events == mddev->events || 2663 (nospares && 2664 rdev->raid_disk < 0 && 2665 rdev->sb_events+1 == mddev->events)) { 2666 /* Don't update this superblock */ 2667 rdev->sb_loaded = 2; 2668 } else { 2669 sync_super(mddev, rdev); 2670 rdev->sb_loaded = 1; 2671 } 2672 } 2673} 2674 2675static bool does_sb_need_changing(struct mddev *mddev) 2676{ 2677 struct md_rdev *rdev = NULL, *iter; 2678 struct mdp_superblock_1 *sb; 2679 int role; 2680 2681 /* Find a good rdev */ 2682 rdev_for_each(iter, mddev) 2683 if ((iter->raid_disk >= 0) && !test_bit(Faulty, &iter->flags)) { 2684 rdev = iter; 2685 break; 2686 } 2687 2688 /* No good device found. */ 2689 if (!rdev) 2690 return false; 2691 2692 sb = page_address(rdev->sb_page); 2693 /* Check if a device has become faulty or a spare become active */ 2694 rdev_for_each(rdev, mddev) { 2695 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); 2696 /* Device activated? 
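 * (role 0xffff is MD_DISK_ROLE_SPARE, 0xfffe is MD_DISK_ROLE_FAULTY and
 * 0xfffd is MD_DISK_ROLE_JOURNAL, so the "role < 0xfffd" check below
 * means the recorded slot still refers to an active data disk.)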
*/ 2697 if (role == 0xffff && rdev->raid_disk >=0 && 2698 !test_bit(Faulty, &rdev->flags)) 2699 return true; 2700 /* Device turned faulty? */ 2701 if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd)) 2702 return true; 2703 } 2704 2705 /* Check if any mddev parameters have changed */ 2706 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || 2707 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || 2708 (mddev->layout != le32_to_cpu(sb->layout)) || 2709 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || 2710 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) 2711 return true; 2712 2713 return false; 2714} 2715 2716void md_update_sb(struct mddev *mddev, int force_change) 2717{ 2718 struct md_rdev *rdev; 2719 int sync_req; 2720 int nospares = 0; 2721 int any_badblocks_changed = 0; 2722 int ret = -1; 2723 2724 if (mddev->ro) { 2725 if (force_change) 2726 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2727 return; 2728 } 2729 2730repeat: 2731 if (mddev_is_clustered(mddev)) { 2732 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) 2733 force_change = 1; 2734 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) 2735 nospares = 1; 2736 ret = md_cluster_ops->metadata_update_start(mddev); 2737 /* Has someone else has updated the sb */ 2738 if (!does_sb_need_changing(mddev)) { 2739 if (ret == 0) 2740 md_cluster_ops->metadata_update_cancel(mddev); 2741 bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), 2742 BIT(MD_SB_CHANGE_DEVS) | 2743 BIT(MD_SB_CHANGE_CLEAN)); 2744 return; 2745 } 2746 } 2747 2748 /* 2749 * First make sure individual recovery_offsets are correct 2750 * curr_resync_completed can only be used during recovery. 2751 * During reshape/resync it might use array-addresses rather 2752 * that device addresses. 2753 */ 2754 rdev_for_each(rdev, mddev) { 2755 if (rdev->raid_disk >= 0 && 2756 mddev->delta_disks >= 0 && 2757 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 2758 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) && 2759 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 2760 !test_bit(Journal, &rdev->flags) && 2761 !test_bit(In_sync, &rdev->flags) && 2762 mddev->curr_resync_completed > rdev->recovery_offset) 2763 rdev->recovery_offset = mddev->curr_resync_completed; 2764 2765 } 2766 if (!mddev->persistent) { 2767 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 2768 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2769 if (!mddev->external) { 2770 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 2771 rdev_for_each(rdev, mddev) { 2772 if (rdev->badblocks.changed) { 2773 rdev->badblocks.changed = 0; 2774 ack_all_badblocks(&rdev->badblocks); 2775 md_error(mddev, rdev); 2776 } 2777 clear_bit(Blocked, &rdev->flags); 2778 clear_bit(BlockedBadBlocks, &rdev->flags); 2779 wake_up(&rdev->blocked_wait); 2780 } 2781 } 2782 wake_up(&mddev->sb_wait); 2783 return; 2784 } 2785 2786 spin_lock(&mddev->lock); 2787 2788 mddev->utime = ktime_get_real_seconds(); 2789 2790 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) 2791 force_change = 1; 2792 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) 2793 /* just a clean<-> dirty transition, possibly leave spares alone, 2794 * though if events isn't the right even/odd, we will have to do 2795 * spares after all 2796 */ 2797 nospares = 1; 2798 if (force_change) 2799 nospares = 0; 2800 if (mddev->degraded) 2801 /* If the array is degraded, then skipping spares is both 2802 * dangerous and fairly pointless. 
2803 * Dangerous because a device that was removed from the array 2804 * might have a event_count that still looks up-to-date, 2805 * so it can be re-added without a resync. 2806 * Pointless because if there are any spares to skip, 2807 * then a recovery will happen and soon that array won't 2808 * be degraded any more and the spare can go back to sleep then. 2809 */ 2810 nospares = 0; 2811 2812 sync_req = mddev->in_sync; 2813 2814 /* If this is just a dirty<->clean transition, and the array is clean 2815 * and 'events' is odd, we can roll back to the previous clean state */ 2816 if (nospares 2817 && (mddev->in_sync && mddev->recovery_cp == MaxSector) 2818 && mddev->can_decrease_events 2819 && mddev->events != 1) { 2820 mddev->events--; 2821 mddev->can_decrease_events = 0; 2822 } else { 2823 /* otherwise we have to go forward and ... */ 2824 mddev->events ++; 2825 mddev->can_decrease_events = nospares; 2826 } 2827 2828 /* 2829 * This 64-bit counter should never wrap. 2830 * Either we are in around ~1 trillion A.C., assuming 2831 * 1 reboot per second, or we have a bug... 2832 */ 2833 WARN_ON(mddev->events == 0); 2834 2835 rdev_for_each(rdev, mddev) { 2836 if (rdev->badblocks.changed) 2837 any_badblocks_changed++; 2838 if (test_bit(Faulty, &rdev->flags)) 2839 set_bit(FaultRecorded, &rdev->flags); 2840 } 2841 2842 sync_sbs(mddev, nospares); 2843 spin_unlock(&mddev->lock); 2844 2845 pr_debug("md: updating %s RAID superblock on device (in sync %d)\n", 2846 mdname(mddev), mddev->in_sync); 2847 2848 if (mddev->queue) 2849 blk_add_trace_msg(mddev->queue, "md md_update_sb"); 2850rewrite: 2851 md_bitmap_update_sb(mddev->bitmap); 2852 rdev_for_each(rdev, mddev) { 2853 char b[BDEVNAME_SIZE]; 2854 2855 if (rdev->sb_loaded != 1) 2856 continue; /* no noise on spare devices */ 2857 2858 if (!test_bit(Faulty, &rdev->flags)) { 2859 md_super_write(mddev,rdev, 2860 rdev->sb_start, rdev->sb_size, 2861 rdev->sb_page); 2862 pr_debug("md: (write) %s's sb offset: %llu\n", 2863 bdevname(rdev->bdev, b), 2864 (unsigned long long)rdev->sb_start); 2865 rdev->sb_events = mddev->events; 2866 if (rdev->badblocks.size) { 2867 md_super_write(mddev, rdev, 2868 rdev->badblocks.sector, 2869 rdev->badblocks.size << 9, 2870 rdev->bb_page); 2871 rdev->badblocks.size = 0; 2872 } 2873 2874 } else 2875 pr_debug("md: %s (skipping faulty)\n", 2876 bdevname(rdev->bdev, b)); 2877 2878 if (mddev->level == LEVEL_MULTIPATH) 2879 /* only need to write one superblock... 
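 * (every MULTIPATH member is just another path to the same underlying
 * device, so the first up-to-date copy covers them all)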
*/ 2880 break; 2881 } 2882 if (md_super_wait(mddev) < 0) 2883 goto rewrite; 2884 /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */ 2885 2886 if (mddev_is_clustered(mddev) && ret == 0) 2887 md_cluster_ops->metadata_update_finish(mddev); 2888 2889 if (mddev->in_sync != sync_req || 2890 !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), 2891 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN))) 2892 /* have to write it out again */ 2893 goto repeat; 2894 wake_up(&mddev->sb_wait); 2895 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 2896 sysfs_notify_dirent_safe(mddev->sysfs_completed); 2897 2898 rdev_for_each(rdev, mddev) { 2899 if (test_and_clear_bit(FaultRecorded, &rdev->flags)) 2900 clear_bit(Blocked, &rdev->flags); 2901 2902 if (any_badblocks_changed) 2903 ack_all_badblocks(&rdev->badblocks); 2904 clear_bit(BlockedBadBlocks, &rdev->flags); 2905 wake_up(&rdev->blocked_wait); 2906 } 2907 } 2908 EXPORT_SYMBOL(md_update_sb); 2909 2910 static int add_bound_rdev(struct md_rdev *rdev) 2911 { 2912 struct mddev *mddev = rdev->mddev; 2913 int err = 0; 2914 bool add_journal = test_bit(Journal, &rdev->flags); 2915 2916 if (!mddev->pers->hot_remove_disk || add_journal) { 2917 /* If there is hot_add_disk but no hot_remove_disk, 2918 * then disks added are for geometry changes 2919 * and should be added immediately. 2920 */ 2921 super_types[mddev->major_version]. 2922 validate_super(mddev, NULL/*freshest*/, rdev); 2923 if (add_journal) 2924 mddev_suspend(mddev); 2925 err = mddev->pers->hot_add_disk(mddev, rdev); 2926 if (add_journal) 2927 mddev_resume(mddev); 2928 if (err) { 2929 md_kick_rdev_from_array(rdev); 2930 return err; 2931 } 2932 } 2933 sysfs_notify_dirent_safe(rdev->sysfs_state); 2934 2935 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2936 if (mddev->degraded) 2937 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 2938 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2939 md_new_event(mddev); 2940 md_wakeup_thread(mddev->thread); 2941 return 0; 2942 } 2943 2944 /* words written to sysfs files may, or may not, be \n terminated. 2945 * We want to accept either form. For this we use cmd_match. 2946 */ 2947 static int cmd_match(const char *cmd, const char *str) 2948 { 2949 /* See if cmd, written into a sysfs file, matches 2950 * str.
They must either be the same, or cmd can 2951 * have a trailing newline 2952 */ 2953 while (*cmd && *str && *cmd == *str) { 2954 cmd++; 2955 str++; 2956 } 2957 if (*cmd == '\n') 2958 cmd++; 2959 if (*str || *cmd) 2960 return 0; 2961 return 1; 2962} 2963 2964struct rdev_sysfs_entry { 2965 struct attribute attr; 2966 ssize_t (*show)(struct md_rdev *, char *); 2967 ssize_t (*store)(struct md_rdev *, const char *, size_t); 2968}; 2969 2970static ssize_t 2971state_show(struct md_rdev *rdev, char *page) 2972{ 2973 char *sep = ","; 2974 size_t len = 0; 2975 unsigned long flags = READ_ONCE(rdev->flags); 2976 2977 if (test_bit(Faulty, &flags) || 2978 (!test_bit(ExternalBbl, &flags) && 2979 rdev->badblocks.unacked_exist)) 2980 len += sprintf(page+len, "faulty%s", sep); 2981 if (test_bit(In_sync, &flags)) 2982 len += sprintf(page+len, "in_sync%s", sep); 2983 if (test_bit(Journal, &flags)) 2984 len += sprintf(page+len, "journal%s", sep); 2985 if (test_bit(WriteMostly, &flags)) 2986 len += sprintf(page+len, "write_mostly%s", sep); 2987 if (test_bit(Blocked, &flags) || 2988 (rdev->badblocks.unacked_exist 2989 && !test_bit(Faulty, &flags))) 2990 len += sprintf(page+len, "blocked%s", sep); 2991 if (!test_bit(Faulty, &flags) && 2992 !test_bit(Journal, &flags) && 2993 !test_bit(In_sync, &flags)) 2994 len += sprintf(page+len, "spare%s", sep); 2995 if (test_bit(WriteErrorSeen, &flags)) 2996 len += sprintf(page+len, "write_error%s", sep); 2997 if (test_bit(WantReplacement, &flags)) 2998 len += sprintf(page+len, "want_replacement%s", sep); 2999 if (test_bit(Replacement, &flags)) 3000 len += sprintf(page+len, "replacement%s", sep); 3001 if (test_bit(ExternalBbl, &flags)) 3002 len += sprintf(page+len, "external_bbl%s", sep); 3003 if (test_bit(FailFast, &flags)) 3004 len += sprintf(page+len, "failfast%s", sep); 3005 3006 if (len) 3007 len -= strlen(sep); 3008 3009 return len+sprintf(page+len, "\n"); 3010} 3011 3012static ssize_t 3013state_store(struct md_rdev *rdev, const char *buf, size_t len) 3014{ 3015 /* can write 3016 * faulty - simulates an error 3017 * remove - disconnects the device 3018 * writemostly - sets write_mostly 3019 * -writemostly - clears write_mostly 3020 * blocked - sets the Blocked flags 3021 * -blocked - clears the Blocked and possibly simulates an error 3022 * insync - sets Insync providing device isn't active 3023 * -insync - clear Insync for a device with a slot assigned, 3024 * so that it gets rebuilt based on bitmap 3025 * write_error - sets WriteErrorSeen 3026 * -write_error - clears WriteErrorSeen 3027 * {,-}failfast - set/clear FailFast 3028 */ 3029 3030 struct mddev *mddev = rdev->mddev; 3031 int err = -EINVAL; 3032 bool need_update_sb = false; 3033 3034 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { 3035 md_error(rdev->mddev, rdev); 3036 if (test_bit(Faulty, &rdev->flags)) 3037 err = 0; 3038 else 3039 err = -EBUSY; 3040 } else if (cmd_match(buf, "remove")) { 3041 if (rdev->mddev->pers) { 3042 clear_bit(Blocked, &rdev->flags); 3043 remove_and_add_spares(rdev->mddev, rdev); 3044 } 3045 if (rdev->raid_disk >= 0) 3046 err = -EBUSY; 3047 else { 3048 err = 0; 3049 if (mddev_is_clustered(mddev)) 3050 err = md_cluster_ops->remove_disk(mddev, rdev); 3051 3052 if (err == 0) { 3053 md_kick_rdev_from_array(rdev); 3054 if (mddev->pers) { 3055 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 3056 md_wakeup_thread(mddev->thread); 3057 } 3058 md_new_event(mddev); 3059 } 3060 } 3061 } else if (cmd_match(buf, "writemostly")) { 3062 set_bit(WriteMostly, &rdev->flags); 3063 
mddev_create_serial_pool(rdev->mddev, rdev, false); 3064 need_update_sb = true; 3065 err = 0; 3066 } else if (cmd_match(buf, "-writemostly")) { 3067 mddev_destroy_serial_pool(rdev->mddev, rdev, false); 3068 clear_bit(WriteMostly, &rdev->flags); 3069 need_update_sb = true; 3070 err = 0; 3071 } else if (cmd_match(buf, "blocked")) { 3072 set_bit(Blocked, &rdev->flags); 3073 err = 0; 3074 } else if (cmd_match(buf, "-blocked")) { 3075 if (!test_bit(Faulty, &rdev->flags) && 3076 !test_bit(ExternalBbl, &rdev->flags) && 3077 rdev->badblocks.unacked_exist) { 3078 /* metadata handler doesn't understand badblocks, 3079 * so we need to fail the device 3080 */ 3081 md_error(rdev->mddev, rdev); 3082 } 3083 clear_bit(Blocked, &rdev->flags); 3084 clear_bit(BlockedBadBlocks, &rdev->flags); 3085 wake_up(&rdev->blocked_wait); 3086 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 3087 md_wakeup_thread(rdev->mddev->thread); 3088 3089 err = 0; 3090 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { 3091 set_bit(In_sync, &rdev->flags); 3092 err = 0; 3093 } else if (cmd_match(buf, "failfast")) { 3094 set_bit(FailFast, &rdev->flags); 3095 need_update_sb = true; 3096 err = 0; 3097 } else if (cmd_match(buf, "-failfast")) { 3098 clear_bit(FailFast, &rdev->flags); 3099 need_update_sb = true; 3100 err = 0; 3101 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 && 3102 !test_bit(Journal, &rdev->flags)) { 3103 if (rdev->mddev->pers == NULL) { 3104 clear_bit(In_sync, &rdev->flags); 3105 rdev->saved_raid_disk = rdev->raid_disk; 3106 rdev->raid_disk = -1; 3107 err = 0; 3108 } 3109 } else if (cmd_match(buf, "write_error")) { 3110 set_bit(WriteErrorSeen, &rdev->flags); 3111 err = 0; 3112 } else if (cmd_match(buf, "-write_error")) { 3113 clear_bit(WriteErrorSeen, &rdev->flags); 3114 err = 0; 3115 } else if (cmd_match(buf, "want_replacement")) { 3116 /* Any non-spare device that is not a replacement can 3117 * become want_replacement at any time, but we then need to 3118 * check if recovery is needed. 3119 */ 3120 if (rdev->raid_disk >= 0 && 3121 !test_bit(Journal, &rdev->flags) && 3122 !test_bit(Replacement, &rdev->flags)) 3123 set_bit(WantReplacement, &rdev->flags); 3124 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 3125 md_wakeup_thread(rdev->mddev->thread); 3126 err = 0; 3127 } else if (cmd_match(buf, "-want_replacement")) { 3128 /* Clearing 'want_replacement' is always allowed. 3129 * Once replacements starts it is too late though. 3130 */ 3131 err = 0; 3132 clear_bit(WantReplacement, &rdev->flags); 3133 } else if (cmd_match(buf, "replacement")) { 3134 /* Can only set a device as a replacement when array has not 3135 * yet been started. Once running, replacement is automatic 3136 * from spares, or by assigning 'slot'. 3137 */ 3138 if (rdev->mddev->pers) 3139 err = -EBUSY; 3140 else { 3141 set_bit(Replacement, &rdev->flags); 3142 err = 0; 3143 } 3144 } else if (cmd_match(buf, "-replacement")) { 3145 /* Similarly, can only clear Replacement before start */ 3146 if (rdev->mddev->pers) 3147 err = -EBUSY; 3148 else { 3149 clear_bit(Replacement, &rdev->flags); 3150 err = 0; 3151 } 3152 } else if (cmd_match(buf, "re-add")) { 3153 if (!rdev->mddev->pers) 3154 err = -EINVAL; 3155 else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) && 3156 rdev->saved_raid_disk >= 0) { 3157 /* clear_bit is performed _after_ all the devices 3158 * have their local Faulty bit cleared. 
If any writes 3159 * happen in the meantime in the local node, they 3160 * will land in the local bitmap, which will be synced 3161 * by this node eventually 3162 */ 3163 if (!mddev_is_clustered(rdev->mddev) || 3164 (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) { 3165 clear_bit(Faulty, &rdev->flags); 3166 err = add_bound_rdev(rdev); 3167 } 3168 } else 3169 err = -EBUSY; 3170 } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) { 3171 set_bit(ExternalBbl, &rdev->flags); 3172 rdev->badblocks.shift = 0; 3173 err = 0; 3174 } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) { 3175 clear_bit(ExternalBbl, &rdev->flags); 3176 err = 0; 3177 } 3178 if (need_update_sb) 3179 md_update_sb(mddev, 1); 3180 if (!err) 3181 sysfs_notify_dirent_safe(rdev->sysfs_state); 3182 return err ? err : len; 3183} 3184static struct rdev_sysfs_entry rdev_state = 3185__ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store); 3186 3187static ssize_t 3188errors_show(struct md_rdev *rdev, char *page) 3189{ 3190 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); 3191} 3192 3193static ssize_t 3194errors_store(struct md_rdev *rdev, const char *buf, size_t len) 3195{ 3196 unsigned int n; 3197 int rv; 3198 3199 rv = kstrtouint(buf, 10, &n); 3200 if (rv < 0) 3201 return rv; 3202 atomic_set(&rdev->corrected_errors, n); 3203 return len; 3204} 3205static struct rdev_sysfs_entry rdev_errors = 3206__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); 3207 3208static ssize_t 3209slot_show(struct md_rdev *rdev, char *page) 3210{ 3211 if (test_bit(Journal, &rdev->flags)) 3212 return sprintf(page, "journal\n"); 3213 else if (rdev->raid_disk < 0) 3214 return sprintf(page, "none\n"); 3215 else 3216 return sprintf(page, "%d\n", rdev->raid_disk); 3217} 3218 3219static ssize_t 3220slot_store(struct md_rdev *rdev, const char *buf, size_t len) 3221{ 3222 int slot; 3223 int err; 3224 3225 if (test_bit(Journal, &rdev->flags)) 3226 return -EBUSY; 3227 if (strncmp(buf, "none", 4)==0) 3228 slot = -1; 3229 else { 3230 err = kstrtouint(buf, 10, (unsigned int *)&slot); 3231 if (err < 0) 3232 return err; 3233 if (slot < 0) 3234 /* overflow */ 3235 return -ENOSPC; 3236 } 3237 if (rdev->mddev->pers && slot == -1) { 3238 /* Setting 'slot' on an active array requires also 3239 * updating the 'rd%d' link, and communicating 3240 * with the personality with ->hot_*_disk. 3241 * For now we only support removing 3242 * failed/spare devices. This normally happens automatically, 3243 * but not when the metadata is externally managed. 3244 */ 3245 if (rdev->raid_disk == -1) 3246 return -EEXIST; 3247 /* personality does all needed checks */ 3248 if (rdev->mddev->pers->hot_remove_disk == NULL) 3249 return -EINVAL; 3250 clear_bit(Blocked, &rdev->flags); 3251 remove_and_add_spares(rdev->mddev, rdev); 3252 if (rdev->raid_disk >= 0) 3253 return -EBUSY; 3254 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 3255 md_wakeup_thread(rdev->mddev->thread); 3256 } else if (rdev->mddev->pers) { 3257 /* Activating a spare .. or possibly reactivating 3258 * if we ever get bitmaps working here. 
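 * (rdev_attr_store() has already taken mddev_lock() before calling any
 * of these ->store handlers, so calling ->hot_add_disk here is safe.)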
3259 */ 3260 int err; 3261 3262 if (rdev->raid_disk != -1) 3263 return -EBUSY; 3264 3265 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) 3266 return -EBUSY; 3267 3268 if (rdev->mddev->pers->hot_add_disk == NULL) 3269 return -EINVAL; 3270 3271 if (slot >= rdev->mddev->raid_disks && 3272 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 3273 return -ENOSPC; 3274 3275 rdev->raid_disk = slot; 3276 if (test_bit(In_sync, &rdev->flags)) 3277 rdev->saved_raid_disk = slot; 3278 else 3279 rdev->saved_raid_disk = -1; 3280 clear_bit(In_sync, &rdev->flags); 3281 clear_bit(Bitmap_sync, &rdev->flags); 3282 err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev); 3283 if (err) { 3284 rdev->raid_disk = -1; 3285 return err; 3286 } else 3287 sysfs_notify_dirent_safe(rdev->sysfs_state); 3288 /* failure here is OK */; 3289 sysfs_link_rdev(rdev->mddev, rdev); 3290 /* don't wakeup anyone, leave that to userspace. */ 3291 } else { 3292 if (slot >= rdev->mddev->raid_disks && 3293 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 3294 return -ENOSPC; 3295 rdev->raid_disk = slot; 3296 /* assume it is working */ 3297 clear_bit(Faulty, &rdev->flags); 3298 clear_bit(WriteMostly, &rdev->flags); 3299 set_bit(In_sync, &rdev->flags); 3300 sysfs_notify_dirent_safe(rdev->sysfs_state); 3301 } 3302 return len; 3303} 3304 3305static struct rdev_sysfs_entry rdev_slot = 3306__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store); 3307 3308static ssize_t 3309offset_show(struct md_rdev *rdev, char *page) 3310{ 3311 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); 3312} 3313 3314static ssize_t 3315offset_store(struct md_rdev *rdev, const char *buf, size_t len) 3316{ 3317 unsigned long long offset; 3318 if (kstrtoull(buf, 10, &offset) < 0) 3319 return -EINVAL; 3320 if (rdev->mddev->pers && rdev->raid_disk >= 0) 3321 return -EBUSY; 3322 if (rdev->sectors && rdev->mddev->external) 3323 /* Must set offset before size, so overlap checks 3324 * can be sane */ 3325 return -EBUSY; 3326 rdev->data_offset = offset; 3327 rdev->new_data_offset = offset; 3328 return len; 3329} 3330 3331static struct rdev_sysfs_entry rdev_offset = 3332__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); 3333 3334static ssize_t new_offset_show(struct md_rdev *rdev, char *page) 3335{ 3336 return sprintf(page, "%llu\n", 3337 (unsigned long long)rdev->new_data_offset); 3338} 3339 3340static ssize_t new_offset_store(struct md_rdev *rdev, 3341 const char *buf, size_t len) 3342{ 3343 unsigned long long new_offset; 3344 struct mddev *mddev = rdev->mddev; 3345 3346 if (kstrtoull(buf, 10, &new_offset) < 0) 3347 return -EINVAL; 3348 3349 if (mddev->sync_thread || 3350 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery)) 3351 return -EBUSY; 3352 if (new_offset == rdev->data_offset) 3353 /* reset is always permitted */ 3354 ; 3355 else if (new_offset > rdev->data_offset) { 3356 /* must not push array size beyond rdev_sectors */ 3357 if (new_offset - rdev->data_offset 3358 + mddev->dev_sectors > rdev->sectors) 3359 return -E2BIG; 3360 } 3361 /* Metadata worries about other space details. */ 3362 3363 /* decreasing the offset is inconsistent with a backwards 3364 * reshape. 3365 */ 3366 if (new_offset < rdev->data_offset && 3367 mddev->reshape_backwards) 3368 return -EINVAL; 3369 /* Increasing offset is inconsistent with forwards 3370 * reshape. reshape_direction should be set to 3371 * 'backwards' first. 
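 * (In practice 'new_offset' is written by mdadm when it prepares a
 * reshape that relocates the data area; the checks above only keep the
 * request consistent with the chosen reshape direction.)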
*/ 3373 if (new_offset > rdev->data_offset && 3374 !mddev->reshape_backwards) 3375 return -EINVAL; 3376 3377 if (mddev->pers && mddev->persistent && 3378 !super_types[mddev->major_version] 3379 .allow_new_offset(rdev, new_offset)) 3380 return -E2BIG; 3381 rdev->new_data_offset = new_offset; 3382 if (new_offset > rdev->data_offset) 3383 mddev->reshape_backwards = 1; 3384 else if (new_offset < rdev->data_offset) 3385 mddev->reshape_backwards = 0; 3386 3387 return len; 3388 } 3389 static struct rdev_sysfs_entry rdev_new_offset = 3390 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store); 3391 3392 static ssize_t 3393 rdev_size_show(struct md_rdev *rdev, char *page) 3394 { 3395 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2); 3396 } 3397 3398 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2) 3399 { 3400 /* check if two start/length pairs overlap */ 3401 if (s1+l1 <= s2) 3402 return 0; 3403 if (s2+l2 <= s1) 3404 return 0; 3405 return 1; 3406 } 3407 3408 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors) 3409 { 3410 unsigned long long blocks; 3411 sector_t new; 3412 3413 if (kstrtoull(buf, 10, &blocks) < 0) 3414 return -EINVAL; 3415 3416 if (blocks & 1ULL << (8 * sizeof(blocks) - 1)) 3417 return -EINVAL; /* sector conversion overflow */ 3418 3419 new = blocks * 2; 3420 if (new != blocks * 2) 3421 return -EINVAL; /* unsigned long long to sector_t overflow */ 3422 3423 *sectors = new; 3424 return 0; 3425 } 3426 3427 static ssize_t 3428 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) 3429 { 3430 struct mddev *my_mddev = rdev->mddev; 3431 sector_t oldsectors = rdev->sectors; 3432 sector_t sectors; 3433 3434 if (test_bit(Journal, &rdev->flags)) 3435 return -EBUSY; 3436 if (strict_blocks_to_sectors(buf, &sectors) < 0) 3437 return -EINVAL; 3438 if (rdev->data_offset != rdev->new_data_offset) 3439 return -EINVAL; /* too confusing */ 3440 if (my_mddev->pers && rdev->raid_disk >= 0) { 3441 if (my_mddev->persistent) { 3442 sectors = super_types[my_mddev->major_version]. 3443 rdev_size_change(rdev, sectors); 3444 if (!sectors) 3445 return -EBUSY; 3446 } else if (!sectors) 3447 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) - 3448 rdev->data_offset; 3449 if (!my_mddev->pers->resize) 3450 /* Cannot change size for RAID0 or Linear etc */ 3451 return -EINVAL; 3452 } 3453 if (sectors < my_mddev->dev_sectors) 3454 return -EINVAL; /* component must fit device */ 3455 3456 rdev->sectors = sectors; 3457 if (sectors > oldsectors && my_mddev->external) { 3458 /* Need to check that all other rdevs with the same 3459 * ->bdev do not overlap. 'rcu' is sufficient to walk 3460 * the rdev lists safely. 3461 * This check does not provide a hard guarantee, it 3462 * just helps avoid dangerous mistakes. 3463 */ 3464 struct mddev *mddev; 3465 int overlap = 0; 3466 struct list_head *tmp; 3467 3468 rcu_read_lock(); 3469 for_each_mddev(mddev, tmp) { 3470 struct md_rdev *rdev2; 3471 3472 rdev_for_each(rdev2, mddev) 3473 if (rdev->bdev == rdev2->bdev && 3474 rdev != rdev2 && 3475 overlaps(rdev->data_offset, rdev->sectors, 3476 rdev2->data_offset, 3477 rdev2->sectors)) { 3478 overlap = 1; 3479 break; 3480 } 3481 if (overlap) { 3482 mddev_put(mddev); 3483 break; 3484 } 3485 } 3486 rcu_read_unlock(); 3487 if (overlap) { 3488 /* Someone else could have slipped in a size 3489 * change here, but doing so is just silly.
3490 * We put oldsectors back because we *know* it is 3491 * safe, and trust userspace not to race with 3492 * itself 3493 */ 3494 rdev->sectors = oldsectors; 3495 return -EBUSY; 3496 } 3497 } 3498 return len; 3499} 3500 3501static struct rdev_sysfs_entry rdev_size = 3502__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store); 3503 3504static ssize_t recovery_start_show(struct md_rdev *rdev, char *page) 3505{ 3506 unsigned long long recovery_start = rdev->recovery_offset; 3507 3508 if (test_bit(In_sync, &rdev->flags) || 3509 recovery_start == MaxSector) 3510 return sprintf(page, "none\n"); 3511 3512 return sprintf(page, "%llu\n", recovery_start); 3513} 3514 3515static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len) 3516{ 3517 unsigned long long recovery_start; 3518 3519 if (cmd_match(buf, "none")) 3520 recovery_start = MaxSector; 3521 else if (kstrtoull(buf, 10, &recovery_start)) 3522 return -EINVAL; 3523 3524 if (rdev->mddev->pers && 3525 rdev->raid_disk >= 0) 3526 return -EBUSY; 3527 3528 rdev->recovery_offset = recovery_start; 3529 if (recovery_start == MaxSector) 3530 set_bit(In_sync, &rdev->flags); 3531 else 3532 clear_bit(In_sync, &rdev->flags); 3533 return len; 3534} 3535 3536static struct rdev_sysfs_entry rdev_recovery_start = 3537__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store); 3538 3539/* sysfs access to bad-blocks list. 3540 * We present two files. 3541 * 'bad-blocks' lists sector numbers and lengths of ranges that 3542 * are recorded as bad. The list is truncated to fit within 3543 * the one-page limit of sysfs. 3544 * Writing "sector length" to this file adds an acknowledged 3545 * bad block list. 3546 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet 3547 * been acknowledged. Writing to this file adds bad blocks 3548 * without acknowledging them. This is largely for testing. 
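 * Example (device names are illustrative only):
 *   echo "2000000 8" > /sys/block/md0/md/dev-sda1/bad_blocks
 * records an acknowledged bad range of 8 sectors starting at sector
 * 2000000 on that member device.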
3549 */ 3550 static ssize_t bb_show(struct md_rdev *rdev, char *page) 3551 { 3552 return badblocks_show(&rdev->badblocks, page, 0); 3553 } 3554 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len) 3555 { 3556 int rv = badblocks_store(&rdev->badblocks, page, len, 0); 3557 /* Maybe that ack was all we needed */ 3558 if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags)) 3559 wake_up(&rdev->blocked_wait); 3560 return rv; 3561 } 3562 static struct rdev_sysfs_entry rdev_bad_blocks = 3563 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store); 3564 3565 static ssize_t ubb_show(struct md_rdev *rdev, char *page) 3566 { 3567 return badblocks_show(&rdev->badblocks, page, 1); 3568 } 3569 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len) 3570 { 3571 return badblocks_store(&rdev->badblocks, page, len, 1); 3572 } 3573 static struct rdev_sysfs_entry rdev_unack_bad_blocks = 3574 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store); 3575 3576 static ssize_t 3577 ppl_sector_show(struct md_rdev *rdev, char *page) 3578 { 3579 return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector); 3580 } 3581 3582 static ssize_t 3583 ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len) 3584 { 3585 unsigned long long sector; 3586 3587 if (kstrtoull(buf, 10, &sector) < 0) 3588 return -EINVAL; 3589 if (sector != (sector_t)sector) 3590 return -EINVAL; 3591 3592 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) && 3593 rdev->raid_disk >= 0) 3594 return -EBUSY; 3595 3596 if (rdev->mddev->persistent) { 3597 if (rdev->mddev->major_version == 0) 3598 return -EINVAL; 3599 if ((sector > rdev->sb_start && 3600 sector - rdev->sb_start > S16_MAX) || 3601 (sector < rdev->sb_start && 3602 rdev->sb_start - sector > -S16_MIN)) 3603 return -EINVAL; 3604 rdev->ppl.offset = sector - rdev->sb_start; 3605 } else if (!rdev->mddev->external) { 3606 return -EBUSY; 3607 } 3608 rdev->ppl.sector = sector; 3609 return len; 3610 } 3611 3612 static struct rdev_sysfs_entry rdev_ppl_sector = 3613 __ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store); 3614 3615 static ssize_t 3616 ppl_size_show(struct md_rdev *rdev, char *page) 3617 { 3618 return sprintf(page, "%u\n", rdev->ppl.size); 3619 } 3620 3621 static ssize_t 3622 ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len) 3623 { 3624 unsigned int size; 3625 3626 if (kstrtouint(buf, 10, &size) < 0) 3627 return -EINVAL; 3628 3629 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) && 3630 rdev->raid_disk >= 0) 3631 return -EBUSY; 3632 3633 if (rdev->mddev->persistent) { 3634 if (rdev->mddev->major_version == 0) 3635 return -EINVAL; 3636 if (size > U16_MAX) 3637 return -EINVAL; 3638 } else if (!rdev->mddev->external) { 3639 return -EBUSY; 3640 } 3641 rdev->ppl.size = size; 3642 return len; 3643 } 3644 3645 static struct rdev_sysfs_entry rdev_ppl_size = 3646 __ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store); 3647 3648 static struct attribute *rdev_default_attrs[] = { 3649 &rdev_state.attr, 3650 &rdev_errors.attr, 3651 &rdev_slot.attr, 3652 &rdev_offset.attr, 3653 &rdev_new_offset.attr, 3654 &rdev_size.attr, 3655 &rdev_recovery_start.attr, 3656 &rdev_bad_blocks.attr, 3657 &rdev_unack_bad_blocks.attr, 3658 &rdev_ppl_sector.attr, 3659 &rdev_ppl_size.attr, 3660 NULL, 3661 }; 3662 static ssize_t 3663 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 3664 { 3665 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 3666 struct md_rdev
*rdev = container_of(kobj, struct md_rdev, kobj); 3667 3668 if (!entry->show) 3669 return -EIO; 3670 if (!rdev->mddev) 3671 return -ENODEV; 3672 return entry->show(rdev, page); 3673} 3674 3675static ssize_t 3676rdev_attr_store(struct kobject *kobj, struct attribute *attr, 3677 const char *page, size_t length) 3678{ 3679 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 3680 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); 3681 ssize_t rv; 3682 struct mddev *mddev = rdev->mddev; 3683 3684 if (!entry->store) 3685 return -EIO; 3686 if (!capable(CAP_SYS_ADMIN)) 3687 return -EACCES; 3688 rv = mddev ? mddev_lock(mddev) : -ENODEV; 3689 if (!rv) { 3690 if (rdev->mddev == NULL) 3691 rv = -ENODEV; 3692 else 3693 rv = entry->store(rdev, page, length); 3694 mddev_unlock(mddev); 3695 } 3696 return rv; 3697} 3698 3699static void rdev_free(struct kobject *ko) 3700{ 3701 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj); 3702 kfree(rdev); 3703} 3704static const struct sysfs_ops rdev_sysfs_ops = { 3705 .show = rdev_attr_show, 3706 .store = rdev_attr_store, 3707}; 3708static struct kobj_type rdev_ktype = { 3709 .release = rdev_free, 3710 .sysfs_ops = &rdev_sysfs_ops, 3711 .default_attrs = rdev_default_attrs, 3712}; 3713 3714int md_rdev_init(struct md_rdev *rdev) 3715{ 3716 rdev->desc_nr = -1; 3717 rdev->saved_raid_disk = -1; 3718 rdev->raid_disk = -1; 3719 rdev->flags = 0; 3720 rdev->data_offset = 0; 3721 rdev->new_data_offset = 0; 3722 rdev->sb_events = 0; 3723 rdev->last_read_error = 0; 3724 rdev->sb_loaded = 0; 3725 rdev->bb_page = NULL; 3726 atomic_set(&rdev->nr_pending, 0); 3727 atomic_set(&rdev->read_errors, 0); 3728 atomic_set(&rdev->corrected_errors, 0); 3729 3730 INIT_LIST_HEAD(&rdev->same_set); 3731 init_waitqueue_head(&rdev->blocked_wait); 3732 3733 /* Add space to store bad block list. 3734 * This reserves the space even on arrays where it cannot 3735 * be used - I wonder if that matters 3736 */ 3737 return badblocks_init(&rdev->badblocks, 0); 3738} 3739EXPORT_SYMBOL_GPL(md_rdev_init); 3740/* 3741 * Import a device. If 'super_format' >= 0, then sanity check the superblock 3742 * 3743 * mark the device faulty if: 3744 * 3745 * - the device is nonexistent (zero size) 3746 * - the device has no valid superblock 3747 * 3748 * a faulty rdev _never_ has rdev->sb set. 3749 */ 3750static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor) 3751{ 3752 char b[BDEVNAME_SIZE]; 3753 int err; 3754 struct md_rdev *rdev; 3755 sector_t size; 3756 3757 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); 3758 if (!rdev) 3759 return ERR_PTR(-ENOMEM); 3760 3761 err = md_rdev_init(rdev); 3762 if (err) 3763 goto abort_free; 3764 err = alloc_disk_sb(rdev); 3765 if (err) 3766 goto abort_free; 3767 3768 err = lock_rdev(rdev, newdev, super_format == -2); 3769 if (err) 3770 goto abort_free; 3771 3772 kobject_init(&rdev->kobj, &rdev_ktype); 3773 3774 size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS; 3775 if (!size) { 3776 pr_warn("md: %s has zero or unknown size, marking faulty!\n", 3777 bdevname(rdev->bdev,b)); 3778 err = -EINVAL; 3779 goto abort_free; 3780 } 3781 3782 if (super_format >= 0) { 3783 err = super_types[super_format]. 
			load_super(rdev, NULL, super_minor);
		if (err == -EINVAL) {
			pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
				bdevname(rdev->bdev,b),
				super_format, super_minor);
			goto abort_free;
		}
		if (err < 0) {
			pr_warn("md: could not read %s's sb, not importing!\n",
				bdevname(rdev->bdev,b));
			goto abort_free;
		}
	}

	return rdev;

abort_free:
	if (rdev->bdev)
		unlock_rdev(rdev);
	md_rdev_clear(rdev);
	kfree(rdev);
	return ERR_PTR(err);
}

/*
 * Check a full RAID array for plausibility
 */

static int analyze_sbs(struct mddev *mddev)
{
	int i;
	struct md_rdev *rdev, *freshest, *tmp;
	char b[BDEVNAME_SIZE];

	freshest = NULL;
	rdev_for_each_safe(rdev, tmp, mddev)
		switch (super_types[mddev->major_version].
			load_super(rdev, freshest, mddev->minor_version)) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
				bdevname(rdev->bdev,b));
			md_kick_rdev_from_array(rdev);
		}

	/* Cannot find a valid fresh disk */
	if (!freshest) {
		pr_warn("md: cannot find a valid disk\n");
		return -EINVAL;
	}

	super_types[mddev->major_version].
		validate_super(mddev, NULL/*freshest*/, freshest);

	i = 0;
	rdev_for_each_safe(rdev, tmp, mddev) {
		if (mddev->max_disks &&
		    (rdev->desc_nr >= mddev->max_disks ||
		     i > mddev->max_disks)) {
			pr_warn("md: %s: %s: only %d devices permitted\n",
				mdname(mddev), bdevname(rdev->bdev, b),
				mddev->max_disks);
			md_kick_rdev_from_array(rdev);
			continue;
		}
		if (rdev != freshest) {
			if (super_types[mddev->major_version].
			    validate_super(mddev, freshest, rdev)) {
				pr_warn("md: kicking non-fresh %s from array!\n",
					bdevname(rdev->bdev,b));
				md_kick_rdev_from_array(rdev);
				continue;
			}
		}
		if (mddev->level == LEVEL_MULTIPATH) {
			rdev->desc_nr = i++;
			rdev->raid_disk = rdev->desc_nr;
			set_bit(In_sync, &rdev->flags);
		} else if (rdev->raid_disk >=
			   (mddev->raid_disks - min(0, mddev->delta_disks)) &&
			   !test_bit(Journal, &rdev->flags)) {
			rdev->raid_disk = -1;
			clear_bit(In_sync, &rdev->flags);
		}
	}

	return 0;
}

/* Read a fixed-point number.
 * Numbers in sysfs attributes should be in "standard" units where
 * possible, so time should be in seconds.
 * However we internally use a much smaller unit such as
 * milliseconds or jiffies.
 * This function takes a decimal number with a possible fractional
 * component, and produces an integer which is the result of
 * multiplying that number by 10^'scale',
 * all without any floating-point arithmetic.
 * e.g. "1.5" with a scale of 3 yields 1500, and "0.25" yields 250.
 */
int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
{
	unsigned long result = 0;
	long decimals = -1;
	while (isdigit(*cp) || (*cp == '.
&& decimals < 0)) { 3892 if (*cp == '.') 3893 decimals = 0; 3894 else if (decimals < scale) { 3895 unsigned int value; 3896 value = *cp - '0'; 3897 result = result * 10 + value; 3898 if (decimals >= 0) 3899 decimals++; 3900 } 3901 cp++; 3902 } 3903 if (*cp == '\n') 3904 cp++; 3905 if (*cp) 3906 return -EINVAL; 3907 if (decimals < 0) 3908 decimals = 0; 3909 *res = result * int_pow(10, scale - decimals); 3910 return 0; 3911} 3912 3913static ssize_t 3914safe_delay_show(struct mddev *mddev, char *page) 3915{ 3916 unsigned int msec = ((unsigned long)mddev->safemode_delay*1000)/HZ; 3917 3918 return sprintf(page, "%u.%03u\n", msec/1000, msec%1000); 3919} 3920static ssize_t 3921safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) 3922{ 3923 unsigned long msec; 3924 3925 if (mddev_is_clustered(mddev)) { 3926 pr_warn("md: Safemode is disabled for clustered mode\n"); 3927 return -EINVAL; 3928 } 3929 3930 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0 || msec > UINT_MAX / HZ) 3931 return -EINVAL; 3932 if (msec == 0) 3933 mddev->safemode_delay = 0; 3934 else { 3935 unsigned long old_delay = mddev->safemode_delay; 3936 unsigned long new_delay = (msec*HZ)/1000; 3937 3938 if (new_delay == 0) 3939 new_delay = 1; 3940 mddev->safemode_delay = new_delay; 3941 if (new_delay < old_delay || old_delay == 0) 3942 mod_timer(&mddev->safemode_timer, jiffies+1); 3943 } 3944 return len; 3945} 3946static struct md_sysfs_entry md_safe_delay = 3947__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store); 3948 3949static ssize_t 3950level_show(struct mddev *mddev, char *page) 3951{ 3952 struct md_personality *p; 3953 int ret; 3954 spin_lock(&mddev->lock); 3955 p = mddev->pers; 3956 if (p) 3957 ret = sprintf(page, "%s\n", p->name); 3958 else if (mddev->clevel[0]) 3959 ret = sprintf(page, "%s\n", mddev->clevel); 3960 else if (mddev->level != LEVEL_NONE) 3961 ret = sprintf(page, "%d\n", mddev->level); 3962 else 3963 ret = 0; 3964 spin_unlock(&mddev->lock); 3965 return ret; 3966} 3967 3968static ssize_t 3969level_store(struct mddev *mddev, const char *buf, size_t len) 3970{ 3971 char clevel[16]; 3972 ssize_t rv; 3973 size_t slen = len; 3974 struct md_personality *pers, *oldpers; 3975 long level; 3976 void *priv, *oldpriv; 3977 struct md_rdev *rdev; 3978 3979 if (slen == 0 || slen >= sizeof(clevel)) 3980 return -EINVAL; 3981 3982 rv = mddev_lock(mddev); 3983 if (rv) 3984 return rv; 3985 3986 if (mddev->pers == NULL) { 3987 strncpy(mddev->clevel, buf, slen); 3988 if (mddev->clevel[slen-1] == '\n') 3989 slen--; 3990 mddev->clevel[slen] = 0; 3991 mddev->level = LEVEL_NONE; 3992 rv = len; 3993 goto out_unlock; 3994 } 3995 rv = -EROFS; 3996 if (mddev->ro) 3997 goto out_unlock; 3998 3999 /* request to change the personality. Need to ensure: 4000 * - array is not engaged in resync/recovery/reshape 4001 * - old personality can be suspended 4002 * - new personality will access other array. 
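	 *
	 * Illustrative usage, not part of the original source (the md0
	 * device name and the raid5 -> raid6 takeover are only examples):
	 * user space normally triggers this path with something like
	 *   echo raid6 > /sys/block/md0/md/level
	 * which is handled by this function.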
4003 */ 4004 4005 rv = -EBUSY; 4006 if (mddev->sync_thread || 4007 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 4008 mddev->reshape_position != MaxSector || 4009 mddev->sysfs_active) 4010 goto out_unlock; 4011 4012 rv = -EINVAL; 4013 if (!mddev->pers->quiesce) { 4014 pr_warn("md: %s: %s does not support online personality change\n", 4015 mdname(mddev), mddev->pers->name); 4016 goto out_unlock; 4017 } 4018 4019 /* Now find the new personality */ 4020 strncpy(clevel, buf, slen); 4021 if (clevel[slen-1] == '\n') 4022 slen--; 4023 clevel[slen] = 0; 4024 if (kstrtol(clevel, 10, &level)) 4025 level = LEVEL_NONE; 4026 4027 if (request_module("md-%s", clevel) != 0) 4028 request_module("md-level-%s", clevel); 4029 spin_lock(&pers_lock); 4030 pers = find_pers(level, clevel); 4031 if (!pers || !try_module_get(pers->owner)) { 4032 spin_unlock(&pers_lock); 4033 pr_warn("md: personality %s not loaded\n", clevel); 4034 rv = -EINVAL; 4035 goto out_unlock; 4036 } 4037 spin_unlock(&pers_lock); 4038 4039 if (pers == mddev->pers) { 4040 /* Nothing to do! */ 4041 module_put(pers->owner); 4042 rv = len; 4043 goto out_unlock; 4044 } 4045 if (!pers->takeover) { 4046 module_put(pers->owner); 4047 pr_warn("md: %s: %s does not support personality takeover\n", 4048 mdname(mddev), clevel); 4049 rv = -EINVAL; 4050 goto out_unlock; 4051 } 4052 4053 rdev_for_each(rdev, mddev) 4054 rdev->new_raid_disk = rdev->raid_disk; 4055 4056 /* ->takeover must set new_* and/or delta_disks 4057 * if it succeeds, and may set them when it fails. 4058 */ 4059 priv = pers->takeover(mddev); 4060 if (IS_ERR(priv)) { 4061 mddev->new_level = mddev->level; 4062 mddev->new_layout = mddev->layout; 4063 mddev->new_chunk_sectors = mddev->chunk_sectors; 4064 mddev->raid_disks -= mddev->delta_disks; 4065 mddev->delta_disks = 0; 4066 mddev->reshape_backwards = 0; 4067 module_put(pers->owner); 4068 pr_warn("md: %s: %s would not accept array\n", 4069 mdname(mddev), clevel); 4070 rv = PTR_ERR(priv); 4071 goto out_unlock; 4072 } 4073 4074 /* Looks like we have a winner */ 4075 mddev_suspend(mddev); 4076 mddev_detach(mddev); 4077 4078 spin_lock(&mddev->lock); 4079 oldpers = mddev->pers; 4080 oldpriv = mddev->private; 4081 mddev->pers = pers; 4082 mddev->private = priv; 4083 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 4084 mddev->level = mddev->new_level; 4085 mddev->layout = mddev->new_layout; 4086 mddev->chunk_sectors = mddev->new_chunk_sectors; 4087 mddev->delta_disks = 0; 4088 mddev->reshape_backwards = 0; 4089 mddev->degraded = 0; 4090 spin_unlock(&mddev->lock); 4091 4092 if (oldpers->sync_request == NULL && 4093 mddev->external) { 4094 /* We are converting from a no-redundancy array 4095 * to a redundancy array and metadata is managed 4096 * externally so we need to be sure that writes 4097 * won't block due to a need to transition 4098 * clean->dirty 4099 * until external management is started. 
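	 * (Descriptive note: the assignments just below clear in_sync and
	 * disable safemode, so no clean->dirty transition has to be
	 * written by the kernel before the external manager takes over.)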
4100 */ 4101 mddev->in_sync = 0; 4102 mddev->safemode_delay = 0; 4103 mddev->safemode = 0; 4104 } 4105 4106 oldpers->free(mddev, oldpriv); 4107 4108 if (oldpers->sync_request == NULL && 4109 pers->sync_request != NULL) { 4110 /* need to add the md_redundancy_group */ 4111 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 4112 pr_warn("md: cannot register extra attributes for %s\n", 4113 mdname(mddev)); 4114 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); 4115 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); 4116 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); 4117 } 4118 if (oldpers->sync_request != NULL && 4119 pers->sync_request == NULL) { 4120 /* need to remove the md_redundancy_group */ 4121 if (mddev->to_remove == NULL) 4122 mddev->to_remove = &md_redundancy_group; 4123 } 4124 4125 module_put(oldpers->owner); 4126 4127 rdev_for_each(rdev, mddev) { 4128 if (rdev->raid_disk < 0) 4129 continue; 4130 if (rdev->new_raid_disk >= mddev->raid_disks) 4131 rdev->new_raid_disk = -1; 4132 if (rdev->new_raid_disk == rdev->raid_disk) 4133 continue; 4134 sysfs_unlink_rdev(mddev, rdev); 4135 } 4136 rdev_for_each(rdev, mddev) { 4137 if (rdev->raid_disk < 0) 4138 continue; 4139 if (rdev->new_raid_disk == rdev->raid_disk) 4140 continue; 4141 rdev->raid_disk = rdev->new_raid_disk; 4142 if (rdev->raid_disk < 0) 4143 clear_bit(In_sync, &rdev->flags); 4144 else { 4145 if (sysfs_link_rdev(mddev, rdev)) 4146 pr_warn("md: cannot register rd%d for %s after level change\n", 4147 rdev->raid_disk, mdname(mddev)); 4148 } 4149 } 4150 4151 if (pers->sync_request == NULL) { 4152 /* this is now an array without redundancy, so 4153 * it must always be in_sync 4154 */ 4155 mddev->in_sync = 1; 4156 del_timer_sync(&mddev->safemode_timer); 4157 } 4158 blk_set_stacking_limits(&mddev->queue->limits); 4159 pers->run(mddev); 4160 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 4161 mddev_resume(mddev); 4162 if (!mddev->thread) 4163 md_update_sb(mddev, 1); 4164 sysfs_notify_dirent_safe(mddev->sysfs_level); 4165 md_new_event(mddev); 4166 rv = len; 4167out_unlock: 4168 mddev_unlock(mddev); 4169 return rv; 4170} 4171 4172static struct md_sysfs_entry md_level = 4173__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store); 4174 4175static ssize_t 4176layout_show(struct mddev *mddev, char *page) 4177{ 4178 /* just a number, not meaningful for all levels */ 4179 if (mddev->reshape_position != MaxSector && 4180 mddev->layout != mddev->new_layout) 4181 return sprintf(page, "%d (%d)\n", 4182 mddev->new_layout, mddev->layout); 4183 return sprintf(page, "%d\n", mddev->layout); 4184} 4185 4186static ssize_t 4187layout_store(struct mddev *mddev, const char *buf, size_t len) 4188{ 4189 unsigned int n; 4190 int err; 4191 4192 err = kstrtouint(buf, 10, &n); 4193 if (err < 0) 4194 return err; 4195 err = mddev_lock(mddev); 4196 if (err) 4197 return err; 4198 4199 if (mddev->pers) { 4200 if (mddev->pers->check_reshape == NULL) 4201 err = -EBUSY; 4202 else if (mddev->ro) 4203 err = -EROFS; 4204 else { 4205 mddev->new_layout = n; 4206 err = mddev->pers->check_reshape(mddev); 4207 if (err) 4208 mddev->new_layout = mddev->layout; 4209 } 4210 } else { 4211 mddev->new_layout = n; 4212 if (mddev->reshape_position == MaxSector) 4213 mddev->layout = n; 4214 } 4215 mddev_unlock(mddev); 4216 return err ?: len; 4217} 4218static struct md_sysfs_entry md_layout = 4219__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store); 4220 4221static ssize_t 
4222raid_disks_show(struct mddev *mddev, char *page) 4223{ 4224 if (mddev->raid_disks == 0) 4225 return 0; 4226 if (mddev->reshape_position != MaxSector && 4227 mddev->delta_disks != 0) 4228 return sprintf(page, "%d (%d)\n", mddev->raid_disks, 4229 mddev->raid_disks - mddev->delta_disks); 4230 return sprintf(page, "%d\n", mddev->raid_disks); 4231} 4232 4233static int update_raid_disks(struct mddev *mddev, int raid_disks); 4234 4235static ssize_t 4236raid_disks_store(struct mddev *mddev, const char *buf, size_t len) 4237{ 4238 unsigned int n; 4239 int err; 4240 4241 err = kstrtouint(buf, 10, &n); 4242 if (err < 0) 4243 return err; 4244 4245 err = mddev_lock(mddev); 4246 if (err) 4247 return err; 4248 if (mddev->pers) 4249 err = update_raid_disks(mddev, n); 4250 else if (mddev->reshape_position != MaxSector) { 4251 struct md_rdev *rdev; 4252 int olddisks = mddev->raid_disks - mddev->delta_disks; 4253 4254 err = -EINVAL; 4255 rdev_for_each(rdev, mddev) { 4256 if (olddisks < n && 4257 rdev->data_offset < rdev->new_data_offset) 4258 goto out_unlock; 4259 if (olddisks > n && 4260 rdev->data_offset > rdev->new_data_offset) 4261 goto out_unlock; 4262 } 4263 err = 0; 4264 mddev->delta_disks = n - olddisks; 4265 mddev->raid_disks = n; 4266 mddev->reshape_backwards = (mddev->delta_disks < 0); 4267 } else 4268 mddev->raid_disks = n; 4269out_unlock: 4270 mddev_unlock(mddev); 4271 return err ? err : len; 4272} 4273static struct md_sysfs_entry md_raid_disks = 4274__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store); 4275 4276static ssize_t 4277uuid_show(struct mddev *mddev, char *page) 4278{ 4279 return sprintf(page, "%pU\n", mddev->uuid); 4280} 4281static struct md_sysfs_entry md_uuid = 4282__ATTR(uuid, S_IRUGO, uuid_show, NULL); 4283 4284static ssize_t 4285chunk_size_show(struct mddev *mddev, char *page) 4286{ 4287 if (mddev->reshape_position != MaxSector && 4288 mddev->chunk_sectors != mddev->new_chunk_sectors) 4289 return sprintf(page, "%d (%d)\n", 4290 mddev->new_chunk_sectors << 9, 4291 mddev->chunk_sectors << 9); 4292 return sprintf(page, "%d\n", mddev->chunk_sectors << 9); 4293} 4294 4295static ssize_t 4296chunk_size_store(struct mddev *mddev, const char *buf, size_t len) 4297{ 4298 unsigned long n; 4299 int err; 4300 4301 err = kstrtoul(buf, 10, &n); 4302 if (err < 0) 4303 return err; 4304 4305 err = mddev_lock(mddev); 4306 if (err) 4307 return err; 4308 if (mddev->pers) { 4309 if (mddev->pers->check_reshape == NULL) 4310 err = -EBUSY; 4311 else if (mddev->ro) 4312 err = -EROFS; 4313 else { 4314 mddev->new_chunk_sectors = n >> 9; 4315 err = mddev->pers->check_reshape(mddev); 4316 if (err) 4317 mddev->new_chunk_sectors = mddev->chunk_sectors; 4318 } 4319 } else { 4320 mddev->new_chunk_sectors = n >> 9; 4321 if (mddev->reshape_position == MaxSector) 4322 mddev->chunk_sectors = n >> 9; 4323 } 4324 mddev_unlock(mddev); 4325 return err ?: len; 4326} 4327static struct md_sysfs_entry md_chunk_size = 4328__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store); 4329 4330static ssize_t 4331resync_start_show(struct mddev *mddev, char *page) 4332{ 4333 if (mddev->recovery_cp == MaxSector) 4334 return sprintf(page, "none\n"); 4335 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); 4336} 4337 4338static ssize_t 4339resync_start_store(struct mddev *mddev, const char *buf, size_t len) 4340{ 4341 unsigned long long n; 4342 int err; 4343 4344 if (cmd_match(buf, "none")) 4345 n = MaxSector; 4346 else { 4347 err = kstrtoull(buf, 10, &n); 4348 if (err < 0) 4349 
return err; 4350 if (n != (sector_t)n) 4351 return -EINVAL; 4352 } 4353 4354 err = mddev_lock(mddev); 4355 if (err) 4356 return err; 4357 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 4358 err = -EBUSY; 4359 4360 if (!err) { 4361 mddev->recovery_cp = n; 4362 if (mddev->pers) 4363 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 4364 } 4365 mddev_unlock(mddev); 4366 return err ?: len; 4367} 4368static struct md_sysfs_entry md_resync_start = 4369__ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR, 4370 resync_start_show, resync_start_store); 4371 4372/* 4373 * The array state can be: 4374 * 4375 * clear 4376 * No devices, no size, no level 4377 * Equivalent to STOP_ARRAY ioctl 4378 * inactive 4379 * May have some settings, but array is not active 4380 * all IO results in error 4381 * When written, doesn't tear down array, but just stops it 4382 * suspended (not supported yet) 4383 * All IO requests will block. The array can be reconfigured. 4384 * Writing this, if accepted, will block until array is quiescent 4385 * readonly 4386 * no resync can happen. no superblocks get written. 4387 * write requests fail 4388 * read-auto 4389 * like readonly, but behaves like 'clean' on a write request. 4390 * 4391 * clean - no pending writes, but otherwise active. 4392 * When written to inactive array, starts without resync 4393 * If a write request arrives then 4394 * if metadata is known, mark 'dirty' and switch to 'active'. 4395 * if not known, block and switch to write-pending 4396 * If written to an active array that has pending writes, then fails. 4397 * active 4398 * fully active: IO and resync can be happening. 4399 * When written to inactive array, starts with resync 4400 * 4401 * write-pending 4402 * clean, but writes are blocked waiting for 'active' to be written. 4403 * 4404 * active-idle 4405 * like active, but no writes have been seen for a while (100msec). 4406 * 4407 * broken 4408 * RAID0/LINEAR-only: same as clean, but array is missing a member. 4409 * It's useful because RAID0/LINEAR mounted-arrays aren't stopped 4410 * when a member is gone, so this state will at least alert the 4411 * user that something is wrong. 
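 *
 * The state is exposed through /sys/block/mdX/md/array_state: reading
 * the file reports the current state, and writing one of the names
 * above requests a transition, subject to the rules described here.
 * Illustrative example (md0 is only an example device):
 *   cat /sys/block/md0/md/array_state
 *   echo readonly > /sys/block/md0/md/array_state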
4412 */ 4413enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active, 4414 write_pending, active_idle, broken, bad_word}; 4415static char *array_states[] = { 4416 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active", 4417 "write-pending", "active-idle", "broken", NULL }; 4418 4419static int match_word(const char *word, char **list) 4420{ 4421 int n; 4422 for (n=0; list[n]; n++) 4423 if (cmd_match(word, list[n])) 4424 break; 4425 return n; 4426} 4427 4428static ssize_t 4429array_state_show(struct mddev *mddev, char *page) 4430{ 4431 enum array_state st = inactive; 4432 4433 if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) { 4434 switch(mddev->ro) { 4435 case 1: 4436 st = readonly; 4437 break; 4438 case 2: 4439 st = read_auto; 4440 break; 4441 case 0: 4442 spin_lock(&mddev->lock); 4443 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) 4444 st = write_pending; 4445 else if (mddev->in_sync) 4446 st = clean; 4447 else if (mddev->safemode) 4448 st = active_idle; 4449 else 4450 st = active; 4451 spin_unlock(&mddev->lock); 4452 } 4453 4454 if (test_bit(MD_BROKEN, &mddev->flags) && st == clean) 4455 st = broken; 4456 } else { 4457 if (list_empty(&mddev->disks) && 4458 mddev->raid_disks == 0 && 4459 mddev->dev_sectors == 0) 4460 st = clear; 4461 else 4462 st = inactive; 4463 } 4464 return sprintf(page, "%s\n", array_states[st]); 4465} 4466 4467static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev); 4468static int md_set_readonly(struct mddev *mddev, struct block_device *bdev); 4469static int restart_array(struct mddev *mddev); 4470 4471static ssize_t 4472array_state_store(struct mddev *mddev, const char *buf, size_t len) 4473{ 4474 int err = 0; 4475 enum array_state st = match_word(buf, array_states); 4476 4477 if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) { 4478 /* don't take reconfig_mutex when toggling between 4479 * clean and active 4480 */ 4481 spin_lock(&mddev->lock); 4482 if (st == active) { 4483 restart_array(mddev); 4484 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 4485 md_wakeup_thread(mddev->thread); 4486 wake_up(&mddev->sb_wait); 4487 } else /* st == clean */ { 4488 restart_array(mddev); 4489 if (!set_in_sync(mddev)) 4490 err = -EBUSY; 4491 } 4492 if (!err) 4493 sysfs_notify_dirent_safe(mddev->sysfs_state); 4494 spin_unlock(&mddev->lock); 4495 return err ?: len; 4496 } 4497 err = mddev_lock(mddev); 4498 if (err) 4499 return err; 4500 err = -EINVAL; 4501 switch(st) { 4502 case bad_word: 4503 break; 4504 case clear: 4505 /* stopping an active array */ 4506 err = do_md_stop(mddev, 0, NULL); 4507 break; 4508 case inactive: 4509 /* stopping an active array */ 4510 if (mddev->pers) 4511 err = do_md_stop(mddev, 2, NULL); 4512 else 4513 err = 0; /* already inactive */ 4514 break; 4515 case suspended: 4516 break; /* not supported yet */ 4517 case readonly: 4518 if (mddev->pers) 4519 err = md_set_readonly(mddev, NULL); 4520 else { 4521 mddev->ro = 1; 4522 set_disk_ro(mddev->gendisk, 1); 4523 err = do_md_run(mddev); 4524 } 4525 break; 4526 case read_auto: 4527 if (mddev->pers) { 4528 if (mddev->ro == 0) 4529 err = md_set_readonly(mddev, NULL); 4530 else if (mddev->ro == 1) 4531 err = restart_array(mddev); 4532 if (err == 0) { 4533 mddev->ro = 2; 4534 set_disk_ro(mddev->gendisk, 0); 4535 } 4536 } else { 4537 mddev->ro = 2; 4538 err = do_md_run(mddev); 4539 } 4540 break; 4541 case clean: 4542 if (mddev->pers) { 4543 err = restart_array(mddev); 4544 if (err) 4545 break; 4546 
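			/*
			 * Descriptive note: try to mark the array clean right
			 * away; if writes are still outstanding set_in_sync()
			 * does not succeed and -EBUSY is returned instead.
			 */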
spin_lock(&mddev->lock); 4547 if (!set_in_sync(mddev)) 4548 err = -EBUSY; 4549 spin_unlock(&mddev->lock); 4550 } else 4551 err = -EINVAL; 4552 break; 4553 case active: 4554 if (mddev->pers) { 4555 err = restart_array(mddev); 4556 if (err) 4557 break; 4558 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 4559 wake_up(&mddev->sb_wait); 4560 err = 0; 4561 } else { 4562 mddev->ro = 0; 4563 set_disk_ro(mddev->gendisk, 0); 4564 err = do_md_run(mddev); 4565 } 4566 break; 4567 case write_pending: 4568 case active_idle: 4569 case broken: 4570 /* these cannot be set */ 4571 break; 4572 } 4573 4574 if (!err) { 4575 if (mddev->hold_active == UNTIL_IOCTL) 4576 mddev->hold_active = 0; 4577 sysfs_notify_dirent_safe(mddev->sysfs_state); 4578 } 4579 mddev_unlock(mddev); 4580 return err ?: len; 4581} 4582static struct md_sysfs_entry md_array_state = 4583__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); 4584 4585static ssize_t 4586max_corrected_read_errors_show(struct mddev *mddev, char *page) { 4587 return sprintf(page, "%d\n", 4588 atomic_read(&mddev->max_corr_read_errors)); 4589} 4590 4591static ssize_t 4592max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len) 4593{ 4594 unsigned int n; 4595 int rv; 4596 4597 rv = kstrtouint(buf, 10, &n); 4598 if (rv < 0) 4599 return rv; 4600 if (n > INT_MAX) 4601 return -EINVAL; 4602 atomic_set(&mddev->max_corr_read_errors, n); 4603 return len; 4604} 4605 4606static struct md_sysfs_entry max_corr_read_errors = 4607__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show, 4608 max_corrected_read_errors_store); 4609 4610static ssize_t 4611null_show(struct mddev *mddev, char *page) 4612{ 4613 return -EINVAL; 4614} 4615 4616/* need to ensure rdev_delayed_delete() has completed */ 4617static void flush_rdev_wq(struct mddev *mddev) 4618{ 4619 struct md_rdev *rdev; 4620 4621 rcu_read_lock(); 4622 rdev_for_each_rcu(rdev, mddev) 4623 if (work_pending(&rdev->del_work)) { 4624 flush_workqueue(md_rdev_misc_wq); 4625 break; 4626 } 4627 rcu_read_unlock(); 4628} 4629 4630static ssize_t 4631new_dev_store(struct mddev *mddev, const char *buf, size_t len) 4632{ 4633 /* buf must be %d:%d\n? giving major and minor numbers */ 4634 /* The new device is added to the array. 4635 * If the array has a persistent superblock, we read the 4636 * superblock to initialise info and check validity. 4637 * Otherwise, only checking done is that in bind_rdev_to_array, 4638 * which mainly checks size. 
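	 * Illustrative example (the 8:16 pair, i.e. /dev/sdb on most
	 * systems, is only an example):
	 *   echo 8:16 > /sys/block/md0/md/new_dev
	 * imports the block device with major 8, minor 16 into the array.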
	 */
	char *e;
	int major = simple_strtoul(buf, &e, 10);
	int minor;
	dev_t dev;
	struct md_rdev *rdev;
	int err;

	if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
		return -EINVAL;
	minor = simple_strtoul(e+1, &e, 10);
	if (*e && *e != '\n')
		return -EINVAL;
	dev = MKDEV(major, minor);
	if (major != MAJOR(dev) ||
	    minor != MINOR(dev))
		return -EOVERFLOW;

	flush_rdev_wq(mddev);
	err = mddev_lock(mddev);
	if (err)
		return err;
	if (mddev->persistent) {
		rdev = md_import_device(dev, mddev->major_version,
					mddev->minor_version);
		if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
			struct md_rdev *rdev0
				= list_entry(mddev->disks.next,
					     struct md_rdev, same_set);
			err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0)
				goto out;
		}
	} else if (mddev->external)
		rdev = md_import_device(dev, -2, -1);
	else
		rdev = md_import_device(dev, -1, -1);

	if (IS_ERR(rdev)) {
		mddev_unlock(mddev);
		return PTR_ERR(rdev);
	}
	err = bind_rdev_to_array(rdev, mddev);
 out:
	if (err)
		export_rdev(rdev);
	mddev_unlock(mddev);
	if (!err)
		md_new_event(mddev);
	return err ? err : len;
}

static struct md_sysfs_entry md_new_device =
__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);

static ssize_t
bitmap_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *end;
	unsigned long chunk, end_chunk;
	int err;

	err = mddev_lock(mddev);
	if (err)
		return err;
	if (!mddev->bitmap)
		goto out;
	/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
	while (*buf) {
		chunk = end_chunk = simple_strtoul(buf, &end, 0);
		if (buf == end) break;
		if (*end == '-') { /* range */
			buf = end + 1;
			end_chunk = simple_strtoul(buf, &end, 0);
			if (buf == end) break;
		}
		if (*end && !isspace(*end)) break;
		md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
		buf = skip_spaces(end);
	}
	md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
	mddev_unlock(mddev);
	return len;
}

static struct md_sysfs_entry md_bitmap =
__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);

static ssize_t
size_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)mddev->dev_sectors / 2);
}

static int update_size(struct mddev *mddev, sector_t num_sectors);

static ssize_t
size_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* If array is inactive, we can reduce the component size, but
	 * not increase it (except from 0).
	 * If array is active, we can try an on-line resize
	 */
	sector_t sectors;
	int err = strict_blocks_to_sectors(buf, &sectors);

	if (err < 0)
		return err;
	err = mddev_lock(mddev);
	if (err)
		return err;
	if (mddev->pers) {
		err = update_size(mddev, sectors);
		if (err == 0)
			md_update_sb(mddev, 1);
	} else {
		if (mddev->dev_sectors == 0 ||
		    mddev->dev_sectors > sectors)
			mddev->dev_sectors = sectors;
		else
			err = -ENOSPC;
	}
	mddev_unlock(mddev);
	return err ?
err : len; 4766} 4767 4768static struct md_sysfs_entry md_size = 4769__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store); 4770 4771/* Metadata version. 4772 * This is one of 4773 * 'none' for arrays with no metadata (good luck...) 4774 * 'external' for arrays with externally managed metadata, 4775 * or N.M for internally known formats 4776 */ 4777static ssize_t 4778metadata_show(struct mddev *mddev, char *page) 4779{ 4780 if (mddev->persistent) 4781 return sprintf(page, "%d.%d\n", 4782 mddev->major_version, mddev->minor_version); 4783 else if (mddev->external) 4784 return sprintf(page, "external:%s\n", mddev->metadata_type); 4785 else 4786 return sprintf(page, "none\n"); 4787} 4788 4789static ssize_t 4790metadata_store(struct mddev *mddev, const char *buf, size_t len) 4791{ 4792 int major, minor; 4793 char *e; 4794 int err; 4795 /* Changing the details of 'external' metadata is 4796 * always permitted. Otherwise there must be 4797 * no devices attached to the array. 4798 */ 4799 4800 err = mddev_lock(mddev); 4801 if (err) 4802 return err; 4803 err = -EBUSY; 4804 if (mddev->external && strncmp(buf, "external:", 9) == 0) 4805 ; 4806 else if (!list_empty(&mddev->disks)) 4807 goto out_unlock; 4808 4809 err = 0; 4810 if (cmd_match(buf, "none")) { 4811 mddev->persistent = 0; 4812 mddev->external = 0; 4813 mddev->major_version = 0; 4814 mddev->minor_version = 90; 4815 goto out_unlock; 4816 } 4817 if (strncmp(buf, "external:", 9) == 0) { 4818 size_t namelen = len-9; 4819 if (namelen >= sizeof(mddev->metadata_type)) 4820 namelen = sizeof(mddev->metadata_type)-1; 4821 strncpy(mddev->metadata_type, buf+9, namelen); 4822 mddev->metadata_type[namelen] = 0; 4823 if (namelen && mddev->metadata_type[namelen-1] == '\n') 4824 mddev->metadata_type[--namelen] = 0; 4825 mddev->persistent = 0; 4826 mddev->external = 1; 4827 mddev->major_version = 0; 4828 mddev->minor_version = 90; 4829 goto out_unlock; 4830 } 4831 major = simple_strtoul(buf, &e, 10); 4832 err = -EINVAL; 4833 if (e==buf || *e != '.') 4834 goto out_unlock; 4835 buf = e+1; 4836 minor = simple_strtoul(buf, &e, 10); 4837 if (e==buf || (*e && *e != '\n') ) 4838 goto out_unlock; 4839 err = -ENOENT; 4840 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL) 4841 goto out_unlock; 4842 mddev->major_version = major; 4843 mddev->minor_version = minor; 4844 mddev->persistent = 1; 4845 mddev->external = 0; 4846 err = 0; 4847out_unlock: 4848 mddev_unlock(mddev); 4849 return err ?: len; 4850} 4851 4852static struct md_sysfs_entry md_metadata = 4853__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); 4854 4855static ssize_t 4856action_show(struct mddev *mddev, char *page) 4857{ 4858 char *type = "idle"; 4859 unsigned long recovery = mddev->recovery; 4860 if (test_bit(MD_RECOVERY_FROZEN, &recovery)) 4861 type = "frozen"; 4862 else if (test_bit(MD_RECOVERY_RUNNING, &recovery) || 4863 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) { 4864 if (test_bit(MD_RECOVERY_RESHAPE, &recovery)) 4865 type = "reshape"; 4866 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) { 4867 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery)) 4868 type = "resync"; 4869 else if (test_bit(MD_RECOVERY_CHECK, &recovery)) 4870 type = "check"; 4871 else 4872 type = "repair"; 4873 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) 4874 type = "recover"; 4875 else if (mddev->reshape_position != MaxSector) 4876 type = "reshape"; 4877 } 4878 return sprintf(page, "%s\n", type); 4879} 4880 4881static ssize_t 4882action_store(struct 
mddev *mddev, const char *page, size_t len) 4883{ 4884 if (!mddev->pers || !mddev->pers->sync_request) 4885 return -EINVAL; 4886 4887 4888 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { 4889 if (cmd_match(page, "frozen")) 4890 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4891 else 4892 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4893 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 4894 mddev_lock(mddev) == 0) { 4895 if (work_pending(&mddev->del_work)) 4896 flush_workqueue(md_misc_wq); 4897 if (mddev->sync_thread) { 4898 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4899 md_reap_sync_thread(mddev); 4900 } 4901 mddev_unlock(mddev); 4902 } 4903 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4904 return -EBUSY; 4905 else if (cmd_match(page, "resync")) 4906 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4907 else if (cmd_match(page, "recover")) { 4908 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4909 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 4910 } else if (cmd_match(page, "reshape")) { 4911 int err; 4912 if (mddev->pers->start_reshape == NULL) 4913 return -EINVAL; 4914 err = mddev_lock(mddev); 4915 if (!err) { 4916 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { 4917 err = -EBUSY; 4918 } else if (mddev->reshape_position == MaxSector || 4919 mddev->pers->check_reshape == NULL || 4920 mddev->pers->check_reshape(mddev)) { 4921 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4922 err = mddev->pers->start_reshape(mddev); 4923 } else { 4924 /* 4925 * If reshape is still in progress, and 4926 * md_check_recovery() can continue to reshape, 4927 * don't restart reshape because data can be 4928 * corrupted for raid456. 4929 */ 4930 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4931 } 4932 mddev_unlock(mddev); 4933 } 4934 if (err) 4935 return err; 4936 sysfs_notify_dirent_safe(mddev->sysfs_degraded); 4937 } else { 4938 if (cmd_match(page, "check")) 4939 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4940 else if (!cmd_match(page, "repair")) 4941 return -EINVAL; 4942 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4943 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 4944 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4945 } 4946 if (mddev->ro == 2) { 4947 /* A write to sync_action is enough to justify 4948 * canceling read-auto mode 4949 */ 4950 mddev->ro = 0; 4951 md_wakeup_thread(mddev->sync_thread); 4952 } 4953 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4954 md_wakeup_thread(mddev->thread); 4955 sysfs_notify_dirent_safe(mddev->sysfs_action); 4956 return len; 4957} 4958 4959static struct md_sysfs_entry md_scan_mode = 4960__ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 4961 4962static ssize_t 4963last_sync_action_show(struct mddev *mddev, char *page) 4964{ 4965 return sprintf(page, "%s\n", mddev->last_sync_action); 4966} 4967 4968static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action); 4969 4970static ssize_t 4971mismatch_cnt_show(struct mddev *mddev, char *page) 4972{ 4973 return sprintf(page, "%llu\n", 4974 (unsigned long long) 4975 atomic64_read(&mddev->resync_mismatches)); 4976} 4977 4978static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); 4979 4980static ssize_t 4981sync_min_show(struct mddev *mddev, char *page) 4982{ 4983 return sprintf(page, "%d (%s)\n", speed_min(mddev), 4984 mddev->sync_speed_min ? 
"local": "system"); 4985} 4986 4987static ssize_t 4988sync_min_store(struct mddev *mddev, const char *buf, size_t len) 4989{ 4990 unsigned int min; 4991 int rv; 4992 4993 if (strncmp(buf, "system", 6)==0) { 4994 min = 0; 4995 } else { 4996 rv = kstrtouint(buf, 10, &min); 4997 if (rv < 0) 4998 return rv; 4999 if (min == 0) 5000 return -EINVAL; 5001 } 5002 mddev->sync_speed_min = min; 5003 return len; 5004} 5005 5006static struct md_sysfs_entry md_sync_min = 5007__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 5008 5009static ssize_t 5010sync_max_show(struct mddev *mddev, char *page) 5011{ 5012 return sprintf(page, "%d (%s)\n", speed_max(mddev), 5013 mddev->sync_speed_max ? "local": "system"); 5014} 5015 5016static ssize_t 5017sync_max_store(struct mddev *mddev, const char *buf, size_t len) 5018{ 5019 unsigned int max; 5020 int rv; 5021 5022 if (strncmp(buf, "system", 6)==0) { 5023 max = 0; 5024 } else { 5025 rv = kstrtouint(buf, 10, &max); 5026 if (rv < 0) 5027 return rv; 5028 if (max == 0) 5029 return -EINVAL; 5030 } 5031 mddev->sync_speed_max = max; 5032 return len; 5033} 5034 5035static struct md_sysfs_entry md_sync_max = 5036__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 5037 5038static ssize_t 5039degraded_show(struct mddev *mddev, char *page) 5040{ 5041 return sprintf(page, "%d\n", mddev->degraded); 5042} 5043static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); 5044 5045static ssize_t 5046sync_force_parallel_show(struct mddev *mddev, char *page) 5047{ 5048 return sprintf(page, "%d\n", mddev->parallel_resync); 5049} 5050 5051static ssize_t 5052sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len) 5053{ 5054 long n; 5055 5056 if (kstrtol(buf, 10, &n)) 5057 return -EINVAL; 5058 5059 if (n != 0 && n != 1) 5060 return -EINVAL; 5061 5062 mddev->parallel_resync = n; 5063 5064 if (mddev->sync_thread) 5065 wake_up(&resync_wait); 5066 5067 return len; 5068} 5069 5070/* force parallel resync, even with shared block devices */ 5071static struct md_sysfs_entry md_sync_force_parallel = 5072__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR, 5073 sync_force_parallel_show, sync_force_parallel_store); 5074 5075static ssize_t 5076sync_speed_show(struct mddev *mddev, char *page) 5077{ 5078 unsigned long resync, dt, db; 5079 if (mddev->curr_resync == 0) 5080 return sprintf(page, "none\n"); 5081 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); 5082 dt = (jiffies - mddev->resync_mark) / HZ; 5083 if (!dt) dt++; 5084 db = resync - mddev->resync_mark_cnt; 5085 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */ 5086} 5087 5088static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); 5089 5090static ssize_t 5091sync_completed_show(struct mddev *mddev, char *page) 5092{ 5093 unsigned long long max_sectors, resync; 5094 5095 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5096 return sprintf(page, "none\n"); 5097 5098 if (mddev->curr_resync == 1 || 5099 mddev->curr_resync == 2) 5100 return sprintf(page, "delayed\n"); 5101 5102 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 5103 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 5104 max_sectors = mddev->resync_max_sectors; 5105 else 5106 max_sectors = mddev->dev_sectors; 5107 5108 resync = mddev->curr_resync_completed; 5109 return sprintf(page, "%llu / %llu\n", resync, max_sectors); 5110} 5111 5112static struct md_sysfs_entry md_sync_completed = 5113 __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL); 5114 5115static ssize_t 
5116min_sync_show(struct mddev *mddev, char *page) 5117{ 5118 return sprintf(page, "%llu\n", 5119 (unsigned long long)mddev->resync_min); 5120} 5121static ssize_t 5122min_sync_store(struct mddev *mddev, const char *buf, size_t len) 5123{ 5124 unsigned long long min; 5125 int err; 5126 5127 if (kstrtoull(buf, 10, &min)) 5128 return -EINVAL; 5129 5130 spin_lock(&mddev->lock); 5131 err = -EINVAL; 5132 if (min > mddev->resync_max) 5133 goto out_unlock; 5134 5135 err = -EBUSY; 5136 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5137 goto out_unlock; 5138 5139 /* Round down to multiple of 4K for safety */ 5140 mddev->resync_min = round_down(min, 8); 5141 err = 0; 5142 5143out_unlock: 5144 spin_unlock(&mddev->lock); 5145 return err ?: len; 5146} 5147 5148static struct md_sysfs_entry md_min_sync = 5149__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store); 5150 5151static ssize_t 5152max_sync_show(struct mddev *mddev, char *page) 5153{ 5154 if (mddev->resync_max == MaxSector) 5155 return sprintf(page, "max\n"); 5156 else 5157 return sprintf(page, "%llu\n", 5158 (unsigned long long)mddev->resync_max); 5159} 5160static ssize_t 5161max_sync_store(struct mddev *mddev, const char *buf, size_t len) 5162{ 5163 int err; 5164 spin_lock(&mddev->lock); 5165 if (strncmp(buf, "max", 3) == 0) 5166 mddev->resync_max = MaxSector; 5167 else { 5168 unsigned long long max; 5169 int chunk; 5170 5171 err = -EINVAL; 5172 if (kstrtoull(buf, 10, &max)) 5173 goto out_unlock; 5174 if (max < mddev->resync_min) 5175 goto out_unlock; 5176 5177 err = -EBUSY; 5178 if (max < mddev->resync_max && 5179 mddev->ro == 0 && 5180 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5181 goto out_unlock; 5182 5183 /* Must be a multiple of chunk_size */ 5184 chunk = mddev->chunk_sectors; 5185 if (chunk) { 5186 sector_t temp = max; 5187 5188 err = -EINVAL; 5189 if (sector_div(temp, chunk)) 5190 goto out_unlock; 5191 } 5192 mddev->resync_max = max; 5193 } 5194 wake_up(&mddev->recovery_wait); 5195 err = 0; 5196out_unlock: 5197 spin_unlock(&mddev->lock); 5198 return err ?: len; 5199} 5200 5201static struct md_sysfs_entry md_max_sync = 5202__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store); 5203 5204static ssize_t 5205suspend_lo_show(struct mddev *mddev, char *page) 5206{ 5207 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); 5208} 5209 5210static ssize_t 5211suspend_lo_store(struct mddev *mddev, const char *buf, size_t len) 5212{ 5213 unsigned long long new; 5214 int err; 5215 5216 err = kstrtoull(buf, 10, &new); 5217 if (err < 0) 5218 return err; 5219 if (new != (sector_t)new) 5220 return -EINVAL; 5221 5222 err = mddev_lock(mddev); 5223 if (err) 5224 return err; 5225 err = -EINVAL; 5226 if (mddev->pers == NULL || 5227 mddev->pers->quiesce == NULL) 5228 goto unlock; 5229 mddev_suspend(mddev); 5230 mddev->suspend_lo = new; 5231 mddev_resume(mddev); 5232 5233 err = 0; 5234unlock: 5235 mddev_unlock(mddev); 5236 return err ?: len; 5237} 5238static struct md_sysfs_entry md_suspend_lo = 5239__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); 5240 5241static ssize_t 5242suspend_hi_show(struct mddev *mddev, char *page) 5243{ 5244 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); 5245} 5246 5247static ssize_t 5248suspend_hi_store(struct mddev *mddev, const char *buf, size_t len) 5249{ 5250 unsigned long long new; 5251 int err; 5252 5253 err = kstrtoull(buf, 10, &new); 5254 if (err < 0) 5255 return err; 5256 if (new != (sector_t)new) 5257 return -EINVAL; 5258 5259 
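	/*
	 * Descriptive note: changing suspend_hi takes the reconfig mutex
	 * and goes through a full mddev_suspend()/mddev_resume() cycle
	 * below, so in-flight requests drain before the new boundary is
	 * visible.
	 */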
err = mddev_lock(mddev); 5260 if (err) 5261 return err; 5262 err = -EINVAL; 5263 if (mddev->pers == NULL) 5264 goto unlock; 5265 5266 mddev_suspend(mddev); 5267 mddev->suspend_hi = new; 5268 mddev_resume(mddev); 5269 5270 err = 0; 5271unlock: 5272 mddev_unlock(mddev); 5273 return err ?: len; 5274} 5275static struct md_sysfs_entry md_suspend_hi = 5276__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 5277 5278static ssize_t 5279reshape_position_show(struct mddev *mddev, char *page) 5280{ 5281 if (mddev->reshape_position != MaxSector) 5282 return sprintf(page, "%llu\n", 5283 (unsigned long long)mddev->reshape_position); 5284 strcpy(page, "none\n"); 5285 return 5; 5286} 5287 5288static ssize_t 5289reshape_position_store(struct mddev *mddev, const char *buf, size_t len) 5290{ 5291 struct md_rdev *rdev; 5292 unsigned long long new; 5293 int err; 5294 5295 err = kstrtoull(buf, 10, &new); 5296 if (err < 0) 5297 return err; 5298 if (new != (sector_t)new) 5299 return -EINVAL; 5300 err = mddev_lock(mddev); 5301 if (err) 5302 return err; 5303 err = -EBUSY; 5304 if (mddev->pers) 5305 goto unlock; 5306 mddev->reshape_position = new; 5307 mddev->delta_disks = 0; 5308 mddev->reshape_backwards = 0; 5309 mddev->new_level = mddev->level; 5310 mddev->new_layout = mddev->layout; 5311 mddev->new_chunk_sectors = mddev->chunk_sectors; 5312 rdev_for_each(rdev, mddev) 5313 rdev->new_data_offset = rdev->data_offset; 5314 err = 0; 5315unlock: 5316 mddev_unlock(mddev); 5317 return err ?: len; 5318} 5319 5320static struct md_sysfs_entry md_reshape_position = 5321__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show, 5322 reshape_position_store); 5323 5324static ssize_t 5325reshape_direction_show(struct mddev *mddev, char *page) 5326{ 5327 return sprintf(page, "%s\n", 5328 mddev->reshape_backwards ? 
"backwards" : "forwards"); 5329} 5330 5331static ssize_t 5332reshape_direction_store(struct mddev *mddev, const char *buf, size_t len) 5333{ 5334 int backwards = 0; 5335 int err; 5336 5337 if (cmd_match(buf, "forwards")) 5338 backwards = 0; 5339 else if (cmd_match(buf, "backwards")) 5340 backwards = 1; 5341 else 5342 return -EINVAL; 5343 if (mddev->reshape_backwards == backwards) 5344 return len; 5345 5346 err = mddev_lock(mddev); 5347 if (err) 5348 return err; 5349 /* check if we are allowed to change */ 5350 if (mddev->delta_disks) 5351 err = -EBUSY; 5352 else if (mddev->persistent && 5353 mddev->major_version == 0) 5354 err = -EINVAL; 5355 else 5356 mddev->reshape_backwards = backwards; 5357 mddev_unlock(mddev); 5358 return err ?: len; 5359} 5360 5361static struct md_sysfs_entry md_reshape_direction = 5362__ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show, 5363 reshape_direction_store); 5364 5365static ssize_t 5366array_size_show(struct mddev *mddev, char *page) 5367{ 5368 if (mddev->external_size) 5369 return sprintf(page, "%llu\n", 5370 (unsigned long long)mddev->array_sectors/2); 5371 else 5372 return sprintf(page, "default\n"); 5373} 5374 5375static ssize_t 5376array_size_store(struct mddev *mddev, const char *buf, size_t len) 5377{ 5378 sector_t sectors; 5379 int err; 5380 5381 err = mddev_lock(mddev); 5382 if (err) 5383 return err; 5384 5385 /* cluster raid doesn't support change array_sectors */ 5386 if (mddev_is_clustered(mddev)) { 5387 mddev_unlock(mddev); 5388 return -EINVAL; 5389 } 5390 5391 if (strncmp(buf, "default", 7) == 0) { 5392 if (mddev->pers) 5393 sectors = mddev->pers->size(mddev, 0, 0); 5394 else 5395 sectors = mddev->array_sectors; 5396 5397 mddev->external_size = 0; 5398 } else { 5399 if (strict_blocks_to_sectors(buf, §ors) < 0) 5400 err = -EINVAL; 5401 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) 5402 err = -E2BIG; 5403 else 5404 mddev->external_size = 1; 5405 } 5406 5407 if (!err) { 5408 mddev->array_sectors = sectors; 5409 if (mddev->pers) { 5410 set_capacity(mddev->gendisk, mddev->array_sectors); 5411 revalidate_disk_size(mddev->gendisk, true); 5412 } 5413 } 5414 mddev_unlock(mddev); 5415 return err ?: len; 5416} 5417 5418static struct md_sysfs_entry md_array_size = 5419__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show, 5420 array_size_store); 5421 5422static ssize_t 5423consistency_policy_show(struct mddev *mddev, char *page) 5424{ 5425 int ret; 5426 5427 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { 5428 ret = sprintf(page, "journal\n"); 5429 } else if (test_bit(MD_HAS_PPL, &mddev->flags)) { 5430 ret = sprintf(page, "ppl\n"); 5431 } else if (mddev->bitmap) { 5432 ret = sprintf(page, "bitmap\n"); 5433 } else if (mddev->pers) { 5434 if (mddev->pers->sync_request) 5435 ret = sprintf(page, "resync\n"); 5436 else 5437 ret = sprintf(page, "none\n"); 5438 } else { 5439 ret = sprintf(page, "unknown\n"); 5440 } 5441 5442 return ret; 5443} 5444 5445static ssize_t 5446consistency_policy_store(struct mddev *mddev, const char *buf, size_t len) 5447{ 5448 int err = 0; 5449 5450 if (mddev->pers) { 5451 if (mddev->pers->change_consistency_policy) 5452 err = mddev->pers->change_consistency_policy(mddev, buf); 5453 else 5454 err = -EBUSY; 5455 } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) { 5456 set_bit(MD_HAS_PPL, &mddev->flags); 5457 } else { 5458 err = -EINVAL; 5459 } 5460 5461 return err ? 
err : len; 5462} 5463 5464static struct md_sysfs_entry md_consistency_policy = 5465__ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show, 5466 consistency_policy_store); 5467 5468static ssize_t fail_last_dev_show(struct mddev *mddev, char *page) 5469{ 5470 return sprintf(page, "%d\n", mddev->fail_last_dev); 5471} 5472 5473/* 5474 * Setting fail_last_dev to true to allow last device to be forcibly removed 5475 * from RAID1/RAID10. 5476 */ 5477static ssize_t 5478fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len) 5479{ 5480 int ret; 5481 bool value; 5482 5483 ret = kstrtobool(buf, &value); 5484 if (ret) 5485 return ret; 5486 5487 if (value != mddev->fail_last_dev) 5488 mddev->fail_last_dev = value; 5489 5490 return len; 5491} 5492static struct md_sysfs_entry md_fail_last_dev = 5493__ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show, 5494 fail_last_dev_store); 5495 5496static ssize_t serialize_policy_show(struct mddev *mddev, char *page) 5497{ 5498 if (mddev->pers == NULL || (mddev->pers->level != 1)) 5499 return sprintf(page, "n/a\n"); 5500 else 5501 return sprintf(page, "%d\n", mddev->serialize_policy); 5502} 5503 5504/* 5505 * Setting serialize_policy to true to enforce write IO is not reordered 5506 * for raid1. 5507 */ 5508static ssize_t 5509serialize_policy_store(struct mddev *mddev, const char *buf, size_t len) 5510{ 5511 int err; 5512 bool value; 5513 5514 err = kstrtobool(buf, &value); 5515 if (err) 5516 return err; 5517 5518 if (value == mddev->serialize_policy) 5519 return len; 5520 5521 err = mddev_lock(mddev); 5522 if (err) 5523 return err; 5524 if (mddev->pers == NULL || (mddev->pers->level != 1)) { 5525 pr_err("md: serialize_policy is only effective for raid1\n"); 5526 err = -EINVAL; 5527 goto unlock; 5528 } 5529 5530 mddev_suspend(mddev); 5531 if (value) 5532 mddev_create_serial_pool(mddev, NULL, true); 5533 else 5534 mddev_destroy_serial_pool(mddev, NULL, true); 5535 mddev->serialize_policy = value; 5536 mddev_resume(mddev); 5537unlock: 5538 mddev_unlock(mddev); 5539 return err ?: len; 5540} 5541 5542static struct md_sysfs_entry md_serialize_policy = 5543__ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show, 5544 serialize_policy_store); 5545 5546 5547static struct attribute *md_default_attrs[] = { 5548 &md_level.attr, 5549 &md_layout.attr, 5550 &md_raid_disks.attr, 5551 &md_uuid.attr, 5552 &md_chunk_size.attr, 5553 &md_size.attr, 5554 &md_resync_start.attr, 5555 &md_metadata.attr, 5556 &md_new_device.attr, 5557 &md_safe_delay.attr, 5558 &md_array_state.attr, 5559 &md_reshape_position.attr, 5560 &md_reshape_direction.attr, 5561 &md_array_size.attr, 5562 &max_corr_read_errors.attr, 5563 &md_consistency_policy.attr, 5564 &md_fail_last_dev.attr, 5565 &md_serialize_policy.attr, 5566 NULL, 5567}; 5568 5569static struct attribute *md_redundancy_attrs[] = { 5570 &md_scan_mode.attr, 5571 &md_last_scan_mode.attr, 5572 &md_mismatches.attr, 5573 &md_sync_min.attr, 5574 &md_sync_max.attr, 5575 &md_sync_speed.attr, 5576 &md_sync_force_parallel.attr, 5577 &md_sync_completed.attr, 5578 &md_min_sync.attr, 5579 &md_max_sync.attr, 5580 &md_suspend_lo.attr, 5581 &md_suspend_hi.attr, 5582 &md_bitmap.attr, 5583 &md_degraded.attr, 5584 NULL, 5585}; 5586static struct attribute_group md_redundancy_group = { 5587 .name = NULL, 5588 .attrs = md_redundancy_attrs, 5589}; 5590 5591static ssize_t 5592md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 5593{ 5594 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, 
attr); 5595 struct mddev *mddev = container_of(kobj, struct mddev, kobj); 5596 ssize_t rv; 5597 5598 if (!entry->show) 5599 return -EIO; 5600 spin_lock(&all_mddevs_lock); 5601 if (list_empty(&mddev->all_mddevs)) { 5602 spin_unlock(&all_mddevs_lock); 5603 return -EBUSY; 5604 } 5605 mddev_get(mddev); 5606 spin_unlock(&all_mddevs_lock); 5607 5608 rv = entry->show(mddev, page); 5609 mddev_put(mddev); 5610 return rv; 5611} 5612 5613static ssize_t 5614md_attr_store(struct kobject *kobj, struct attribute *attr, 5615 const char *page, size_t length) 5616{ 5617 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 5618 struct mddev *mddev = container_of(kobj, struct mddev, kobj); 5619 ssize_t rv; 5620 5621 if (!entry->store) 5622 return -EIO; 5623 if (!capable(CAP_SYS_ADMIN)) 5624 return -EACCES; 5625 spin_lock(&all_mddevs_lock); 5626 if (list_empty(&mddev->all_mddevs)) { 5627 spin_unlock(&all_mddevs_lock); 5628 return -EBUSY; 5629 } 5630 mddev_get(mddev); 5631 spin_unlock(&all_mddevs_lock); 5632 rv = entry->store(mddev, page, length); 5633 mddev_put(mddev); 5634 return rv; 5635} 5636 5637static void md_free(struct kobject *ko) 5638{ 5639 struct mddev *mddev = container_of(ko, struct mddev, kobj); 5640 5641 if (mddev->sysfs_state) 5642 sysfs_put(mddev->sysfs_state); 5643 if (mddev->sysfs_level) 5644 sysfs_put(mddev->sysfs_level); 5645 5646 if (mddev->gendisk) 5647 del_gendisk(mddev->gendisk); 5648 if (mddev->queue) 5649 blk_cleanup_queue(mddev->queue); 5650 if (mddev->gendisk) 5651 put_disk(mddev->gendisk); 5652 percpu_ref_exit(&mddev->writes_pending); 5653 5654 bioset_exit(&mddev->bio_set); 5655 bioset_exit(&mddev->sync_set); 5656 kfree(mddev); 5657} 5658 5659static const struct sysfs_ops md_sysfs_ops = { 5660 .show = md_attr_show, 5661 .store = md_attr_store, 5662}; 5663static struct kobj_type md_ktype = { 5664 .release = md_free, 5665 .sysfs_ops = &md_sysfs_ops, 5666 .default_attrs = md_default_attrs, 5667}; 5668 5669int mdp_major = 0; 5670 5671static void mddev_delayed_delete(struct work_struct *ws) 5672{ 5673 struct mddev *mddev = container_of(ws, struct mddev, del_work); 5674 5675 sysfs_remove_group(&mddev->kobj, &md_bitmap_group); 5676 kobject_del(&mddev->kobj); 5677 kobject_put(&mddev->kobj); 5678} 5679 5680static void no_op(struct percpu_ref *r) {} 5681 5682int mddev_init_writes_pending(struct mddev *mddev) 5683{ 5684 if (mddev->writes_pending.percpu_count_ptr) 5685 return 0; 5686 if (percpu_ref_init(&mddev->writes_pending, no_op, 5687 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL) < 0) 5688 return -ENOMEM; 5689 /* We want to start with the refcount at zero */ 5690 percpu_ref_put(&mddev->writes_pending); 5691 return 0; 5692} 5693EXPORT_SYMBOL_GPL(mddev_init_writes_pending); 5694 5695static int md_alloc(dev_t dev, char *name) 5696{ 5697 /* 5698 * If dev is zero, name is the name of a device to allocate with 5699 * an arbitrary minor number. It will be "md_???" 5700 * If dev is non-zero it must be a device number with a MAJOR of 5701 * MD_MAJOR or mdp_major. In this case, if "name" is NULL, then 5702 * the device is being created by opening a node in /dev. 5703 * If "name" is not NULL, the device is being created by 5704 * writing to /sys/module/md_mod/parameters/new_array. 
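	 *
	 * Illustrative example ("md_home" is only an example name):
	 *   echo md_home > /sys/module/md_mod/parameters/new_array
	 * reaches this function with dev == 0 and name == "md_home".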
5705 */ 5706 static DEFINE_MUTEX(disks_mutex); 5707 struct mddev *mddev = mddev_find_or_alloc(dev); 5708 struct gendisk *disk; 5709 int partitioned; 5710 int shift; 5711 int unit; 5712 int error; 5713 5714 if (!mddev) 5715 return -ENODEV; 5716 5717 partitioned = (MAJOR(mddev->unit) != MD_MAJOR); 5718 shift = partitioned ? MdpMinorShift : 0; 5719 unit = MINOR(mddev->unit) >> shift; 5720 5721 /* wait for any previous instance of this device to be 5722 * completely removed (mddev_delayed_delete). 5723 */ 5724 flush_workqueue(md_misc_wq); 5725 flush_workqueue(md_rdev_misc_wq); 5726 5727 mutex_lock(&disks_mutex); 5728 error = -EEXIST; 5729 if (mddev->gendisk) 5730 goto abort; 5731 5732 if (name && !dev) { 5733 /* Need to ensure that 'name' is not a duplicate. 5734 */ 5735 struct mddev *mddev2; 5736 spin_lock(&all_mddevs_lock); 5737 5738 list_for_each_entry(mddev2, &all_mddevs, all_mddevs) 5739 if (mddev2->gendisk && 5740 strcmp(mddev2->gendisk->disk_name, name) == 0) { 5741 spin_unlock(&all_mddevs_lock); 5742 goto abort; 5743 } 5744 spin_unlock(&all_mddevs_lock); 5745 } 5746 if (name && dev) 5747 /* 5748 * Creating /dev/mdNNN via "newarray", so adjust hold_active. 5749 */ 5750 mddev->hold_active = UNTIL_STOP; 5751 5752 error = -ENOMEM; 5753 mddev->queue = blk_alloc_queue(NUMA_NO_NODE); 5754 if (!mddev->queue) 5755 goto abort; 5756 5757 blk_set_stacking_limits(&mddev->queue->limits); 5758 5759 disk = alloc_disk(1 << shift); 5760 if (!disk) { 5761 blk_cleanup_queue(mddev->queue); 5762 mddev->queue = NULL; 5763 goto abort; 5764 } 5765 disk->major = MAJOR(mddev->unit); 5766 disk->first_minor = unit << shift; 5767 if (name) 5768 strcpy(disk->disk_name, name); 5769 else if (partitioned) 5770 sprintf(disk->disk_name, "md_d%d", unit); 5771 else 5772 sprintf(disk->disk_name, "md%d", unit); 5773 disk->fops = &md_fops; 5774 disk->private_data = mddev; 5775 disk->queue = mddev->queue; 5776 blk_queue_write_cache(mddev->queue, true, true); 5777 /* Allow extended partitions. This makes the 5778 * 'mdp' device redundant, but we can't really 5779 * remove it now. 5780 */ 5781 disk->flags |= GENHD_FL_EXT_DEVT; 5782 disk->events |= DISK_EVENT_MEDIA_CHANGE; 5783 mddev->gendisk = disk; 5784 add_disk(disk); 5785 5786 error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md"); 5787 if (error) { 5788 /* This isn't possible, but as kobject_init_and_add is marked 5789 * __must_check, we must do something with the result 5790 */ 5791 pr_debug("md: cannot register %s/md - name in use\n", 5792 disk->disk_name); 5793 error = 0; 5794 } 5795 if (mddev->kobj.sd && 5796 sysfs_create_group(&mddev->kobj, &md_bitmap_group)) 5797 pr_debug("pointless warning\n"); 5798 abort: 5799 mutex_unlock(&disks_mutex); 5800 if (!error && mddev->kobj.sd) { 5801 kobject_uevent(&mddev->kobj, KOBJ_ADD); 5802 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); 5803 mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level"); 5804 } 5805 mddev_put(mddev); 5806 return error; 5807} 5808 5809static struct kobject *md_probe(dev_t dev, int *part, void *data) 5810{ 5811 if (create_on_open) 5812 md_alloc(dev, NULL); 5813 return NULL; 5814} 5815 5816static int add_named_array(const char *val, const struct kernel_param *kp) 5817{ 5818 /* 5819 * val must be "md_*" or "mdNNN". 5820 * For "md_*" we allocate an array with a large free minor number, and 5821 * set the name to val. val must not already be an active name. 
5822 * For "mdNNN" we allocate an array with the minor number NNN 5823 * which must not already be in use. 5824 */ 5825 int len = strlen(val); 5826 char buf[DISK_NAME_LEN]; 5827 unsigned long devnum; 5828 5829 while (len && val[len-1] == '\n') 5830 len--; 5831 if (len >= DISK_NAME_LEN) 5832 return -E2BIG; 5833 strlcpy(buf, val, len+1); 5834 if (strncmp(buf, "md_", 3) == 0) 5835 return md_alloc(0, buf); 5836 if (strncmp(buf, "md", 2) == 0 && 5837 isdigit(buf[2]) && 5838 kstrtoul(buf+2, 10, &devnum) == 0 && 5839 devnum <= MINORMASK) 5840 return md_alloc(MKDEV(MD_MAJOR, devnum), NULL); 5841 5842 return -EINVAL; 5843} 5844 5845static void md_safemode_timeout(struct timer_list *t) 5846{ 5847 struct mddev *mddev = from_timer(mddev, t, safemode_timer); 5848 5849 mddev->safemode = 1; 5850 if (mddev->external) 5851 sysfs_notify_dirent_safe(mddev->sysfs_state); 5852 5853 md_wakeup_thread(mddev->thread); 5854} 5855 5856static int start_dirty_degraded; 5857 5858int md_run(struct mddev *mddev) 5859{ 5860 int err; 5861 struct md_rdev *rdev; 5862 struct md_personality *pers; 5863 5864 if (list_empty(&mddev->disks)) 5865 /* cannot run an array with no devices.. */ 5866 return -EINVAL; 5867 5868 if (mddev->pers) 5869 return -EBUSY; 5870 /* Cannot run until previous stop completes properly */ 5871 if (mddev->sysfs_active) 5872 return -EBUSY; 5873 5874 /* 5875 * Analyze all RAID superblock(s) 5876 */ 5877 if (!mddev->raid_disks) { 5878 if (!mddev->persistent) 5879 return -EINVAL; 5880 err = analyze_sbs(mddev); 5881 if (err) 5882 return -EINVAL; 5883 } 5884 5885 if (mddev->level != LEVEL_NONE) 5886 request_module("md-level-%d", mddev->level); 5887 else if (mddev->clevel[0]) 5888 request_module("md-%s", mddev->clevel); 5889 5890 /* 5891 * Drop all container device buffers, from now on 5892 * the only valid external interface is through the md 5893 * device. 5894 */ 5895 mddev->has_superblocks = false; 5896 rdev_for_each(rdev, mddev) { 5897 if (test_bit(Faulty, &rdev->flags)) 5898 continue; 5899 sync_blockdev(rdev->bdev); 5900 invalidate_bdev(rdev->bdev); 5901 if (mddev->ro != 1 && 5902 (bdev_read_only(rdev->bdev) || 5903 bdev_read_only(rdev->meta_bdev))) { 5904 mddev->ro = 1; 5905 if (mddev->gendisk) 5906 set_disk_ro(mddev->gendisk, 1); 5907 } 5908 5909 if (rdev->sb_page) 5910 mddev->has_superblocks = true; 5911 5912 /* perform some consistency tests on the device. 5913 * We don't want the data to overlap the metadata, 5914 * Internal Bitmap issues have been handled elsewhere. 
5915 */ 5916 if (rdev->meta_bdev) { 5917 /* Nothing to check */; 5918 } else if (rdev->data_offset < rdev->sb_start) { 5919 if (mddev->dev_sectors && 5920 rdev->data_offset + mddev->dev_sectors 5921 > rdev->sb_start) { 5922 pr_warn("md: %s: data overlaps metadata\n", 5923 mdname(mddev)); 5924 return -EINVAL; 5925 } 5926 } else { 5927 if (rdev->sb_start + rdev->sb_size/512 5928 > rdev->data_offset) { 5929 pr_warn("md: %s: metadata overlaps data\n", 5930 mdname(mddev)); 5931 return -EINVAL; 5932 } 5933 } 5934 sysfs_notify_dirent_safe(rdev->sysfs_state); 5935 } 5936 5937 if (!bioset_initialized(&mddev->bio_set)) { 5938 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); 5939 if (err) 5940 return err; 5941 } 5942 if (!bioset_initialized(&mddev->sync_set)) { 5943 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); 5944 if (err) 5945 return err; 5946 } 5947 5948 spin_lock(&pers_lock); 5949 pers = find_pers(mddev->level, mddev->clevel); 5950 if (!pers || !try_module_get(pers->owner)) { 5951 spin_unlock(&pers_lock); 5952 if (mddev->level != LEVEL_NONE) 5953 pr_warn("md: personality for level %d is not loaded!\n", 5954 mddev->level); 5955 else 5956 pr_warn("md: personality for level %s is not loaded!\n", 5957 mddev->clevel); 5958 err = -EINVAL; 5959 goto abort; 5960 } 5961 spin_unlock(&pers_lock); 5962 if (mddev->level != pers->level) { 5963 mddev->level = pers->level; 5964 mddev->new_level = pers->level; 5965 } 5966 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 5967 5968 if (mddev->reshape_position != MaxSector && 5969 pers->start_reshape == NULL) { 5970 /* This personality cannot handle reshaping... */ 5971 module_put(pers->owner); 5972 err = -EINVAL; 5973 goto abort; 5974 } 5975 5976 if (pers->sync_request) { 5977 /* Warn if this is a potentially silly 5978 * configuration. 
5979 */ 5980 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 5981 struct md_rdev *rdev2; 5982 int warned = 0; 5983 5984 rdev_for_each(rdev, mddev) 5985 rdev_for_each(rdev2, mddev) { 5986 if (rdev < rdev2 && 5987 rdev->bdev->bd_disk == 5988 rdev2->bdev->bd_disk) { 5989 pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n", 5990 mdname(mddev), 5991 bdevname(rdev->bdev,b), 5992 bdevname(rdev2->bdev,b2)); 5993 warned = 1; 5994 } 5995 } 5996 5997 if (warned) 5998 pr_warn("True protection against single-disk failure might be compromised.\n"); 5999 } 6000 6001 mddev->recovery = 0; 6002 /* may be over-ridden by personality */ 6003 mddev->resync_max_sectors = mddev->dev_sectors; 6004 6005 mddev->ok_start_degraded = start_dirty_degraded; 6006 6007 if (start_readonly && mddev->ro == 0) 6008 mddev->ro = 2; /* read-only, but switch on first write */ 6009 6010 err = pers->run(mddev); 6011 if (err) 6012 pr_warn("md: pers->run() failed ...\n"); 6013 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) { 6014 WARN_ONCE(!mddev->external_size, 6015 "%s: default size too small, but 'external_size' not in effect?\n", 6016 __func__); 6017 pr_warn("md: invalid array_size %llu > default size %llu\n", 6018 (unsigned long long)mddev->array_sectors / 2, 6019 (unsigned long long)pers->size(mddev, 0, 0) / 2); 6020 err = -EINVAL; 6021 } 6022 if (err == 0 && pers->sync_request && 6023 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) { 6024 struct bitmap *bitmap; 6025 6026 bitmap = md_bitmap_create(mddev, -1); 6027 if (IS_ERR(bitmap)) { 6028 err = PTR_ERR(bitmap); 6029 pr_warn("%s: failed to create bitmap (%d)\n", 6030 mdname(mddev), err); 6031 } else 6032 mddev->bitmap = bitmap; 6033 6034 } 6035 if (err) 6036 goto bitmap_abort; 6037 6038 if (mddev->bitmap_info.max_write_behind > 0) { 6039 bool create_pool = false; 6040 6041 rdev_for_each(rdev, mddev) { 6042 if (test_bit(WriteMostly, &rdev->flags) && 6043 rdev_init_serial(rdev)) 6044 create_pool = true; 6045 } 6046 if (create_pool && mddev->serial_info_pool == NULL) { 6047 mddev->serial_info_pool = 6048 mempool_create_kmalloc_pool(NR_SERIAL_INFOS, 6049 sizeof(struct serial_info)); 6050 if (!mddev->serial_info_pool) { 6051 err = -ENOMEM; 6052 goto bitmap_abort; 6053 } 6054 } 6055 } 6056 6057 if (mddev->queue) { 6058 bool nonrot = true; 6059 6060 rdev_for_each(rdev, mddev) { 6061 if (rdev->raid_disk >= 0 && 6062 !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) { 6063 nonrot = false; 6064 break; 6065 } 6066 } 6067 if (mddev->degraded) 6068 nonrot = false; 6069 if (nonrot) 6070 blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue); 6071 else 6072 blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue); 6073 } 6074 if (pers->sync_request) { 6075 if (mddev->kobj.sd && 6076 sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 6077 pr_warn("md: cannot register extra attributes for %s\n", 6078 mdname(mddev)); 6079 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); 6080 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); 6081 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); 6082 } else if (mddev->ro == 2) /* auto-readonly not meaningful */ 6083 mddev->ro = 0; 6084 6085 atomic_set(&mddev->max_corr_read_errors, 6086 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS); 6087 mddev->safemode = 0; 6088 if (mddev_is_clustered(mddev)) 6089 mddev->safemode_delay = 0; 6090 else 6091 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY; 6092 mddev->in_sync = 1; 6093 smp_wmb(); 6094 spin_lock(&mddev->lock); 
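	/*
	 * Publish the personality only after the run-time state above
	 * (safemode, in_sync, max_corr_read_errors) has been written;
	 * the smp_wmb() orders those stores and ->pers itself is set
	 * under mddev->lock so readers see a fully initialised array.
	 */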
6095 mddev->pers = pers; 6096 spin_unlock(&mddev->lock); 6097 rdev_for_each(rdev, mddev) 6098 if (rdev->raid_disk >= 0) 6099 sysfs_link_rdev(mddev, rdev); /* failure here is OK */ 6100 6101 if (mddev->degraded && !mddev->ro) 6102 /* This ensures that recovering status is reported immediately 6103 * via sysfs - until a lack of spares is confirmed. 6104 */ 6105 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 6106 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6107 6108 if (mddev->sb_flags) 6109 md_update_sb(mddev, 0); 6110 6111 md_new_event(mddev); 6112 return 0; 6113 6114bitmap_abort: 6115 mddev_detach(mddev); 6116 if (mddev->private) 6117 pers->free(mddev, mddev->private); 6118 mddev->private = NULL; 6119 module_put(pers->owner); 6120 md_bitmap_destroy(mddev); 6121abort: 6122 bioset_exit(&mddev->bio_set); 6123 bioset_exit(&mddev->sync_set); 6124 return err; 6125} 6126EXPORT_SYMBOL_GPL(md_run); 6127 6128int do_md_run(struct mddev *mddev) 6129{ 6130 int err; 6131 6132 set_bit(MD_NOT_READY, &mddev->flags); 6133 err = md_run(mddev); 6134 if (err) 6135 goto out; 6136 err = md_bitmap_load(mddev); 6137 if (err) { 6138 md_bitmap_destroy(mddev); 6139 goto out; 6140 } 6141 6142 if (mddev_is_clustered(mddev)) 6143 md_allow_write(mddev); 6144 6145 /* run start up tasks that require md_thread */ 6146 md_start(mddev); 6147 6148 md_wakeup_thread(mddev->thread); 6149 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 6150 6151 set_capacity(mddev->gendisk, mddev->array_sectors); 6152 revalidate_disk_size(mddev->gendisk, true); 6153 clear_bit(MD_NOT_READY, &mddev->flags); 6154 mddev->changed = 1; 6155 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 6156 sysfs_notify_dirent_safe(mddev->sysfs_state); 6157 sysfs_notify_dirent_safe(mddev->sysfs_action); 6158 sysfs_notify_dirent_safe(mddev->sysfs_degraded); 6159out: 6160 clear_bit(MD_NOT_READY, &mddev->flags); 6161 return err; 6162} 6163 6164int md_start(struct mddev *mddev) 6165{ 6166 int ret = 0; 6167 6168 if (mddev->pers->start) { 6169 set_bit(MD_RECOVERY_WAIT, &mddev->recovery); 6170 md_wakeup_thread(mddev->thread); 6171 ret = mddev->pers->start(mddev); 6172 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery); 6173 md_wakeup_thread(mddev->sync_thread); 6174 } 6175 return ret; 6176} 6177EXPORT_SYMBOL_GPL(md_start); 6178 6179static int restart_array(struct mddev *mddev) 6180{ 6181 struct gendisk *disk = mddev->gendisk; 6182 struct md_rdev *rdev; 6183 bool has_journal = false; 6184 bool has_readonly = false; 6185 6186 /* Complain if it has no devices */ 6187 if (list_empty(&mddev->disks)) 6188 return -ENXIO; 6189 if (!mddev->pers) 6190 return -EINVAL; 6191 if (!mddev->ro) 6192 return -EBUSY; 6193 6194 rcu_read_lock(); 6195 rdev_for_each_rcu(rdev, mddev) { 6196 if (test_bit(Journal, &rdev->flags) && 6197 !test_bit(Faulty, &rdev->flags)) 6198 has_journal = true; 6199 if (bdev_read_only(rdev->bdev)) 6200 has_readonly = true; 6201 } 6202 rcu_read_unlock(); 6203 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal) 6204 /* Don't restart rw with journal missing/faulty */ 6205 return -EINVAL; 6206 if (has_readonly) 6207 return -EROFS; 6208 6209 mddev->safemode = 0; 6210 mddev->ro = 0; 6211 set_disk_ro(disk, 0); 6212 pr_debug("md: %s switched to read-write mode.\n", mdname(mddev)); 6213 /* Kick recovery or resync if necessary */ 6214 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6215 md_wakeup_thread(mddev->thread); 6216 md_wakeup_thread(mddev->sync_thread); 6217 sysfs_notify_dirent_safe(mddev->sysfs_state); 6218 return 0; 6219} 
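/*
 * Reset an mddev to its unconfigured state after a final stop so the
 * unit can be reused: array geometry, level, bitmap parameters and
 * recovery state all go back to their defaults.
 */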
6220 6221static void md_clean(struct mddev *mddev) 6222{ 6223 mddev->array_sectors = 0; 6224 mddev->external_size = 0; 6225 mddev->dev_sectors = 0; 6226 mddev->raid_disks = 0; 6227 mddev->recovery_cp = 0; 6228 mddev->resync_min = 0; 6229 mddev->resync_max = MaxSector; 6230 mddev->reshape_position = MaxSector; 6231 mddev->external = 0; 6232 mddev->persistent = 0; 6233 mddev->level = LEVEL_NONE; 6234 mddev->clevel[0] = 0; 6235 mddev->flags = 0; 6236 mddev->sb_flags = 0; 6237 mddev->ro = 0; 6238 mddev->metadata_type[0] = 0; 6239 mddev->chunk_sectors = 0; 6240 mddev->ctime = mddev->utime = 0; 6241 mddev->layout = 0; 6242 mddev->max_disks = 0; 6243 mddev->events = 0; 6244 mddev->can_decrease_events = 0; 6245 mddev->delta_disks = 0; 6246 mddev->reshape_backwards = 0; 6247 mddev->new_level = LEVEL_NONE; 6248 mddev->new_layout = 0; 6249 mddev->new_chunk_sectors = 0; 6250 mddev->curr_resync = 0; 6251 atomic64_set(&mddev->resync_mismatches, 0); 6252 mddev->suspend_lo = mddev->suspend_hi = 0; 6253 mddev->sync_speed_min = mddev->sync_speed_max = 0; 6254 mddev->recovery = 0; 6255 mddev->in_sync = 0; 6256 mddev->changed = 0; 6257 mddev->degraded = 0; 6258 mddev->safemode = 0; 6259 mddev->private = NULL; 6260 mddev->cluster_info = NULL; 6261 mddev->bitmap_info.offset = 0; 6262 mddev->bitmap_info.default_offset = 0; 6263 mddev->bitmap_info.default_space = 0; 6264 mddev->bitmap_info.chunksize = 0; 6265 mddev->bitmap_info.daemon_sleep = 0; 6266 mddev->bitmap_info.max_write_behind = 0; 6267 mddev->bitmap_info.nodes = 0; 6268} 6269 6270static void __md_stop_writes(struct mddev *mddev) 6271{ 6272 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6273 if (work_pending(&mddev->del_work)) 6274 flush_workqueue(md_misc_wq); 6275 if (mddev->sync_thread) { 6276 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6277 md_reap_sync_thread(mddev); 6278 } 6279 6280 del_timer_sync(&mddev->safemode_timer); 6281 6282 if (mddev->pers && mddev->pers->quiesce) { 6283 mddev->pers->quiesce(mddev, 1); 6284 mddev->pers->quiesce(mddev, 0); 6285 } 6286 md_bitmap_flush(mddev); 6287 6288 if (mddev->ro == 0 && 6289 ((!mddev->in_sync && !mddev_is_clustered(mddev)) || 6290 mddev->sb_flags)) { 6291 /* mark array as shutdown cleanly */ 6292 if (!mddev_is_clustered(mddev)) 6293 mddev->in_sync = 1; 6294 md_update_sb(mddev, 1); 6295 } 6296 /* disable policy to guarantee rdevs free resources for serialization */ 6297 mddev->serialize_policy = 0; 6298 mddev_destroy_serial_pool(mddev, NULL, true); 6299} 6300 6301void md_stop_writes(struct mddev *mddev) 6302{ 6303 mddev_lock_nointr(mddev); 6304 __md_stop_writes(mddev); 6305 mddev_unlock(mddev); 6306} 6307EXPORT_SYMBOL_GPL(md_stop_writes); 6308 6309static void mddev_detach(struct mddev *mddev) 6310{ 6311 md_bitmap_wait_behind_writes(mddev); 6312 if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) { 6313 mddev->pers->quiesce(mddev, 1); 6314 mddev->pers->quiesce(mddev, 0); 6315 } 6316 md_unregister_thread(&mddev->thread); 6317 if (mddev->queue) 6318 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 6319} 6320 6321static void __md_stop(struct mddev *mddev) 6322{ 6323 struct md_personality *pers = mddev->pers; 6324 md_bitmap_destroy(mddev); 6325 mddev_detach(mddev); 6326 /* Ensure ->event_work is done */ 6327 if (mddev->event_work.func) 6328 flush_workqueue(md_misc_wq); 6329 spin_lock(&mddev->lock); 6330 mddev->pers = NULL; 6331 spin_unlock(&mddev->lock); 6332 pers->free(mddev, mddev->private); 6333 mddev->private = NULL; 6334 if (pers->sync_request && mddev->to_remove == NULL) 6335 
mddev->to_remove = &md_redundancy_group; 6336 module_put(pers->owner); 6337 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6338} 6339 6340void md_stop(struct mddev *mddev) 6341{ 6342 lockdep_assert_held(&mddev->reconfig_mutex); 6343 6344 /* stop the array and free an attached data structures. 6345 * This is called from dm-raid 6346 */ 6347 __md_stop_writes(mddev); 6348 __md_stop(mddev); 6349 bioset_exit(&mddev->bio_set); 6350 bioset_exit(&mddev->sync_set); 6351} 6352 6353EXPORT_SYMBOL_GPL(md_stop); 6354 6355static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) 6356{ 6357 int err = 0; 6358 int did_freeze = 0; 6359 6360 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { 6361 did_freeze = 1; 6362 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6363 md_wakeup_thread(mddev->thread); 6364 } 6365 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 6366 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6367 if (mddev->sync_thread) 6368 /* Thread might be blocked waiting for metadata update 6369 * which will now never happen */ 6370 wake_up_process(mddev->sync_thread->tsk); 6371 6372 if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) 6373 return -EBUSY; 6374 mddev_unlock(mddev); 6375 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, 6376 &mddev->recovery)); 6377 wait_event(mddev->sb_wait, 6378 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 6379 mddev_lock_nointr(mddev); 6380 6381 mutex_lock(&mddev->open_mutex); 6382 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || 6383 mddev->sync_thread || 6384 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { 6385 pr_warn("md: %s still in use.\n",mdname(mddev)); 6386 if (did_freeze) { 6387 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6388 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6389 md_wakeup_thread(mddev->thread); 6390 } 6391 err = -EBUSY; 6392 goto out; 6393 } 6394 if (mddev->pers) { 6395 __md_stop_writes(mddev); 6396 6397 err = -ENXIO; 6398 if (mddev->ro==1) 6399 goto out; 6400 mddev->ro = 1; 6401 set_disk_ro(mddev->gendisk, 1); 6402 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6403 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6404 md_wakeup_thread(mddev->thread); 6405 sysfs_notify_dirent_safe(mddev->sysfs_state); 6406 err = 0; 6407 } 6408out: 6409 mutex_unlock(&mddev->open_mutex); 6410 return err; 6411} 6412 6413/* mode: 6414 * 0 - completely stop and dis-assemble array 6415 * 2 - stop but do not disassemble array 6416 */ 6417static int do_md_stop(struct mddev *mddev, int mode, 6418 struct block_device *bdev) 6419{ 6420 struct gendisk *disk = mddev->gendisk; 6421 struct md_rdev *rdev; 6422 int did_freeze = 0; 6423 6424 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { 6425 did_freeze = 1; 6426 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6427 md_wakeup_thread(mddev->thread); 6428 } 6429 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 6430 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6431 if (mddev->sync_thread) 6432 /* Thread might be blocked waiting for metadata update 6433 * which will now never happen */ 6434 wake_up_process(mddev->sync_thread->tsk); 6435 6436 mddev_unlock(mddev); 6437 wait_event(resync_wait, (mddev->sync_thread == NULL && 6438 !test_bit(MD_RECOVERY_RUNNING, 6439 &mddev->recovery))); 6440 mddev_lock_nointr(mddev); 6441 6442 mutex_lock(&mddev->open_mutex); 6443 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || 6444 mddev->sysfs_active || 6445 mddev->sync_thread || 6446 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { 6447 
pr_warn("md: %s still in use.\n",mdname(mddev)); 6448 mutex_unlock(&mddev->open_mutex); 6449 if (did_freeze) { 6450 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6451 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6452 md_wakeup_thread(mddev->thread); 6453 } 6454 return -EBUSY; 6455 } 6456 if (mddev->pers) { 6457 if (mddev->ro) 6458 set_disk_ro(disk, 0); 6459 6460 __md_stop_writes(mddev); 6461 __md_stop(mddev); 6462 6463 /* tell userspace to handle 'inactive' */ 6464 sysfs_notify_dirent_safe(mddev->sysfs_state); 6465 6466 rdev_for_each(rdev, mddev) 6467 if (rdev->raid_disk >= 0) 6468 sysfs_unlink_rdev(mddev, rdev); 6469 6470 set_capacity(disk, 0); 6471 mutex_unlock(&mddev->open_mutex); 6472 mddev->changed = 1; 6473 revalidate_disk_size(disk, true); 6474 6475 if (mddev->ro) 6476 mddev->ro = 0; 6477 } else 6478 mutex_unlock(&mddev->open_mutex); 6479 /* 6480 * Free resources if final stop 6481 */ 6482 if (mode == 0) { 6483 pr_info("md: %s stopped.\n", mdname(mddev)); 6484 6485 if (mddev->bitmap_info.file) { 6486 struct file *f = mddev->bitmap_info.file; 6487 spin_lock(&mddev->lock); 6488 mddev->bitmap_info.file = NULL; 6489 spin_unlock(&mddev->lock); 6490 fput(f); 6491 } 6492 mddev->bitmap_info.offset = 0; 6493 6494 export_array(mddev); 6495 6496 md_clean(mddev); 6497 if (mddev->hold_active == UNTIL_STOP) 6498 mddev->hold_active = 0; 6499 } 6500 md_new_event(mddev); 6501 sysfs_notify_dirent_safe(mddev->sysfs_state); 6502 return 0; 6503} 6504 6505#ifndef MODULE 6506static void autorun_array(struct mddev *mddev) 6507{ 6508 struct md_rdev *rdev; 6509 int err; 6510 6511 if (list_empty(&mddev->disks)) 6512 return; 6513 6514 pr_info("md: running: "); 6515 6516 rdev_for_each(rdev, mddev) { 6517 char b[BDEVNAME_SIZE]; 6518 pr_cont("<%s>", bdevname(rdev->bdev,b)); 6519 } 6520 pr_cont("\n"); 6521 6522 err = do_md_run(mddev); 6523 if (err) { 6524 pr_warn("md: do_md_run() returned %d\n", err); 6525 do_md_stop(mddev, 0, NULL); 6526 } 6527} 6528 6529/* 6530 * lets try to run arrays based on all disks that have arrived 6531 * until now. (those are in pending_raid_disks) 6532 * 6533 * the method: pick the first pending disk, collect all disks with 6534 * the same UUID, remove all from the pending list and put them into 6535 * the 'same_array' list. Then order this list based on superblock 6536 * update time (freshest comes first), kick out 'old' disks and 6537 * compare superblocks. If everything's fine then run it. 6538 * 6539 * If "unit" is allocated, then bump its reference count 6540 */ 6541static void autorun_devices(int part) 6542{ 6543 struct md_rdev *rdev0, *rdev, *tmp; 6544 struct mddev *mddev; 6545 char b[BDEVNAME_SIZE]; 6546 6547 pr_info("md: autorun ...\n"); 6548 while (!list_empty(&pending_raid_disks)) { 6549 int unit; 6550 dev_t dev; 6551 LIST_HEAD(candidates); 6552 rdev0 = list_entry(pending_raid_disks.next, 6553 struct md_rdev, same_set); 6554 6555 pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b)); 6556 INIT_LIST_HEAD(&candidates); 6557 rdev_for_each_list(rdev, tmp, &pending_raid_disks) 6558 if (super_90_load(rdev, rdev0, 0) >= 0) { 6559 pr_debug("md: adding %s ...\n", 6560 bdevname(rdev->bdev,b)); 6561 list_move(&rdev->same_set, &candidates); 6562 } 6563 /* 6564 * now we have a set of devices, with all of them having 6565 * mostly sane superblocks. It's time to allocate the 6566 * mddev. 
6567 */ 6568 if (part) { 6569 dev = MKDEV(mdp_major, 6570 rdev0->preferred_minor << MdpMinorShift); 6571 unit = MINOR(dev) >> MdpMinorShift; 6572 } else { 6573 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 6574 unit = MINOR(dev); 6575 } 6576 if (rdev0->preferred_minor != unit) { 6577 pr_warn("md: unit number in %s is bad: %d\n", 6578 bdevname(rdev0->bdev, b), rdev0->preferred_minor); 6579 break; 6580 } 6581 6582 md_probe(dev, NULL, NULL); 6583 mddev = mddev_find(dev); 6584 if (!mddev) 6585 break; 6586 6587 if (mddev_lock(mddev)) 6588 pr_warn("md: %s locked, cannot run\n", mdname(mddev)); 6589 else if (mddev->raid_disks || mddev->major_version 6590 || !list_empty(&mddev->disks)) { 6591 pr_warn("md: %s already running, cannot run %s\n", 6592 mdname(mddev), bdevname(rdev0->bdev,b)); 6593 mddev_unlock(mddev); 6594 } else { 6595 pr_debug("md: created %s\n", mdname(mddev)); 6596 mddev->persistent = 1; 6597 rdev_for_each_list(rdev, tmp, &candidates) { 6598 list_del_init(&rdev->same_set); 6599 if (bind_rdev_to_array(rdev, mddev)) 6600 export_rdev(rdev); 6601 } 6602 autorun_array(mddev); 6603 mddev_unlock(mddev); 6604 } 6605 /* on success, candidates will be empty, on error 6606 * it won't... 6607 */ 6608 rdev_for_each_list(rdev, tmp, &candidates) { 6609 list_del_init(&rdev->same_set); 6610 export_rdev(rdev); 6611 } 6612 mddev_put(mddev); 6613 } 6614 pr_info("md: ... autorun DONE.\n"); 6615} 6616#endif /* !MODULE */ 6617 6618static int get_version(void __user *arg) 6619{ 6620 mdu_version_t ver; 6621 6622 ver.major = MD_MAJOR_VERSION; 6623 ver.minor = MD_MINOR_VERSION; 6624 ver.patchlevel = MD_PATCHLEVEL_VERSION; 6625 6626 if (copy_to_user(arg, &ver, sizeof(ver))) 6627 return -EFAULT; 6628 6629 return 0; 6630} 6631 6632static int get_array_info(struct mddev *mddev, void __user *arg) 6633{ 6634 mdu_array_info_t info; 6635 int nr,working,insync,failed,spare; 6636 struct md_rdev *rdev; 6637 6638 nr = working = insync = failed = spare = 0; 6639 rcu_read_lock(); 6640 rdev_for_each_rcu(rdev, mddev) { 6641 nr++; 6642 if (test_bit(Faulty, &rdev->flags)) 6643 failed++; 6644 else { 6645 working++; 6646 if (test_bit(In_sync, &rdev->flags)) 6647 insync++; 6648 else if (test_bit(Journal, &rdev->flags)) 6649 /* TODO: add journal count to md_u.h */ 6650 ; 6651 else 6652 spare++; 6653 } 6654 } 6655 rcu_read_unlock(); 6656 6657 info.major_version = mddev->major_version; 6658 info.minor_version = mddev->minor_version; 6659 info.patch_version = MD_PATCHLEVEL_VERSION; 6660 info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); 6661 info.level = mddev->level; 6662 info.size = mddev->dev_sectors / 2; 6663 if (info.size != mddev->dev_sectors / 2) /* overflow */ 6664 info.size = -1; 6665 info.nr_disks = nr; 6666 info.raid_disks = mddev->raid_disks; 6667 info.md_minor = mddev->md_minor; 6668 info.not_persistent= !mddev->persistent; 6669 6670 info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); 6671 info.state = 0; 6672 if (mddev->in_sync) 6673 info.state = (1<<MD_SB_CLEAN); 6674 if (mddev->bitmap && mddev->bitmap_info.offset) 6675 info.state |= (1<<MD_SB_BITMAP_PRESENT); 6676 if (mddev_is_clustered(mddev)) 6677 info.state |= (1<<MD_SB_CLUSTERED); 6678 info.active_disks = insync; 6679 info.working_disks = working; 6680 info.failed_disks = failed; 6681 info.spare_disks = spare; 6682 6683 info.layout = mddev->layout; 6684 info.chunk_size = mddev->chunk_sectors << 9; 6685 6686 if (copy_to_user(arg, &info, sizeof(info))) 6687 return -EFAULT; 6688 6689 return 0; 6690} 6691 6692static int get_bitmap_file(struct mddev 
*mddev, void __user * arg) 6693{ 6694 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 6695 char *ptr; 6696 int err; 6697 6698 file = kzalloc(sizeof(*file), GFP_NOIO); 6699 if (!file) 6700 return -ENOMEM; 6701 6702 err = 0; 6703 spin_lock(&mddev->lock); 6704 /* bitmap enabled */ 6705 if (mddev->bitmap_info.file) { 6706 ptr = file_path(mddev->bitmap_info.file, file->pathname, 6707 sizeof(file->pathname)); 6708 if (IS_ERR(ptr)) 6709 err = PTR_ERR(ptr); 6710 else 6711 memmove(file->pathname, ptr, 6712 sizeof(file->pathname)-(ptr-file->pathname)); 6713 } 6714 spin_unlock(&mddev->lock); 6715 6716 if (err == 0 && 6717 copy_to_user(arg, file, sizeof(*file))) 6718 err = -EFAULT; 6719 6720 kfree(file); 6721 return err; 6722} 6723 6724static int get_disk_info(struct mddev *mddev, void __user * arg) 6725{ 6726 mdu_disk_info_t info; 6727 struct md_rdev *rdev; 6728 6729 if (copy_from_user(&info, arg, sizeof(info))) 6730 return -EFAULT; 6731 6732 rcu_read_lock(); 6733 rdev = md_find_rdev_nr_rcu(mddev, info.number); 6734 if (rdev) { 6735 info.major = MAJOR(rdev->bdev->bd_dev); 6736 info.minor = MINOR(rdev->bdev->bd_dev); 6737 info.raid_disk = rdev->raid_disk; 6738 info.state = 0; 6739 if (test_bit(Faulty, &rdev->flags)) 6740 info.state |= (1<<MD_DISK_FAULTY); 6741 else if (test_bit(In_sync, &rdev->flags)) { 6742 info.state |= (1<<MD_DISK_ACTIVE); 6743 info.state |= (1<<MD_DISK_SYNC); 6744 } 6745 if (test_bit(Journal, &rdev->flags)) 6746 info.state |= (1<<MD_DISK_JOURNAL); 6747 if (test_bit(WriteMostly, &rdev->flags)) 6748 info.state |= (1<<MD_DISK_WRITEMOSTLY); 6749 if (test_bit(FailFast, &rdev->flags)) 6750 info.state |= (1<<MD_DISK_FAILFAST); 6751 } else { 6752 info.major = info.minor = 0; 6753 info.raid_disk = -1; 6754 info.state = (1<<MD_DISK_REMOVED); 6755 } 6756 rcu_read_unlock(); 6757 6758 if (copy_to_user(arg, &info, sizeof(info))) 6759 return -EFAULT; 6760 6761 return 0; 6762} 6763 6764int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info) 6765{ 6766 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 6767 struct md_rdev *rdev; 6768 dev_t dev = MKDEV(info->major,info->minor); 6769 6770 if (mddev_is_clustered(mddev) && 6771 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) { 6772 pr_warn("%s: Cannot add to clustered mddev.\n", 6773 mdname(mddev)); 6774 return -EINVAL; 6775 } 6776 6777 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 6778 return -EOVERFLOW; 6779 6780 if (!mddev->raid_disks) { 6781 int err; 6782 /* expecting a device which has a superblock */ 6783 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 6784 if (IS_ERR(rdev)) { 6785 pr_warn("md: md_import_device returned %ld\n", 6786 PTR_ERR(rdev)); 6787 return PTR_ERR(rdev); 6788 } 6789 if (!list_empty(&mddev->disks)) { 6790 struct md_rdev *rdev0 6791 = list_entry(mddev->disks.next, 6792 struct md_rdev, same_set); 6793 err = super_types[mddev->major_version] 6794 .load_super(rdev, rdev0, mddev->minor_version); 6795 if (err < 0) { 6796 pr_warn("md: %s has different UUID to %s\n", 6797 bdevname(rdev->bdev,b), 6798 bdevname(rdev0->bdev,b2)); 6799 export_rdev(rdev); 6800 return -EINVAL; 6801 } 6802 } 6803 err = bind_rdev_to_array(rdev, mddev); 6804 if (err) 6805 export_rdev(rdev); 6806 return err; 6807 } 6808 6809 /* 6810 * md_add_new_disk can be used once the array is assembled 6811 * to add "hot spares". 
They must already have a superblock 6812 * written 6813 */ 6814 if (mddev->pers) { 6815 int err; 6816 if (!mddev->pers->hot_add_disk) { 6817 pr_warn("%s: personality does not support diskops!\n", 6818 mdname(mddev)); 6819 return -EINVAL; 6820 } 6821 if (mddev->persistent) 6822 rdev = md_import_device(dev, mddev->major_version, 6823 mddev->minor_version); 6824 else 6825 rdev = md_import_device(dev, -1, -1); 6826 if (IS_ERR(rdev)) { 6827 pr_warn("md: md_import_device returned %ld\n", 6828 PTR_ERR(rdev)); 6829 return PTR_ERR(rdev); 6830 } 6831 /* set saved_raid_disk if appropriate */ 6832 if (!mddev->persistent) { 6833 if (info->state & (1<<MD_DISK_SYNC) && 6834 info->raid_disk < mddev->raid_disks) { 6835 rdev->raid_disk = info->raid_disk; 6836 set_bit(In_sync, &rdev->flags); 6837 clear_bit(Bitmap_sync, &rdev->flags); 6838 } else 6839 rdev->raid_disk = -1; 6840 rdev->saved_raid_disk = rdev->raid_disk; 6841 } else 6842 super_types[mddev->major_version]. 6843 validate_super(mddev, NULL/*freshest*/, rdev); 6844 if ((info->state & (1<<MD_DISK_SYNC)) && 6845 rdev->raid_disk != info->raid_disk) { 6846 /* This was a hot-add request, but events doesn't 6847 * match, so reject it. 6848 */ 6849 export_rdev(rdev); 6850 return -EINVAL; 6851 } 6852 6853 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 6854 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 6855 set_bit(WriteMostly, &rdev->flags); 6856 else 6857 clear_bit(WriteMostly, &rdev->flags); 6858 if (info->state & (1<<MD_DISK_FAILFAST)) 6859 set_bit(FailFast, &rdev->flags); 6860 else 6861 clear_bit(FailFast, &rdev->flags); 6862 6863 if (info->state & (1<<MD_DISK_JOURNAL)) { 6864 struct md_rdev *rdev2; 6865 bool has_journal = false; 6866 6867 /* make sure no existing journal disk */ 6868 rdev_for_each(rdev2, mddev) { 6869 if (test_bit(Journal, &rdev2->flags)) { 6870 has_journal = true; 6871 break; 6872 } 6873 } 6874 if (has_journal || mddev->bitmap) { 6875 export_rdev(rdev); 6876 return -EBUSY; 6877 } 6878 set_bit(Journal, &rdev->flags); 6879 } 6880 /* 6881 * check whether the device shows up in other nodes 6882 */ 6883 if (mddev_is_clustered(mddev)) { 6884 if (info->state & (1 << MD_DISK_CANDIDATE)) 6885 set_bit(Candidate, &rdev->flags); 6886 else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) { 6887 /* --add initiated by this node */ 6888 err = md_cluster_ops->add_new_disk(mddev, rdev); 6889 if (err) { 6890 export_rdev(rdev); 6891 return err; 6892 } 6893 } 6894 } 6895 6896 rdev->raid_disk = -1; 6897 err = bind_rdev_to_array(rdev, mddev); 6898 6899 if (err) 6900 export_rdev(rdev); 6901 6902 if (mddev_is_clustered(mddev)) { 6903 if (info->state & (1 << MD_DISK_CANDIDATE)) { 6904 if (!err) { 6905 err = md_cluster_ops->new_disk_ack(mddev, 6906 err == 0); 6907 if (err) 6908 md_kick_rdev_from_array(rdev); 6909 } 6910 } else { 6911 if (err) 6912 md_cluster_ops->add_new_disk_cancel(mddev); 6913 else 6914 err = add_bound_rdev(rdev); 6915 } 6916 6917 } else if (!err) 6918 err = add_bound_rdev(rdev); 6919 6920 return err; 6921 } 6922 6923 /* otherwise, md_add_new_disk is only allowed 6924 * for major_version==0 superblocks 6925 */ 6926 if (mddev->major_version != 0) { 6927 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev)); 6928 return -EINVAL; 6929 } 6930 6931 if (!(info->state & (1<<MD_DISK_FAULTY))) { 6932 int err; 6933 rdev = md_import_device(dev, -1, 0); 6934 if (IS_ERR(rdev)) { 6935 pr_warn("md: error, md_import_device() returned %ld\n", 6936 PTR_ERR(rdev)); 6937 return PTR_ERR(rdev); 6938 } 6939 rdev->desc_nr = info->number; 6940 if 
(info->raid_disk < mddev->raid_disks) 6941 rdev->raid_disk = info->raid_disk; 6942 else 6943 rdev->raid_disk = -1; 6944 6945 if (rdev->raid_disk < mddev->raid_disks) 6946 if (info->state & (1<<MD_DISK_SYNC)) 6947 set_bit(In_sync, &rdev->flags); 6948 6949 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 6950 set_bit(WriteMostly, &rdev->flags); 6951 if (info->state & (1<<MD_DISK_FAILFAST)) 6952 set_bit(FailFast, &rdev->flags); 6953 6954 if (!mddev->persistent) { 6955 pr_debug("md: nonpersistent superblock ...\n"); 6956 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 6957 } else 6958 rdev->sb_start = calc_dev_sboffset(rdev); 6959 rdev->sectors = rdev->sb_start; 6960 6961 err = bind_rdev_to_array(rdev, mddev); 6962 if (err) { 6963 export_rdev(rdev); 6964 return err; 6965 } 6966 } 6967 6968 return 0; 6969} 6970 6971static int hot_remove_disk(struct mddev *mddev, dev_t dev) 6972{ 6973 char b[BDEVNAME_SIZE]; 6974 struct md_rdev *rdev; 6975 6976 if (!mddev->pers) 6977 return -ENODEV; 6978 6979 rdev = find_rdev(mddev, dev); 6980 if (!rdev) 6981 return -ENXIO; 6982 6983 if (rdev->raid_disk < 0) 6984 goto kick_rdev; 6985 6986 clear_bit(Blocked, &rdev->flags); 6987 remove_and_add_spares(mddev, rdev); 6988 6989 if (rdev->raid_disk >= 0) 6990 goto busy; 6991 6992kick_rdev: 6993 if (mddev_is_clustered(mddev)) { 6994 if (md_cluster_ops->remove_disk(mddev, rdev)) 6995 goto busy; 6996 } 6997 6998 md_kick_rdev_from_array(rdev); 6999 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 7000 if (mddev->thread) 7001 md_wakeup_thread(mddev->thread); 7002 else 7003 md_update_sb(mddev, 1); 7004 md_new_event(mddev); 7005 7006 return 0; 7007busy: 7008 pr_debug("md: cannot remove active disk %s from %s ...\n", 7009 bdevname(rdev->bdev,b), mdname(mddev)); 7010 return -EBUSY; 7011} 7012 7013static int hot_add_disk(struct mddev *mddev, dev_t dev) 7014{ 7015 char b[BDEVNAME_SIZE]; 7016 int err; 7017 struct md_rdev *rdev; 7018 7019 if (!mddev->pers) 7020 return -ENODEV; 7021 7022 if (mddev->major_version != 0) { 7023 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n", 7024 mdname(mddev)); 7025 return -EINVAL; 7026 } 7027 if (!mddev->pers->hot_add_disk) { 7028 pr_warn("%s: personality does not support diskops!\n", 7029 mdname(mddev)); 7030 return -EINVAL; 7031 } 7032 7033 rdev = md_import_device(dev, -1, 0); 7034 if (IS_ERR(rdev)) { 7035 pr_warn("md: error, md_import_device() returned %ld\n", 7036 PTR_ERR(rdev)); 7037 return -EINVAL; 7038 } 7039 7040 if (mddev->persistent) 7041 rdev->sb_start = calc_dev_sboffset(rdev); 7042 else 7043 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 7044 7045 rdev->sectors = rdev->sb_start; 7046 7047 if (test_bit(Faulty, &rdev->flags)) { 7048 pr_warn("md: can not hot-add faulty %s disk to %s!\n", 7049 bdevname(rdev->bdev,b), mdname(mddev)); 7050 err = -EINVAL; 7051 goto abort_export; 7052 } 7053 7054 clear_bit(In_sync, &rdev->flags); 7055 rdev->desc_nr = -1; 7056 rdev->saved_raid_disk = -1; 7057 err = bind_rdev_to_array(rdev, mddev); 7058 if (err) 7059 goto abort_export; 7060 7061 /* 7062 * The rest should better be atomic, we can have disk failures 7063 * noticed in interrupt contexts ... 7064 */ 7065 7066 rdev->raid_disk = -1; 7067 7068 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 7069 if (!mddev->thread) 7070 md_update_sb(mddev, 1); 7071 /* 7072 * Kick recovery, maybe this spare has to be added to the 7073 * array immediately. 
7074 */ 7075 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7076 md_wakeup_thread(mddev->thread); 7077 md_new_event(mddev); 7078 return 0; 7079 7080abort_export: 7081 export_rdev(rdev); 7082 return err; 7083} 7084 7085static int set_bitmap_file(struct mddev *mddev, int fd) 7086{ 7087 int err = 0; 7088 7089 if (mddev->pers) { 7090 if (!mddev->pers->quiesce || !mddev->thread) 7091 return -EBUSY; 7092 if (mddev->recovery || mddev->sync_thread) 7093 return -EBUSY; 7094 /* we should be able to change the bitmap.. */ 7095 } 7096 7097 if (fd >= 0) { 7098 struct inode *inode; 7099 struct file *f; 7100 7101 if (mddev->bitmap || mddev->bitmap_info.file) 7102 return -EEXIST; /* cannot add when bitmap is present */ 7103 f = fget(fd); 7104 7105 if (f == NULL) { 7106 pr_warn("%s: error: failed to get bitmap file\n", 7107 mdname(mddev)); 7108 return -EBADF; 7109 } 7110 7111 inode = f->f_mapping->host; 7112 if (!S_ISREG(inode->i_mode)) { 7113 pr_warn("%s: error: bitmap file must be a regular file\n", 7114 mdname(mddev)); 7115 err = -EBADF; 7116 } else if (!(f->f_mode & FMODE_WRITE)) { 7117 pr_warn("%s: error: bitmap file must open for write\n", 7118 mdname(mddev)); 7119 err = -EBADF; 7120 } else if (atomic_read(&inode->i_writecount) != 1) { 7121 pr_warn("%s: error: bitmap file is already in use\n", 7122 mdname(mddev)); 7123 err = -EBUSY; 7124 } 7125 if (err) { 7126 fput(f); 7127 return err; 7128 } 7129 mddev->bitmap_info.file = f; 7130 mddev->bitmap_info.offset = 0; /* file overrides offset */ 7131 } else if (mddev->bitmap == NULL) 7132 return -ENOENT; /* cannot remove what isn't there */ 7133 err = 0; 7134 if (mddev->pers) { 7135 if (fd >= 0) { 7136 struct bitmap *bitmap; 7137 7138 bitmap = md_bitmap_create(mddev, -1); 7139 mddev_suspend(mddev); 7140 if (!IS_ERR(bitmap)) { 7141 mddev->bitmap = bitmap; 7142 err = md_bitmap_load(mddev); 7143 } else 7144 err = PTR_ERR(bitmap); 7145 if (err) { 7146 md_bitmap_destroy(mddev); 7147 fd = -1; 7148 } 7149 mddev_resume(mddev); 7150 } else if (fd < 0) { 7151 mddev_suspend(mddev); 7152 md_bitmap_destroy(mddev); 7153 mddev_resume(mddev); 7154 } 7155 } 7156 if (fd < 0) { 7157 struct file *f = mddev->bitmap_info.file; 7158 if (f) { 7159 spin_lock(&mddev->lock); 7160 mddev->bitmap_info.file = NULL; 7161 spin_unlock(&mddev->lock); 7162 fput(f); 7163 } 7164 } 7165 7166 return err; 7167} 7168 7169/* 7170 * md_set_array_info is used two different ways 7171 * The original usage is when creating a new array. 7172 * In this usage, raid_disks is > 0 and it together with 7173 * level, size, not_persistent,layout,chunksize determine the 7174 * shape of the array. 7175 * This will always create an array with a type-0.90.0 superblock. 7176 * The newer usage is when assembling an array. 7177 * In this case raid_disks will be 0, and the major_version field is 7178 * use to determine which style super-blocks are to be found on the devices. 7179 * The minor and patch _version numbers are also kept incase the 7180 * super_block handler wishes to interpret them. 7181 */ 7182int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info) 7183{ 7184 if (info->raid_disks == 0) { 7185 /* just setting version number for superblock loading */ 7186 if (info->major_version < 0 || 7187 info->major_version >= ARRAY_SIZE(super_types) || 7188 super_types[info->major_version].name == NULL) { 7189 /* maybe try to auto-load a module? 
*/ 7190 pr_warn("md: superblock version %d not known\n", 7191 info->major_version); 7192 return -EINVAL; 7193 } 7194 mddev->major_version = info->major_version; 7195 mddev->minor_version = info->minor_version; 7196 mddev->patch_version = info->patch_version; 7197 mddev->persistent = !info->not_persistent; 7198 /* ensure mddev_put doesn't delete this now that there 7199 * is some minimal configuration. 7200 */ 7201 mddev->ctime = ktime_get_real_seconds(); 7202 return 0; 7203 } 7204 mddev->major_version = MD_MAJOR_VERSION; 7205 mddev->minor_version = MD_MINOR_VERSION; 7206 mddev->patch_version = MD_PATCHLEVEL_VERSION; 7207 mddev->ctime = ktime_get_real_seconds(); 7208 7209 mddev->level = info->level; 7210 mddev->clevel[0] = 0; 7211 mddev->dev_sectors = 2 * (sector_t)info->size; 7212 mddev->raid_disks = info->raid_disks; 7213 /* don't set md_minor, it is determined by which /dev/md* was 7214 * openned 7215 */ 7216 if (info->state & (1<<MD_SB_CLEAN)) 7217 mddev->recovery_cp = MaxSector; 7218 else 7219 mddev->recovery_cp = 0; 7220 mddev->persistent = ! info->not_persistent; 7221 mddev->external = 0; 7222 7223 mddev->layout = info->layout; 7224 if (mddev->level == 0) 7225 /* Cannot trust RAID0 layout info here */ 7226 mddev->layout = -1; 7227 mddev->chunk_sectors = info->chunk_size >> 9; 7228 7229 if (mddev->persistent) { 7230 mddev->max_disks = MD_SB_DISKS; 7231 mddev->flags = 0; 7232 mddev->sb_flags = 0; 7233 } 7234 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 7235 7236 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 7237 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); 7238 mddev->bitmap_info.offset = 0; 7239 7240 mddev->reshape_position = MaxSector; 7241 7242 /* 7243 * Generate a 128 bit UUID 7244 */ 7245 get_random_bytes(mddev->uuid, 16); 7246 7247 mddev->new_level = mddev->level; 7248 mddev->new_chunk_sectors = mddev->chunk_sectors; 7249 mddev->new_layout = mddev->layout; 7250 mddev->delta_disks = 0; 7251 mddev->reshape_backwards = 0; 7252 7253 return 0; 7254} 7255 7256void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors) 7257{ 7258 lockdep_assert_held(&mddev->reconfig_mutex); 7259 7260 if (mddev->external_size) 7261 return; 7262 7263 mddev->array_sectors = array_sectors; 7264} 7265EXPORT_SYMBOL(md_set_array_sectors); 7266 7267static int update_size(struct mddev *mddev, sector_t num_sectors) 7268{ 7269 struct md_rdev *rdev; 7270 int rv; 7271 int fit = (num_sectors == 0); 7272 sector_t old_dev_sectors = mddev->dev_sectors; 7273 7274 if (mddev->pers->resize == NULL) 7275 return -EINVAL; 7276 /* The "num_sectors" is the number of sectors of each device that 7277 * is used. This can only make sense for arrays with redundancy. 7278 * linear and raid0 always use whatever space is available. We can only 7279 * consider changing this number if no resync or reconstruction is 7280 * happening, and if the new size is acceptable. It must fit before the 7281 * sb_start or, if that is <data_offset, it must fit before the size 7282 * of each device. If num_sectors is zero, we find the largest size 7283 * that fits. 
7284 */ 7285 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 7286 mddev->sync_thread) 7287 return -EBUSY; 7288 if (mddev->ro) 7289 return -EROFS; 7290 7291 rdev_for_each(rdev, mddev) { 7292 sector_t avail = rdev->sectors; 7293 7294 if (fit && (num_sectors == 0 || num_sectors > avail)) 7295 num_sectors = avail; 7296 if (avail < num_sectors) 7297 return -ENOSPC; 7298 } 7299 rv = mddev->pers->resize(mddev, num_sectors); 7300 if (!rv) { 7301 if (mddev_is_clustered(mddev)) 7302 md_cluster_ops->update_size(mddev, old_dev_sectors); 7303 else if (mddev->queue) { 7304 set_capacity(mddev->gendisk, mddev->array_sectors); 7305 revalidate_disk_size(mddev->gendisk, true); 7306 } 7307 } 7308 return rv; 7309} 7310 7311static int update_raid_disks(struct mddev *mddev, int raid_disks) 7312{ 7313 int rv; 7314 struct md_rdev *rdev; 7315 /* change the number of raid disks */ 7316 if (mddev->pers->check_reshape == NULL) 7317 return -EINVAL; 7318 if (mddev->ro) 7319 return -EROFS; 7320 if (raid_disks <= 0 || 7321 (mddev->max_disks && raid_disks >= mddev->max_disks)) 7322 return -EINVAL; 7323 if (mddev->sync_thread || 7324 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 7325 test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) || 7326 mddev->reshape_position != MaxSector) 7327 return -EBUSY; 7328 7329 rdev_for_each(rdev, mddev) { 7330 if (mddev->raid_disks < raid_disks && 7331 rdev->data_offset < rdev->new_data_offset) 7332 return -EINVAL; 7333 if (mddev->raid_disks > raid_disks && 7334 rdev->data_offset > rdev->new_data_offset) 7335 return -EINVAL; 7336 } 7337 7338 mddev->delta_disks = raid_disks - mddev->raid_disks; 7339 if (mddev->delta_disks < 0) 7340 mddev->reshape_backwards = 1; 7341 else if (mddev->delta_disks > 0) 7342 mddev->reshape_backwards = 0; 7343 7344 rv = mddev->pers->check_reshape(mddev); 7345 if (rv < 0) { 7346 mddev->delta_disks = 0; 7347 mddev->reshape_backwards = 0; 7348 } 7349 return rv; 7350} 7351 7352/* 7353 * update_array_info is used to change the configuration of an 7354 * on-line array. 7355 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size 7356 * fields in the info are checked against the array. 7357 * Any differences that cannot be handled will cause an error. 7358 * Normally, only one change can be managed at a time. 
7359 */ 7360static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) 7361{ 7362 int rv = 0; 7363 int cnt = 0; 7364 int state = 0; 7365 7366 /* calculate expected state,ignoring low bits */ 7367 if (mddev->bitmap && mddev->bitmap_info.offset) 7368 state |= (1 << MD_SB_BITMAP_PRESENT); 7369 7370 if (mddev->major_version != info->major_version || 7371 mddev->minor_version != info->minor_version || 7372/* mddev->patch_version != info->patch_version || */ 7373 mddev->ctime != info->ctime || 7374 mddev->level != info->level || 7375/* mddev->layout != info->layout || */ 7376 mddev->persistent != !info->not_persistent || 7377 mddev->chunk_sectors != info->chunk_size >> 9 || 7378 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 7379 ((state^info->state) & 0xfffffe00) 7380 ) 7381 return -EINVAL; 7382 /* Check there is only one change */ 7383 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 7384 cnt++; 7385 if (mddev->raid_disks != info->raid_disks) 7386 cnt++; 7387 if (mddev->layout != info->layout) 7388 cnt++; 7389 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) 7390 cnt++; 7391 if (cnt == 0) 7392 return 0; 7393 if (cnt > 1) 7394 return -EINVAL; 7395 7396 if (mddev->layout != info->layout) { 7397 /* Change layout 7398 * we don't need to do anything at the md level, the 7399 * personality will take care of it all. 7400 */ 7401 if (mddev->pers->check_reshape == NULL) 7402 return -EINVAL; 7403 else { 7404 mddev->new_layout = info->layout; 7405 rv = mddev->pers->check_reshape(mddev); 7406 if (rv) 7407 mddev->new_layout = mddev->layout; 7408 return rv; 7409 } 7410 } 7411 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 7412 rv = update_size(mddev, (sector_t)info->size * 2); 7413 7414 if (mddev->raid_disks != info->raid_disks) 7415 rv = update_raid_disks(mddev, info->raid_disks); 7416 7417 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 7418 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) { 7419 rv = -EINVAL; 7420 goto err; 7421 } 7422 if (mddev->recovery || mddev->sync_thread) { 7423 rv = -EBUSY; 7424 goto err; 7425 } 7426 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 7427 struct bitmap *bitmap; 7428 /* add the bitmap */ 7429 if (mddev->bitmap) { 7430 rv = -EEXIST; 7431 goto err; 7432 } 7433 if (mddev->bitmap_info.default_offset == 0) { 7434 rv = -EINVAL; 7435 goto err; 7436 } 7437 mddev->bitmap_info.offset = 7438 mddev->bitmap_info.default_offset; 7439 mddev->bitmap_info.space = 7440 mddev->bitmap_info.default_space; 7441 bitmap = md_bitmap_create(mddev, -1); 7442 mddev_suspend(mddev); 7443 if (!IS_ERR(bitmap)) { 7444 mddev->bitmap = bitmap; 7445 rv = md_bitmap_load(mddev); 7446 } else 7447 rv = PTR_ERR(bitmap); 7448 if (rv) 7449 md_bitmap_destroy(mddev); 7450 mddev_resume(mddev); 7451 } else { 7452 /* remove the bitmap */ 7453 if (!mddev->bitmap) { 7454 rv = -ENOENT; 7455 goto err; 7456 } 7457 if (mddev->bitmap->storage.file) { 7458 rv = -EINVAL; 7459 goto err; 7460 } 7461 if (mddev->bitmap_info.nodes) { 7462 /* hold PW on all the bitmap lock */ 7463 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) { 7464 pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n"); 7465 rv = -EPERM; 7466 md_cluster_ops->unlock_all_bitmaps(mddev); 7467 goto err; 7468 } 7469 7470 mddev->bitmap_info.nodes = 0; 7471 md_cluster_ops->leave(mddev); 7472 module_put(md_cluster_mod); 7473 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY; 7474 } 7475 mddev_suspend(mddev); 7476 md_bitmap_destroy(mddev); 
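			/* bitmap is gone: resume I/O and clear the stored offset */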
7477 mddev_resume(mddev); 7478 mddev->bitmap_info.offset = 0; 7479 } 7480 } 7481 md_update_sb(mddev, 1); 7482 return rv; 7483err: 7484 return rv; 7485} 7486 7487static int set_disk_faulty(struct mddev *mddev, dev_t dev) 7488{ 7489 struct md_rdev *rdev; 7490 int err = 0; 7491 7492 if (mddev->pers == NULL) 7493 return -ENODEV; 7494 7495 rcu_read_lock(); 7496 rdev = md_find_rdev_rcu(mddev, dev); 7497 if (!rdev) 7498 err = -ENODEV; 7499 else { 7500 md_error(mddev, rdev); 7501 if (!test_bit(Faulty, &rdev->flags)) 7502 err = -EBUSY; 7503 } 7504 rcu_read_unlock(); 7505 return err; 7506} 7507 7508/* 7509 * We have a problem here : there is no easy way to give a CHS 7510 * virtual geometry. We currently pretend that we have a 2 heads 7511 * 4 sectors (with a BIG number of cylinders...). This drives 7512 * dosfs just mad... ;-) 7513 */ 7514static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 7515{ 7516 struct mddev *mddev = bdev->bd_disk->private_data; 7517 7518 geo->heads = 2; 7519 geo->sectors = 4; 7520 geo->cylinders = mddev->array_sectors / 8; 7521 return 0; 7522} 7523 7524static inline bool md_ioctl_valid(unsigned int cmd) 7525{ 7526 switch (cmd) { 7527 case ADD_NEW_DISK: 7528 case BLKROSET: 7529 case GET_ARRAY_INFO: 7530 case GET_BITMAP_FILE: 7531 case GET_DISK_INFO: 7532 case HOT_ADD_DISK: 7533 case HOT_REMOVE_DISK: 7534 case RAID_VERSION: 7535 case RESTART_ARRAY_RW: 7536 case RUN_ARRAY: 7537 case SET_ARRAY_INFO: 7538 case SET_BITMAP_FILE: 7539 case SET_DISK_FAULTY: 7540 case STOP_ARRAY: 7541 case STOP_ARRAY_RO: 7542 case CLUSTERED_DISK_NACK: 7543 return true; 7544 default: 7545 return false; 7546 } 7547} 7548 7549static int md_ioctl(struct block_device *bdev, fmode_t mode, 7550 unsigned int cmd, unsigned long arg) 7551{ 7552 int err = 0; 7553 void __user *argp = (void __user *)arg; 7554 struct mddev *mddev = NULL; 7555 int ro; 7556 bool did_set_md_closing = false; 7557 7558 if (!md_ioctl_valid(cmd)) 7559 return -ENOTTY; 7560 7561 switch (cmd) { 7562 case RAID_VERSION: 7563 case GET_ARRAY_INFO: 7564 case GET_DISK_INFO: 7565 break; 7566 default: 7567 if (!capable(CAP_SYS_ADMIN)) 7568 return -EACCES; 7569 } 7570 7571 /* 7572 * Commands dealing with the RAID driver but not any 7573 * particular array: 7574 */ 7575 switch (cmd) { 7576 case RAID_VERSION: 7577 err = get_version(argp); 7578 goto out; 7579 default:; 7580 } 7581 7582 /* 7583 * Commands creating/starting a new array: 7584 */ 7585 7586 mddev = bdev->bd_disk->private_data; 7587 7588 if (!mddev) { 7589 BUG(); 7590 goto out; 7591 } 7592 7593 /* Some actions do not requires the mutex */ 7594 switch (cmd) { 7595 case GET_ARRAY_INFO: 7596 if (!mddev->raid_disks && !mddev->external) 7597 err = -ENODEV; 7598 else 7599 err = get_array_info(mddev, argp); 7600 goto out; 7601 7602 case GET_DISK_INFO: 7603 if (!mddev->raid_disks && !mddev->external) 7604 err = -ENODEV; 7605 else 7606 err = get_disk_info(mddev, argp); 7607 goto out; 7608 7609 case SET_DISK_FAULTY: 7610 err = set_disk_faulty(mddev, new_decode_dev(arg)); 7611 goto out; 7612 7613 case GET_BITMAP_FILE: 7614 err = get_bitmap_file(mddev, argp); 7615 goto out; 7616 7617 } 7618 7619 if (cmd == ADD_NEW_DISK || cmd == HOT_ADD_DISK) 7620 flush_rdev_wq(mddev); 7621 7622 if (cmd == HOT_REMOVE_DISK) 7623 /* need to ensure recovery thread has run */ 7624 wait_event_interruptible_timeout(mddev->sb_wait, 7625 !test_bit(MD_RECOVERY_NEEDED, 7626 &mddev->recovery), 7627 msecs_to_jiffies(5000)); 7628 if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) { 7629 /* Need to flush page cache, 
and ensure no-one else opens 7630 * and writes 7631 */ 7632 mutex_lock(&mddev->open_mutex); 7633 if (mddev->pers && atomic_read(&mddev->openers) > 1) { 7634 mutex_unlock(&mddev->open_mutex); 7635 err = -EBUSY; 7636 goto out; 7637 } 7638 if (test_and_set_bit(MD_CLOSING, &mddev->flags)) { 7639 mutex_unlock(&mddev->open_mutex); 7640 err = -EBUSY; 7641 goto out; 7642 } 7643 did_set_md_closing = true; 7644 mutex_unlock(&mddev->open_mutex); 7645 sync_blockdev(bdev); 7646 } 7647 err = mddev_lock(mddev); 7648 if (err) { 7649 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n", 7650 err, cmd); 7651 goto out; 7652 } 7653 7654 if (cmd == SET_ARRAY_INFO) { 7655 mdu_array_info_t info; 7656 if (!arg) 7657 memset(&info, 0, sizeof(info)); 7658 else if (copy_from_user(&info, argp, sizeof(info))) { 7659 err = -EFAULT; 7660 goto unlock; 7661 } 7662 if (mddev->pers) { 7663 err = update_array_info(mddev, &info); 7664 if (err) { 7665 pr_warn("md: couldn't update array info. %d\n", err); 7666 goto unlock; 7667 } 7668 goto unlock; 7669 } 7670 if (!list_empty(&mddev->disks)) { 7671 pr_warn("md: array %s already has disks!\n", mdname(mddev)); 7672 err = -EBUSY; 7673 goto unlock; 7674 } 7675 if (mddev->raid_disks) { 7676 pr_warn("md: array %s already initialised!\n", mdname(mddev)); 7677 err = -EBUSY; 7678 goto unlock; 7679 } 7680 err = md_set_array_info(mddev, &info); 7681 if (err) { 7682 pr_warn("md: couldn't set array info. %d\n", err); 7683 goto unlock; 7684 } 7685 goto unlock; 7686 } 7687 7688 /* 7689 * Commands querying/configuring an existing array: 7690 */ 7691 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 7692 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */ 7693 if ((!mddev->raid_disks && !mddev->external) 7694 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 7695 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE 7696 && cmd != GET_BITMAP_FILE) { 7697 err = -ENODEV; 7698 goto unlock; 7699 } 7700 7701 /* 7702 * Commands even a read-only array can execute: 7703 */ 7704 switch (cmd) { 7705 case RESTART_ARRAY_RW: 7706 err = restart_array(mddev); 7707 goto unlock; 7708 7709 case STOP_ARRAY: 7710 err = do_md_stop(mddev, 0, bdev); 7711 goto unlock; 7712 7713 case STOP_ARRAY_RO: 7714 err = md_set_readonly(mddev, bdev); 7715 goto unlock; 7716 7717 case HOT_REMOVE_DISK: 7718 err = hot_remove_disk(mddev, new_decode_dev(arg)); 7719 goto unlock; 7720 7721 case ADD_NEW_DISK: 7722 /* We can support ADD_NEW_DISK on read-only arrays 7723 * only if we are re-adding a preexisting device. 7724 * So require mddev->pers and MD_DISK_SYNC. 7725 */ 7726 if (mddev->pers) { 7727 mdu_disk_info_t info; 7728 if (copy_from_user(&info, argp, sizeof(info))) 7729 err = -EFAULT; 7730 else if (!(info.state & (1<<MD_DISK_SYNC))) 7731 /* Need to clear read-only for this */ 7732 break; 7733 else 7734 err = md_add_new_disk(mddev, &info); 7735 goto unlock; 7736 } 7737 break; 7738 7739 case BLKROSET: 7740 if (get_user(ro, (int __user *)(arg))) { 7741 err = -EFAULT; 7742 goto unlock; 7743 } 7744 err = -EINVAL; 7745 7746 /* if the bdev is going readonly the value of mddev->ro 7747 * does not matter, no writes are coming 7748 */ 7749 if (ro) 7750 goto unlock; 7751 7752 /* are we are already prepared for writes? 
*/ 7753 if (mddev->ro != 1) 7754 goto unlock; 7755 7756 /* transitioning to readauto need only happen for 7757 * arrays that call md_write_start 7758 */ 7759 if (mddev->pers) { 7760 err = restart_array(mddev); 7761 if (err == 0) { 7762 mddev->ro = 2; 7763 set_disk_ro(mddev->gendisk, 0); 7764 } 7765 } 7766 goto unlock; 7767 } 7768 7769 /* 7770 * The remaining ioctls are changing the state of the 7771 * superblock, so we do not allow them on read-only arrays. 7772 */ 7773 if (mddev->ro && mddev->pers) { 7774 if (mddev->ro == 2) { 7775 mddev->ro = 0; 7776 sysfs_notify_dirent_safe(mddev->sysfs_state); 7777 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7778 /* mddev_unlock will wake thread */ 7779 /* If a device failed while we were read-only, we 7780 * need to make sure the metadata is updated now. 7781 */ 7782 if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) { 7783 mddev_unlock(mddev); 7784 wait_event(mddev->sb_wait, 7785 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) && 7786 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 7787 mddev_lock_nointr(mddev); 7788 } 7789 } else { 7790 err = -EROFS; 7791 goto unlock; 7792 } 7793 } 7794 7795 switch (cmd) { 7796 case ADD_NEW_DISK: 7797 { 7798 mdu_disk_info_t info; 7799 if (copy_from_user(&info, argp, sizeof(info))) 7800 err = -EFAULT; 7801 else 7802 err = md_add_new_disk(mddev, &info); 7803 goto unlock; 7804 } 7805 7806 case CLUSTERED_DISK_NACK: 7807 if (mddev_is_clustered(mddev)) 7808 md_cluster_ops->new_disk_ack(mddev, false); 7809 else 7810 err = -EINVAL; 7811 goto unlock; 7812 7813 case HOT_ADD_DISK: 7814 err = hot_add_disk(mddev, new_decode_dev(arg)); 7815 goto unlock; 7816 7817 case RUN_ARRAY: 7818 err = do_md_run(mddev); 7819 goto unlock; 7820 7821 case SET_BITMAP_FILE: 7822 err = set_bitmap_file(mddev, (int)arg); 7823 goto unlock; 7824 7825 default: 7826 err = -EINVAL; 7827 goto unlock; 7828 } 7829 7830unlock: 7831 if (mddev->hold_active == UNTIL_IOCTL && 7832 err != -EINVAL) 7833 mddev->hold_active = 0; 7834 mddev_unlock(mddev); 7835out: 7836 if(did_set_md_closing) 7837 clear_bit(MD_CLOSING, &mddev->flags); 7838 return err; 7839} 7840#ifdef CONFIG_COMPAT 7841static int md_compat_ioctl(struct block_device *bdev, fmode_t mode, 7842 unsigned int cmd, unsigned long arg) 7843{ 7844 switch (cmd) { 7845 case HOT_REMOVE_DISK: 7846 case HOT_ADD_DISK: 7847 case SET_DISK_FAULTY: 7848 case SET_BITMAP_FILE: 7849 /* These take in integer arg, do not convert */ 7850 break; 7851 default: 7852 arg = (unsigned long)compat_ptr(arg); 7853 break; 7854 } 7855 7856 return md_ioctl(bdev, mode, cmd, arg); 7857} 7858#endif /* CONFIG_COMPAT */ 7859 7860static int md_open(struct block_device *bdev, fmode_t mode) 7861{ 7862 /* 7863 * Succeed if we can lock the mddev, which confirms that 7864 * it isn't being stopped right now. 7865 */ 7866 struct mddev *mddev = mddev_find(bdev->bd_dev); 7867 int err; 7868 7869 if (!mddev) 7870 return -ENODEV; 7871 7872 if (mddev->gendisk != bdev->bd_disk) { 7873 /* we are racing with mddev_put which is discarding this 7874 * bd_disk. 
7875 */ 7876 mddev_put(mddev); 7877 /* Wait until bdev->bd_disk is definitely gone */ 7878 if (work_pending(&mddev->del_work)) 7879 flush_workqueue(md_misc_wq); 7880 return -EBUSY; 7881 } 7882 BUG_ON(mddev != bdev->bd_disk->private_data); 7883 7884 if ((err = mutex_lock_interruptible(&mddev->open_mutex))) 7885 goto out; 7886 7887 if (test_bit(MD_CLOSING, &mddev->flags)) { 7888 mutex_unlock(&mddev->open_mutex); 7889 err = -ENODEV; 7890 goto out; 7891 } 7892 7893 err = 0; 7894 atomic_inc(&mddev->openers); 7895 mutex_unlock(&mddev->open_mutex); 7896 7897 bdev_check_media_change(bdev); 7898 out: 7899 if (err) 7900 mddev_put(mddev); 7901 return err; 7902} 7903 7904static void md_release(struct gendisk *disk, fmode_t mode) 7905{ 7906 struct mddev *mddev = disk->private_data; 7907 7908 BUG_ON(!mddev); 7909 atomic_dec(&mddev->openers); 7910 mddev_put(mddev); 7911} 7912 7913static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing) 7914{ 7915 struct mddev *mddev = disk->private_data; 7916 unsigned int ret = 0; 7917 7918 if (mddev->changed) 7919 ret = DISK_EVENT_MEDIA_CHANGE; 7920 mddev->changed = 0; 7921 return ret; 7922} 7923 7924const struct block_device_operations md_fops = 7925{ 7926 .owner = THIS_MODULE, 7927 .submit_bio = md_submit_bio, 7928 .open = md_open, 7929 .release = md_release, 7930 .ioctl = md_ioctl, 7931#ifdef CONFIG_COMPAT 7932 .compat_ioctl = md_compat_ioctl, 7933#endif 7934 .getgeo = md_getgeo, 7935 .check_events = md_check_events, 7936}; 7937 7938static int md_thread(void *arg) 7939{ 7940 struct md_thread *thread = arg; 7941 7942 /* 7943 * md_thread is a 'system-thread', it's priority should be very 7944 * high. We avoid resource deadlocks individually in each 7945 * raid personality. (RAID5 does preallocation) We also use RR and 7946 * the very same RT priority as kswapd, thus we will never get 7947 * into a priority inversion deadlock. 7948 * 7949 * we definitely have to have equal or higher priority than 7950 * bdflush, otherwise bdflush will deadlock if there are too 7951 * many dirty RAID5 blocks. 7952 */ 7953 7954 allow_signal(SIGKILL); 7955 while (!kthread_should_stop()) { 7956 7957 /* We need to wait INTERRUPTIBLE so that 7958 * we don't add to the load-average. 
7959 * That means we need to be sure no signals are 7960 * pending 7961 */ 7962 if (signal_pending(current)) 7963 flush_signals(current); 7964 7965 wait_event_interruptible_timeout 7966 (thread->wqueue, 7967 test_bit(THREAD_WAKEUP, &thread->flags) 7968 || kthread_should_stop() || kthread_should_park(), 7969 thread->timeout); 7970 7971 clear_bit(THREAD_WAKEUP, &thread->flags); 7972 if (kthread_should_park()) 7973 kthread_parkme(); 7974 if (!kthread_should_stop()) 7975 thread->run(thread); 7976 } 7977 7978 return 0; 7979} 7980 7981void md_wakeup_thread(struct md_thread *thread) 7982{ 7983 if (thread) { 7984 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm); 7985 set_bit(THREAD_WAKEUP, &thread->flags); 7986 wake_up(&thread->wqueue); 7987 } 7988} 7989EXPORT_SYMBOL(md_wakeup_thread); 7990 7991struct md_thread *md_register_thread(void (*run) (struct md_thread *), 7992 struct mddev *mddev, const char *name) 7993{ 7994 struct md_thread *thread; 7995 7996 thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL); 7997 if (!thread) 7998 return NULL; 7999 8000 init_waitqueue_head(&thread->wqueue); 8001 8002 thread->run = run; 8003 thread->mddev = mddev; 8004 thread->timeout = MAX_SCHEDULE_TIMEOUT; 8005 thread->tsk = kthread_run(md_thread, thread, 8006 "%s_%s", 8007 mdname(thread->mddev), 8008 name); 8009 if (IS_ERR(thread->tsk)) { 8010 kfree(thread); 8011 return NULL; 8012 } 8013 return thread; 8014} 8015EXPORT_SYMBOL(md_register_thread); 8016 8017void md_unregister_thread(struct md_thread **threadp) 8018{ 8019 struct md_thread *thread; 8020 8021 /* 8022 * Locking ensures that mddev_unlock does not wake_up a 8023 * non-existent thread 8024 */ 8025 spin_lock(&pers_lock); 8026 thread = *threadp; 8027 if (!thread) { 8028 spin_unlock(&pers_lock); 8029 return; 8030 } 8031 *threadp = NULL; 8032 spin_unlock(&pers_lock); 8033 8034 pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); 8035 kthread_stop(thread->tsk); 8036 kfree(thread); 8037} 8038EXPORT_SYMBOL(md_unregister_thread); 8039 8040void md_error(struct mddev *mddev, struct md_rdev *rdev) 8041{ 8042 if (!rdev || test_bit(Faulty, &rdev->flags)) 8043 return; 8044 8045 if (!mddev->pers || !mddev->pers->error_handler) 8046 return; 8047 mddev->pers->error_handler(mddev,rdev); 8048 if (mddev->degraded) 8049 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 8050 sysfs_notify_dirent_safe(rdev->sysfs_state); 8051 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 8052 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 8053 md_wakeup_thread(mddev->thread); 8054 if (mddev->event_work.func) 8055 queue_work(md_misc_wq, &mddev->event_work); 8056 md_new_event(mddev); 8057} 8058EXPORT_SYMBOL(md_error); 8059 8060/* seq_file implementation /proc/mdstat */ 8061 8062static void status_unused(struct seq_file *seq) 8063{ 8064 int i = 0; 8065 struct md_rdev *rdev; 8066 8067 seq_printf(seq, "unused devices: "); 8068 8069 list_for_each_entry(rdev, &pending_raid_disks, same_set) { 8070 char b[BDEVNAME_SIZE]; 8071 i++; 8072 seq_printf(seq, "%s ", 8073 bdevname(rdev->bdev,b)); 8074 } 8075 if (!i) 8076 seq_printf(seq, "<none>"); 8077 8078 seq_printf(seq, "\n"); 8079} 8080 8081static int status_resync(struct seq_file *seq, struct mddev *mddev) 8082{ 8083 sector_t max_sectors, resync, res; 8084 unsigned long dt, db = 0; 8085 sector_t rt, curr_mark_cnt, resync_mark_cnt; 8086 int scale, recovery_active; 8087 unsigned int per_milli; 8088 8089 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 8090 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 8091 max_sectors = 
mddev->resync_max_sectors; 8092 else 8093 max_sectors = mddev->dev_sectors; 8094 8095 resync = mddev->curr_resync; 8096 if (resync <= 3) { 8097 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 8098 /* Still cleaning up */ 8099 resync = max_sectors; 8100 } else if (resync > max_sectors) 8101 resync = max_sectors; 8102 else 8103 resync -= atomic_read(&mddev->recovery_active); 8104 8105 if (resync == 0) { 8106 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) { 8107 struct md_rdev *rdev; 8108 8109 rdev_for_each(rdev, mddev) 8110 if (rdev->raid_disk >= 0 && 8111 !test_bit(Faulty, &rdev->flags) && 8112 rdev->recovery_offset != MaxSector && 8113 rdev->recovery_offset) { 8114 seq_printf(seq, "\trecover=REMOTE"); 8115 return 1; 8116 } 8117 if (mddev->reshape_position != MaxSector) 8118 seq_printf(seq, "\treshape=REMOTE"); 8119 else 8120 seq_printf(seq, "\tresync=REMOTE"); 8121 return 1; 8122 } 8123 if (mddev->recovery_cp < MaxSector) { 8124 seq_printf(seq, "\tresync=PENDING"); 8125 return 1; 8126 } 8127 return 0; 8128 } 8129 if (resync < 3) { 8130 seq_printf(seq, "\tresync=DELAYED"); 8131 return 1; 8132 } 8133 8134 WARN_ON(max_sectors == 0); 8135 /* Pick 'scale' such that (resync>>scale)*1000 will fit 8136 * in a sector_t, and (max_sectors>>scale) will fit in a 8137 * u32, as those are the requirements for sector_div. 8138 * Thus 'scale' must be at least 10 8139 */ 8140 scale = 10; 8141 if (sizeof(sector_t) > sizeof(unsigned long)) { 8142 while ( max_sectors/2 > (1ULL<<(scale+32))) 8143 scale++; 8144 } 8145 res = (resync>>scale)*1000; 8146 sector_div(res, (u32)((max_sectors>>scale)+1)); 8147 8148 per_milli = res; 8149 { 8150 int i, x = per_milli/50, y = 20-x; 8151 seq_printf(seq, "["); 8152 for (i = 0; i < x; i++) 8153 seq_printf(seq, "="); 8154 seq_printf(seq, ">"); 8155 for (i = 0; i < y; i++) 8156 seq_printf(seq, "."); 8157 seq_printf(seq, "] "); 8158 } 8159 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", 8160 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? 8161 "reshape" : 8162 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? 8163 "check" : 8164 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? 8165 "resync" : "recovery"))), 8166 per_milli/10, per_milli % 10, 8167 (unsigned long long) resync/2, 8168 (unsigned long long) max_sectors/2); 8169 8170 /* 8171 * dt: time from mark until now 8172 * db: blocks written from mark until now 8173 * rt: remaining time 8174 * 8175 * rt is a sector_t, which is always 64bit now. We are keeping 8176 * the original algorithm, but it is not really necessary. 8177 * 8178 * Original algorithm: 8179 * So we divide before multiply in case it is 32bit and close 8180 * to the limit. 8181 * We scale the divisor (db) by 32 to avoid losing precision 8182 * near the end of resync when the number of remaining sectors 8183 * is close to 'db'. 8184 * We then divide rt by 32 after multiplying by db to compensate. 8185 * The '+1' avoids division by zero if db is very small. 
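	 *
	 * Illustrative example (made-up numbers): with dt = 10 seconds,
	 * db = 20480 sectors written since the mark and 2,000,000
	 * sectors still to go, rt = 2,000,000 / (20480/32 + 1) * 10 >> 5
	 * = 975 seconds, close to the exact remaining/rate value of
	 * ~977 seconds, and is printed below as "finish=16.2min".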
8186 */ 8187 dt = ((jiffies - mddev->resync_mark) / HZ); 8188 if (!dt) dt++; 8189 8190 curr_mark_cnt = mddev->curr_mark_cnt; 8191 recovery_active = atomic_read(&mddev->recovery_active); 8192 resync_mark_cnt = mddev->resync_mark_cnt; 8193 8194 if (curr_mark_cnt >= (recovery_active + resync_mark_cnt)) 8195 db = curr_mark_cnt - (recovery_active + resync_mark_cnt); 8196 8197 rt = max_sectors - resync; /* number of remaining sectors */ 8198 rt = div64_u64(rt, db/32+1); 8199 rt *= dt; 8200 rt >>= 5; 8201 8202 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60, 8203 ((unsigned long)rt % 60)/6); 8204 8205 seq_printf(seq, " speed=%ldK/sec", db/2/dt); 8206 return 1; 8207} 8208 8209static void *md_seq_start(struct seq_file *seq, loff_t *pos) 8210{ 8211 struct list_head *tmp; 8212 loff_t l = *pos; 8213 struct mddev *mddev; 8214 8215 if (l == 0x10000) { 8216 ++*pos; 8217 return (void *)2; 8218 } 8219 if (l > 0x10000) 8220 return NULL; 8221 if (!l--) 8222 /* header */ 8223 return (void*)1; 8224 8225 spin_lock(&all_mddevs_lock); 8226 list_for_each(tmp,&all_mddevs) 8227 if (!l--) { 8228 mddev = list_entry(tmp, struct mddev, all_mddevs); 8229 mddev_get(mddev); 8230 spin_unlock(&all_mddevs_lock); 8231 return mddev; 8232 } 8233 spin_unlock(&all_mddevs_lock); 8234 if (!l--) 8235 return (void*)2;/* tail */ 8236 return NULL; 8237} 8238 8239static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) 8240{ 8241 struct list_head *tmp; 8242 struct mddev *next_mddev, *mddev = v; 8243 8244 ++*pos; 8245 if (v == (void*)2) 8246 return NULL; 8247 8248 spin_lock(&all_mddevs_lock); 8249 if (v == (void*)1) 8250 tmp = all_mddevs.next; 8251 else 8252 tmp = mddev->all_mddevs.next; 8253 if (tmp != &all_mddevs) 8254 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs)); 8255 else { 8256 next_mddev = (void*)2; 8257 *pos = 0x10000; 8258 } 8259 spin_unlock(&all_mddevs_lock); 8260 8261 if (v != (void*)1) 8262 mddev_put(mddev); 8263 return next_mddev; 8264 8265} 8266 8267static void md_seq_stop(struct seq_file *seq, void *v) 8268{ 8269 struct mddev *mddev = v; 8270 8271 if (mddev && v != (void*)1 && v != (void*)2) 8272 mddev_put(mddev); 8273} 8274 8275static int md_seq_show(struct seq_file *seq, void *v) 8276{ 8277 struct mddev *mddev = v; 8278 sector_t sectors; 8279 struct md_rdev *rdev; 8280 8281 if (v == (void*)1) { 8282 struct md_personality *pers; 8283 seq_printf(seq, "Personalities : "); 8284 spin_lock(&pers_lock); 8285 list_for_each_entry(pers, &pers_list, list) 8286 seq_printf(seq, "[%s] ", pers->name); 8287 8288 spin_unlock(&pers_lock); 8289 seq_printf(seq, "\n"); 8290 seq->poll_event = atomic_read(&md_event_count); 8291 return 0; 8292 } 8293 if (v == (void*)2) { 8294 status_unused(seq); 8295 return 0; 8296 } 8297 8298 spin_lock(&mddev->lock); 8299 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { 8300 seq_printf(seq, "%s : %sactive", mdname(mddev), 8301 mddev->pers ? 
"" : "in"); 8302 if (mddev->pers) { 8303 if (mddev->ro==1) 8304 seq_printf(seq, " (read-only)"); 8305 if (mddev->ro==2) 8306 seq_printf(seq, " (auto-read-only)"); 8307 seq_printf(seq, " %s", mddev->pers->name); 8308 } 8309 8310 sectors = 0; 8311 rcu_read_lock(); 8312 rdev_for_each_rcu(rdev, mddev) { 8313 char b[BDEVNAME_SIZE]; 8314 seq_printf(seq, " %s[%d]", 8315 bdevname(rdev->bdev,b), rdev->desc_nr); 8316 if (test_bit(WriteMostly, &rdev->flags)) 8317 seq_printf(seq, "(W)"); 8318 if (test_bit(Journal, &rdev->flags)) 8319 seq_printf(seq, "(J)"); 8320 if (test_bit(Faulty, &rdev->flags)) { 8321 seq_printf(seq, "(F)"); 8322 continue; 8323 } 8324 if (rdev->raid_disk < 0) 8325 seq_printf(seq, "(S)"); /* spare */ 8326 if (test_bit(Replacement, &rdev->flags)) 8327 seq_printf(seq, "(R)"); 8328 sectors += rdev->sectors; 8329 } 8330 rcu_read_unlock(); 8331 8332 if (!list_empty(&mddev->disks)) { 8333 if (mddev->pers) 8334 seq_printf(seq, "\n %llu blocks", 8335 (unsigned long long) 8336 mddev->array_sectors / 2); 8337 else 8338 seq_printf(seq, "\n %llu blocks", 8339 (unsigned long long)sectors / 2); 8340 } 8341 if (mddev->persistent) { 8342 if (mddev->major_version != 0 || 8343 mddev->minor_version != 90) { 8344 seq_printf(seq," super %d.%d", 8345 mddev->major_version, 8346 mddev->minor_version); 8347 } 8348 } else if (mddev->external) 8349 seq_printf(seq, " super external:%s", 8350 mddev->metadata_type); 8351 else 8352 seq_printf(seq, " super non-persistent"); 8353 8354 if (mddev->pers) { 8355 mddev->pers->status(seq, mddev); 8356 seq_printf(seq, "\n "); 8357 if (mddev->pers->sync_request) { 8358 if (status_resync(seq, mddev)) 8359 seq_printf(seq, "\n "); 8360 } 8361 } else 8362 seq_printf(seq, "\n "); 8363 8364 md_bitmap_status(seq, mddev->bitmap); 8365 8366 seq_printf(seq, "\n"); 8367 } 8368 spin_unlock(&mddev->lock); 8369 8370 return 0; 8371} 8372 8373static const struct seq_operations md_seq_ops = { 8374 .start = md_seq_start, 8375 .next = md_seq_next, 8376 .stop = md_seq_stop, 8377 .show = md_seq_show, 8378}; 8379 8380static int md_seq_open(struct inode *inode, struct file *file) 8381{ 8382 struct seq_file *seq; 8383 int error; 8384 8385 error = seq_open(file, &md_seq_ops); 8386 if (error) 8387 return error; 8388 8389 seq = file->private_data; 8390 seq->poll_event = atomic_read(&md_event_count); 8391 return error; 8392} 8393 8394static int md_unloading; 8395static __poll_t mdstat_poll(struct file *filp, poll_table *wait) 8396{ 8397 struct seq_file *seq = filp->private_data; 8398 __poll_t mask; 8399 8400 if (md_unloading) 8401 return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI; 8402 poll_wait(filp, &md_event_waiters, wait); 8403 8404 /* always allow read */ 8405 mask = EPOLLIN | EPOLLRDNORM; 8406 8407 if (seq->poll_event != atomic_read(&md_event_count)) 8408 mask |= EPOLLERR | EPOLLPRI; 8409 return mask; 8410} 8411 8412static const struct proc_ops mdstat_proc_ops = { 8413 .proc_open = md_seq_open, 8414 .proc_read = seq_read, 8415 .proc_lseek = seq_lseek, 8416 .proc_release = seq_release, 8417 .proc_poll = mdstat_poll, 8418}; 8419 8420int register_md_personality(struct md_personality *p) 8421{ 8422 pr_debug("md: %s personality registered for level %d\n", 8423 p->name, p->level); 8424 spin_lock(&pers_lock); 8425 list_add_tail(&p->list, &pers_list); 8426 spin_unlock(&pers_lock); 8427 return 0; 8428} 8429EXPORT_SYMBOL(register_md_personality); 8430 8431int unregister_md_personality(struct md_personality *p) 8432{ 8433 pr_debug("md: %s personality unregistered\n", p->name); 8434 spin_lock(&pers_lock); 8435 
list_del_init(&p->list); 8436 spin_unlock(&pers_lock); 8437 return 0; 8438} 8439EXPORT_SYMBOL(unregister_md_personality); 8440 8441int register_md_cluster_operations(struct md_cluster_operations *ops, 8442 struct module *module) 8443{ 8444 int ret = 0; 8445 spin_lock(&pers_lock); 8446 if (md_cluster_ops != NULL) 8447 ret = -EALREADY; 8448 else { 8449 md_cluster_ops = ops; 8450 md_cluster_mod = module; 8451 } 8452 spin_unlock(&pers_lock); 8453 return ret; 8454} 8455EXPORT_SYMBOL(register_md_cluster_operations); 8456 8457int unregister_md_cluster_operations(void) 8458{ 8459 spin_lock(&pers_lock); 8460 md_cluster_ops = NULL; 8461 spin_unlock(&pers_lock); 8462 return 0; 8463} 8464EXPORT_SYMBOL(unregister_md_cluster_operations); 8465 8466int md_setup_cluster(struct mddev *mddev, int nodes) 8467{ 8468 int ret; 8469 if (!md_cluster_ops) 8470 request_module("md-cluster"); 8471 spin_lock(&pers_lock); 8472 /* ensure module won't be unloaded */ 8473 if (!md_cluster_ops || !try_module_get(md_cluster_mod)) { 8474 pr_warn("can't find md-cluster module or get it's reference.\n"); 8475 spin_unlock(&pers_lock); 8476 return -ENOENT; 8477 } 8478 spin_unlock(&pers_lock); 8479 8480 ret = md_cluster_ops->join(mddev, nodes); 8481 if (!ret) 8482 mddev->safemode_delay = 0; 8483 return ret; 8484} 8485 8486void md_cluster_stop(struct mddev *mddev) 8487{ 8488 if (!md_cluster_ops) 8489 return; 8490 md_cluster_ops->leave(mddev); 8491 module_put(md_cluster_mod); 8492} 8493 8494static int is_mddev_idle(struct mddev *mddev, int init) 8495{ 8496 struct md_rdev *rdev; 8497 int idle; 8498 int curr_events; 8499 8500 idle = 1; 8501 rcu_read_lock(); 8502 rdev_for_each_rcu(rdev, mddev) { 8503 struct gendisk *disk = rdev->bdev->bd_disk; 8504 curr_events = (int)part_stat_read_accum(&disk->part0, sectors) - 8505 atomic_read(&disk->sync_io); 8506 /* sync IO will cause sync_io to increase before the disk_stats 8507 * as sync_io is counted when a request starts, and 8508 * disk_stats is counted when it completes. 8509 * So resync activity will cause curr_events to be smaller than 8510 * when there was no such activity. 8511 * non-sync IO will cause disk_stat to increase without 8512 * increasing sync_io so curr_events will (eventually) 8513 * be larger than it was before. Once it becomes 8514 * substantially larger, the test below will cause 8515 * the array to appear non-idle, and resync will slow 8516 * down. 8517 * If there is a lot of outstanding resync activity when 8518 * we set last_event to curr_events, then all that activity 8519 * completing might cause the array to appear non-idle 8520 * and resync will be slowed down even though there might 8521 * not have been non-resync activity. This will only 8522 * happen once though. 'last_events' will soon reflect 8523 * the state where there is little or no outstanding 8524 * resync requests, and further resync activity will 8525 * always make curr_events less than last_events. 8526 * 8527 */ 8528 if (init || curr_events - rdev->last_events > 64) { 8529 rdev->last_events = curr_events; 8530 idle = 0; 8531 } 8532 } 8533 rcu_read_unlock(); 8534 return idle; 8535} 8536 8537void md_done_sync(struct mddev *mddev, int blocks, int ok) 8538{ 8539 /* another "blocks" (512byte) blocks have been synced */ 8540 atomic_sub(blocks, &mddev->recovery_active); 8541 wake_up(&mddev->recovery_wait); 8542 if (!ok) { 8543 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 8544 set_bit(MD_RECOVERY_ERROR, &mddev->recovery); 8545 md_wakeup_thread(mddev->thread); 8546 // stop recovery, signal do_sync .... 
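		// (MD_RECOVERY_ERROR makes md_do_sync checkpoint at
		// curr_resync_completed rather than curr_resync when it
		// handles the interruption)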
8547 } 8548} 8549EXPORT_SYMBOL(md_done_sync); 8550 8551/* md_write_start(mddev, bi) 8552 * If we need to update some array metadata (e.g. 'active' flag 8553 * in superblock) before writing, schedule a superblock update 8554 * and wait for it to complete. 8555 * A return value of 'false' means that the write wasn't recorded 8556 * and cannot proceed as the array is being suspend. 8557 */ 8558bool md_write_start(struct mddev *mddev, struct bio *bi) 8559{ 8560 int did_change = 0; 8561 8562 if (bio_data_dir(bi) != WRITE) 8563 return true; 8564 8565 BUG_ON(mddev->ro == 1); 8566 if (mddev->ro == 2) { 8567 /* need to switch to read/write */ 8568 mddev->ro = 0; 8569 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 8570 md_wakeup_thread(mddev->thread); 8571 md_wakeup_thread(mddev->sync_thread); 8572 did_change = 1; 8573 } 8574 rcu_read_lock(); 8575 percpu_ref_get(&mddev->writes_pending); 8576 smp_mb(); /* Match smp_mb in set_in_sync() */ 8577 if (mddev->safemode == 1) 8578 mddev->safemode = 0; 8579 /* sync_checkers is always 0 when writes_pending is in per-cpu mode */ 8580 if (mddev->in_sync || mddev->sync_checkers) { 8581 spin_lock(&mddev->lock); 8582 if (mddev->in_sync) { 8583 mddev->in_sync = 0; 8584 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 8585 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 8586 md_wakeup_thread(mddev->thread); 8587 did_change = 1; 8588 } 8589 spin_unlock(&mddev->lock); 8590 } 8591 rcu_read_unlock(); 8592 if (did_change) 8593 sysfs_notify_dirent_safe(mddev->sysfs_state); 8594 if (!mddev->has_superblocks) 8595 return true; 8596 wait_event(mddev->sb_wait, 8597 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || 8598 mddev->suspended); 8599 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { 8600 percpu_ref_put(&mddev->writes_pending); 8601 return false; 8602 } 8603 return true; 8604} 8605EXPORT_SYMBOL(md_write_start); 8606 8607/* md_write_inc can only be called when md_write_start() has 8608 * already been called at least once of the current request. 8609 * It increments the counter and is useful when a single request 8610 * is split into several parts. Each part causes an increment and 8611 * so needs a matching md_write_end(). 8612 * Unlike md_write_start(), it is safe to call md_write_inc() inside 8613 * a spinlocked region. 8614 */ 8615void md_write_inc(struct mddev *mddev, struct bio *bi) 8616{ 8617 if (bio_data_dir(bi) != WRITE) 8618 return; 8619 WARN_ON_ONCE(mddev->in_sync || mddev->ro); 8620 percpu_ref_get(&mddev->writes_pending); 8621} 8622EXPORT_SYMBOL(md_write_inc); 8623 8624void md_write_end(struct mddev *mddev) 8625{ 8626 percpu_ref_put(&mddev->writes_pending); 8627 8628 if (mddev->safemode == 2) 8629 md_wakeup_thread(mddev->thread); 8630 else if (mddev->safemode_delay) 8631 /* The roundup() ensures this only performs locking once 8632 * every ->safemode_delay jiffies 8633 */ 8634 mod_timer(&mddev->safemode_timer, 8635 roundup(jiffies, mddev->safemode_delay) + 8636 mddev->safemode_delay); 8637} 8638 8639EXPORT_SYMBOL(md_write_end); 8640 8641/* md_allow_write(mddev) 8642 * Calling this ensures that the array is marked 'active' so that writes 8643 * may proceed without blocking. It is important to call this before 8644 * attempting a GFP_KERNEL allocation while holding the mddev lock. 8645 * Must be called with mddev_lock held. 
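 *
 * Typical (illustrative) caller: a personality that must allocate
 * with GFP_KERNEL while holding the lock, e.g. to grow a stripe
 * cache, calls md_allow_write() first; otherwise writeback triggered
 * by the allocation could block in md_write_start() waiting for a
 * superblock update that cannot happen while this thread holds the
 * lock.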
8646 */ 8647void md_allow_write(struct mddev *mddev) 8648{ 8649 if (!mddev->pers) 8650 return; 8651 if (mddev->ro) 8652 return; 8653 if (!mddev->pers->sync_request) 8654 return; 8655 8656 spin_lock(&mddev->lock); 8657 if (mddev->in_sync) { 8658 mddev->in_sync = 0; 8659 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 8660 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 8661 if (mddev->safemode_delay && 8662 mddev->safemode == 0) 8663 mddev->safemode = 1; 8664 spin_unlock(&mddev->lock); 8665 md_update_sb(mddev, 0); 8666 sysfs_notify_dirent_safe(mddev->sysfs_state); 8667 /* wait for the dirty state to be recorded in the metadata */ 8668 wait_event(mddev->sb_wait, 8669 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 8670 } else 8671 spin_unlock(&mddev->lock); 8672} 8673EXPORT_SYMBOL_GPL(md_allow_write); 8674 8675#define SYNC_MARKS 10 8676#define SYNC_MARK_STEP (3*HZ) 8677#define UPDATE_FREQUENCY (5*60*HZ) 8678void md_do_sync(struct md_thread *thread) 8679{ 8680 struct mddev *mddev = thread->mddev; 8681 struct mddev *mddev2; 8682 unsigned int currspeed = 0, window; 8683 sector_t max_sectors,j, io_sectors, recovery_done; 8684 unsigned long mark[SYNC_MARKS]; 8685 unsigned long update_time; 8686 sector_t mark_cnt[SYNC_MARKS]; 8687 int last_mark,m; 8688 struct list_head *tmp; 8689 sector_t last_check; 8690 int skipped = 0; 8691 struct md_rdev *rdev; 8692 char *desc, *action = NULL; 8693 struct blk_plug plug; 8694 int ret; 8695 8696 /* just incase thread restarts... */ 8697 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 8698 test_bit(MD_RECOVERY_WAIT, &mddev->recovery)) 8699 return; 8700 if (mddev->ro) {/* never try to sync a read-only array */ 8701 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 8702 return; 8703 } 8704 8705 if (mddev_is_clustered(mddev)) { 8706 ret = md_cluster_ops->resync_start(mddev); 8707 if (ret) 8708 goto skip; 8709 8710 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags); 8711 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 8712 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) || 8713 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) 8714 && ((unsigned long long)mddev->curr_resync_completed 8715 < (unsigned long long)mddev->resync_max_sectors)) 8716 goto skip; 8717 } 8718 8719 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 8720 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { 8721 desc = "data-check"; 8722 action = "check"; 8723 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 8724 desc = "requested-resync"; 8725 action = "repair"; 8726 } else 8727 desc = "resync"; 8728 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 8729 desc = "reshape"; 8730 else 8731 desc = "recovery"; 8732 8733 mddev->last_sync_action = action ?: desc; 8734 8735 /* we overload curr_resync somewhat here. 8736 * 0 == not engaged in resync at all 8737 * 2 == checking that there is no conflict with another sync 8738 * 1 == like 2, but have yielded to allow conflicting resync to 8739 * commence 8740 * other == active in resync - this many blocks 8741 * 8742 * Before starting a resync we must have set curr_resync to 8743 * 2, and then checked that every "conflicting" array has curr_resync 8744 * less than ours. When we find one that is the same or higher 8745 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync 8746 * to 1 if we choose to yield (based arbitrarily on address of mddev structure). 8747 * This will mean we have to start checking from the beginning again. 
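	 * For example (roughly): if two arrays share a drive, the one
	 * whose mddev happens to have the lower address drops back to 1
	 * and waits on resync_wait, printing the "md: delaying ..."
	 * message, until the other array's curr_resync falls below its
	 * own, i.e. that resync finishes or is interrupted.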
8748 * 8749 */ 8750 8751 do { 8752 int mddev2_minor = -1; 8753 mddev->curr_resync = 2; 8754 8755 try_again: 8756 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8757 goto skip; 8758 for_each_mddev(mddev2, tmp) { 8759 if (mddev2 == mddev) 8760 continue; 8761 if (!mddev->parallel_resync 8762 && mddev2->curr_resync 8763 && match_mddev_units(mddev, mddev2)) { 8764 DEFINE_WAIT(wq); 8765 if (mddev < mddev2 && mddev->curr_resync == 2) { 8766 /* arbitrarily yield */ 8767 mddev->curr_resync = 1; 8768 wake_up(&resync_wait); 8769 } 8770 if (mddev > mddev2 && mddev->curr_resync == 1) 8771 /* no need to wait here, we can wait the next 8772 * time 'round when curr_resync == 2 8773 */ 8774 continue; 8775 /* We need to wait 'interruptible' so as not to 8776 * contribute to the load average, and not to 8777 * be caught by 'softlockup' 8778 */ 8779 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); 8780 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 8781 mddev2->curr_resync >= mddev->curr_resync) { 8782 if (mddev2_minor != mddev2->md_minor) { 8783 mddev2_minor = mddev2->md_minor; 8784 pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n", 8785 desc, mdname(mddev), 8786 mdname(mddev2)); 8787 } 8788 mddev_put(mddev2); 8789 if (signal_pending(current)) 8790 flush_signals(current); 8791 schedule(); 8792 finish_wait(&resync_wait, &wq); 8793 goto try_again; 8794 } 8795 finish_wait(&resync_wait, &wq); 8796 } 8797 } 8798 } while (mddev->curr_resync < 2); 8799 8800 j = 0; 8801 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 8802 /* resync follows the size requested by the personality, 8803 * which defaults to physical size, but can be virtual size 8804 */ 8805 max_sectors = mddev->resync_max_sectors; 8806 atomic64_set(&mddev->resync_mismatches, 0); 8807 /* we don't use the checkpoint if there's a bitmap */ 8808 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 8809 j = mddev->resync_min; 8810 else if (!mddev->bitmap) 8811 j = mddev->recovery_cp; 8812 8813 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 8814 max_sectors = mddev->resync_max_sectors; 8815 /* 8816 * If the original node aborts reshaping then we continue the 8817 * reshaping, so set j again to avoid restart reshape from the 8818 * first beginning 8819 */ 8820 if (mddev_is_clustered(mddev) && 8821 mddev->reshape_position != MaxSector) 8822 j = mddev->reshape_position; 8823 } else { 8824 /* recovery follows the physical size of devices */ 8825 max_sectors = mddev->dev_sectors; 8826 j = MaxSector; 8827 rcu_read_lock(); 8828 rdev_for_each_rcu(rdev, mddev) 8829 if (rdev->raid_disk >= 0 && 8830 !test_bit(Journal, &rdev->flags) && 8831 !test_bit(Faulty, &rdev->flags) && 8832 !test_bit(In_sync, &rdev->flags) && 8833 rdev->recovery_offset < j) 8834 j = rdev->recovery_offset; 8835 rcu_read_unlock(); 8836 8837 /* If there is a bitmap, we need to make sure all 8838 * writes that started before we added a spare 8839 * complete before we start doing a recovery. 8840 * Otherwise the write might complete and (via 8841 * bitmap_endwrite) set a bit in the bitmap after the 8842 * recovery has checked that bit and skipped that 8843 * region. 
8844 */ 8845 if (mddev->bitmap) { 8846 mddev->pers->quiesce(mddev, 1); 8847 mddev->pers->quiesce(mddev, 0); 8848 } 8849 } 8850 8851 pr_info("md: %s of RAID array %s\n", desc, mdname(mddev)); 8852 pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev)); 8853 pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n", 8854 speed_max(mddev), desc); 8855 8856 is_mddev_idle(mddev, 1); /* this initializes IO event counters */ 8857 8858 io_sectors = 0; 8859 for (m = 0; m < SYNC_MARKS; m++) { 8860 mark[m] = jiffies; 8861 mark_cnt[m] = io_sectors; 8862 } 8863 last_mark = 0; 8864 mddev->resync_mark = mark[last_mark]; 8865 mddev->resync_mark_cnt = mark_cnt[last_mark]; 8866 8867 /* 8868 * Tune reconstruction: 8869 */ 8870 window = 32 * (PAGE_SIZE / 512); 8871 pr_debug("md: using %dk window, over a total of %lluk.\n", 8872 window/2, (unsigned long long)max_sectors/2); 8873 8874 atomic_set(&mddev->recovery_active, 0); 8875 last_check = 0; 8876 8877 if (j>2) { 8878 pr_debug("md: resuming %s of %s from checkpoint.\n", 8879 desc, mdname(mddev)); 8880 mddev->curr_resync = j; 8881 } else 8882 mddev->curr_resync = 3; /* no longer delayed */ 8883 mddev->curr_resync_completed = j; 8884 sysfs_notify_dirent_safe(mddev->sysfs_completed); 8885 md_new_event(mddev); 8886 update_time = jiffies; 8887 8888 blk_start_plug(&plug); 8889 while (j < max_sectors) { 8890 sector_t sectors; 8891 8892 skipped = 0; 8893 8894 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 8895 ((mddev->curr_resync > mddev->curr_resync_completed && 8896 (mddev->curr_resync - mddev->curr_resync_completed) 8897 > (max_sectors >> 4)) || 8898 time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) || 8899 (j - mddev->curr_resync_completed)*2 8900 >= mddev->resync_max - mddev->curr_resync_completed || 8901 mddev->curr_resync_completed > mddev->resync_max 8902 )) { 8903 /* time to update curr_resync_completed */ 8904 wait_event(mddev->recovery_wait, 8905 atomic_read(&mddev->recovery_active) == 0); 8906 mddev->curr_resync_completed = j; 8907 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && 8908 j > mddev->recovery_cp) 8909 mddev->recovery_cp = j; 8910 update_time = jiffies; 8911 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 8912 sysfs_notify_dirent_safe(mddev->sysfs_completed); 8913 } 8914 8915 while (j >= mddev->resync_max && 8916 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 8917 /* As this condition is controlled by user-space, 8918 * we can block indefinitely, so use '_interruptible' 8919 * to avoid triggering warnings. 8920 */ 8921 flush_signals(current); /* just in case */ 8922 wait_event_interruptible(mddev->recovery_wait, 8923 mddev->resync_max > j 8924 || test_bit(MD_RECOVERY_INTR, 8925 &mddev->recovery)); 8926 } 8927 8928 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8929 break; 8930 8931 sectors = mddev->pers->sync_request(mddev, j, &skipped); 8932 if (sectors == 0) { 8933 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 8934 break; 8935 } 8936 8937 if (!skipped) { /* actual IO requested */ 8938 io_sectors += sectors; 8939 atomic_add(sectors, &mddev->recovery_active); 8940 } 8941 8942 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8943 break; 8944 8945 j += sectors; 8946 if (j > max_sectors) 8947 /* when skipping, extra large numbers can be returned. 
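			 * (e.g. a personality that sees from the bitmap or
			 * checkpoint that nothing needs doing may report the
			 * whole remaining range in one call)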
*/ 8948 j = max_sectors; 8949 if (j > 2) 8950 mddev->curr_resync = j; 8951 mddev->curr_mark_cnt = io_sectors; 8952 if (last_check == 0) 8953 /* this is the earliest that rebuild will be 8954 * visible in /proc/mdstat 8955 */ 8956 md_new_event(mddev); 8957 8958 if (last_check + window > io_sectors || j == max_sectors) 8959 continue; 8960 8961 last_check = io_sectors; 8962 repeat: 8963 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) { 8964 /* step marks */ 8965 int next = (last_mark+1) % SYNC_MARKS; 8966 8967 mddev->resync_mark = mark[next]; 8968 mddev->resync_mark_cnt = mark_cnt[next]; 8969 mark[next] = jiffies; 8970 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active); 8971 last_mark = next; 8972 } 8973 8974 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 8975 break; 8976 8977 /* 8978 * this loop exits only if either when we are slower than 8979 * the 'hard' speed limit, or the system was IO-idle for 8980 * a jiffy. 8981 * the system might be non-idle CPU-wise, but we only care 8982 * about not overloading the IO subsystem. (things like an 8983 * e2fsck being done on the RAID array should execute fast) 8984 */ 8985 cond_resched(); 8986 8987 recovery_done = io_sectors - atomic_read(&mddev->recovery_active); 8988 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2 8989 /((jiffies-mddev->resync_mark)/HZ +1) +1; 8990 8991 if (currspeed > speed_min(mddev)) { 8992 if (currspeed > speed_max(mddev)) { 8993 msleep(500); 8994 goto repeat; 8995 } 8996 if (!is_mddev_idle(mddev, 0)) { 8997 /* 8998 * Give other IO more of a chance. 8999 * The faster the devices, the less we wait. 9000 */ 9001 wait_event(mddev->recovery_wait, 9002 !atomic_read(&mddev->recovery_active)); 9003 } 9004 } 9005 } 9006 pr_info("md: %s: %s %s.\n",mdname(mddev), desc, 9007 test_bit(MD_RECOVERY_INTR, &mddev->recovery) 9008 ? 
"interrupted" : "done"); 9009 /* 9010 * this also signals 'finished resyncing' to md_stop 9011 */ 9012 blk_finish_plug(&plug); 9013 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); 9014 9015 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 9016 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 9017 mddev->curr_resync > 3) { 9018 mddev->curr_resync_completed = mddev->curr_resync; 9019 sysfs_notify_dirent_safe(mddev->sysfs_completed); 9020 } 9021 mddev->pers->sync_request(mddev, max_sectors, &skipped); 9022 9023 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && 9024 mddev->curr_resync > 3) { 9025 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 9026 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 9027 if (mddev->curr_resync >= mddev->recovery_cp) { 9028 pr_debug("md: checkpointing %s of %s.\n", 9029 desc, mdname(mddev)); 9030 if (test_bit(MD_RECOVERY_ERROR, 9031 &mddev->recovery)) 9032 mddev->recovery_cp = 9033 mddev->curr_resync_completed; 9034 else 9035 mddev->recovery_cp = 9036 mddev->curr_resync; 9037 } 9038 } else 9039 mddev->recovery_cp = MaxSector; 9040 } else { 9041 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 9042 mddev->curr_resync = MaxSector; 9043 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 9044 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) { 9045 rcu_read_lock(); 9046 rdev_for_each_rcu(rdev, mddev) 9047 if (rdev->raid_disk >= 0 && 9048 mddev->delta_disks >= 0 && 9049 !test_bit(Journal, &rdev->flags) && 9050 !test_bit(Faulty, &rdev->flags) && 9051 !test_bit(In_sync, &rdev->flags) && 9052 rdev->recovery_offset < mddev->curr_resync) 9053 rdev->recovery_offset = mddev->curr_resync; 9054 rcu_read_unlock(); 9055 } 9056 } 9057 } 9058 skip: 9059 /* set CHANGE_PENDING here since maybe another update is needed, 9060 * so other nodes are informed. It should be harmless for normal 9061 * raid */ 9062 set_mask_bits(&mddev->sb_flags, 0, 9063 BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS)); 9064 9065 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 9066 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 9067 mddev->delta_disks > 0 && 9068 mddev->pers->finish_reshape && 9069 mddev->pers->size && 9070 mddev->queue) { 9071 mddev_lock_nointr(mddev); 9072 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0)); 9073 mddev_unlock(mddev); 9074 if (!mddev_is_clustered(mddev)) { 9075 set_capacity(mddev->gendisk, mddev->array_sectors); 9076 revalidate_disk_size(mddev->gendisk, true); 9077 } 9078 } 9079 9080 spin_lock(&mddev->lock); 9081 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 9082 /* We completed so min/max setting can be forgotten if used. 
*/ 9083 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 9084 mddev->resync_min = 0; 9085 mddev->resync_max = MaxSector; 9086 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 9087 mddev->resync_min = mddev->curr_resync_completed; 9088 set_bit(MD_RECOVERY_DONE, &mddev->recovery); 9089 mddev->curr_resync = 0; 9090 spin_unlock(&mddev->lock); 9091 9092 wake_up(&resync_wait); 9093 md_wakeup_thread(mddev->thread); 9094 return; 9095} 9096EXPORT_SYMBOL_GPL(md_do_sync); 9097 9098static int remove_and_add_spares(struct mddev *mddev, 9099 struct md_rdev *this) 9100{ 9101 struct md_rdev *rdev; 9102 int spares = 0; 9103 int removed = 0; 9104 bool remove_some = false; 9105 9106 if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 9107 /* Mustn't remove devices when resync thread is running */ 9108 return 0; 9109 9110 rdev_for_each(rdev, mddev) { 9111 if ((this == NULL || rdev == this) && 9112 rdev->raid_disk >= 0 && 9113 !test_bit(Blocked, &rdev->flags) && 9114 test_bit(Faulty, &rdev->flags) && 9115 atomic_read(&rdev->nr_pending)==0) { 9116 /* Faulty non-Blocked devices with nr_pending == 0 9117 * never get nr_pending incremented, 9118 * never get Faulty cleared, and never get Blocked set. 9119 * So we can synchronize_rcu now rather than once per device 9120 */ 9121 remove_some = true; 9122 set_bit(RemoveSynchronized, &rdev->flags); 9123 } 9124 } 9125 9126 if (remove_some) 9127 synchronize_rcu(); 9128 rdev_for_each(rdev, mddev) { 9129 if ((this == NULL || rdev == this) && 9130 rdev->raid_disk >= 0 && 9131 !test_bit(Blocked, &rdev->flags) && 9132 ((test_bit(RemoveSynchronized, &rdev->flags) || 9133 (!test_bit(In_sync, &rdev->flags) && 9134 !test_bit(Journal, &rdev->flags))) && 9135 atomic_read(&rdev->nr_pending)==0)) { 9136 if (mddev->pers->hot_remove_disk( 9137 mddev, rdev) == 0) { 9138 sysfs_unlink_rdev(mddev, rdev); 9139 rdev->saved_raid_disk = rdev->raid_disk; 9140 rdev->raid_disk = -1; 9141 removed++; 9142 } 9143 } 9144 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags)) 9145 clear_bit(RemoveSynchronized, &rdev->flags); 9146 } 9147 9148 if (removed && mddev->kobj.sd) 9149 sysfs_notify_dirent_safe(mddev->sysfs_degraded); 9150 9151 if (this && removed) 9152 goto no_add; 9153 9154 rdev_for_each(rdev, mddev) { 9155 if (this && this != rdev) 9156 continue; 9157 if (test_bit(Candidate, &rdev->flags)) 9158 continue; 9159 if (rdev->raid_disk >= 0 && 9160 !test_bit(In_sync, &rdev->flags) && 9161 !test_bit(Journal, &rdev->flags) && 9162 !test_bit(Faulty, &rdev->flags)) 9163 spares++; 9164 if (rdev->raid_disk >= 0) 9165 continue; 9166 if (test_bit(Faulty, &rdev->flags)) 9167 continue; 9168 if (!test_bit(Journal, &rdev->flags)) { 9169 if (mddev->ro && 9170 ! 
(rdev->saved_raid_disk >= 0 && 9171 !test_bit(Bitmap_sync, &rdev->flags))) 9172 continue; 9173 9174 rdev->recovery_offset = 0; 9175 } 9176 if (mddev->pers->hot_add_disk(mddev, rdev) == 0) { 9177 /* failure here is OK */ 9178 sysfs_link_rdev(mddev, rdev); 9179 if (!test_bit(Journal, &rdev->flags)) 9180 spares++; 9181 md_new_event(mddev); 9182 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 9183 } 9184 } 9185no_add: 9186 if (removed) 9187 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 9188 return spares; 9189} 9190 9191static void md_start_sync(struct work_struct *ws) 9192{ 9193 struct mddev *mddev = container_of(ws, struct mddev, del_work); 9194 9195 mddev->sync_thread = md_register_thread(md_do_sync, 9196 mddev, 9197 "resync"); 9198 if (!mddev->sync_thread) { 9199 pr_warn("%s: could not start resync thread...\n", 9200 mdname(mddev)); 9201 /* leave the spares where they are, it shouldn't hurt */ 9202 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 9203 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 9204 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 9205 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 9206 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 9207 wake_up(&resync_wait); 9208 if (test_and_clear_bit(MD_RECOVERY_RECOVER, 9209 &mddev->recovery)) 9210 if (mddev->sysfs_action) 9211 sysfs_notify_dirent_safe(mddev->sysfs_action); 9212 } else 9213 md_wakeup_thread(mddev->sync_thread); 9214 sysfs_notify_dirent_safe(mddev->sysfs_action); 9215 md_new_event(mddev); 9216} 9217 9218/* 9219 * This routine is regularly called by all per-raid-array threads to 9220 * deal with generic issues like resync and super-block update. 9221 * Raid personalities that don't have a thread (linear/raid0) do not 9222 * need this as they never do any recovery or update the superblock. 9223 * 9224 * It does not do any resync itself, but rather "forks" off other threads 9225 * to do that as needed. 9226 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in 9227 * "->recovery" and create a thread at ->sync_thread. 9228 * When the thread finishes it sets MD_RECOVERY_DONE 9229 * and wakeups up this thread which will reap the thread and finish up. 9230 * This thread also removes any faulty devices (with nr_pending == 0). 9231 * 9232 * The overall approach is: 9233 * 1/ if the superblock needs updating, update it. 9234 * 2/ If a recovery thread is running, don't do anything else. 9235 * 3/ If recovery has finished, clean up, possibly marking spares active. 9236 * 4/ If there are any faulty devices, remove them. 9237 * 5/ If array is degraded, try to add spares devices 9238 * 6/ If array has spares or is not in-sync, start a resync thread. 9239 */ 9240void md_check_recovery(struct mddev *mddev) 9241{ 9242 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) { 9243 /* Write superblock - thread that called mddev_suspend() 9244 * holds reconfig_mutex for us. 
9245 */ 9246 set_bit(MD_UPDATING_SB, &mddev->flags); 9247 smp_mb__after_atomic(); 9248 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags)) 9249 md_update_sb(mddev, 0); 9250 clear_bit_unlock(MD_UPDATING_SB, &mddev->flags); 9251 wake_up(&mddev->sb_wait); 9252 } 9253 9254 if (mddev->suspended) 9255 return; 9256 9257 if (mddev->bitmap) 9258 md_bitmap_daemon_work(mddev); 9259 9260 if (signal_pending(current)) { 9261 if (mddev->pers->sync_request && !mddev->external) { 9262 pr_debug("md: %s in immediate safe mode\n", 9263 mdname(mddev)); 9264 mddev->safemode = 2; 9265 } 9266 flush_signals(current); 9267 } 9268 9269 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 9270 return; 9271 if ( ! ( 9272 (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) || 9273 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 9274 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 9275 (mddev->external == 0 && mddev->safemode == 1) || 9276 (mddev->safemode == 2 9277 && !mddev->in_sync && mddev->recovery_cp == MaxSector) 9278 )) 9279 return; 9280 9281 if (mddev_trylock(mddev)) { 9282 int spares = 0; 9283 bool try_set_sync = mddev->safemode != 0; 9284 9285 if (!mddev->external && mddev->safemode == 1) 9286 mddev->safemode = 0; 9287 9288 if (mddev->ro) { 9289 struct md_rdev *rdev; 9290 if (!mddev->external && mddev->in_sync) 9291 /* 'Blocked' flag not needed as failed devices 9292 * will be recorded if array switched to read/write. 9293 * Leaving it set will prevent the device 9294 * from being removed. 9295 */ 9296 rdev_for_each(rdev, mddev) 9297 clear_bit(Blocked, &rdev->flags); 9298 /* On a read-only array we can: 9299 * - remove failed devices 9300 * - add already-in_sync devices if the array itself 9301 * is in-sync. 9302 * As we only add devices that are already in-sync, 9303 * we can activate the spares immediately. 9304 */ 9305 remove_and_add_spares(mddev, NULL); 9306 /* There is no thread, but we need to call 9307 * ->spare_active and clear saved_raid_disk 9308 */ 9309 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 9310 md_reap_sync_thread(mddev); 9311 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 9312 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 9313 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 9314 goto unlock; 9315 } 9316 9317 if (mddev_is_clustered(mddev)) { 9318 struct md_rdev *rdev, *tmp; 9319 /* kick the device if another node issued a 9320 * remove disk. 9321 */ 9322 rdev_for_each_safe(rdev, tmp, mddev) { 9323 if (test_and_clear_bit(ClusterRemove, &rdev->flags) && 9324 rdev->raid_disk < 0) 9325 md_kick_rdev_from_array(rdev); 9326 } 9327 } 9328 9329 if (try_set_sync && !mddev->external && !mddev->in_sync) { 9330 spin_lock(&mddev->lock); 9331 set_in_sync(mddev); 9332 spin_unlock(&mddev->lock); 9333 } 9334 9335 if (mddev->sb_flags) 9336 md_update_sb(mddev, 0); 9337 9338 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 9339 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { 9340 /* resync/recovery still happening */ 9341 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 9342 goto unlock; 9343 } 9344 if (mddev->sync_thread) { 9345 md_reap_sync_thread(mddev); 9346 goto unlock; 9347 } 9348 /* Set RUNNING before clearing NEEDED to avoid 9349 * any transients in the value of "sync_action". 
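		 * (roughly: the sysfs "sync_action" file reads as "idle"
		 * only when neither RUNNING nor NEEDED is set, so clearing
		 * NEEDED before setting RUNNING could briefly show "idle")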
9350 */ 9351 mddev->curr_resync_completed = 0; 9352 spin_lock(&mddev->lock); 9353 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 9354 spin_unlock(&mddev->lock); 9355 /* Clear some bits that don't mean anything, but 9356 * might be left set 9357 */ 9358 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); 9359 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 9360 9361 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 9362 test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 9363 goto not_running; 9364 /* no recovery is running. 9365 * remove any failed drives, then 9366 * add spares if possible. 9367 * Spares are also removed and re-added, to allow 9368 * the personality to fail the re-add. 9369 */ 9370 9371 if (mddev->reshape_position != MaxSector) { 9372 if (mddev->pers->check_reshape == NULL || 9373 mddev->pers->check_reshape(mddev) != 0) 9374 /* Cannot proceed */ 9375 goto not_running; 9376 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 9377 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 9378 } else if ((spares = remove_and_add_spares(mddev, NULL))) { 9379 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 9380 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 9381 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 9382 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 9383 } else if (mddev->recovery_cp < MaxSector) { 9384 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 9385 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 9386 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 9387 /* nothing to be done ... */ 9388 goto not_running; 9389 9390 if (mddev->pers->sync_request) { 9391 if (spares) { 9392 /* We are adding a device or devices to an array 9393 * which has the bitmap stored on all devices. 9394 * So make sure all bitmap pages get written 9395 */ 9396 md_bitmap_write_all(mddev->bitmap); 9397 } 9398 INIT_WORK(&mddev->del_work, md_start_sync); 9399 queue_work(md_misc_wq, &mddev->del_work); 9400 goto unlock; 9401 } 9402 not_running: 9403 if (!mddev->sync_thread) { 9404 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 9405 wake_up(&resync_wait); 9406 if (test_and_clear_bit(MD_RECOVERY_RECOVER, 9407 &mddev->recovery)) 9408 if (mddev->sysfs_action) 9409 sysfs_notify_dirent_safe(mddev->sysfs_action); 9410 } 9411 unlock: 9412 wake_up(&mddev->sb_wait); 9413 mddev_unlock(mddev); 9414 } 9415} 9416EXPORT_SYMBOL(md_check_recovery); 9417 9418void md_reap_sync_thread(struct mddev *mddev) 9419{ 9420 struct md_rdev *rdev; 9421 sector_t old_dev_sectors = mddev->dev_sectors; 9422 bool is_reshaped = false; 9423 9424 /* resync has finished, collect result */ 9425 md_unregister_thread(&mddev->sync_thread); 9426 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 9427 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 9428 mddev->degraded != mddev->raid_disks) { 9429 /* success...*/ 9430 /* activate any spares */ 9431 if (mddev->pers->spare_active(mddev)) { 9432 sysfs_notify_dirent_safe(mddev->sysfs_degraded); 9433 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 9434 } 9435 } 9436 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 9437 mddev->pers->finish_reshape) { 9438 mddev->pers->finish_reshape(mddev); 9439 if (mddev_is_clustered(mddev)) 9440 is_reshaped = true; 9441 } 9442 9443 /* If array is no-longer degraded, then any saved_raid_disk 9444 * information must be scrapped. 
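	 * (saved_raid_disk only exists to let a re-added device slot
	 * straight back into its old position; once the array is fully
	 * in sync it must not be reused)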
9445 */ 9446 if (!mddev->degraded) 9447 rdev_for_each(rdev, mddev) 9448 rdev->saved_raid_disk = -1; 9449 9450 md_update_sb(mddev, 1); 9451 /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can 9452 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by 9453 * clustered raid */ 9454 if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags)) 9455 md_cluster_ops->resync_finish(mddev); 9456 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 9457 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 9458 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 9459 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 9460 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 9461 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 9462 /* 9463 * We call md_cluster_ops->update_size here because sync_size could 9464 * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared, 9465 * so it is time to update size across cluster. 9466 */ 9467 if (mddev_is_clustered(mddev) && is_reshaped 9468 && !test_bit(MD_CLOSING, &mddev->flags)) 9469 md_cluster_ops->update_size(mddev, old_dev_sectors); 9470 wake_up(&resync_wait); 9471 /* flag recovery needed just to double check */ 9472 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 9473 sysfs_notify_dirent_safe(mddev->sysfs_completed); 9474 sysfs_notify_dirent_safe(mddev->sysfs_action); 9475 md_new_event(mddev); 9476 if (mddev->event_work.func) 9477 queue_work(md_misc_wq, &mddev->event_work); 9478} 9479EXPORT_SYMBOL(md_reap_sync_thread); 9480 9481void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) 9482{ 9483 sysfs_notify_dirent_safe(rdev->sysfs_state); 9484 wait_event_timeout(rdev->blocked_wait, 9485 !test_bit(Blocked, &rdev->flags) && 9486 !test_bit(BlockedBadBlocks, &rdev->flags), 9487 msecs_to_jiffies(5000)); 9488 rdev_dec_pending(rdev, mddev); 9489} 9490EXPORT_SYMBOL(md_wait_for_blocked_rdev); 9491 9492void md_finish_reshape(struct mddev *mddev) 9493{ 9494 /* called be personality module when reshape completes. 
*/ 9495 struct md_rdev *rdev; 9496 9497 rdev_for_each(rdev, mddev) { 9498 if (rdev->data_offset > rdev->new_data_offset) 9499 rdev->sectors += rdev->data_offset - rdev->new_data_offset; 9500 else 9501 rdev->sectors -= rdev->new_data_offset - rdev->data_offset; 9502 rdev->data_offset = rdev->new_data_offset; 9503 } 9504} 9505EXPORT_SYMBOL(md_finish_reshape); 9506 9507/* Bad block management */ 9508 9509/* Returns 1 on success, 0 on failure */ 9510int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, 9511 int is_new) 9512{ 9513 struct mddev *mddev = rdev->mddev; 9514 int rv; 9515 if (is_new) 9516 s += rdev->new_data_offset; 9517 else 9518 s += rdev->data_offset; 9519 rv = badblocks_set(&rdev->badblocks, s, sectors, 0); 9520 if (rv == 0) { 9521 /* Make sure they get written out promptly */ 9522 if (test_bit(ExternalBbl, &rdev->flags)) 9523 sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks); 9524 sysfs_notify_dirent_safe(rdev->sysfs_state); 9525 set_mask_bits(&mddev->sb_flags, 0, 9526 BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING)); 9527 md_wakeup_thread(rdev->mddev->thread); 9528 return 1; 9529 } else 9530 return 0; 9531} 9532EXPORT_SYMBOL_GPL(rdev_set_badblocks); 9533 9534int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, 9535 int is_new) 9536{ 9537 int rv; 9538 if (is_new) 9539 s += rdev->new_data_offset; 9540 else 9541 s += rdev->data_offset; 9542 rv = badblocks_clear(&rdev->badblocks, s, sectors); 9543 if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags)) 9544 sysfs_notify_dirent_safe(rdev->sysfs_badblocks); 9545 return rv; 9546} 9547EXPORT_SYMBOL_GPL(rdev_clear_badblocks); 9548 9549static int md_notify_reboot(struct notifier_block *this, 9550 unsigned long code, void *x) 9551{ 9552 struct list_head *tmp; 9553 struct mddev *mddev; 9554 int need_delay = 0; 9555 9556 for_each_mddev(mddev, tmp) { 9557 if (mddev_trylock(mddev)) { 9558 if (mddev->pers) 9559 __md_stop_writes(mddev); 9560 if (mddev->persistent) 9561 mddev->safemode = 2; 9562 mddev_unlock(mddev); 9563 } 9564 need_delay = 1; 9565 } 9566 /* 9567 * certain more exotic SCSI devices are known to be 9568 * volatile wrt too early system reboots. While the 9569 * right place to handle this issue is the given 9570 * driver, we do want to have a safe RAID driver ... 
9571 */ 9572 if (need_delay) 9573 mdelay(1000*1); 9574 9575 return NOTIFY_DONE; 9576} 9577 9578static struct notifier_block md_notifier = { 9579 .notifier_call = md_notify_reboot, 9580 .next = NULL, 9581 .priority = INT_MAX, /* before any real devices */ 9582}; 9583 9584static void md_geninit(void) 9585{ 9586 pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t)); 9587 9588 proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops); 9589} 9590 9591static int __init md_init(void) 9592{ 9593 int ret = -ENOMEM; 9594 9595 md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0); 9596 if (!md_wq) 9597 goto err_wq; 9598 9599 md_misc_wq = alloc_workqueue("md_misc", 0, 0); 9600 if (!md_misc_wq) 9601 goto err_misc_wq; 9602 9603 md_rdev_misc_wq = alloc_workqueue("md_rdev_misc", 0, 0); 9604 if (!md_rdev_misc_wq) 9605 goto err_rdev_misc_wq; 9606 9607 if ((ret = register_blkdev(MD_MAJOR, "md")) < 0) 9608 goto err_md; 9609 9610 if ((ret = register_blkdev(0, "mdp")) < 0) 9611 goto err_mdp; 9612 mdp_major = ret; 9613 9614 blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE, 9615 md_probe, NULL, NULL); 9616 blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE, 9617 md_probe, NULL, NULL); 9618 9619 register_reboot_notifier(&md_notifier); 9620 raid_table_header = register_sysctl_table(raid_root_table); 9621 9622 md_geninit(); 9623 return 0; 9624 9625err_mdp: 9626 unregister_blkdev(MD_MAJOR, "md"); 9627err_md: 9628 destroy_workqueue(md_rdev_misc_wq); 9629err_rdev_misc_wq: 9630 destroy_workqueue(md_misc_wq); 9631err_misc_wq: 9632 destroy_workqueue(md_wq); 9633err_wq: 9634 return ret; 9635} 9636 9637static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) 9638{ 9639 struct mdp_superblock_1 *sb = page_address(rdev->sb_page); 9640 struct md_rdev *rdev2, *tmp; 9641 int role, ret; 9642 char b[BDEVNAME_SIZE]; 9643 9644 /* 9645 * If size is changed in another node then we need to 9646 * do resize as well. 9647 */ 9648 if (mddev->dev_sectors != le64_to_cpu(sb->size)) { 9649 ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size)); 9650 if (ret) 9651 pr_info("md-cluster: resize failed\n"); 9652 else 9653 md_bitmap_update_sb(mddev->bitmap); 9654 } 9655 9656 /* Check for change of roles in the active devices */ 9657 rdev_for_each_safe(rdev2, tmp, mddev) { 9658 if (test_bit(Faulty, &rdev2->flags)) 9659 continue; 9660 9661 /* Check if the roles changed */ 9662 role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]); 9663 9664 if (test_bit(Candidate, &rdev2->flags)) { 9665 if (role == 0xfffe) { 9666 pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b)); 9667 md_kick_rdev_from_array(rdev2); 9668 continue; 9669 } 9670 else 9671 clear_bit(Candidate, &rdev2->flags); 9672 } 9673 9674 if (role != rdev2->raid_disk) { 9675 /* 9676 * got activated except reshape is happening. 9677 */ 9678 if (rdev2->raid_disk == -1 && role != 0xffff && 9679 !(le32_to_cpu(sb->feature_map) & 9680 MD_FEATURE_RESHAPE_ACTIVE)) { 9681 rdev2->saved_raid_disk = role; 9682 ret = remove_and_add_spares(mddev, rdev2); 9683 pr_info("Activated spare: %s\n", 9684 bdevname(rdev2->bdev,b)); 9685 /* wakeup mddev->thread here, so array could 9686 * perform resync with the new activated disk */ 9687 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 9688 md_wakeup_thread(mddev->thread); 9689 } 9690 /* device faulty 9691 * We just want to do the minimum to mark the disk 9692 * as faulty. The recovery is performed by the 9693 * one who initiated the error. 
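			 * (0xfffe and 0xfffd are the superblock role encodings
			 * for a faulty device and a journal device; 0xffff
			 * marks a spare)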
9694 */ 9695 if ((role == 0xfffe) || (role == 0xfffd)) { 9696 md_error(mddev, rdev2); 9697 clear_bit(Blocked, &rdev2->flags); 9698 } 9699 } 9700 } 9701 9702 if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) { 9703 ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks)); 9704 if (ret) 9705 pr_warn("md: updating array disks failed. %d\n", ret); 9706 } 9707 9708 /* 9709 * Since mddev->delta_disks has already updated in update_raid_disks, 9710 * so it is time to check reshape. 9711 */ 9712 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) && 9713 (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { 9714 /* 9715 * reshape is happening in the remote node, we need to 9716 * update reshape_position and call start_reshape. 9717 */ 9718 mddev->reshape_position = le64_to_cpu(sb->reshape_position); 9719 if (mddev->pers->update_reshape_pos) 9720 mddev->pers->update_reshape_pos(mddev); 9721 if (mddev->pers->start_reshape) 9722 mddev->pers->start_reshape(mddev); 9723 } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) && 9724 mddev->reshape_position != MaxSector && 9725 !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { 9726 /* reshape is just done in another node. */ 9727 mddev->reshape_position = MaxSector; 9728 if (mddev->pers->update_reshape_pos) 9729 mddev->pers->update_reshape_pos(mddev); 9730 } 9731 9732 /* Finally set the event to be up to date */ 9733 mddev->events = le64_to_cpu(sb->events); 9734} 9735 9736static int read_rdev(struct mddev *mddev, struct md_rdev *rdev) 9737{ 9738 int err; 9739 struct page *swapout = rdev->sb_page; 9740 struct mdp_superblock_1 *sb; 9741 9742 /* Store the sb page of the rdev in the swapout temporary 9743 * variable in case we err in the future 9744 */ 9745 rdev->sb_page = NULL; 9746 err = alloc_disk_sb(rdev); 9747 if (err == 0) { 9748 ClearPageUptodate(rdev->sb_page); 9749 rdev->sb_loaded = 0; 9750 err = super_types[mddev->major_version]. 9751 load_super(rdev, NULL, mddev->minor_version); 9752 } 9753 if (err < 0) { 9754 pr_warn("%s: %d Could not reload rdev(%d) err: %d. 
static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	int err;
	struct page *swapout = rdev->sb_page;
	struct mdp_superblock_1 *sb;

	/* Store the sb page of the rdev in the swapout temporary
	 * variable in case we err in the future
	 */
	rdev->sb_page = NULL;
	err = alloc_disk_sb(rdev);
	if (err == 0) {
		ClearPageUptodate(rdev->sb_page);
		rdev->sb_loaded = 0;
		err = super_types[mddev->major_version].
			load_super(rdev, NULL, mddev->minor_version);
	}
	if (err < 0) {
		pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
			__func__, __LINE__, rdev->desc_nr, err);
		if (rdev->sb_page)
			put_page(rdev->sb_page);
		rdev->sb_page = swapout;
		rdev->sb_loaded = 1;
		return err;
	}

	sb = page_address(rdev->sb_page);
	/* Only pick up the recovery offset when the superblock advertises
	 * MD_FEATURE_RECOVERY_OFFSET
	 */
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
		rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);

	/* The other node finished recovery, call spare_active to set
	 * the device In_sync and update mddev->degraded
	 */
	if (rdev->recovery_offset == MaxSector &&
	    !test_bit(In_sync, &rdev->flags) &&
	    mddev->pers->spare_active(mddev))
		sysfs_notify_dirent_safe(mddev->sysfs_degraded);

	put_page(swapout);
	return 0;
}

void md_reload_sb(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev = NULL, *iter;
	int err;

	/* Find the rdev */
	rdev_for_each_rcu(iter, mddev) {
		if (iter->desc_nr == nr) {
			rdev = iter;
			break;
		}
	}

	if (!rdev) {
		pr_warn("%s: %d Could not find rdev with nr %d\n",
			__func__, __LINE__, nr);
		return;
	}

	err = read_rdev(mddev, rdev);
	if (err < 0)
		return;

	check_sb_changes(mddev, rdev);

	/* Re-read all rdevs to update recovery_offset */
	rdev_for_each_rcu(rdev, mddev) {
		if (!test_bit(Faulty, &rdev->flags))
			read_rdev(mddev, rdev);
	}
}
EXPORT_SYMBOL(md_reload_sb);

#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static DEFINE_MUTEX(detected_devices_mutex);
static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;
	dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
	struct detected_devices_node *node_detected_dev;

	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		mutex_lock(&detected_devices_mutex);
		list_add_tail(&node_detected_dev->list, &all_detected_devices);
		mutex_unlock(&detected_devices_mutex);
	}
}

void md_autostart_arrays(int part)
{
	struct md_rdev *rdev;
	struct detected_devices_node *node_detected_dev;
	dev_t dev;
	int i_scanned, i_passed;

	i_scanned = 0;
	i_passed = 0;

	pr_info("md: Autodetecting RAID arrays.\n");

	mutex_lock(&detected_devices_mutex);
	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
		i_scanned++;
		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
		mutex_unlock(&detected_devices_mutex);
		rdev = md_import_device(dev, 0, 90);
		mutex_lock(&detected_devices_mutex);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags))
			continue;

		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);
		i_passed++;
	}
	mutex_unlock(&detected_devices_mutex);

	pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);

	autorun_devices(part);
}

#endif /* !MODULE */

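/* Descriptive comment added for clarity; it summarises what the code below
 * already does.  md_exit() undoes the registrations made in md_init(), wakes
 * up anyone still sleeping in select()/poll() on /proc/mdstat before removing
 * it, exports all remaining arrays, and finally destroys the workqueues.
 */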
static __exit void md_exit(void)
{
	struct mddev *mddev;
	struct list_head *tmp;
	int delay = 1;

	blk_unregister_region(MKDEV(MD_MAJOR, 0), 512);
	blk_unregister_region(MKDEV(mdp_major, 0), 1U << MINORBITS);

	unregister_blkdev(MD_MAJOR, "md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);

	/* We cannot unload the modules while some process is
	 * waiting for us in select() or poll() - wake them up
	 */
	md_unloading = 1;
	while (waitqueue_active(&md_event_waiters)) {
		/* not safe to leave yet */
		wake_up(&md_event_waiters);
		msleep(delay);
		delay += delay;
	}
	remove_proc_entry("mdstat", NULL);

	for_each_mddev(mddev, tmp) {
		export_array(mddev);
		mddev->ctime = 0;
		mddev->hold_active = 0;
		/*
		 * for_each_mddev() will call mddev_put() at the end of each
		 * iteration.  As the mddev is now fully clear, this will
		 * schedule the mddev for destruction by a workqueue, and the
		 * destroy_workqueue() below will wait for that to complete.
		 */
	}
	destroy_workqueue(md_rdev_misc_wq);
	destroy_workqueue(md_misc_wq);
	destroy_workqueue(md_wq);
}

subsys_initcall(md_init);
module_exit(md_exit)

static int get_ro(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%d\n", start_readonly);
}
static int set_ro(const char *val, const struct kernel_param *kp)
{
	return kstrtouint(val, 10, (unsigned int *)&start_readonly);
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
module_param(create_on_open, bool, S_IRUSR|S_IWUSR);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);