Lines matching defs:r1_bio (each entry is a source line number followed by the matching line from drivers/md/raid1.c)

59 static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
64 sector_t lo = r1_bio->sector;
65 sector_t hi = lo + r1_bio->sectors;
82 static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
86 int idx = sector_to_idx(r1_bio->sector);
93 check_and_add_serial(rdev, r1_bio, si, idx) == 0);
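
These serialization hits come from raid1's write ordering for write-behind devices: check_and_add_serial() tries to record the write's range [lo, hi) against the rdev, failing if an in-flight write overlaps it, and wait_for_serialization() retries inside a wait until the insert succeeds. Below is a minimal standalone model of that idea, assuming a plain linked list and a pthread condition variable in place of the kernel's per-rdev interval tree and wait queue; serial_begin()/serial_end() are invented names, not kernel API.

/*
 * Standalone model of check_and_add_serial()/wait_for_serialization():
 * a write to sectors [lo, hi) may only start once no in-flight
 * serialized write overlaps that range.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

typedef uint64_t sector_t;

struct serial_range {
        sector_t lo, hi;                /* half-open range of an in-flight write */
        struct serial_range *next;
};

static struct serial_range *in_flight;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t range_freed = PTHREAD_COND_INITIALIZER;

static bool overlaps(sector_t lo, sector_t hi)
{
        for (struct serial_range *r = in_flight; r; r = r->next)
                if (lo < r->hi && r->lo < hi)
                        return true;
        return false;
}

/* Analogue of wait_for_serialization(): sleep until no overlap, then
 * record our range so later overlapping writes wait on us. */
struct serial_range *serial_begin(sector_t lo, sector_t hi)
{
        struct serial_range *r = malloc(sizeof(*r));

        if (!r)
                return NULL;
        r->lo = lo;
        r->hi = hi;
        pthread_mutex_lock(&lock);
        while (overlaps(lo, hi))
                pthread_cond_wait(&range_freed, &lock);
        r->next = in_flight;
        in_flight = r;
        pthread_mutex_unlock(&lock);
        return r;
}

/* Completion side: drop the range and wake anything waiting on it. */
void serial_end(struct serial_range *r)
{
        pthread_mutex_lock(&lock);
        for (struct serial_range **p = &in_flight; *p; p = &(*p)->next)
                if (*p == r) {
                        *p = r->next;
                        break;
                }
        pthread_mutex_unlock(&lock);
        pthread_cond_broadcast(&range_freed);
        free(r);
}
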
149 struct r1bio *r1_bio;
155 r1_bio = r1bio_pool_alloc(gfp_flags, pi);
156 if (!r1_bio)
171 r1_bio->bios[j] = bio;
186 bio = r1_bio->bios[j];
196 rp->raid_bio = r1_bio;
200 r1_bio->master_bio = NULL;
202 return r1_bio;
210 bio_put(r1_bio->bios[j]);
214 rbio_pool_free(r1_bio, data);
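
The r1buf_pool_alloc()/rbio_pool_free() lines above manage r1bio buffers whose bios[] tail is sized by the mirror count recorded in pool_info. A sketch of the flexible-array sizing idiom follows, with stand-in types: r1bio_like and alloc_r1bio_like() are invented, and raid1.c computes the same size with offsetof() over its real struct r1bio.

#include <stdlib.h>

struct bio;                             /* opaque stand-in */

struct r1bio_like {
        unsigned long long sector;
        int sectors;
        /* ... other per-request bookkeeping ... */
        struct bio *bios[];             /* one slot per mirror, sized at alloc time */
};

struct r1bio_like *alloc_r1bio_like(int nr_slots)
{
        /* raid1.c expresses this as offsetof(struct r1bio, bios[nr_slots]);
         * the portable equivalent for a flexible array member is: */
        return calloc(1, sizeof(struct r1bio_like) +
                         (size_t)nr_slots * sizeof(struct bio *));  /* zeroed, like kzalloc() */
}
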
237 static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
242 struct bio **bio = r1_bio->bios + i;
249 static void free_r1bio(struct r1bio *r1_bio)
251 struct r1conf *conf = r1_bio->mddev->private;
253 put_all_bios(conf, r1_bio);
254 mempool_free(r1_bio, &conf->r1bio_pool);
257 static void put_buf(struct r1bio *r1_bio)
259 struct r1conf *conf = r1_bio->mddev->private;
260 sector_t sect = r1_bio->sector;
264 struct bio *bio = r1_bio->bios[i];
266 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
269 mempool_free(r1_bio, &conf->r1buf_pool);
274 static void reschedule_retry(struct r1bio *r1_bio)
277 struct mddev *mddev = r1_bio->mddev;
281 idx = sector_to_idx(r1_bio->sector);
283 list_add(&r1_bio->retry_list, &conf->retry_list);
296 static void call_bio_endio(struct r1bio *r1_bio)
298 struct bio *bio = r1_bio->master_bio;
300 if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
306 static void raid_end_bio_io(struct r1bio *r1_bio)
308 struct bio *bio = r1_bio->master_bio;
309 struct r1conf *conf = r1_bio->mddev->private;
312 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
318 call_bio_endio(r1_bio);
324 allow_barrier(conf, r1_bio->sector);
326 free_r1bio(r1_bio);
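
The test_and_set_bit(R1BIO_Returned, ...) at line 312 (and again at line 537 in the write-behind path) is a once-only guard: whichever completion path wins the race ends the master bio, and every later caller skips it. The same shape in portable C11, with an invented end_master_once() standing in for the call_bio_endio() path:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag returned = ATOMIC_FLAG_INIT;         /* models R1BIO_Returned */

void end_master_once(void)
{
        /* test_and_set returns the previous value, so exactly one
         * caller sees 'false' and performs the completion. */
        if (!atomic_flag_test_and_set(&returned))
                printf("master bio completed\n");
}
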
332 static inline void update_head_pos(int disk, struct r1bio *r1_bio)
334 struct r1conf *conf = r1_bio->mddev->private;
337 r1_bio->sector + (r1_bio->sectors);
343 static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
346 struct r1conf *conf = r1_bio->mddev->private;
350 if (r1_bio->bios[mirror] == bio)
354 update_head_pos(mirror, r1_bio);
362 struct r1bio *r1_bio = bio->bi_private;
363 struct r1conf *conf = r1_bio->mddev->private;
364 struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
369 update_head_pos(r1_bio->read_disk, r1_bio);
372 set_bit(R1BIO_Uptodate, &r1_bio->state);
374 test_bit(R1BIO_FailFast, &r1_bio->state))
385 if (r1_bio->mddev->degraded == conf->raid_disks ||
386 (r1_bio->mddev->degraded == conf->raid_disks-1 &&
393 raid_end_bio_io(r1_bio);
403 (unsigned long long)r1_bio->sector);
404 set_bit(R1BIO_ReadError, &r1_bio->state);
405 reschedule_retry(r1_bio);
410 static void close_write(struct r1bio *r1_bio)
413 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
414 bio_free_pages(r1_bio->behind_master_bio);
415 bio_put(r1_bio->behind_master_bio);
416 r1_bio->behind_master_bio = NULL;
419 md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
420 r1_bio->sectors,
421 !test_bit(R1BIO_Degraded, &r1_bio->state),
422 test_bit(R1BIO_BehindIO, &r1_bio->state));
423 md_write_end(r1_bio->mddev);
426 static void r1_bio_write_done(struct r1bio *r1_bio)
428 if (!atomic_dec_and_test(&r1_bio->remaining))
431 if (test_bit(R1BIO_WriteError, &r1_bio->state))
432 reschedule_retry(r1_bio);
434 close_write(r1_bio);
435 if (test_bit(R1BIO_MadeGood, &r1_bio->state))
436 reschedule_retry(r1_bio);
438 raid_end_bio_io(r1_bio);
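
r1_bio_write_done() is the release side of the remaining refcount: raid1_write_request() primes it to 1, bumps it once per cloned mirror write, and the last decrement (usually the submitter dropping its initial reference) finishes the master bio. Below is a compilable model of that pattern with C11 atomics; the names are illustrative, not kernel API.

#include <stdatomic.h>
#include <stdio.h>

struct write_tracker {
        atomic_int remaining;           /* models r1_bio->remaining */
};

/* Models r1_bio_write_done(): only the caller that drops the last
 * reference performs the completion work. */
void write_done(struct write_tracker *t)
{
        if (atomic_fetch_sub(&t->remaining, 1) == 1)    /* just hit zero? */
                printf("all mirror writes finished; end the master bio\n");
}

int main(void)
{
        struct write_tracker t = { .remaining = 1 };    /* submitter's initial ref */
        int mirrors = 3;

        for (int i = 0; i < mirrors; i++)
                atomic_fetch_add(&t.remaining, 1);      /* one ref per cloned write */
        for (int i = 0; i < mirrors; i++)
                write_done(&t);                         /* per-mirror completions */
        write_done(&t);          /* submitter drops its initial reference */
        return 0;
}
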
444 struct r1bio *r1_bio = bio->bi_private;
445 int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
446 struct r1conf *conf = r1_bio->mddev->private;
448 int mirror = find_bio_disk(r1_bio, bio);
451 sector_t lo = r1_bio->sector;
452 sector_t hi = r1_bio->sector + r1_bio->sectors;
469 md_error(r1_bio->mddev, rdev);
477 set_bit(R1BIO_WriteError, &r1_bio->state);
480 set_bit(R1BIO_Degraded, &r1_bio->state);
482 r1_bio->bios[mirror] = NULL;
499 r1_bio->bios[mirror] = NULL;
511 set_bit(R1BIO_Uptodate, &r1_bio->state);
514 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
516 r1_bio->bios[mirror] = IO_MADE_GOOD;
517 set_bit(R1BIO_MadeGood, &r1_bio->state);
525 atomic_dec(&r1_bio->behind_remaining);
534 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
535 test_bit(R1BIO_Uptodate, &r1_bio->state)) {
537 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
538 struct bio *mbio = r1_bio->master_bio;
543 call_bio_endio(r1_bio);
548 if (r1_bio->bios[mirror] == NULL)
555 r1_bio_write_done(r1_bio);
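
The comparison at line 534 is the write-behind early-completion rule: if every write still outstanding is a behind write to a write-mostly device and at least one mirror already holds good data, the master bio can be ended immediately while the behind writes drain against the private behind_master_bio copy. The predicate in isolation, as a hypothetical helper with plain ints in place of the kernel's atomics:

#include <stdbool.h>

/*
 * 'remaining' counts outstanding writes plus the submitter's initial
 * reference (hence the -1), 'behind_remaining' counts those targeting
 * write-mostly devices, 'uptodate' means some mirror has good data.
 */
bool can_end_master_early(int remaining, int behind_remaining, bool uptodate)
{
        return uptodate && behind_remaining >= remaining - 1;
}
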
594 static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
596 const sector_t this_sector = r1_bio->sector;
615 sectors = r1_bio->sectors;
624 clear_bit(R1BIO_FailFast, &r1_bio->state);
642 if (r1_bio->bios[disk] == IO_BLOCKED
703 set_bit(R1BIO_FailFast, &r1_bio->state);
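
read_balance() selects which mirror serves a read; the listing shows only its r1_bio touch points (sector, FailFast state, IO_BLOCKED slots). As a rough illustration, here is just the closest-head heuristic the real function applies for rotational devices, with everything else it weighs (bad blocks, pending-I/O counts, SSD handling, sequential-read affinity) deliberately omitted; mirror_like and pick_read_disk() are invented.

#include <stdint.h>

typedef uint64_t sector_t;

struct mirror_like {
        sector_t head_position;         /* last known head position */
        int usable;                     /* stand-in for rdev state checks */
};

int pick_read_disk(struct mirror_like *m, int disks, sector_t this_sector)
{
        int best = -1;
        sector_t best_dist = ~(sector_t)0;

        for (int d = 0; d < disks; d++) {
                sector_t dist;

                if (!m[d].usable)
                        continue;
                dist = m[d].head_position > this_sector ?
                        m[d].head_position - this_sector :
                        this_sector - m[d].head_position;
                if (dist < best_dist) {
                        best_dist = dist;
                        best = d;
                }
        }
        return best;    /* -1 means no readable mirror, like read_balance() */
}
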
1099 static void alloc_behind_master_bio(struct r1bio *r1_bio,
1107 behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
1135 r1_bio->behind_master_bio = behind_bio;
1136 set_bit(R1BIO_BehindIO, &r1_bio->state);
1178 static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
1180 r1_bio->master_bio = bio;
1181 r1_bio->sectors = bio_sectors(bio);
1182 r1_bio->state = 0;
1183 r1_bio->mddev = mddev;
1184 r1_bio->sector = bio->bi_iter.bi_sector;
1191 struct r1bio *r1_bio;
1193 r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
1195 memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
1196 init_r1bio(r1_bio, mddev, bio);
1197 return r1_bio;
1201 int max_read_sectors, struct r1bio *r1_bio)
1211 bool print_msg = !!r1_bio;
1215 * If r1_bio is set, we are blocking the raid1d thread
1219 gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;
1225 rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
1239 if (!r1_bio)
1240 r1_bio = alloc_r1bio(mddev, bio);
1242 init_r1bio(r1_bio, mddev, bio);
1243 r1_bio->sectors = max_read_sectors;
1249 rdisk = read_balance(conf, r1_bio, &max_sectors);
1257 (unsigned long long)r1_bio->sector);
1259 raid_end_bio_io(r1_bio);
1267 (unsigned long long)r1_bio->sector,
1287 r1_bio->master_bio = bio;
1288 r1_bio->sectors = max_sectors;
1291 r1_bio->read_disk = rdisk;
1295 r1_bio->bios[rdisk] = read_bio;
1297 read_bio->bi_iter.bi_sector = r1_bio->sector +
1303 test_bit(R1BIO_FailFast, &r1_bio->state))
1305 read_bio->bi_private = r1_bio;
1309 disk_devt(mddev->gendisk), r1_bio->sector);
1318 struct r1bio *r1_bio;
1352 r1_bio = alloc_r1bio(mddev, bio);
1353 r1_bio->sectors = max_write_sectors;
1376 max_sectors = r1_bio->sectors;
1384 r1_bio->bios[i] = NULL;
1387 set_bit(R1BIO_Degraded, &r1_bio->state);
1397 is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
1406 if (is_bad && first_bad <= r1_bio->sector) {
1408 bad_sectors -= (r1_bio->sector - first_bad);
1428 int good_sectors = first_bad - r1_bio->sector;
1433 r1_bio->bios[i] = bio;
1442 if (r1_bio->bios[j])
1444 r1_bio->state = 0;
1458 r1_bio->master_bio = bio;
1459 r1_bio->sectors = max_sectors;
1462 atomic_set(&r1_bio->remaining, 1);
1463 atomic_set(&r1_bio->behind_remaining, 0);
1470 if (!r1_bio->bios[i])
1482 alloc_behind_master_bio(r1_bio, bio);
1485 md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
1486 test_bit(R1BIO_BehindIO, &r1_bio->state));
1490 if (r1_bio->behind_master_bio)
1491 mbio = bio_clone_fast(r1_bio->behind_master_bio,
1496 if (r1_bio->behind_master_bio) {
1498 wait_for_serialization(rdev, r1_bio);
1500 atomic_inc(&r1_bio->behind_remaining);
1502 wait_for_serialization(rdev, r1_bio);
1504 r1_bio->bios[i] = mbio;
1506 mbio->bi_iter.bi_sector = (r1_bio->sector +
1515 mbio->bi_private = r1_bio;
1517 atomic_inc(&r1_bio->remaining);
1522 r1_bio->sector);
1543 r1_bio_write_done(r1_bio);
1864 struct r1bio *r1_bio = get_resync_r1bio(bio);
1866 update_head_pos(r1_bio->read_disk, r1_bio);
1874 set_bit(R1BIO_Uptodate, &r1_bio->state);
1876 if (atomic_dec_and_test(&r1_bio->remaining))
1877 reschedule_retry(r1_bio);
1880 static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
1883 sector_t s = r1_bio->sector;
1884 long sectors_to_go = r1_bio->sectors;
1894 static void put_sync_write_buf(struct r1bio *r1_bio, int uptodate)
1896 if (atomic_dec_and_test(&r1_bio->remaining)) {
1897 struct mddev *mddev = r1_bio->mddev;
1898 int s = r1_bio->sectors;
1900 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1901 test_bit(R1BIO_WriteError, &r1_bio->state))
1902 reschedule_retry(r1_bio);
1904 put_buf(r1_bio);
1913 struct r1bio *r1_bio = get_resync_r1bio(bio);
1914 struct mddev *mddev = r1_bio->mddev;
1918 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
1921 abort_sync_write(mddev, r1_bio);
1926 set_bit(R1BIO_WriteError, &r1_bio->state);
1927 } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
1929 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
1930 r1_bio->sector,
1931 r1_bio->sectors,
1934 set_bit(R1BIO_MadeGood, &r1_bio->state);
1936 put_sync_write_buf(r1_bio, uptodate);
1958 static int fix_sync_read_error(struct r1bio *r1_bio)
1971 struct mddev *mddev = r1_bio->mddev;
1973 struct bio *bio = r1_bio->bios[r1_bio->read_disk];
1975 sector_t sect = r1_bio->sector;
1976 int sectors = r1_bio->sectors;
1980 rdev = conf->mirrors[r1_bio->read_disk].rdev;
1994 int d = r1_bio->read_disk;
2001 if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
2017 } while (!success && d != r1_bio->read_disk);
2029 (unsigned long long)r1_bio->sector);
2041 md_done_sync(mddev, r1_bio->sectors, 0);
2042 put_buf(r1_bio);
2054 while (d != r1_bio->read_disk) {
2058 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2064 r1_bio->bios[d]->bi_end_io = NULL;
2069 while (d != r1_bio->read_disk) {
2073 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2085 set_bit(R1BIO_Uptodate, &r1_bio->state);
2090 static void process_checks(struct r1bio *r1_bio)
2099 struct mddev *mddev = r1_bio->mddev;
2106 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
2109 struct bio *b = r1_bio->bios[i];
2117 b->bi_iter.bi_sector = r1_bio->sector +
2121 rp->raid_bio = r1_bio;
2125 md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
2128 if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
2129 !r1_bio->bios[primary]->bi_status) {
2130 r1_bio->bios[primary]->bi_end_io = NULL;
2134 r1_bio->read_disk = primary;
2137 struct bio *pbio = r1_bio->bios[primary];
2138 struct bio *sbio = r1_bio->bios[i];
2164 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
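
process_checks() re-reads the same sectors from every mirror, picks a primary whose read succeeded (lines 2128-2135), and compares the others against it, adding to resync_mismatches on any difference (line 2164). A standalone sketch of the comparison step; compare_to_primary() is invented, and the kernel walks bio pages rather than flat buffers.

#include <stddef.h>
#include <string.h>

#define PAGE_BYTES 4096

/* Count pages on which a secondary mirror disagrees with the primary
 * (0 means the copies match). */
int compare_to_primary(const unsigned char *primary,
                       const unsigned char *secondary, size_t bytes)
{
        int mismatched = 0;

        for (size_t off = 0; off < bytes; off += PAGE_BYTES) {
                size_t n = bytes - off < PAGE_BYTES ? bytes - off : PAGE_BYTES;

                if (memcmp(primary + off, secondary + off, n))
                        mismatched++;
        }
        return mismatched;
}
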
2177 static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2184 if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
2186 if (!fix_sync_read_error(r1_bio))
2190 process_checks(r1_bio);
2195 atomic_set(&r1_bio->remaining, 1);
2197 wbio = r1_bio->bios[i];
2200 (i == r1_bio->read_disk ||
2204 abort_sync_write(mddev, r1_bio);
2213 atomic_inc(&r1_bio->remaining);
2219 put_sync_write_buf(r1_bio, 1);
2326 static int narrow_write_error(struct r1bio *r1_bio, int i)
2328 struct mddev *mddev = r1_bio->mddev;
2346 int sect_to_write = r1_bio->sectors;
2354 sector = r1_bio->sector;
2365 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
2366 wbio = bio_clone_fast(r1_bio->behind_master_bio,
2370 wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
2375 wbio->bi_iter.bi_sector = r1_bio->sector;
2376 wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2378 bio_trim(wbio, sector - r1_bio->sector, sectors);
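
narrow_write_error() re-issues a failed write in badblock-granularity chunks, trimming the first chunk so later ones stay aligned, and records a bad block for each chunk that fails again. A sketch of that loop under stand-in I/O hooks: write_chunk() and record_badblock() are stubs, not kernel calls.

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t sector_t;

static bool write_chunk(sector_t sector, int sectors)
{
        (void)sector; (void)sectors;
        return true;                    /* stub: pretend the retry succeeded */
}

static void record_badblock(sector_t sector, int sectors)
{
        (void)sector; (void)sectors;    /* stub: kernel records this on the rdev */
}

bool narrow_retry(sector_t sector, int sect_to_write, int block_sectors)
{
        bool ok = true;

        while (sect_to_write) {
                int sectors = block_sectors;

                /* trim the first chunk so later chunks stay aligned */
                if (sector % block_sectors)
                        sectors = block_sectors - (int)(sector % block_sectors);
                if (sectors > sect_to_write)
                        sectors = sect_to_write;

                if (!write_chunk(sector, sectors)) {
                        record_badblock(sector, sectors);
                        ok = false;
                }
                sector += (sector_t)sectors;
                sect_to_write -= sectors;
        }
        return ok;
}
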
2396 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2399 int s = r1_bio->sectors;
2402 struct bio *bio = r1_bio->bios[m];
2406 test_bit(R1BIO_MadeGood, &r1_bio->state)) {
2407 rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
2410 test_bit(R1BIO_WriteError, &r1_bio->state)) {
2411 if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
2415 put_buf(r1_bio);
2419 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2425 if (r1_bio->bios[m] == IO_MADE_GOOD) {
2428 r1_bio->sector,
2429 r1_bio->sectors, 0);
2431 } else if (r1_bio->bios[m] != NULL) {
2437 if (!narrow_write_error(r1_bio, m)) {
2441 set_bit(R1BIO_Degraded, &r1_bio->state);
2448 list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
2449 idx = sector_to_idx(r1_bio->sector);
2459 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2460 close_write(r1_bio);
2461 raid_end_bio_io(r1_bio);
2465 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2471 clear_bit(R1BIO_ReadError, &r1_bio->state);
2481 bio = r1_bio->bios[r1_bio->read_disk];
2483 r1_bio->bios[r1_bio->read_disk] = NULL;
2485 rdev = conf->mirrors[r1_bio->read_disk].rdev;
2489 fix_read_error(conf, r1_bio->read_disk,
2490 r1_bio->sector, r1_bio->sectors);
2495 r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
2499 allow_barrier(conf, r1_bio->sector);
2500 bio = r1_bio->master_bio;
2502 /* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
2503 r1_bio->state = 0;
2504 raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
2510 struct r1bio *r1_bio;
2527 r1_bio = list_first_entry(&tmp, struct r1bio,
2529 list_del(&r1_bio->retry_list);
2530 idx = sector_to_idx(r1_bio->sector);
2533 set_bit(R1BIO_Degraded, &r1_bio->state);
2534 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2535 close_write(r1_bio);
2536 raid_end_bio_io(r1_bio);
2550 r1_bio = list_entry(head->prev, struct r1bio, retry_list);
2552 idx = sector_to_idx(r1_bio->sector);
2556 mddev = r1_bio->mddev;
2558 if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2559 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2560 test_bit(R1BIO_WriteError, &r1_bio->state))
2561 handle_sync_write_finished(conf, r1_bio);
2563 sync_request_write(mddev, r1_bio);
2564 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2565 test_bit(R1BIO_WriteError, &r1_bio->state))
2566 handle_write_finished(conf, r1_bio);
2567 else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2568 handle_read_error(conf, r1_bio);
2621 struct r1bio *r1_bio;
2697 r1_bio = raid1_alloc_init_r1buf(conf);
2709 r1_bio->mddev = mddev;
2710 r1_bio->sector = sector_nr;
2711 r1_bio->state = 0;
2712 set_bit(R1BIO_IsSync, &r1_bio->state);
2718 bio = r1_bio->bios[i];
2781 r1_bio->read_disk = disk;
2789 if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2797 put_buf(r1_bio);
2831 put_buf(r1_bio);
2861 bio = r1_bio->bios[i];
2878 r1_bio->sectors = nr_sectors;
2894 atomic_set(&r1_bio->remaining, read_targets);
2896 bio = r1_bio->bios[i];
2906 atomic_set(&r1_bio->remaining, 1);
2907 bio = r1_bio->bios[r1_bio->read_disk];