Lines matching references to r1_bio in drivers/md/raid1.c (the number at the start of each line is its line number in that file)

59 static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
64 sector_t lo = r1_bio->sector;
65 sector_t hi = lo + r1_bio->sectors;
82 static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
86 int idx = sector_to_idx(r1_bio->sector);
93 check_and_add_serial(rdev, r1_bio, si, idx) == 0);
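
The two fragments above are raid1's write-behind serialization gate: a write to the half-open range [lo, hi) may not start while any tracked in-flight write overlaps it, and wait_for_serialization() blocks in wait_event() until check_and_add_serial() returns 0. A minimal userspace model of that pattern, assuming an illustrative linked list and pthread primitives in place of the kernel's per-rdev interval tree and wait queue:

    #include <pthread.h>
    #include <stdint.h>

    struct range { uint64_t lo, hi; struct range *next; };

    static struct range *inflight;            /* per-rdev tree in the kernel */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;

    static int overlaps(uint64_t lo, uint64_t hi)
    {
        for (struct range *r = inflight; r; r = r->next)
            if (lo < r->hi && r->lo < hi)     /* half-open interval test */
                return 1;
        return 0;
    }

    /* analogue of wait_for_serialization(): sleep until [lo, hi) is clear;
     * the matching remove path (not shown) would signal `drained` */
    static void wait_then_add(struct range *nr)
    {
        pthread_mutex_lock(&lock);
        while (overlaps(nr->lo, nr->hi))
            pthread_cond_wait(&drained, &lock);
        nr->next = inflight;                  /* check_and_add_serial() == 0 */
        inflight = nr;
        pthread_mutex_unlock(&lock);
    }
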
149 struct r1bio *r1_bio;
155 r1_bio = r1bio_pool_alloc(gfp_flags, pi);
156 if (!r1_bio)
172 r1_bio->bios[j] = bio;
187 bio = r1_bio->bios[j];
197 rp->raid_bio = r1_bio;
201 r1_bio->master_bio = NULL;
203 return r1_bio;
211 bio_uninit(r1_bio->bios[j]);
212 kfree(r1_bio->bios[j]);
217 rbio_pool_free(r1_bio, data);
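
Lines 149-217 are the resync buffer pool constructor and destructor. The shape worth noting is the unwind on partial failure: every bio allocated before the failing one is uninitialized and freed in reverse before the container itself is released. A compilable sketch of that pattern, with made-up names:

    #include <stdlib.h>

    #define NBUFS 4

    struct pool_item { void *bufs[NBUFS]; };

    static struct pool_item *pool_item_alloc(size_t bufsz)
    {
        struct pool_item *it = calloc(1, sizeof(*it));
        int j;

        if (!it)
            return NULL;
        for (j = 0; j < NBUFS; j++) {
            it->bufs[j] = malloc(bufsz);
            if (!it->bufs[j])
                goto out_free;                /* mirrors the bio_uninit()/kfree() path */
        }
        return it;

    out_free:
        while (--j >= 0)                      /* free only what was taken, in reverse */
            free(it->bufs[j]);
        free(it);
        return NULL;
    }
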
241 static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
246 struct bio **bio = r1_bio->bios + i;
253 static void free_r1bio(struct r1bio *r1_bio)
255 struct r1conf *conf = r1_bio->mddev->private;
257 put_all_bios(conf, r1_bio);
258 mempool_free(r1_bio, &conf->r1bio_pool);
261 static void put_buf(struct r1bio *r1_bio)
263 struct r1conf *conf = r1_bio->mddev->private;
264 sector_t sect = r1_bio->sector;
268 struct bio *bio = r1_bio->bios[i];
270 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
273 mempool_free(r1_bio, &conf->r1buf_pool);
278 static void reschedule_retry(struct r1bio *r1_bio)
281 struct mddev *mddev = r1_bio->mddev;
285 idx = sector_to_idx(r1_bio->sector);
287 list_add(&r1_bio->retry_list, &conf->retry_list);
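
reschedule_retry() is the hand-off from completion context to the raid1d thread: the failed r1_bio goes on conf->retry_list under the device lock, the per-bucket pending counter indexed by sector_to_idx() is bumped, and the daemon is woken. A hedged pthread model of the queue-and-wake step (names invented):

    #include <pthread.h>

    struct retry { struct retry *next; };

    static struct retry *retry_list;
    static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t daemon_sleep = PTHREAD_COND_INITIALIZER;

    static void reschedule(struct retry *r)
    {
        pthread_mutex_lock(&device_lock);
        r->next = retry_list;                 /* list_add(&r1_bio->retry_list, ...) */
        retry_list = r;
        pthread_mutex_unlock(&device_lock);
        pthread_cond_signal(&daemon_sleep);   /* md_wakeup_thread() analogue */
    }
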
300 static void call_bio_endio(struct r1bio *r1_bio)
302 struct bio *bio = r1_bio->master_bio;
304 if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
310 static void raid_end_bio_io(struct r1bio *r1_bio)
312 struct bio *bio = r1_bio->master_bio;
313 struct r1conf *conf = r1_bio->mddev->private;
314 sector_t sector = r1_bio->sector;
317 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
323 call_bio_endio(r1_bio);
326 free_r1bio(r1_bio);
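
raid_end_bio_io() (and the write-behind fast path at 541 below) guards completion with test_and_set_bit(R1BIO_Returned, ...), so the master bio is ended exactly once no matter which path reaches it first. The same once-only idiom in portable C11:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag returned = ATOMIC_FLAG_INIT;

    static void end_io_once(void)
    {
        if (!atomic_flag_test_and_set(&returned))
            puts("master bio completed");     /* runs exactly once */
    }

    int main(void)
    {
        end_io_once();
        end_io_once();                        /* later callers see the bit already set */
        return 0;
    }
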
337 static inline void update_head_pos(int disk, struct r1bio *r1_bio)
339 struct r1conf *conf = r1_bio->mddev->private;
342 r1_bio->sector + (r1_bio->sectors);
348 static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
351 struct r1conf *conf = r1_bio->mddev->private;
355 if (r1_bio->bios[mirror] == bio)
359 update_head_pos(mirror, r1_bio);
367 struct r1bio *r1_bio = bio->bi_private;
368 struct r1conf *conf = r1_bio->mddev->private;
369 struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
374 update_head_pos(r1_bio->read_disk, r1_bio);
377 set_bit(R1BIO_Uptodate, &r1_bio->state);
379 test_bit(R1BIO_FailFast, &r1_bio->state))
390 if (r1_bio->mddev->degraded == conf->raid_disks ||
391 (r1_bio->mddev->degraded == conf->raid_disks-1 &&
398 raid_end_bio_io(r1_bio);
407 (unsigned long long)r1_bio->sector);
408 set_bit(R1BIO_ReadError, &r1_bio->state);
409 reschedule_retry(r1_bio);
414 static void close_write(struct r1bio *r1_bio)
417 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
418 bio_free_pages(r1_bio->behind_master_bio);
419 bio_put(r1_bio->behind_master_bio);
420 r1_bio->behind_master_bio = NULL;
423 md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
424 r1_bio->sectors,
425 !test_bit(R1BIO_Degraded, &r1_bio->state),
426 test_bit(R1BIO_BehindIO, &r1_bio->state));
427 md_write_end(r1_bio->mddev);
430 static void r1_bio_write_done(struct r1bio *r1_bio)
432 if (!atomic_dec_and_test(&r1_bio->remaining))
435 if (test_bit(R1BIO_WriteError, &r1_bio->state))
436 reschedule_retry(r1_bio);
438 close_write(r1_bio);
439 if (test_bit(R1BIO_MadeGood, &r1_bio->state))
440 reschedule_retry(r1_bio);
442 raid_end_bio_io(r1_bio);
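
r1_bio_write_done() runs once per mirror completion, but only the call that drops `remaining` to zero finishes the request, and only that call inspects WriteError/MadeGood. The counting idiom, with a C11 atomic standing in for the kernel's atomic_dec_and_test():

    #include <stdatomic.h>
    #include <stdbool.h>

    /* returns true only for the caller that takes the count to zero */
    static bool last_ref_dropped(atomic_int *remaining)
    {
        return atomic_fetch_sub(remaining, 1) == 1;   /* fetch_sub yields the old value */
    }
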
448 struct r1bio *r1_bio = bio->bi_private;
449 int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
450 struct r1conf *conf = r1_bio->mddev->private;
452 int mirror = find_bio_disk(r1_bio, bio);
455 sector_t lo = r1_bio->sector;
456 sector_t hi = r1_bio->sector + r1_bio->sectors;
473 md_error(r1_bio->mddev, rdev);
481 set_bit(R1BIO_WriteError, &r1_bio->state);
484 set_bit(R1BIO_Degraded, &r1_bio->state);
486 r1_bio->bios[mirror] = NULL;
503 r1_bio->bios[mirror] = NULL;
515 set_bit(R1BIO_Uptodate, &r1_bio->state);
518 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
520 r1_bio->bios[mirror] = IO_MADE_GOOD;
521 set_bit(R1BIO_MadeGood, &r1_bio->state);
529 atomic_dec(&r1_bio->behind_remaining);
538 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
539 test_bit(R1BIO_Uptodate, &r1_bio->state)) {
541 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
542 struct bio *mbio = r1_bio->master_bio;
547 call_bio_endio(r1_bio);
552 if (r1_bio->bios[mirror] == NULL)
559 r1_bio_write_done(r1_bio);
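
The test at 538 decides whether a write-behind request can signal the caller early: `remaining` counts every outstanding write plus the submitter's own reference, while `behind_remaining` counts only the WriteMostly clones, so behind_remaining >= remaining - 1 means everything still in flight is a behind write. As a predicate (my reading of those lines, not kernel code):

    #include <stdbool.h>

    static bool can_return_early(int remaining, int behind_remaining, bool uptodate)
    {
        /* everything still pending is write-behind, and the data is good */
        return behind_remaining >= remaining - 1 && uptodate;
    }
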
598 static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
600 const sector_t this_sector = r1_bio->sector;
619 sectors = r1_bio->sectors;
628 clear_bit(R1BIO_FailFast, &r1_bio->state);
646 if (r1_bio->bios[disk] == IO_BLOCKED
707 set_bit(R1BIO_FailFast, &r1_bio->state);
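
The core of read_balance() visible in these fragments is: skip mirrors marked IO_BLOCKED, then prefer the mirror whose recorded head position is nearest the target sector. The real function also weighs pending I/O, write-mostly flags, and bad blocks; this sketch keeps only the distance heuristic, with invented types:

    #include <stdint.h>

    #define IO_BLOCKED ((void *)1)            /* sentinel, as in the check at 646 */

    struct mirror { uint64_t head_position; void *bio; };

    static int pick_mirror(const struct mirror *m, int ndisks, uint64_t sector)
    {
        uint64_t best_dist = UINT64_MAX;
        int best = -1;

        for (int i = 0; i < ndisks; i++) {
            uint64_t dist;

            if (m[i].bio == IO_BLOCKED)
                continue;
            dist = m[i].head_position > sector ? m[i].head_position - sector
                                               : sector - m[i].head_position;
            if (dist < best_dist) {
                best_dist = dist;
                best = i;
            }
        }
        return best;                          /* -1: no usable mirror */
    }
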
1119 static void alloc_behind_master_bio(struct r1bio *r1_bio,
1128 &r1_bio->mddev->bio_set);
1157 r1_bio->behind_master_bio = behind_bio;
1158 set_bit(R1BIO_BehindIO, &r1_bio->state);
1193 static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
1195 r1_bio->master_bio = bio;
1196 r1_bio->sectors = bio_sectors(bio);
1197 r1_bio->state = 0;
1198 r1_bio->mddev = mddev;
1199 r1_bio->sector = bio->bi_iter.bi_sector;
1206 struct r1bio *r1_bio;
1208 r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
1210 memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
1211 init_r1bio(r1_bio, mddev, bio);
1212 return r1_bio;
1216 int max_read_sectors, struct r1bio *r1_bio)
1226 bool r1bio_existed = !!r1_bio;
1230 * If r1_bio is set, we are blocking the raid1d thread
1234 gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;
1240 rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
1258 if (!r1_bio)
1259 r1_bio = alloc_r1bio(mddev, bio);
1261 init_r1bio(r1_bio, mddev, bio);
1262 r1_bio->sectors = max_read_sectors;
1268 rdisk = read_balance(conf, r1_bio, &max_sectors);
1276 (unsigned long long)r1_bio->sector);
1278 raid_end_bio_io(r1_bio);
1286 (unsigned long long)r1_bio->sector,
1306 r1_bio->master_bio = bio;
1307 r1_bio->sectors = max_sectors;
1310 r1_bio->read_disk = rdisk;
1313 r1_bio->master_bio = bio;
1318 r1_bio->bios[rdisk] = read_bio;
1320 read_bio->bi_iter.bi_sector = r1_bio->sector +
1325 test_bit(R1BIO_FailFast, &r1_bio->state))
1327 read_bio->bi_private = r1_bio;
1331 r1_bio->sector);
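
When read_balance() can only serve part of the request (max_sectors < bio_sectors(bio), lines 1306-1307), raid1_read_request() keeps the front max_sectors for this r1_bio and the remainder is resubmitted. A toy version of that front/tail split, with an invented struct standing in for bio_split() and requeueing:

    #include <stdint.h>

    struct req { uint64_t sector; uint64_t sectors; };

    /* carve off the serviceable front; the caller resubmits what's left in *r */
    static struct req split_front(struct req *r, uint64_t max_sectors)
    {
        struct req front = { r->sector, max_sectors };

        r->sector += max_sectors;
        r->sectors -= max_sectors;
        return front;
    }
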
1340 struct r1bio *r1_bio;
1382 r1_bio = alloc_r1bio(mddev, bio);
1383 r1_bio->sectors = max_write_sectors;
1399 max_sectors = r1_bio->sectors;
1416 r1_bio->bios[i] = NULL;
1419 set_bit(R1BIO_Degraded, &r1_bio->state);
1429 is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
1438 if (is_bad && first_bad <= r1_bio->sector) {
1440 bad_sectors -= (r1_bio->sector - first_bad);
1460 int good_sectors = first_bad - r1_bio->sector;
1465 r1_bio->bios[i] = bio;
1474 if (r1_bio->bios[j])
1476 free_r1bio(r1_bio);
1504 r1_bio->master_bio = bio;
1505 r1_bio->sectors = max_sectors;
1509 r1_bio->master_bio = bio;
1510 atomic_set(&r1_bio->remaining, 1);
1511 atomic_set(&r1_bio->behind_remaining, 0);
1518 if (!r1_bio->bios[i])
1530 alloc_behind_master_bio(r1_bio, bio);
1533 md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
1534 test_bit(R1BIO_BehindIO, &r1_bio->state));
1538 if (r1_bio->behind_master_bio) {
1540 r1_bio->behind_master_bio,
1543 wait_for_serialization(rdev, r1_bio);
1545 atomic_inc(&r1_bio->behind_remaining);
1551 wait_for_serialization(rdev, r1_bio);
1554 r1_bio->bios[i] = mbio;
1556 mbio->bi_iter.bi_sector = (r1_bio->sector + rdev->data_offset);
1563 mbio->bi_private = r1_bio;
1565 atomic_inc(&r1_bio->remaining);
1569 r1_bio->sector);
1580 r1_bio_write_done(r1_bio);
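
The write path's accounting pairs with r1_bio_write_done() above: `remaining` starts at 1 (the submitter's own reference, line 1510), each mirror clone adds one, and the submitter drops its reference last so completion can never fire while clones are still being created. A skeleton of that fan-out with a stub submit:

    #include <stdatomic.h>

    struct wreq { atomic_int remaining; };

    static void submit_clone(struct wreq *w, int mirror) { (void)w; (void)mirror; /* stub */ }

    static void write_done(struct wreq *w)
    {
        if (atomic_fetch_sub(&w->remaining, 1) == 1) {
            /* last reference gone: end the master bio */
        }
    }

    static void fan_out(struct wreq *w, int nmirrors)
    {
        atomic_store(&w->remaining, 1);       /* matches atomic_set(..., 1) at 1510 */
        for (int i = 0; i < nmirrors; i++) {
            atomic_fetch_add(&w->remaining, 1);
            submit_clone(w, i);               /* each completion calls write_done() */
        }
        write_done(w);                        /* drop the submitter's reference */
    }
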
1910 struct r1bio *r1_bio = get_resync_r1bio(bio);
1912 update_head_pos(r1_bio->read_disk, r1_bio);
1920 set_bit(R1BIO_Uptodate, &r1_bio->state);
1922 if (atomic_dec_and_test(&r1_bio->remaining))
1923 reschedule_retry(r1_bio);
1926 static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
1929 sector_t s = r1_bio->sector;
1930 long sectors_to_go = r1_bio->sectors;
1940 static void put_sync_write_buf(struct r1bio *r1_bio, int uptodate)
1942 if (atomic_dec_and_test(&r1_bio->remaining)) {
1943 struct mddev *mddev = r1_bio->mddev;
1944 int s = r1_bio->sectors;
1946 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1947 test_bit(R1BIO_WriteError, &r1_bio->state))
1948 reschedule_retry(r1_bio);
1950 put_buf(r1_bio);
1959 struct r1bio *r1_bio = get_resync_r1bio(bio);
1960 struct mddev *mddev = r1_bio->mddev;
1964 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
1967 abort_sync_write(mddev, r1_bio);
1972 set_bit(R1BIO_WriteError, &r1_bio->state);
1973 } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
1975 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
1976 r1_bio->sector,
1977 r1_bio->sectors,
1980 set_bit(R1BIO_MadeGood, &r1_bio->state);
1982 put_sync_write_buf(r1_bio, uptodate);
2004 static int fix_sync_read_error(struct r1bio *r1_bio)
2017 struct mddev *mddev = r1_bio->mddev;
2019 struct bio *bio = r1_bio->bios[r1_bio->read_disk];
2021 sector_t sect = r1_bio->sector;
2022 int sectors = r1_bio->sectors;
2026 rdev = conf->mirrors[r1_bio->read_disk].rdev;
2040 int d = r1_bio->read_disk;
2047 if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
2063 } while (!success && d != r1_bio->read_disk);
2074 (unsigned long long)r1_bio->sector);
2086 md_done_sync(mddev, r1_bio->sectors, 0);
2087 put_buf(r1_bio);
2099 while (d != r1_bio->read_disk) {
2103 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2109 r1_bio->bios[d]->bi_end_io = NULL;
2114 while (d != r1_bio->read_disk) {
2118 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2130 set_bit(R1BIO_Uptodate, &r1_bio->state);
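
The retry loop in fix_sync_read_error() walks the other mirrors in ring order starting after read_disk, stopping on the first successful read or when it comes back around (the `while (!success && d != r1_bio->read_disk)` at 2063). The same shape in userspace C, with read_page() as a stand-in for the kernel's sync_page_io():

    #include <stdbool.h>

    static bool read_page(int disk) { (void)disk; return false; /* sync_page_io() stand-in */ }

    static int try_other_mirrors(int read_disk, int ndisks)
    {
        int d = read_disk;

        do {
            d = (d + 1) % ndisks;             /* next mirror, wrapping */
            if (read_page(d))
                return d;                     /* success: repair the rest from d */
        } while (d != read_disk);
        return -1;                            /* every mirror failed: abort the sync */
    }
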
2135 static void process_checks(struct r1bio *r1_bio)
2144 struct mddev *mddev = r1_bio->mddev;
2151 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
2154 struct bio *b = r1_bio->bios[i];
2162 b->bi_iter.bi_sector = r1_bio->sector +
2165 rp->raid_bio = r1_bio;
2169 md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
2172 if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
2173 !r1_bio->bios[primary]->bi_status) {
2174 r1_bio->bios[primary]->bi_end_io = NULL;
2178 r1_bio->read_disk = primary;
2181 struct bio *pbio = r1_bio->bios[primary];
2182 struct bio *sbio = r1_bio->bios[i];
2208 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
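
process_checks() compares every successfully read buffer against the primary; vcnt at 2151 is just the sector count rounded up to whole pages. Assuming one contiguous buffer per mirror (the kernel iterates resync pages instead), the comparison step reduces to:

    #include <string.h>

    static long count_mismatches(unsigned char **buf, int nmirrors,
                                 int primary, unsigned long sectors)
    {
        long mismatched_sectors = 0;

        for (int i = 0; i < nmirrors; i++) {
            if (i == primary)
                continue;
            if (memcmp(buf[primary], buf[i], sectors * 512))
                mismatched_sectors += sectors;   /* as atomic64_add(...) at 2208 */
        }
        return mismatched_sectors;
    }
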
2221 static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2228 if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
2230 if (!fix_sync_read_error(r1_bio))
2234 process_checks(r1_bio);
2239 atomic_set(&r1_bio->remaining, 1);
2241 wbio = r1_bio->bios[i];
2244 (i == r1_bio->read_disk ||
2248 abort_sync_write(mddev, r1_bio);
2257 atomic_inc(&r1_bio->remaining);
2263 put_sync_write_buf(r1_bio, 1);
2369 static int narrow_write_error(struct r1bio *r1_bio, int i)
2371 struct mddev *mddev = r1_bio->mddev;
2389 int sect_to_write = r1_bio->sectors;
2397 sector = r1_bio->sector;
2408 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
2410 r1_bio->behind_master_bio,
2413 wbio = bio_alloc_clone(rdev->bdev, r1_bio->master_bio,
2418 wbio->bi_iter.bi_sector = r1_bio->sector;
2419 wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2421 bio_trim(wbio, sector - r1_bio->sector, sectors);
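
narrow_write_error() retries a failed write in block-sized, boundary-aligned pieces so only the truly bad sectors get recorded as bad blocks rather than failing the whole range. A hedged model of that chunking, with write_chunk() and record_badblocks() as stand-ins for the kernel helpers:

    #include <stdbool.h>
    #include <stdint.h>

    static bool write_chunk(uint64_t sector, uint64_t n) { (void)sector; (void)n; return true; }
    static bool record_badblocks(uint64_t sector, uint64_t n) { (void)sector; (void)n; return true; }

    static bool narrow_retry(uint64_t sector, uint64_t sect_to_write, uint64_t block_sectors)
    {
        bool ok = true;

        while (sect_to_write) {
            /* trim so a chunk never crosses a block boundary
             * (block_sectors must be a power of two for the mask) */
            uint64_t sectors = block_sectors - (sector & (block_sectors - 1));

            if (sectors > sect_to_write)
                sectors = sect_to_write;
            if (!write_chunk(sector, sectors))
                ok = record_badblocks(sector, sectors) && ok;
            sector += sectors;
            sect_to_write -= sectors;
        }
        return ok;
    }
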
2438 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2441 int s = r1_bio->sectors;
2444 struct bio *bio = r1_bio->bios[m];
2448 test_bit(R1BIO_MadeGood, &r1_bio->state)) {
2449 rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
2452 test_bit(R1BIO_WriteError, &r1_bio->state)) {
2453 if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
2457 put_buf(r1_bio);
2461 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2467 if (r1_bio->bios[m] == IO_MADE_GOOD) {
2470 r1_bio->sector,
2471 r1_bio->sectors, 0);
2473 } else if (r1_bio->bios[m] != NULL) {
2479 if (!narrow_write_error(r1_bio, m)) {
2483 set_bit(R1BIO_Degraded, &r1_bio->state);
2490 list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
2491 idx = sector_to_idx(r1_bio->sector);
2501 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2502 close_write(r1_bio);
2503 raid_end_bio_io(r1_bio);
2507 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2514 clear_bit(R1BIO_ReadError, &r1_bio->state);
2524 bio = r1_bio->bios[r1_bio->read_disk];
2526 r1_bio->bios[r1_bio->read_disk] = NULL;
2528 rdev = conf->mirrors[r1_bio->read_disk].rdev;
2532 fix_read_error(conf, r1_bio->read_disk,
2533 r1_bio->sector, r1_bio->sectors);
2538 r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
2542 sector = r1_bio->sector;
2543 bio = r1_bio->master_bio;
2545 /* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
2546 r1_bio->state = 0;
2547 raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
2554 struct r1bio *r1_bio;
2571 r1_bio = list_first_entry(&tmp, struct r1bio,
2573 list_del(&r1_bio->retry_list);
2574 idx = sector_to_idx(r1_bio->sector);
2577 set_bit(R1BIO_Degraded, &r1_bio->state);
2578 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2579 close_write(r1_bio);
2580 raid_end_bio_io(r1_bio);
2594 r1_bio = list_entry(head->prev, struct r1bio, retry_list);
2596 idx = sector_to_idx(r1_bio->sector);
2600 mddev = r1_bio->mddev;
2602 if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2603 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2604 test_bit(R1BIO_WriteError, &r1_bio->state))
2605 handle_sync_write_finished(conf, r1_bio);
2607 sync_request_write(mddev, r1_bio);
2608 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2609 test_bit(R1BIO_WriteError, &r1_bio->state))
2610 handle_write_finished(conf, r1_bio);
2611 else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2612 handle_read_error(conf, r1_bio);
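
The bottom of raid1d() pulls one r1_bio off the retry list and routes it by its state bits, in exactly the order tested at 2602-2612. A skeleton of that dispatch, with plain bools and stub handlers standing in for the bitops and real functions:

    #include <stdbool.h>

    struct rb { bool is_sync, made_good, write_error, read_error; };

    static void handle_sync_write_finished(struct rb *r) { (void)r; }
    static void sync_request_write(struct rb *r) { (void)r; }
    static void handle_write_finished(struct rb *r) { (void)r; }
    static void handle_read_error(struct rb *r) { (void)r; }

    static void dispatch(struct rb *r)
    {
        if (r->is_sync) {
            if (r->made_good || r->write_error)
                handle_sync_write_finished(r);
            else
                sync_request_write(r);
        } else if (r->made_good || r->write_error) {
            handle_write_finished(r);
        } else if (r->read_error) {
            handle_read_error(r);
        }
    }
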
2665 struct r1bio *r1_bio;
2741 r1_bio = raid1_alloc_init_r1buf(conf);
2753 r1_bio->mddev = mddev;
2754 r1_bio->sector = sector_nr;
2755 r1_bio->state = 0;
2756 set_bit(R1BIO_IsSync, &r1_bio->state);
2762 bio = r1_bio->bios[i];
2825 r1_bio->read_disk = disk;
2833 if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2841 put_buf(r1_bio);
2875 put_buf(r1_bio);
2905 bio = r1_bio->bios[i];
2922 r1_bio->sectors = nr_sectors;
2938 atomic_set(&r1_bio->remaining, read_targets);
2940 bio = r1_bio->bios[i];
2950 atomic_set(&r1_bio->remaining, 1);
2951 bio = r1_bio->bios[r1_bio->read_disk];