Lines matching defs:bio (definitions and uses of bio in drivers/md/raid1.c)

122  * for resync bio, r1bio pointer can be retrieved from the per-bio
125 static inline struct r1bio *get_resync_r1bio(struct bio *bio)
127 return get_resync_pages(bio)->raid_bio;
150 struct bio *bio;
168 bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
169 if (!bio)
171 bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
172 r1_bio->bios[j] = bio;
176 * the first bio.
178 * RESYNC_PAGES for each bio.
187 bio = r1_bio->bios[j];
198 bio->bi_private = rp;
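
A minimal sketch of the linkage the comment at 122 and the assignment at 198 describe: each resync bio carries a per-bio resync_pages descriptor in .bi_private, and the owning r1bio is reached through it. Field layout follows struct resync_pages in md.h and may differ in detail from the tree this listing was taken from.

    struct resync_pages {
            void *raid_bio;                   /* the r1bio this resync bio belongs to */
            struct page *pages[RESYNC_PAGES]; /* data pages for the sync window */
    };

    static inline struct resync_pages *get_resync_pages(struct bio *bio)
    {
            return bio->bi_private;           /* stashed at allocation time, line 198 */
    }

    static inline struct r1bio *get_resync_r1bio(struct bio *bio)
    {
            return get_resync_pages(bio)->raid_bio;   /* line 127 */
    }
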
235 /* resync pages array stored in the 1st bio's .bi_private */
246 struct bio **bio = r1_bio->bios + i;
247 if (!BIO_SPECIAL(*bio))
248 bio_put(*bio);
249 *bio = NULL;
268 struct bio *bio = r1_bio->bios[i];
269 if (bio->bi_end_io)
302 struct bio *bio = r1_bio->master_bio;
305 bio->bi_status = BLK_STS_IOERR;
307 bio_endio(bio);
312 struct bio *bio = r1_bio->master_bio;
319 (bio_data_dir(bio) == WRITE) ? "write" : "read",
320 (unsigned long long) bio->bi_iter.bi_sector,
321 (unsigned long long) bio_end_sector(bio) - 1);
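
Lines 302-321 are the end of an r1bio's life: the original ("master") bio is completed, and an error is reported only if no mirror ever set R1BIO_Uptodate. A hedged sketch of that completion step:

    static void call_bio_endio(struct r1bio *r1_bio)
    {
            struct bio *bio = r1_bio->master_bio;

            /* fail the master bio only if no mirror delivered good data */
            if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
                    bio->bi_status = BLK_STS_IOERR;   /* line 305 */

            bio_endio(bio);                           /* line 307 */
    }
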
346 * Find the disk number which triggered given bio
348 static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
355 if (r1_bio->bios[mirror] == bio)
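
find_bio_disk() (348) maps a completed per-mirror bio back to the slot it was issued from by scanning r1_bio->bios[]. A simplified sketch, assuming the usual raid_disks * 2 slot layout (originals plus replacements); the real function also updates the recorded head position:

    static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
    {
            struct r1conf *conf = r1_bio->mddev->private;
            int raid_disks = conf->raid_disks;
            int mirror;

            for (mirror = 0; mirror < raid_disks * 2; mirror++)
                    if (r1_bio->bios[mirror] == bio)    /* line 355 */
                            break;

            BUG_ON(mirror == raid_disks * 2);   /* every completion must match a slot */
            return mirror;
    }
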
364 static void raid1_end_read_request(struct bio *bio)
366 int uptodate = !bio->bi_status;
367 struct r1bio *r1_bio = bio->bi_private;
446 static void raid1_end_write_request(struct bio *bio)
448 struct r1bio *r1_bio = bio->bi_private;
451 struct bio *to_put = NULL;
452 int mirror = find_bio_disk(r1_bio, bio);
458 discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
463 if (bio->bi_status && !discard_error) {
470 (bio->bi_opf & MD_FAILFAST) &&
487 to_put = bio;
491 * Set R1BIO_Uptodate in our master bio, so that we
498 * will wait for the 'master' bio.
504 to_put = bio;
532 * In behind mode, we ACK the master bio once the I/O
542 struct bio *mbio = r1_bio->master_bio;
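
On the write side (446-542) every issued mirror write holds a count on the r1bio, and the master bio is acknowledged only when the last one completes (earlier for write-behind, which per the comment at 532 ACKs the master bio once the non-write-mostly mirrors are done). A hedged, simplified sketch of the "last completion finishes the master bio" pattern; the real code also handles MadeGood/WriteError bookkeeping:

    static void r1_bio_write_done(struct r1bio *r1_bio)
    {
            /* one reference per issued mirror write */
            if (!atomic_dec_and_test(&r1_bio->remaining))
                    return;

            if (test_bit(R1BIO_WriteError, &r1_bio->state))
                    reschedule_retry(r1_bio);   /* hand off to raid1d for recovery */
            else
                    raid_end_bio_io(r1_bio);    /* completes r1_bio->master_bio */
    }
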
799 static void flush_bio_list(struct r1conf *conf, struct bio *bio)
805 while (bio) { /* submit pending writes */
806 struct bio *next = bio->bi_next;
808 raid1_submit_write(bio);
809 bio = next;
823 struct bio *bio;
825 bio = bio_list_get(&conf->pending_bio_list);
839 flush_bio_list(conf, bio);
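
flush_bio_list() (799) and the drain at 823-839 walk a singly linked chain of bios through ->bi_next, detached from conf->pending_bio_list in one step. A hedged sketch of the pattern; the real code also kicks the bitmap and plugs the queue:

    spin_lock_irq(&conf->device_lock);
    bio = bio_list_get(&conf->pending_bio_list);    /* take the whole list, line 825 */
    spin_unlock_irq(&conf->device_lock);

    while (bio) {                                   /* submit pending writes, line 805 */
            struct bio *next = bio->bi_next;

            bio->bi_next = NULL;                    /* unlink before submitting */
            raid1_submit_write(bio);                /* line 808 */
            bio = next;
    }
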
1120 struct bio *bio)
1122 int size = bio->bi_iter.bi_size;
1125 struct bio *behind_bio = NULL;
1133 if (!bio_has_data(bio)) {
1155 bio_copy_data(behind_bio, bio);
1164 bio->bi_iter.bi_size);
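
alloc_behind_master_bio() (1120-1164) builds a private copy of the write payload so the master bio can complete before the write-mostly ("write-behind") mirrors finish: a new bio gets one freshly allocated page per PAGE_SIZE chunk and the data is copied into it. A simplified, hedged sketch; the real code also covers the no-data case at 1133 and logs the fallback at 1164:

    static void alloc_behind_master_bio(struct r1bio *r1_bio, struct bio *bio)
    {
            int size = bio->bi_iter.bi_size;                /* line 1122 */
            unsigned int vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
            struct bio *behind_bio;
            int i = 0;

            behind_bio = bio_alloc_bioset(NULL, vcnt, 0, GFP_NOIO,
                                          &r1_bio->mddev->bio_set);
            if (!behind_bio)
                    return;

            while (i < vcnt && size) {
                    int len = min_t(int, PAGE_SIZE, size);
                    struct page *page = alloc_page(GFP_NOIO);

                    if (!page)
                            goto free_pages;
                    if (!bio_add_page(behind_bio, page, len, 0)) {
                            put_page(page);
                            goto free_pages;
                    }
                    size -= len;
                    i++;
            }

            bio_copy_data(behind_bio, bio);                 /* line 1155 */
            r1_bio->behind_master_bio = behind_bio;
            set_bit(R1BIO_BehindIO, &r1_bio->state);
            return;

    free_pages:
            /* fall back to ordinary synchronous writes */
            bio_free_pages(behind_bio);
            bio_put(behind_bio);
    }
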
1175 struct bio *bio;
1188 bio = bio_list_get(&plug->pending);
1189 flush_bio_list(conf, bio);
1193 static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
1195 r1_bio->master_bio = bio;
1196 r1_bio->sectors = bio_sectors(bio);
1199 r1_bio->sector = bio->bi_iter.bi_sector;
1203 alloc_r1bio(struct mddev *mddev, struct bio *bio)
1209 /* Ensure no bio records IO_BLOCKED */
1211 init_r1bio(r1_bio, mddev, bio);
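
alloc_r1bio() (1203-1211) takes an r1bio from the per-array mempool, clears the per-mirror bio slots so none of them still records IO_BLOCKED from a previous user, and then attaches the master bio via init_r1bio(). A hedged sketch:

    static struct r1bio *alloc_r1bio(struct mddev *mddev, struct bio *bio)
    {
            struct r1conf *conf = mddev->private;
            struct r1bio *r1_bio;

            r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
            /* Ensure no bio records IO_BLOCKED (line 1209) */
            memset(r1_bio->bios, 0, conf->raid_disks * 2 * sizeof(r1_bio->bios[0]));
            init_r1bio(r1_bio, mddev, bio);         /* line 1211 */
            return r1_bio;
    }
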
1215 static void raid1_read_request(struct mddev *mddev, struct bio *bio,
1220 struct bio *read_bio;
1222 const enum req_op op = bio_op(bio);
1223 const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
1252 if (!wait_read_barrier(conf, bio->bi_iter.bi_sector,
1253 bio->bi_opf & REQ_NOWAIT)) {
1254 bio_wouldblock_error(bio);
1259 r1_bio = alloc_r1bio(mddev, bio);
1261 init_r1bio(r1_bio, mddev, bio);
1300 if (max_sectors < bio_sectors(bio)) {
1301 struct bio *split = bio_split(bio, max_sectors,
1303 bio_chain(split, bio);
1304 submit_bio_noacct(bio);
1305 bio = split;
1306 r1_bio->master_bio = bio;
1312 md_account_bio(mddev, &bio);
1313 r1_bio->master_bio = bio;
1315 read_bio = bio_alloc_clone(mirror->rdev->bdev, bio, gfp,
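
The read path (1215-1315) first trims the request to what the selected mirror can serve: when the bio covers more than max_sectors (typically clipped by bad blocks), the front piece is split off, the remainder is chained behind it and requeued (1300-1306), and only the front piece is cloned and sent to the chosen rdev. A hedged sketch of how that clone is finished off after line 1315; field setup follows the usual md pattern and may differ in detail:

    read_bio = bio_alloc_clone(mirror->rdev->bdev, bio, gfp, &mddev->bio_set);
    r1_bio->bios[r1_bio->read_disk] = read_bio;

    /* remap the array sector onto the member device */
    read_bio->bi_iter.bi_sector = r1_bio->sector + mirror->rdev->data_offset;
    read_bio->bi_end_io = raid1_end_read_request;
    read_bio->bi_private = r1_bio;
    read_bio->bi_opf = op | do_sync;        /* op/do_sync from lines 1222-1223 */

    submit_bio_noacct(read_bio);
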
1336 static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1351 bio->bi_iter.bi_sector, bio_end_sector(bio))) {
1354 if (bio->bi_opf & REQ_NOWAIT) {
1355 bio_wouldblock_error(bio);
1362 bio->bi_iter.bi_sector,
1363 bio_end_sector(bio)))
1375 if (!wait_barrier(conf, bio->bi_iter.bi_sector,
1376 bio->bi_opf & REQ_NOWAIT)) {
1377 bio_wouldblock_error(bio);
1382 r1_bio = alloc_r1bio(mddev, bio);
1387 * bios[x] to bio
1406 * bio later.
1465 r1_bio->bios[i] = bio;
1477 allow_barrier(conf, bio->bi_iter.bi_sector);
1479 if (bio->bi_opf & REQ_NOWAIT) {
1480 bio_wouldblock_error(bio);
1485 wait_barrier(conf, bio->bi_iter.bi_sector, false);
1492 * at a time and thus needs a new bio that can fit the whole payload
1493 * this bio in page sized chunks.
1498 if (max_sectors < bio_sectors(bio)) {
1499 struct bio *split = bio_split(bio, max_sectors,
1501 bio_chain(split, bio);
1502 submit_bio_noacct(bio);
1503 bio = split;
1504 r1_bio->master_bio = bio;
1508 md_account_bio(mddev, &bio);
1509 r1_bio->master_bio = bio;
1516 struct bio *mbio = NULL;
1530 alloc_behind_master_bio(r1_bio, bio);
1547 mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
1558 mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
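
The write path clones the (possibly split, possibly behind-copied) bio once per selected mirror, bumping r1_bio->remaining before each clone is queued so the completion handler knows when the last write has landed. A hedged sketch of one pass of that loop (around 1516-1558); the real code also handles bad-block clipping, bitmap accounting and plugging:

    for (i = 0; i < conf->raid_disks * 2; i++) {
            struct md_rdev *rdev = conf->mirrors[i].rdev;
            struct bio *mbio;

            if (!r1_bio->bios[i])           /* this mirror is not being written */
                    continue;

            if (test_bit(R1BIO_BehindIO, &r1_bio->state))
                    mbio = bio_alloc_clone(rdev->bdev, r1_bio->behind_master_bio,
                                           GFP_NOIO, &mddev->bio_set);
            else
                    mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
                                           &mddev->bio_set);    /* line 1547 */

            mbio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
            mbio->bi_end_io = raid1_end_write_request;
            mbio->bi_opf = bio_op(bio) |
                           (bio->bi_opf & (REQ_SYNC | REQ_FUA));    /* line 1558 */
            mbio->bi_private = r1_bio;

            atomic_inc(&r1_bio->remaining);
            submit_bio_noacct(mbio);
    }
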
1586 static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
1590 if (unlikely(bio->bi_opf & REQ_PREFLUSH)
1591 && md_flush_request(mddev, bio))
1602 bio->bi_iter.bi_sector, bio_sectors(bio));
1604 if (bio_data_dir(bio) == READ)
1605 raid1_read_request(mddev, bio, sectors, NULL);
1607 if (!md_write_start(mddev, bio))
1609 raid1_write_request(mddev, bio, sectors);
1908 static void end_sync_read(struct bio *bio)
1910 struct r1bio *r1_bio = get_resync_r1bio(bio);
1919 if (!bio->bi_status)
1956 static void end_sync_write(struct bio *bio)
1958 int uptodate = !bio->bi_status;
1959 struct r1bio *r1_bio = get_resync_r1bio(bio);
1964 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
2019 struct bio *bio = r1_bio->bios[r1_bio->read_disk];
2020 struct page **pages = get_resync_pages(bio)->pages;
2035 bio->bi_end_io = end_sync_write;
2073 mdname(mddev), bio->bi_bdev,
2131 bio->bi_status = 0;
2154 struct bio *b = r1_bio->bios[i];
2158 /* fixup the bio for reuse, but preserve errno */
2181 struct bio *pbio = r1_bio->bios[primary];
2182 struct bio *sbio = r1_bio->bios[i];
2226 struct bio *wbio;
2375 /* bio has the data to be written to device 'i' where
2377 * We repeatedly clone the bio and trim down to one block,
2380 * It is conceivable that the bio doesn't exactly align with
2403 struct bio *wbio;
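
The comment at 2375-2380 describes narrow_write_error(): after a failed write the master bio is repeatedly cloned, trimmed down to one bad-block-sized chunk and retried, so the region that really fails can be recorded as bad even when the bio is not block aligned. A hedged sketch of the retry loop; sector/sectors track the remaining span of the r1bio and block_sectors is the bad-block granularity set up by the enclosing function:

    while (sectors) {
            /* at most one aligned, badblock-sized chunk per pass */
            int sect_to_write = min(sectors,
                        block_sectors - (int)(sector & (block_sectors - 1)));
            struct bio *wbio;

            wbio = bio_alloc_clone(rdev->bdev, r1_bio->master_bio,
                                   GFP_NOIO, &mddev->bio_set);
            wbio->bi_opf = REQ_OP_WRITE;
            wbio->bi_iter.bi_sector = r1_bio->sector;
            wbio->bi_iter.bi_size = r1_bio->sectors << 9;

            /* trim the clone down to this chunk, then remap onto the device */
            bio_trim(wbio, sector - r1_bio->sector, sect_to_write);
            wbio->bi_iter.bi_sector += rdev->data_offset;

            if (submit_bio_wait(wbio) < 0)
                    /* still failing: remember the chunk as a bad block */
                    ok = rdev_set_badblocks(rdev, sector, sect_to_write, 0) && ok;

            bio_put(wbio);
            sectors -= sect_to_write;
            sector += sect_to_write;
    }
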
2444 struct bio *bio = r1_bio->bios[m];
2445 if (bio->bi_end_io == NULL)
2447 if (!bio->bi_status &&
2451 if (bio->bi_status &&
2510 struct bio *bio;
2524 bio = r1_bio->bios[r1_bio->read_disk];
2525 bio_put(bio);
2543 bio = r1_bio->master_bio;
2547 raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
2638 struct bio *bio;
2642 bio = r1bio->bios[i];
2643 rps = bio->bi_private;
2644 bio_reset(bio, NULL, 0);
2645 bio->bi_private = rps;
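
Around 2638-2645 the resync bios are recycled for the next chunk. bio_reset() wipes the whole bio, .bi_private included, so the resync_pages pointer has to be saved and put back, which is what the matched lines do. A short sketch of the save/restore for one bio:

    struct resync_pages *rps = bio->bi_private;   /* line 2643 */

    bio_reset(bio, NULL, 0);   /* clears bi_private along with everything else */
    bio->bi_private = rps;     /* re-attach the per-bio pages descriptor, line 2645 */
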
2666 struct bio *bio;
2762 bio = r1_bio->bios[i];
2770 bio->bi_opf = REQ_OP_WRITE;
2771 bio->bi_end_io = end_sync_write;
2797 bio->bi_opf = REQ_OP_READ;
2798 bio->bi_end_io = end_sync_read;
2809 bio->bi_opf = REQ_OP_WRITE;
2810 bio->bi_end_io = end_sync_write;
2814 if (rdev && bio->bi_end_io) {
2816 bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
2817 bio_set_dev(bio, rdev->bdev);
2819 bio->bi_opf |= MD_FAILFAST;
2905 bio = r1_bio->bios[i];
2906 rp = get_resync_pages(bio);
2907 if (bio->bi_end_io) {
2914 __bio_add_page(bio, page, len, 0);
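
At 2905-2914, for every page-sized step of the sync window, each participating bio has a page attached from its resync_pages descriptor (outside a user-requested check/repair those descriptors share the first bio's pages, per the comment at 176-178). A hedged sketch of the attach loop; page_idx and len come from the enclosing loop over the sync window:

    for (i = 0; i < conf->raid_disks * 2; i++) {
            struct bio *bio = r1_bio->bios[i];
            struct resync_pages *rp = get_resync_pages(bio);   /* line 2906 */

            if (!bio->bi_end_io)    /* this slot takes no part in the sync */
                    continue;

            /* the vec table was sized for RESYNC_PAGES, so this cannot fail */
            __bio_add_page(bio, rp->pages[page_idx], len, 0);
    }
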
2940 bio = r1_bio->bios[i];
2941 if (bio->bi_end_io == end_sync_read) {
2943 md_sync_acct_bio(bio, nr_sectors);
2945 bio->bi_opf &= ~MD_FAILFAST;
2946 submit_bio_noacct(bio);
2951 bio = r1_bio->bios[r1_bio->read_disk];
2952 md_sync_acct_bio(bio, nr_sectors);
2954 bio->bi_opf &= ~MD_FAILFAST;
2955 submit_bio_noacct(bio);