Lines Matching defs:bio

114 	struct bio flush_bio;
215 struct bio *current_bio;	/* current_bio accepting new data */
226 struct bio *split_bio;
244 IO_UNIT_IO_START = 1, /* io_unit bio starts writing to log,
245 * don't accept new bio */
246 IO_UNIT_IO_END = 2, /* io_unit bio finishes writing to log */
294 struct bio *wbi, *wbi2;
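The two IO_UNIT_* matches above come from the enum that tracks the life of a log I/O unit. A minimal reconstruction of that state machine is sketched below; the enum name and the first and last states (with their comments) do not appear in the listing and are assumptions.

enum r5l_io_unit_state {		/* name assumed */
	IO_UNIT_RUNNING = 0,	/* assumed: io_unit still accepting new IO */
	IO_UNIT_IO_START = 1,	/* io_unit bio starts writing to log,
				 * don't accept new bio */
	IO_UNIT_IO_END = 2,	/* io_unit bio finishes writing to log */
	IO_UNIT_STRIPE_END = 3,	/* assumed: stripe data has reached the raid disks */
};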
558 static void r5l_log_endio(struct bio *bio)
560 struct r5l_io_unit *io = bio->bi_private;
567 if (bio->bi_status)
570 bio_put(bio);
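r5l_log_endio() is a standard bio completion callback: the submitter stashes its r5l_io_unit in bi_private, the callback inspects bi_status for errors and drops the bio with bio_put(). A minimal sketch of that pattern, using a hypothetical my_ctx/my_endio pair rather than the real io_unit handling:

#include <linux/bio.h>
#include <linux/completion.h>

/* hypothetical per-request context; the real code keeps an r5l_io_unit here */
struct my_ctx {
	struct completion done;
	blk_status_t status;
};

static void my_endio(struct bio *bio)
{
	struct my_ctx *ctx = bio->bi_private;	/* stashed before submission */

	ctx->status = bio->bi_status;		/* non-zero on I/O error */
	complete(&ctx->done);			/* wake whoever is waiting */
	bio_put(bio);				/* drop the submitter's reference */
}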
607 struct bio *bi;
736 static struct bio *r5l_bio_alloc(struct r5l_log *log)
738 struct bio *bio = bio_alloc_bioset(log->rdev->bdev, BIO_MAX_VECS,
741 bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
743 return bio;
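r5l_bio_alloc() hands out write bios positioned at the current log tail. The trailing arguments of bio_alloc_bioset() are cut off in the listing, so the op, gfp flags and bio_set below are assumptions, as is the log_state structure standing in for struct r5l_log:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* hypothetical stand-in for the fields of struct r5l_log used here */
struct log_state {
	struct block_device *bdev;	/* log device */
	sector_t data_offset;		/* start of the data area on that device */
	sector_t log_start;		/* next free sector within the log */
	struct bio_set bs;		/* set up earlier with bioset_init() */
};

static struct bio *log_bio_alloc(struct log_state *log)
{
	struct bio *bio = bio_alloc_bioset(log->bdev, BIO_MAX_VECS,
					   REQ_OP_WRITE, GFP_NOIO, &log->bs);

	/* log positions are relative to the data area, hence the offset */
	bio->bi_iter.bi_sector = log->data_offset + log->log_start;
	return bio;
}

Allocating from a private, mempool-backed bio_set rather than the global pool guarantees forward progress under memory pressure, which is why GFP_NOIO is safe here.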
753 * which will require a new bio.
1098 int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
1104 * raid disks. So if bio is finished, the log disk cache is
1106 * the bio from log disk, so we don't need to flush again
1108 if (bio->bi_iter.bi_size == 0) {
1109 bio_endio(bio);
1112 bio->bi_opf &= ~REQ_PREFLUSH;
1115 if (bio->bi_iter.bi_size == 0) {
1118 bio_list_add(&log->current_io->flush_barriers, bio);
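r5l_handle_flush_request() shows three distinct ways a REQ_PREFLUSH bio is dealt with in the listing: an empty flush is either ended immediately or parked on the current io_unit's flush_barriers list, and a flush that also carries data simply has REQ_PREFLUSH cleared so the payload continues down the normal write path. A minimal sketch of those three bio operations, detached from the mode logic (not visible here) that chooses between them; 'barriers' stands in for current_io->flush_barriers:

#include <linux/bio.h>

static void flush_complete_now(struct bio *bio)
{
	/* empty flush: the log device cache is flushed before data reaches
	 * the raid disks, so nothing more is needed */
	bio_endio(bio);
}

static void flush_park(struct bio *bio, struct bio_list *barriers)
{
	/* empty flush deferred instead: ended when the io_unit completes
	 * (assumption about when the list is drained) */
	bio_list_add(barriers, bio);
}

static void flush_strip_preflush(struct bio *bio)
{
	/* flush + data: the log write provides the ordering, so drop the
	 * preflush and let the data go through the normal write path */
	bio->bi_opf &= ~REQ_PREFLUSH;
}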
1254 static void r5l_log_flush_endio(struct bio *bio)
1256 struct r5l_log *log = container_of(bio, struct r5l_log,
1261 if (bio->bi_status)
1263 bio_uninit(bio);
1294 /* flush bio is running */
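r5l_log_flush_endio() completes a flush issued on the bio embedded in struct r5l_log (the flush_bio member matched at the top of the listing): container_of() recovers the owning structure, bi_status is checked, and bio_uninit() makes the embedded bio reusable. A minimal sketch of that embedded-bio pattern; the flush_done completion and the log_issue_flush() side are assumptions about how such a bio is typically driven, not code from the listing:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>

/* hypothetical container embedding its flush bio, like r5l_log's flush_bio */
struct flush_state {
	struct block_device *bdev;
	struct bio flush_bio;
	struct completion flush_done;	/* assumed: what the flush waiter sleeps on */
};

static void log_flush_endio(struct bio *bio)
{
	struct flush_state *fs = container_of(bio, struct flush_state, flush_bio);

	if (bio->bi_status)
		pr_err("log flush failed\n");	/* assumed error handling */
	bio_uninit(bio);			/* embedded bio: uninit, never bio_put() */
	complete(&fs->flush_done);		/* assumed wake-up of the waiter */
}

static void log_issue_flush(struct flush_state *fs)
{
	/* the embedded bio is re-initialised for every flush */
	bio_init(&fs->flush_bio, fs->bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH);
	fs->flush_bio.bi_end_io = log_flush_endio;
	submit_bio(&fs->flush_bio);
}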
1670 struct bio bio;
1673 bio_init(&bio, log->rdev->bdev, ctx->ra_bvec,
1675 bio.bi_iter.bi_sector = log->rdev->data_offset + offset;
1681 __bio_add_page(&bio, ctx->ra_pool[ctx->valid_pages], PAGE_SIZE,
1691 ret = submit_bio_wait(&bio);
1692 bio_uninit(&bio);
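The recovery read-ahead uses an on-stack bio for a synchronous read: bio_init() with a caller-provided bio_vec array, __bio_add_page() for each pooled page, submit_bio_wait() to block until completion, then bio_uninit() because a stack bio must never be bio_put(). A minimal sketch of that pattern; nr_pages, pages and bvecs are assumptions standing in for the ra_pool/ra_bvec fields of the real read-ahead context:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>

static int read_pages_sync(struct block_device *bdev, sector_t sector,
			   struct page **pages, struct bio_vec *bvecs,
			   int nr_pages)
{
	struct bio bio;
	int i, ret;

	bio_init(&bio, bdev, bvecs, nr_pages, REQ_OP_READ);
	bio.bi_iter.bi_sector = sector;

	for (i = 0; i < nr_pages; i++)
		__bio_add_page(&bio, pages[i], PAGE_SIZE, 0);

	ret = submit_bio_wait(&bio);	/* blocks until the read completes */
	bio_uninit(&bio);		/* stack bio: uninit rather than bio_put */
	return ret;
}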