Lines Matching refs:bio

9  * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
18 #include <linux/bio.h>
155 * string format. Useful for debugging and tracing a bio or request. For
239 static void req_bio_endio(struct request *rq, struct bio *bio,
243 bio->bi_status = error;
246 bio_set_flag(bio, BIO_QUIET);
248 bio_advance(bio, nbytes);
255 if (bio->bi_iter.bi_size)
256 bio->bi_status = BLK_STS_IOERR;
258 bio->bi_iter.bi_sector = rq->__sector;
261 /* don't actually finish bio if it's part of flush sequence */
262 if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
263 bio_endio(bio);
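
req_bio_endio() (file lines 239-263 above) completes a bio piecewise: the finished byte count is consumed with bio_advance(), and bio_endio() only fires once bi_iter.bi_size has dropped to zero and the bio is not part of a flush sequence. A minimal sketch of that pattern, with a hypothetical helper name and chunk size (assumes chunk is non-zero):

        /* Hypothetical sketch: complete a bio in fixed-size chunks, mirroring req_bio_endio(). */
        static void complete_in_chunks(struct bio *bio, unsigned int chunk)
        {
                while (bio->bi_iter.bi_size) {
                        unsigned int nbytes = min(bio->bi_iter.bi_size, chunk);

                        bio_advance(bio, nbytes);       /* consume completed bytes */
                }
                bio_endio(bio);                         /* whole bio done: run ->bi_end_io() */
        }
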
275 printk(KERN_INFO " bio %p, biotail %p, len %u\n",
276 rq->bio, rq->biotail, blk_rq_bytes(rq));
394 /* for synchronous bio-based drivers, finish in-flight integrity i/o */
475 static inline int bio_queue_enter(struct bio *bio)
477 struct request_queue *q = bio->bi_disk->queue;
478 bool nowait = bio->bi_opf & REQ_NOWAIT;
484 bio_wouldblock_error(bio);
486 bio_io_error(bio);
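
bio_queue_enter() (file lines 475-486) takes a queue reference before a bio may be issued. If the reference cannot be obtained and the bio carries REQ_NOWAIT, the bio is failed with bio_wouldblock_error() (BLK_STS_AGAIN) rather than sleeping; otherwise it is failed with bio_io_error(). A sketch of that control flow, assuming the elided lines use blk_queue_enter() with BLK_MQ_REQ_NOWAIT as in upstream kernels of this era (the helper name is hypothetical):

        /* Sketch of bio_queue_enter()'s failure handling. */
        static int enter_queue_for_bio(struct bio *bio)
        {
                struct request_queue *q = bio->bi_disk->queue;
                bool nowait = bio->bi_opf & REQ_NOWAIT;
                int ret;

                ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0);
                if (unlikely(ret)) {
                        if (nowait && !blk_queue_dying(q))
                                bio_wouldblock_error(bio);      /* report -EAGAIN to the issuer */
                        else
                                bio_io_error(bio);              /* hard failure */
                }
                return ret;
        }
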
648 static void handle_bad_sector(struct bio *bio, sector_t maxsector)
654 bio_devname(bio, b), bio->bi_opf,
655 bio_end_sector(bio), maxsector);
693 static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
695 const int op = bio_op(bio);
700 if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
703 bio_devname(bio, b), part->partno);
711 static noinline int should_fail_bio(struct bio *bio)
713 if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
720 * Check whether this bio extends beyond the end of the device or partition.
724 static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
726 unsigned int nr_sectors = bio_sectors(bio);
730 bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
731 handle_bad_sector(bio, maxsector);
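
bio_check_eod() (file lines 724-731) rejects a bio whose sector range runs past maxsector. The comparison is arranged so that bi_sector + nr_sectors is never computed directly and therefore cannot overflow sector_t. A sketch of that test with a hypothetical helper name:

        /* Sketch of the overflow-safe end-of-device check in bio_check_eod(). */
        static bool bio_past_eod(struct bio *bio, sector_t maxsector)
        {
                unsigned int nr_sectors = bio_sectors(bio);

                if (!nr_sectors || !maxsector)
                        return false;
                /* written so that bi_sector + nr_sectors is never formed */
                return nr_sectors > maxsector ||
                       bio->bi_iter.bi_sector > maxsector - nr_sectors;
        }
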
740 static inline int blk_partition_remap(struct bio *bio)
746 p = __disk_get_part(bio->bi_disk, bio->bi_partno);
749 if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
751 if (unlikely(bio_check_ro(bio, p)))
754 if (bio_sectors(bio)) {
755 if (bio_check_eod(bio, part_nr_sects_read(p)))
757 bio->bi_iter.bi_sector += p->start_sect;
758 trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
759 bio->bi_iter.bi_sector - p->start_sect);
761 bio->bi_partno = 0;
772 struct bio *bio)
774 sector_t pos = bio->bi_iter.bi_sector;
775 int nr_sectors = bio_sectors(bio);
781 /* The bio sector must point to the start of a sequential zone */
798 bio->bi_opf |= REQ_NOMERGE;
803 static noinline_for_stack bool submit_bio_checks(struct bio *bio)
805 struct request_queue *q = bio->bi_disk->queue;
811 plug = blk_mq_plug(q, bio);
813 bio->bi_opf |= REQ_NOWAIT;
819 if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
822 if (should_fail_bio(bio))
825 if (bio->bi_partno) {
826 if (unlikely(blk_partition_remap(bio)))
829 if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
831 if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
836 * Filter flush bios early so that bio-based drivers without flush
839 if (op_is_flush(bio->bi_opf) &&
841 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
842 if (!bio_sectors(bio)) {
849 bio->bi_opf &= ~REQ_HIPRI;
851 switch (bio_op(bio)) {
865 status = blk_check_zone_append(q, bio);
897 if (blk_throtl_bio(bio))
900 blk_cgroup_bio_start(bio);
901 blkcg_bio_issue_init(bio);
903 if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
904 trace_block_bio_queue(q, bio);
908 bio_set_flag(bio, BIO_TRACE_COMPLETION);
915 bio->bi_status = status;
916 bio_endio(bio);
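
When any check in submit_bio_checks() fails, the bio is not issued at all: the error is stored in bi_status and the bio is completed immediately (file lines 915-916). The submitter only sees the failure in its ->bi_end_io() callback. A hypothetical callback, purely to illustrate where that status surfaces:

        /* Hypothetical completion callback: where a rejected bio's status becomes visible. */
        static void my_end_io(struct bio *bio)
        {
                blk_status_t status = bio->bi_status;   /* e.g. BLK_STS_NOTSUPP or BLK_STS_AGAIN */

                if (status)
                        pr_debug("bio failed: %d\n", blk_status_to_errno(status));
                bio_put(bio);
        }
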
920 static blk_qc_t __submit_bio(struct bio *bio)
922 struct gendisk *disk = bio->bi_disk;
925 if (blk_crypto_bio_prep(&bio)) {
927 return blk_mq_submit_bio(bio);
928 ret = disk->fops->submit_bio(bio);
938 * - Before entering the loop, bio->bi_next is NULL (as all callers ensure
939 * that), so we have a list with a single bio.
945 * - In this case we really did just take the bio off the top of the list (no
953 static blk_qc_t __submit_bio_noacct(struct bio *bio)
958 BUG_ON(bio->bi_next);
964 struct request_queue *q = bio->bi_disk->queue;
967 if (unlikely(bio_queue_enter(bio) != 0))
976 ret = __submit_bio(bio);
984 while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
985 if (q == bio->bi_disk->queue)
986 bio_list_add(&same, bio);
988 bio_list_add(&lower, bio);
996 } while ((bio = bio_list_pop(&bio_list_on_stack[0])));
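
__submit_bio_noacct() (file lines 953-996) flattens recursive submissions from stacked, bio-based drivers: bios queued on bio_list_on_stack while ->submit_bio ran are split into a list for lower devices and a list for the same queue, and the lower-device bios are requeued first so processing cannot recurse arbitrarily deep. A condensed sketch of that splitting step, assuming the two-element on-stack list and the merge order of the upstream function (the helper name is hypothetical):

        /* Sketch: sort bios queued during ->submit_bio by target queue, lower devices first. */
        static void sort_queued_bios(struct request_queue *q, struct bio_list *on_stack)
        {
                struct bio_list lower, same;
                struct bio *bio;

                bio_list_init(&lower);
                bio_list_init(&same);
                while ((bio = bio_list_pop(&on_stack[0])) != NULL) {
                        if (q == bio->bi_disk->queue)
                                bio_list_add(&same, bio);       /* same device: handle later */
                        else
                                bio_list_add(&lower, bio);      /* lower device: handle first */
                }
                bio_list_merge(&on_stack[0], &lower);
                bio_list_merge(&on_stack[0], &same);
                bio_list_merge(&on_stack[0], &on_stack[1]);     /* previously queued bios last */
        }
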
1002 static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
1010 struct gendisk *disk = bio->bi_disk;
1012 if (unlikely(bio_queue_enter(bio) != 0))
1015 if (!blk_crypto_bio_prep(&bio)) {
1021 ret = blk_mq_submit_bio(bio);
1022 } while ((bio = bio_list_pop(&bio_list[0])));
1029 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
1030 * @bio: The bio describing the location in memory and on the device.
1037 blk_qc_t submit_bio_noacct(struct bio *bio)
1039 if (!submit_bio_checks(bio))
1049 bio_list_add(&current->bio_list[0], bio);
1053 if (!bio->bi_disk->fops->submit_bio)
1054 return __submit_bio_noacct_mq(bio);
1055 return __submit_bio_noacct(bio);
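
submit_bio_noacct() (file lines 1037-1055) is also where recursion from stacked drivers is cut off: if current->bio_list is set, the submission came from inside an active ->submit_bio and is merely queued for the iteration already running higher up the stack; otherwise the bio takes the blk-mq path or the bio-based path depending on whether the gendisk provides ->submit_bio. The guard reduces to the pattern below (sketch; the preceding submit_bio_checks() call is omitted):

        /* Sketch of the recursion guard in submit_bio_noacct(). */
        if (current->bio_list) {
                /* called from within a ->submit_bio: defer to the outer iteration */
                bio_list_add(&current->bio_list[0], bio);
                return BLK_QC_T_NONE;
        }

        if (!bio->bi_disk->fops->submit_bio)
                return __submit_bio_noacct_mq(bio);     /* blk-mq based disk */
        return __submit_bio_noacct(bio);                /* bio-based driver */
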
1060 * submit_bio - submit a bio to the block device layer for I/O
1061 * @bio: The &struct bio which describes the I/O
1064 * fully set up &struct bio that describes the I/O that needs to be done. The
1065 bio will be sent to the device described by the bi_disk and bi_partno fields.
1069 * in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has
1072 blk_qc_t submit_bio(struct bio *bio)
1074 if (blkcg_punt_bio_submit(bio))
1081 if (bio_has_data(bio)) {
1084 if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
1085 count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
1087 count = bio_sectors(bio);
1089 if (op_is_write(bio_op(bio))) {
1092 task_io_account_read(bio->bi_iter.bi_size);
1103 if (unlikely(bio_op(bio) == REQ_OP_READ &&
1104 bio_flagged(bio, BIO_WORKINGSET))) {
1109 ret = submit_bio_noacct(bio);
1115 return submit_bio_noacct(bio);
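
submit_bio() (file lines 1072-1115) is the entry point filesystems and drivers call with a fully initialized bio; beyond task I/O accounting and the PSI workingset hand-off for reads, it simply delegates to submit_bio_noacct(). A minimal, hedged caller for this kernel era (bi_disk/bi_partno based, before the bi_bdev rework); bdev, page and the my_end_io callback above are hypothetical and error handling is omitted:

        /* Hypothetical helper: submit a one-page read of sector 0 of @bdev. */
        static void read_first_page(struct block_device *bdev, struct page *page)
        {
                struct bio *bio = bio_alloc(GFP_KERNEL, 1);

                bio_set_dev(bio, bdev);                 /* fills bi_disk / bi_partno */
                bio->bi_iter.bi_sector = 0;
                bio->bi_opf = REQ_OP_READ;
                bio->bi_end_io = my_end_io;             /* hypothetical callback, see above */
                bio_add_page(bio, page, PAGE_SIZE, 0);
                submit_bio(bio);
        }
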
1225 struct bio *bio;
1237 for (bio = rq->bio; bio; bio = bio->bi_next) {
1238 if ((bio->bi_opf & ff) != ff)
1240 bytes += bio->bi_iter.bi_size;
1330 struct bio *bio)
1332 *part = disk_map_sector_rcu(disk, bio->bi_iter.bi_sector);
1334 return __part_start_io_acct(*part, bio_sectors(bio), bio_op(bio));
1359 void part_end_io_acct(struct hd_struct *part, struct bio *bio,
1362 __part_end_io_acct(part, bio_op(bio), start_time);
1375 * Steal bios from a request and add them to a bio list.
1380 if (rq->bio) {
1382 list->tail->bi_next = rq->bio;
1384 list->head = rq->bio;
1387 rq->bio = NULL;
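
blk_steal_bios() (comment at file line 1375, body around 1380-1387) detaches the whole bio chain from a request and splices it onto a caller-supplied bio_list, then clears rq->bio so the request no longer owns the bios. A sketch of the splice, assuming the elided lines (the empty-list branch and the tail update) follow the obvious structure:

        /* Sketch: splice a request's bio chain onto the tail of a bio_list. */
        if (rq->bio) {
                if (list->tail)
                        list->tail->bi_next = rq->bio;  /* append chain to existing tail */
                else
                        list->head = rq->bio;           /* list was empty */
                list->tail = rq->biotail;
                rq->bio = NULL;                         /* request no longer owns the bios */
        }
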
1428 if (!req->bio)
1451 while (req->bio) {
1452 struct bio *bio = req->bio;
1453 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
1455 if (bio_bytes == bio->bi_iter.bi_size)
1456 req->bio = bio->bi_next;
1459 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1460 req_bio_endio(req, bio, bio_bytes, error);
1472 if (!req->bio) {
1488 /* mixed attributes always follow the first bio */
1491 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
1568 struct bio *bio;
1570 while ((bio = rq->bio) != NULL) {
1571 rq->bio = bio->bi_next;
1573 bio_put(bio);
1583 * @gfp_mask: memory allocation mask for bio
1584 * @bio_ctr: setup function to be called for each clone bio.
1597 int (*bio_ctr)(struct bio *, struct bio *, void *),
1600 struct bio *bio, *bio_src;
1606 bio = bio_clone_fast(bio_src, gfp_mask, bs);
1607 if (!bio)
1610 if (bio_ctr && bio_ctr(bio, bio_src, data))
1613 if (rq->bio) {
1614 rq->biotail->bi_next = bio;
1615 rq->biotail = bio;
1617 rq->bio = rq->biotail = bio;
1619 bio = NULL;
1632 if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
1638 if (bio)
1639 bio_put(bio);
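
blk_rq_prep_clone() (file lines 1597-1639) clones every bio of the source request with bio_clone_fast(), runs the caller's optional bio_ctr() hook on each clone, and chains the clones onto the new request via bio/biotail; on failure the partially built chain is torn down and the last clone is released with bio_put(). A hedged caller-side sketch; the hook body, the requests and the context pointer are hypothetical:

        /* Hypothetical bio_ctr() hook: stash per-clone context in bi_private. */
        static int my_bio_ctr(struct bio *clone, struct bio *bio_src, void *data)
        {
                clone->bi_private = data;       /* e.g. per-request driver state */
                return 0;                       /* non-zero aborts the clone */
        }

        /* clone_rq, src_rq, bs and ctx are assumed to exist in the caller. */
        err = blk_rq_prep_clone(clone_rq, src_rq, bs, GFP_ATOMIC, my_bio_ctr, ctx);
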
1794 sizeof_field(struct bio, bi_opf));