Lines Matching refs:bio
9 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
17 #include <linux/bio.h>
134 * string format. Useful in the debugging and tracing bio or request. For
338 int __bio_queue_enter(struct request_queue *q, struct bio *bio)
341 struct gendisk *disk = bio->bi_bdev->bd_disk;
343 if (bio->bi_opf & REQ_NOWAIT) {
346 bio_wouldblock_error(bio);
368 bio_io_error(bio);
499 static inline void bio_check_ro(struct bio *bio)
501 if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
502 if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
505 if (bio->bi_bdev->bd_ro_warned)
508 bio->bi_bdev->bd_ro_warned = true;
514 bio->bi_bdev);
518 static noinline int should_fail_bio(struct bio *bio)
520 if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
527 * Check whether this bio extends beyond the end of the device or partition.
531 static inline int bio_check_eod(struct bio *bio)
533 sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
534 unsigned int nr_sectors = bio_sectors(bio);
538 bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
541 current->comm, bio->bi_bdev, bio->bi_opf,
542 bio->bi_iter.bi_sector, nr_sectors, maxsector);
551 static int blk_partition_remap(struct bio *bio)
553 struct block_device *p = bio->bi_bdev;
555 if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
557 if (bio_sectors(bio)) {
558 bio->bi_iter.bi_sector += p->bd_start_sect;
559 trace_block_bio_remap(bio, p->bd_dev,
560 bio->bi_iter.bi_sector -
563 bio_set_flag(bio, BIO_REMAPPED);
571 struct bio *bio)
573 int nr_sectors = bio_sectors(bio);
576 if (!bdev_is_zoned(bio->bi_bdev))
579 /* The bio sector must point to the start of a sequential zone */
580 if (!bdev_is_zone_start(bio->bi_bdev, bio->bi_iter.bi_sector) ||
581 !bio_zone_is_seq(bio))
596 bio->bi_opf |= REQ_NOMERGE;
601 static void __submit_bio(struct bio *bio)
603 if (unlikely(!blk_crypto_bio_prep(&bio)))
606 if (!bio->bi_bdev->bd_has_submit_bio) {
607 blk_mq_submit_bio(bio);
608 } else if (likely(bio_queue_enter(bio) == 0)) {
609 struct gendisk *disk = bio->bi_bdev->bd_disk;
611 disk->fops->submit_bio(bio);
620 * - Before entering the loop, bio->bi_next is NULL (as all callers ensure
621 * that), so we have a list with a single bio.
627 * - In this case we really did just take the bio of the top of the list (no
635 static void __submit_bio_noacct(struct bio *bio)
639 BUG_ON(bio->bi_next);
645 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
654 __submit_bio(bio);
662 while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
663 if (q == bdev_get_queue(bio->bi_bdev))
664 bio_list_add(&same, bio);
666 bio_list_add(&lower, bio);
674 } while ((bio = bio_list_pop(&bio_list_on_stack[0])));
679 static void __submit_bio_noacct_mq(struct bio *bio)
686 __submit_bio(bio);
687 } while ((bio = bio_list_pop(&bio_list[0])));
692 void submit_bio_noacct_nocheck(struct bio *bio)
694 blk_cgroup_bio_start(bio);
695 blkcg_bio_issue_init(bio);
697 if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
698 trace_block_bio_queue(bio);
703 bio_set_flag(bio, BIO_TRACE_COMPLETION);
713 bio_list_add(&current->bio_list[0], bio);
714 else if (!bio->bi_bdev->bd_has_submit_bio)
715 __submit_bio_noacct_mq(bio);
717 __submit_bio_noacct(bio);
721 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
722 * @bio: The bio describing the location in memory and on the device.
729 void submit_bio_noacct(struct bio *bio)
731 struct block_device *bdev = bio->bi_bdev;
741 if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
744 if (should_fail_bio(bio))
746 bio_check_ro(bio);
747 if (!bio_flagged(bio, BIO_REMAPPED)) {
748 if (unlikely(bio_check_eod(bio)))
750 if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
755 * Filter flush bio's early so that bio based drivers without flush
758 if (op_is_flush(bio->bi_opf)) {
759 if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE &&
760 bio_op(bio) != REQ_OP_ZONE_APPEND))
763 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
764 if (!bio_sectors(bio)) {
772 bio_clear_polled(bio);
774 switch (bio_op(bio)) {
784 status = blk_check_zone_append(q, bio);
792 if (!bdev_is_zoned(bio->bi_bdev))
796 if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q))
807 if (blk_throtl_bio(bio))
809 submit_bio_noacct_nocheck(bio);
815 bio->bi_status = status;
816 bio_endio(bio);
821 * submit_bio - submit a bio to the block device layer for I/O
822 * @bio: The &struct bio which describes the I/O
825 * fully set up &struct bio that describes the I/O that needs to be done. The
826 * bio will be sent to the device described by the bi_bdev field.
830 * in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has
833 void submit_bio(struct bio *bio)
835 if (bio_op(bio) == REQ_OP_READ) {
836 task_io_account_read(bio->bi_iter.bi_size);
837 count_vm_events(PGPGIN, bio_sectors(bio));
838 } else if (bio_op(bio) == REQ_OP_WRITE) {
839 count_vm_events(PGPGOUT, bio_sectors(bio));
842 submit_bio_noacct(bio);
848 * @bio: bio to poll for
852 * Poll for completions on queue associated with the bio. Returns number of
855 * Note: the caller must either be the context that submitted @bio, or
856 * be in a RCU critical section to prevent freeing of @bio.
858 int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
860 blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
865 bdev = READ_ONCE(bio->bi_bdev);
899 ret = disk->fops->poll_bio(bio, iob, flags);
907 * Helper to implement file_operations.iopoll. Requires the bio to be stored
908 * in iocb->private, and cleared before freeing the bio.
913 struct bio *bio;
917 * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
918 * point to a freshly allocated bio at this point. If that happens
921 * 1) the bio is being initialized and bi_bdev is NULL. We can just
923 * 2) the bio points to a not poll enabled device. bio_poll will catch
925 * 3) the bio points to a poll capable device, including but not
926 * limited to the one that the original bio pointed to. In this
937 bio = READ_ONCE(kiocb->private);
938 if (bio)
939 ret = bio_poll(bio, iob, flags);
974 * bio_start_io_acct - start I/O accounting for bio based drivers
975 * @bio: bio to start account for
979 unsigned long bio_start_io_acct(struct bio *bio)
981 return bdev_start_io_acct(bio->bi_bdev, bio_op(bio), jiffies);
1002 void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
1005 bdev_end_io_acct(orig_bdev, bio_op(bio), bio_sectors(bio), start_time);
1198 sizeof_field(struct bio, bi_opf));