Lines Matching defs:bio (block/blk-mq.c)

11 #include <linux/bio.h>
603 rq->bio = rq->biotail = NULL;
676 rq->bio = rq->biotail = NULL;
756 printk(KERN_INFO " bio %p, biotail %p, len %u\n",
757 rq->bio, rq->biotail, blk_rq_bytes(rq));
761 static void req_bio_endio(struct request *rq, struct bio *bio,
765 bio->bi_status = error;
774 if (bio->bi_iter.bi_size != nbytes) {
775 bio->bi_status = BLK_STS_IOERR;
776 nbytes = bio->bi_iter.bi_size;
778 bio->bi_iter.bi_sector = rq->__sector;
782 bio_advance(bio, nbytes);
785 bio_set_flag(bio, BIO_QUIET);
786 /* don't actually finish bio if it's part of flush sequence */
787 if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
788 bio_endio(bio);
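
The req_bio_endio() lines above (761-788) show the per-bio completion pattern: stash the error in bi_status, advance the bio past the bytes that completed, and call bio_endio() only once nothing is left and the bio is not part of a flush sequence. Below is a minimal userspace sketch of that pattern, not kernel code: struct toy_bio and the toy_* helpers are stand-ins for the handful of fields used here, not the kernel definitions.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the fields req_bio_endio() touches. */
struct toy_bio {
        unsigned int    remaining;      /* ~ bio->bi_iter.bi_size */
        int             status;         /* ~ bio->bi_status */
        bool            flush_seq;      /* ~ rq->rq_flags & RQF_FLUSH_SEQ */
};

static void toy_bio_endio(struct toy_bio *bio)
{
        printf("bio done, status %d\n", bio->status);
}

/* Account @nbytes of completion against @bio; end it only when it has
 * nothing left and is not part of a flush sequence. */
static void toy_req_bio_endio(struct toy_bio *bio, unsigned int nbytes, int error)
{
        if (error)
                bio->status = error;
        if (nbytes > bio->remaining)            /* clamp; purely defensive in this toy */
                nbytes = bio->remaining;
        bio->remaining -= nbytes;               /* ~ bio_advance(bio, nbytes) */
        if (bio->remaining == 0 && !bio->flush_seq)
                toy_bio_endio(bio);
}

int main(void)
{
        struct toy_bio bio = { .remaining = 8192 };

        toy_req_bio_endio(&bio, 4096, 0);       /* partial update: stays pending */
        toy_req_bio_endio(&bio, 4096, 0);       /* remainder: bio is ended */
        return 0;
}
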
824 struct bio *bio = req->bio;
828 if (!bio)
845 struct bio *next = bio->bi_next;
848 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
851 bio->bi_iter.bi_sector = req->__sector;
854 bio_endio(bio);
855 bio = next;
856 } while (bio);
864 req->bio = NULL;
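
The block at 824-864 ends every bio still chained to the request through the singly linked bi_next list; the next pointer is read before bio_endio() because completion may free the bio. A small sketch of that walk, again with toy types rather than the kernel structs:

#include <stdio.h>
#include <stdlib.h>

struct toy_bio {
        struct toy_bio  *next;          /* ~ bio->bi_next */
        int             id;
};

static void toy_bio_endio(struct toy_bio *bio)
{
        printf("ending bio %d\n", bio->id);
        free(bio);                      /* completion can free the bio... */
}

/* ...so grab ->next first, as the do/while around lines 845-856 does. */
static void toy_end_all(struct toy_bio **req_bio)
{
        struct toy_bio *bio = *req_bio;

        while (bio) {
                struct toy_bio *next = bio->next;

                toy_bio_endio(bio);
                bio = next;
        }
        *req_bio = NULL;                /* ~ req->bio = NULL (line 864) */
}

int main(void)
{
        struct toy_bio *b1 = calloc(1, sizeof(*b1));
        struct toy_bio *b2 = calloc(1, sizeof(*b2));

        if (!b1 || !b2)
                return 1;
        b1->id = 1;
        b2->id = 2;
        b1->next = b2;
        toy_end_all(&b1);
        return 0;
}
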
898 if (!req->bio)
924 while (req->bio) {
925 struct bio *bio = req->bio;
926 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
928 if (bio_bytes == bio->bi_iter.bi_size)
929 req->bio = bio->bi_next;
932 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
933 req_bio_endio(req, bio, bio_bytes, error);
945 if (!req->bio) {
961 /* mixed attributes always follow the first bio */
964 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
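
Lines 898-964 are the partial-completion path of blk_update_request(): a byte budget is charged against the bio chain, a bio is only unhooked from req->bio once it completed in full, and for requests built from merged bios with mixed attributes the failfast flags keep following the first remaining bio. A hedged userspace model of the byte-budget loop (simplified types, plain C min):

#include <stdbool.h>
#include <stdio.h>

struct toy_bio {
        struct toy_bio  *next;          /* ~ bio->bi_next */
        unsigned int    remaining;      /* ~ bio->bi_iter.bi_size */
};

/* Charge up to @nr_bytes against the chain headed by *@req_bio, popping
 * each bio that completed in full; returns true while work remains. */
static bool toy_update_request(struct toy_bio **req_bio, unsigned int nr_bytes)
{
        while (*req_bio && nr_bytes) {
                struct toy_bio *bio = *req_bio;
                unsigned int bytes =
                        bio->remaining < nr_bytes ? bio->remaining : nr_bytes;

                bio->remaining -= bytes;
                nr_bytes -= bytes;
                if (bio->remaining == 0)
                        *req_bio = bio->next;   /* fully done: unhook it */
        }
        return *req_bio != NULL;                /* request still has bios pending */
}

int main(void)
{
        struct toy_bio b2 = { .next = NULL, .remaining = 4096 };
        struct toy_bio b1 = { .next = &b2,  .remaining = 4096 };
        struct toy_bio *req_bio = &b1;

        printf("pending: %d\n", toy_update_request(&req_bio, 6144));  /* 1 */
        printf("pending: %d\n", toy_update_request(&req_bio, 2048));  /* 0 */
        return 0;
}
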
1012 * All non-passthrough requests are created from a bio with one
1015 * lower device by dm-multipath we can get here without a bio.
1017 if (req->bio)
1018 req->part = req->bio->bi_bdev;
1091 prefetch(rq->bio);
1272 if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
1273 WRITE_ONCE(rq->bio->bi_cookie, rq->mq_hctx->queue_num);
2590 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
2595 if (bio->bi_opf & REQ_RAHEAD)
2598 rq->__sector = bio->bi_iter.bi_sector;
2599 blk_rq_bio_prep(rq, bio, nr_segs);
2602 err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
2888 struct bio *bio, unsigned int nr_segs)
2890 if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
2891 if (blk_attempt_plug_merge(q, bio, nr_segs))
2893 if (blk_mq_sched_bio_merge(q, bio, nr_segs))
2901 struct bio *bio,
2907 .cmd_flags = bio->bi_opf,
2911 if (blk_mq_attempt_bio_merge(q, bio, nsegs))
2914 rq_qos_throttle(q, bio);
2925 rq_qos_cleanup(q, bio);
2926 if (bio->bi_opf & REQ_NOWAIT)
2927 bio_wouldblock_error(bio);
2931 /* return true if this @rq can be used for @bio */
2933 struct bio *bio)
2935 enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
2943 if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
2952 rq_qos_throttle(rq->q, bio);
2955 rq->cmd_flags = bio->bi_opf;
2960 static void bio_set_ioprio(struct bio *bio)
2963 if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
2964 bio->bi_ioprio = get_current_ioprio();
2965 blkcg_set_ioprio(bio);
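
bio_set_ioprio() (2960-2965) assigns a priority only when the submitter left the bio without one, falling back to the current task's I/O priority before the blk-cgroup policy gets a chance to adjust it. A tiny sketch of that fallback, with stand-in enums rather than the kernel's IOPRIO_* encoding:

#include <stdio.h>

enum toy_ioprio_class { TOY_CLASS_NONE, TOY_CLASS_RT, TOY_CLASS_BE, TOY_CLASS_IDLE };

struct toy_bio {
        enum toy_ioprio_class   ioprio_class;   /* ~ IOPRIO_PRIO_CLASS(bio->bi_ioprio) */
};

/* Stand-in for get_current_ioprio(): pretend the submitting task is best-effort. */
static enum toy_ioprio_class toy_current_ioprio(void)
{
        return TOY_CLASS_BE;
}

/* Only fill in a priority if the submitter left the bio unprioritized,
 * mirroring the check at line 2963. */
static void toy_set_ioprio(struct toy_bio *bio)
{
        if (bio->ioprio_class == TOY_CLASS_NONE)
                bio->ioprio_class = toy_current_ioprio();
}

int main(void)
{
        struct toy_bio bio = { .ioprio_class = TOY_CLASS_NONE };

        toy_set_ioprio(&bio);
        printf("class %d\n", bio.ioprio_class);
        return 0;
}
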
2970 * @bio: Bio pointer.
2972 * Builds up a request structure from @q and @bio and send to the device. The
2978 * It will not queue the request if there is an error with the bio, or at the
2981 void blk_mq_submit_bio(struct bio *bio)
2983 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
2984 struct blk_plug *plug = blk_mq_plug(bio);
2985 const int is_sync = op_is_sync(bio->bi_opf);
2991 bio = blk_queue_bounce(bio, q);
2992 bio_set_ioprio(bio);
3000 if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
3001 bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
3002 if (!bio)
3005 if (!bio_integrity_prep(bio))
3007 if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
3009 if (blk_mq_can_use_cached_rq(rq, plug, bio))
3013 if (unlikely(bio_queue_enter(bio)))
3015 if (unlikely(bio_may_exceed_limits(bio, &q->limits))) {
3016 bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
3017 if (!bio)
3020 if (!bio_integrity_prep(bio))
3024 rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
3032 trace_block_getrq(bio);
3034 rq_qos_track(q, rq, bio);
3036 blk_mq_bio_to_request(rq, bio, nr_segs);
3040 bio->bi_status = ret;
3041 bio_endio(bio);
3046 if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
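
Taken together, the blk_mq_submit_bio() lines (2981-3046) give the order of the submission path: bounce/ioprio fixups, split a bio that exceeds queue limits, try to merge it into an existing request, then either reuse a plug-cached request or allocate a new one, turn the bio into the request, fail the bio itself (bi_status + bio_endio) if preparation fails, and route flush requests through their own insertion path (line 3046). The sketch below is only a control-flow outline with stub steps, not the kernel functions; every toy_* name is hypothetical.

#include <stdbool.h>
#include <stdio.h>

/* Stub steps standing in for the real helpers named in the listing. */
static bool toy_exceeds_limits(void)     { return true;  }   /* ~ bio_may_exceed_limits()    */
static void toy_split(void)              { puts("split"); }  /* ~ __bio_split_to_limits()    */
static bool toy_try_merge(void)          { return false; }   /* ~ blk_mq_attempt_bio_merge() */
static bool toy_have_cached_rq(void)     { return false; }   /* ~ blk_mq_can_use_cached_rq() */
static bool toy_get_new_rq(void)         { return true;  }   /* ~ blk_mq_get_new_requests()  */
static bool toy_prep_rq(void)            { return true;  }   /* ~ blk_mq_bio_to_request() + crypto prep */
static void toy_fail_bio(void)           { puts("bio_endio(error)"); }
static void toy_issue(void)              { puts("issue request"); }

static void toy_submit_bio(void)
{
        if (toy_exceeds_limits())
                toy_split();                    /* 1. cut the bio down to queue limits  */
        if (toy_try_merge())
                return;                         /* 2. merged into an existing request   */
        if (!toy_have_cached_rq() && !toy_get_new_rq())
                return;                         /* 3. reuse a cached rq or allocate one */
        if (!toy_prep_rq()) {
                toy_fail_bio();                 /* 4. prep failed: fail the bio itself  */
                return;
        }
        toy_issue();                            /* 5. hand the request onward           */
}

int main(void)
{
        toy_submit_bio();
        return 0;
}
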
3137 struct bio *bio;
3139 while ((bio = rq->bio) != NULL) {
3140 rq->bio = bio->bi_next;
3142 bio_put(bio);
3152 * @gfp_mask: memory allocation mask for bio
3153 * @bio_ctr: setup function to be called for each clone bio.
3166 int (*bio_ctr)(struct bio *, struct bio *, void *),
3169 struct bio *bio, *bio_src;
3175 bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask,
3177 if (!bio)
3180 if (bio_ctr && bio_ctr(bio, bio_src, data))
3183 if (rq->bio) {
3184 rq->biotail->bi_next = bio;
3185 rq->biotail = bio;
3187 rq->bio = rq->biotail = bio;
3189 bio = NULL;
3202 if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
3208 if (bio)
3209 bio_put(bio);
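
The blk_rq_prep_clone() lines (3166-3209) clone each bio of the source request and append the clone to the new request, keeping bio as the head and biotail as the tail of the chain, and dropping the in-flight clone if a later step fails. A short sketch of that head/tail append (toy types; allocation and the bio_ctr callback are left out):

#include <stddef.h>

struct toy_bio {
        struct toy_bio  *next;          /* ~ bio->bi_next */
};

struct toy_request {
        struct toy_bio  *bio;           /* head of the chain, ~ rq->bio     */
        struct toy_bio  *biotail;       /* tail of the chain, ~ rq->biotail */
};

/* Append one cloned bio to the request, mirroring lines 3183-3187. */
static void toy_append_bio(struct toy_request *rq, struct toy_bio *bio)
{
        bio->next = NULL;
        if (rq->bio) {
                rq->biotail->next = bio;        /* non-empty: hook after the tail    */
                rq->biotail = bio;
        } else {
                rq->bio = rq->biotail = bio;    /* empty: bio is both head and tail  */
        }
}

int main(void)
{
        struct toy_request rq = { 0 };
        struct toy_bio a = { 0 }, b = { 0 };

        toy_append_bio(&rq, &a);
        toy_append_bio(&rq, &b);
        return (rq.bio == &a && rq.biotail == &b) ? 0 : 1;
}
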
3218 * Steal bios from a request and add them to a bio list.
3223 if (rq->bio) {
3225 list->tail->bi_next = rq->bio;
3227 list->head = rq->bio;
3230 rq->bio = NULL;
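
blk_steal_bios() (3218-3230) splices the whole bio chain of a request onto the tail of a caller-provided bio list and then detaches it from the request, so the caller owns the bios from that point on. A sketch of the splice with a toy list that, like struct bio_list, keeps head and tail pointers:

#include <stddef.h>

struct toy_bio {
        struct toy_bio  *next;          /* ~ bio->bi_next */
};

struct toy_bio_list {
        struct toy_bio  *head;          /* ~ list->head */
        struct toy_bio  *tail;          /* ~ list->tail */
};

struct toy_request {
        struct toy_bio  *bio;           /* ~ rq->bio     */
        struct toy_bio  *biotail;       /* ~ rq->biotail */
};

/* Move every bio of @rq onto the tail of @list, then detach them from
 * the request, mirroring lines 3223-3230. */
static void toy_steal_bios(struct toy_bio_list *list, struct toy_request *rq)
{
        if (rq->bio) {
                if (list->tail)
                        list->tail->next = rq->bio;     /* append after current tail */
                else
                        list->head = rq->bio;           /* list was empty            */
                list->tail = rq->biotail;
                rq->bio = NULL;                         /* request no longer owns them */
        }
}

int main(void)
{
        struct toy_bio a = { 0 }, b = { 0 };
        struct toy_request rq = { .bio = &a, .biotail = &b };
        struct toy_bio_list list = { 0 };

        a.next = &b;
        toy_steal_bios(&list, &rq);
        return (list.head == &a && list.tail == &b && rq.bio == NULL) ? 0 : 1;
}
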