Lines matching references to the identifier req

53 static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
55 return bio_will_gap(req->q, req, req->biotail, bio);
58 static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
60 return bio_will_gap(req->q, NULL, bio, req->bio);
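
The two gap helpers above are matched almost in full; only the braces are missing. Both defer to bio_will_gap(), which rejects a merge that would leave a hole violating the queue's virt_boundary mask. A minimal reconstruction (the layout around the matched lines is assumed from context):

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	/* would appending bio after req->biotail create a forbidden gap? */
	return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	/* would prepending bio in front of req->bio create a forbidden gap? */
	return bio_will_gap(req->q, NULL, bio, req->bio);
}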
540 * segment is bigger than number of req's physical segments
555 static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
558 if (!blk_cgroup_mergeable(req, bio))
561 if (blk_integrity_merge_bio(req->q, req, bio) == false)
565 if (req_op(req) == REQ_OP_DISCARD)
568 if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
575 req->nr_phys_segments += nr_phys_segs;
579 req_set_nomerge(req->q, req);
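
ll_new_hw_segment() decides whether the bio's extra physical segments still fit in the request. The match output drops its early exits; in the sketch below the goto targets, return values and the no_merge label are reconstructed from context and should be read as assumptions:

static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
		unsigned int nr_phys_segs)
{
	if (!blk_cgroup_mergeable(req, bio))
		goto no_merge;

	if (blk_integrity_merge_bio(req->q, req, bio) == false)
		goto no_merge;

	/* a discard merge does not add a new segment */
	if (req_op(req) == REQ_OP_DISCARD)
		return 1;

	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
		goto no_merge;

	/* the bio fits: charge its segments to the request */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(req->q, req);
	return 0;
}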
583 int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
585 if (req_gap_back_merge(req, bio))
587 if (blk_integrity_rq(req) &&
588 integrity_req_gap_back_merge(req, bio))
590 if (!bio_crypt_ctx_back_mergeable(req, bio))
592 if (blk_rq_sectors(req) + bio_sectors(bio) >
593 blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
594 req_set_nomerge(req->q, req);
598 return ll_new_hw_segment(req, bio, nr_segs);
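
ll_back_merge_fn() is the gate for appending a bio to an existing request: every matched condition above rejects the merge, and only then is ll_new_hw_segment() consulted. The early `return 0` lines are not in the match output and are filled in below as an assumption; ll_front_merge_fn(), whose matches follow, mirrors this flow with the front-gap, front-crypto and bio-start-sector variants:

int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_back_mergeable(req, bio))
		return 0;
	/* the grown request must stay under the per-request sector limit */
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}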
601 static int ll_front_merge_fn(struct request *req, struct bio *bio,
604 if (req_gap_front_merge(req, bio))
606 if (blk_integrity_rq(req) &&
607 integrity_req_gap_front_merge(req, bio))
609 if (!bio_crypt_ctx_front_mergeable(req, bio))
611 if (blk_rq_sectors(req) + bio_sectors(bio) >
612 blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
613 req_set_nomerge(req->q, req);
617 return ll_new_hw_segment(req, bio, nr_segs);
620 static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
623 unsigned short segments = blk_rq_nr_discard_segments(req);
627 if (blk_rq_sectors(req) + bio_sectors(next->bio) >
628 blk_rq_get_max_sectors(req, blk_rq_pos(req)))
631 req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
634 req_set_nomerge(q, req);
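
req_attempt_discard_merge() glues two discard requests into one multi-range discard. Only the sector-limit test and the segment accounting are matched; the discard-segment-count check against queue_max_discard_segments() and the true/false exits are reconstructed here and are assumptions about this particular tree:

static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;

	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	/* the merged request now carries both requests' discard ranges */
	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}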
638 static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
643 if (req_gap_back_merge(req, next->bio))
649 if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
650 blk_rq_get_max_sectors(req, blk_rq_pos(req)))
653 total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
654 if (total_phys_segments > blk_rq_get_max_segments(req))
657 if (!blk_cgroup_mergeable(req, next->bio))
660 if (blk_integrity_merge_rq(q, req, next) == false)
663 if (!bio_crypt_ctx_merge_rq(req, next))
667 req->nr_phys_segments = total_phys_segments;
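
ll_merge_requests_fn() applies the same gating to a request-on-request merge: gap, total size, total segment count, cgroup, integrity and inline-crypto compatibility, in that order. Each matched condition causes an early `return 0` (reconstructed below, along with the declaration of total_phys_segments and the final `return 1`):

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/* would the combined request exceed the sector limit? */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (total_phys_segments > blk_rq_get_max_segments(req))
		return 0;

	if (!blk_cgroup_mergeable(req, next->bio))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	if (!bio_crypt_ctx_merge_rq(req, next))
		return 0;

	/* merge is acceptable: record the combined segment count */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}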
701 static void blk_account_io_merge_request(struct request *req)
703 if (blk_do_io_stat(req)) {
705 part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
708 hd_struct_put(req->part);
712 static enum elv_merge blk_try_req_merge(struct request *req,
715 if (blk_discard_mergable(req))
717 else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
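
blk_try_req_merge() classifies how two requests relate: mergeable discards yield a discard merge, a request that ends exactly where the next one begins yields a back merge, anything else is no merge. The ELEVATOR_* return values are elided by the match and assumed below:

static enum elv_merge blk_try_req_merge(struct request *req,
					struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}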
728 struct request *req, struct request *next)
730 if (!rq_mergeable(req) || !rq_mergeable(next))
733 if (req_op(req) != req_op(next))
736 if (rq_data_dir(req) != rq_data_dir(next)
737 || req->rq_disk != next->rq_disk)
740 if (req_op(req) == REQ_OP_WRITE_SAME &&
741 !blk_write_same_mergeable(req->bio, next->bio))
748 if (req->write_hint != next->write_hint)
751 if (req->ioprio != next->ioprio)
762 switch (blk_try_req_merge(req, next)) {
764 if (!req_attempt_discard_merge(q, req, next))
768 if (!ll_merge_requests_fn(q, req, next))
781 if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
782 (req->cmd_flags & REQ_FAILFAST_MASK) !=
784 blk_rq_set_mixed_merge(req);
793 if (next->start_time_ns < req->start_time_ns)
794 req->start_time_ns = next->start_time_ns;
796 req->biotail->bi_next = next->bio;
797 req->biotail = next->biotail;
799 req->__data_len += blk_rq_bytes(next);
801 if (!blk_discard_mergable(req))
802 elv_merge_requests(q, req, next);
814 * ownership of bio passed from next to req, return 'next' for
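
The matches from attempt_merge() show its overall shape: a series of compatibility checks (same operation, direction, disk, write hint and I/O priority, plus the WRITE_SAME payload comparison), a switch on blk_try_req_merge(), FAILFAST mixed-merge handling, start-time propagation, and finally splicing next's bio chain onto req. The sketch below is deliberately abridged; the case labels, returns and everything else between the matched lines are assumptions, and the statistics, tracing and the final hand-off of next's bios are left out:

static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;
	if (req_op(req) != req_op(next))
		return NULL;
	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk)
		return NULL;
	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return NULL;
	if (req->write_hint != next->write_hint)
		return NULL;
	if (req->ioprio != next->ioprio)
		return NULL;

	switch (blk_try_req_merge(req, next)) {
	case ELEVATOR_DISCARD_MERGE:
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
			return NULL;
		break;
	default:
		return NULL;
	}

	/* if FAILFAST attributes differ, the merged request becomes mixed */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK))
		blk_rq_set_mixed_merge(req);

	/* keep the earliest start time for fair accounting */
	if (next->start_time_ns < req->start_time_ns)
		req->start_time_ns = next->start_time_ns;

	/* splice next's bios onto req and grow its byte count */
	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;
	req->__data_len += blk_rq_bytes(next);

	if (!blk_discard_mergable(req))
		elv_merge_requests(q, req, next);

	/* ownership of the bios moves to req; 'next' is returned to be freed */
	return next;
}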
911 static void blk_account_io_merge_bio(struct request *req)
913 if (!blk_do_io_stat(req))
917 part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
927 static enum bio_merge_status bio_attempt_back_merge(struct request *req,
932 if (!ll_back_merge_fn(req, bio, nr_segs))
935 trace_block_bio_backmerge(req->q, req, bio);
936 rq_qos_merge(req->q, req, bio);
938 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
939 blk_rq_set_mixed_merge(req);
941 req->biotail->bi_next = bio;
942 req->biotail = bio;
943 req->__data_len += bio->bi_iter.bi_size;
947 blk_account_io_merge_bio(req);
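
bio_attempt_back_merge() is where an accepted back merge is applied: after ll_back_merge_fn() approves, the bio is chained behind req->biotail and the request's byte count grows. The BIO_MERGE_* returns and the ff initialisation are missing from the match output and are reconstructed below as assumptions; bio_attempt_front_merge(), whose matches follow, mirrors this but prepends the bio, rewinds req->__sector and calls bio_crypt_do_front_merge():

static enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_backmerge(req->q, req, bio);
	rq_qos_merge(req->q, req, bio);

	/* if the bio's FAILFAST attributes differ from the request's, mark it mixed */
	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	/* chain the bio behind the current tail and grow the request */
	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}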
951 static enum bio_merge_status bio_attempt_front_merge(struct request *req,
956 if (!ll_front_merge_fn(req, bio, nr_segs))
959 trace_block_bio_frontmerge(req->q, req, bio);
960 rq_qos_merge(req->q, req, bio);
962 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
963 blk_rq_set_mixed_merge(req);
965 bio->bi_next = req->bio;
966 req->bio = bio;
968 req->__sector = bio->bi_iter.bi_sector;
969 req->__data_len += bio->bi_iter.bi_size;
971 bio_crypt_do_front_merge(req, bio);
973 blk_account_io_merge_bio(req);
978 struct request *req, struct bio *bio)
980 unsigned short segments = blk_rq_nr_discard_segments(req);
984 if (blk_rq_sectors(req) + bio_sectors(bio) >
985 blk_rq_get_max_sectors(req, blk_rq_pos(req)))
988 rq_qos_merge(q, req, bio);
990 req->biotail->bi_next = bio;
991 req->biotail = bio;
992 req->__data_len += bio->bi_iter.bi_size;
993 req->nr_phys_segments = segments + 1;
995 blk_account_io_merge_bio(req);
998 req_set_nomerge(q, req);
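
bio_attempt_discard_merge() appends one more discard range to an existing discard request, bumping nr_phys_segments by one instead of recounting segments. The queue_max_discard_segments() check, the goto target and the BIO_MERGE_* exits are not in the match output; the sketch fills them in from context as assumptions:

static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
		struct request *req, struct bio *bio)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	rq_qos_merge(q, req, bio);

	/* the bio becomes one more discard range on the request */
	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->nr_phys_segments = segments + 1;

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
no_merge:
	req_set_nomerge(q, req);
	return BIO_MERGE_FAILED;
}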