Lines matching refs:rq in block/blk-mq.c (each hit is prefixed with its line number in that file)
42 #include "blk-rq-qos.h"
48 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
49 static void blk_mq_request_bypass_insert(struct request *rq,
92 static bool blk_mq_check_inflight(struct request *rq, void *priv)
96 if (rq->part && blk_do_io_stat(rq) &&
97 (!mi->part->bd_partno || rq->part == mi->part) &&
98 blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
99 mi->inflight[rq_data_dir(rq)]++;
315 void blk_rq_init(struct request_queue *q, struct request *rq)
317 memset(rq, 0, sizeof(*rq));
319 INIT_LIST_HEAD(&rq->queuelist);
320 rq->q = q;
321 rq->__sector = (sector_t) -1;
322 INIT_HLIST_NODE(&rq->hash);
323 RB_CLEAR_NODE(&rq->rb_node);
324 rq->tag = BLK_MQ_NO_TAG;
325 rq->internal_tag = BLK_MQ_NO_TAG;
326 rq->start_time_ns = ktime_get_ns();
327 rq->part = NULL;
328 blk_crypto_rq_set_defaults(rq);
333 static inline void blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns)
335 if (blk_mq_need_time_stamp(rq))
336 rq->start_time_ns = ktime_get_ns();
338 rq->start_time_ns = 0;
341 if (blk_queue_rq_alloc_time(rq->q))
342 rq->alloc_time_ns = alloc_time_ns ?: rq->start_time_ns;
344 rq->alloc_time_ns = 0;
354 struct request *rq = tags->static_rqs[tag];
356 rq->q = q;
357 rq->mq_ctx = ctx;
358 rq->mq_hctx = hctx;
359 rq->cmd_flags = data->cmd_flags;
365 rq->rq_flags = data->rq_flags;
368 rq->tag = BLK_MQ_NO_TAG;
369 rq->internal_tag = tag;
371 rq->tag = tag;
372 rq->internal_tag = BLK_MQ_NO_TAG;
374 rq->timeout = 0;
376 rq->part = NULL;
377 rq->io_start_time_ns = 0;
378 rq->stats_sectors = 0;
379 rq->nr_phys_segments = 0;
381 rq->nr_integrity_segments = 0;
383 rq->end_io = NULL;
384 rq->end_io_data = NULL;
386 blk_crypto_rq_set_defaults(rq);
387 INIT_LIST_HEAD(&rq->queuelist);
389 WRITE_ONCE(rq->deadline, 0);
390 req_ref_set(rq, 1);
392 if (rq->rq_flags & RQF_USE_SCHED) {
395 INIT_HLIST_NODE(&rq->hash);
396 RB_CLEAR_NODE(&rq->rb_node);
399 e->type->ops.prepare_request(rq);
402 return rq;
410 struct request *rq;
425 rq = blk_mq_rq_ctx_init(data, tags, tag);
426 rq_list_add(data->cached_rq, rq);
440 struct request *rq;
486 rq = __blk_mq_alloc_requests_batch(data);
487 if (rq) {
488 blk_mq_rq_time_init(rq, alloc_time_ns);
489 return rq;
513 rq = blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag);
514 blk_mq_rq_time_init(rq, alloc_time_ns);
515 return rq;
530 struct request *rq;
537 rq = __blk_mq_alloc_requests(&data);
538 if (unlikely(!rq))
540 return rq;
548 struct request *rq;
556 rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
557 if (!rq)
560 rq = rq_list_peek(&plug->cached_rq);
561 if (!rq || rq->q != q)
564 if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)
566 if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
569 plug->cached_rq = rq_list_next(rq);
570 blk_mq_rq_time_init(rq, 0);
573 rq->cmd_flags = opf;
574 INIT_LIST_HEAD(&rq->queuelist);
575 return rq;
581 struct request *rq;
583 rq = blk_mq_alloc_cached_request(q, opf, flags);
584 if (!rq) {
597 rq = __blk_mq_alloc_requests(&data);
598 if (!rq)
601 rq->__data_len = 0;
602 rq->__sector = (sector_t) -1;
603 rq->bio = rq->biotail = NULL;
604 return rq;
621 struct request *rq;
672 rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag);
673 blk_mq_rq_time_init(rq, alloc_time_ns);
674 rq->__data_len = 0;
675 rq->__sector = (sector_t) -1;
676 rq->bio = rq->biotail = NULL;
677 return rq;
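
The blk_mq_alloc_request() and blk_mq_alloc_request_hctx() hits above are the allocation side of the API; the blk_mq_alloc_cached_request() hits are an internal fast path of the same call, reusing requests cached in the task's plug. What follows is not part of the listing, just a minimal usage sketch, assuming a caller that already holds a request_queue and a driver with a per-request PDU (struct demo_cmd and the function name are hypothetical):

#include <linux/blk-mq.h>

struct demo_cmd {                       /* hypothetical driver PDU, sized via cmd_size */
        u16 opcode;
};

static int demo_alloc_and_free(struct request_queue *q)
{
        struct request *rq;
        struct demo_cmd *cmd;

        /* returns an ERR_PTR() on failure, never NULL */
        rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        cmd = blk_mq_rq_to_pdu(rq);     /* driver-private payload behind the request */
        cmd->opcode = 0;

        blk_mq_free_request(rq);        /* puts the request; the tag goes back once the last reference drops */
        return 0;
}
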
685 static void blk_mq_finish_request(struct request *rq)
687 struct request_queue *q = rq->q;
689 if (rq->rq_flags & RQF_USE_SCHED) {
690 q->elevator->type->ops.finish_request(rq);
694 * to avoid double finish_request() on the rq.
696 rq->rq_flags &= ~RQF_USE_SCHED;
700 static void __blk_mq_free_request(struct request *rq)
702 struct request_queue *q = rq->q;
703 struct blk_mq_ctx *ctx = rq->mq_ctx;
704 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
705 const int sched_tag = rq->internal_tag;
707 blk_crypto_free_request(rq);
708 blk_pm_mark_last_busy(rq);
709 rq->mq_hctx = NULL;
711 if (rq->rq_flags & RQF_MQ_INFLIGHT)
714 if (rq->tag != BLK_MQ_NO_TAG)
715 blk_mq_put_tag(hctx->tags, ctx, rq->tag);
722 void blk_mq_free_request(struct request *rq)
724 struct request_queue *q = rq->q;
726 blk_mq_finish_request(rq);
728 if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
731 rq_qos_done(q, rq);
733 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
734 if (req_ref_put_and_test(rq))
735 __blk_mq_free_request(rq);
741 struct request *rq;
743 while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
744 blk_mq_free_request(rq);
747 void blk_dump_rq_flags(struct request *rq, char *msg)
750 rq->q->disk ? rq->q->disk->disk_name : "?",
751 (__force unsigned long long) rq->cmd_flags);
754 (unsigned long long)blk_rq_pos(rq),
755 blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
757 rq->bio, rq->biotail, blk_rq_bytes(rq));
761 static void req_bio_endio(struct request *rq, struct bio *bio,
766 } else if (req_op(rq) == REQ_OP_ZONE_APPEND) {
778 bio->bi_iter.bi_sector = rq->__sector;
784 if (unlikely(rq->rq_flags & RQF_QUIET))
787 if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
1028 static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
1030 if (rq->rq_flags & RQF_STATS)
1031 blk_stat_add(rq, now);
1033 blk_mq_sched_completed_request(rq, now);
1034 blk_account_io_done(rq, now);
1037 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
1039 if (blk_mq_need_time_stamp(rq))
1040 __blk_mq_end_request_acct(rq, ktime_get_ns());
1042 blk_mq_finish_request(rq);
1044 if (rq->end_io) {
1045 rq_qos_done(rq->q, rq);
1046 if (rq->end_io(rq, error) == RQ_END_IO_FREE)
1047 blk_mq_free_request(rq);
1049 blk_mq_free_request(rq);
1054 void blk_mq_end_request(struct request *rq, blk_status_t error)
1056 if (blk_update_request(rq, error, blk_rq_bytes(rq)))
1058 __blk_mq_end_request(rq, error);
1084 struct request *rq;
1090 while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
1091 prefetch(rq->bio);
1092 prefetch(rq->rq_next);
1094 blk_complete_request(rq);
1096 __blk_mq_end_request_acct(rq, now);
1098 blk_mq_finish_request(rq);
1100 rq_qos_done(rq->q, rq);
1106 if (rq->end_io && rq->end_io(rq, 0) == RQ_END_IO_NONE)
1109 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
1110 if (!req_ref_put_and_test(rq))
1113 blk_crypto_free_request(rq);
1114 blk_pm_mark_last_busy(rq);
1116 if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
1120 cur_hctx = rq->mq_hctx;
1122 tags[nr_tags++] = rq->tag;
1133 struct request *rq, *next;
1135 llist_for_each_entry_safe(rq, next, entry, ipi_list)
1136 rq->q->mq_ops->complete(rq);
1155 static inline bool blk_mq_complete_need_ipi(struct request *rq)
1160 !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
1172 if (cpu == rq->mq_ctx->cpu ||
1173 (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
1174 cpus_share_cache(cpu, rq->mq_ctx->cpu)))
1178 return cpu_online(rq->mq_ctx->cpu);
1181 static void blk_mq_complete_send_ipi(struct request *rq)
1185 cpu = rq->mq_ctx->cpu;
1186 if (llist_add(&rq->ipi_list, &per_cpu(blk_cpu_done, cpu)))
1190 static void blk_mq_raise_softirq(struct request *rq)
1196 if (llist_add(&rq->ipi_list, list))
1201 bool blk_mq_complete_request_remote(struct request *rq)
1203 WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
1210 if ((rq->mq_hctx->nr_ctx == 1 &&
1211 rq->mq_ctx->cpu == raw_smp_processor_id()) ||
1212 rq->cmd_flags & REQ_POLLED)
1215 if (blk_mq_complete_need_ipi(rq)) {
1216 blk_mq_complete_send_ipi(rq);
1220 if (rq->q->nr_hw_queues == 1) {
1221 blk_mq_raise_softirq(rq);
1230 * @rq: the request being processed
1235 void blk_mq_complete_request(struct request *rq)
1237 if (!blk_mq_complete_request_remote(rq))
1238 rq->q->mq_ops->complete(rq);
1244 * @rq: Pointer to request to be started
1250 void blk_mq_start_request(struct request *rq)
1252 struct request_queue *q = rq->q;
1254 trace_block_rq_issue(rq);
1257 rq->io_start_time_ns = ktime_get_ns();
1258 rq->stats_sectors = blk_rq_sectors(rq);
1259 rq->rq_flags |= RQF_STATS;
1260 rq_qos_issue(q, rq);
1263 WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
1265 blk_add_timer(rq);
1266 WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
1269 if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
1270 q->integrity.profile->prepare_fn(rq);
1272 if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
1273 WRITE_ONCE(rq->bio->bi_cookie, rq->mq_hctx->queue_num);
1289 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
1294 trace_block_plug(rq->q);
1296 (!blk_queue_nomerges(rq->q) &&
1300 trace_block_plug(rq->q);
1303 if (!plug->multiple_queues && last && last->q != rq->q)
1309 if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
1311 rq->rq_next = NULL;
1312 rq_list_add(&plug->mq_list, rq);
1318 * @rq: request to insert
1328 void blk_execute_rq_nowait(struct request *rq, bool at_head)
1330 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1333 WARN_ON(!blk_rq_is_passthrough(rq));
1335 blk_account_io_start(rq);
1343 blk_add_rq_to_plug(current->plug, rq);
1347 blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
1357 static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
1359 struct blk_rq_wait *wait = rq->end_io_data;
1366 bool blk_rq_is_poll(struct request *rq)
1368 if (!rq->mq_hctx)
1370 if (rq->mq_hctx->type != HCTX_TYPE_POLL)
1376 static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
1379 blk_hctx_poll(rq->q, rq->mq_hctx, NULL, 0);
1386 * @rq: request to insert
1394 blk_status_t blk_execute_rq(struct request *rq, bool at_head)
1396 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1402 WARN_ON(!blk_rq_is_passthrough(rq));
1404 rq->end_io_data = &wait;
1405 rq->end_io = blk_end_sync_rq;
1407 blk_account_io_start(rq);
1408 blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
1411 if (blk_rq_is_poll(rq)) {
1412 blk_rq_poll_completion(rq, &wait.done);
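
blk_execute_rq() above is the synchronous way to run a passthrough request: it inserts and accounts the request, then sleeps (or polls, when blk_rq_is_poll() is true) until the blk_end_sync_rq() end_io handler above signals completion. blk_execute_rq_nowait() is the asynchronous variant, where the caller supplies rq->end_io instead. A hedged end-to-end sketch of the synchronous form (demo names are hypothetical):

#include <linux/blk-mq.h>

static int demo_send_internal_command(struct request_queue *q)
{
        struct request *rq;
        blk_status_t status;

        rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* ... set up the driver PDU and any data buffer here ... */

        status = blk_execute_rq(rq, false);     /* false: insert at the tail */
        blk_mq_free_request(rq);

        return blk_status_to_errno(status);
}
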
1432 static void __blk_mq_requeue_request(struct request *rq)
1434 struct request_queue *q = rq->q;
1436 blk_mq_put_driver_tag(rq);
1438 trace_block_rq_requeue(rq);
1439 rq_qos_requeue(q, rq);
1441 if (blk_mq_request_started(rq)) {
1442 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
1443 rq->rq_flags &= ~RQF_TIMED_OUT;
1447 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
1449 struct request_queue *q = rq->q;
1452 __blk_mq_requeue_request(rq);
1455 blk_mq_sched_requeue_request(rq);
1458 list_add_tail(&rq->queuelist, &q->requeue_list);
1472 struct request *rq;
1480 rq = list_entry(rq_list.next, struct request, queuelist);
1487 if (rq->rq_flags & RQF_DONTPREP) {
1488 list_del_init(&rq->queuelist);
1489 blk_mq_request_bypass_insert(rq, 0);
1491 list_del_init(&rq->queuelist);
1492 blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD);
1497 rq = list_entry(flush_list.next, struct request, queuelist);
1498 list_del_init(&rq->queuelist);
1499 blk_mq_insert_request(rq, 0);
1519 static bool blk_is_flush_data_rq(struct request *rq)
1521 return (rq->rq_flags & RQF_FLUSH_SEQ) && !is_flush_rq(rq);
1524 static bool blk_mq_rq_inflight(struct request *rq, void *priv)
1536 if (blk_mq_request_started(rq) && !(blk_queue_quiesced(rq->q) &&
1537 blk_is_flush_data_rq(rq) &&
1538 blk_mq_request_completed(rq))) {
1578 static bool blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired)
1582 if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
1584 if (rq->rq_flags & RQF_TIMED_OUT)
1587 deadline = READ_ONCE(rq->deadline);
1598 void blk_mq_put_rq_ref(struct request *rq)
1600 if (is_flush_rq(rq)) {
1601 if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
1602 blk_mq_free_request(rq);
1603 } else if (req_ref_put_and_test(rq)) {
1604 __blk_mq_free_request(rq);
1608 static bool blk_mq_check_expired(struct request *rq, void *priv)
1619 if (blk_mq_req_expired(rq, expired)) {
1626 static bool blk_mq_handle_expired(struct request *rq, void *priv)
1630 if (blk_mq_req_expired(rq, expired))
1631 blk_mq_rq_timed_out(rq);
1730 struct request *rq;
1743 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
1744 list_del_init(&dispatch_data->rq->queuelist);
1750 return !dispatch_data->rq;
1759 .rq = NULL,
1765 return data.rq;
1768 static bool __blk_mq_alloc_driver_tag(struct request *rq)
1770 struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
1771 unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
1774 blk_mq_tag_busy(rq->mq_hctx);
1776 if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
1777 bt = &rq->mq_hctx->tags->breserved_tags;
1780 if (!hctx_may_queue(rq->mq_hctx, bt))
1788 rq->tag = tag + tag_offset;
1792 bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq)
1794 if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
1798 !(rq->rq_flags & RQF_MQ_INFLIGHT)) {
1799 rq->rq_flags |= RQF_MQ_INFLIGHT;
1802 hctx->tags->rqs[rq->tag] = rq;
1834 struct request *rq)
1853 return blk_mq_get_driver_tag(rq);
1860 if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag))
1899 ret = blk_mq_get_driver_tag(rq);
1946 static void blk_mq_handle_dev_resource(struct request *rq,
1949 list_add(&rq->queuelist, list);
1950 __blk_mq_requeue_request(rq);
1953 static void blk_mq_handle_zone_resource(struct request *rq,
1962 list_add(&rq->queuelist, zone_list);
1963 __blk_mq_requeue_request(rq);
1972 static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
1975 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1979 budget_token = blk_mq_get_dispatch_budget(rq->q);
1981 blk_mq_put_driver_tag(rq);
1984 blk_mq_set_rq_budget_token(rq, budget_token);
1987 if (!blk_mq_get_driver_tag(rq)) {
1995 if (!blk_mq_mark_tag_wait(hctx, rq)) {
2001 blk_mq_put_dispatch_budget(rq->q, budget_token);
2013 struct request *rq;
2015 list_for_each_entry(rq, list, queuelist) {
2016 int budget_token = blk_mq_get_rq_budget_token(rq);
2048 struct request *rq;
2064 rq = list_first_entry(list, struct request, queuelist);
2066 WARN_ON_ONCE(hctx != rq->mq_hctx);
2067 prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
2071 list_del_init(&rq->queuelist);
2073 bd.rq = rq;
2091 blk_mq_handle_dev_resource(rq, list);
2099 blk_mq_handle_zone_resource(rq, &zone_list);
2103 blk_mq_end_request(rq, ret);
2161 * and dm-rq.
2473 * @rq: Pointer to request to be inserted.
2479 static void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
2481 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2485 list_add(&rq->queuelist, &hctx->dispatch);
2487 list_add_tail(&rq->queuelist, &hctx->dispatch);
2495 struct request *rq;
2513 list_for_each_entry(rq, list, queuelist) {
2514 BUG_ON(rq->mq_ctx != ctx);
2515 trace_block_rq_insert(rq);
2516 if (rq->cmd_flags & REQ_NOWAIT)
2528 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
2530 struct request_queue *q = rq->q;
2531 struct blk_mq_ctx *ctx = rq->mq_ctx;
2532 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2534 if (blk_rq_is_passthrough(rq)) {
2545 blk_mq_request_bypass_insert(rq, flags);
2546 } else if (req_op(rq) == REQ_OP_FLUSH) {
2557 * rq to the front of hctx->dispatch, it is easier to introduce
2558 * extra time to flush rq's latency because of S_SCHED_RESTART
2563 * drive when adding flush rq to the front of hctx->dispatch.
2565 * Simply queue flush rq to the front of hctx->dispatch so that
2568 blk_mq_request_bypass_insert(rq, BLK_MQ_INSERT_AT_HEAD);
2572 WARN_ON_ONCE(rq->tag != BLK_MQ_NO_TAG);
2574 list_add(&rq->queuelist, &list);
2577 trace_block_rq_insert(rq);
2581 list_add(&rq->queuelist, &ctx->rq_lists[hctx->type]);
2583 list_add_tail(&rq->queuelist,
2590 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
2596 rq->cmd_flags |= REQ_FAILFAST_MASK;
2598 rq->__sector = bio->bi_iter.bi_sector;
2599 blk_rq_bio_prep(rq, bio, nr_segs);
2602 err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
2605 blk_account_io_start(rq);
2609 struct request *rq, bool last)
2611 struct request_queue *q = rq->q;
2613 .rq = rq,
2631 __blk_mq_requeue_request(rq);
2641 static bool blk_mq_get_budget_and_tag(struct request *rq)
2645 budget_token = blk_mq_get_dispatch_budget(rq->q);
2648 blk_mq_set_rq_budget_token(rq, budget_token);
2649 if (!blk_mq_get_driver_tag(rq)) {
2650 blk_mq_put_dispatch_budget(rq->q, budget_token);
2659 * @rq: Pointer to request to be sent.
2667 struct request *rq)
2671 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
2672 blk_mq_insert_request(rq, 0);
2676 if ((rq->rq_flags & RQF_USE_SCHED) || !blk_mq_get_budget_and_tag(rq)) {
2677 blk_mq_insert_request(rq, 0);
2678 blk_mq_run_hw_queue(hctx, rq->cmd_flags & REQ_NOWAIT);
2682 ret = __blk_mq_issue_directly(hctx, rq, true);
2688 blk_mq_request_bypass_insert(rq, 0);
2692 blk_mq_end_request(rq, ret);
2697 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
2699 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2701 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
2702 blk_mq_insert_request(rq, 0);
2706 if (!blk_mq_get_budget_and_tag(rq))
2708 return __blk_mq_issue_directly(hctx, rq, last);
2714 struct request *rq;
2718 while ((rq = rq_list_pop(&plug->mq_list))) {
2721 if (hctx != rq->mq_hctx) {
2726 hctx = rq->mq_hctx;
2729 ret = blk_mq_request_issue_directly(rq, last);
2736 blk_mq_request_bypass_insert(rq, 0);
2740 blk_mq_end_request(rq, ret);
2769 struct request *rq = rq_list_pop(&plug->mq_list);
2772 this_hctx = rq->mq_hctx;
2773 this_ctx = rq->mq_ctx;
2774 is_passthrough = blk_rq_is_passthrough(rq);
2775 } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx ||
2776 is_passthrough != blk_rq_is_passthrough(rq)) {
2777 rq_list_add_tail(&requeue_lastp, rq);
2780 list_add(&rq->queuelist, &list);
2806 struct request *rq;
2822 rq = rq_list_peek(&plug->mq_list);
2823 q = rq->q;
2836 !(rq->mq_hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
2861 struct request *rq = list_first_entry(list, struct request,
2864 list_del_init(&rq->queuelist);
2865 ret = blk_mq_request_issue_directly(rq, list_empty(list));
2872 blk_mq_request_bypass_insert(rq, 0);
2877 blk_mq_end_request(rq, ret);
2909 struct request *rq;
2922 rq = __blk_mq_alloc_requests(&data);
2923 if (rq)
2924 return rq;
2931 /* return true if this @rq can be used for @bio */
2932 static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
2936 enum hctx_type hctx_type = rq->mq_hctx->type;
2938 WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
2943 if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
2951 plug->cached_rq = rq_list_next(rq);
2952 rq_qos_throttle(rq->q, bio);
2954 blk_mq_rq_time_init(rq, 0);
2955 rq->cmd_flags = bio->bi_opf;
2956 INIT_LIST_HEAD(&rq->queuelist);
2987 struct request *rq = NULL;
2995 rq = rq_list_peek(&plug->cached_rq);
2996 if (rq && rq->q != q)
2997 rq = NULL;
2999 if (rq) {
3009 if (blk_mq_can_use_cached_rq(rq, plug, bio))
3024 rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
3025 if (unlikely(!rq)) {
3034 rq_qos_track(q, rq, bio);
3036 blk_mq_bio_to_request(rq, bio, nr_segs);
3038 ret = blk_crypto_rq_get_keyslot(rq);
3042 blk_mq_free_request(rq);
3046 if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
3050 blk_add_rq_to_plug(plug, rq);
3054 hctx = rq->mq_hctx;
3055 if ((rq->rq_flags & RQF_USE_SCHED) ||
3057 blk_mq_insert_request(rq, 0);
3060 blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq));
3067 * @rq: the request being queued
3069 blk_status_t blk_insert_cloned_request(struct request *rq)
3071 struct request_queue *q = rq->q;
3072 unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
3073 unsigned int max_segments = blk_rq_get_max_segments(rq);
3076 if (blk_rq_sectors(rq) > max_sectors) {
3091 __func__, blk_rq_sectors(rq), max_sectors);
3099 rq->nr_phys_segments = blk_recalc_rq_segments(rq);
3100 if (rq->nr_phys_segments > max_segments) {
3102 __func__, rq->nr_phys_segments, max_segments);
3106 if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq)))
3109 ret = blk_crypto_rq_get_keyslot(rq);
3113 blk_account_io_start(rq);
3121 ret = blk_mq_request_issue_directly(rq, true));
3123 blk_account_io_done(rq, ktime_get_ns());
3130 * @rq: the clone request to be cleaned up
3133 * Free all bios in @rq for a cloned request.
3135 void blk_rq_unprep_clone(struct request *rq)
3139 while ((bio = rq->bio) != NULL) {
3140 rq->bio = bio->bi_next;
3149 * @rq: the request to be setup
3158 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
3162 * the caller must complete @rq before @rq_src.
3164 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
3175 bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask,
3183 if (rq->bio) {
3184 rq->biotail->bi_next = bio;
3185 rq->biotail = bio;
3187 rq->bio = rq->biotail = bio;
3193 rq->__sector = blk_rq_pos(rq_src);
3194 rq->__data_len = blk_rq_bytes(rq_src);
3196 rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
3197 rq->special_vec = rq_src->special_vec;
3199 rq->nr_phys_segments = rq_src->nr_phys_segments;
3200 rq->ioprio = rq_src->ioprio;
3202 if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
3210 blk_rq_unprep_clone(rq);
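
blk_rq_prep_clone() and blk_rq_unprep_clone() above are the request-cloning helpers used by request-based stacking drivers such as dm-rq; a prepared clone is then typically sent down with blk_insert_cloned_request(), whose hits also appear above. A hedged sketch of the pattern, with demo names and with the clone assumed to have been allocated on the lower device's queue:

#include <linux/blk-mq.h>

static enum rq_end_io_ret demo_clone_end_io(struct request *clone,
                                            blk_status_t error)
{
        /* complete the original request here, then let blk-mq free the clone */
        return RQ_END_IO_FREE;
}

static int demo_setup_clone(struct request *clone, struct request *rq_src,
                            struct bio_set *bs)
{
        int ret;

        /* clone rq_src's bios into clone and copy sector, length and related attributes */
        ret = blk_rq_prep_clone(clone, rq_src, bs, GFP_NOIO, NULL, NULL);
        if (ret)
                return ret;

        clone->end_io = demo_clone_end_io;
        return 0;
}

static void demo_teardown_clone(struct request *clone)
{
        blk_rq_unprep_clone(clone);             /* frees the cloned bios */
}
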
3221 void blk_steal_bios(struct bio_list *list, struct request *rq)
3223 if (rq->bio) {
3225 list->tail->bi_next = rq->bio;
3227 list->head = rq->bio;
3228 list->tail = rq->biotail;
3230 rq->bio = NULL;
3231 rq->biotail = NULL;
3234 rq->__data_len = 0;
3263 struct request *rq = drv_tags->rqs[i];
3264 unsigned long rq_addr = (unsigned long)rq;
3267 WARN_ON_ONCE(req_ref_read(rq) != 0);
3268 cmpxchg(&drv_tags->rqs[i], rq, NULL);
3301 struct request *rq = tags->static_rqs[i];
3303 if (!rq)
3305 set->ops->exit_request(set, rq, hctx_idx);
3398 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
3404 ret = set->ops->init_request(set, rq, hctx_idx, node);
3409 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
3471 struct request *rq = p;
3473 tags->static_rqs[i] = rq;
3474 if (blk_mq_init_request(set, rq, hctx_idx, node)) {
3495 static bool blk_mq_has_request(struct request *rq, void *data)
3499 if (rq->mq_hctx != iter_data->hctx)
4868 int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
4871 struct request_queue *q = rq->q;
4874 if (!blk_rq_is_poll(rq))
4879 ret = blk_hctx_poll(q, rq->mq_hctx, iob, poll_flags);
4886 unsigned int blk_mq_rq_cpu(struct request *rq)
4888 return rq->mq_ctx->cpu;