Lines Matching refs:plug

519 struct blk_plug *plug,
527 .nr_tags = plug->nr_ios,
528 .cached_rq = &plug->cached_rq,
535 plug->nr_ios = 1;
547 struct blk_plug *plug = current->plug;
550 if (!plug)
553 if (rq_list_empty(plug->cached_rq)) {
554 if (plug->nr_ios == 1)
556 rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
560 rq = rq_list_peek(&plug->cached_rq);
569 plug->cached_rq = rq_list_next(rq);
739 void blk_mq_free_plug_rqs(struct blk_plug *plug)
743 while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
1278 * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
1282 static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
1284 if (plug->multiple_queues)
1289 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
1291 struct request *last = rq_list_peek(&plug->mq_list);
1293 if (!plug->rq_count) {
1295 } else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
1298 blk_mq_flush_plug_list(plug, false);
1303 if (!plug->multiple_queues && last && last->q != rq->q)
1304 plug->multiple_queues = true;
1309 if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
1310 plug->has_elevator = true;
1312 rq_list_add(&plug->mq_list, rq);
1313 plug->rq_count++;
1339 * device, directly accessing the plug instead of using blk_mq_plug()
1342 if (current->plug && !at_head) {
1343 blk_add_rq_to_plug(current->plug, rq);
2510 * preemption doesn't flush plug list, so it's possible ctx->cpu is
2711 static void blk_mq_plug_issue_direct(struct blk_plug *plug)
2718 while ((rq = rq_list_pop(&plug->mq_list))) {
2719 bool last = rq_list_empty(plug->mq_list);
2751 struct blk_plug *plug)
2755 q->mq_ops->queue_rqs(&plug->mq_list);
2758 static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
2769 struct request *rq = rq_list_pop(&plug->mq_list);
2782 } while (!rq_list_empty(plug->mq_list));
2784 plug->mq_list = requeue_list;
2804 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2810 * plug->mq_list via a schedule() in the driver's queue_rq() callback.
2815 if (plug->rq_count == 0)
2817 plug->rq_count = 0;
2819 if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
2822 rq = rq_list_peek(&plug->mq_list);
2827 * If we do, we can dispatch the whole plug list in one go. We
2838 __blk_mq_flush_plug_list(q, plug));
2839 if (rq_list_empty(plug->mq_list))
2844 blk_mq_plug_issue_direct(plug));
2845 if (rq_list_empty(plug->mq_list))
2850 blk_mq_dispatch_plug_list(plug, from_schedule);
2851 } while (!rq_list_empty(plug->mq_list));
2900 struct blk_plug *plug,
2916 if (plug) {
2917 data.nr_tags = plug->nr_ios;
2918 plug->nr_ios = 1;
2919 data.cached_rq = &plug->cached_rq;
2932 static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
2938 WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
2948 * plug and hence killed the cached_rq list as well. Pop this entry
2951 plug->cached_rq = rq_list_next(rq);
2975 * * We want to place request at plug queue for possible future merging
2984 struct blk_plug *plug = blk_mq_plug(bio);
2994 if (plug) {
2995 rq = rq_list_peek(&plug->cached_rq);
3009 if (blk_mq_can_use_cached_rq(rq, plug, bio))
3024 rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
3049 if (plug) {
3050 blk_add_rq_to_plug(plug, rq);
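
All of the matches above sit in the blk-mq plugging path: a per-task struct blk_plug batches freshly built requests on plug->mq_list (blk_add_rq_to_plug), keeps a small cache of pre-allocated requests on plug->cached_rq sized by plug->nr_ios, and is drained by blk_mq_flush_plug_list(). As a minimal sketch only (not one of the matched lines), this is how a submitter typically drives that state, assuming the usual blk_start_plug()/blk_finish_plug() pairing; submit_batch, bios and nr are made-up names for illustration:

#include <linux/blkdev.h>
#include <linux/bio.h>

/*
 * Sketch: the plug lives on the caller's stack and is wired up as
 * current->plug for the duration of the batch, so every submit_bio()
 * below lands on plug.mq_list instead of being dispatched one by one.
 */
static void submit_batch(struct bio **bios, unsigned int nr)
{
	struct blk_plug plug;
	unsigned int i;

	blk_start_plug(&plug);		/* current->plug = &plug */
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);	/* requests collect on plug.mq_list */
	blk_finish_plug(&plug);		/* ends up in blk_mq_flush_plug_list() */
}

The nr_ios/cached_rq pair seen in the matches serves batched submitters that start the plug with blk_start_plug_nr_ios() so several requests can be allocated in one go and then handed out from the cache.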