Lines Matching defs:plug
1896 * preemption doesn't flush plug list, so it's possible ctx->cpu is
1924 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1928 if (list_empty(&plug->mq_list))
1930 list_splice_init(&plug->mq_list, &list);
1932 if (plug->rq_count > 2 && plug->multiple_queues)
1935 plug->rq_count = 0;
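The excerpt above shows only the entry of blk_mq_flush_plug_list(). Below is a hedged reconstruction of roughly how the rest of the function reads in kernels of this vintage (around v5.6-v5.10): the plugged requests are spliced onto a local list, sorted only when more than two requests span multiple queues, then peeled off in runs that share the same software/hardware queue context and handed to the scheduler insert path. The comparator plug_rq_cmp and the peeling loop are not in the listing and are reconstructed from memory, so treat them as an approximation rather than a quote.

static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct request *rqa = container_of(a, struct request, queuelist);
        struct request *rqb = container_of(b, struct request, queuelist);

        /* Group by software ctx, then hardware ctx, then sector order. */
        if (rqa->mq_ctx != rqb->mq_ctx)
                return rqa->mq_ctx > rqb->mq_ctx;
        if (rqa->mq_hctx != rqb->mq_hctx)
                return rqa->mq_hctx > rqb->mq_hctx;
        return blk_rq_pos(rqa) > blk_rq_pos(rqb);
}

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
        LIST_HEAD(list);

        if (list_empty(&plug->mq_list))
                return;
        list_splice_init(&plug->mq_list, &list);

        /* Sorting only pays off once several queues are interleaved. */
        if (plug->rq_count > 2 && plug->multiple_queues)
                list_sort(NULL, &list, plug_rq_cmp);

        plug->rq_count = 0;

        /*
         * Peel off runs of requests that share (mq_ctx, mq_hctx) and insert
         * each run through the scheduler path in one batch.
         */
        do {
                struct list_head rq_list;
                struct request *head_rq = list_entry_rq(list.next);
                struct list_head *pos = &head_rq->queuelist;
                unsigned int depth = 1;

                list_for_each_continue(pos, &list) {
                        struct request *rq = list_entry_rq(pos);

                        if (rq->mq_hctx != head_rq->mq_hctx ||
                            rq->mq_ctx != head_rq->mq_ctx)
                                break;
                        depth++;
                }

                list_cut_before(&rq_list, &list, pos);
                trace_block_unplug(head_rq->q, depth, !from_schedule);
                blk_mq_sched_insert_requests(head_rq->mq_hctx,
                                             head_rq->mq_ctx,
                                             &rq_list, from_schedule);
        } while (!list_empty(&list));
}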
2140 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
2142 list_add_tail(&rq->queuelist, &plug->mq_list);
2143 plug->rq_count++;
2144 if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) {
2147 tmp = list_first_entry(&plug->mq_list, struct request,
2150 plug->multiple_queues = true;
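The listing drops two lines of blk_add_rq_to_plug() because they do not contain the symbol plug. Filled in (hedged against trees of this era), the helper reads roughly as below; the elided pieces are the local struct request *tmp and the tmp->q != rq->q comparison that flips multiple_queues the first time a request for a different request_queue lands on the plug list.

static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
{
        list_add_tail(&rq->queuelist, &plug->mq_list);
        plug->rq_count++;
        if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) {
                struct request *tmp;

                tmp = list_first_entry(&plug->mq_list, struct request,
                                       queuelist);
                /* Oldest plugged request belongs to another queue? */
                if (tmp->q != rq->q)
                        plug->multiple_queues = true;
        }
}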
2155 * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
2159 static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
2161 if (plug->multiple_queues)
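The return statements of blk_plug_max_rq_count() do not contain the symbol plug, so the listing elides them. A hedged completion: the per-plug limit doubles (BLK_MAX_REQUEST_COUNT is 16 in mainline trees of this era) once the plug spans multiple queues, which is what lets stacked devices such as md accumulate enough requests to merge.

/* Hedged completion of the helper above. */
static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
{
        if (plug->multiple_queues)
                return BLK_MAX_REQUEST_COUNT * 2;
        return BLK_MAX_REQUEST_COUNT;
}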
2173 * * We want to place request at plug queue for possible future merging
2190 struct blk_plug *plug;
2236 plug = blk_mq_plug(q, bio);
2241 } else if (plug && (q->nr_hw_queues == 1 ||
2249 * IO may benefit a lot from plug merging.
2251 unsigned int request_count = plug->rq_count;
2257 last = list_entry_rq(plug->mq_list.prev);
2259 if (request_count >= blk_plug_max_rq_count(plug) || (last &&
2261 blk_flush_plug_list(plug, false);
2265 blk_add_rq_to_plug(plug, rq);
2269 } else if (plug && !blk_queue_nomerges(q)) {
2272 * Otherwise the existing request in the plug list will be
2273 * issued. So the plug list will have one request at most.
2274 * The plug list might get flushed before this. If that happens,
2275 * the plug list is empty, and same_queue_rq is invalid.
2277 if (list_empty(&plug->mq_list))
2281 plug->rq_count--;
2283 blk_add_rq_to_plug(plug, rq);
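The blk_mq_submit_bio() fragments between lines 2190 and 2283 belong to its two plug branches. The condensed sketch below shows how they fit together in kernels of this vintage; everything not present in the listing (the full branch conditions, the same_queue_rq bookkeeping, blk_mq_try_issue_directly() and the cookie variable) is reconstructed and should be checked against the tree in question.

        /* Condensed, hedged fragment of the blk_mq_submit_bio() body. */
        plug = blk_mq_plug(q, bio);
        if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
                     !blk_queue_nonrot(q))) {
                /*
                 * Batch on the plug list: a single hw queue, a driver with a
                 * ->commit_rqs() hook, or a rotational disk whose sequential
                 * IO benefits a lot from plug merging.
                 */
                struct request *last = NULL;

                if (plug->rq_count)
                        last = list_entry_rq(plug->mq_list.prev);

                /*
                 * Flush first if the plug is full or the last plugged
                 * request is already large (BLK_PLUG_FLUSH_SIZE), since
                 * further merging then buys little.
                 */
                if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
                    (last && blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE))
                        blk_flush_plug_list(plug, false);

                blk_add_rq_to_plug(plug, rq);
        } else if (plug && !blk_queue_nomerges(q)) {
                /*
                 * Limited plugging: keep at most one request plugged so a
                 * later bio can still merge into it, and issue the request
                 * previously plugged for the same queue directly.
                 */
                if (list_empty(&plug->mq_list))
                        same_queue_rq = NULL;
                if (same_queue_rq) {
                        list_del_init(&same_queue_rq->queuelist);
                        plug->rq_count--;
                }
                blk_add_rq_to_plug(plug, rq);

                if (same_queue_rq)
                        blk_mq_try_issue_directly(same_queue_rq->mq_hctx,
                                                  same_queue_rq, &cookie);
        }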
3968 if (current->plug)
3969 blk_flush_plug_list(current->plug, false);
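Lines 3968-3969 appear to be blk_poll() flushing the submitter's pending plugged IO before it starts polling for completions. From the caller's side, that per-task plug is scoped with blk_start_plug()/blk_finish_plug(), which is what sets current->plug in the first place. A minimal, hedged caller-side sketch (submit_my_bios() is a hypothetical stand-in for the caller's own submission loop):

/*
 * Hedged caller-side sketch: blk_start_plug()/blk_finish_plug() delimit the
 * per-task plug that the excerpts above append to and flush.
 */
#include <linux/blkdev.h>

void submit_my_bios(void);      /* hypothetical; submits several bios */

static void submit_batched_io(void)
{
        struct blk_plug plug;

        blk_start_plug(&plug);  /* current->plug now points at 'plug' */
        submit_my_bios();       /* requests collect on plug.mq_list   */
        blk_finish_plug(&plug); /* flushed via blk_flush_plug_list()  */
}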