Lines Matching refs:plug
876 * first place, directly accessing the plug instead of using
880 blk_flush_plug(current->plug, false);
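These hits are all from block/blk-core.c. The 876/880 hunk is the tail end of a comment in bio_poll(): if memory serves, the full comment explains that requests needing a zone write lock are never plugged in the first place, so it is safe to flush through current->plug directly rather than going through blk_mq_plug(). The blk_flush_plug() being called is the NULL-tolerant inline wrapper around __blk_flush_plug() (line 1146 below); a sketch of it as I recall it from include/linux/blkdev.h of the same era, not a verbatim quote:

static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
	if (plug)
		__blk_flush_plug(plug, async);
}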
1050 void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
1055 * If this is a nested plug, don't actually assign it.
1057 if (tsk->plug)
1060 plug->mq_list = NULL;
1061 plug->cached_rq = NULL;
1062 plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
1063 plug->rq_count = 0;
1064 plug->multiple_queues = false;
1065 plug->has_elevator = false;
1066 INIT_LIST_HEAD(&plug->cb_list);
1072 tsk->plug = plug;
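The tsk->plug check at 1057 is what makes plugging nesting-safe: only the outermost blk_start_plug() installs its on-stack plug, and blk_finish_plug() (1173 below) only flushes when handed that same plug. A minimal sketch of the resulting behavior, caller code hypothetical:

struct blk_plug outer, inner;

blk_start_plug(&outer);		/* current->plug = &outer */
blk_start_plug(&inner);		/* current->plug already set: no-op */
/* ... submit_bio() calls here batch onto &outer ... */
blk_finish_plug(&inner);	/* &inner != current->plug: also a no-op */
blk_finish_plug(&outer);	/* flushes the batch, clears current->plug */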
1077 * @plug: The &struct blk_plug that needs to be initialized
1095 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
1098 void blk_start_plug(struct blk_plug *plug)
1100 blk_start_plug_nr_ios(plug, 1);
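blk_start_plug() is just the nr_ios == 1 case of the function above. The canonical use, per the kernel-doc, is to bracket a batch of submissions so requests accumulate on the task's plug and reach the driver in one flush; a sketch, with bios[] and nr_bios as hypothetical caller state:

struct blk_plug plug;
int i;

blk_start_plug(&plug);
for (i = 0; i < nr_bios; i++)
	submit_bio(bios[i]);	/* parked on current->plug, not yet dispatched */
blk_finish_plug(&plug);		/* one flush hands the whole batch to the driver */

Callers that know the batch size up front (io_uring, notably) call blk_start_plug_nr_ios() directly so tag and request allocations can be batched as well, capped at BLK_MAX_REQUEST_COUNT as line 1062 shows.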
1104 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
1108 while (!list_empty(&plug->cb_list)) {
1109 list_splice_init(&plug->cb_list, &callbacks);
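Only the signature and two body lines of flush_plug_callbacks() contain the symbol; the elided inner loop pops each blk_plug_cb off the spliced-away list and invokes it. Reconstructed from memory, roughly:

LIST_HEAD(callbacks);

while (!list_empty(&plug->cb_list)) {
	list_splice_init(&plug->cb_list, &callbacks);

	while (!list_empty(&callbacks)) {
		struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
		list_del(&cb->list);
		cb->callback(cb, from_schedule);
	}
}

The outer loop re-checks cb_list because a callback is allowed to queue further callbacks while it runs.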
1124 struct blk_plug *plug = current->plug;
1127 if (!plug)
1130 list_for_each_entry(cb, &plug->cb_list, list)
1140 list_add(&cb->list, &plug->cb_list);
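blk_check_plugged() (1124-1140) is the driver-facing hook behind cb_list: given an unplug callback, it returns the matching entry already on the current plug, or kzalloc()s and links a new one, and returns NULL when no plug is active. A sketch of the usage pattern, modeled loosely on how md drivers batch bios; all demo_* names are hypothetical:

struct demo_plug_cb {
	struct blk_plug_cb cb;		/* embedded; the core links cb.list */
	struct bio_list pending;	/* kzalloc() zeroing leaves this a valid empty list */
};

static void demo_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct demo_plug_cb *dcb = container_of(cb, struct demo_plug_cb, cb);
	struct bio *bio;

	/* A real driver would punt to a worker when from_schedule is true. */
	while ((bio = bio_list_pop(&dcb->pending)))
		submit_bio_noacct(bio);
	kfree(dcb);			/* the callback owns and frees its entry */
}

/* Returns true if @bio was parked on the current task's plug. */
static bool demo_defer_bio(struct bio *bio)
{
	struct blk_plug_cb *cb;

	cb = blk_check_plugged(demo_unplug, NULL, sizeof(struct demo_plug_cb));
	if (!cb)
		return false;		/* no plug in effect; caller submits directly */
	bio_list_add(&container_of(cb, struct demo_plug_cb, cb)->pending, bio);
	return true;
}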
1146 void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
1148 if (!list_empty(&plug->cb_list))
1149 flush_plug_callbacks(plug, from_schedule);
1150 blk_mq_flush_plug_list(plug, from_schedule);
1157 if (unlikely(!rq_list_empty(plug->cached_rq)))
1158 blk_mq_free_plug_rqs(plug);
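__blk_flush_plug() drains in a fixed order: the md-style callbacks first, then the plugged blk-mq requests, and finally, unconditionally, any unused cached requests, since those pin queue references that a long-sleeping task should not sit on. The from_schedule flag distinguishes the two unplug paths; a hypothetical sketch of each (the scheduler side goes through the blk_flush_plug() wrapper sketched near the top):

/* blk_finish_plug() path: the task unplugs explicitly, dispatch may run here. */
__blk_flush_plug(plug, false);

/* Scheduler path (io_schedule() and friends): flush before sleeping so queued
 * I/O cannot deadlock behind us; from_schedule == true asks for async dispatch
 * (punted to kblockd) instead of running it in the about-to-sleep context. */
if (current->plug)
	__blk_flush_plug(current->plug, true);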
1163 * @plug: The &struct blk_plug passed to blk_start_plug()
1171 void blk_finish_plug(struct blk_plug *plug)
1173 if (plug == current->plug) {
1174 __blk_flush_plug(plug, false);
1175 current->plug = NULL;