Lines Matching defs:bfqq

137 void bfq_mark_bfqq_##name(struct bfq_queue *bfqq)			\
139 __set_bit(BFQQF_##name, &(bfqq)->flags); \
141 void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \
143 __clear_bit(BFQQF_##name, &(bfqq)->flags); \
145 int bfq_bfqq_##name(const struct bfq_queue *bfqq) \
147 return test_bit(BFQQF_##name, &(bfqq)->flags); \
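
The three fragments above (source lines 137-147) come from the BFQ_BFQQ_FNS() helper macro, which stamps out a mark/clear/test accessor trio for every per-queue state flag. A minimal sketch of the expansion, assuming the flag name busy (other generated names such as bfq_bfqq_sync() and bfq_mark_bfqq_IO_bound() appear later in this listing):

/* Hedged sketch of what BFQ_BFQQ_FNS(busy) expands to, reassembled from
 * the fragments above; __set_bit(), __clear_bit() and test_bit() are the
 * standard kernel bitop helpers. */
void bfq_mark_bfqq_busy(struct bfq_queue *bfqq)
{
	__set_bit(BFQQF_busy, &(bfqq)->flags);
}

void bfq_clear_bfqq_busy(struct bfq_queue *bfqq)
{
	__clear_bit(BFQQF_busy, &(bfqq)->flags);
}

int bfq_bfqq_busy(const struct bfq_queue *bfqq)
{
	return test_bit(BFQQF_busy, &(bfqq)->flags);
}
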
237 #define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 19)
245 #define BFQQ_TOTALLY_SEEKY(bfqq) (bfqq->seek_history == -1)
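
Both macros above read bfqq->seek_history as a 32-bit window of per-request seekiness samples (hweight32() implies the width). The window is refilled in bfq_update_io_seektime(), whose update also appears later in this listing (source lines 5919-5920); a short annotated sketch:

/* Hedged sketch of how the seek-history window feeds the two macros. */
bfqq->seek_history <<= 1;                 /* age out the oldest sample */
bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq);

/* BFQQ_SEEKY(): more than 19 of the last 32 requests were seeky.          */
/* BFQQ_TOTALLY_SEEKY(): every sample seeky, i.e. seek_history == (u32)-1. */
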
383 return bic->bfqq[1][actuator_idx];
385 return bic->bfqq[0][actuator_idx];
388 static void bfq_put_stable_ref(struct bfq_queue *bfqq);
391 struct bfq_queue *bfqq,
395 struct bfq_queue *old_bfqq = bic->bfqq[is_sync][actuator_idx];
398 * If bfqq != NULL, then a non-stable queue merge between
399 * bic->bfqq and bfqq is happening here. This causes troubles
400 * in the following case: bic->bfqq has also been scheduled
402 * and bic->stable_merge_bfqq == bfqq happens to
403 * hold. Troubles occur because bfqq may then undergo a split,
405 * bic->stable_merge_bfqq points exactly to bfqq, then bfqq
408 * bic->stable_merge_bfqq == bfqq.
412 /* Clear bic pointer if bfqq is detached from this bic */
417 bic->bfqq[1][actuator_idx] = bfqq;
419 bic->bfqq[0][actuator_idx] = bfqq;
421 if (bfqq && bfqq_data->stable_merge_bfqq == bfqq) {
484 #define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
585 static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
587 struct bfq_data *bfqd = bfqq->bfqd;
588 struct bfq_entity *entity = &bfqq->entity;
592 int class_idx = bfqq->ioprio_class - 1;
602 /* +1 for bfqq entity, root cgroup not included */
603 depth = bfqg_to_blkg(bfqq_group(bfqq))->blkcg->css.cgroup->level + 1;
639 * For bfqq itself we take into account service trees
655 bfq_log_bfqq(bfqq->bfqd, bfqq,
669 static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
707 struct bfq_queue *bfqq =
716 if (bfqq && bfqq_request_over_limit(bfqq, limit)) {
733 struct bfq_queue *bfqq = NULL;
741 bfqq = rb_entry(parent, struct bfq_queue, pos_node);
747 if (sector > blk_rq_pos(bfqq->next_rq))
749 else if (sector < blk_rq_pos(bfqq->next_rq))
754 bfqq = NULL;
763 bfqq ? bfqq->pid : 0);
765 return bfqq;
768 static bool bfq_too_late_for_merging(struct bfq_queue *bfqq)
770 return bfqq->service_from_backlogged > 0 &&
771 time_is_before_jiffies(bfqq->first_IO_time +
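
Source lines 768-771 are nearly the whole helper; a hedged reassembly (the elided closing argument is assumed to be the bfq_merge_time_limit constant of bfq-iosched.c):

static bool bfq_too_late_for_merging(struct bfq_queue *bfqq)
{
	/* A queue that has already received service and whose first I/O is
	 * old enough is no longer an interesting merge candidate. */
	return bfqq->service_from_backlogged > 0 &&
		time_is_before_jiffies(bfqq->first_IO_time +
				       bfq_merge_time_limit); /* assumed */
}
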
784 bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
789 if (bfqq->pos_root) {
790 rb_erase(&bfqq->pos_node, bfqq->pos_root);
791 bfqq->pos_root = NULL;
795 if (bfqq == &bfqd->oom_bfqq)
799 * bfqq cannot be merged any longer (see comments in
800 * bfq_setup_cooperator): no point in adding bfqq into the
803 if (bfq_too_late_for_merging(bfqq))
806 if (bfq_class_idle(bfqq))
808 if (!bfqq->next_rq)
811 bfqq->pos_root = &bfqq_group(bfqq)->rq_pos_tree;
812 __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
813 blk_rq_pos(bfqq->next_rq), &parent, &p);
815 rb_link_node(&bfqq->pos_node, parent, p);
816 rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
818 bfqq->pos_root = NULL;
824 * or, as a special case, if bfqq must receive a share of the
826 * queue must receive. If bfqq does sync I/O, then these are the only
827 * two cases where bfqq happens to be guaranteed its share of the
828 * throughput even if I/O dispatching is not plugged when bfqq remains
855 struct bfq_queue *bfqq)
857 bool smallest_weight = bfqq &&
858 bfqq->weight_counter &&
859 bfqq->weight_counter ==
899 void bfq_weights_tree_add(struct bfq_queue *bfqq)
901 struct rb_root_cached *root = &bfqq->bfqd->queue_weights_tree;
902 struct bfq_entity *entity = &bfqq->entity;
918 if (bfqq->weight_counter)
928 bfqq->weight_counter = __counter;
939 bfqq->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
947 * bfqq's weight would have been the only weight making the
949 * however occur when bfqq becomes inactive again (the
952 * if !bfqq->weight_counter.
954 if (unlikely(!bfqq->weight_counter))
957 bfqq->weight_counter->weight = entity->weight;
958 rb_link_node(&bfqq->weight_counter->weights_node, parent, new);
959 rb_insert_color_cached(&bfqq->weight_counter->weights_node, root,
963 bfqq->weight_counter->num_active++;
964 bfqq->ref++;
973 void bfq_weights_tree_remove(struct bfq_queue *bfqq)
977 if (!bfqq->weight_counter)
980 root = &bfqq->bfqd->queue_weights_tree;
981 bfqq->weight_counter->num_active--;
982 if (bfqq->weight_counter->num_active > 0)
985 rb_erase_cached(&bfqq->weight_counter->weights_node, root);
986 kfree(bfqq->weight_counter);
989 bfqq->weight_counter = NULL;
990 bfq_put_queue(bfqq);
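
Source lines 973-990 cover almost all of bfq_weights_tree_remove(). A hedged reassembly (the control-flow lines that do not mention bfqq, including the label name, are filled in by assumption) shows how the per-weight counter installed by bfq_weights_tree_add() above is dropped and the queue reference taken there (line 964) released:

void bfq_weights_tree_remove(struct bfq_queue *bfqq)
{
	struct rb_root_cached *root;

	if (!bfqq->weight_counter)
		return;

	root = &bfqq->bfqd->queue_weights_tree;
	bfqq->weight_counter->num_active--;
	if (bfqq->weight_counter->num_active > 0)
		goto reset_entity_pointer;	/* other active queues share this weight */

	rb_erase_cached(&bfqq->weight_counter->weights_node, root);
	kfree(bfqq->weight_counter);

reset_entity_pointer:
	bfqq->weight_counter = NULL;
	bfq_put_queue(bfqq);			/* drop the reference taken on insertion */
}
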
996 static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
1001 if (bfq_bfqq_fifo_expire(bfqq))
1004 bfq_mark_bfqq_fifo_expire(bfqq);
1006 rq = rq_entry_fifo(bfqq->fifo.next);
1011 bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
1016 struct bfq_queue *bfqq,
1024 next = bfq_check_fifo(bfqq, last);
1034 rbnext = rb_first(&bfqq->sort_list);
1044 struct bfq_queue *bfqq)
1046 if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1 ||
1047 bfq_asymmetric_scenario(bfqq->bfqd, bfqq))
1056 * @bfqq: the queue to update.
1065 struct bfq_queue *bfqq)
1067 struct bfq_entity *entity = &bfqq->entity;
1068 struct request *next_rq = bfqq->next_rq;
1074 if (bfqq == bfqd->in_service_queue)
1082 max_t(unsigned long, bfqq->max_budget,
1083 bfq_serv_to_charge(next_rq, bfqq)),
1087 bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
1089 bfq_requeue_bfqq(bfqd, bfqq, false);
1124 static void switch_back_to_interactive_wr(struct bfq_queue *bfqq,
1127 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1128 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1129 bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt;
1133 bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
1137 bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);
1138 unsigned int a_idx = bfqq->actuator_idx;
1142 bfq_mark_bfqq_has_short_ttime(bfqq);
1144 bfq_clear_bfqq_has_short_ttime(bfqq);
1147 bfq_mark_bfqq_IO_bound(bfqq);
1149 bfq_clear_bfqq_IO_bound(bfqq);
1151 bfqq->last_serv_time_ns = bfqq_data->saved_last_serv_time_ns;
1152 bfqq->inject_limit = bfqq_data->saved_inject_limit;
1153 bfqq->decrease_time_jif = bfqq_data->saved_decrease_time_jif;
1155 bfqq->entity.new_weight = bfqq_data->saved_weight;
1156 bfqq->ttime = bfqq_data->saved_ttime;
1157 bfqq->io_start_time = bfqq_data->saved_io_start_time;
1158 bfqq->tot_idle_time = bfqq_data->saved_tot_idle_time;
1163 old_wr_coeff = bfqq->wr_coeff;
1164 bfqq->wr_coeff = bfqq_data->saved_wr_coeff;
1166 bfqq->service_from_wr = bfqq_data->saved_service_from_wr;
1167 bfqq->wr_start_at_switch_to_srt =
1169 bfqq->last_wr_start_finish = bfqq_data->saved_last_wr_start_finish;
1170 bfqq->wr_cur_max_time = bfqq_data->saved_wr_cur_max_time;
1172 if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
1173 time_is_before_jiffies(bfqq->last_wr_start_finish +
1174 bfqq->wr_cur_max_time))) {
1175 if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
1176 !bfq_bfqq_in_large_burst(bfqq) &&
1177 time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt +
1179 switch_back_to_interactive_wr(bfqq, bfqd);
1181 bfqq->wr_coeff = 1;
1182 bfq_log_bfqq(bfqq->bfqd, bfqq,
1188 bfqq->entity.prio_changed = 1;
1193 if (old_wr_coeff == 1 && bfqq->wr_coeff > 1)
1195 else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1)
1199 static int bfqq_process_refs(struct bfq_queue *bfqq)
1201 return bfqq->ref - bfqq->entity.allocated -
1202 bfqq->entity.on_st_or_in_serv -
1203 (bfqq->weight_counter != NULL) - bfqq->stable_ref;
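
Source lines 1199-1203 give the whole body of bfqq_process_refs(): the number of process references is the total refcount minus every reference the scheduler holds internally (allocated requests, the service-tree/in-service reference, the weights-tree counter reference, and stable-merge references). Reassembled:

static int bfqq_process_refs(struct bfq_queue *bfqq)
{
	return bfqq->ref - bfqq->entity.allocated -
		bfqq->entity.on_st_or_in_serv -
		(bfqq->weight_counter != NULL) - bfqq->stable_ref;
}
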
1206 /* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
1207 static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1221 hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
1226 bfqd->burst_parent_entity = bfqq->entity.parent;
1229 /* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
1230 static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1232 /* Increment burst size to take into account also bfqq */
1252 bfq_mark_bfqq_in_large_burst(bfqq);
1265 * Burst not yet large: add bfqq to the burst list. Do
1266 * not increment the ref counter for bfqq, because bfqq
1267 * is removed from the burst list before freeing bfqq
1270 hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
1333 * same, possible burst bfqq would belong to), and it implements all
1382 static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1385 * If bfqq is already in the burst list or is part of a large
1389 if (!hlist_unhashed(&bfqq->burst_list_node) ||
1390 bfq_bfqq_in_large_burst(bfqq) ||
1391 time_is_after_eq_jiffies(bfqq->split_time +
1396 * If bfqq's creation happens late enough, or bfqq belongs to
1401 * In this respect, consider the special case where bfqq is
1406 * following condition is true, bfqq will end up being
1408 * happen to contain only bfqq. And this is exactly what has
1409 * to happen, as bfqq may be the first queue of the first
1414 bfqq->entity.parent != bfqd->burst_parent_entity) {
1416 bfq_reset_burst_list(bfqd, bfqq);
1421 * If we get here, then bfqq is being activated shortly after the
1423 * bfqq as belonging to this large burst immediately.
1426 bfq_mark_bfqq_in_large_burst(bfqq);
1432 * reached, but bfqq is being activated shortly after the last
1433 * queue. Then we add bfqq to the burst.
1435 bfq_add_to_burst(bfqd, bfqq);
1438 * At this point, bfqq either has been added to the current
1441 * case, bfqq has become the first queue in the possible new
1448 static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
1450 struct bfq_entity *entity = &bfqq->entity;
1481 * The next function, invoked after the input queue bfqq switches from
1482 * idle to busy, updates the budget of bfqq. The function also tells
1484 * true. The purpose of expiring the in-service queue is to give bfqq
1489 * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has
1490 * expired because it has remained idle. In particular, bfqq may have
1493 * - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling
1497 * - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue
1500 * Even if bfqq has expired for one of the above reasons, the process
1502 * and thus be sensitive to the bandwidth it receives (bfqq may have
1503 * remained idle for other reasons: CPU high load, bfqq not enjoying
1506 * the above two reasons, bfqq has to wait for the service of at least
1508 * bfqq is likely to get a much lower bandwidth or resource time than
1512 * First, the budget and the timestamps of bfqq need to be updated in
1513 * a special way on bfqq reactivation: they need to be updated as if
1514 * bfqq did not remain idle and did not expire. In fact, if they are
1515 * computed as if bfqq expired and remained idle until reactivation,
1516 * then the process associated with bfqq is treated as if, instead of
1517 * being greedy, it stopped issuing requests when bfqq remained idle,
1520 * hole" between bfqq expiration and reactivation. As a consequence,
1523 * bfqq was not expired at all before this reactivation, i.e., it must
1524 * be set to the value of the remaining budget when bfqq was
1526 * value they had the last time bfqq was selected for service, i.e.,
1532 * queue must be expired too, to give bfqq the chance to preempt it
1533 * immediately. In fact, if bfqq has to wait for a full budget of the
1536 * timestamps of bfqq are lower than those of the in-service queue. If
1543 * The last important point is detecting whether bfqq does need this
1545 * process associated with bfqq greedy, and thus allows it to recover
1547 * request (which implies that bfqq expired for one of the above two
1557 * the process associated with bfqq recover a service hole, bfqq may
1560 * bfqq may have to be completed before the one of the in-service
1569 * rescheduled, and bfqq must be scheduled too. This is one of the
1585 struct bfq_queue *bfqq,
1588 struct bfq_entity *entity = &bfqq->entity;
1593 * trying to go on serving bfqq with this same budget: bfqq
1597 if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time &&
1598 bfq_bfqq_budget_left(bfqq) > 0) {
1609 * on expiration if bfqq is empty (see
1617 bfq_bfqq_budget_left(bfqq),
1618 bfqq->max_budget);
1625 * because bfqq would otherwise be charged again for
1638 entity->budget = max_t(unsigned long, bfqq->max_budget,
1639 bfq_serv_to_charge(bfqq->next_rq, bfqq));
1640 bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
1654 struct bfq_queue *bfqq,
1664 bfqq->service_from_wr = 0;
1665 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1666 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1679 bfqq->wr_start_at_switch_to_srt =
1681 bfqq->wr_coeff = bfqd->bfq_wr_coeff *
1683 bfqq->wr_cur_max_time =
1689 * close to bfqq's backlog, so as to reduce the
1696 bfqq->entity.budget = min_t(unsigned long,
1697 bfqq->entity.budget,
1701 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1702 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1704 bfqq->wr_coeff = 1;
1735 if (bfqq->wr_cur_max_time !=
1737 bfqq->wr_start_at_switch_to_srt =
1738 bfqq->last_wr_start_finish;
1740 bfqq->wr_cur_max_time =
1742 bfqq->wr_coeff = bfqd->bfq_wr_coeff *
1745 bfqq->last_wr_start_finish = jiffies;
1751 struct bfq_queue *bfqq)
1753 return bfqq->dispatched == 0 &&
1755 bfqq->budget_timeout +
1761 * Return true if bfqq is in a higher priority class, or has a higher
1764 static bool bfq_bfqq_higher_class_or_weight(struct bfq_queue *bfqq,
1769 if (bfqq->ioprio_class < in_serv_bfqq->ioprio_class)
1772 if (in_serv_bfqq->entity.parent == bfqq->entity.parent) {
1773 bfqq_weight = bfqq->entity.weight;
1776 if (bfqq->entity.parent)
1777 bfqq_weight = bfqq->entity.parent->weight;
1779 bfqq_weight = bfqq->entity.weight;
1816 static bool bfq_better_to_idle(struct bfq_queue *bfqq);
1819 struct bfq_queue *bfqq,
1826 idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
1833 bfqq->ttime.last_end_request +
1837 bfqq->bic || RQ_BIC(rq)->bfqq_data[act_idx].stably_merged;
1840 * bfqq deserves to be weight-raised if:
1848 in_burst = bfq_bfqq_in_large_burst(bfqq);
1850 !BFQQ_TOTALLY_SEEKY(bfqq) &&
1852 time_is_before_jiffies(bfqq->soft_rt_next_start) &&
1853 bfqq->dispatched == 0 &&
1854 bfqq->entity.new_weight == 40;
1856 bfqq->entity.new_weight == 40;
1870 (bfqq->wr_coeff > 1 ||
1871 (bfq_bfqq_sync(bfqq) && bfqq_non_merged_or_stably_merged &&
1875 * Using the last flag, update budget and check whether bfqq
1879 bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
1883 * If bfqq happened to be activated in a burst, but has been
1886 * I/O associated with bfqq is finished. So bfqq does not need
1888 * anymore. Accordingly, we reset bfqq's in_large_burst flag
1889 * if set, and remove bfqq from the burst list if it's
1891 * that bfqq does not need to belong to the burst list any
1892 * more does not invalidate the fact that bfqq was created in
1895 if (likely(!bfq_bfqq_just_created(bfqq)) &&
1898 bfqq->budget_timeout +
1900 hlist_del_init(&bfqq->burst_list_node);
1901 bfq_clear_bfqq_in_large_burst(bfqq);
1904 bfq_clear_bfqq_just_created(bfqq);
1907 if (unlikely(time_is_after_jiffies(bfqq->split_time)))
1909 bfqq->split_time =
1912 if (time_is_before_jiffies(bfqq->split_time +
1914 bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
1921 if (old_wr_coeff != bfqq->wr_coeff)
1922 bfqq->entity.prio_changed = 1;
1926 bfqq->last_idle_bklogged = jiffies;
1927 bfqq->service_from_backlogged = 0;
1928 bfq_clear_bfqq_softrt_update(bfqq);
1930 bfq_add_bfqq_busy(bfqq);
1935 * explicitly about two cases. The first is that bfqq has to
1938 * bfqq_wants_to_preempt is true. However, if bfqq does not
1939 * carry time-critical I/O, then bfqq's bandwidth is less
1942 * bfqq is at least as weight-raised, i.e., at least as time
1945 * The second case is that bfqq is in a higher priority class,
1948 * bfqq does not start to be served immediately, the resulting
1949 * delay for bfqq's I/O is however lower or much lower than
1950 * the ideal completion time to be guaranteed to bfqq's I/O.
1953 * the timestamps of both bfqq and of the in-service queue,
1954 * bfqq actually is the next queue to serve. So, to reduce
1958 * simple, necessary condition for bfqq to be the next queue
1973 * (2) this switch of bfqq to busy changes the scenario.
1977 bfqq->wr_coeff >= bfqd->in_service_queue->wr_coeff) ||
1978 bfq_bfqq_higher_class_or_weight(bfqq, bfqd->in_service_queue) ||
1986 struct bfq_queue *bfqq)
1989 bfqq->last_serv_time_ns = 0;
1998 * If bfqq has a short think time, then start by setting the
2001 * of bfqq, and therefore, if one request was injected when
2002 * bfqq remains empty, this injected request might delay the
2003 * service of the next I/O request for bfqq significantly. In
2004 * case bfqq can actually tolerate some injection, then the
2006 * lucky circumstance holds exactly because bfqq has a short
2015 * left to 1 even if the think time is short: bfqq's I/O is
2016 * synchronized with that of some other queue, i.e., bfqq may
2019 * blocking I/O to be served while bfqq is in service. And
2020 * this is very convenient both for bfqq and for overall
2024 * On the opposite end, if bfqq has a long think time, then
2028 * latency of bfqq's requests, as the service time of a single
2029 * request is likely to be lower than the think time of bfqq;
2030 * b) on the downside, after becoming empty, bfqq is likely to
2037 * occurs with bfqq. On the downside, this proactive step
2043 if (bfq_bfqq_has_short_ttime(bfqq))
2044 bfqq->inject_limit = 0;
2046 bfqq->inject_limit = 1;
2048 bfqq->decrease_time_jif = jiffies;
2051 static void bfq_update_io_intensity(struct bfq_queue *bfqq, u64 now_ns)
2053 u64 tot_io_time = now_ns - bfqq->io_start_time;
2055 if (RB_EMPTY_ROOT(&bfqq->sort_list) && bfqq->dispatched == 0)
2056 bfqq->tot_idle_time +=
2057 now_ns - bfqq->ttime.last_end_request;
2059 if (unlikely(bfq_bfqq_just_created(bfqq)))
2066 if (bfqq->tot_idle_time * 5 > tot_io_time)
2067 bfq_clear_bfqq_IO_bound(bfqq);
2069 bfq_mark_bfqq_IO_bound(bfqq);
2076 bfqq->io_start_time = now_ns - (tot_io_time>>1);
2077 bfqq->tot_idle_time >>= 1;
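
The fragments at source lines 2051-2077 sketch the I/O-intensity heuristic: a queue keeps its IO_bound flag only while its accumulated idle time stays below one fifth of its observed lifetime, and both counters are halved under a window-limiting guard so the estimate tracks recent behaviour. A hedged reading of the decision:

/* Sketch of the decision in bfq_update_io_intensity() (lines 2066-2069). */
if (bfqq->tot_idle_time * 5 > tot_io_time)	/* idle for more than ~20% of lifetime */
	bfq_clear_bfqq_IO_bound(bfqq);
else
	bfq_mark_bfqq_IO_bound(bfqq);

/* Window-limiting decay (lines 2076-2077); the guard condition around it
 * is elided from this listing. */
bfqq->io_start_time = now_ns - (tot_io_time >> 1);
bfqq->tot_idle_time >>= 1;
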
2082 * Detect whether bfqq's I/O seems synchronized with that of some
2083 * other queue, i.e., whether bfqq, after remaining empty, happens to
2086 * we assume, for simplicity, that bfqq may have at most one waker
2092 * plugged for bfqq. In addition to boosting throughput, this
2093 * unblocks bfqq's I/O, thereby improving bandwidth and latency for
2094 * bfqq. Note that these same results may be achieved with the general
2100 * waker queue for bfqq if, for three consecutive times, bfqq happens to become
2102 * timeout. In this respect, even if bfqq is empty, we do not check for a waker
2103 * if it still has some in-flight I/O. In fact, in this case bfqq is actually
2108 * is a waker queue for bfqq. These detection steps are performed only if bfqq
2109 * has a long think time, so as to make it more likely that bfqq's I/O is
2119 * reasons. While blocked by synchronization, bfqq has a long think
2120 * time. This implies that bfqq's inject limit is at least equal to 1
2123 * first I/O-plugging time interval for bfqq. This triggers the first
2126 * during the next I/O-plugging interval for bfqq.
2132 static void bfq_check_waker(struct bfq_data *bfqd, struct bfq_queue *bfqq,
2138 bfqd->last_completed_rq_bfqq == bfqq ||
2139 bfq_bfqq_has_short_ttime(bfqq) ||
2142 bfqq == &bfqd->oom_bfqq)
2152 bfqq->tentative_waker_bfqq ||
2153 now_ns > bfqq->waker_detection_started +
2160 bfqq->tentative_waker_bfqq =
2162 bfqq->num_waker_detections = 1;
2163 bfqq->waker_detection_started = now_ns;
2164 bfq_bfqq_name(bfqq->tentative_waker_bfqq, waker_name,
2166 bfq_log_bfqq(bfqd, bfqq, "set tentative waker %s", waker_name);
2168 bfqq->num_waker_detections++;
2170 if (bfqq->num_waker_detections == 3) {
2171 bfqq->waker_bfqq = bfqd->last_completed_rq_bfqq;
2172 bfqq->tentative_waker_bfqq = NULL;
2173 bfq_bfqq_name(bfqq->waker_bfqq, waker_name,
2175 bfq_log_bfqq(bfqd, bfqq, "set waker %s", waker_name);
2179 * bfqq->waker_bfqq must be reset. To
2189 * In addition, if bfqq is already in
2193 * queue, bfqq must be removed from
2197 if (!hlist_unhashed(&bfqq->woken_list_node))
2198 hlist_del_init(&bfqq->woken_list_node);
2199 hlist_add_head(&bfqq->woken_list_node,
2206 struct bfq_queue *bfqq = RQ_BFQQ(rq);
2207 struct bfq_data *bfqd = bfqq->bfqd;
2209 unsigned int old_wr_coeff = bfqq->wr_coeff;
2213 bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
2214 bfqq->queued[rq_is_sync(rq)]++;
2221 if (bfq_bfqq_sync(bfqq) && RQ_BIC(rq)->requests <= 1) {
2222 bfq_check_waker(bfqd, bfqq, now_ns);
2230 if (time_is_before_eq_jiffies(bfqq->decrease_time_jif +
2232 bfq_reset_inject_limit(bfqd, bfqq);
2238 * - bfqq is in service, because the total service
2259 if (bfqq == bfqd->in_service_queue &&
2261 (bfqq->last_serv_time_ns > 0 &&
2263 time_is_before_eq_jiffies(bfqq->decrease_time_jif +
2284 * on bfqq before rq is completed).
2291 if (bfq_bfqq_sync(bfqq))
2292 bfq_update_io_intensity(bfqq, now_ns);
2294 elv_rb_add(&bfqq->sort_list, rq);
2299 prev = bfqq->next_rq;
2300 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
2301 bfqq->next_rq = next_rq;
2307 if (unlikely(!bfqd->nonrot_with_queueing && prev != bfqq->next_rq))
2308 bfq_pos_tree_add_move(bfqd, bfqq);
2310 if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
2311 bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
2316 bfqq->last_wr_start_finish +
2318 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
2319 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
2322 bfqq->entity.prio_changed = 1;
2324 if (prev != bfqq->next_rq)
2325 bfq_updated_next_req(bfqd, bfqq);
2332 * . if bfqq is not going to be weight-raised, because, for
2338 * . if bfqq is not weight-raised, because, if bfqq is now
2342 * . if bfqq is interactive, because, regardless of whether
2343 * bfqq is currently weight-raised, the weight-raising
2346 * conditions, if bfqq is already weight-raised)
2348 * last_wr_start_finish has to be updated also if bfqq is soft
2355 (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
2356 bfqq->last_wr_start_finish = jiffies;
2363 struct bfq_queue *bfqq = bfqd->bio_bfqq;
2366 if (bfqq)
2367 return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
2383 struct bfq_queue *bfqq = RQ_BFQQ(rq);
2384 struct bfq_data *bfqd = bfqq->bfqd;
2387 if (bfqq->next_rq == rq) {
2388 bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
2389 bfq_updated_next_req(bfqd, bfqq);
2394 bfqq->queued[sync]--;
2400 elv_rb_del(&bfqq->sort_list, rq);
2406 if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
2407 bfqq->next_rq = NULL;
2409 if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
2410 bfq_del_bfqq_busy(bfqq, false);
2412 * bfqq emptied. In normal operation, when
2413 * bfqq is empty, bfqq->entity.service and
2414 * bfqq->entity.budget must contain,
2416 * budget used last time bfqq emptied. These
2418 * this last removal occurred while bfqq is
2420 * reset both bfqq->entity.service and
2421 * bfqq->entity.budget, if bfqq has still a
2424 bfqq->entity.budget = bfqq->entity.service = 0;
2430 if (bfqq->pos_root) {
2431 rb_erase(&bfqq->pos_node, bfqq->pos_root);
2432 bfqq->pos_root = NULL;
2437 bfq_pos_tree_add_move(bfqd, bfqq);
2441 bfqq->meta_pending--;
2511 struct bfq_queue *bfqq = RQ_BFQQ(req);
2515 if (!bfqq)
2518 bfqd = bfqq->bfqd;
2521 elv_rb_del(&bfqq->sort_list, req);
2522 elv_rb_add(&bfqq->sort_list, req);
2524 /* Choose next request to be served for bfqq */
2525 prev = bfqq->next_rq;
2526 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
2528 bfqq->next_rq = next_rq;
2534 if (prev != bfqq->next_rq) {
2535 bfq_updated_next_req(bfqd, bfqq);
2541 bfq_pos_tree_add_move(bfqd, bfqq);
2563 struct bfq_queue *bfqq = RQ_BFQQ(rq),
2566 if (!bfqq)
2578 if (bfqq == next_bfqq &&
2586 if (bfqq->next_rq == next)
2587 bfqq->next_rq = rq;
2589 bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
2600 /* Must be called with bfqq != NULL */
2601 static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
2604 * If bfqq has been enjoying interactive weight-raising, then
2606 * reason. bfqq may have been conveying the I/O needed to load
2609 * loading, and finally starts doing its job. But, if bfqq has
4612 * high value. So, without this reset, bfqq would be
2617 if (bfqq->wr_cur_max_time !=
2618 bfqq->bfqd->bfq_wr_rt_max_time)
2619 bfqq->soft_rt_next_start = jiffies;
2621 if (bfq_bfqq_busy(bfqq))
2622 bfqq->bfqd->wr_busy_queues--;
2623 bfqq->wr_coeff = 1;
2624 bfqq->wr_cur_max_time = 0;
2625 bfqq->last_wr_start_finish = jiffies;
2630 bfqq->entity.prio_changed = 1;
2650 struct bfq_queue *bfqq;
2656 list_for_each_entry(bfqq, &bfqd->active_list[i], bfqq_list)
2657 bfq_bfqq_end_wr(bfqq);
2659 list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
2660 bfq_bfqq_end_wr(bfqq);
2682 struct bfq_queue *bfqq,
2685 struct rb_root *root = &bfqq_group(bfqq)->rq_pos_tree;
2727 struct bfq_queue *bfqq;
2736 bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
2737 if (!bfqq || bfqq == cur_bfqq)
2740 return bfqq;
2744 bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
2751 * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
2760 if (__bfqq == bfqq)
2765 process_refs = bfqq_process_refs(bfqq);
2768 * If the process for the bfqq has gone away, there is no
2779 if (new_bfqq->entity.parent != bfqq->entity.parent)
2782 bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
2792 * We redirect bfqq to new_bfqq and not the opposite, because
2793 * we are in the context of the process owning bfqq, thus we
2805 bfqq->new_bfqq = new_bfqq;
2808 * each time some I/O for bfqq arrives, the process that
2809 * generated that I/O is disassociated from bfqq and
2819 static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
2825 if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
2826 (bfqq->ioprio_class != new_bfqq->ioprio_class))
2834 if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq))
2842 if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq))
2849 struct bfq_queue *bfqq);
2852 bfq_setup_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq,
2856 int proc_ref = min(bfqq_process_refs(bfqq),
2861 if (idling_boosts_thr_without_issues(bfqd, bfqq) || proc_ref == 0)
2865 new_bfqq = bfq_setup_merge(bfqq, stable_merge_bfqq);
2886 * Attempt to schedule a merge of bfqq with the currently in-service
2906 bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
2910 unsigned int a_idx = bfqq->actuator_idx;
2914 if (bfqq->new_bfqq)
2915 return bfqq->new_bfqq;
2919 * devs. For this branch to be executed, bfqq must not be
2920 * currently merged with some other queue (i.e., bfqq->bic
2922 * then we should also check whether bfqq has already been
2928 * Make sure also that bfqq is sync, because
2931 * sync queue, but this bfqq is async
2933 if (bfq_bfqq_sync(bfqq) && bfqq_data->stable_merge_bfqq &&
2934 !bfq_bfqq_just_created(bfqq) &&
2935 time_is_before_jiffies(bfqq->split_time +
2937 time_is_before_jiffies(bfqq->creation_time +
2942 return bfq_setup_stable_merge(bfqd, bfqq,
2989 * Prevent bfqq from being merged if it has been created too
2999 if (bfq_too_late_for_merging(bfqq))
3002 if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
3011 if (in_service_bfqq && in_service_bfqq != bfqq &&
3015 bfqq->entity.parent == in_service_bfqq->entity.parent &&
3016 bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
3017 new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
3026 new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
3030 bfq_may_be_close_cooperator(bfqq, new_bfqq))
3031 return bfq_setup_merge(bfqq, new_bfqq);
3036 static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
3038 struct bfq_io_cq *bic = bfqq->bic;
3039 unsigned int a_idx = bfqq->actuator_idx;
3043 * If !bfqq->bic, the queue is already shared or its requests
3050 bfqq_data->saved_last_serv_time_ns = bfqq->last_serv_time_ns;
3051 bfqq_data->saved_inject_limit = bfqq->inject_limit;
3052 bfqq_data->saved_decrease_time_jif = bfqq->decrease_time_jif;
3054 bfqq_data->saved_weight = bfqq->entity.orig_weight;
3055 bfqq_data->saved_ttime = bfqq->ttime;
3057 bfq_bfqq_has_short_ttime(bfqq);
3058 bfqq_data->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
3059 bfqq_data->saved_io_start_time = bfqq->io_start_time;
3060 bfqq_data->saved_tot_idle_time = bfqq->tot_idle_time;
3061 bfqq_data->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
3063 !hlist_unhashed(&bfqq->burst_list_node);
3065 if (unlikely(bfq_bfqq_just_created(bfqq) &&
3066 !bfq_bfqq_in_large_burst(bfqq) &&
3067 bfqq->bfqd->low_latency)) {
3069 * bfqq being merged right after being created: bfqq
3074 * to bfqq, so that to avoid that bfqq unjustly fails
3077 bfqq_data->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff;
3081 bfq_wr_duration(bfqq->bfqd);
3084 bfqq_data->saved_wr_coeff = bfqq->wr_coeff;
3086 bfqq->wr_start_at_switch_to_srt;
3088 bfqq->service_from_wr;
3090 bfqq->last_wr_start_finish;
3091 bfqq_data->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
3106 void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
3109 * To prevent bfqq's service guarantees from being violated,
3110 * bfqq may be left busy, i.e., queued for service, even if
3112 * details). But, if no process will send requests to bfqq any
3113 * longer, then there is no point in keeping bfqq queued for
3114 * service. In addition, keeping bfqq queued for service, but
3115 * with no process ref any longer, may have caused bfqq to be
3119 if (bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list) &&
3120 bfqq != bfqd->in_service_queue)
3121 bfq_del_bfqq_busy(bfqq, false);
3123 bfq_reassign_last_bfqq(bfqq, NULL);
3125 bfq_put_queue(bfqq);
3130 struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
3132 bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
3135 bfq_bfqq_save_state(bfqq);
3137 if (bfq_bfqq_IO_bound(bfqq))
3139 bfq_clear_bfqq_IO_bound(bfqq);
3142 * The processes associated with bfqq are cooperators of the
3143 * processes associated with new_bfqq. So, if bfqq has a
3145 * to let bfqq's waker freely inject I/O when they have no
3148 if (bfqq->waker_bfqq && !new_bfqq->waker_bfqq &&
3149 bfqq->waker_bfqq != new_bfqq) {
3150 new_bfqq->waker_bfqq = bfqq->waker_bfqq;
3165 * If bfqq is weight-raised, then let new_bfqq inherit
3167 * where bfqq has just been created, but has not yet made it
3169 * bfqq even before bfq_add_request is executed for the first
3170 * time for bfqq). Handling this case would however be very
3173 if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) {
3174 new_bfqq->wr_coeff = bfqq->wr_coeff;
3175 new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time;
3176 new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish;
3178 bfqq->wr_start_at_switch_to_srt;
3184 if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */
3185 bfqq->wr_coeff = 1;
3186 bfqq->entity.prio_changed = 1;
3187 if (bfq_bfqq_busy(bfqq))
3197 bic_set_bfqq(bic, new_bfqq, true, bfqq->actuator_idx);
3201 * set new_bfqq->bic to NULL. bfqq either:
3202 * - does not belong to any bic any more, and hence bfqq->bic must
3206 * any bic soon and bfqq->bic is already NULL (therefore the next
3220 bfqq->bic = NULL;
3222 bfq_reassign_last_bfqq(bfqq, new_bfqq);
3224 bfq_release_process_ref(bfqd, bfqq);
3232 struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;
3241 * Lookup the bfqq that this bio will be queued with. Allow
3244 if (!bfqq)
3251 new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false, bfqd->bio_bic);
3254 * bic still points to bfqq, then it has not yet been
3256 * merge between bfqq and new_bfqq can be safely
3258 * and bfqq can be put.
3260 bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
3267 bfqq = new_bfqq;
3275 bfqd->bio_bfqq = bfqq;
3278 return bfqq == RQ_BFQQ(rq);
3288 struct bfq_queue *bfqq)
3292 if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
3295 timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
3299 bfqq->budget_timeout = jiffies +
3304 struct bfq_queue *bfqq)
3306 if (bfqq) {
3307 bfq_clear_bfqq_fifo_expire(bfqq);
3311 if (time_is_before_jiffies(bfqq->last_wr_start_finish) &&
3312 bfqq->wr_coeff > 1 &&
3313 bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
3314 time_is_before_jiffies(bfqq->budget_timeout)) {
3335 * budget_timeout is set to jiffies if bfqq
3339 if (time_after(bfqq->budget_timeout,
3340 bfqq->last_wr_start_finish))
3341 bfqq->last_wr_start_finish +=
3342 jiffies - bfqq->budget_timeout;
3344 bfqq->last_wr_start_finish = jiffies;
3347 bfq_set_budget_timeout(bfqd, bfqq);
3348 bfq_log_bfqq(bfqd, bfqq,
3350 bfqq->entity.budget);
3353 bfqd->in_service_queue = bfqq;
3362 struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
3364 __bfq_set_in_service_queue(bfqd, bfqq);
3365 return bfqq;
3370 struct bfq_queue *bfqq = bfqd->in_service_queue;
3373 bfq_mark_bfqq_wait_request(bfqq);
3391 if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
3392 !bfq_asymmetric_scenario(bfqd, bfqq))
3394 else if (bfqq->wr_coeff > 1)
3402 bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
3655 struct bfq_queue *bfqq = RQ_BFQQ(rq);
3663 * dispatch occur for a non in-service bfqq, this anticipated
3664 * increment prevents two counters related to bfqq->dispatched
3666 * incremented again when the (new) value of bfqq->dispatched
3669 bfqq->dispatched++;
3678 * the process associated with bfqq.
3700 * associated with bfqq must receive a lower or equal
3713 * bfqq.
3717 * that bfqq receives its assigned fraction of the device throughput
3723 * queueing. So, unless bfqq falls in cases where idling also boosts
3755 * conditions (i-a), (i-b) or (ii). So the device is idled for bfqq.
3772 * Not checking condition (ii) evidently exposes bfqq to the
3824 * bfq_weights_tree_add()). Then the fact that bfqq is weight-raised
3826 * below takes into account also the fact that, even if bfqq is being
3835 * belonging to bfqq. If so, I/O dispatching is to be plugged, for the
3837 * non-bfqq's I/O requests before bfqq's ones, thereby delaying the
3838 * arrival of new I/O requests for bfqq (recall that bfqq is sync). If
3839 * I/O-dispatching is not plugged, then, while bfqq remains empty, a
3841 * dispatched too, possibly causing the service of bfqq's I/O to be
3847 * in-flight I/O, and enables bfqq to recover the bandwidth it may
3863 * asymmetric: the case where only bfqq is busy. In this case, not
3864 * expiring bfqq does not cause any harm to any other queues in terms
3866 * sequence of events: (1) bfqq is expired, (2) a new queue with a
3867 * lower weight than bfqq becomes busy (or more queues), (3) the new
3868 * queue is served until a new request arrives for bfqq, (4) when bfqq
3870 * the drive that the pending requests for bfqq take a lot of time to
3872 * dispatched requests of bfqq to be delayed, inside the drive. So, to
3874 * as asymmetric also if bfqq is the only busy queue
3877 struct bfq_queue *bfqq)
3881 /* No point in idling for bfqq if it won't get requests any longer */
3882 if (unlikely(!bfqq_process_refs(bfqq)))
3885 return (bfqq->wr_coeff > 1 &&
3887 bfqd->tot_rq_in_driver >= bfqq->dispatched + 4)) ||
3888 bfq_asymmetric_scenario(bfqd, bfqq) ||
3892 static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
3896 * If this bfqq is shared between multiple processes, check
3901 if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
3902 bfq_mark_bfqq_split_coop(bfqq);
3906 * bfqq. If idling_needed_for_service_guarantees(bfqq) returns
3907 * true, then bfqq's bandwidth would be violated if an
3909 * dispatched while bfqq is waiting for its new I/O to
3911 * expiration caused by a preemption attempt, and if bfqq is
3913 * bfqq if it needs I/O-dispatch plugging, even if it is
3914 * empty. By doing so, bfqq is granted to be served before the
3915 * above queues (provided that bfqq is of course eligible).
3917 if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
3919 idling_needed_for_service_guarantees(bfqd, bfqq))) {
3920 if (bfqq->dispatched == 0)
3927 bfqq->budget_timeout = jiffies;
3929 bfq_del_bfqq_busy(bfqq, true);
3931 bfq_requeue_bfqq(bfqd, bfqq, true);
3937 !RB_EMPTY_ROOT(&bfqq->sort_list)))
3938 bfq_pos_tree_add_move(bfqd, bfqq);
3945 * may cause bfqq to be freed. If this happens, the next
3952 * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
3954 * @bfqq: queue to update.
3957 * Handle the feedback on @bfqq budget at queue expiration.
3961 struct bfq_queue *bfqq,
3969 if (bfqq->wr_coeff == 1)
3970 budget = bfqq->max_budget;
3979 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
3980 bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
3981 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
3983 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
3984 bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
3986 if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
4017 if (bfqq->dispatched > 0) /* still outstanding reqs */
4071 * The service needed by bfqq is measured
4072 * quite precisely by bfqq->entity.service.
4073 * Since bfqq does not enjoy device idling,
4074 * bfqq->entity.service is equal to the number
4076 * bfqq requested to read/write before waiting
4080 budget = max_t(int, bfqq->entity.service, min_budget);
4085 } else if (!bfq_bfqq_sync(bfqq)) {
4095 bfqq->max_budget = budget;
4099 bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);
4104 * the finish time of bfqq must be kept in sync with the
4111 next_rq = bfqq->next_rq;
4113 bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
4114 bfq_serv_to_charge(next_rq, bfqq));
4116 bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d",
4118 bfqq->entity.budget);
4122 * Return true if the process associated with bfqq is "slow". The slow
4152 static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
4157 bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */
4159 if (!bfq_bfqq_sync(bfqq))
4200 slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
4203 bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
4257 * (b) Current value of bfqq->soft_rt_next_start. As pointed out
4271 * stored in bfqq->soft_rt_next_start after each invocation of
4273 * bfqq->soft_rt_next_start is constantly used to lower-bound the
4275 * beginning of a low-speed interval, bfqq->soft_rt_next_start is
4302 struct bfq_queue *bfqq)
4304 return max3(bfqq->soft_rt_next_start,
4305 bfqq->last_idle_bklogged +
4306 HZ * bfqq->service_from_backlogged /
4308 jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
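
Source lines 4302-4308 reassemble into the soft real-time eligibility bound: the next instant at which bfqq may again be considered soft real-time is the latest of the previously computed bound, the time needed to consume the accumulated backlog at the maximum soft-rt rate, and a short guard after the idle slice. The divisor on the elided line 4307 is an assumption (bfqd->bfq_wr_max_softrt_rate in mainline bfq-iosched.c):

static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
						struct bfq_queue *bfqq)
{
	return max3(bfqq->soft_rt_next_start,		/* never move the bound backwards */
		    bfqq->last_idle_bklogged +
		    HZ * bfqq->service_from_backlogged /
		    bfqd->bfq_wr_max_softrt_rate,	/* assumed divisor */
		    jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
}
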
4314 * @bfqq: the queue to expire.
4318 * If the process associated with bfqq does slow I/O (e.g., because it
4319 * issues random requests), we charge bfqq with the time it has been
4322 * a consequence, bfqq will typically get higher timestamps upon
4325 * end, bfqq receives less service in proportion to how slowly its
4329 * contrast, if the process associated with bfqq is not slow, we
4330 * charge bfqq exactly with the service it has received.
4338 struct bfq_queue *bfqq,
4344 struct bfq_entity *entity = &bfqq->entity;
4349 slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, &delta);
4366 if (bfqq->wr_coeff == 1 &&
4369 bfq_bfqq_budget_left(bfqq) >= entity->budget / 3)))
4370 bfq_bfqq_charge_time(bfqd, bfqq, delta);
4372 if (bfqd->low_latency && bfqq->wr_coeff == 1)
4373 bfqq->last_wr_start_finish = jiffies;
4376 RB_EMPTY_ROOT(&bfqq->sort_list)) {
4389 if (bfqq->dispatched == 0)
4390 bfqq->soft_rt_next_start =
4391 bfq_bfqq_softrt_next_start(bfqd, bfqq);
4392 else if (bfqq->dispatched > 0) {
4397 bfq_mark_bfqq_softrt_update(bfqq);
4401 bfq_log_bfqq(bfqd, bfqq,
4403 slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq));
4406 * bfqq expired, so no total service time needs to be computed
4417 __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
4418 if (__bfq_bfqq_expire(bfqd, bfqq, reason))
4419 /* bfqq is gone, no more actions on it */
4422 /* mark bfqq as waiting a request only if a bic still points to it */
4423 if (!bfq_bfqq_busy(bfqq) &&
4426 bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
4437 * Differently from what happens with bfqq->entity.service,
4439 * for parent entities. In fact, in case bfqq may have a
4441 * consumed budget, bfqq->entity.service needs to be kept,
4442 * because if bfqq then actually goes on being served using
4443 * the same budget, the last value of bfqq->entity.service is
4444 * needed to properly decrement bfqq->entity.budget by the
4447 * the bubble up of the new value of bfqq->entity.budget will
4449 * even in case bfqq and thus parent entities go on receiving
4462 static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
4464 return time_is_before_eq_jiffies(bfqq->budget_timeout);
4475 static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
4477 bfq_log_bfqq(bfqq->bfqd, bfqq,
4479 bfq_bfqq_wait_request(bfqq),
4480 bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
4481 bfq_bfqq_budget_timeout(bfqq));
4483 return (!bfq_bfqq_wait_request(bfqq) ||
4484 bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
4486 bfq_bfqq_budget_timeout(bfqq);
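
Source lines 4483-4486 reassemble (the standalone '&&' between them is filled in by assumption) into the budget-timeout expiry test: the queue may be expired for budget timeout only if it is not currently idling for a new request, or if it has been slow enough that at least a third of its budget is still left:

return (!bfq_bfqq_wait_request(bfqq) ||
	bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
	&&
	bfq_bfqq_budget_timeout(bfqq);
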
4490 struct bfq_queue *bfqq)
4497 /* No point in idling for bfqq if it won't get requests any longer */
4498 if (unlikely(!bfqq_process_refs(bfqq)))
4501 bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
4502 bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
4512 * the request pattern for bfqq is I/O-bound and sequential, or
4514 * not NCQ-capable and the request pattern for bfqq is
4550 * bfqq is not weight-raised, this guarantees that the device
4551 * is not idled for bfqq (if, instead, bfqq is weight-raised,
4554 * [1] for details), this behavior causes bfqq, and hence any
4589 static bool bfq_better_to_idle(struct bfq_queue *bfqq)
4591 struct bfq_data *bfqd = bfqq->bfqd;
4594 /* No point in idling for bfqq if it won't get requests any longer */
4595 if (unlikely(!bfqq_process_refs(bfqq)))
4604 * (a) bfqq is async
4605 * (b) bfqq is in the idle io prio class: in this case we do
4609 if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
4610 bfq_class_idle(bfqq))
4614 idling_boosts_thr_without_issues(bfqd, bfqq);
4617 idling_needed_for_service_guarantees(bfqd, bfqq);
4640 static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
4642 return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq);
4655 struct bfq_queue *bfqq, *in_serv_bfqq = bfqd->in_service_queue;
4661 * - bfqq is not weight-raised and therefore does not carry
4664 * - regardless of whether bfqq is weight-raised, bfqq has
4706 list_for_each_entry(bfqq, &bfqd->active_list[i], bfqq_list)
4707 if (!RB_EMPTY_ROOT(&bfqq->sort_list) &&
4708 (in_serv_always_inject || bfqq->wr_coeff > 1) &&
4709 bfq_serv_to_charge(bfqq->next_rq, bfqq) <=
4710 bfq_bfqq_budget_left(bfqq)) {
4722 * long to break bfqq's service guarantees. On
4729 blk_rq_sectors(bfqq->next_rq) >=
4735 return bfqq;
4746 struct bfq_queue *bfqq;
4752 list_for_each_entry(bfqq, &bfqd->active_list[idx], bfqq_list) {
4753 if (!RB_EMPTY_ROOT(&bfqq->sort_list) &&
4754 bfq_serv_to_charge(bfqq->next_rq, bfqq) <=
4755 bfq_bfqq_budget_left(bfqq)) {
4756 return bfqq;
4785 struct bfq_queue *bfqq =
4788 if (bfqq)
4789 return bfqq;
4803 struct bfq_queue *bfqq, *inject_bfqq;
4807 bfqq = bfqd->in_service_queue;
4808 if (!bfqq)
4811 bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
4814 * Do not expire bfqq for budget timeout if bfqq may be about
4816 * prevent bfqq from expiring is the same as in the comments
4820 if (bfq_may_expire_for_budg_timeout(bfqq) &&
4821 !bfq_bfqq_must_idle(bfqq))
4831 if (inject_bfqq && inject_bfqq != bfqq)
4840 next_rq = bfqq->next_rq;
4842 * If bfqq has requests queued and it has enough budget left to
4846 if (bfq_serv_to_charge(next_rq, bfqq) >
4847 bfq_bfqq_budget_left(bfqq)) {
4862 if (bfq_bfqq_wait_request(bfqq)) {
4876 bfq_clear_bfqq_wait_request(bfqq);
4891 if (bfq_bfqq_wait_request(bfqq) ||
4892 (bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) {
4893 unsigned int act_idx = bfqq->actuator_idx;
4896 !hlist_empty(&bfqq->woken_list) ?
4897 container_of(bfqq->woken_list.first,
4902 if (bfqq->bic && bfqq->bic->bfqq[0][act_idx] &&
4903 bfq_bfqq_busy(bfqq->bic->bfqq[0][act_idx]) &&
4904 bfqq->bic->bfqq[0][act_idx]->next_rq)
4905 async_bfqq = bfqq->bic->bfqq[0][act_idx];
4912 * with bfqq has also async I/O pending. If so, it
4919 * non-empty waker queue for bfqq, i.e., a queue whose
4920 * I/O needs to be completed for bfqq to receive new
4921 * I/O. This happens, e.g., if bfqq is associated with
4924 * the process associated with bfqq can go on with its
4926 * then bfqq remains empty, and no I/O is dispatched,
4927 * until the idle timeout fires for bfqq. This is
4929 * latencies for bfqq, and in a severe loss of total
4936 * cause any delay to bfqq's I/O. On the contrary,
4937 * next bfqq's I/O is brought forward dramatically,
4941 * by bfqq, and currently with pending I/O. Such a
4942 * woken queue does not steal bandwidth from bfqq,
4943 * because it remains soon without I/O if bfqq is not
4945 * bandwidth for bfqq if this woken queue has I/O
4946 * dispatched while bfqq is waiting for new I/O.
4948 * The fourth if checks whether bfqq is a queue for
4950 * bfqq delivers more throughput when served without
4952 * if the service times of bfqq's I/O requests both
4954 * easily increased by injection (this happens if bfqq
4960 * limit for bfqq is currently 0).
4966 * that, if I/O is being plugged for bfqq and the
4968 * blocking bfqq's I/O, then the fourth alternative
4982 * i.e., the time before bfqq finally receives new I/O,
4987 icq_to_bic(async_bfqq->next_rq->elv.icq) == bfqq->bic &&
4990 bfqq = async_bfqq;
4991 else if (bfqq->waker_bfqq &&
4992 bfq_bfqq_busy(bfqq->waker_bfqq) &&
4993 bfqq->waker_bfqq->next_rq &&
4994 bfq_serv_to_charge(bfqq->waker_bfqq->next_rq,
4995 bfqq->waker_bfqq) <=
4996 bfq_bfqq_budget_left(bfqq->waker_bfqq)
4998 bfqq = bfqq->waker_bfqq;
5006 bfqq = blocked_bfqq;
5007 else if (!idling_boosts_thr_without_issues(bfqd, bfqq) &&
5008 (bfqq->wr_coeff == 1 || bfqd->wr_busy_queues > 1 ||
5009 !bfq_bfqq_has_short_ttime(bfqq)))
5010 bfqq = bfq_choose_bfqq_for_injection(bfqd);
5012 bfqq = NULL;
5019 bfq_bfqq_expire(bfqd, bfqq, false, reason);
5021 bfqq = bfq_set_in_service_queue(bfqd);
5022 if (bfqq) {
5023 bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
5027 if (bfqq)
5028 bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
5032 return bfqq;
5035 static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
5037 struct bfq_entity *entity = &bfqq->entity;
5039 if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
5040 bfq_log_bfqq(bfqd, bfqq,
5042 jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
5043 jiffies_to_msecs(bfqq->wr_cur_max_time),
5044 bfqq->wr_coeff,
5045 bfqq->entity.weight, bfqq->entity.orig_weight);
5048 bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
5055 if (bfq_bfqq_in_large_burst(bfqq))
5056 bfq_bfqq_end_wr(bfqq);
5057 else if (time_is_before_jiffies(bfqq->last_wr_start_finish +
5058 bfqq->wr_cur_max_time)) {
5059 if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
5060 time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
5070 bfq_bfqq_end_wr(bfqq);
5076 switch_back_to_interactive_wr(bfqq, bfqd);
5077 bfqq->entity.prio_changed = 1;
5080 if (bfqq->wr_coeff > 1 &&
5081 bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time &&
5082 bfqq->service_from_wr > max_service_from_wr) {
5084 bfq_bfqq_end_wr(bfqq);
5095 if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
5101 * Dispatch next request from bfqq.
5104 struct bfq_queue *bfqq)
5106 struct request *rq = bfqq->next_rq;
5109 service_to_charge = bfq_serv_to_charge(rq, bfqq);
5111 bfq_bfqq_served(bfqq, service_to_charge);
5113 if (bfqq == bfqd->in_service_queue && bfqd->wait_dispatch) {
5120 if (bfqq != bfqd->in_service_queue)
5124 * If weight raising has to terminate for bfqq, then next
5125 * function causes an immediate update of bfqq's weight,
5127 * expiration, bfqq will be timestamped as if has never been
5130 * weight-raised queue. This inflates bfqq's timestamps, which
5131 * is beneficial, as bfqq is then more willing to leave the
5134 bfq_update_wr_data(bfqd, bfqq);
5137 * Expire bfqq, pretending that its budget expired, if bfqq
5141 if (bfq_tot_busy_queues(bfqd) > 1 && bfq_class_idle(bfqq))
5142 bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
5163 struct bfq_queue *bfqq = NULL;
5170 bfqq = RQ_BFQQ(rq);
5172 if (bfqq) {
5179 bfqq->dispatched++;
5231 bfqq = bfq_select_queue(bfqd);
5232 if (!bfqq)
5235 rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
5239 bfqd->rq_in_driver[bfqq->actuator_idx]++;
5254 struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL;
5256 if (!idle_timer_disabled && !bfqq)
5260 * rq and bfqq are guaranteed to exist until this function
5267 * bfqq, the same guarantee holds for bfqq too.
5270 * bfqq_group(bfqq) exists as well.
5279 * in_serv_queue. Thus in_serv_queue == bfqq, and is
5284 if (bfqq) {
5285 struct bfq_group *bfqg = bfqq_group(bfqq);
5330 * Scheduler lock must be held here. Recall not to use bfqq after calling
5333 void bfq_put_queue(struct bfq_queue *bfqq)
5337 struct bfq_group *bfqg = bfqq_group(bfqq);
5339 bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref);
5341 bfqq->ref--;
5342 if (bfqq->ref)
5345 if (!hlist_unhashed(&bfqq->burst_list_node)) {
5346 hlist_del_init(&bfqq->burst_list_node);
5349 * process associated with bfqq is exiting, and thus
5360 * 1) bfqq is not a merged queue, because, if it is,
5361 * then this free of bfqq is not triggered by the exit
5362 * of the process bfqq is associated with, but exactly
5363 * by the fact that bfqq has just been merged.
5366 * happen in the following case: bfqq is inserted into
5369 * burst list is not the burst list bfqq belonged to
5373 if (bfqq->bic && bfqq->bfqd->burst_size > 0)
5374 bfqq->bfqd->burst_size--;
5378 * bfqq does not exist any longer, so it cannot be woken by
5379 * any other queue, and cannot wake any other queue. Then bfqq
5381 * queue, and all queues in the woken list of bfqq must stop
5383 * should be performed when bfqq remains with no I/O source
5384 * attached to it, which happens before bfqq gets freed. In
5386 * with bfqq exits or gets associated with a different
5387 * queue. However, both events lead to bfqq being freed soon,
5388 * and dangling references would come out only after bfqq gets
5392 /* remove bfqq from woken list */
5393 if (!hlist_unhashed(&bfqq->woken_list_node))
5394 hlist_del_init(&bfqq->woken_list_node);
5397 hlist_for_each_entry_safe(item, n, &bfqq->woken_list,
5403 if (bfqq->bfqd->last_completed_rq_bfqq == bfqq)
5404 bfqq->bfqd->last_completed_rq_bfqq = NULL;
5406 WARN_ON_ONCE(!list_empty(&bfqq->fifo));
5407 WARN_ON_ONCE(!RB_EMPTY_ROOT(&bfqq->sort_list));
5408 WARN_ON_ONCE(bfqq->dispatched);
5410 kmem_cache_free(bfq_pool, bfqq);
5414 static void bfq_put_stable_ref(struct bfq_queue *bfqq)
5416 bfqq->stable_ref--;
5417 bfq_put_queue(bfqq);
5420 void bfq_put_cooperator(struct bfq_queue *bfqq)
5429 __bfqq = bfqq->new_bfqq;
5437 static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
5439 if (bfqq == bfqd->in_service_queue) {
5440 __bfq_bfqq_expire(bfqd, bfqq, BFQQE_BUDGET_TIMEOUT);
5444 bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
5446 bfq_put_cooperator(bfqq);
5448 bfq_release_process_ref(bfqd, bfqq);
5454 struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync, actuator_idx);
5457 if (bfqq)
5458 bfqd = bfqq->bfqd; /* NULL if scheduler already exited */
5460 if (bfqq && bfqd) {
5462 bfq_exit_bfqq(bfqd, bfqq);
5507 bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
5511 struct bfq_data *bfqd = bfqq->bfqd;
5520 bdi_dev_name(bfqq->bfqd->queue->disk->bdi),
5527 bfqq->new_ioprio = task_nice_ioprio(tsk);
5528 bfqq->new_ioprio_class = task_nice_ioclass(tsk);
5531 bfqq->new_ioprio = IOPRIO_PRIO_LEVEL(bic->ioprio);
5532 bfqq->new_ioprio_class = IOPRIO_CLASS_RT;
5535 bfqq->new_ioprio = IOPRIO_PRIO_LEVEL(bic->ioprio);
5536 bfqq->new_ioprio_class = IOPRIO_CLASS_BE;
5539 bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
5540 bfqq->new_ioprio = IOPRIO_NR_LEVELS - 1;
5544 if (bfqq->new_ioprio >= IOPRIO_NR_LEVELS) {
5546 bfqq->new_ioprio);
5547 bfqq->new_ioprio = IOPRIO_NR_LEVELS - 1;
5550 bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
5551 bfq_log_bfqq(bfqd, bfqq, "new_ioprio %d new_weight %d",
5552 bfqq->new_ioprio, bfqq->entity.new_weight);
5553 bfqq->entity.prio_changed = 1;
5564 struct bfq_queue *bfqq;
5576 bfqq = bic_to_bfqq(bic, false, bfq_actuator_index(bfqd, bio));
5577 if (bfqq) {
5578 struct bfq_queue *old_bfqq = bfqq;
5580 bfqq = bfq_get_queue(bfqd, bio, false, bic, true);
5581 bic_set_bfqq(bic, bfqq, false, bfq_actuator_index(bfqd, bio));
5585 bfqq = bic_to_bfqq(bic, true, bfq_actuator_index(bfqd, bio));
5586 if (bfqq)
5587 bfq_set_next_ioprio_data(bfqq, bic);
5590 static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
5596 bfqq->actuator_idx = act_idx;
5597 RB_CLEAR_NODE(&bfqq->entity.rb_node);
5598 INIT_LIST_HEAD(&bfqq->fifo);
5599 INIT_HLIST_NODE(&bfqq->burst_list_node);
5600 INIT_HLIST_NODE(&bfqq->woken_list_node);
5601 INIT_HLIST_HEAD(&bfqq->woken_list);
5603 bfqq->ref = 0;
5604 bfqq->bfqd = bfqd;
5607 bfq_set_next_ioprio_data(bfqq, bic);
5615 if (!bfq_class_idle(bfqq))
5617 bfq_mark_bfqq_has_short_ttime(bfqq);
5618 bfq_mark_bfqq_sync(bfqq);
5619 bfq_mark_bfqq_just_created(bfqq);
5621 bfq_clear_bfqq_sync(bfqq);
5624 bfqq->ttime.last_end_request = now_ns + 1;
5626 bfqq->creation_time = jiffies;
5628 bfqq->io_start_time = now_ns;
5630 bfq_mark_bfqq_IO_bound(bfqq);
5632 bfqq->pid = pid;
5635 bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
5636 bfqq->budget_timeout = bfq_smallest_from_now();
5638 bfqq->wr_coeff = 1;
5639 bfqq->last_wr_start_finish = jiffies;
5640 bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now();
5641 bfqq->split_time = bfq_smallest_from_now();
5647 * to the current value of bfqq->soft_rt_next_start (see
5649 * soft_rt_next_start to now, to mean that bfqq has consumed
5652 bfqq->soft_rt_next_start = jiffies;
5655 bfqq->seek_history = 1;
5657 bfqq->decrease_time_jif = jiffies;
5680 bfq_do_early_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq,
5686 bfq_setup_merge(bfqq, last_bfqq_created);
5689 return bfqq;
5697 * bfqq->bic must be set too, for
5698 * bfq_merge_bfqqs to correctly save bfqq's
5701 bfqq->bic = bic;
5702 bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
5754 struct bfq_queue *bfqq,
5757 struct bfq_queue **source_bfqq = bfqq->entity.parent ?
5758 &bfqq->entity.parent->last_bfqq_created :
5766 * forward to bfqq. Finally, move also if bfqq belongs to a
5767 * different group than last_bfqq_created, or if bfqq has a
5777 * early merge), in case bfqq might soon prove to be more
5780 * such a drive, not merging bfqq is better for throughput if
5781 * bfqq happens to contain sequential I/O. So, we wait a
5782 * little bit for enough I/O to flow through bfqq. After that,
5789 bfqq->creation_time) ||
5790 bfqq->entity.parent != last_bfqq_created->entity.parent ||
5791 bfqq->ioprio != last_bfqq_created->ioprio ||
5792 bfqq->ioprio_class != last_bfqq_created->ioprio_class ||
5793 bfqq->actuator_idx != last_bfqq_created->actuator_idx)
5794 *source_bfqq = bfqq;
5797 bfqq->creation_time)) {
5801 * bfqq alone may provide no
5803 * merging bfqq. So merge bfqq now.
5805 bfqq = bfq_do_early_stable_merge(bfqd, bfqq,
5821 * Record the bfqq to merge to.
5828 return bfqq;
5840 struct bfq_queue *bfqq;
5848 bfqq = *async_bfqq;
5849 if (bfqq)
5853 bfqq = kmem_cache_alloc_node(bfq_pool,
5857 if (bfqq) {
5858 bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
5860 bfq_init_entity(&bfqq->entity, bfqg);
5861 bfq_log_bfqq(bfqd, bfqq, "allocated");
5863 bfqq = &bfqd->oom_bfqq;
5864 bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
5873 bfqq->ref++; /*
5876 * only if bfqq->bfqg disappears, to
5880 bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
5881 bfqq, bfqq->ref);
5882 *async_bfqq = bfqq;
5886 bfqq->ref++; /* get a process reference to this queue */
5888 if (bfqq != &bfqd->oom_bfqq && is_sync && !respawn)
5889 bfqq = bfq_do_or_sched_stable_merge(bfqd, bfqq, bic);
5890 return bfqq;
5894 struct bfq_queue *bfqq)
5896 struct bfq_ttime *ttime = &bfqq->ttime;
5904 if (bfqq->dispatched || bfq_bfqq_busy(bfqq))
5906 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request;
5916 bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
5919 bfqq->seek_history <<= 1;
5920 bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq);
5922 if (bfqq->wr_coeff > 1 &&
5923 bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
5924 BFQQ_TOTALLY_SEEKY(bfqq)) {
5925 if (time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
5933 bfq_bfqq_end_wr(bfqq);
5940 switch_back_to_interactive_wr(bfqq, bfqd);
5941 bfqq->entity.prio_changed = 1;
5947 struct bfq_queue *bfqq,
5953 * No need to update has_short_ttime if bfqq is async or in
5955 * no device idling is performed for bfqq in this case.
5957 if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) ||
5962 if (time_is_after_eq_jiffies(bfqq->split_time +
5967 * bfqq. Otherwise check average think time to decide whether
5972 (bfq_sample_valid(bfqq->ttime.ttime_samples) &&
5973 bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle>>1))
5976 state_changed = has_short_ttime != bfq_bfqq_has_short_ttime(bfqq);
5979 bfq_mark_bfqq_has_short_ttime(bfqq);
5981 bfq_clear_bfqq_has_short_ttime(bfqq);
5985 * finally computed for bfqq, the inject limit does depend on
5999 * bfqq may have a long think time because of a
6001 * I/O of some other queue may need to be completed for bfqq
6006 * actually in place, then, without injection on bfqq, the
6007 * blocking I/O cannot happen to be served while bfqq is in
6008 * service. As a consequence, if bfqq is granted
6009 * I/O-dispatch-plugging, then bfqq remains empty, and no I/O
6011 * to result in lower bandwidth and higher latencies for bfqq,
6015 * I/O that blocks bfqq to be executed soon, and therefore
6016 * bfqq to receive new I/O soon.
6019 * next think-time sample for bfqq may be very low. This in
6020 * turn may cause bfqq's think time to be deemed
6025 * think time of bfqq to become long again, and therefore the
6028 * states would be to prevent effective injection on bfqq.
6041 * general, with the fact that the think time of bfqq is
6042 * short, because injection may be likely to delay bfqq's I/O
6045 * this special case, because bfqq's low think time is due to
6047 * injection. In this special case, bfqq's I/O does not get
6048 * delayed by injection; on the contrary, bfqq's I/O is
6064 * finally computed for bfqq, freeing the inject limit from
6067 if (state_changed && bfqq->last_serv_time_ns == 0 &&
6068 (time_is_before_eq_jiffies(bfqq->decrease_time_jif +
6071 bfq_reset_inject_limit(bfqd, bfqq);
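The think-time logic above boils down to: sample how long the process waits between the completion of its last request and its next submission, keep a smoothed mean, and treat the queue as having a "short think time" only while that mean stays within half of the configured idle slice. A hedged approximation in standalone C (the kernel keeps a weighted total and sample count rather than this exponential smoothing):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Track how long the process "thinks" between completing its last
 * request and issuing the next one. */
struct ttime {
	uint64_t last_end_ns;   /* completion time of the last request */
	uint64_t mean_ns;       /* smoothed think time */
	unsigned int samples;
};

/* Simple exponential smoothing; an approximation of the real averaging. */
static void ttime_sample(struct ttime *t, uint64_t now_ns)
{
	uint64_t elapsed = now_ns - t->last_end_ns;

	t->mean_ns = t->samples ? (7 * t->mean_ns + elapsed) / 8 : elapsed;
	t->samples++;
}

/* Idling on the queue only pays off if the process usually comes back
 * quickly: here, within half of the configured idle slice. */
static bool has_short_ttime(const struct ttime *t, uint64_t slice_idle_ns)
{
	return t->samples >= 4 && t->mean_ns <= slice_idle_ns / 2;
}

int main(void)
{
	struct ttime t = { .last_end_ns = 0 };
	uint64_t now = 0;

	for (int i = 0; i < 8; i++) {
		now += 100000;              /* 100 us between requests */
		ttime_sample(&t, now);
		t.last_end_ns = now;
	}
	printf("short_ttime=%d\n", has_short_ttime(&t, 8000000));
	return 0;
}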
6075 * Called when a new fs request (rq) is added to bfqq. Check if there's
6078 static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
6082 bfqq->meta_pending++;
6084 bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
6086 if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
6087 bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
6089 bool budget_timeout = bfq_bfqq_budget_timeout(bfqq);
6107 if (small_req && idling_boosts_thr_without_issues(bfqd, bfqq) &&
6118 bfq_clear_bfqq_wait_request(bfqq);
6129 bfq_bfqq_expire(bfqd, bfqq, false,
6134 static void bfqq_request_allocated(struct bfq_queue *bfqq)
6136 struct bfq_entity *entity = &bfqq->entity;
6142 static void bfqq_request_freed(struct bfq_queue *bfqq)
6144 struct bfq_entity *entity = &bfqq->entity;
6153 struct bfq_queue *bfqq = RQ_BFQQ(rq),
6154 *new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true,
6160 * Release the request's reference to the old bfqq
6164 bfqq_request_freed(bfqq);
6168 * issuing this request still points to bfqq
6175 bfq_actuator_index(bfqd, rq->bio)) == bfqq)
6177 bfqq, new_bfqq);
6179 bfq_clear_bfqq_just_created(bfqq);
6182 * release rq reference on bfqq
6184 bfq_put_queue(bfqq);
6186 bfqq = new_bfqq;
6189 bfq_update_io_thinktime(bfqd, bfqq);
6190 bfq_update_has_short_ttime(bfqd, bfqq, RQ_BIC(rq));
6191 bfq_update_io_seektime(bfqd, bfqq, rq);
6193 waiting = bfqq && bfq_bfqq_wait_request(bfqq);
6195 idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq);
6198 list_add_tail(&rq->queuelist, &bfqq->fifo);
6200 bfq_rq_enqueued(bfqd, bfqq, rq);
6207 struct bfq_queue *bfqq,
6211 if (!bfqq)
6215 * bfqq still exists, because it can disappear only after
6222 * bfqq_group(bfqq) exists as well.
6225 bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
6227 bfqg_stats_update_idle_time(bfqq_group(bfqq));
6232 struct bfq_queue *bfqq,
6244 struct bfq_queue *bfqq;
6254 bfqq = bfq_init_rq(rq);
6265 } else if (!bfqq) {
6270 * Update bfqq, because, if a queue merge has occurred
6274 bfqq = RQ_BFQQ(rq);
6291 bfq_update_insert_stats(q, bfqq, idle_timer_disabled,
6310 struct bfq_queue *bfqq = bfqd->in_service_queue;
6332 if (bfqq && bfq_bfqq_has_short_ttime(bfqq) &&
6333 bfqq->dispatched + bfqq->queued[0] + bfqq->queued[1] <
6349 static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
6356 bfqd->rq_in_driver[bfqq->actuator_idx]--;
6358 bfqq->dispatched--;
6360 if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
6367 bfqq->budget_timeout = jiffies;
6369 bfq_del_bfqq_in_groups_with_pending_reqs(bfqq);
6370 bfq_weights_tree_remove(bfqq);
6375 bfqq->ttime.last_end_request = now_ns;
6411 * last_completed_rq_bfqq if bfqq is a shared queue.
6413 if (!bfq_bfqq_coop(bfqq))
6414 bfqd->last_completed_rq_bfqq = bfqq;
6424 * do not compute soft_rt_next_start if bfqq is in interactive
6426 * an explanation). We schedule this delayed update when bfqq
6429 if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
6430 RB_EMPTY_ROOT(&bfqq->sort_list) &&
6431 bfqq->wr_coeff != bfqd->bfq_wr_coeff)
6432 bfqq->soft_rt_next_start =
6433 bfq_bfqq_softrt_next_start(bfqd, bfqq);
6439 if (bfqd->in_service_queue == bfqq) {
6440 if (bfq_bfqq_must_idle(bfqq)) {
6441 if (bfqq->dispatched == 0)
6444 * If we get here, we do not expire bfqq, even
6445 * if bfqq was in budget timeout or had no
6448 * not expiring bfqq is as follows.
6450 * Here bfqq->dispatched > 0 holds, but
6453 * for bfqq before bfqq->dispatched reaches 0,
6454 * bfqq will, however, not be expired on the
6455 * completion event that causes bfqq->dispatch
6457 * bfqq will start enjoying device idling
6460 * But, if we expired bfqq here, bfqq would
6462 * when bfqq->dispatched finally reaches
6463 * zero. This would expose bfqq to violation
6467 } else if (bfq_may_expire_for_budg_timeout(bfqq))
6468 bfq_bfqq_expire(bfqd, bfqq, false,
6470 else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
6471 (bfqq->dispatched == 0 ||
6472 !bfq_better_to_idle(bfqq)))
6473 bfq_bfqq_expire(bfqd, bfqq, false,
6482 * The processes associated with bfqq may happen to generate their
6485 * one process is associated with bfqq and the device is an SSD. It
6486 * results in bfqq becoming often empty while in service. In this
6487 * respect, if BFQ is allowed to switch to another queue when bfqq
6490 * allowed to switch to another queue---because bfqq is sync and
6491 * I/O-dispatch needs to be plugged while bfqq is temporarily
6492 * empty---then, during the service of bfqq, there will be frequent
6493 * "service holes", i.e., time intervals during which bfqq gets empty
6496 * remaining idle. In the end, during the service of bfqq, the device
6498 * of I/O flowing through bfqq.
6504 * both boost throughput and not break bfqq's bandwidth and latency
6506 * inject limit, computed as below. While bfqq is empty, the injection
6513 * bfqq, an I/O request for bfqq that arrives while bfqq is in
6514 * service, and causes bfqq to switch from empty to non-empty. The
6517 * bfqq. The reason for this restriction is that these are the
6524 * request is enqueued into bfqq, to when it is completed. This
6527 * actually injected while bfqq is empty, and that a new request R
6528 * then arrives for bfqq. If the device does start to serve all or
6540 * first request of bfqq, the algorithm measures the total time of the
6545 * total service time of the requests of bfqq. If the baseline has
6556 * time. If the inflation is below a certain threshold, then bfqq
6586 struct bfq_queue *bfqq)
6589 unsigned int old_limit = bfqq->inject_limit;
6591 if (bfqq->last_serv_time_ns > 0 && bfqd->rqs_injected) {
6592 u64 threshold = (bfqq->last_serv_time_ns * 3)>>1;
6595 bfqq->inject_limit--;
6596 bfqq->decrease_time_jif = jiffies;
6599 bfqq->inject_limit++;
6610 * path that handles the completion of a request of bfqq, and,
6614 if ((bfqq->last_serv_time_ns == 0 && bfqd->tot_rq_in_driver == 1) ||
6615 tot_time_ns < bfqq->last_serv_time_ns) {
6616 if (bfqq->last_serv_time_ns == 0) {
6621 bfqq->inject_limit = max_t(unsigned int, 1, old_limit);
6623 bfqq->last_serv_time_ns = tot_time_ns;
6629 * for bfqq. So let's update this value, because it is
6631 * or the spatial locality of the I/O requests in bfqq
6634 bfqq->last_serv_time_ns = tot_time_ns;
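The grow/shrink rule visible in 6591-6634 can be restated compactly: with a valid no-injection baseline, a service time inflated beyond 3/2 of that baseline shrinks the inject limit, otherwise the limit may grow; a cheaper sample (or the first one taken with a single request in flight) refreshes the baseline and unlocks probing with at least one injected request. The sketch below approximates the guard conditions that the listing only hints at.

#include <stdint.h>
#include <stdio.h>

struct inject_state {
	unsigned int limit;        /* how many foreign requests may be injected */
	uint64_t baseline_ns;      /* service time measured with no injection */
};

/*
 * After each measured request: shrink the limit when the observed
 * service time is inflated well beyond the no-injection baseline
 * (3/2 of it, as in the listing), grow it otherwise, and refresh the
 * baseline whenever an even cheaper sample shows up. Guard conditions
 * that the listing elides are simplified away here.
 */
static void update_inject_limit(struct inject_state *s, uint64_t service_ns,
				unsigned int rqs_in_driver)
{
	if (s->baseline_ns) {
		uint64_t threshold = (s->baseline_ns * 3) >> 1;

		if (service_ns > threshold && s->limit > 0)
			s->limit--;
		else if (rqs_in_driver <= s->limit)
			s->limit++;
	}

	/* First usable sample, or a cheaper one: take it as the baseline. */
	if ((s->baseline_ns == 0 && rqs_in_driver == 1) ||
	    service_ns < s->baseline_ns) {
		if (s->baseline_ns == 0 && s->limit == 0)
			s->limit = 1;   /* allow probing with one injection */
		s->baseline_ns = service_ns;
	}
}

int main(void)
{
	struct inject_state s = { 0 };

	update_inject_limit(&s, 200000, 1);   /* baseline sample */
	update_inject_limit(&s, 250000, 1);   /* mild inflation: limit grows */
	update_inject_limit(&s, 900000, 2);   /* heavy inflation: limit shrinks */
	printf("limit=%u baseline=%llu\n", s.limit,
	       (unsigned long long)s.baseline_ns);
	return 0;
}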
6650 struct bfq_queue *bfqq = RQ_BFQQ(rq);
6659 if (!rq->elv.icq || !bfqq)
6662 bfqd = bfqq->bfqd;
6665 bfqg_stats_update_completion(bfqq_group(bfqq),
6673 bfq_update_inject_limit(bfqd, bfqq);
6675 bfq_completed_request(bfqq, bfqd);
6677 bfqq_request_freed(bfqq);
6678 bfq_put_queue(bfqq);
6714 * Removes the association between the current task and bfqq, assuming
6716 * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
6717 * was the last process referring to that bfqq.
6720 bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
6722 bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
6724 if (bfqq_process_refs(bfqq) == 1) {
6725 bfqq->pid = current->pid;
6726 bfq_clear_bfqq_coop(bfqq);
6727 bfq_clear_bfqq_split_coop(bfqq);
6728 return bfqq;
6731 bic_set_bfqq(bic, NULL, true, bfqq->actuator_idx);
6733 bfq_put_cooperator(bfqq);
6735 bfq_release_process_ref(bfqq->bfqd, bfqq);
6746 struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync, act_idx);
6749 if (likely(bfqq && bfqq != &bfqd->oom_bfqq))
6750 return bfqq;
6755 if (bfqq)
6756 bfq_put_queue(bfqq);
6757 bfqq = bfq_get_queue(bfqd, bio, is_sync, bic, split);
6759 bic_set_bfqq(bic, bfqq, is_sync, act_idx);
6763 bfq_mark_bfqq_in_large_burst(bfqq);
6765 bfq_clear_bfqq_in_large_burst(bfqq);
6768 * If bfqq was in the current
6775 * bfqq from the burst list as
6783 * which bfqq was removed on
6785 * cost, if bfqq was in a
6787 * bfqq to the current burst
6795 hlist_add_head(&bfqq->burst_list_node,
6798 bfqq->split_time = jiffies;
6801 return bfqq;
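bfq_get_bfqq_handle_split, as excerpted above, reuses the queue cached in the bic when it is still usable, and otherwise drops it, obtains a replacement and restores the saved large-burst state on it. A simplified userspace sketch of that lookup-or-replace pattern (invented types, no oom fallback or burst list):

#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>

struct fake_queue {
	int ref;
	bool in_large_burst;
	bool is_oom;
};

/* Per-process I/O context: remembers the queue last used and whether
 * that queue sat in a large burst before it was split away. */
struct fake_ioctx {
	struct fake_queue *q;
	bool saved_in_large_burst;
};

static void put_queue(struct fake_queue *q)
{
	if (q && --q->ref == 0 && !q->is_oom)
		free(q);
}

/* Reuse the cached queue when it is still usable; otherwise drop it,
 * allocate a replacement and restore the saved burst state on it. */
static struct fake_queue *get_queue_handle_split(struct fake_ioctx *ic)
{
	struct fake_queue *q = ic->q;

	if (q && !q->is_oom)
		return q;

	put_queue(q);
	q = calloc(1, sizeof(*q));
	if (!q)
		return NULL;                 /* the real code falls back instead */
	q->ref = 1;
	q->in_large_burst = ic->saved_in_large_burst;
	ic->q = q;
	return q;
}

int main(void)
{
	struct fake_ioctx ic = { .saved_in_large_burst = true };
	struct fake_queue *q = get_queue_handle_split(&ic);

	printf("burst=%d\n", q ? q->in_large_burst : -1);
	put_queue(q);
	return 0;
}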
6817 * previously allocated bic/bfqq structs.
6852 struct bfq_queue *bfqq;
6876 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
6881 if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq) &&
6883 struct bfq_queue *old_bfqq = bfqq;
6885 /* Update bic before losing reference to bfqq */
6886 if (bfq_bfqq_in_large_burst(bfqq))
6890 bfqq = bfq_split_bfqq(bic, bfqq);
6893 if (!bfqq) {
6894 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
6897 if (unlikely(bfqq == &bfqd->oom_bfqq))
6903 bfqq->waker_bfqq = old_bfqq->waker_bfqq;
6904 bfqq->tentative_waker_bfqq = NULL;
6913 if (bfqq->waker_bfqq)
6914 hlist_add_head(&bfqq->woken_list_node,
6915 &bfqq->waker_bfqq->woken_list);
6920 bfqq_request_allocated(bfqq);
6921 bfqq->ref++;
6923 bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d",
6924 rq, bfqq, bfqq->ref);
6927 rq->elv.priv[1] = bfqq;
6931 * by only this bic: we can then set bfqq->bic = bic. In
6935 if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
6936 bfqq->bic = bic;
6943 bfq_bfqq_resume_state(bfqq, bfqd, bic,
6949 * Consider bfqq as possibly belonging to a burst of newly
6955 * possible burst bfqq may belong to, then there is no gain
6956 * in considering bfqq as belonging to a burst, and
6957 * therefore in not weight-raising bfqq. See comments on
6961 * occurring when bfqq does not belong to an actual large
6964 * bfqq and its possible companion queues are created. See
6968 if (unlikely(bfq_bfqq_just_created(bfqq) &&
6971 bfq_handle_burst(bfqd, bfqq);
6973 return bfqq;
6977 bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
6985 * Considering that bfqq may be in race, we should firstly check
6986 * whether bfqq is in service before doing something on it. If
6987 * the bfqq in race is not in service, it has already been expired
6991 if (bfqq != bfqd->in_service_queue) {
6996 bfq_clear_bfqq_wait_request(bfqq);
6998 if (bfq_bfqq_budget_timeout(bfqq))
7005 else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
7016 bfq_bfqq_expire(bfqd, bfqq, true, reason);
7031 struct bfq_queue *bfqq = bfqd->in_service_queue;
7041 if (bfqq)
7042 bfq_idle_slice_timer_body(bfqd, bfqq);
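When the idle timer fires, the body above first checks that the queue is still the one in service (the timer can race with expiration), then picks a reason to expire it: the budget timed out, or the queue stayed empty for the whole idle window. A small sketch of that reason selection; the enum names are illustrative, not the scheduler's.

#include <stdbool.h>
#include <stdio.h>

enum expire_reason {
	EXPIRE_NONE,              /* queue already left service, do nothing */
	EXPIRE_BUDGET_TIMEOUT,
	EXPIRE_STAYED_IDLE,       /* the awaited request never arrived */
};

struct timer_view {
	bool still_in_service;    /* the timer may race with expiration */
	bool budget_timed_out;
	unsigned int queued;      /* pending requests of the queue */
};

/*
 * Mirror of the decision order in the excerpt: bail out if the queue
 * is no longer in service, otherwise prefer the budget-timeout reason,
 * and fall back to "stayed idle" when the queue is still empty.
 */
static enum expire_reason idle_timer_reason(const struct timer_view *v)
{
	if (!v->still_in_service)
		return EXPIRE_NONE;
	if (v->budget_timed_out)
		return EXPIRE_BUDGET_TIMEOUT;
	if (v->queued == 0)
		return EXPIRE_STAYED_IDLE;
	return EXPIRE_NONE;
}

int main(void)
{
	struct timer_view v = { .still_in_service = true, .queued = 0 };

	printf("reason=%d\n", idle_timer_reason(&v));
	return 0;
}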
7050 struct bfq_queue *bfqq = *bfqq_ptr;
7052 bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
7053 if (bfqq) {
7054 bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
7056 bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
7057 bfqq, bfqq->ref);
7058 bfq_put_queue(bfqq);
7141 struct bfq_queue *bfqq, *n;
7147 list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
7148 bfq_deactivate_bfqq(bfqd, bfqq, false, false);
7215 * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.