Lines Matching refs:bfqq

136 void bfq_mark_bfqq_##name(struct bfq_queue *bfqq)			\
138 __set_bit(BFQQF_##name, &(bfqq)->flags); \
140 void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \
142 __clear_bit(BFQQF_##name, &(bfqq)->flags); \
144 int bfq_bfqq_##name(const struct bfq_queue *bfqq) \
146 return test_bit(BFQQF_##name, &(bfqq)->flags); \
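
The four lines above come from the per-queue flag-accessor macro: one expansion emits a mark, a clear, and a test helper for each flag bit. Below is a minimal userspace sketch of the same pattern, assuming a simplified struct bfq_queue and plain bit arithmetic in place of __set_bit/__clear_bit/test_bit; the BFQQF_ values are placeholders chosen for illustration.

#include <stdbool.h>

/* Simplified stand-in for struct bfq_queue: only the flags word matters here. */
struct bfq_queue {
	unsigned long flags;
};

/* Placeholder flag indices mirroring the BFQQF_ enum referenced above. */
enum bfqq_state_flags {
	BFQQF_busy,
	BFQQF_IO_bound,
};

/*
 * Same shape as the macro at lines 136-146: one expansion emits a
 * mark/clear/test triplet for a flag. Plain bit arithmetic replaces
 * __set_bit/__clear_bit/test_bit in this sketch.
 */
#define BFQ_BFQQ_FNS(name)						\
static void bfq_mark_bfqq_##name(struct bfq_queue *bfqq)		\
{									\
	bfqq->flags |= 1UL << BFQQF_##name;				\
}									\
static void bfq_clear_bfqq_##name(struct bfq_queue *bfqq)		\
{									\
	bfqq->flags &= ~(1UL << BFQQF_##name);				\
}									\
static bool bfq_bfqq_##name(const struct bfq_queue *bfqq)		\
{									\
	return bfqq->flags & (1UL << BFQQF_##name);			\
}

BFQ_BFQQ_FNS(busy)

int main(void)
{
	struct bfq_queue q = { 0 };

	bfq_mark_bfqq_busy(&q);
	if (!bfq_bfqq_busy(&q))
		return 1;
	bfq_clear_bfqq_busy(&q);
	return bfq_bfqq_busy(&q) ? 1 : 0;
}

Each bfq_mark_bfqq_*, bfq_clear_bfqq_* and bfq_bfqq_* call in the rest of the listing is one such expansion.
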
237 #define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 19)
245 #define BFQQ_TOTALLY_SEEKY(bfqq) (bfqq->seek_history == -1)
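
BFQQ_SEEKY() and BFQQ_TOTALLY_SEEKY() classify a queue from its 32-bit seek_history bitmap, into which lines 5247-5248 further down shift one sample per request: more than 19 seeky samples out of the last 32 makes the queue seeky, and a history of all ones (equal to -1 as a 32-bit value) makes it totally seeky. A hedged sketch of that classification, with __builtin_popcount standing in for hweight32 and the sample-recording helper invented for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* 32-bit history, newest sample in bit 0, updated as at lines 5247-5248. */
static uint32_t seek_history;

static void record_request(bool seeky)
{
	seek_history <<= 1;
	seek_history |= seeky;	/* 1 = far from the previous request */
}

/* Same thresholds as BFQQ_SEEKY() / BFQQ_TOTALLY_SEEKY() above. */
static bool queue_is_seeky(void)
{
	return __builtin_popcount(seek_history) > 19;
}

static bool queue_is_totally_seeky(void)
{
	return seek_history == UINT32_MAX;	/* all 32 samples seeky ("== -1") */
}

int main(void)
{
	for (int i = 0; i < 32; i++)
		record_request(i % 3 != 0);	/* 21 of 32 samples seeky */

	printf("seeky=%d totally_seeky=%d\n",
	       queue_is_seeky(), queue_is_totally_seeky());
	return 0;
}
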
371 return bic->bfqq[is_sync];
374 void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
376 struct bfq_queue *old_bfqq = bic->bfqq[is_sync];
378 /* Clear bic pointer if bfqq is detached from this bic */
382 bic->bfqq[is_sync] = bfqq;
438 #define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
564 struct bfq_queue *bfqq = NULL;
572 bfqq = rb_entry(parent, struct bfq_queue, pos_node);
578 if (sector > blk_rq_pos(bfqq->next_rq))
580 else if (sector < blk_rq_pos(bfqq->next_rq))
585 bfqq = NULL;
594 bfqq ? bfqq->pid : 0);
596 return bfqq;
599 static bool bfq_too_late_for_merging(struct bfq_queue *bfqq)
601 return bfqq->service_from_backlogged > 0 &&
602 time_is_before_jiffies(bfqq->first_IO_time +
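
bfq_too_late_for_merging() (lines 599-602) stops queue merging once the queue has both received some service since becoming backlogged and passed a grace period after its first I/O. A sketch of that check using a jiffies-style wrapping time comparison; the grace-period constant and the time helper below are assumptions, not the kernel's names:

#include <stdbool.h>
#include <stdio.h>

#define HZ 250UL
/* Assumed grace period after the first I/O during which merging is allowed. */
static const unsigned long merge_time_limit = HZ / 10;

/* Mirrors time_is_before_jiffies(t): true when t is already in the past. */
static bool time_is_before(unsigned long now, unsigned long t)
{
	return (long)(t - now) < 0;
}

static bool too_late_for_merging(unsigned long now,
				 unsigned long service_from_backlogged,
				 unsigned long first_io_time)
{
	return service_from_backlogged > 0 &&
	       time_is_before(now, first_io_time + merge_time_limit);
}

int main(void)
{
	unsigned long now = 10000;

	/* Some service received and first I/O 100 jiffies ago: too late. */
	printf("%d\n", too_late_for_merging(now, 512, now - 100));
	/* No service since becoming backlogged: merging still allowed. */
	printf("%d\n", too_late_for_merging(now, 0, now - 100));
	return 0;
}
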
615 bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
620 if (bfqq->pos_root) {
621 rb_erase(&bfqq->pos_node, bfqq->pos_root);
622 bfqq->pos_root = NULL;
626 if (bfqq == &bfqd->oom_bfqq)
630 * bfqq cannot be merged any longer (see comments in
631 * bfq_setup_cooperator): no point in adding bfqq into the
634 if (bfq_too_late_for_merging(bfqq))
637 if (bfq_class_idle(bfqq))
639 if (!bfqq->next_rq)
642 bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
643 __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
644 blk_rq_pos(bfqq->next_rq), &parent, &p);
646 rb_link_node(&bfqq->pos_node, parent, p);
647 rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
649 bfqq->pos_root = NULL;
655 * or, as a special case, if bfqq must receive a share of the
657 * queue must receive. If bfqq does sync I/O, then these are the only
658 * two cases where bfqq happens to be guaranteed its share of the
659 * throughput even if I/O dispatching is not plugged when bfqq remains
686 struct bfq_queue *bfqq)
688 bool smallest_weight = bfqq &&
689 bfqq->weight_counter &&
690 bfqq->weight_counter ==
730 void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
733 struct bfq_entity *entity = &bfqq->entity;
749 if (bfqq->weight_counter)
759 bfqq->weight_counter = __counter;
770 bfqq->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
778 * bfqq's weight would have been the only weight making the
780 * however occur when bfqq becomes inactive again (the
783 * if !bfqq->weight_counter.
785 if (unlikely(!bfqq->weight_counter))
788 bfqq->weight_counter->weight = entity->weight;
789 rb_link_node(&bfqq->weight_counter->weights_node, parent, new);
790 rb_insert_color_cached(&bfqq->weight_counter->weights_node, root,
794 bfqq->weight_counter->num_active++;
795 bfqq->ref++;
805 struct bfq_queue *bfqq,
808 if (!bfqq->weight_counter)
811 bfqq->weight_counter->num_active--;
812 if (bfqq->weight_counter->num_active > 0)
815 rb_erase_cached(&bfqq->weight_counter->weights_node, root);
816 kfree(bfqq->weight_counter);
819 bfqq->weight_counter = NULL;
820 bfq_put_queue(bfqq);
824 * Invoke __bfq_weights_tree_remove on bfqq and decrement the number
828 struct bfq_queue *bfqq)
830 struct bfq_entity *entity = bfqq->entity.parent;
854 * that the first leaf descendant bfqq of entity gets
867 * Next function is invoked last, because it causes bfqq to be
868 * freed if the following holds: bfqq is not in service and
869 * has no dispatched request. DO NOT use bfqq after the next
872 __bfq_weights_tree_remove(bfqd, bfqq,
879 static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
884 if (bfq_bfqq_fifo_expire(bfqq))
887 bfq_mark_bfqq_fifo_expire(bfqq);
889 rq = rq_entry_fifo(bfqq->fifo.next);
894 bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
899 struct bfq_queue *bfqq,
907 next = bfq_check_fifo(bfqq, last);
917 rbnext = rb_first(&bfqq->sort_list);
927 struct bfq_queue *bfqq)
929 if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1 ||
930 bfq_asymmetric_scenario(bfqq->bfqd, bfqq))
939 * @bfqq: the queue to update.
948 struct bfq_queue *bfqq)
950 struct bfq_entity *entity = &bfqq->entity;
951 struct request *next_rq = bfqq->next_rq;
957 if (bfqq == bfqd->in_service_queue)
965 max_t(unsigned long, bfqq->max_budget,
966 bfq_serv_to_charge(next_rq, bfqq)),
970 bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
972 bfq_requeue_bfqq(bfqd, bfqq, false);
1010 static void switch_back_to_interactive_wr(struct bfq_queue *bfqq,
1013 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1014 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1015 bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt;
1019 bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
1022 unsigned int old_wr_coeff = bfqq->wr_coeff;
1023 bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);
1026 bfq_mark_bfqq_has_short_ttime(bfqq);
1028 bfq_clear_bfqq_has_short_ttime(bfqq);
1031 bfq_mark_bfqq_IO_bound(bfqq);
1033 bfq_clear_bfqq_IO_bound(bfqq);
1035 bfqq->entity.new_weight = bic->saved_weight;
1036 bfqq->ttime = bic->saved_ttime;
1037 bfqq->wr_coeff = bic->saved_wr_coeff;
1038 bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
1039 bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
1040 bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;
1042 if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
1043 time_is_before_jiffies(bfqq->last_wr_start_finish +
1044 bfqq->wr_cur_max_time))) {
1045 if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
1046 !bfq_bfqq_in_large_burst(bfqq) &&
1047 time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt +
1049 switch_back_to_interactive_wr(bfqq, bfqd);
1051 bfqq->wr_coeff = 1;
1052 bfq_log_bfqq(bfqq->bfqd, bfqq,
1058 bfqq->entity.prio_changed = 1;
1063 if (old_wr_coeff == 1 && bfqq->wr_coeff > 1)
1065 else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1)
1069 static int bfqq_process_refs(struct bfq_queue *bfqq)
1071 return bfqq->ref - bfqq->allocated - bfqq->entity.on_st_or_in_serv -
1072 (bfqq->weight_counter != NULL);
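
bfqq_process_refs() (lines 1069-1072) counts how many processes still reference the queue by subtracting the references BFQ itself holds: one per allocated request, one if the entity is on a service tree or in service, and one if a weight counter is attached. A minimal sketch of that bookkeeping, with the fields repackaged into a small illustrative struct:

#include <stdbool.h>
#include <stdio.h>

/* Only the fields that enter the computation at lines 1071-1072. */
struct bfq_queue_refs {
	int ref;		 /* total references held on the queue */
	int allocated;		 /* references held by allocated requests */
	bool on_st_or_in_serv;	 /* entity on a service tree or in service */
	bool has_weight_counter; /* bfqq->weight_counter != NULL */
};

static int bfqq_process_refs(const struct bfq_queue_refs *q)
{
	return q->ref - q->allocated - q->on_st_or_in_serv - q->has_weight_counter;
}

int main(void)
{
	struct bfq_queue_refs q = { .ref = 4, .allocated = 2,
				    .on_st_or_in_serv = true,
				    .has_weight_counter = false };

	/* 4 - 2 - 1 - 0 = 1: exactly one process still owns the queue. */
	printf("%d\n", bfqq_process_refs(&q));
	return 0;
}
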
1075 /* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
1076 static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1090 hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
1095 bfqd->burst_parent_entity = bfqq->entity.parent;
1098 /* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
1099 static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1101 /* Increment burst size to take into account also bfqq */
1121 bfq_mark_bfqq_in_large_burst(bfqq);
1134 * Burst not yet large: add bfqq to the burst list. Do
1135 * not increment the ref counter for bfqq, because bfqq
1136 * is removed from the burst list before freeing bfqq
1139 hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
1202 * same, possible burst bfqq would belong to), and it implements all
1251 static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1254 * If bfqq is already in the burst list or is part of a large
1258 if (!hlist_unhashed(&bfqq->burst_list_node) ||
1259 bfq_bfqq_in_large_burst(bfqq) ||
1260 time_is_after_eq_jiffies(bfqq->split_time +
1265 * If bfqq's creation happens late enough, or bfqq belongs to
1270 * In this respect, consider the special case where bfqq is
1275 * following condition is true, bfqq will end up being
1277 * happen to contain only bfqq. And this is exactly what has
1278 * to happen, as bfqq may be the first queue of the first
1283 bfqq->entity.parent != bfqd->burst_parent_entity) {
1285 bfq_reset_burst_list(bfqd, bfqq);
1290 * If we get here, then bfqq is being activated shortly after the
1292 * bfqq as belonging to this large burst immediately.
1295 bfq_mark_bfqq_in_large_burst(bfqq);
1301 * reached, but bfqq is being activated shortly after the last
1302 * queue. Then we add bfqq to the burst.
1304 bfq_add_to_burst(bfqd, bfqq);
1307 * At this point, bfqq either has been added to the current
1310 * case, bfqq has become the first queue in the possible new
1317 static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
1319 struct bfq_entity *entity = &bfqq->entity;
1350 * The next function, invoked after the input queue bfqq switches from
1351 * idle to busy, updates the budget of bfqq. The function also tells
1353 * true. The purpose of expiring the in-service queue is to give bfqq
1358 * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has
1359 * expired because it has remained idle. In particular, bfqq may have
1362 * - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling
1366 * - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue
1369 * Even if bfqq has expired for one of the above reasons, the process
1371 * and thus be sensitive to the bandwidth it receives (bfqq may have
1372 * remained idle for other reasons: CPU high load, bfqq not enjoying
1375 * the above two reasons, bfqq has to wait for the service of at least
1377 * bfqq is likely to get a much lower bandwidth or resource time than
1381 * First, the budget and the timestamps of bfqq need to be updated in
1382 * a special way on bfqq reactivation: they need to be updated as if
1383 * bfqq did not remain idle and did not expire. In fact, if they are
1384 * computed as if bfqq expired and remained idle until reactivation,
1385 * then the process associated with bfqq is treated as if, instead of
1386 * being greedy, it stopped issuing requests when bfqq remained idle,
1389 * hole" between bfqq expiration and reactivation. As a consequence,
1392 * bfqq was not expired at all before this reactivation, i.e., it must
1393 * be set to the value of the remaining budget when bfqq was
1395 * value they had the last time bfqq was selected for service, i.e.,
1401 * queue must be expired too, to give bfqq the chance to preempt it
1402 * immediately. In fact, if bfqq has to wait for a full budget of the
1405 * timestamps of bfqq are lower than those of the in-service queue. If
1412 * The last important point is detecting whether bfqq does need this
1414 * process associated with bfqq greedy, and thus allows it to recover
1416 * request (which implies that bfqq expired for one of the above two
1426 * the process associated with bfqq recover a service hole, bfqq may
1429 * bfqq may have to be completed before the one of the in-service
1438 * rescheduled, and bfqq must be scheduled too. This is one of the
1454 struct bfq_queue *bfqq,
1457 struct bfq_entity *entity = &bfqq->entity;
1462 * trying to go on serving bfqq with this same budget: bfqq
1466 if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time &&
1467 bfq_bfqq_budget_left(bfqq) > 0) {
1478 * on expiration if bfqq is empty (see
1486 bfq_bfqq_budget_left(bfqq),
1487 bfqq->max_budget);
1494 * because bfqq would otherwise be charged again for
1507 entity->budget = max_t(unsigned long, bfqq->max_budget,
1508 bfq_serv_to_charge(bfqq->next_rq, bfqq));
1509 bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
1523 struct bfq_queue *bfqq,
1533 bfqq->service_from_wr = 0;
1534 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1535 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1548 bfqq->wr_start_at_switch_to_srt =
1550 bfqq->wr_coeff = bfqd->bfq_wr_coeff *
1552 bfqq->wr_cur_max_time =
1558 * close to bfqq's backlog, so as to reduce the
1565 bfqq->entity.budget = min_t(unsigned long,
1566 bfqq->entity.budget,
1570 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1571 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1573 bfqq->wr_coeff = 1;
1604 if (bfqq->wr_cur_max_time !=
1606 bfqq->wr_start_at_switch_to_srt =
1607 bfqq->last_wr_start_finish;
1609 bfqq->wr_cur_max_time =
1611 bfqq->wr_coeff = bfqd->bfq_wr_coeff *
1614 bfqq->last_wr_start_finish = jiffies;
1620 struct bfq_queue *bfqq)
1622 return bfqq->dispatched == 0 &&
1624 bfqq->budget_timeout +
1630 * Return true if bfqq is in a higher priority class, or has a higher
1633 static bool bfq_bfqq_higher_class_or_weight(struct bfq_queue *bfqq,
1638 if (bfqq->ioprio_class < in_serv_bfqq->ioprio_class)
1641 if (in_serv_bfqq->entity.parent == bfqq->entity.parent) {
1642 bfqq_weight = bfqq->entity.weight;
1645 if (bfqq->entity.parent)
1646 bfqq_weight = bfqq->entity.parent->weight;
1648 bfqq_weight = bfqq->entity.weight;
1659 struct bfq_queue *bfqq,
1666 idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
1673 bfqq->ttime.last_end_request +
1678 * bfqq deserves to be weight-raised if:
1684 in_burst = bfq_bfqq_in_large_burst(bfqq);
1686 !BFQQ_TOTALLY_SEEKY(bfqq) &&
1688 time_is_before_jiffies(bfqq->soft_rt_next_start) &&
1689 bfqq->dispatched == 0;
1692 (bfqq->wr_coeff > 1 ||
1693 (bfq_bfqq_sync(bfqq) &&
1694 bfqq->bic && (*interactive || soft_rt)));
1697 * Using the last flag, update budget and check whether bfqq
1701 bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
1705 * If bfqq happened to be activated in a burst, but has been
1708 * I/O associated with bfqq is finished. So bfqq does not need
1710 * anymore. Accordingly, we reset bfqq's in_large_burst flag
1711 * if set, and remove bfqq from the burst list if it's
1713 * that bfqq does not need to belong to the burst list any
1714 * more does not invalidate the fact that bfqq was created in
1717 if (likely(!bfq_bfqq_just_created(bfqq)) &&
1720 bfqq->budget_timeout +
1722 hlist_del_init(&bfqq->burst_list_node);
1723 bfq_clear_bfqq_in_large_burst(bfqq);
1726 bfq_clear_bfqq_just_created(bfqq);
1729 if (!bfq_bfqq_IO_bound(bfqq)) {
1731 bfqq->requests_within_timer++;
1732 if (bfqq->requests_within_timer >=
1734 bfq_mark_bfqq_IO_bound(bfqq);
1736 bfqq->requests_within_timer = 0;
1740 if (unlikely(time_is_after_jiffies(bfqq->split_time)))
1742 bfqq->split_time =
1745 if (time_is_before_jiffies(bfqq->split_time +
1747 bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
1754 if (old_wr_coeff != bfqq->wr_coeff)
1755 bfqq->entity.prio_changed = 1;
1759 bfqq->last_idle_bklogged = jiffies;
1760 bfqq->service_from_backlogged = 0;
1761 bfq_clear_bfqq_softrt_update(bfqq);
1763 bfq_add_bfqq_busy(bfqd, bfqq);
1768 * cases. The first is that bfqq has to recover a service
1771 * bfqq_wants_to_preempt is true. However, if bfqq does not
1772 * carry time-critical I/O, then bfqq's bandwidth is less
1775 * bfqq is at least as weight-raised, i.e., at least as time
1778 * The second case is that bfqq is in a higher priority class,
1781 * bfqq does not start to be served immediately, the resulting
1782 * delay for bfqq's I/O is however lower or much lower than
1783 * the ideal completion time to be guaranteed to bfqq's I/O.
1786 * the timestamps of both bfqq and of the in-service queue,
1787 * bfqq actually is the next queue to serve. So, to reduce
1791 * simple, necessary condition for bfqq to be the next queue
1799 bfqq->wr_coeff >= bfqd->in_service_queue->wr_coeff) ||
1800 bfq_bfqq_higher_class_or_weight(bfqq, bfqd->in_service_queue)) &&
1807 struct bfq_queue *bfqq)
1810 bfqq->last_serv_time_ns = 0;
1819 * If bfqq has a short think time, then start by setting the
1822 * of bfqq, and therefore, if one request was injected when
1823 * bfqq remains empty, this injected request might delay the
1824 * service of the next I/O request for bfqq significantly. In
1825 * case bfqq can actually tolerate some injection, then the
1827 * lucky circumstance holds exactly because bfqq has a short
1836 * left to 1 even if the think time is short: bfqq's I/O is
1837 * synchronized with that of some other queue, i.e., bfqq may
1840 * blocking I/O to be served while bfqq is in service. And
1841 * this is very convenient both for bfqq and for overall
1845 * On the opposite end, if bfqq has a long think time, then
1849 * latency of bfqq's requests, as the service time of a single
1850 * request is likely to be lower than the think time of bfqq;
1851 * b) on the downside, after becoming empty, bfqq is likely to
1858 * occurs with bfqq. On the downside, this proactive step
1864 if (bfq_bfqq_has_short_ttime(bfqq))
1865 bfqq->inject_limit = 0;
1867 bfqq->inject_limit = 1;
1869 bfqq->decrease_time_jif = jiffies;
1874 struct bfq_queue *bfqq = RQ_BFQQ(rq);
1875 struct bfq_data *bfqd = bfqq->bfqd;
1877 unsigned int old_wr_coeff = bfqq->wr_coeff;
1880 bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
1881 bfqq->queued[rq_is_sync(rq)]++;
1884 if (RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_sync(bfqq)) {
1886 * Detect whether bfqq's I/O seems synchronized with
1887 * that of some other queue, i.e., whether bfqq, after
1891 * queue, and we assume, for simplicity, that bfqq may
1898 * for bfqq. In addition to boosting throughput, this
1899 * unblocks bfqq's I/O, thereby improving bandwidth
1900 * and latency for bfqq. Note that these same results
1907 * queue Q is deemed as a waker queue for bfqq if, for
1908 * two consecutive times, bfqq happens to become non
1913 * bfq_bfqq_has_waker(bfqq) is set to confirm that Q
1914 * is a waker queue for bfqq. These detection steps
1915 * are performed only if bfqq has a long think time,
1916 * so as to make it more likely that bfqq's I/O is
1927 * blocked by synchronization, bfqq has a long think
1928 * time. This implies that bfqq's inject limit is at
1933 * for bfqq. This triggers the first step of the
1937 * I/O-plugging interval for bfqq.
1940 !bfq_bfqq_has_short_ttime(bfqq) &&
1943 if (bfqd->last_completed_rq_bfqq != bfqq &&
1945 bfqq->waker_bfqq) {
1952 bfqq->waker_bfqq = bfqd->last_completed_rq_bfqq;
1956 * bfqq->waker_bfqq must be reset. To
1966 * In addition, if bfqq is already in
1970 * queue, bfqq must be removed from
1974 if (!hlist_unhashed(&bfqq->woken_list_node))
1975 hlist_del_init(&bfqq->woken_list_node);
1976 hlist_add_head(&bfqq->woken_list_node,
1979 bfq_clear_bfqq_has_waker(bfqq);
1981 bfqq->waker_bfqq &&
1982 !bfq_bfqq_has_waker(bfqq)) {
1987 bfq_mark_bfqq_has_waker(bfqq);
1997 if (time_is_before_eq_jiffies(bfqq->decrease_time_jif +
1999 bfq_reset_inject_limit(bfqd, bfqq);
2005 * - bfqq is in service, because the total service
2026 if (bfqq == bfqd->in_service_queue &&
2028 (bfqq->last_serv_time_ns > 0 &&
2030 time_is_before_eq_jiffies(bfqq->decrease_time_jif +
2051 * on bfqq before rq is completed).
2058 elv_rb_add(&bfqq->sort_list, rq);
2063 prev = bfqq->next_rq;
2064 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
2065 bfqq->next_rq = next_rq;
2071 if (unlikely(!bfqd->nonrot_with_queueing && prev != bfqq->next_rq))
2072 bfq_pos_tree_add_move(bfqd, bfqq);
2074 if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
2075 bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
2080 bfqq->last_wr_start_finish +
2082 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
2083 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
2086 bfqq->entity.prio_changed = 1;
2088 if (prev != bfqq->next_rq)
2089 bfq_updated_next_req(bfqd, bfqq);
2096 * . if bfqq is not going to be weight-raised, because, for
2102 * . if bfqq is not weight-raised, because, if bfqq is now
2106 * . if bfqq is interactive, because, regardless of whether
2107 * bfqq is currently weight-raised, the weight-raising
2110 * conditions, if bfqq is already weight-raised)
2112 * last_wr_start_finish has to be updated also if bfqq is soft
2119 (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
2120 bfqq->last_wr_start_finish = jiffies;
2127 struct bfq_queue *bfqq = bfqd->bio_bfqq;
2130 if (bfqq)
2131 return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
2163 struct bfq_queue *bfqq = RQ_BFQQ(rq);
2164 struct bfq_data *bfqd = bfqq->bfqd;
2167 if (bfqq->next_rq == rq) {
2168 bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
2169 bfq_updated_next_req(bfqd, bfqq);
2174 bfqq->queued[sync]--;
2176 elv_rb_del(&bfqq->sort_list, rq);
2182 if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
2183 bfqq->next_rq = NULL;
2185 if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
2186 bfq_del_bfqq_busy(bfqd, bfqq, false);
2188 * bfqq emptied. In normal operation, when
2189 * bfqq is empty, bfqq->entity.service and
2190 * bfqq->entity.budget must contain,
2192 * budget used last time bfqq emptied. These
2194 * this last removal occurred while bfqq is
2196 * reset both bfqq->entity.service and
2197 * bfqq->entity.budget, if bfqq has still a
2200 bfqq->entity.budget = bfqq->entity.service = 0;
2206 if (bfqq->pos_root) {
2207 rb_erase(&bfqq->pos_node, bfqq->pos_root);
2208 bfqq->pos_root = NULL;
2213 bfq_pos_tree_add_move(bfqd, bfqq);
2217 bfqq->meta_pending--;
2286 struct bfq_queue *bfqq = RQ_BFQQ(req);
2290 if (!bfqq)
2293 bfqd = bfqq->bfqd;
2296 elv_rb_del(&bfqq->sort_list, req);
2297 elv_rb_add(&bfqq->sort_list, req);
2299 /* Choose next request to be served for bfqq */
2300 prev = bfqq->next_rq;
2301 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
2303 bfqq->next_rq = next_rq;
2309 if (prev != bfqq->next_rq) {
2310 bfq_updated_next_req(bfqd, bfqq);
2316 bfq_pos_tree_add_move(bfqd, bfqq);
2338 struct bfq_queue *bfqq = RQ_BFQQ(rq),
2341 if (!bfqq)
2353 if (bfqq == next_bfqq &&
2361 if (bfqq->next_rq == next)
2362 bfqq->next_rq = rq;
2364 bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
2375 /* Must be called with bfqq != NULL */
2376 static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
2378 if (bfq_bfqq_busy(bfqq))
2379 bfqq->bfqd->wr_busy_queues--;
2380 bfqq->wr_coeff = 1;
2381 bfqq->wr_cur_max_time = 0;
2382 bfqq->last_wr_start_finish = jiffies;
2387 bfqq->entity.prio_changed = 1;
2405 struct bfq_queue *bfqq;
2409 list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
2410 bfq_bfqq_end_wr(bfqq);
2411 list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
2412 bfq_bfqq_end_wr(bfqq);
2434 struct bfq_queue *bfqq,
2437 struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
2479 struct bfq_queue *bfqq;
2488 bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
2489 if (!bfqq || bfqq == cur_bfqq)
2492 return bfqq;
2496 bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
2503 * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
2512 if (__bfqq == bfqq)
2517 process_refs = bfqq_process_refs(bfqq);
2520 * If the process for the bfqq has gone away, there is no
2531 if (new_bfqq->entity.parent != bfqq->entity.parent)
2534 bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
2544 * We redirect bfqq to new_bfqq and not the opposite, because
2545 * we are in the context of the process owning bfqq, thus we
2557 bfqq->new_bfqq = new_bfqq;
2560 * each time some I/O for bfqq arrives, the process that
2561 * generated that I/O is disassociated from bfqq and
2571 static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
2577 if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
2578 (bfqq->ioprio_class != new_bfqq->ioprio_class))
2586 if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq))
2594 if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq))
2601 * Attempt to schedule a merge of bfqq with the currently in-service
2621 bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
2627 if (bfqq->new_bfqq)
2628 return bfqq->new_bfqq;
2671 * Prevent bfqq from being merged if it has been created too
2681 if (bfq_too_late_for_merging(bfqq))
2684 if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
2693 if (in_service_bfqq && in_service_bfqq != bfqq &&
2697 bfqq->entity.parent == in_service_bfqq->entity.parent &&
2698 bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
2699 new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
2708 new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
2712 bfq_may_be_close_cooperator(bfqq, new_bfqq))
2713 return bfq_setup_merge(bfqq, new_bfqq);
2718 static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
2720 struct bfq_io_cq *bic = bfqq->bic;
2723 * If !bfqq->bic, the queue is already shared or its requests
2730 bic->saved_weight = bfqq->entity.orig_weight;
2731 bic->saved_ttime = bfqq->ttime;
2732 bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq);
2733 bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
2734 bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
2735 bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
2736 if (unlikely(bfq_bfqq_just_created(bfqq) &&
2737 !bfq_bfqq_in_large_burst(bfqq) &&
2738 bfqq->bfqd->low_latency)) {
2740 * bfqq being merged right after being created: bfqq
2745 * to bfqq, so that to avoid that bfqq unjustly fails
2748 bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff;
2750 bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd);
2753 bic->saved_wr_coeff = bfqq->wr_coeff;
2755 bfqq->wr_start_at_switch_to_srt;
2756 bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
2757 bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
2761 void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
2764 * To prevent bfqq's service guarantees from being violated,
2765 * bfqq may be left busy, i.e., queued for service, even if
2767 * details). But, if no process will send requests to bfqq any
2768 * longer, then there is no point in keeping bfqq queued for
2769 * service. In addition, keeping bfqq queued for service, but
2770 * with no process ref any longer, may have caused bfqq to be
2774 if (bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list) &&
2775 bfqq != bfqd->in_service_queue)
2776 bfq_del_bfqq_busy(bfqd, bfqq, false);
2778 bfq_put_queue(bfqq);
2783 struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
2785 bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
2788 bfq_bfqq_save_state(bfqq);
2790 if (bfq_bfqq_IO_bound(bfqq))
2792 bfq_clear_bfqq_IO_bound(bfqq);
2795 * If bfqq is weight-raised, then let new_bfqq inherit
2797 * where bfqq has just been created, but has not yet made it
2799 * bfqq even before bfq_add_request is executed for the first
2800 * time for bfqq). Handling this case would however be very
2803 if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) {
2804 new_bfqq->wr_coeff = bfqq->wr_coeff;
2805 new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time;
2806 new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish;
2808 bfqq->wr_start_at_switch_to_srt;
2814 if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */
2815 bfqq->wr_coeff = 1;
2816 bfqq->entity.prio_changed = 1;
2817 if (bfq_bfqq_busy(bfqq))
2831 * set new_bfqq->bic to NULL. bfqq either:
2832 * - does not belong to any bic any more, and hence bfqq->bic must
2836 * any bic soon and bfqq->bic is already NULL (therefore the next
2850 bfqq->bic = NULL;
2851 bfq_release_process_ref(bfqd, bfqq);
2859 struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;
2868 * Lookup the bfqq that this bio will be queued with. Allow
2871 if (!bfqq)
2878 new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
2881 * bic still points to bfqq, then it has not yet been
2883 * merge between bfqq and new_bfqq can be safely
2885 * and bfqq can be put.
2887 bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
2894 bfqq = new_bfqq;
2902 bfqd->bio_bfqq = bfqq;
2905 return bfqq == RQ_BFQQ(rq);
2915 struct bfq_queue *bfqq)
2919 if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
2922 timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
2926 bfqq->budget_timeout = jiffies +
2931 struct bfq_queue *bfqq)
2933 if (bfqq) {
2934 bfq_clear_bfqq_fifo_expire(bfqq);
2938 if (time_is_before_jiffies(bfqq->last_wr_start_finish) &&
2939 bfqq->wr_coeff > 1 &&
2940 bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
2941 time_is_before_jiffies(bfqq->budget_timeout)) {
2962 * budget_timeout is set to jiffies if bfqq
2966 if (time_after(bfqq->budget_timeout,
2967 bfqq->last_wr_start_finish))
2968 bfqq->last_wr_start_finish +=
2969 jiffies - bfqq->budget_timeout;
2971 bfqq->last_wr_start_finish = jiffies;
2974 bfq_set_budget_timeout(bfqd, bfqq);
2975 bfq_log_bfqq(bfqd, bfqq,
2977 bfqq->entity.budget);
2980 bfqd->in_service_queue = bfqq;
2989 struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
2991 __bfq_set_in_service_queue(bfqd, bfqq);
2992 return bfqq;
2997 struct bfq_queue *bfqq = bfqd->in_service_queue;
3000 bfq_mark_bfqq_wait_request(bfqq);
3018 if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
3019 !bfq_asymmetric_scenario(bfqd, bfqq))
3021 else if (bfqq->wr_coeff > 1)
3029 bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
3282 struct bfq_queue *bfqq = RQ_BFQQ(rq);
3290 * dispatch occur for a non in-service bfqq, this anticipated
3291 * increment prevents two counters related to bfqq->dispatched
3293 * incremented again when the (new) value of bfqq->dispatched
3296 bfqq->dispatched++;
3305 * the process associated with bfqq.
3327 * associated with bfqq must receive a lower or equal
3340 * bfqq.
3344 * that bfqq receives its assigned fraction of the device throughput
3350 * queueing. So, unless bfqq falls in cases where idling also boosts
3382 * conditions (i-a), (i-b) or (ii). So the device is idled for bfqq.
3399 * Not checking condition (ii) evidently exposes bfqq to the
3451 * bfq_weights_tree_add()). Then the fact that bfqq is weight-raised
3453 * below takes into account also the fact that, even if bfqq is being
3462 * belonging to bfqq. If so, I/O dispatching is to be plugged, for the
3464 * non-bfqq's I/O requests before bfqq's ones, thereby delaying the
3465 * arrival of new I/O requests for bfqq (recall that bfqq is sync). If
3466 * I/O-dispatching is not plugged, then, while bfqq remains empty, a
3468 * dispatched too, possibly causing the service of bfqq's I/O to be
3474 * in-flight I/O, and enables bfqq to recover the bandwidth it may
3489 struct bfq_queue *bfqq)
3491 /* No point in idling for bfqq if it won't get requests any longer */
3492 if (unlikely(!bfqq_process_refs(bfqq)))
3495 return (bfqq->wr_coeff > 1 &&
3499 bfqq->dispatched + 4)) ||
3500 bfq_asymmetric_scenario(bfqd, bfqq);
3503 static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
3507 * If this bfqq is shared between multiple processes, check
3512 if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
3513 bfq_mark_bfqq_split_coop(bfqq);
3517 * bfqq. If idling_needed_for_service_guarantees(bfqq) returns
3518 * true, then bfqq's bandwidth would be violated if an
3520 * dispatched while bfqq is waiting for its new I/O to
3522 * expiration caused by a preemption attempt, and if bfqq is
3524 * bfqq if it needs I/O-dispatch plugging, even if it is
3525 * empty. By doing so, bfqq is granted to be served before the
3526 * above queues (provided that bfqq is of course eligible).
3528 if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
3530 idling_needed_for_service_guarantees(bfqd, bfqq))) {
3531 if (bfqq->dispatched == 0)
3538 bfqq->budget_timeout = jiffies;
3540 bfq_del_bfqq_busy(bfqd, bfqq, true);
3542 bfq_requeue_bfqq(bfqd, bfqq, true);
3548 !RB_EMPTY_ROOT(&bfqq->sort_list)))
3549 bfq_pos_tree_add_move(bfqd, bfqq);
3556 * may cause bfqq to be freed. If this happens, the next
3563 * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
3565 * @bfqq: queue to update.
3568 * Handle the feedback on @bfqq budget at queue expiration.
3572 struct bfq_queue *bfqq,
3580 if (bfqq->wr_coeff == 1)
3581 budget = bfqq->max_budget;
3590 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
3591 bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
3592 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
3594 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
3595 bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
3597 if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
3628 if (bfqq->dispatched > 0) /* still outstanding reqs */
3682 * The service needed by bfqq is measured
3683 * quite precisely by bfqq->entity.service.
3684 * Since bfqq does not enjoy device idling,
3685 * bfqq->entity.service is equal to the number
3687 * bfqq requested to read/write before waiting
3691 budget = max_t(int, bfqq->entity.service, min_budget);
3696 } else if (!bfq_bfqq_sync(bfqq)) {
3706 bfqq->max_budget = budget;
3710 bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);
3715 * the finish time of bfqq must be kept in sync with the
3722 next_rq = bfqq->next_rq;
3724 bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
3725 bfq_serv_to_charge(next_rq, bfqq));
3727 bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d",
3729 bfqq->entity.budget);
3733 * Return true if the process associated with bfqq is "slow". The slow
3763 static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
3769 bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */
3771 if (!bfq_bfqq_sync(bfqq))
3812 slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
3815 bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
3869 * (b) Current value of bfqq->soft_rt_next_start. As pointed out
3883 * stored in bfqq->soft_rt_next_start after each invocation of
3885 * bfqq->soft_rt_next_start is constantly used to lower-bound the
3887 * beginning of a low-speed interval, bfqq->soft_rt_next_start is
3914 struct bfq_queue *bfqq)
3916 return max3(bfqq->soft_rt_next_start,
3917 bfqq->last_idle_bklogged +
3918 HZ * bfqq->service_from_backlogged /
3920 jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
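
The max3() at lines 3916-3920 picks the earliest time at which the queue may again qualify as soft real-time: no earlier than the previous estimate, no earlier than the instant by which the service received since the last idle period could have been generated at the maximum soft-rt rate, and at least one idle slice plus a small margin in the future. A sketch of that computation in jiffies; the helper names and the numbers in main() are illustrative only:

#include <stdio.h>

static unsigned long max3ul(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a > b ? a : b;
	return m > c ? m : c;
}

/* Times in jiffies; service in sectors; rate in sectors per second. */
static unsigned long softrt_next_start(unsigned long now,
				       unsigned long prev_next_start,
				       unsigned long last_idle_bklogged,
				       unsigned long service_from_backlogged,
				       unsigned long max_softrt_rate,
				       unsigned long slice_idle_jiffies,
				       unsigned long hz)
{
	return max3ul(prev_next_start,
		      last_idle_bklogged +
			hz * service_from_backlogged / max_softrt_rate,
		      now + slice_idle_jiffies + 4);
}

int main(void)
{
	/* Illustrative numbers: HZ=250, 2-jiffy idle slice, 7000 sectors/s. */
	printf("%lu\n", softrt_next_start(100000, 99000, 99500,
					  700, 7000, 2, 250));
	return 0;
}
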
3926 * @bfqq: the queue to expire.
3930 * If the process associated with bfqq does slow I/O (e.g., because it
3931 * issues random requests), we charge bfqq with the time it has been
3934 * a consequence, bfqq will typically get higher timestamps upon
3937 * end, bfqq receives less service in proportion to how slowly its
3941 * contrast, if the process associated with bfqq is not slow, we
3942 * charge bfqq exactly with the service it has received.
3950 struct bfq_queue *bfqq,
3956 struct bfq_entity *entity = &bfqq->entity;
3961 slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);
3978 if (bfqq->wr_coeff == 1 &&
3981 bfq_bfqq_budget_left(bfqq) >= entity->budget / 3)))
3982 bfq_bfqq_charge_time(bfqd, bfqq, delta);
3986 bfq_clear_bfqq_IO_bound(bfqq);
3988 if (bfqd->low_latency && bfqq->wr_coeff == 1)
3989 bfqq->last_wr_start_finish = jiffies;
3992 RB_EMPTY_ROOT(&bfqq->sort_list)) {
3998 * soft_rt_next_start. And we do it, unless bfqq is in
4000 * latter subcase, for the following reason. bfqq may
4006 * interactive bfqq, and bfqq had received a lot of
4010 * value that, for a very long time, bfqq would be
4019 if (bfqq->dispatched == 0 &&
4020 bfqq->wr_coeff != bfqd->bfq_wr_coeff)
4021 bfqq->soft_rt_next_start =
4022 bfq_bfqq_softrt_next_start(bfqd, bfqq);
4023 else if (bfqq->dispatched > 0) {
4028 bfq_mark_bfqq_softrt_update(bfqq);
4032 bfq_log_bfqq(bfqd, bfqq,
4034 slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq));
4037 * bfqq expired, so no total service time needs to be computed
4048 __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
4049 if (__bfq_bfqq_expire(bfqd, bfqq, reason))
4050 /* bfqq is gone, no more actions on it */
4053 /* mark bfqq as waiting a request only if a bic still points to it */
4054 if (!bfq_bfqq_busy(bfqq) &&
4057 bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
4068 * Differently from what happens with bfqq->entity.service,
4070 * for parent entities. In fact, in case bfqq may have a
4072 * consumed budget, bfqq->entity.service needs to be kept,
4073 * because if bfqq then actually goes on being served using
4074 * the same budget, the last value of bfqq->entity.service is
4075 * needed to properly decrement bfqq->entity.budget by the
4078 * the bubble up of the new value of bfqq->entity.budget will
4080 * even in case bfqq and thus parent entities go on receiving
4093 static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
4095 return time_is_before_eq_jiffies(bfqq->budget_timeout);
4106 static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
4108 bfq_log_bfqq(bfqq->bfqd, bfqq,
4110 bfq_bfqq_wait_request(bfqq),
4111 bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
4112 bfq_bfqq_budget_timeout(bfqq));
4114 return (!bfq_bfqq_wait_request(bfqq) ||
4115 bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
4117 bfq_bfqq_budget_timeout(bfqq);
4121 struct bfq_queue *bfqq)
4128 /* No point in idling for bfqq if it won't get requests any longer */
4129 if (unlikely(!bfqq_process_refs(bfqq)))
4132 bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
4133 bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
4143 * the request pattern for bfqq is I/O-bound and sequential, or
4145 * not NCQ-capable and the request pattern for bfqq is
4181 * bfqq is not weight-raised, this guarantees that the device
4182 * is not idled for bfqq (if, instead, bfqq is weight-raised,
4185 * [1] for details), this behavior causes bfqq, and hence any
4220 static bool bfq_better_to_idle(struct bfq_queue *bfqq)
4222 struct bfq_data *bfqd = bfqq->bfqd;
4225 /* No point in idling for bfqq if it won't get requests any longer */
4226 if (unlikely(!bfqq_process_refs(bfqq)))
4235 * (a) bfqq is async
4236 * (b) bfqq is in the idle io prio class: in this case we do
4240 if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
4241 bfq_class_idle(bfqq))
4245 idling_boosts_thr_without_issues(bfqd, bfqq);
4248 idling_needed_for_service_guarantees(bfqd, bfqq);
4271 static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
4273 return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq);
4286 struct bfq_queue *bfqq, *in_serv_bfqq = bfqd->in_service_queue;
4290 * - bfqq is not weight-raised and therefore does not carry
4293 * - regardless of whether bfqq is weight-raised, bfqq has
4334 list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
4335 if (!RB_EMPTY_ROOT(&bfqq->sort_list) &&
4336 (in_serv_always_inject || bfqq->wr_coeff > 1) &&
4337 bfq_serv_to_charge(bfqq->next_rq, bfqq) <=
4338 bfq_bfqq_budget_left(bfqq)) {
4350 * long to break bfqq's service guarantees. On
4357 blk_rq_sectors(bfqq->next_rq) >=
4365 return bfqq;
4378 struct bfq_queue *bfqq;
4382 bfqq = bfqd->in_service_queue;
4383 if (!bfqq)
4386 bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
4389 * Do not expire bfqq for budget timeout if bfqq may be about
4391 * prevent bfqq from expiring is the same as in the comments
4395 if (bfq_may_expire_for_budg_timeout(bfqq) &&
4396 !bfq_bfqq_must_idle(bfqq))
4406 next_rq = bfqq->next_rq;
4408 * If bfqq has requests queued and it has enough budget left to
4412 if (bfq_serv_to_charge(next_rq, bfqq) >
4413 bfq_bfqq_budget_left(bfqq)) {
4428 if (bfq_bfqq_wait_request(bfqq)) {
4442 bfq_clear_bfqq_wait_request(bfqq);
4457 if (bfq_bfqq_wait_request(bfqq) ||
4458 (bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) {
4460 bfqq->bic && bfqq->bic->bfqq[0] &&
4461 bfq_bfqq_busy(bfqq->bic->bfqq[0]) &&
4462 bfqq->bic->bfqq[0]->next_rq ?
4463 bfqq->bic->bfqq[0] : NULL;
4471 * with bfqq has also async I/O pending. If so, it
4478 * non-empty waker queue for bfqq, i.e., a queue whose
4479 * I/O needs to be completed for bfqq to receive new
4480 * I/O. This happens, e.g., if bfqq is associated with
4483 * the process associated with bfqq can go on with its
4485 * then bfqq remains empty, and no I/O is dispatched,
4486 * until the idle timeout fires for bfqq. This is
4488 * latencies for bfqq, and in a severe loss of total
4495 * cause any delay to bfqq's I/O. On the contrary,
4496 * next bfqq's I/O is brought forward dramatically,
4499 * The third if checks whether bfqq is a queue for
4501 * bfqq delivers more throughput when served without
4503 * if the service times of bfqq's I/O requests both
4505 * easily increased by injection (this happens if bfqq
4511 * limit for bfqq is currently 0).
4517 * that, if I/O is being plugged for bfqq and the
4519 * blocking bfqq's I/O, then the third alternative
4533 * i.e., the time before bfqq finally receives new I/O,
4538 icq_to_bic(async_bfqq->next_rq->elv.icq) == bfqq->bic &&
4541 bfqq = bfqq->bic->bfqq[0];
4542 else if (bfq_bfqq_has_waker(bfqq) &&
4543 bfq_bfqq_busy(bfqq->waker_bfqq) &&
4544 bfqq->waker_bfqq->next_rq &&
4545 bfq_serv_to_charge(bfqq->waker_bfqq->next_rq,
4546 bfqq->waker_bfqq) <=
4547 bfq_bfqq_budget_left(bfqq->waker_bfqq)
4549 bfqq = bfqq->waker_bfqq;
4550 else if (!idling_boosts_thr_without_issues(bfqd, bfqq) &&
4551 (bfqq->wr_coeff == 1 || bfqd->wr_busy_queues > 1 ||
4552 !bfq_bfqq_has_short_ttime(bfqq)))
4553 bfqq = bfq_choose_bfqq_for_injection(bfqd);
4555 bfqq = NULL;
4562 bfq_bfqq_expire(bfqd, bfqq, false, reason);
4564 bfqq = bfq_set_in_service_queue(bfqd);
4565 if (bfqq) {
4566 bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
4570 if (bfqq)
4571 bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
4575 return bfqq;
4578 static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
4580 struct bfq_entity *entity = &bfqq->entity;
4582 if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
4583 bfq_log_bfqq(bfqd, bfqq,
4585 jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
4586 jiffies_to_msecs(bfqq->wr_cur_max_time),
4587 bfqq->wr_coeff,
4588 bfqq->entity.weight, bfqq->entity.orig_weight);
4591 bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
4598 if (bfq_bfqq_in_large_burst(bfqq))
4599 bfq_bfqq_end_wr(bfqq);
4600 else if (time_is_before_jiffies(bfqq->last_wr_start_finish +
4601 bfqq->wr_cur_max_time)) {
4602 if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
4603 time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
4605 bfq_bfqq_end_wr(bfqq);
4607 switch_back_to_interactive_wr(bfqq, bfqd);
4608 bfqq->entity.prio_changed = 1;
4611 if (bfqq->wr_coeff > 1 &&
4612 bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time &&
4613 bfqq->service_from_wr > max_service_from_wr) {
4615 bfq_bfqq_end_wr(bfqq);
4626 if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
4632 * Dispatch next request from bfqq.
4635 struct bfq_queue *bfqq)
4637 struct request *rq = bfqq->next_rq;
4640 service_to_charge = bfq_serv_to_charge(rq, bfqq);
4642 bfq_bfqq_served(bfqq, service_to_charge);
4644 if (bfqq == bfqd->in_service_queue && bfqd->wait_dispatch) {
4651 if (bfqq != bfqd->in_service_queue)
4655 * If weight raising has to terminate for bfqq, then next
4656 * function causes an immediate update of bfqq's weight,
4658 * expiration, bfqq will be timestamped as if has never been
4661 * weight-raised queue. This inflates bfqq's timestamps, which
4662 * is beneficial, as bfqq is then more willing to leave the
4665 bfq_update_wr_data(bfqd, bfqq);
4668 * Expire bfqq, pretending that its budget expired, if bfqq
4672 if (!(bfq_tot_busy_queues(bfqd) > 1 && bfq_class_idle(bfqq)))
4675 bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
4700 struct bfq_queue *bfqq = NULL;
4707 bfqq = RQ_BFQQ(rq);
4709 if (bfqq) {
4716 bfqq->dispatched++;
4768 bfqq = bfq_select_queue(bfqd);
4769 if (!bfqq)
4772 rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
4790 struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL;
4792 if (!idle_timer_disabled && !bfqq)
4796 * rq and bfqq are guaranteed to exist until this function
4803 * bfqq, the same guarantee holds for bfqq too.
4806 * bfqq_group(bfqq) exists as well.
4815 * in_serv_queue. Thus in_serv_queue == bfqq, and is
4820 if (bfqq) {
4821 struct bfq_group *bfqg = bfqq_group(bfqq);
4866 * Scheduler lock must be held here. Recall not to use bfqq after calling
4869 void bfq_put_queue(struct bfq_queue *bfqq)
4873 struct bfq_group *bfqg = bfqq_group(bfqq);
4875 if (bfqq->bfqd)
4876 bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d",
4877 bfqq, bfqq->ref);
4879 bfqq->ref--;
4880 if (bfqq->ref)
4883 if (!hlist_unhashed(&bfqq->burst_list_node)) {
4884 hlist_del_init(&bfqq->burst_list_node);
4887 * process associated with bfqq is exiting, and thus
4898 * 1) bfqq is not a merged queue, because, if it is,
4899 * then this free of bfqq is not triggered by the exit
4900 * of the process bfqq is associated with, but exactly
4901 * by the fact that bfqq has just been merged.
4904 * happen in the following case: bfqq is inserted into
4907 * burst list is not the burst list bfqq belonged to
4911 if (bfqq->bic && bfqq->bfqd->burst_size > 0)
4912 bfqq->bfqd->burst_size--;
4916 * bfqq does not exist any longer, so it cannot be woken by
4917 * any other queue, and cannot wake any other queue. Then bfqq
4919 * queue, and all queues in the woken list of bfqq must stop
4921 * should be performed when bfqq remains with no I/O source
4922 * attached to it, which happens before bfqq gets freed. In
4924 * with bfqq exits or gets associated with a different
4925 * queue. However, both events lead to bfqq being freed soon,
4926 * and dangling references would come out only after bfqq gets
4930 /* remove bfqq from woken list */
4931 if (!hlist_unhashed(&bfqq->woken_list_node))
4932 hlist_del_init(&bfqq->woken_list_node);
4935 hlist_for_each_entry_safe(item, n, &bfqq->woken_list,
4942 if (bfqq->bfqd && bfqq->bfqd->last_completed_rq_bfqq == bfqq)
4943 bfqq->bfqd->last_completed_rq_bfqq = NULL;
4945 kmem_cache_free(bfq_pool, bfqq);
4949 void bfq_put_cooperator(struct bfq_queue *bfqq)
4958 __bfqq = bfqq->new_bfqq;
4960 if (__bfqq == bfqq)
4968 static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
4970 if (bfqq == bfqd->in_service_queue) {
4971 __bfq_bfqq_expire(bfqd, bfqq, BFQQE_BUDGET_TIMEOUT);
4975 bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
4977 bfq_put_cooperator(bfqq);
4979 bfq_release_process_ref(bfqd, bfqq);
4984 struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
4987 if (bfqq)
4988 bfqd = bfqq->bfqd; /* NULL if scheduler already exited */
4990 if (bfqq && bfqd) {
4995 bfq_exit_bfqq(bfqd, bfqq);
5013 bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
5017 struct bfq_data *bfqd = bfqq->bfqd;
5026 bdi_dev_name(bfqq->bfqd->queue->backing_dev_info),
5033 bfqq->new_ioprio = task_nice_ioprio(tsk);
5034 bfqq->new_ioprio_class = task_nice_ioclass(tsk);
5037 bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
5038 bfqq->new_ioprio_class = IOPRIO_CLASS_RT;
5041 bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
5042 bfqq->new_ioprio_class = IOPRIO_CLASS_BE;
5045 bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
5046 bfqq->new_ioprio = 7;
5050 if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
5052 bfqq->new_ioprio);
5053 bfqq->new_ioprio = IOPRIO_BE_NR - 1;
5056 bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
5057 bfqq->entity.prio_changed = 1;
5067 struct bfq_queue *bfqq;
5079 bfqq = bic_to_bfqq(bic, false);
5080 if (bfqq) {
5081 struct bfq_queue *old_bfqq = bfqq;
5083 bfqq = bfq_get_queue(bfqd, bio, false, bic);
5084 bic_set_bfqq(bic, bfqq, false);
5088 bfqq = bic_to_bfqq(bic, true);
5089 if (bfqq)
5090 bfq_set_next_ioprio_data(bfqq, bic);
5093 static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
5096 RB_CLEAR_NODE(&bfqq->entity.rb_node);
5097 INIT_LIST_HEAD(&bfqq->fifo);
5098 INIT_HLIST_NODE(&bfqq->burst_list_node);
5099 INIT_HLIST_NODE(&bfqq->woken_list_node);
5100 INIT_HLIST_HEAD(&bfqq->woken_list);
5102 bfqq->ref = 0;
5103 bfqq->bfqd = bfqd;
5106 bfq_set_next_ioprio_data(bfqq, bic);
5114 if (!bfq_class_idle(bfqq))
5116 bfq_mark_bfqq_has_short_ttime(bfqq);
5117 bfq_mark_bfqq_sync(bfqq);
5118 bfq_mark_bfqq_just_created(bfqq);
5120 bfq_clear_bfqq_sync(bfqq);
5123 bfqq->ttime.last_end_request = ktime_get_ns() + 1;
5125 bfq_mark_bfqq_IO_bound(bfqq);
5127 bfqq->pid = pid;
5130 bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
5131 bfqq->budget_timeout = bfq_smallest_from_now();
5133 bfqq->wr_coeff = 1;
5134 bfqq->last_wr_start_finish = jiffies;
5135 bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now();
5136 bfqq->split_time = bfq_smallest_from_now();
5142 * to the current value of bfqq->soft_rt_next_start (see
5144 * soft_rt_next_start to now, to mean that bfqq has consumed
5147 bfqq->soft_rt_next_start = jiffies;
5150 bfqq->seek_history = 1;
5179 struct bfq_queue *bfqq;
5186 bfqq = *async_bfqq;
5187 if (bfqq)
5191 bfqq = kmem_cache_alloc_node(bfq_pool,
5195 if (bfqq) {
5196 bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
5198 bfq_init_entity(&bfqq->entity, bfqg);
5199 bfq_log_bfqq(bfqd, bfqq, "allocated");
5201 bfqq = &bfqd->oom_bfqq;
5202 bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
5211 bfqq->ref++; /*
5214 * only if bfqq->bfqg disappears, to
5218 bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
5219 bfqq, bfqq->ref);
5220 *async_bfqq = bfqq;
5224 bfqq->ref++; /* get a process reference to this queue */
5225 bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
5226 return bfqq;
5230 struct bfq_queue *bfqq)
5232 struct bfq_ttime *ttime = &bfqq->ttime;
5233 u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request;
5237 ttime->ttime_samples = (7*bfqq->ttime.ttime_samples + 256) / 8;
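
Line 5237 is one step of the fixed-point exponentially weighted average BFQ keeps for a queue's think time: the sample weight decays by 7/8 and gains 256 per completed request. The sketch below pairs it with matching total and mean updates and a cap at twice the idle slice; those companion steps are assumptions reconstructed from the sampled-average structure, not lines shown in this listing:

#include <stdint.h>
#include <stdio.h>

struct ttime_avg {
	uint64_t samples;	/* fixed-point sample weight, not a plain count */
	uint64_t total_ns;
	uint64_t mean_ns;
};

/* One update step; elapsed_ns is the gap since the last completion. */
static void ttime_update(struct ttime_avg *t, uint64_t elapsed_ns,
			 uint64_t slice_idle_ns)
{
	/* Assumed cap: ignore gaps much longer than the idle slice. */
	if (elapsed_ns > 2 * slice_idle_ns)
		elapsed_ns = 2 * slice_idle_ns;

	t->samples = (7 * t->samples + 256) / 8;	/* as at line 5237 */
	t->total_ns = (7 * t->total_ns + 256 * elapsed_ns) / 8;
	t->mean_ns = t->total_ns / t->samples;
}

int main(void)
{
	struct ttime_avg t = { 0, 0, 0 };

	for (int i = 0; i < 8; i++)
		ttime_update(&t, 1000000, 8000000);	/* 1 ms gaps, 8 ms slice */
	printf("mean ~ %llu ns\n", (unsigned long long)t.mean_ns);
	return 0;
}
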
5244 bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
5247 bfqq->seek_history <<= 1;
5248 bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq);
5250 if (bfqq->wr_coeff > 1 &&
5251 bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
5252 BFQQ_TOTALLY_SEEKY(bfqq))
5253 bfq_bfqq_end_wr(bfqq);
5257 struct bfq_queue *bfqq,
5263 * No need to update has_short_ttime if bfqq is async or in
5265 * no device idling is performed for bfqq in this case.
5267 if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) ||
5272 if (time_is_after_eq_jiffies(bfqq->split_time +
5277 * bfqq. Otherwise check average think time to
5281 (bfq_sample_valid(bfqq->ttime.ttime_samples) &&
5282 bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle))
5285 state_changed = has_short_ttime != bfq_bfqq_has_short_ttime(bfqq);
5288 bfq_mark_bfqq_has_short_ttime(bfqq);
5290 bfq_clear_bfqq_has_short_ttime(bfqq);
5294 * finally computed for bfqq, the inject limit does depend on
5308 * bfqq may have a long think time because of a
5310 * I/O of some other queue may need to be completed for bfqq
5315 * actually in place, then, without injection on bfqq, the
5316 * blocking I/O cannot happen to be served while bfqq is in
5317 * service. As a consequence, if bfqq is granted
5318 * I/O-dispatch-plugging, then bfqq remains empty, and no I/O
5320 * to result in lower bandwidth and higher latencies for bfqq,
5324 * I/O that blocks bfqq to be executed soon, and therefore
5325 * bfqq to receive new I/O soon.
5328 * next think-time sample for bfqq may be very low. This in
5329 * turn may cause bfqq's think time to be deemed
5334 * think time of bfqq to become long again, and therefore the
5337 * states would be to prevent effective injection on bfqq.
5350 * general, with the fact that the think time of bfqq is
5351 * short, because injection may be likely to delay bfqq's I/O
5354 * this special case, because bfqq's low think time is due to
5356 * injection. In this special case, bfqq's I/O does not get
5357 * delayed by injection; on the contrary, bfqq's I/O is
5373 * finally computed for bfqq, freeing the inject limit from
5376 if (state_changed && bfqq->last_serv_time_ns == 0 &&
5377 (time_is_before_eq_jiffies(bfqq->decrease_time_jif +
5380 bfq_reset_inject_limit(bfqd, bfqq);
5384 * Called when a new fs request (rq) is added to bfqq. Check if there's
5387 static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
5391 bfqq->meta_pending++;
5393 bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
5395 if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
5396 bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
5398 bool budget_timeout = bfq_bfqq_budget_timeout(bfqq);
5416 if (small_req && idling_boosts_thr_without_issues(bfqd, bfqq) &&
5427 bfq_clear_bfqq_wait_request(bfqq);
5438 bfq_bfqq_expire(bfqd, bfqq, false,
5446 struct bfq_queue *bfqq = RQ_BFQQ(rq),
5447 *new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
5452 * Release the request's reference to the old bfqq
5456 bfqq->allocated--;
5460 * issuing this request still points to bfqq
5466 if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
5468 bfqq, new_bfqq);
5470 bfq_clear_bfqq_just_created(bfqq);
5473 * release rq reference on bfqq
5475 bfq_put_queue(bfqq);
5477 bfqq = new_bfqq;
5480 bfq_update_io_thinktime(bfqd, bfqq);
5481 bfq_update_has_short_ttime(bfqd, bfqq, RQ_BIC(rq));
5482 bfq_update_io_seektime(bfqd, bfqq, rq);
5484 waiting = bfqq && bfq_bfqq_wait_request(bfqq);
5486 idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq);
5489 list_add_tail(&rq->queuelist, &bfqq->fifo);
5491 bfq_rq_enqueued(bfqd, bfqq, rq);
5498 struct bfq_queue *bfqq,
5502 if (!bfqq)
5506 * bfqq still exists, because it can disappear only after
5513 * bfqq_group(bfqq) exists as well.
5516 bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
5518 bfqg_stats_update_idle_time(bfqq_group(bfqq));
5523 struct bfq_queue *bfqq,
5535 struct bfq_queue *bfqq;
5545 bfqq = bfq_init_rq(rq);
5554 if (!bfqq || at_head || blk_rq_is_passthrough(rq)) {
5562 * Update bfqq, because, if a queue merge has occurred
5566 bfqq = RQ_BFQQ(rq);
5584 bfq_update_insert_stats(q, bfqq, idle_timer_disabled,
5603 struct bfq_queue *bfqq = bfqd->in_service_queue;
5625 if (bfqq && bfq_bfqq_has_short_ttime(bfqq) &&
5626 bfqq->dispatched + bfqq->queued[0] + bfqq->queued[1] <
5642 static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
5650 bfqq->dispatched--;
5652 if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
5659 bfqq->budget_timeout = jiffies;
5661 bfq_weights_tree_remove(bfqd, bfqq);
5666 bfqq->ttime.last_end_request = now_ns;
5695 bfqd->last_completed_rq_bfqq = bfqq;
5703 * do not compute soft_rt_next_start if bfqq is in interactive
5705 * an explanation). We schedule this delayed update when bfqq
5708 if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
5709 RB_EMPTY_ROOT(&bfqq->sort_list) &&
5710 bfqq->wr_coeff != bfqd->bfq_wr_coeff)
5711 bfqq->soft_rt_next_start =
5712 bfq_bfqq_softrt_next_start(bfqd, bfqq);
5718 if (bfqd->in_service_queue == bfqq) {
5719 if (bfq_bfqq_must_idle(bfqq)) {
5720 if (bfqq->dispatched == 0)
5723 * If we get here, we do not expire bfqq, even
5724 * if bfqq was in budget timeout or had no
5727 * not expiring bfqq is as follows.
5729 * Here bfqq->dispatched > 0 holds, but
5732 * for bfqq before bfqq->dispatched reaches 0,
5733 * bfqq will, however, not be expired on the
5734 * completion event that causes bfqq->dispatched
5736 * bfqq will start enjoying device idling
5739 * But, if we expired bfqq here, bfqq would
5741 * when bfqq->dispatched finally reaches
5742 * zero. This would expose bfqq to violation
5746 } else if (bfq_may_expire_for_budg_timeout(bfqq))
5747 bfq_bfqq_expire(bfqd, bfqq, false,
5749 else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
5750 (bfqq->dispatched == 0 ||
5751 !bfq_better_to_idle(bfqq)))
5752 bfq_bfqq_expire(bfqd, bfqq, false,
5760 static void bfq_finish_requeue_request_body(struct bfq_queue *bfqq)
5762 bfqq->allocated--;
5764 bfq_put_queue(bfqq);
5768 * The processes associated with bfqq may happen to generate their
5771 * one process is associated with bfqq and the device is an SSD. It
5772 * results in bfqq becoming often empty while in service. In this
5773 * respect, if BFQ is allowed to switch to another queue when bfqq
5776 * allowed to switch to another queue---because bfqq is sync and
5777 * I/O-dispatch needs to be plugged while bfqq is temporarily
5778 * empty---then, during the service of bfqq, there will be frequent
5779 * "service holes", i.e., time intervals during which bfqq gets empty
5782 * remaining idle. In the end, during the service of bfqq, the device
5784 * of I/O flowing through bfqq.
5790 * both boost throughput and not break bfqq's bandwidth and latency
5792 * inject limit, computed as below. While bfqq is empty, the injection
5799 * bfqq, an I/O request for bfqq that arrives while bfqq is in
5800 * service, and causes bfqq to switch from empty to non-empty. The
5803 * bfqq. The reason for this restriction is that these are the
5810 * request is enqueued into bfqq, to when it is completed. This
5813 * actually injected while bfqq is empty, and that a new request R
5814 * then arrives for bfqq. If the device does start to serve all or
5826 * first request of bfqq, the algorithm measures the total time of the
5831 * total service time of the requests of bfqq. If the baseline has
5842 * time. If the inflation is below a certain threshold, then bfqq
5872 struct bfq_queue *bfqq)
5875 unsigned int old_limit = bfqq->inject_limit;
5877 if (bfqq->last_serv_time_ns > 0 && bfqd->rqs_injected) {
5878 u64 threshold = (bfqq->last_serv_time_ns * 3)>>1;
5881 bfqq->inject_limit--;
5882 bfqq->decrease_time_jif = jiffies;
5885 bfqq->inject_limit++;
5896 * path that handles the completion of a request of bfqq, and,
5900 if ((bfqq->last_serv_time_ns == 0 && bfqd->rq_in_driver == 1) ||
5901 tot_time_ns < bfqq->last_serv_time_ns) {
5902 if (bfqq->last_serv_time_ns == 0) {
5907 bfqq->inject_limit = max_t(unsigned int, 1, old_limit);
5909 bfqq->last_serv_time_ns = tot_time_ns;
5915 * for bfqq. So let's update this value, because it is
5917 * or the spatial locality of the I/O requests in bfqq
5920 bfqq->last_serv_time_ns = tot_time_ns;
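
Lines 5872-5920 are the feedback loop on the injection limit: while injection is active, a completion slower than 1.5x the baseline service time (the threshold at line 5878) shrinks the limit, a faster one grows it, and a new, lower total time replaces the baseline itself (line 5920). A sketch of that control loop with simplified branch conditions; the struct and helper names are invented for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct inject_state {
	unsigned int limit;	/* max requests injected while the queue is empty */
	uint64_t baseline_ns;	/* reference total service time (last_serv_time_ns) */
};

/*
 * Called per completed request of the in-service queue; 'injected' says
 * whether other queues' requests were in flight at the same time.
 */
static void update_inject_limit(struct inject_state *s, uint64_t tot_time_ns,
				bool injected)
{
	unsigned int old_limit = s->limit;

	if (s->baseline_ns > 0 && injected) {
		/* 1.5x the baseline, as at line 5878. */
		uint64_t threshold = (s->baseline_ns * 3) >> 1;

		if (tot_time_ns > threshold && s->limit > 0)
			s->limit--;		/* injection is hurting: back off */
		else if (tot_time_ns <= threshold)
			s->limit++;		/* room to inject more */
	}

	/* Adopt a new baseline when none exists yet or service got faster. */
	if (s->baseline_ns == 0 || tot_time_ns < s->baseline_ns) {
		if (s->baseline_ns == 0 && s->limit < 1)
			s->limit = old_limit > 1 ? old_limit : 1;
		s->baseline_ns = tot_time_ns;
	}
}

int main(void)
{
	struct inject_state s = { .limit = 1, .baseline_ns = 0 };

	update_inject_limit(&s, 500000, false);	/* sets the baseline */
	update_inject_limit(&s, 600000, true);	/* below 750000 ns: limit grows */
	update_inject_limit(&s, 900000, true);	/* above threshold: limit shrinks */
	printf("limit=%u baseline=%llu\n", s.limit,
	       (unsigned long long)s.baseline_ns);
	return 0;
}
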
5936 struct bfq_queue *bfqq = RQ_BFQQ(rq);
5945 if (!rq->elv.icq || !bfqq)
5948 bfqd = bfqq->bfqd;
5951 bfqg_stats_update_completion(bfqq_group(bfqq),
5959 bfq_update_inject_limit(bfqd, bfqq);
5961 bfq_completed_request(bfqq, bfqd);
5964 bfq_finish_requeue_request_body(bfqq);
5989 * Removes the association between the current task and bfqq, assuming
5991 * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
5992 * was the last process referring to that bfqq.
5995 bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
5997 bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
5999 if (bfqq_process_refs(bfqq) == 1) {
6000 bfqq->pid = current->pid;
6001 bfq_clear_bfqq_coop(bfqq);
6002 bfq_clear_bfqq_split_coop(bfqq);
6003 return bfqq;
6008 bfq_put_cooperator(bfqq);
6010 bfq_release_process_ref(bfqq->bfqd, bfqq);
6020 struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
6022 if (likely(bfqq && bfqq != &bfqd->oom_bfqq))
6023 return bfqq;
6028 if (bfqq)
6029 bfq_put_queue(bfqq);
6030 bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
6032 bic_set_bfqq(bic, bfqq, is_sync);
6036 bfq_mark_bfqq_in_large_burst(bfqq);
6038 bfq_clear_bfqq_in_large_burst(bfqq);
6041 * If bfqq was in the current
6048 * bfqq from the burst list as
6056 * which bfqq was removed on
6058 * cost, if bfqq was in a
6060 * bfqq to the current burst
6068 hlist_add_head(&bfqq->burst_list_node,
6071 bfqq->split_time = jiffies;
6074 return bfqq;
6088 * previously allocated bic/bfqq structs.
6123 struct bfq_queue *bfqq;
6146 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
6151 if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
6152 bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
6154 /* Update bic before losing reference to bfqq */
6155 if (bfq_bfqq_in_large_burst(bfqq))
6158 bfqq = bfq_split_bfqq(bic, bfqq);
6161 if (!bfqq)
6162 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
6170 bfqq->allocated++;
6171 bfqq->ref++;
6172 bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d",
6173 rq, bfqq, bfqq->ref);
6176 rq->elv.priv[1] = bfqq;
6180 * by only this bic: we can then set bfqq->bic = bic. In
6184 if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
6185 bfqq->bic = bic;
6192 bfq_bfqq_resume_state(bfqq, bfqd, bic,
6198 * Consider bfqq as possibly belonging to a burst of newly
6204 * possible burst bfqq may belong to, then there is no gain
6205 * in considering bfqq as belonging to a burst, and
6206 * therefore in not weight-raising bfqq. See comments on
6210 * occurring when bfqq does not belong to an actual large
6213 * bfqq and its possible companion queues are created. See
6217 if (unlikely(bfq_bfqq_just_created(bfqq) &&
6220 bfq_handle_burst(bfqd, bfqq);
6222 return bfqq;
6226 bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
6234 * Considering that bfqq may be in race, we should firstly check
6235 * whether bfqq is in service before doing something on it. If
6236 * the bfqq in race is not in service, it has already been expired
6240 if (bfqq != bfqd->in_service_queue) {
6245 bfq_clear_bfqq_wait_request(bfqq);
6247 if (bfq_bfqq_budget_timeout(bfqq))
6254 else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
6265 bfq_bfqq_expire(bfqd, bfqq, true, reason);
6280 struct bfq_queue *bfqq = bfqd->in_service_queue;
6290 if (bfqq)
6291 bfq_idle_slice_timer_body(bfqd, bfqq);
6299 struct bfq_queue *bfqq = *bfqq_ptr;
6301 bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
6302 if (bfqq) {
6303 bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
6305 bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
6306 bfqq, bfqq->ref);
6307 bfq_put_queue(bfqq);
6395 struct bfq_queue *bfqq, *n;
6400 list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
6401 bfq_deactivate_bfqq(bfqd, bfqq, false, false);
6460 * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.