Lines matching defs:bfqd (struct bfq_data)
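
The excerpts below all revolve around the per-device state object bfqd, a struct bfq_data, in the BFQ I/O scheduler (apparently block/bfq-iosched.c). As orientation before the listing, here is a minimal, purely illustrative sketch of the three ways the listed lines reach that pointer. The type, field, and helper names (struct bfq_data, elevator_data, bfqq->bfqd, bic_to_bfqd(), bfqd->lock) are taken from the excerpts themselves; the wrapper function is hypothetical and not part of the kernel source.

/* Illustrative sketch only, not a verbatim excerpt from the kernel. */
static void bfqd_lookup_sketch(struct request_queue *q,
			       struct bfq_queue *bfqq,
			       struct bfq_io_cq *bic)
{
	/* 1) From a request queue: bfqd is the elevator's private data. */
	struct bfq_data *bfqd = q->elevator->elevator_data;

	/* 2) From a bfq_queue: each bfqq carries a back-pointer to its device. */
	struct bfq_data *from_bfqq = bfqq->bfqd;

	/* 3) From an I/O context: may be NULL once the scheduler has exited. */
	struct bfq_data *from_bic = bic_to_bfqd(bic);

	/* Per-device state in the listing is typically touched under bfqd->lock. */
	spin_lock_irq(&bfqd->lock);
	/* ... read or update bfqd fields here ... */
	spin_unlock_irq(&bfqd->lock);

	(void)from_bfqq;
	(void)from_bic;
}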

231 #define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \
234 (!blk_queue_nonrot(bfqd->queue) || \
452 * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
474 void bfq_schedule_dispatch(struct bfq_data *bfqd)
476 lockdep_assert_held(&bfqd->lock);
478 if (bfqd->queued != 0) {
479 bfq_log(bfqd, "schedule dispatch");
480 blk_mq_run_hw_queues(bfqd->queue, true);
493 static struct request *bfq_choose_req(struct bfq_data *bfqd,
524 back_max = bfqd->bfq_back_max * 2;
534 d1 = (last - s1) * bfqd->bfq_back_penalty;
541 d2 = (last - s2) * bfqd->bfq_back_penalty;
587 struct bfq_data *bfqd = bfqq->bfqd;
601 spin_lock_irq(&bfqd->lock);
605 spin_unlock_irq(&bfqd->lock);
655 bfq_log_bfqq(bfqq->bfqd, bfqq,
663 spin_unlock_irq(&bfqd->lock);
692 struct bfq_data *bfqd = data->q->elevator->elevator_data;
702 depth = bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(opf)];
703 limit = (limit * depth) >> bfqd->full_depth_shift;
706 for (act_idx = 0; bic && act_idx < bfqd->num_actuators; act_idx++) {
721 bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
722 __func__, bfqd->wr_busy_queues, op_is_sync(opf), depth);
728 bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
761 bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
784 bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
795 if (bfqq == &bfqd->oom_bfqq)
812 __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
854 static bool bfq_asymmetric_scenario(struct bfq_data *bfqd,
861 rb_first_cached(&bfqd->queue_weights_tree),
870 !RB_EMPTY_ROOT(&bfqd->queue_weights_tree.rb_root) &&
871 (bfqd->queue_weights_tree.rb_root.rb_node->rb_left ||
872 bfqd->queue_weights_tree.rb_root.rb_node->rb_right);
875 (bfqd->busy_queues[0] && bfqd->busy_queues[1]) ||
876 (bfqd->busy_queues[0] && bfqd->busy_queues[2]) ||
877 (bfqd->busy_queues[1] && bfqd->busy_queues[2]);
881 || bfqd->num_groups_with_pending_reqs > 1
901 struct rb_root_cached *root = &bfqq->bfqd->queue_weights_tree;
980 root = &bfqq->bfqd->queue_weights_tree;
1011 bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
1015 static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
1039 return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
1047 bfq_asymmetric_scenario(bfqq->bfqd, bfqq))
1055 * @bfqd: the device data the queue belongs to.
1064 static void bfq_updated_next_req(struct bfq_data *bfqd,
1074 if (bfqq == bfqd->in_service_queue)
1087 bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
1089 bfq_requeue_bfqq(bfqd, bfqq, false);
1093 static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
1097 dur = bfqd->rate_dur_prod;
1098 do_div(dur, bfqd->peak_rate);
1125 struct bfq_data *bfqd)
1127 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1128 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1133 bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
1162 if (bfqd->low_latency) {
1175 if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
1178 bfq_wr_duration(bfqd))) {
1179 switch_back_to_interactive_wr(bfqq, bfqd);
1182 bfq_log_bfqq(bfqq->bfqd, bfqq,
1194 bfqd->wr_busy_queues++;
1196 bfqd->wr_busy_queues--;
1207 static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1212 hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
1220 if (bfq_tot_busy_queues(bfqd) == 0) {
1221 hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
1222 bfqd->burst_size = 1;
1224 bfqd->burst_size = 0;
1226 bfqd->burst_parent_entity = bfqq->entity.parent;
1230 static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1233 bfqd->burst_size++;
1235 if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
1243 bfqd->large_burst = true;
1249 hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
1261 hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
1270 hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
1382 static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1412 if (time_is_before_jiffies(bfqd->last_ins_in_burst +
1413 bfqd->bfq_burst_interval) ||
1414 bfqq->entity.parent != bfqd->burst_parent_entity) {
1415 bfqd->large_burst = false;
1416 bfq_reset_burst_list(bfqd, bfqq);
1425 if (bfqd->large_burst) {
1435 bfq_add_to_burst(bfqd, bfqq);
1445 bfqd->last_ins_in_burst = jiffies;
1457 * stored in bfqd, which is dynamically updated according to the
1460 static int bfq_max_budget(struct bfq_data *bfqd)
1462 if (bfqd->budgets_assigned < bfq_stats_min_budgets)
1465 return bfqd->bfq_max_budget;
1472 static int bfq_min_budget(struct bfq_data *bfqd)
1474 if (bfqd->budgets_assigned < bfq_stats_min_budgets)
1477 return bfqd->bfq_max_budget / 32;
1584 static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
1653 static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
1665 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1666 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1681 bfqq->wr_coeff = bfqd->bfq_wr_coeff *
1684 bfqd->bfq_wr_rt_max_time;
1698 2 * bfq_min_budget(bfqd));
1701 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1702 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1736 bfqd->bfq_wr_rt_max_time) {
1741 bfqd->bfq_wr_rt_max_time;
1742 bfqq->wr_coeff = bfqd->bfq_wr_coeff *
1750 static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd,
1756 bfqd->bfq_wr_min_idle_time);
1792 static unsigned int bfq_actuator_index(struct bfq_data *bfqd, struct bio *bio)
1798 if (bfqd->num_actuators == 1)
1804 for (i = 0; i < bfqd->num_actuators; i++) {
1805 if (end >= bfqd->sector[i] &&
1806 end < bfqd->sector[i] + bfqd->nr_sectors[i])
1818 static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
1826 idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
1834 bfqd->bfq_slice_idle * 3;
1835 unsigned int act_idx = bfq_actuator_index(bfqd, rq->bio);
1849 soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
1869 wr_or_deserves_wr = bfqd->low_latency &&
1879 bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
1906 if (bfqd->low_latency) {
1910 jiffies - bfqd->bfq_wr_min_idle_time - 1;
1913 bfqd->bfq_wr_min_idle_time)) {
1914 bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
1975 if (bfqd->in_service_queue &&
1977 bfqq->wr_coeff >= bfqd->in_service_queue->wr_coeff) ||
1978 bfq_bfqq_higher_class_or_weight(bfqq, bfqd->in_service_queue) ||
1979 !bfq_better_to_idle(bfqd->in_service_queue)) &&
1980 next_queue_may_preempt(bfqd))
1981 bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
1985 static void bfq_reset_inject_limit(struct bfq_data *bfqd,
1995 bfqd->waited_rq = NULL;
2132 static void bfq_check_waker(struct bfq_data *bfqd, struct bfq_queue *bfqq,
2137 if (!bfqd->last_completed_rq_bfqq ||
2138 bfqd->last_completed_rq_bfqq == bfqq ||
2140 now_ns - bfqd->last_completion >= 4 * NSEC_PER_MSEC ||
2141 bfqd->last_completed_rq_bfqq == &bfqd->oom_bfqq ||
2142 bfqq == &bfqd->oom_bfqq)
2151 if (bfqd->last_completed_rq_bfqq !=
2154 128 * (u64)bfqd->bfq_slice_idle) {
2161 bfqd->last_completed_rq_bfqq;
2166 bfq_log_bfqq(bfqd, bfqq, "set tentative waker %s", waker_name);
2171 bfqq->waker_bfqq = bfqd->last_completed_rq_bfqq;
2175 bfq_log_bfqq(bfqd, bfqq, "set waker %s", waker_name);
2200 &bfqd->last_completed_rq_bfqq->woken_list);
2207 struct bfq_data *bfqd = bfqq->bfqd;
2213 bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
2216 * Updating of 'bfqd->queued' is protected by 'bfqd->lock', however, it
2219 WRITE_ONCE(bfqd->queued, bfqd->queued + 1);
2222 bfq_check_waker(bfqd, bfqq, now_ns);
2232 bfq_reset_inject_limit(bfqd, bfqq);
2259 if (bfqq == bfqd->in_service_queue &&
2260 (bfqd->tot_rq_in_driver == 0 ||
2262 bfqd->rqs_injected && bfqd->tot_rq_in_driver > 0)) &&
2265 bfqd->last_empty_occupied_ns = ktime_get_ns();
2269 * wait_dispatch will cause bfqd->waited_rq to
2272 bfqd->wait_dispatch = true;
2286 if (bfqd->tot_rq_in_driver == 0)
2287 bfqd->rqs_injected = false;
2300 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
2307 if (unlikely(!bfqd->nonrot_with_queueing && prev != bfqq->next_rq))
2308 bfq_pos_tree_add_move(bfqd, bfqq);
2311 bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
2314 if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
2317 bfqd->bfq_wr_min_inter_arr_async)) {
2318 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
2319 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
2321 bfqd->wr_busy_queues++;
2325 bfq_updated_next_req(bfqd, bfqq);
2354 if (bfqd->low_latency &&
2359 static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
2363 struct bfq_queue *bfqq = bfqd->bio_bfqq;
2384 struct bfq_data *bfqd = bfqq->bfqd;
2388 bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
2389 bfq_updated_next_req(bfqd, bfqq);
2396 * Updating of 'bfqd->queued' is protected by 'bfqd->lock', however, it
2399 WRITE_ONCE(bfqd->queued, bfqd->queued - 1);
2409 if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
2436 if (unlikely(!bfqd->nonrot_with_queueing))
2437 bfq_pos_tree_add_move(bfqd, bfqq);
2448 struct bfq_data *bfqd = q->elevator->elevator_data;
2453 * queue_lock inside the bfqd->lock. We assume that the bic
2455 * bfqd->lock is taken.
2460 spin_lock_irq(&bfqd->lock);
2469 bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf),
2470 bfq_actuator_index(bfqd, bio));
2472 bfqd->bio_bfqq = NULL;
2474 bfqd->bio_bic = bic;
2478 spin_unlock_irq(&bfqd->lock);
2488 struct bfq_data *bfqd = q->elevator->elevator_data;
2491 __rq = bfq_find_rq_fmerge(bfqd, bio, q);
2512 struct bfq_data *bfqd;
2518 bfqd = bfqq->bfqd;
2526 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
2527 bfqd->last_position);
2535 bfq_updated_next_req(bfqd, bfqq);
2540 if (unlikely(!bfqd->nonrot_with_queueing))
2541 bfq_pos_tree_add_move(bfqd, bfqq);
2618 bfqq->bfqd->bfq_wr_rt_max_time)
2622 bfqq->bfqd->wr_busy_queues--;
2633 void bfq_end_wr_async_queues(struct bfq_data *bfqd,
2638 for (k = 0; k < bfqd->num_actuators; k++) {
2648 static void bfq_end_wr(struct bfq_data *bfqd)
2653 spin_lock_irq(&bfqd->lock);
2655 for (i = 0; i < bfqd->num_actuators; i++) {
2656 list_for_each_entry(bfqq, &bfqd->active_list[i], bfqq_list)
2659 list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
2661 bfq_end_wr_async(bfqd);
2663 spin_unlock_irq(&bfqd->lock);
2681 static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
2696 __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
2723 static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd,
2736 bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
2782 bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
2848 static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
2852 bfq_setup_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq,
2861 if (idling_boosts_thr_without_issues(bfqd, bfqq) || proc_ref == 0)
2906 bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
2926 if (unlikely(!bfqd->nonrot_with_queueing)) {
2942 return bfq_setup_stable_merge(bfqd, bfqq,
2985 if (likely(bfqd->nonrot_with_queueing))
3002 if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
3006 if (bfq_tot_busy_queues(bfqd) == 1)
3009 in_service_bfqq = bfqd->in_service_queue;
3012 likely(in_service_bfqq != &bfqd->oom_bfqq) &&
3014 bfqd->in_serv_last_pos) &&
3026 new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
3029 if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) &&
3067 bfqq->bfqd->low_latency)) {
3077 bfqq_data->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff;
3081 bfq_wr_duration(bfqq->bfqd);
3102 else if (cur_bfqq->bfqd && cur_bfqq->bfqd->last_bfqq_created == cur_bfqq)
3103 cur_bfqq->bfqd->last_bfqq_created = new_bfqq;
3106 void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
3120 bfqq != bfqd->in_service_queue)
3129 bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
3132 bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
3180 bfqd->wr_busy_queues++;
3188 bfqd->wr_busy_queues--;
3191 bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
3192 bfqd->wr_busy_queues);
3224 bfq_release_process_ref(bfqd, bfqq);
3230 struct bfq_data *bfqd = q->elevator->elevator_data;
3232 struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;
3251 new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false, bfqd->bio_bic);
3260 bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
3271 * bfqd->bio_bic now points to new_bfqq, and
3275 bfqd->bio_bfqq = bfqq;
3287 static void bfq_set_budget_timeout(struct bfq_data *bfqd,
3292 if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
3297 bfqd->last_budget_start = ktime_get();
3300 bfqd->bfq_timeout * timeout_coeff;
3303 static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
3309 bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8;
3313 bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
3347 bfq_set_budget_timeout(bfqd, bfqq);
3348 bfq_log_bfqq(bfqd, bfqq,
3353 bfqd->in_service_queue = bfqq;
3354 bfqd->in_serv_last_pos = 0;
3360 static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
3362 struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
3364 __bfq_set_in_service_queue(bfqd, bfqq);
3368 static void bfq_arm_slice_timer(struct bfq_data *bfqd)
3370 struct bfq_queue *bfqq = bfqd->in_service_queue;
3380 sl = bfqd->bfq_slice_idle;
3392 !bfq_asymmetric_scenario(bfqd, bfqq))
3397 bfqd->last_idling_start = ktime_get();
3398 bfqd->last_idling_start_jiffies = jiffies;
3400 hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
3412 static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd)
3414 return (u64)bfqd->peak_rate * USEC_PER_MSEC *
3415 jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT;
3423 static void update_thr_responsiveness_params(struct bfq_data *bfqd)
3425 if (bfqd->bfq_user_max_budget == 0) {
3426 bfqd->bfq_max_budget =
3427 bfq_calc_max_budget(bfqd);
3428 bfq_log(bfqd, "new max_budget = %d", bfqd->bfq_max_budget);
3432 static void bfq_reset_rate_computation(struct bfq_data *bfqd,
3436 bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
3437 bfqd->peak_rate_samples = 1;
3438 bfqd->sequential_samples = 0;
3439 bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
3442 bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
3444 bfq_log(bfqd,
3446 bfqd->peak_rate_samples, bfqd->sequential_samples,
3447 bfqd->tot_sectors_dispatched);
3450 static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
3462 if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
3463 bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL)
3472 bfqd->delta_from_first =
3473 max_t(u64, bfqd->delta_from_first,
3474 bfqd->last_completion - bfqd->first_dispatch);
3480 rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT,
3481 div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
3489 if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 &&
3490 rate <= bfqd->peak_rate) ||
3512 * cannot reach 9, because bfqd->sequential_samples cannot
3513 * become equal to bfqd->peak_rate_samples, which, in its
3514 * turn, holds true because bfqd->sequential_samples is not
3517 weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples;
3524 div_u64(weight * bfqd->delta_from_first,
3538 bfqd->peak_rate *= divisor-1;
3539 bfqd->peak_rate /= divisor;
3542 bfqd->peak_rate += rate;
3545 * For a very slow device, bfqd->peak_rate can reach 0 (see
3548 * divisions by zero where bfqd->peak_rate is used as a
3551 bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate);
3553 update_thr_responsiveness_params(bfqd);
3556 bfq_reset_rate_computation(bfqd, rq);
3591 static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
3595 if (bfqd->peak_rate_samples == 0) { /* first dispatch */
3596 bfq_log(bfqd, "update_peak_rate: goto reset, samples %d",
3597 bfqd->peak_rate_samples);
3598 bfq_reset_rate_computation(bfqd, rq);
3614 if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
3615 bfqd->tot_rq_in_driver == 0)
3619 bfqd->peak_rate_samples++;
3621 if ((bfqd->tot_rq_in_driver > 0 ||
3622 now_ns - bfqd->last_completion < BFQ_MIN_TT)
3623 && !BFQ_RQ_SEEKY(bfqd, bfqd->last_position, rq))
3624 bfqd->sequential_samples++;
3626 bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
3629 if (likely(bfqd->peak_rate_samples % 32))
3630 bfqd->last_rq_max_size =
3631 max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
3633 bfqd->last_rq_max_size = blk_rq_sectors(rq);
3635 bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
3638 if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL)
3642 bfq_update_rate_reset(bfqd, rq);
3644 bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
3645 if (RQ_BFQQ(rq) == bfqd->in_service_queue)
3646 bfqd->in_serv_last_pos = bfqd->last_position;
3647 bfqd->last_dispatch = now_ns;
3876 static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd,
3879 int tot_busy_queues = bfq_tot_busy_queues(bfqd);
3886 (bfqd->wr_busy_queues < tot_busy_queues ||
3887 bfqd->tot_rq_in_driver >= bfqq->dispatched + 4)) ||
3888 bfq_asymmetric_scenario(bfqd, bfqq) ||
3892 static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
3919 idling_needed_for_service_guarantees(bfqd, bfqq))) {
3931 bfq_requeue_bfqq(bfqd, bfqq, true);
3936 if (unlikely(!bfqd->nonrot_with_queueing &&
3938 bfq_pos_tree_add_move(bfqd, bfqq);
3948 return __bfq_bfqd_reset_in_service(bfqd);
3953 * @bfqd: device data.
3960 static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
3967 min_budget = bfq_min_budget(bfqd);
3979 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
3981 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
3982 budget, bfq_min_budget(bfqd));
3983 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
3984 bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
4018 budget = min(budget * 2, bfqd->bfq_max_budget);
4033 budget = min(budget * 2, bfqd->bfq_max_budget);
4045 budget = min(budget * 4, bfqd->bfq_max_budget);
4092 budget = bfqd->bfq_max_budget;
4097 if (bfqd->budgets_assigned >= bfq_stats_min_budgets &&
4098 !bfqd->bfq_user_max_budget)
4099 bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);
4116 bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d",
4152 static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
4163 delta_ktime = bfqd->last_idling_start;
4166 delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start);
4171 if (blk_queue_nonrot(bfqd->queue))
4200 slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
4203 bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
4247 * namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We
4251 * bfqd->bfq_slice_idle tends to filter out greedy applications,
4286 * bfqd->bfq_slice_idle:
4288 * higher than bfqd->bfq_slice_idle. This happens, e.g., on slow
4290 * that the approximation, in jiffies, of bfqd->bfq_slice_idle
4296 * reference time interval just bfqd->bfq_slice_idle, but
4297 * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the
4301 static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
4307 bfqd->bfq_wr_max_softrt_rate,
4308 jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
4313 * @bfqd: device owning the queue.
4337 void bfq_bfqq_expire(struct bfq_data *bfqd,
4349 slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, &delta);
4370 bfq_bfqq_charge_time(bfqd, bfqq, delta);
4372 if (bfqd->low_latency && bfqq->wr_coeff == 1)
4375 if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
4391 bfq_bfqq_softrt_next_start(bfqd, bfqq);
4401 bfq_log_bfqq(bfqd, bfqq,
4410 bfqd->rqs_injected = bfqd->wait_dispatch = false;
4411 bfqd->waited_rq = NULL;
4417 __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
4418 if (__bfq_bfqq_expire(bfqd, bfqq, reason))
4477 bfq_log_bfqq(bfqq->bfqd, bfqq,
4489 static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
4493 !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
4523 * particular, happens to be false if bfqd is an NCQ-capable
4527 ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) &&
4565 bfqd->wr_busy_queues == 0;
4591 struct bfq_data *bfqd = bfqq->bfqd;
4598 if (unlikely(bfqd->strict_guarantees))
4609 if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
4614 idling_boosts_thr_without_issues(bfqd, bfqq);
4617 idling_needed_for_service_guarantees(bfqd, bfqq);
4653 bfq_choose_bfqq_for_injection(struct bfq_data *bfqd)
4655 struct bfq_queue *bfqq, *in_serv_bfqq = bfqd->in_service_queue;
4685 time_is_before_eq_jiffies(bfqd->last_idling_start_jiffies +
4686 bfqd->bfq_slice_idle)
4690 if (bfqd->tot_rq_in_driver >= limit)
4705 for (i = 0; i < bfqd->num_actuators; i++) {
4706 list_for_each_entry(bfqq, &bfqd->active_list[i], bfqq_list)
4728 if (blk_queue_nonrot(bfqd->queue) &&
4731 bfqd->tot_rq_in_driver >= 1)
4734 bfqd->rqs_injected = true;
4744 bfq_find_active_bfqq_for_actuator(struct bfq_data *bfqd, int idx)
4748 if (bfqd->in_service_queue &&
4749 bfqd->in_service_queue->actuator_idx == idx)
4750 return bfqd->in_service_queue;
4752 list_for_each_entry(bfqq, &bfqd->active_list[idx], bfqq_list) {
4777 bfq_find_bfqq_for_underused_actuator(struct bfq_data *bfqd)
4781 for (i = 0 ; i < bfqd->num_actuators; i++) {
4782 if (bfqd->rq_in_driver[i] < bfqd->actuator_load_threshold &&
4783 (i == bfqd->num_actuators - 1 ||
4784 bfqd->rq_in_driver[i] < bfqd->rq_in_driver[i+1])) {
4786 bfq_find_active_bfqq_for_actuator(bfqd, i);
4801 static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
4807 bfqq = bfqd->in_service_queue;
4811 bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
4830 inject_bfqq = bfq_find_bfqq_for_underused_actuator(bfqd);
4877 hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
5007 else if (!idling_boosts_thr_without_issues(bfqd, bfqq) &&
5008 (bfqq->wr_coeff == 1 || bfqd->wr_busy_queues > 1 ||
5010 bfqq = bfq_choose_bfqq_for_injection(bfqd);
5019 bfq_bfqq_expire(bfqd, bfqq, false, reason);
5021 bfqq = bfq_set_in_service_queue(bfqd);
5023 bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
5028 bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
5030 bfq_log(bfqd, "select_queue: no queue returned");
5035 static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
5040 bfq_log_bfqq(bfqd, bfqq,
5048 bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
5059 if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
5061 bfq_wr_duration(bfqd))) {
5076 switch_back_to_interactive_wr(bfqq, bfqd);
5081 bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time &&
5103 static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
5113 if (bfqq == bfqd->in_service_queue && bfqd->wait_dispatch) {
5114 bfqd->wait_dispatch = false;
5115 bfqd->waited_rq = rq;
5118 bfq_dispatch_remove(bfqd->queue, rq);
5120 if (bfqq != bfqd->in_service_queue)
5134 bfq_update_wr_data(bfqd, bfqq);
5141 if (bfq_tot_busy_queues(bfqd) > 1 && bfq_class_idle(bfqq))
5142 bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
5149 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
5152 * Avoiding lock: a race on bfqd->queued should cause at
5155 return !list_empty_careful(&bfqd->dispatch) ||
5156 READ_ONCE(bfqd->queued);
5161 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
5165 if (!list_empty(&bfqd->dispatch)) {
5166 rq = list_first_entry(&bfqd->dispatch, struct request,
5210 bfq_log(bfqd, "dispatch requests: %d busy queues",
5211 bfq_tot_busy_queues(bfqd));
5213 if (bfq_tot_busy_queues(bfqd) == 0)
5228 if (bfqd->strict_guarantees && bfqd->tot_rq_in_driver > 0)
5231 bfqq = bfq_select_queue(bfqd);
5235 rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
5239 bfqd->rq_in_driver[bfqq->actuator_idx]++;
5240 bfqd->tot_rq_in_driver++;
5302 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
5307 spin_lock_irq(&bfqd->lock);
5309 in_serv_queue = bfqd->in_service_queue;
5313 if (in_serv_queue == bfqd->in_service_queue) {
5318 spin_unlock_irq(&bfqd->lock);
5339 bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref);
5373 if (bfqq->bic && bfqq->bfqd->burst_size > 0)
5374 bfqq->bfqd->burst_size--;
5403 if (bfqq->bfqd->last_completed_rq_bfqq == bfqq)
5404 bfqq->bfqd->last_completed_rq_bfqq = NULL;
5437 static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
5439 if (bfqq == bfqd->in_service_queue) {
5440 __bfq_bfqq_expire(bfqd, bfqq, BFQQE_BUDGET_TIMEOUT);
5441 bfq_schedule_dispatch(bfqd);
5444 bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
5448 bfq_release_process_ref(bfqd, bfqq);
5455 struct bfq_data *bfqd;
5458 bfqd = bfqq->bfqd; /* NULL if scheduler already exited */
5460 if (bfqq && bfqd) {
5462 bfq_exit_bfqq(bfqd, bfqq);
5469 struct bfq_data *bfqd = bic_to_bfqd(bic);
5473 * If bfqd and thus bfqd->num_actuators is not available any
5482 * bfqd is NULL if scheduler already exited, and in that case
5485 if (bfqd) {
5486 spin_lock_irqsave(&bfqd->lock, flags);
5487 num_actuators = bfqd->num_actuators;
5498 if (bfqd)
5499 spin_unlock_irqrestore(&bfqd->lock, flags);
5511 struct bfq_data *bfqd = bfqq->bfqd;
5513 if (!bfqd)
5520 bdi_dev_name(bfqq->bfqd->queue->disk->bdi),
5551 bfq_log_bfqq(bfqd, bfqq, "new_ioprio %d new_weight %d",
5556 static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
5563 struct bfq_data *bfqd = bic_to_bfqd(bic);
5571 if (unlikely(!bfqd) || likely(bic->ioprio == ioprio))
5576 bfqq = bic_to_bfqq(bic, false, bfq_actuator_index(bfqd, bio));
5580 bfqq = bfq_get_queue(bfqd, bio, false, bic, true);
5581 bic_set_bfqq(bic, bfqq, false, bfq_actuator_index(bfqd, bio));
5582 bfq_release_process_ref(bfqd, old_bfqq);
5585 bfqq = bic_to_bfqq(bic, true, bfq_actuator_index(bfqd, bio));
5590 static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
5604 bfqq->bfqd = bfqd;
5635 bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
5660 static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
5680 bfq_do_early_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq,
5702 bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
5753 static struct bfq_queue *bfq_do_or_sched_stable_merge(struct bfq_data *bfqd,
5759 &bfqd->last_bfqq_created;
5779 * possible only if bfqd is rotational with no queueing. For
5796 bfqd->bfq_burst_interval,
5798 if (likely(bfqd->nonrot_with_queueing))
5805 bfqq = bfq_do_early_stable_merge(bfqd, bfqq,
5832 static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
5843 bfqg = bfq_bio_bfqg(bfqd, bio);
5845 async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
5847 bfq_actuator_index(bfqd, bio));
5855 bfqd->queue->node);
5858 bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
5859 is_sync, bfq_actuator_index(bfqd, bio));
5861 bfq_log_bfqq(bfqd, bfqq, "allocated");
5863 bfqq = &bfqd->oom_bfqq;
5864 bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
5880 bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
5888 if (bfqq != &bfqd->oom_bfqq && is_sync && !respawn)
5889 bfqq = bfq_do_or_sched_stable_merge(bfqd, bfqq, bic);
5893 static void bfq_update_io_thinktime(struct bfq_data *bfqd,
5907 elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle);
5916 bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
5920 bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq);
5923 bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
5926 bfq_wr_duration(bfqd))) {
5940 switch_back_to_interactive_wr(bfqq, bfqd);
5946 static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
5958 bfqd->bfq_slice_idle == 0)
5963 bfqd->bfq_wr_min_idle_time))
5973 bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle>>1))
6071 bfq_reset_inject_limit(bfqd, bfqq);
6078 static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
6086 if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
6107 if (small_req && idling_boosts_thr_without_issues(bfqd, bfqq) &&
6119 hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
6129 bfq_bfqq_expire(bfqd, bfqq, false,
6151 static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
6154 *new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true,
6175 bfq_actuator_index(bfqd, rq->bio)) == bfqq)
6176 bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
6189 bfq_update_io_thinktime(bfqd, bfqq);
6190 bfq_update_has_short_ttime(bfqd, bfqq, RQ_BIC(rq));
6191 bfq_update_io_seektime(bfqd, bfqq, rq);
6197 rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
6200 bfq_rq_enqueued(bfqd, bfqq, rq);
6243 struct bfq_data *bfqd = q->elevator->elevator_data;
6253 spin_lock_irq(&bfqd->lock);
6256 spin_unlock_irq(&bfqd->lock);
6264 list_add(&rq->queuelist, &bfqd->dispatch);
6266 list_add_tail(&rq->queuelist, &bfqd->dispatch);
6268 idle_timer_disabled = __bfq_insert_request(bfqd, rq);
6289 spin_unlock_irq(&bfqd->lock);
6308 static void bfq_update_hw_tag(struct bfq_data *bfqd)
6310 struct bfq_queue *bfqq = bfqd->in_service_queue;
6312 bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
6313 bfqd->tot_rq_in_driver);
6315 if (bfqd->hw_tag == 1)
6324 if (bfqd->tot_rq_in_driver + bfqd->queued <= BFQ_HW_QUEUE_THRESHOLD)
6335 bfqd->tot_rq_in_driver < BFQ_HW_QUEUE_THRESHOLD)
6338 if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
6341 bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
6342 bfqd->max_rq_in_driver = 0;
6343 bfqd->hw_tag_samples = 0;
6345 bfqd->nonrot_with_queueing =
6346 blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag;
6349 static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
6354 bfq_update_hw_tag(bfqd);
6356 bfqd->rq_in_driver[bfqq->actuator_idx]--;
6357 bfqd->tot_rq_in_driver--;
6381 delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
6400 (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us <
6402 bfq_update_rate_reset(bfqd, NULL);
6403 bfqd->last_completion = now_ns;
6414 bfqd->last_completed_rq_bfqq = bfqq;
6416 bfqd->last_completed_rq_bfqq = NULL;
6431 bfqq->wr_coeff != bfqd->bfq_wr_coeff)
6433 bfq_bfqq_softrt_next_start(bfqd, bfqq);
6439 if (bfqd->in_service_queue == bfqq) {
6442 bfq_arm_slice_timer(bfqd);
6468 bfq_bfqq_expire(bfqd, bfqq, false,
6473 bfq_bfqq_expire(bfqd, bfqq, false,
6477 if (!bfqd->tot_rq_in_driver)
6478 bfq_schedule_dispatch(bfqd);
6585 static void bfq_update_inject_limit(struct bfq_data *bfqd,
6588 u64 tot_time_ns = ktime_get_ns() - bfqd->last_empty_occupied_ns;
6591 if (bfqq->last_serv_time_ns > 0 && bfqd->rqs_injected) {
6598 old_limit <= bfqd->max_rq_in_driver)
6608 * NOTE: (bfqd->tot_rq_in_driver == 1) means that there is no I/O
6612 * bfqd->tot_rq_in_driver is decremented in such a code path.
6614 if ((bfqq->last_serv_time_ns == 0 && bfqd->tot_rq_in_driver == 1) ||
6624 } else if (!bfqd->rqs_injected && bfqd->tot_rq_in_driver == 1)
6638 bfqd->waited_rq = NULL;
6639 bfqd->rqs_injected = false;
6651 struct bfq_data *bfqd;
6662 bfqd = bfqq->bfqd;
6670 spin_lock_irqsave(&bfqd->lock, flags);
6672 if (rq == bfqd->waited_rq)
6673 bfq_update_inject_limit(bfqd, bfqq);
6675 bfq_completed_request(bfqq, bfqd);
6680 spin_unlock_irqrestore(&bfqd->lock, flags);
6722 bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
6735 bfq_release_process_ref(bfqq->bfqd, bfqq);
6739 static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
6745 unsigned int act_idx = bfq_actuator_index(bfqd, bio);
6749 if (likely(bfqq && bfqq != &bfqd->oom_bfqq))
6757 bfqq = bfq_get_queue(bfqd, bio, is_sync, bic, split);
6761 if ((bfqq_data->was_in_burst_list && bfqd->large_burst) ||
6796 &bfqd->burst_list);
6849 struct bfq_data *bfqd = q->elevator->elevator_data;
6855 unsigned int a_idx = bfq_actuator_index(bfqd, bio);
6876 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
6894 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
6897 if (unlikely(bfqq == &bfqd->oom_bfqq))
6923 bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d",
6935 if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
6943 bfq_bfqq_resume_state(bfqq, bfqd, bic,
6951 * 1) A burst is actually happening (bfqd->burst_size > 0)
6969 (bfqd->burst_size > 0 ||
6970 bfq_tot_busy_queues(bfqd) == 0)))
6971 bfq_handle_burst(bfqd, bfqq);
6977 bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
6982 spin_lock_irqsave(&bfqd->lock, flags);
6991 if (bfqq != bfqd->in_service_queue) {
6992 spin_unlock_irqrestore(&bfqd->lock, flags);
7016 bfq_bfqq_expire(bfqd, bfqq, true, reason);
7019 bfq_schedule_dispatch(bfqd);
7020 spin_unlock_irqrestore(&bfqd->lock, flags);
7029 struct bfq_data *bfqd = container_of(timer, struct bfq_data,
7031 struct bfq_queue *bfqq = bfqd->in_service_queue;
7042 bfq_idle_slice_timer_body(bfqd, bfqq);
7047 static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
7052 bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
7054 bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
7056 bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
7069 void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
7073 for (k = 0; k < bfqd->num_actuators; k++) {
7076 __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j][k]);
7078 __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq[k]);
7086 static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
7090 bfqd->full_depth_shift = bt->sb.shift;
7102 bfqd->word_depths[0][0] = max(depth >> 1, 1U);
7108 bfqd->word_depths[0][1] = max((depth * 3) >> 2, 1U);
7118 bfqd->word_depths[1][0] = max((depth * 3) >> 4, 1U);
7120 bfqd->word_depths[1][1] = max((depth * 6) >> 4, 1U);
7125 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
7128 bfq_update_depths(bfqd, &tags->bitmap_tags);
7140 struct bfq_data *bfqd = e->elevator_data;
7144 hrtimer_cancel(&bfqd->idle_slice_timer);
7146 spin_lock_irq(&bfqd->lock);
7147 list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
7148 bfq_deactivate_bfqq(bfqd, bfqq, false, false);
7149 spin_unlock_irq(&bfqd->lock);
7151 for (actuator = 0; actuator < bfqd->num_actuators; actuator++)
7152 WARN_ON_ONCE(bfqd->rq_in_driver[actuator]);
7153 WARN_ON_ONCE(bfqd->tot_rq_in_driver);
7155 hrtimer_cancel(&bfqd->idle_slice_timer);
7158 bfqg_and_blkg_put(bfqd->root_group);
7161 blkcg_deactivate_policy(bfqd->queue->disk, &blkcg_policy_bfq);
7163 spin_lock_irq(&bfqd->lock);
7164 bfq_put_async_queues(bfqd, bfqd->root_group);
7165 kfree(bfqd->root_group);
7166 spin_unlock_irq(&bfqd->lock);
7169 blk_stat_disable_accounting(bfqd->queue);
7171 wbt_enable_default(bfqd->queue->disk);
7173 kfree(bfqd);
7177 struct bfq_data *bfqd)
7184 root_group->bfqd = bfqd;
7194 struct bfq_data *bfqd;
7203 bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
7204 if (!bfqd) {
7208 eq->elevator_data = bfqd;
7221 bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0, 0);
7222 bfqd->oom_bfqq.ref++;
7223 bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
7224 bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
7225 bfqd->oom_bfqq.entity.new_weight =
7226 bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);
7229 bfq_clear_bfqq_just_created(&bfqd->oom_bfqq);
7236 bfqd->oom_bfqq.entity.prio_changed = 1;
7238 bfqd->queue = q;
7240 bfqd->num_actuators = 1;
7256 bfqd->num_actuators = ia_ranges->nr_ia_ranges;
7258 for (i = 0; i < bfqd->num_actuators; i++) {
7259 bfqd->sector[i] = ia_ranges->ia_range[i].sector;
7260 bfqd->nr_sectors[i] =
7267 if (bfqd->num_actuators == 1) {
7268 bfqd->sector[0] = 0;
7269 bfqd->nr_sectors[0] = get_capacity(q->disk);
7273 INIT_LIST_HEAD(&bfqd->dispatch);
7275 hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
7277 bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
7279 bfqd->queue_weights_tree = RB_ROOT_CACHED;
7281 bfqd->num_groups_with_pending_reqs = 0;
7284 INIT_LIST_HEAD(&bfqd->active_list[0]);
7285 INIT_LIST_HEAD(&bfqd->active_list[1]);
7286 INIT_LIST_HEAD(&bfqd->idle_list);
7287 INIT_HLIST_HEAD(&bfqd->burst_list);
7289 bfqd->hw_tag = -1;
7290 bfqd->nonrot_with_queueing = blk_queue_nonrot(bfqd->queue);
7292 bfqd->bfq_max_budget = bfq_default_max_budget;
7294 bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
7295 bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
7296 bfqd->bfq_back_max = bfq_back_max;
7297 bfqd->bfq_back_penalty = bfq_back_penalty;
7298 bfqd->bfq_slice_idle = bfq_slice_idle;
7299 bfqd->bfq_timeout = bfq_timeout;
7301 bfqd->bfq_large_burst_thresh = 8;
7302 bfqd->bfq_burst_interval = msecs_to_jiffies(180);
7304 bfqd->low_latency = true;
7309 bfqd->bfq_wr_coeff = 30;
7310 bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
7311 bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
7312 bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
7313 bfqd->bfq_wr_max_softrt_rate = 7000; /*
7319 bfqd->wr_busy_queues = 0;
7325 bfqd->rate_dur_prod = ref_rate[blk_queue_nonrot(bfqd->queue)] *
7326 ref_wr_duration[blk_queue_nonrot(bfqd->queue)];
7327 bfqd->peak_rate = ref_rate[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
7330 bfqd->actuator_load_threshold = 4;
7332 spin_lock_init(&bfqd->lock);
7349 bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
7350 if (!bfqd->root_group)
7352 bfq_init_root_group(bfqd->root_group, bfqd);
7353 bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
7365 kfree(bfqd);
7402 struct bfq_data *bfqd = e->elevator_data; \
7410 SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
7411 SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
7412 SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
7413 SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
7414 SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
7415 SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
7416 SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
7417 SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
7418 SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
7424 struct bfq_data *bfqd = e->elevator_data; \
7429 USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
7436 struct bfq_data *bfqd = e->elevator_data; \
7455 STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
7457 STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
7459 STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
7460 STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
7462 STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
7468 struct bfq_data *bfqd = e->elevator_data; \
7482 USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
7489 struct bfq_data *bfqd = e->elevator_data;
7498 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
7502 bfqd->bfq_max_budget = __data;
7505 bfqd->bfq_user_max_budget = __data;
7517 struct bfq_data *bfqd = e->elevator_data;
7530 bfqd->bfq_timeout = msecs_to_jiffies(__data);
7531 if (bfqd->bfq_user_max_budget == 0)
7532 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
7540 struct bfq_data *bfqd = e->elevator_data;
7550 if (!bfqd->strict_guarantees && __data == 1
7551 && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC)
7552 bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC;
7554 bfqd->strict_guarantees = __data;
7562 struct bfq_data *bfqd = e->elevator_data;
7572 if (__data == 0 && bfqd->low_latency != 0)
7573 bfq_end_wr(bfqd);
7574 bfqd->low_latency = __data;