Lines Matching defs:bfqd — block/bfq-iosched.c (Linux BFQ I/O scheduler); each entry is the file line number followed by the matched source line.
231 #define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \
234 (!blk_queue_nonrot(bfqd->queue) || \
401 * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
402 * @bfqd: the lookup key.
406 static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
428 void bfq_schedule_dispatch(struct bfq_data *bfqd)
430 lockdep_assert_held(&bfqd->lock);
432 if (bfqd->queued != 0) {
433 bfq_log(bfqd, "schedule dispatch");
434 blk_mq_run_hw_queues(bfqd->queue, true);
447 static struct request *bfq_choose_req(struct bfq_data *bfqd,
478 back_max = bfqd->bfq_back_max * 2;
488 d1 = (last - s1) * bfqd->bfq_back_penalty;
495 d2 = (last - s2) * bfqd->bfq_back_penalty;
545 struct bfq_data *bfqd = data->q->elevator->elevator_data;
551 bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)];
553 bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
554 __func__, bfqd->wr_busy_queues, op_is_sync(op),
559 bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
592 bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
615 bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
626 if (bfqq == &bfqd->oom_bfqq)
643 __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
685 static bool bfq_asymmetric_scenario(struct bfq_data *bfqd,
692 rb_first_cached(&bfqd->queue_weights_tree),
701 !RB_EMPTY_ROOT(&bfqd->queue_weights_tree.rb_root) &&
702 (bfqd->queue_weights_tree.rb_root.rb_node->rb_left ||
703 bfqd->queue_weights_tree.rb_root.rb_node->rb_right);
706 (bfqd->busy_queues[0] && bfqd->busy_queues[1]) ||
707 (bfqd->busy_queues[0] && bfqd->busy_queues[2]) ||
708 (bfqd->busy_queues[1] && bfqd->busy_queues[2]);
712 || bfqd->num_groups_with_pending_reqs > 0
730 void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
804 void __bfq_weights_tree_remove(struct bfq_data *bfqd,
827 void bfq_weights_tree_remove(struct bfq_data *bfqd,
862 bfqd->num_groups_with_pending_reqs--;
872 __bfq_weights_tree_remove(bfqd, bfqq,
873 &bfqd->queue_weights_tree);
894 bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
898 static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
922 return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
930 bfq_asymmetric_scenario(bfqq->bfqd, bfqq))
938 * @bfqd: the device data the queue belongs to.
947 static void bfq_updated_next_req(struct bfq_data *bfqd,
957 if (bfqq == bfqd->in_service_queue)
970 bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
972 bfq_requeue_bfqq(bfqd, bfqq, false);
976 static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
980 if (bfqd->bfq_wr_max_time > 0)
981 return bfqd->bfq_wr_max_time;
983 dur = bfqd->rate_dur_prod;
984 do_div(dur, bfqd->peak_rate);
1011 struct bfq_data *bfqd)
1013 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1014 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1019 bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
1045 if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
1048 bfq_wr_duration(bfqd))) {
1049 switch_back_to_interactive_wr(bfqq, bfqd);
1052 bfq_log_bfqq(bfqq->bfqd, bfqq,
1064 bfqd->wr_busy_queues++;
1066 bfqd->wr_busy_queues--;
1076 static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1081 hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
1089 if (bfq_tot_busy_queues(bfqd) == 0) {
1090 hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
1091 bfqd->burst_size = 1;
1093 bfqd->burst_size = 0;
1095 bfqd->burst_parent_entity = bfqq->entity.parent;
1099 static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1102 bfqd->burst_size++;
1104 if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
1112 bfqd->large_burst = true;
1118 hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
1130 hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
1139 hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
1251 static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
1281 if (time_is_before_jiffies(bfqd->last_ins_in_burst +
1282 bfqd->bfq_burst_interval) ||
1283 bfqq->entity.parent != bfqd->burst_parent_entity) {
1284 bfqd->large_burst = false;
1285 bfq_reset_burst_list(bfqd, bfqq);
1294 if (bfqd->large_burst) {
1304 bfq_add_to_burst(bfqd, bfqq);
1314 bfqd->last_ins_in_burst = jiffies;
1326 * stored in bfqd, which is dynamically updated according to the
1329 static int bfq_max_budget(struct bfq_data *bfqd)
1331 if (bfqd->budgets_assigned < bfq_stats_min_budgets)
1334 return bfqd->bfq_max_budget;
1341 static int bfq_min_budget(struct bfq_data *bfqd)
1343 if (bfqd->budgets_assigned < bfq_stats_min_budgets)
1346 return bfqd->bfq_max_budget / 32;
1453 static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
1522 static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
1534 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1535 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1550 bfqq->wr_coeff = bfqd->bfq_wr_coeff *
1553 bfqd->bfq_wr_rt_max_time;
1567 2 * bfq_min_budget(bfqd));
1570 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
1571 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
1605 bfqd->bfq_wr_rt_max_time) {
1610 bfqd->bfq_wr_rt_max_time;
1611 bfqq->wr_coeff = bfqd->bfq_wr_coeff *
1619 static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd,
1625 bfqd->bfq_wr_min_idle_time);
1658 static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
1666 idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
1674 bfqd->bfq_slice_idle * 3;
1685 soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
1691 wr_or_deserves_wr = bfqd->low_latency &&
1701 bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
1733 bfqd->bfq_requests_within_timer)
1739 if (bfqd->low_latency) {
1743 jiffies - bfqd->bfq_wr_min_idle_time - 1;
1746 bfqd->bfq_wr_min_idle_time)) {
1747 bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
1763 bfq_add_bfqq_busy(bfqd, bfqq);
1797 if (bfqd->in_service_queue &&
1799 bfqq->wr_coeff >= bfqd->in_service_queue->wr_coeff) ||
1800 bfq_bfqq_higher_class_or_weight(bfqq, bfqd->in_service_queue)) &&
1801 next_queue_may_preempt(bfqd))
1802 bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
1806 static void bfq_reset_inject_limit(struct bfq_data *bfqd,
1816 bfqd->waited_rq = NULL;
1875 struct bfq_data *bfqd = bfqq->bfqd;
1880 bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
1882 bfqd->queued++;
1939 if (bfqd->last_completed_rq_bfqq &&
1941 ktime_get_ns() - bfqd->last_completion <
1943 if (bfqd->last_completed_rq_bfqq != bfqq &&
1944 bfqd->last_completed_rq_bfqq !=
1952 bfqq->waker_bfqq = bfqd->last_completed_rq_bfqq;
1977 &bfqd->last_completed_rq_bfqq->woken_list);
1980 } else if (bfqd->last_completed_rq_bfqq ==
1999 bfq_reset_inject_limit(bfqd, bfqq);
2026 if (bfqq == bfqd->in_service_queue &&
2027 (bfqd->rq_in_driver == 0 ||
2029 bfqd->rqs_injected && bfqd->rq_in_driver > 0)) &&
2032 bfqd->last_empty_occupied_ns = ktime_get_ns();
2036 * wait_dispatch will cause bfqd->waited_rq to
2039 bfqd->wait_dispatch = true;
2053 if (bfqd->rq_in_driver == 0)
2054 bfqd->rqs_injected = false;
2064 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
2071 if (unlikely(!bfqd->nonrot_with_queueing && prev != bfqq->next_rq))
2072 bfq_pos_tree_add_move(bfqd, bfqq);
2075 bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
2078 if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
2081 bfqd->bfq_wr_min_inter_arr_async)) {
2082 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
2083 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
2085 bfqd->wr_busy_queues++;
2089 bfq_updated_next_req(bfqd, bfqq);
2118 if (bfqd->low_latency &&
2123 static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
2127 struct bfq_queue *bfqq = bfqd->bio_bfqq;
2147 struct bfq_data *bfqd = q->elevator->elevator_data;
2149 bfqd->rq_in_driver++;
2154 struct bfq_data *bfqd = q->elevator->elevator_data;
2156 bfqd->rq_in_driver--;
2164 struct bfq_data *bfqd = bfqq->bfqd;
2168 bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
2169 bfq_updated_next_req(bfqd, bfqq);
2175 bfqd->queued--;
2185 if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
2186 bfq_del_bfqq_busy(bfqd, bfqq, false);
2212 if (unlikely(!bfqd->nonrot_with_queueing))
2213 bfq_pos_tree_add_move(bfqd, bfqq);
2224 struct bfq_data *bfqd = q->elevator->elevator_data;
2229 * queue_lock inside the bfqd->lock. We assume that the bic
2231 * bfqd->lock is taken.
2233 struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q);
2236 spin_lock_irq(&bfqd->lock);
2245 bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
2247 bfqd->bio_bfqq = NULL;
2249 bfqd->bio_bic = bic;
2253 spin_unlock_irq(&bfqd->lock);
2263 struct bfq_data *bfqd = q->elevator->elevator_data;
2266 __rq = bfq_find_rq_fmerge(bfqd, bio, q);
2287 struct bfq_data *bfqd;
2293 bfqd = bfqq->bfqd;
2301 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
2302 bfqd->last_position);
2310 bfq_updated_next_req(bfqd, bfqq);
2315 if (unlikely(!bfqd->nonrot_with_queueing))
2316 bfq_pos_tree_add_move(bfqd, bfqq);
2379 bfqq->bfqd->wr_busy_queues--;
2390 void bfq_end_wr_async_queues(struct bfq_data *bfqd,
2403 static void bfq_end_wr(struct bfq_data *bfqd)
2407 spin_lock_irq(&bfqd->lock);
2409 list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
2411 list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
2413 bfq_end_wr_async(bfqd);
2415 spin_unlock_irq(&bfqd->lock);
2433 static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
2448 __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
2475 static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd,
2488 bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
2534 bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
2621 bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
2667 if (likely(bfqd->nonrot_with_queueing))
2684 if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
2688 if (bfq_tot_busy_queues(bfqd) == 1)
2691 in_service_bfqq = bfqd->in_service_queue;
2694 likely(in_service_bfqq != &bfqd->oom_bfqq) &&
2696 bfqd->in_serv_last_pos) &&
2708 new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
2711 if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) &&
2738 bfqq->bfqd->low_latency)) {
2748 bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff;
2750 bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd);
2761 void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
2775 bfqq != bfqd->in_service_queue)
2776 bfq_del_bfqq_busy(bfqd, bfqq, false);
2782 bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
2785 bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
2810 bfqd->wr_busy_queues++;
2818 bfqd->wr_busy_queues--;
2821 bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
2822 bfqd->wr_busy_queues);
2851 bfq_release_process_ref(bfqd, bfqq);
2857 struct bfq_data *bfqd = q->elevator->elevator_data;
2859 struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;
2878 new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
2887 bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
2898 * bfqd->bio_bic now points to new_bfqq, and
2902 bfqd->bio_bfqq = bfqq;
2914 static void bfq_set_budget_timeout(struct bfq_data *bfqd,
2919 if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
2924 bfqd->last_budget_start = ktime_get();
2927 bfqd->bfq_timeout * timeout_coeff;
2930 static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
2936 bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8;
2940 bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
2974 bfq_set_budget_timeout(bfqd, bfqq);
2975 bfq_log_bfqq(bfqd, bfqq,
2980 bfqd->in_service_queue = bfqq;
2981 bfqd->in_serv_last_pos = 0;
2987 static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
2989 struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
2991 __bfq_set_in_service_queue(bfqd, bfqq);
2995 static void bfq_arm_slice_timer(struct bfq_data *bfqd)
2997 struct bfq_queue *bfqq = bfqd->in_service_queue;
3007 sl = bfqd->bfq_slice_idle;
3019 !bfq_asymmetric_scenario(bfqd, bfqq))
3024 bfqd->last_idling_start = ktime_get();
3025 bfqd->last_idling_start_jiffies = jiffies;
3027 hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
3039 static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd)
3041 return (u64)bfqd->peak_rate * USEC_PER_MSEC *
3042 jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT;
3050 static void update_thr_responsiveness_params(struct bfq_data *bfqd)
3052 if (bfqd->bfq_user_max_budget == 0) {
3053 bfqd->bfq_max_budget =
3054 bfq_calc_max_budget(bfqd);
3055 bfq_log(bfqd, "new max_budget = %d", bfqd->bfq_max_budget);
3059 static void bfq_reset_rate_computation(struct bfq_data *bfqd,
3063 bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
3064 bfqd->peak_rate_samples = 1;
3065 bfqd->sequential_samples = 0;
3066 bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
3069 bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
3071 bfq_log(bfqd,
3073 bfqd->peak_rate_samples, bfqd->sequential_samples,
3074 bfqd->tot_sectors_dispatched);
3077 static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
3089 if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
3090 bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL)
3099 bfqd->delta_from_first =
3100 max_t(u64, bfqd->delta_from_first,
3101 bfqd->last_completion - bfqd->first_dispatch);
3107 rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT,
3108 div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
3116 if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 &&
3117 rate <= bfqd->peak_rate) ||
3139 * cannot reach 9, because bfqd->sequential_samples cannot
3140 * become equal to bfqd->peak_rate_samples, which, in its
3141 * turn, holds true because bfqd->sequential_samples is not
3144 weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples;
3151 div_u64(weight * bfqd->delta_from_first,
3165 bfqd->peak_rate *= divisor-1;
3166 bfqd->peak_rate /= divisor;
3169 bfqd->peak_rate += rate;
3172 * For a very slow device, bfqd->peak_rate can reach 0 (see
3175 * divisions by zero where bfqd->peak_rate is used as a
3178 bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate);
3180 update_thr_responsiveness_params(bfqd);
3183 bfq_reset_rate_computation(bfqd, rq);
3218 static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
3222 if (bfqd->peak_rate_samples == 0) { /* first dispatch */
3223 bfq_log(bfqd, "update_peak_rate: goto reset, samples %d",
3224 bfqd->peak_rate_samples);
3225 bfq_reset_rate_computation(bfqd, rq);
3241 if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
3242 bfqd->rq_in_driver == 0)
3246 bfqd->peak_rate_samples++;
3248 if ((bfqd->rq_in_driver > 0 ||
3249 now_ns - bfqd->last_completion < BFQ_MIN_TT)
3250 && !BFQ_RQ_SEEKY(bfqd, bfqd->last_position, rq))
3251 bfqd->sequential_samples++;
3253 bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
3256 if (likely(bfqd->peak_rate_samples % 32))
3257 bfqd->last_rq_max_size =
3258 max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
3260 bfqd->last_rq_max_size = blk_rq_sectors(rq);
3262 bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
3265 if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL)
3269 bfq_update_rate_reset(bfqd, rq);
3271 bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
3272 if (RQ_BFQQ(rq) == bfqd->in_service_queue)
3273 bfqd->in_serv_last_pos = bfqd->last_position;
3274 bfqd->last_dispatch = now_ns;
3488 static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd,
3496 (bfqd->wr_busy_queues <
3497 bfq_tot_busy_queues(bfqd) ||
3498 bfqd->rq_in_driver >=
3500 bfq_asymmetric_scenario(bfqd, bfqq);
3503 static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
3530 idling_needed_for_service_guarantees(bfqd, bfqq))) {
3540 bfq_del_bfqq_busy(bfqd, bfqq, true);
3542 bfq_requeue_bfqq(bfqd, bfqq, true);
3547 if (unlikely(!bfqd->nonrot_with_queueing &&
3549 bfq_pos_tree_add_move(bfqd, bfqq);
3559 return __bfq_bfqd_reset_in_service(bfqd);
3564 * @bfqd: device data.
3571 static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
3578 min_budget = bfq_min_budget(bfqd);
3590 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
3592 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
3593 budget, bfq_min_budget(bfqd));
3594 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
3595 bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
3629 budget = min(budget * 2, bfqd->bfq_max_budget);
3644 budget = min(budget * 2, bfqd->bfq_max_budget);
3656 budget = min(budget * 4, bfqd->bfq_max_budget);
3703 budget = bfqd->bfq_max_budget;
3708 if (bfqd->budgets_assigned >= bfq_stats_min_budgets &&
3709 !bfqd->bfq_user_max_budget)
3710 bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);
3727 bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d",
3763 static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
3775 delta_ktime = bfqd->last_idling_start;
3778 delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start);
3783 if (blk_queue_nonrot(bfqd->queue))
3812 slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
3815 bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
3859 * namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We
3863 * bfqd->bfq_slice_idle tends to filter out greedy applications,
3898 * bfqd->bfq_slice_idle:
3900 * higher than bfqd->bfq_slice_idle. This happens, e.g., on slow
3902 * that the approximation, in jiffies, of bfqd->bfq_slice_idle
3908 * reference time interval just bfqd->bfq_slice_idle, but
3909 * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the
3913 static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
3919 bfqd->bfq_wr_max_softrt_rate,
3920 jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
3925 * @bfqd: device owning the queue.
3949 void bfq_bfqq_expire(struct bfq_data *bfqd,
3961 slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);
3982 bfq_bfqq_charge_time(bfqd, bfqq, delta);
3988 if (bfqd->low_latency && bfqq->wr_coeff == 1)
3991 if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
4020 bfqq->wr_coeff != bfqd->bfq_wr_coeff)
4022 bfq_bfqq_softrt_next_start(bfqd, bfqq);
4032 bfq_log_bfqq(bfqd, bfqq,
4041 bfqd->rqs_injected = bfqd->wait_dispatch = false;
4042 bfqd->waited_rq = NULL;
4048 __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
4049 if (__bfq_bfqq_expire(bfqd, bfqq, reason))
4108 bfq_log_bfqq(bfqq->bfqd, bfqq,
4120 static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
4124 !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
4154 * particular, happens to be false if bfqd is an NCQ-capable
4158 ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) &&
4196 bfqd->wr_busy_queues == 0;
4222 struct bfq_data *bfqd = bfqq->bfqd;
4229 if (unlikely(bfqd->strict_guarantees))
4240 if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
4245 idling_boosts_thr_without_issues(bfqd, bfqq);
4248 idling_needed_for_service_guarantees(bfqd, bfqq);
4284 bfq_choose_bfqq_for_injection(struct bfq_data *bfqd)
4286 struct bfq_queue *bfqq, *in_serv_bfqq = bfqd->in_service_queue;
4314 time_is_before_eq_jiffies(bfqd->last_idling_start_jiffies +
4315 bfqd->bfq_slice_idle)
4319 if (bfqd->rq_in_driver >= limit)
4334 list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
4356 if (blk_queue_nonrot(bfqd->queue) &&
4363 if (bfqd->rq_in_driver < limit) {
4364 bfqd->rqs_injected = true;
4376 static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
4382 bfqq = bfqd->in_service_queue;
4386 bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
4443 hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
4550 else if (!idling_boosts_thr_without_issues(bfqd, bfqq) &&
4551 (bfqq->wr_coeff == 1 || bfqd->wr_busy_queues > 1 ||
4553 bfqq = bfq_choose_bfqq_for_injection(bfqd);
4562 bfq_bfqq_expire(bfqd, bfqq, false, reason);
4564 bfqq = bfq_set_in_service_queue(bfqd);
4566 bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
4571 bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
4573 bfq_log(bfqd, "select_queue: no queue returned");
4578 static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
4583 bfq_log_bfqq(bfqd, bfqq,
4591 bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
4602 if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
4604 bfq_wr_duration(bfqd)))
4607 switch_back_to_interactive_wr(bfqq, bfqd);
4612 bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time &&
4634 static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
4644 if (bfqq == bfqd->in_service_queue && bfqd->wait_dispatch) {
4645 bfqd->wait_dispatch = false;
4646 bfqd->waited_rq = rq;
4649 bfq_dispatch_remove(bfqd->queue, rq);
4651 if (bfqq != bfqd->in_service_queue)
4665 bfq_update_wr_data(bfqd, bfqq);
4672 if (!(bfq_tot_busy_queues(bfqd) > 1 && bfq_class_idle(bfqq)))
4675 bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
4683 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
4689 * Avoiding lock: a race on bfqd->busy_queues should cause at
4692 return !list_empty_careful(&bfqd->dispatch) ||
4693 bfq_tot_busy_queues(bfqd) > 0;
4698 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
4702 if (!list_empty(&bfqd->dispatch)) {
4703 rq = list_first_entry(&bfqd->dispatch, struct request,
4747 bfq_log(bfqd, "dispatch requests: %d busy queues",
4748 bfq_tot_busy_queues(bfqd));
4750 if (bfq_tot_busy_queues(bfqd) == 0)
4765 if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0)
4768 bfqq = bfq_select_queue(bfqd);
4772 rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
4776 bfqd->rq_in_driver++;
4838 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
4843 spin_lock_irq(&bfqd->lock);
4845 in_serv_queue = bfqd->in_service_queue;
4849 if (in_serv_queue == bfqd->in_service_queue) {
4854 spin_unlock_irq(&bfqd->lock);
4875 if (bfqq->bfqd)
4876 bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d",
4911 if (bfqq->bic && bfqq->bfqd->burst_size > 0)
4912 bfqq->bfqd->burst_size--;
4942 if (bfqq->bfqd && bfqq->bfqd->last_completed_rq_bfqq == bfqq)
4943 bfqq->bfqd->last_completed_rq_bfqq = NULL;
4968 static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
4970 if (bfqq == bfqd->in_service_queue) {
4971 __bfq_bfqq_expire(bfqd, bfqq, BFQQE_BUDGET_TIMEOUT);
4972 bfq_schedule_dispatch(bfqd);
4975 bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
4979 bfq_release_process_ref(bfqd, bfqq);
4985 struct bfq_data *bfqd;
4988 bfqd = bfqq->bfqd; /* NULL if scheduler already exited */
4990 if (bfqq && bfqd) {
4993 spin_lock_irqsave(&bfqd->lock, flags);
4995 bfq_exit_bfqq(bfqd, bfqq);
4996 spin_unlock_irqrestore(&bfqd->lock, flags);
5017 struct bfq_data *bfqd = bfqq->bfqd;
5019 if (!bfqd)
5026 bdi_dev_name(bfqq->bfqd->queue->backing_dev_info),
5060 static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
5066 struct bfq_data *bfqd = bic_to_bfqd(bic);
5074 if (unlikely(!bfqd) || likely(bic->ioprio == ioprio))
5083 bfqq = bfq_get_queue(bfqd, bio, false, bic);
5085 bfq_release_process_ref(bfqd, old_bfqq);
5093 static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
5103 bfqq->bfqd = bfqd;
5130 bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
5153 static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
5172 static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
5182 bfqg = bfq_bio_bfqg(bfqd, bio);
5184 async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
5193 bfqd->queue->node);
5196 bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
5199 bfq_log_bfqq(bfqd, bfqq, "allocated");
5201 bfqq = &bfqd->oom_bfqq;
5202 bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
5218 bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
5225 bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
5229 static void bfq_update_io_thinktime(struct bfq_data *bfqd,
5235 elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle);
5244 bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
5248 bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq);
5251 bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
5256 static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
5268 bfqd->bfq_slice_idle == 0)
5273 bfqd->bfq_wr_min_idle_time))
5282 bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle))
5380 bfq_reset_inject_limit(bfqd, bfqq);
5387 static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
5395 if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
5416 if (small_req && idling_boosts_thr_without_issues(bfqd, bfqq) &&
5428 hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
5438 bfq_bfqq_expire(bfqd, bfqq, false,
5444 static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
5447 *new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
5467 bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
5480 bfq_update_io_thinktime(bfqd, bfqq);
5481 bfq_update_has_short_ttime(bfqd, bfqq, RQ_BIC(rq));
5482 bfq_update_io_seektime(bfqd, bfqq, rq);
5488 rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
5491 bfq_rq_enqueued(bfqd, bfqq, rq);
5534 struct bfq_data *bfqd = q->elevator->elevator_data;
5544 spin_lock_irq(&bfqd->lock);
5547 spin_unlock_irq(&bfqd->lock);
5556 list_add(&rq->queuelist, &bfqd->dispatch);
5558 list_add_tail(&rq->queuelist, &bfqd->dispatch);
5560 idle_timer_disabled = __bfq_insert_request(bfqd, rq);
5582 spin_unlock_irq(&bfqd->lock);
5601 static void bfq_update_hw_tag(struct bfq_data *bfqd)
5603 struct bfq_queue *bfqq = bfqd->in_service_queue;
5605 bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
5606 bfqd->rq_in_driver);
5608 if (bfqd->hw_tag == 1)
5617 if (bfqd->rq_in_driver + bfqd->queued <= BFQ_HW_QUEUE_THRESHOLD)
5628 bfqd->rq_in_driver < BFQ_HW_QUEUE_THRESHOLD)
5631 if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
5634 bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
5635 bfqd->max_rq_in_driver = 0;
5636 bfqd->hw_tag_samples = 0;
5638 bfqd->nonrot_with_queueing =
5639 blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag;
5642 static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
5647 bfq_update_hw_tag(bfqd);
5649 bfqd->rq_in_driver--;
5661 bfq_weights_tree_remove(bfqd, bfqq);
5672 delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
5691 (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us <
5693 bfq_update_rate_reset(bfqd, NULL);
5694 bfqd->last_completion = now_ns;
5695 bfqd->last_completed_rq_bfqq = bfqq;
5710 bfqq->wr_coeff != bfqd->bfq_wr_coeff)
5712 bfq_bfqq_softrt_next_start(bfqd, bfqq);
5718 if (bfqd->in_service_queue == bfqq) {
5721 bfq_arm_slice_timer(bfqd);
5747 bfq_bfqq_expire(bfqd, bfqq, false,
5752 bfq_bfqq_expire(bfqd, bfqq, false,
5756 if (!bfqd->rq_in_driver)
5757 bfq_schedule_dispatch(bfqd);
5871 static void bfq_update_inject_limit(struct bfq_data *bfqd,
5874 u64 tot_time_ns = ktime_get_ns() - bfqd->last_empty_occupied_ns;
5877 if (bfqq->last_serv_time_ns > 0 && bfqd->rqs_injected) {
5884 old_limit <= bfqd->max_rq_in_driver)
5894 * NOTE: (bfqd->rq_in_driver == 1) means that there is no I/O
5898 * bfqd->rq_in_driver is decremented in such a code path.
5900 if ((bfqq->last_serv_time_ns == 0 && bfqd->rq_in_driver == 1) ||
5910 } else if (!bfqd->rqs_injected && bfqd->rq_in_driver == 1)
5924 bfqd->waited_rq = NULL;
5925 bfqd->rqs_injected = false;
5937 struct bfq_data *bfqd;
5948 bfqd = bfqq->bfqd;
5956 spin_lock_irqsave(&bfqd->lock, flags);
5958 if (rq == bfqd->waited_rq)
5959 bfq_update_inject_limit(bfqd, bfqq);
5961 bfq_completed_request(bfqq, bfqd);
5965 spin_unlock_irqrestore(&bfqd->lock, flags);
5997 bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
6010 bfq_release_process_ref(bfqq->bfqd, bfqq);
6014 static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
6022 if (likely(bfqq && bfqq != &bfqd->oom_bfqq))
6030 bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
6034 if ((bic->was_in_burst_list && bfqd->large_burst) ||
6069 &bfqd->burst_list);
6120 struct bfq_data *bfqd = q->elevator->elevator_data;
6146 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
6152 bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
6162 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
6172 bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d",
6184 if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
6192 bfq_bfqq_resume_state(bfqq, bfqd, bic,
6200 * 1) A burst is actually happening (bfqd->burst_size > 0)
6218 (bfqd->burst_size > 0 ||
6219 bfq_tot_busy_queues(bfqd) == 0)))
6220 bfq_handle_burst(bfqd, bfqq);
6226 bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
6231 spin_lock_irqsave(&bfqd->lock, flags);
6240 if (bfqq != bfqd->in_service_queue) {
6241 spin_unlock_irqrestore(&bfqd->lock, flags);
6265 bfq_bfqq_expire(bfqd, bfqq, true, reason);
6268 bfq_schedule_dispatch(bfqd);
6269 spin_unlock_irqrestore(&bfqd->lock, flags);
6278 struct bfq_data *bfqd = container_of(timer, struct bfq_data,
6280 struct bfq_queue *bfqq = bfqd->in_service_queue;
6291 bfq_idle_slice_timer_body(bfqd, bfqq);
6296 static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
6301 bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
6303 bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
6305 bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
6318 void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
6324 __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
6326 __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
6333 static unsigned int bfq_update_depths(struct bfq_data *bfqd,
6349 bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
6355 bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
6365 bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
6367 bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
6371 min_shallow = min(min_shallow, bfqd->word_depths[i][j]);
6378 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
6382 min_shallow = bfq_update_depths(bfqd, tags->bitmap_tags);
6394 struct bfq_data *bfqd = e->elevator_data;
6397 hrtimer_cancel(&bfqd->idle_slice_timer);
6399 spin_lock_irq(&bfqd->lock);
6400 list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
6401 bfq_deactivate_bfqq(bfqd, bfqq, false, false);
6402 spin_unlock_irq(&bfqd->lock);
6404 hrtimer_cancel(&bfqd->idle_slice_timer);
6407 bfqg_and_blkg_put(bfqd->root_group);
6410 blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
6412 spin_lock_irq(&bfqd->lock);
6413 bfq_put_async_queues(bfqd, bfqd->root_group);
6414 kfree(bfqd->root_group);
6415 spin_unlock_irq(&bfqd->lock);
6418 wbt_enable_default(bfqd->queue);
6420 kfree(bfqd);
6424 struct bfq_data *bfqd)
6431 root_group->bfqd = bfqd;
6441 struct bfq_data *bfqd;
6448 bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
6449 if (!bfqd) {
6453 eq->elevator_data = bfqd;
6464 bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0);
6465 bfqd->oom_bfqq.ref++;
6466 bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
6467 bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
6468 bfqd->oom_bfqq.entity.new_weight =
6469 bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);
6472 bfq_clear_bfqq_just_created(&bfqd->oom_bfqq);
6479 bfqd->oom_bfqq.entity.prio_changed = 1;
6481 bfqd->queue = q;
6483 INIT_LIST_HEAD(&bfqd->dispatch);
6485 hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
6487 bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
6489 bfqd->queue_weights_tree = RB_ROOT_CACHED;
6490 bfqd->num_groups_with_pending_reqs = 0;
6492 INIT_LIST_HEAD(&bfqd->active_list);
6493 INIT_LIST_HEAD(&bfqd->idle_list);
6494 INIT_HLIST_HEAD(&bfqd->burst_list);
6496 bfqd->hw_tag = -1;
6497 bfqd->nonrot_with_queueing = blk_queue_nonrot(bfqd->queue);
6499 bfqd->bfq_max_budget = bfq_default_max_budget;
6501 bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
6502 bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
6503 bfqd->bfq_back_max = bfq_back_max;
6504 bfqd->bfq_back_penalty = bfq_back_penalty;
6505 bfqd->bfq_slice_idle = bfq_slice_idle;
6506 bfqd->bfq_timeout = bfq_timeout;
6508 bfqd->bfq_requests_within_timer = 120;
6510 bfqd->bfq_large_burst_thresh = 8;
6511 bfqd->bfq_burst_interval = msecs_to_jiffies(180);
6513 bfqd->low_latency = true;
6518 bfqd->bfq_wr_coeff = 30;
6519 bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
6520 bfqd->bfq_wr_max_time = 0;
6521 bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
6522 bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
6523 bfqd->bfq_wr_max_softrt_rate = 7000; /*
6529 bfqd->wr_busy_queues = 0;
6535 bfqd->rate_dur_prod = ref_rate[blk_queue_nonrot(bfqd->queue)] *
6536 ref_wr_duration[blk_queue_nonrot(bfqd->queue)];
6537 bfqd->peak_rate = ref_rate[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
6539 spin_lock_init(&bfqd->lock);
6556 bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
6557 if (!bfqd->root_group)
6559 bfq_init_root_group(bfqd->root_group, bfqd);
6560 bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
6566 kfree(bfqd);
6603 struct bfq_data *bfqd = e->elevator_data; \
6611 SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
6612 SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
6613 SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
6614 SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
6615 SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
6616 SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
6617 SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
6618 SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
6619 SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
6625 struct bfq_data *bfqd = e->elevator_data; \
6630 USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
6637 struct bfq_data *bfqd = e->elevator_data; \
6656 STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
6658 STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
6660 STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
6661 STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
6663 STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
6669 struct bfq_data *bfqd = e->elevator_data; \
6683 USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
6690 struct bfq_data *bfqd = e->elevator_data;
6699 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
6703 bfqd->bfq_max_budget = __data;
6706 bfqd->bfq_user_max_budget = __data;
6718 struct bfq_data *bfqd = e->elevator_data;
6731 bfqd->bfq_timeout = msecs_to_jiffies(__data);
6732 if (bfqd->bfq_user_max_budget == 0)
6733 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
6741 struct bfq_data *bfqd = e->elevator_data;
6751 if (!bfqd->strict_guarantees && __data == 1
6752 && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC)
6753 bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC;
6755 bfqd->strict_guarantees = __data;
6763 struct bfq_data *bfqd = e->elevator_data;
6773 if (__data == 0 && bfqd->low_latency != 0)
6774 bfq_end_wr(bfqd);
6775 bfqd->low_latency = __data;
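
Nearly every hook in the matches above reaches struct bfq_data the same way: recover it from the request queue's elevator data (e.g. line 2147) and serialize all state changes under bfqd->lock (e.g. lines 2236, 5544, 6399). The fragment below is only a minimal sketch of that recurring pattern, not code from bfq-iosched.c; the "example_" names and the reduced structure are hypothetical, and only the two kernel accessors/locking calls shown are assumed from the listing.

/*
 * Minimal sketch (hypothetical names, field subset only): the bfqd access
 * pattern repeated throughout the matched hooks above.
 */
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/spinlock.h>

struct example_sched_data {
	spinlock_t lock;	/* plays the role of bfqd->lock */
	int queued;		/* plays the role of bfqd->queued */
};

static void example_hook(struct request_queue *q)
{
	/* per-device scheduler data, as in "q->elevator->elevator_data" above */
	struct example_sched_data *sd = q->elevator->elevator_data;

	spin_lock_irq(&sd->lock);	/* cf. spin_lock_irq(&bfqd->lock) */
	sd->queued++;			/* scheduler state is only touched under the lock */
	spin_unlock_irq(&sd->lock);
}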