Lines matching bfq_slice_idle (definition and references), block/bfq-iosched.c
174 static u64 bfq_slice_idle = NSEC_PER_SEC / 125;
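
NSEC_PER_SEC / 125 works out to 8,000,000 ns, i.e. an 8 ms default idle slice. A minimal userspace sketch of that arithmetic; NSEC_PER_SEC and NSEC_PER_MSEC are redefined here with the values from the kernel's time headers so the snippet builds outside the kernel:

    /* Sketch: confirms the default slice_idle of NSEC_PER_SEC / 125. */
    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_SEC  1000000000ULL
    #define NSEC_PER_MSEC 1000000ULL

    int main(void)
    {
            uint64_t bfq_slice_idle = NSEC_PER_SEC / 125;   /* 8,000,000 ns */

            printf("default slice_idle = %llu ns = %llu ms\n",
                   (unsigned long long)bfq_slice_idle,
                   (unsigned long long)(bfq_slice_idle / NSEC_PER_MSEC));
            return 0;
    }
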
1834 bfqd->bfq_slice_idle * 3;
2154 128 * (u64)bfqd->bfq_slice_idle) {
3380 sl = bfqd->bfq_slice_idle;
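
Line 3380 looks like the start of the idle-timer arming path (bfq_arm_slice_timer() in mainline), where sl starts from bfq_slice_idle and, for seeky queues, is apparently capped at a smaller minimum think time before being handed to the timer. A hedged userspace sketch of that clamping; the 2 ms BFQ_MIN_TT value and the bare seeky flag stand in for the real per-queue state and are assumptions here:

    /* Sketch: pick the relative expiry for the idle timer, capping it
     * for seeky queues (simplified; the real decision also looks at
     * weight raising and scenario symmetry). */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_MSEC 1000000ULL
    #define BFQ_MIN_TT    (2 * NSEC_PER_MSEC)  /* assumed minimum think time */

    static uint64_t idle_timer_expiry_ns(uint64_t slice_idle_ns, bool seeky)
    {
            uint64_t sl = slice_idle_ns;

            /* Waiting a full slice for a seeky queue rarely pays off:
             * cap the idle window at the minimum think time. */
            if (seeky && sl > BFQ_MIN_TT)
                    sl = BFQ_MIN_TT;
            return sl;
    }

    int main(void)
    {
            printf("%llu ns\n",
                   (unsigned long long)idle_timer_expiry_ns(8 * NSEC_PER_MSEC, true));
            return 0;
    }
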
4178 *delta_ms = bfq_slice_idle / NSEC_PER_MSEC;
4247 * namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We
4251 * bfqd->bfq_slice_idle tends to filter out greedy applications,
4286 * bfqd->bfq_slice_idle:
4288 * higher than bfqd->bfq_slice_idle. This happens, e.g., on slow
4290 * that the approximation, in jiffies, of bfqd->bfq_slice_idle
4296 * reference time interval just bfqd->bfq_slice_idle, but
4297 * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the
4308 jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
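
Lines 4247-4308 appear to come from bfq_bfqq_softrt_next_start(): the excerpted comment explains why the reference interval for soft real-time filtering is not bfq_slice_idle alone but bfq_slice_idle plus a few extra jiffies, since on low-HZ systems the jiffy approximation of 8 ms can truncate to zero. A small userspace sketch of the computation at line 4308; HZ=100 and the truncating nsecs_to_jiffies() stand-in are assumptions used only to keep the snippet self-contained:

    /* Sketch: the reference interval, in jiffies, used for soft
     * real-time filtering is slice_idle converted to jiffies plus 4. */
    #include <stdint.h>
    #include <stdio.h>

    #define HZ            100U            /* assumed tick rate */
    #define NSEC_PER_SEC  1000000000ULL

    /* Userspace stand-in for the kernel's nsecs_to_jiffies() (truncating). */
    static uint64_t nsecs_to_jiffies(uint64_t ns)
    {
            return ns * HZ / NSEC_PER_SEC;
    }

    int main(void)
    {
            uint64_t slice_idle = NSEC_PER_SEC / 125;       /* 8 ms default */

            /* With HZ=100 a jiffy is 10 ms, so 8 ms truncates to 0 jiffies;
             * the +4 keeps the filtering interval meaningful anyway. */
            printf("reference interval: %llu jiffies\n",
                   (unsigned long long)(nsecs_to_jiffies(slice_idle) + 4));
            return 0;
    }
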
4609 if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
4686 bfqd->bfq_slice_idle)
5907 elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle);
5954 * idle io prio class, or if bfq_slice_idle is zero, because
5958 bfqd->bfq_slice_idle == 0)
5973 bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle>>1))
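
Lines 4609 and 5907-5973 look like the think-time bookkeeping behind the idling decision: idling is skipped when slice_idle is zero or the queue is async or idle-class, each think-time sample is clamped to twice slice_idle, and a queue stops counting as having a short think time once its mean think time exceeds slice_idle/2. A hedged sketch of the last two checks, with the kernel's decayed ttime_mean simplified to a plain nanosecond value:

    /* Sketch (simplified: the real code keeps a decayed mean inside
     * struct bfq_ttime; here ttime_mean_ns is just a plain number). */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_MSEC 1000000ULL

    /* Clamp a new think-time sample to 2 * slice_idle (cf. line 5907). */
    static uint64_t clamp_think_time(uint64_t elapsed_ns, uint64_t slice_idle_ns)
    {
            uint64_t max_tt = 2 * slice_idle_ns;

            return elapsed_ns < max_tt ? elapsed_ns : max_tt;
    }

    /* Idling only pays off if the queue usually issues its next request
     * within about half the idle slice (cf. line 5973). */
    static bool has_short_think_time(uint64_t ttime_mean_ns, uint64_t slice_idle_ns)
    {
            return ttime_mean_ns <= (slice_idle_ns >> 1);
    }

    int main(void)
    {
            uint64_t slice_idle = 8 * NSEC_PER_MSEC;

            printf("clamped: %llu ns, short ttime: %d\n",
                   (unsigned long long)clamp_think_time(25 * NSEC_PER_MSEC, slice_idle),
                   has_short_think_time(3 * NSEC_PER_MSEC, slice_idle));
            return 0;
    }
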
7298 bfqd->bfq_slice_idle = bfq_slice_idle;
7414 SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
7429 USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
7462 STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
7482 USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
7551 && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC)
7552 bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC;
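
Line 7298 copies the module-wide default into the per-device bfqd at init time, lines 7414-7482 export the same nanosecond field through two sysfs views (slice_idle in milliseconds, slice_idle_us in microseconds, judging by the macro names), and lines 7551-7552 raise it to a floor of 8 ms, apparently when stricter service guarantees are requested. A small userspace sketch of those conversions and of the floor; the exact conversion factors are assumptions inferred from the macro names:

    /* Sketch: the two sysfs views of the nanosecond-resolution field,
     * plus the 8 ms floor seen at lines 7551-7552. */
    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_MSEC 1000000ULL
    #define NSEC_PER_USEC 1000ULL

    int main(void)
    {
            uint64_t slice_idle_ns = 3 * NSEC_PER_MSEC;     /* example: 3 ms */

            printf("slice_idle    = %llu (ms view)\n",
                   (unsigned long long)(slice_idle_ns / NSEC_PER_MSEC));
            printf("slice_idle_us = %llu (us view)\n",
                   (unsigned long long)(slice_idle_ns / NSEC_PER_USEC));

            /* Raise slice_idle to at least 8 ms (cf. lines 7551-7552). */
            if (slice_idle_ns < 8 * NSEC_PER_MSEC)
                    slice_idle_ns = 8 * NSEC_PER_MSEC;
            printf("after floor   = %llu ns\n", (unsigned long long)slice_idle_ns);
            return 0;
    }

With bfq active, these tunables normally appear as slice_idle and slice_idle_us under /sys/block/<device>/queue/iosched/.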