Lines matching "kqd" (struct kyber_queue_data *) in block/kyber-iosched.c
210 static void flush_latency_buckets(struct kyber_queue_data *kqd,
214 unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
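flush_latency_buckets() drains one CPU's atomic latency histogram into the
shared per-domain buckets: atomic_xchg(..., 0) reads and clears each per-cpu
counter in one step, so every sample is counted exactly once even while other
CPUs keep recording, and the timer callback needs no locking. A minimal
userspace model of that drain, using C11 atomics in place of the kernel's
atomic_t (the bucket count matches KYBER_LATENCY_BUCKETS; everything else is
illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define LATENCY_BUCKETS 8	/* stand-in for KYBER_LATENCY_BUCKETS */

/* Drain one CPU's atomic histogram into the shared buckets. */
static void flush_buckets(unsigned int *buckets, atomic_uint *cpu_buckets)
{
	for (unsigned int b = 0; b < LATENCY_BUCKETS; b++)
		buckets[b] += atomic_exchange(&cpu_buckets[b], 0);
}

int main(void)
{
	unsigned int buckets[LATENCY_BUCKETS] = {0};
	atomic_uint cpu_buckets[LATENCY_BUCKETS] = {0};

	atomic_fetch_add(&cpu_buckets[2], 5);	/* 5 samples land in bucket 2 */
	flush_buckets(buckets, cpu_buckets);
	printf("shared bucket 2 = %u, per-cpu copy = %u\n",
	       buckets[2], atomic_load(&cpu_buckets[2]));	/* 5 and 0 */
	return 0;
}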
226 static int calculate_percentile(struct kyber_queue_data *kqd,
230 unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
243 if (!kqd->latency_timeout[sched_domain])
244 kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL);
246 time_is_after_jiffies(kqd->latency_timeout[sched_domain])) {
249 kqd->latency_timeout[sched_domain] = 0;
257 memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));
259 trace_kyber_latency(kqd->dev, kyber_domain_names[sched_domain],
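calculate_percentile() refuses to answer until a domain has either 500 samples
or one second has passed since the first one (the latency_timeout gating at
lines 243-249); it then walks the histogram until the cumulative count reaches
the requested rank, resets the buckets (line 257), and traces the result. A
standalone model of the walk itself, with the sample gating elided (the bucket
count and DIV_ROUND_UP rounding follow the kernel; the data in main() is made
up):

#include <stdio.h>

#define LATENCY_BUCKETS 8	/* KYBER_LATENCY_BUCKETS */

/* Index of the bucket containing the given percentile, or -1 if empty. */
static int percentile_bucket(const unsigned int *buckets, unsigned int percentile)
{
	unsigned int samples = 0, target, b;

	for (b = 0; b < LATENCY_BUCKETS; b++)
		samples += buckets[b];
	if (!samples)
		return -1;

	/* DIV_ROUND_UP(samples * percentile, 100) in kernel terms */
	target = (samples * percentile + 99) / 100;
	for (b = 0; b < LATENCY_BUCKETS - 1; b++) {
		if (buckets[b] >= target)
			break;
		target -= buckets[b];
	}
	return b;
}

int main(void)
{
	unsigned int buckets[LATENCY_BUCKETS] = {10, 40, 30, 15, 3, 1, 1, 0};

	printf("p90 bucket = %d\n", percentile_bucket(buckets, 90));	/* 3 */
	return 0;
}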
266 static void kyber_resize_domain(struct kyber_queue_data *kqd,
270 if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
271 sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
272 trace_kyber_adjust(kqd->dev, kyber_domain_names[sched_domain],
279 struct kyber_queue_data *kqd = from_timer(kqd, t, timer);
288 cpu_latency = per_cpu_ptr(kqd->cpu_latency, cpu);
290 flush_latency_buckets(kqd, cpu_latency, sched_domain,
292 flush_latency_buckets(kqd, cpu_latency, sched_domain,
305 p90 = calculate_percentile(kqd, sched_domain, KYBER_IO_LATENCY,
320 p99 = calculate_percentile(kqd, sched_domain,
332 p99 = kqd->domain_p99[sched_domain];
333 kqd->domain_p99[sched_domain] = -1;
335 kqd->domain_p99[sched_domain] = p99;
350 orig_depth = kqd->domain_tokens[sched_domain].sb.depth;
352 kyber_resize_domain(kqd, sched_domain, depth);
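kyber_timer_fn() closes the feedback loop: it sums every CPU's histograms
(lines 288-292), computes each domain's p90 I/O latency to detect device
congestion (line 305; p90 rather than p99 so a single outlier doesn't trip
it), then each domain's p99 total latency to rescale that domain's token
depth (lines 320-352). The new depth scales linearly with where the p99
bucket sits relative to the target, with KYBER_LATENCY_SHIFT = 2 (each
bucket is a quarter of the target wide). A userspace sketch of just the
rescale step; the formula follows the kernel's, while the maximum depth and
the example numbers are invented:

#include <stdio.h>

#define LATENCY_SHIFT 2	/* KYBER_LATENCY_SHIFT: bucket width = target / 4 */

/* Bucket 3 ~= p99 on target; bucket 7 = beyond 1 3/4 of the target. */
static unsigned int rescale_depth(unsigned int depth, int p99,
				  unsigned int max_depth)
{
	unsigned int d = (depth * (unsigned int)(p99 + 1)) >> LATENCY_SHIFT;

	if (d < 1)
		d = 1;
	if (d > max_depth)
		d = max_depth;
	return d;
}

int main(void)
{
	printf("depth 64, p99 bucket 2 -> %u\n", rescale_depth(64, 2, 256)); /* 48 */
	printf("depth 64, p99 bucket 7 -> %u\n", rescale_depth(64, 7, 256)); /* 128 */
	return 0;
}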
359 struct kyber_queue_data *kqd;
363 kqd = kzalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
364 if (!kqd)
367 kqd->q = q;
368 kqd->dev = disk_devt(q->disk);
370 kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
372 if (!kqd->cpu_latency)
375 timer_setup(&kqd->timer, kyber_timer_fn, 0);
380 ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
385 sbitmap_queue_free(&kqd->domain_tokens[i]);
391 kqd->domain_p99[i] = -1;
392 kqd->latency_targets[i] = kyber_latency_targets[i];
395 return kqd;
398 free_percpu(kqd->cpu_latency);
400 kfree(kqd);
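kyber_queue_data_alloc() allocates on the request queue's NUMA node
(kzalloc_node, alloc_percpu_gfp, sbitmap_queue_init_node) and unwinds any
partial setup through labeled gotos, which is why lines 398 and 400 free in
exactly the reverse order of construction. A userspace sketch of the same
unwind idiom (the struct and its members are invented for illustration):

#include <stdlib.h>

struct ctx {
	int *latency;
	int *tokens;
};

/* Each failure label frees only what was set up before it, in reverse. */
static struct ctx *ctx_alloc(void)
{
	struct ctx *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;

	c->latency = calloc(16, sizeof(*c->latency));
	if (!c->latency)
		goto err_ctx;

	c->tokens = calloc(16, sizeof(*c->tokens));
	if (!c->tokens)
		goto err_latency;

	return c;

err_latency:
	free(c->latency);
err_ctx:
	free(c);
	return NULL;
}

int main(void)
{
	struct ctx *c = ctx_alloc();

	if (c) {
		free(c->tokens);
		free(c->latency);
		free(c);
	}
	return 0;
}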
407 struct kyber_queue_data *kqd;
414 kqd = kyber_queue_data_alloc(q);
415 if (IS_ERR(kqd)) {
417 return PTR_ERR(kqd);
424 eq->elevator_data = kqd;
432 struct kyber_queue_data *kqd = e->elevator_data;
435 timer_shutdown_sync(&kqd->timer);
436 blk_stat_disable_accounting(kqd->q);
439 sbitmap_queue_free(&kqd->domain_tokens[i]);
440 free_percpu(kqd->cpu_latency);
441 kfree(kqd);
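kyber_exit_sched() tears down in the reverse order of setup, and the first
step matters most: timer_shutdown_sync() guarantees the callback has finished
and can never be re-armed before the token sbitmaps and per-cpu histograms it
touches are freed. A loose userspace analogy of that ordering, with a worker
thread standing in for the timer (pthreads here are purely illustrative, not
how the kernel does it):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct sched_data {
	atomic_bool stop;
	pthread_t worker;
	int *stats;
};

static void *worker_fn(void *arg)
{
	struct sched_data *d = arg;

	while (!atomic_load(&d->stop))
		d->stats[0]++;	/* touches memory that teardown will free */
	return NULL;
}

/* Quiesce the async worker first (timer_shutdown_sync in Kyber),
 * then free everything it might have been using. */
static void sched_exit(struct sched_data *d)
{
	atomic_store(&d->stop, true);
	pthread_join(d->worker, NULL);	/* nothing async runs past here */
	free(d->stats);
	free(d);
}

int main(void)
{
	struct sched_data *d = calloc(1, sizeof(*d));

	d->stats = calloc(1, sizeof(*d->stats));
	pthread_create(&d->worker, NULL, worker_fn, d);
	sched_exit(d);
	return 0;
}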
455 struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
459 kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
461 sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, kqd->async_depth);
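kyber_depth_updated() reserves tag space for synchronous I/O by capping
asynchronous requests at KYBER_ASYNC_PERCENT (75%) of a sbitmap word's
capacity (1 << shift); sbitmap_queue_min_shallow_depth() then tells the
bitmap the smallest shallow limit it must honor so wakeups stay correct.
kyber_limit_depth() (lines 561-563 below) applies the cap to each async
allocation. A trivial model of the computation (the shift in main() is just
an example value):

#include <stdio.h>

#define ASYNC_PERCENT 75U	/* KYBER_ASYNC_PERCENT */

/* 75% of one sbitmap word's capacity; shallow limits apply per word. */
static unsigned int async_depth(unsigned int shift)
{
	return (1U << shift) * ASYNC_PERCENT / 100U;
}

int main(void)
{
	printf("shift 6: async I/O limited to %u of %u tags per word\n",
	       async_depth(6), 1U << 6);	/* 48 of 64 */
	return 0;
}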
540 static void rq_clear_domain_token(struct kyber_queue_data *kqd,
549 sbitmap_queue_clear(&kqd->domain_tokens[sched_domain], nr,
561 struct kyber_queue_data *kqd = data->q->elevator->elevator_data;
563 data->shallow_depth = kqd->async_depth;
616 struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
618 rq_clear_domain_token(kqd, rq);
641 struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
650 cpu_latency = get_cpu_ptr(kqd->cpu_latency);
651 target = kqd->latency_targets[sched_domain];
656 put_cpu_ptr(kqd->cpu_latency);
658 timer_reduce(&kqd->timer, jiffies + HZ / 10);
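kyber_completed_request() records the finished request's total and I/O
latencies into the current CPU's histogram against that domain's target
(get_cpu_ptr/put_cpu_ptr at lines 650-656 keep the update preemption-safe),
then timer_reduce() arms the timer for at most HZ/10, so fresh samples get
processed within 100ms. A standalone model of the latency-to-bucket mapping,
mirroring the divisor logic of the kernel's add_latency_sample() helper (the
2 ms target matches Kyber's default read target; the sample latencies are
made up):

#include <stdio.h>
#include <stdint.h>

#define LATENCY_BUCKETS 8
#define LATENCY_SHIFT	2	/* bucket width = target / 4 */

/* Buckets 0-3 are at/below the target, 4-7 are over it. */
static unsigned int latency_bucket(uint64_t target, uint64_t latency)
{
	uint64_t width = target >> LATENCY_SHIFT;
	uint64_t b;

	if (!latency)
		return 0;
	if (!width)
		width = 1;
	b = (latency - 1) / width;
	return b >= LATENCY_BUCKETS ? LATENCY_BUCKETS - 1 : (unsigned int)b;
}

int main(void)
{
	uint64_t target = 2000000;	/* 2 ms read target, in ns */

	printf("1.9 ms -> bucket %u\n", latency_bucket(target, 1900000)); /* 3 */
	printf("5 ms   -> bucket %u\n", latency_bucket(target, 5000000)); /* 7 */
	return 0;
}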
706 static int kyber_get_domain_token(struct kyber_queue_data *kqd,
711 struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
754 kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
774 nr = kyber_get_domain_token(kqd, khd, hctx);
781 trace_kyber_throttled(kqd->dev,
785 nr = kyber_get_domain_token(kqd, khd, hctx);
794 trace_kyber_throttled(kqd->dev,
805 struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
817 rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
838 rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
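kyber_get_domain_token() (line 706 onward) tries to grab a token from the
domain's sbitmap_queue; if none is free it hooks the hardware queue onto the
sbitmap's wait queue and tries once more, closing the race with a token freed
in between. kyber_dispatch_cur_domain() calls it from two sites, one for
already-flushed requests and one for pending per-cpu queues (lines 774 and
785), and emits trace_kyber_throttled() when the domain stays exhausted. A
userspace sketch of that try/hook/retry shape (the counter-based pool is a
stand-in for sbitmap_queue):

#include <stdio.h>

/* Stand-in for the per-domain sbitmap_queue of tokens. */
struct token_pool {
	unsigned int free;
};

static int get_token(struct token_pool *p)
{
	if (!p->free)
		return -1;
	p->free--;
	return 0;
}

/* Try once; on failure, register for a wakeup, then try again in case
 * a token was returned between the first attempt and registration. */
static int get_token_or_wait(struct token_pool *p)
{
	if (get_token(p) == 0)
		return 0;
	/* kernel: sbitmap_add_wait_queue() goes here */
	if (get_token(p) == 0)
		return 0;
	/* kernel: the caller traces kyber_throttled and leaves rq queued */
	return -1;
}

int main(void)
{
	struct token_pool p = { .free = 1 };

	printf("first:  %d\n", get_token_or_wait(&p));	/* 0: got a token */
	printf("second: %d\n", get_token_or_wait(&p));	/* -1: throttled */
	return 0;
}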
867 struct kyber_queue_data *kqd = e->elevator_data; \
869 return sprintf(page, "%llu\n", kqd->latency_targets[domain]); \
875 struct kyber_queue_data *kqd = e->elevator_data; \
883 kqd->latency_targets[domain] = nsec; \
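The backslashes on lines 867-883 are there because the per-domain sysfs
handlers are stamped out by a single macro: each expansion produces one show
handler printing the domain's latency target in nanoseconds and one store
handler parsing a new target. A userspace model of the same macro-stamping
pattern (the read_lat_nsec/write_lat_nsec names in the comments are Kyber's
sysfs attributes; the rest is illustrative):

#include <stdio.h>
#include <stdlib.h>

static unsigned long long latency_targets[2] = {
	2000000ULL,	/* read: 2 ms */
	10000000ULL,	/* write: 10 ms */
};

/* One macro yields a show/store pair per domain, like KYBER_LAT_SHOW_STORE. */
#define LAT_SHOW_STORE(domain, name)					\
static void name##_lat_show(void)					\
{									\
	printf("%llu\n", latency_targets[domain]);			\
}									\
static void name##_lat_store(const char *page)				\
{									\
	latency_targets[domain] = strtoull(page, NULL, 10);		\
}

LAT_SHOW_STORE(0, read)
LAT_SHOW_STORE(1, write)

int main(void)
{
	read_lat_store("4000000");	/* echo 4000000 > read_lat_nsec */
	read_lat_show();		/* 4000000 */
	write_lat_show();		/* 10000000 */
	return 0;
}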
904 struct kyber_queue_data *kqd = q->elevator->elevator_data; \
906 sbitmap_queue_show(&kqd->domain_tokens[domain], m); \
963 struct kyber_queue_data *kqd = q->elevator->elevator_data;
965 seq_printf(m, "%u\n", kqd->async_depth);