Lines Matching defs:kqd (struct kyber_queue_data, block/kyber-iosched.c)
209 static void flush_latency_buckets(struct kyber_queue_data *kqd,
213 unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
225 static int calculate_percentile(struct kyber_queue_data *kqd,
229 unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
242 if (!kqd->latency_timeout[sched_domain])
243 kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL);
245 time_is_after_jiffies(kqd->latency_timeout[sched_domain])) {
248 kqd->latency_timeout[sched_domain] = 0;
256 memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));
258 trace_kyber_latency(kqd->q, kyber_domain_names[sched_domain],
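
Both helpers above, flush_latency_buckets() at 209 and calculate_percentile() at 225, operate on the per-domain histogram kqd->latency_buckets[sched_domain][type]. As a rough, self-contained illustration of the percentile step only (not the scheduler's actual code), the following userspace C sketch walks a made-up histogram until the cumulative count covers the requested percentile; NUM_BUCKETS and the sample values are invented stand-ins for KYBER_LATENCY_BUCKETS and the real buckets.

    #include <stdio.h>

    #define NUM_BUCKETS 8   /* stand-in for KYBER_LATENCY_BUCKETS */

    /*
     * Return the index of the first bucket at which the cumulative count
     * covers the requested percentile, or -1 if there are no samples.
     */
    static int bucket_percentile(const unsigned int *buckets, unsigned int percentile)
    {
        unsigned int total = 0, running = 0;
        int i;

        for (i = 0; i < NUM_BUCKETS; i++)
            total += buckets[i];
        if (!total)
            return -1;

        /* Number of samples that must be covered to reach the percentile. */
        unsigned int needed = (total * percentile + 99) / 100;

        for (i = 0; i < NUM_BUCKETS; i++) {
            running += buckets[i];
            if (running >= needed)
                return i;
        }
        return NUM_BUCKETS - 1;
    }

    int main(void)
    {
        unsigned int samples[NUM_BUCKETS] = { 40, 30, 15, 8, 4, 2, 1, 0 };

        printf("p90 falls in bucket %d\n", bucket_percentile(samples, 90));
        return 0;
    }

The sketch returns a bucket index rather than an absolute latency, which is all the surrounding timer logic in the listing needs to compare against.
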
265 static void kyber_resize_domain(struct kyber_queue_data *kqd,
269 if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
270 sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
271 trace_kyber_adjust(kqd->q, kyber_domain_names[sched_domain],
278 struct kyber_queue_data *kqd = from_timer(kqd, t, timer);
287 cpu_latency = per_cpu_ptr(kqd->cpu_latency, cpu);
289 flush_latency_buckets(kqd, cpu_latency, sched_domain,
291 flush_latency_buckets(kqd, cpu_latency, sched_domain,
304 p90 = calculate_percentile(kqd, sched_domain, KYBER_IO_LATENCY,
319 p99 = calculate_percentile(kqd, sched_domain,
331 p99 = kqd->domain_p99[sched_domain];
332 kqd->domain_p99[sched_domain] = -1;
334 kqd->domain_p99[sched_domain] = p99;
349 orig_depth = kqd->domain_tokens[sched_domain].sb.depth;
351 kyber_resize_domain(kqd, sched_domain, depth);
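
The timer callback at 278 walks every CPU (287-291) and folds the per-CPU histograms into the shared ones before computing percentiles and resizing domains. Below is a minimal userspace sketch of that drain-and-reset idea using C11 atomics; NUM_CPUS, NUM_BUCKETS, and the bucket arrays are invented placeholders, and the kernel's own flush lives in flush_latency_buckets() at 209.

    #include <stdatomic.h>
    #include <stdio.h>

    #define NUM_BUCKETS 8   /* stand-in for KYBER_LATENCY_BUCKETS */
    #define NUM_CPUS    4   /* stand-in for the per-cpu allocation */

    /* Per-CPU histograms, updated from completion context. */
    static atomic_uint cpu_buckets[NUM_CPUS][NUM_BUCKETS];

    /* Aggregated histogram, consumed by the periodic timer. */
    static unsigned int buckets[NUM_BUCKETS];

    /*
     * Drain one CPU's histogram into the aggregate: each slot is read and
     * reset to zero in a single atomic exchange, so samples recorded while
     * the drain runs are neither lost nor double counted.
     */
    static void flush_cpu(unsigned int cpu)
    {
        for (unsigned int b = 0; b < NUM_BUCKETS; b++)
            buckets[b] += atomic_exchange(&cpu_buckets[cpu][b], 0);
    }

    int main(void)
    {
        atomic_fetch_add(&cpu_buckets[1][3], 5);    /* pretend completions */
        atomic_fetch_add(&cpu_buckets[2][3], 2);

        for (unsigned int cpu = 0; cpu < NUM_CPUS; cpu++)
            flush_cpu(cpu);

        printf("bucket 3 holds %u samples\n", buckets[3]);
        return 0;
    }
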
358 struct kyber_queue_data *kqd;
362 kqd = kzalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
363 if (!kqd)
366 kqd->q = q;
368 kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
370 if (!kqd->cpu_latency)
373 timer_setup(&kqd->timer, kyber_timer_fn, 0);
378 ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
383 sbitmap_queue_free(&kqd->domain_tokens[i]);
389 kqd->domain_p99[i] = -1;
390 kqd->latency_targets[i] = kyber_latency_targets[i];
393 return kqd;
396 free_percpu(kqd->cpu_latency);
398 kfree(kqd);
405 struct kyber_queue_data *kqd;
412 kqd = kyber_queue_data_alloc(q);
413 if (IS_ERR(kqd)) {
415 return PTR_ERR(kqd);
420 eq->elevator_data = kqd;
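
kyber_queue_data_alloc() (358-398) and kyber_init_sched() (405-420) show the usual allocate-everything-then-unwind pattern: the per-CPU latency storage, the timer, and one token pool per domain are set up in order, and a failure frees whatever was already initialized before returning an error the caller checks with IS_ERR(). A hedged userspace analogue of the same unwind structure is sketched below; struct queue_data and its fields are invented stand-ins, not the scheduler's types.

    #include <stdio.h>
    #include <stdlib.h>

    #define NUM_DOMAINS 3   /* stand-in for the number of scheduling domains */

    struct queue_data {
        unsigned int *per_cpu_stats;            /* stand-in for kqd->cpu_latency */
        unsigned int *domain_tokens[NUM_DOMAINS];
    };

    /*
     * Allocate everything up front and unwind in reverse order on failure,
     * in the spirit of the error labels visible around lines 396-398.
     */
    static struct queue_data *queue_data_alloc(void)
    {
        struct queue_data *qd;
        int i;

        qd = calloc(1, sizeof(*qd));
        if (!qd)
            return NULL;

        qd->per_cpu_stats = calloc(64, sizeof(unsigned int));
        if (!qd->per_cpu_stats)
            goto err_qd;

        for (i = 0; i < NUM_DOMAINS; i++) {
            qd->domain_tokens[i] = calloc(256, sizeof(unsigned int));
            if (!qd->domain_tokens[i]) {
                /* Free only what was successfully set up so far. */
                while (--i >= 0)
                    free(qd->domain_tokens[i]);
                goto err_stats;
            }
        }
        return qd;

    err_stats:
        free(qd->per_cpu_stats);
    err_qd:
        free(qd);
        return NULL;
    }

    int main(void)
    {
        struct queue_data *qd = queue_data_alloc();

        printf("allocation %s\n", qd ? "succeeded" : "failed");
        /* Teardown frees in reverse order of allocation. */
        if (qd) {
            for (int i = 0; i < NUM_DOMAINS; i++)
                free(qd->domain_tokens[i]);
            free(qd->per_cpu_stats);
            free(qd);
        }
        return 0;
    }

Freeing in strict reverse order of allocation is what keeps the partial-failure paths safe, and it mirrors the normal teardown order visible in kyber_exit_sched() (428-436).
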
428 struct kyber_queue_data *kqd = e->elevator_data;
431 del_timer_sync(&kqd->timer);
434 sbitmap_queue_free(&kqd->domain_tokens[i]);
435 free_percpu(kqd->cpu_latency);
436 kfree(kqd);
450 struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
454 kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
456 sbitmap_queue_min_shallow_depth(tags->bitmap_tags, kqd->async_depth);
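
Line 454 derives the asynchronous depth as a fixed percentage of the full tag space (1U << shift), and line 456 tells the tag bitmap the smallest shallow depth it must support. A tiny arithmetic check of that computation, with an assumed shift of 8 and 75 used here purely as an example percentage:

    #include <stdio.h>

    #define ASYNC_PERCENT 75U   /* example value, for illustration only */

    int main(void)
    {
        /*
         * Cap asynchronous requests to a percentage of the full tag depth:
         * with a shift of 8 (256 scheduler tags), async I/O is limited to
         * 192 tags, leaving headroom for synchronous requests.
         */
        unsigned int shift = 8;
        unsigned int async_depth = (1U << shift) * ASYNC_PERCENT / 100U;

        printf("async_depth = %u of %u tags\n", async_depth, 1U << shift);
        return 0;
    }

The resulting value is what kyber_limit_depth() later installs as data->shallow_depth at 557, so only synchronous requests can consume the remaining tags.
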
534 static void rq_clear_domain_token(struct kyber_queue_data *kqd,
543 sbitmap_queue_clear(&kqd->domain_tokens[sched_domain], nr,
555 struct kyber_queue_data *kqd = data->q->elevator->elevator_data;
557 data->shallow_depth = kqd->async_depth;
609 struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
611 rq_clear_domain_token(kqd, rq);
634 struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
643 cpu_latency = get_cpu_ptr(kqd->cpu_latency);
644 target = kqd->latency_targets[sched_domain];
649 put_cpu_ptr(kqd->cpu_latency);
651 timer_reduce(&kqd->timer, jiffies + HZ / 10);
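
Request completion (634-651) grabs the per-CPU latency state with get_cpu_ptr(), looks up the domain's target, records the sample, and then nudges the timer forward with timer_reduce(). The sketch below only illustrates the bucketing idea, mapping a latency to a histogram slot scaled by the target; NUM_BUCKETS, GOOD_BUCKETS, and latency_bucket() are invented names and the formula is an assumption, not the scheduler's.

    #include <stdio.h>

    #define NUM_BUCKETS  8  /* stand-in for KYBER_LATENCY_BUCKETS */
    #define GOOD_BUCKETS 4  /* buckets below the latency target */

    /*
     * Map a completion latency to a histogram bucket so that samples below
     * the target fill the first GOOD_BUCKETS buckets and slower samples
     * spill into the rest. Purely an illustration of per-domain latency
     * targets; the scheduler's own bucketing lives in the functions the
     * listing above points at.
     */
    static unsigned int latency_bucket(unsigned long long latency_ns,
                                       unsigned long long target_ns)
    {
        unsigned long long step = target_ns / GOOD_BUCKETS;
        unsigned long long bucket;

        if (!step)
            step = 1;
        bucket = latency_ns / step;
        return bucket < NUM_BUCKETS ? (unsigned int)bucket : NUM_BUCKETS - 1;
    }

    int main(void)
    {
        unsigned long long target = 2000000ULL;  /* example 2 ms target */

        printf("0.5 ms -> bucket %u\n", latency_bucket(500000ULL, target));
        printf("3 ms   -> bucket %u\n", latency_bucket(3000000ULL, target));
        return 0;
    }
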
699 static int kyber_get_domain_token(struct kyber_queue_data *kqd,
704 struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
747 kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
767 nr = kyber_get_domain_token(kqd, khd, hctx);
774 trace_kyber_throttled(kqd->q,
778 nr = kyber_get_domain_token(kqd, khd, hctx);
787 trace_kyber_throttled(kqd->q,
798 struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
810 rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
831 rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
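
Dispatch (747-831) must first obtain a token from the domain's pool via kyber_get_domain_token() at 699, and gives up (tracing a throttle event, 774 and 787) when none is free. A toy userspace version of that grab-or-bail step, with a plain counter standing in for the sbitmap_queue:

    #include <stdio.h>

    #define DOMAIN_DEPTH 4  /* illustrative per-domain token depth */

    /* Toy token pool standing in for the per-domain sbitmap_queue. */
    struct domain_tokens {
        unsigned int used;
        unsigned int depth;
    };

    /*
     * Try to take one dispatch token; return its index, or -1 when the
     * domain is throttled because all tokens are already in flight.
     */
    static int get_domain_token(struct domain_tokens *tokens)
    {
        if (tokens->used >= tokens->depth)
            return -1;
        return (int)tokens->used++;
    }

    int main(void)
    {
        struct domain_tokens reads = { .used = 0, .depth = DOMAIN_DEPTH };

        for (int i = 0; i < 6; i++) {
            int nr = get_domain_token(&reads);

            if (nr < 0) {
                printf("dispatch %d: throttled\n", i);
                continue;
            }
            printf("dispatch %d: got token %d\n", i, nr);
        }
        return 0;
    }
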
860 struct kyber_queue_data *kqd = e->elevator_data; \
862 return sprintf(page, "%llu\n", kqd->latency_targets[domain]); \
868 struct kyber_queue_data *kqd = e->elevator_data; \
876 kqd->latency_targets[domain] = nsec; \
897 struct kyber_queue_data *kqd = q->elevator->elevator_data; \
899 sbitmap_queue_show(&kqd->domain_tokens[domain], m); \
956 struct kyber_queue_data *kqd = q->elevator->elevator_data;
958 seq_printf(m, "%u\n", kqd->async_depth);
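
The sysfs attributes (860-876) are generated from a backslash-continued show/store template that prints or parses a domain's latency target, and the debugfs hook at 956-958 dumps the computed async_depth. The sketch below imitates only the macro-generated show/store pairing in ordinary userspace C; the macro name, domains, and accessors are placeholders, not the scheduler's interface.

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative per-domain latency targets, in nanoseconds. */
    static unsigned long long latency_targets[2] = { 2000000ULL, 10000000ULL };

    /*
     * Macro-generated accessor pairs, in the spirit of the show/store
     * template around lines 860-876. Domain indices and names are made up.
     */
    #define LAT_SHOW_STORE(domain, name)                                   \
    static int name##_lat_show(char *page, size_t len)                     \
    {                                                                      \
        return snprintf(page, len, "%llu\n", latency_targets[domain]);     \
    }                                                                      \
    static int name##_lat_store(const char *page)                          \
    {                                                                      \
        char *end;                                                         \
        unsigned long long nsec = strtoull(page, &end, 10);                \
                                                                           \
        if (end == page)                                                   \
            return -1;                                                     \
        latency_targets[domain] = nsec;                                    \
        return 0;                                                          \
    }
    LAT_SHOW_STORE(0, read)
    LAT_SHOW_STORE(1, write)

    int main(void)
    {
        char buf[32];

        read_lat_store("1000000");
        write_lat_store("15000000");
        read_lat_show(buf, sizeof(buf));
        printf("read target: %s", buf);
        write_lat_show(buf, sizeof(buf));
        printf("write target: %s", buf);
        return 0;
    }
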